text stringlengths 4 1.02M | meta dict |
|---|---|
import binascii
import hashlib
from neo.Core.Cryptography.Crypto import Crypto
from neo.Core.Fixed8 import Fixed8
from neo.Core.UInt160 import UInt160
from neo.Core.State.ContractState import ContractPropertyState
# used for ApplicationEngine.Run
from neo.SmartContract import TriggerType
from neo.VM import OpCode
from neo.VM.ExecutionEngine import ExecutionEngine
from neo.VM.InteropService import Array
from neo.VM.OpCode import APPCALL, TAILCALL, \
SYSCALL, NOP, SHA256, SHA1, HASH160, HASH256, CHECKSIG, CHECKMULTISIG, VERIFY
from neo.logging import log_manager
from neo.SmartContract.StateMachine import StateMachine
from neo.EventHub import events
# Module-level logger for VM execution messages (channel 'vm').
logger = log_manager.getLogger('vm')
def interop_hash(method):
    """Return the 32-bit interop identifier for an interop API method name.

    The identifier is the first four bytes of the SHA256 digest of the
    method name, interpreted as an unsigned little-endian integer.
    """
    digest = hashlib.sha256(method.encode()).digest()
    return int.from_bytes(digest[:4], byteorder='little', signed=False)
# Precomputed interop identifiers for the API calls whose price must be
# computed locally when the interop service does not supply one.
# The "AntShares.*" names are the legacy aliases of the "Neo.*" APIs.
HASH_NEO_ASSET_CREATE = interop_hash("Neo.Asset.Create")
HASH_ANT_ASSET_CREATE = interop_hash("AntShares.Asset.Create")
HASH_NEO_ASSET_RENEW = interop_hash("Neo.Asset.Renew")
HASH_ANT_ASSET_RENEW = interop_hash("AntShares.Asset.Renew")
HASH_NEO_CONTRACT_CREATE = interop_hash("Neo.Contract.Create")
HASH_NEO_CONTRACT_MIGRATE = interop_hash("Neo.Contract.Migrate")
HASH_ANT_CONTRACT_CREATE = interop_hash("AntShares.Contract.Create")
HASH_ANT_CONTRACT_MIGRATE = interop_hash("AntShares.Contract.Migrate")
HASH_SYSTEM_STORAGE_PUT = interop_hash("System.Storage.Put")
HASH_SYSTEM_STORAGE_PUTEX = interop_hash("System.Storage.PutEx")
HASH_NEO_STORAGE_PUT = interop_hash("Neo.Storage.Put")
HASH_ANT_STORAGE_PUT = interop_hash("AntShares.Storage.Put")
class ApplicationEngine(ExecutionEngine):
    """NEO VM execution engine that meters GAS consumption per instruction.

    Prices returned by GetPrice()/GetPriceForSysCall() are in "price units";
    one unit corresponds to ``ratio`` Fixed8 raw units of GAS.
    """

    # 1 price unit == ratio Fixed8 raw units of GAS
    ratio = 100000
    # every invocation gets 10 GAS for free (Fixed8 raw value)
    gas_free = 10 * 100000000
    # operation cap when running in test mode, where GAS is not charged
    max_free_ops = 500000

    def GasConsumed(self):
        """Return the GAS consumed so far as a Fixed8."""
        return Fixed8(self.gas_consumed)

    def __init__(self, trigger_type, container, snapshot, gas, testMode=False, exit_on_error=True):
        """
        Args:
            trigger_type (neo.SmartContract.TriggerType): what triggered execution.
            container: the script container (normally a transaction).
            snapshot: the blockchain state snapshot to execute against.
            gas (Fixed8): extra GAS supplied on top of the free allowance.
            testMode (bool): when True, GAS is not charged but the number of
                operations is capped at ``max_free_ops``.
            exit_on_error (bool): whether the VM should halt on faults.
        """
        super(ApplicationEngine, self).__init__(container=container, crypto=Crypto.Default(), table=snapshot, service=StateMachine(trigger_type, snapshot),
                                                exit_on_error=exit_on_error)
        self.gas_amount = self.gas_free + gas.value
        self.testMode = testMode
        self.snapshot = snapshot
        self._is_stackitem_count_strict = True
        self.debugger = None
        self.gas_consumed = 0
        self.invocation_args = None

    def CheckDynamicInvoke(self):
        """Validate that the current APPCALL/TAILCALL/CALL_ED/CALL_EDT is
        permitted for the currently executing contract.

        Returns:
            bool: True if the instruction may proceed.
        """
        cx = self.CurrentContext
        opcode = cx.CurrentInstruction.OpCode

        if opcode in [OpCode.APPCALL, OpCode.TAILCALL]:
            script_hash = cx.CurrentInstruction.Operand
            for b in script_hash:
                # if any of the bytes are greater than 0, this is a normal app call
                if b > 0:
                    return True

            # all-zero operand means a dynamic app call: look up the currently
            # executing contract and check whether it may make dynamic calls
            current = UInt160(data=cx.ScriptHash())
            current_contract_state = self.snapshot.Contracts[current.ToBytes()]

            # if current contract state cant do dynamic calls, return False
            return current_contract_state.HasDynamicInvoke
        elif opcode in [OpCode.CALL_ED, OpCode.CALL_EDT]:
            # dynamic call variants: same permission check as above
            current = UInt160(data=cx.ScriptHash())
            current_contract_state = self.snapshot.Contracts[current.ToBytes()]
            return current_contract_state.HasDynamicInvoke
        else:
            return True

    def PreExecuteInstruction(self):
        """Charge GAS for the next instruction and enforce execution limits.

        Returns:
            bool: False to abort execution (out of GAS, too many free
                operations, or a disallowed dynamic invoke), True to continue.
        """
        if self.CurrentContext.InstructionPointer >= self.CurrentContext.Script.Length:
            return True

        self.gas_consumed = self.gas_consumed + (self.GetPrice() * self.ratio)

        if not self.testMode and self.gas_consumed > self.gas_amount:
            return False

        if self.testMode and self.ops_processed > self.max_free_ops:
            logger.debug("Too many free operations processed")
            return False

        try:
            if not self.CheckDynamicInvoke():
                return False
        except Exception:
            # NOTE(review): any lookup failure (e.g. unknown contract) is
            # treated as "allowed" here rather than aborting execution.
            pass

        return True

    def GetPrice(self):
        """Return the price (in price units) of the current instruction."""
        opcode = self.CurrentContext.CurrentInstruction.OpCode

        if opcode <= NOP:
            return 0
        elif opcode in [APPCALL, TAILCALL]:
            return 10
        elif opcode == SYSCALL:
            return self.GetPriceForSysCall()
        elif opcode in [SHA1, SHA256]:
            return 10
        elif opcode in [HASH160, HASH256]:
            return 20
        elif opcode in [CHECKSIG, VERIFY]:
            return 100
        elif opcode == CHECKMULTISIG:
            # price scales with the number of public keys on the stack
            if self.CurrentContext.EvaluationStack.Count == 0:
                return 1

            item = self.CurrentContext.EvaluationStack.Peek()

            if isinstance(item, Array):
                n = item.Count
            else:
                n = item.GetBigInteger()

            if n < 1:
                return 1

            return 100 * n
        else:
            return 1

    def GetPriceForSysCall(self):
        """Return the price (in price units) of the current SYSCALL.

        Uses the interop service price when available, otherwise falls back
        to hard-coded prices for asset/contract creation, renewal and
        storage writes.
        """
        instruction = self.CurrentContext.CurrentInstruction
        api_hash = instruction.TokenU32 if len(instruction.Operand) == 4 else interop_hash(instruction.TokenString)

        price = self._Service.GetPrice(api_hash)

        if price > 0:
            return price

        if api_hash == HASH_NEO_ASSET_CREATE or api_hash == HASH_ANT_ASSET_CREATE:
            return int(5000 * 100000000 / self.ratio)

        # BUGFIX: this previously compared HASH_ANT_ASSET_RENEW twice, so the
        # "Neo.Asset.Renew" syscall was never matched and fell through to the
        # default price of 1.
        if api_hash == HASH_NEO_ASSET_RENEW or api_hash == HASH_ANT_ASSET_RENEW:
            return int(self.CurrentContext.EvaluationStack.Peek(1).GetBigInteger() * 5000 * 100000000 / self.ratio)

        if api_hash == HASH_NEO_CONTRACT_CREATE or api_hash == HASH_NEO_CONTRACT_MIGRATE or api_hash == HASH_ANT_CONTRACT_CREATE or api_hash == HASH_ANT_CONTRACT_MIGRATE:
            fee = int(100 * 100000000 / self.ratio)  # 100 gas for contract with no storage no dynamic invoke

            contract_properties = self.CurrentContext.EvaluationStack.Peek(3).GetBigInteger()
            if contract_properties < 0 or contract_properties > 0xff:
                raise ValueError("Invalid contract properties")

            if contract_properties & ContractPropertyState.HasStorage > 0:
                fee += int(400 * 100000000 / self.ratio)  # if contract has storage, we add 400 gas

            if contract_properties & ContractPropertyState.HasDynamicInvoke > 0:
                fee += int(500 * 100000000 / self.ratio)  # if it has dynamic invoke, add extra 500 gas

            return fee

        if api_hash == HASH_SYSTEM_STORAGE_PUT or api_hash == HASH_SYSTEM_STORAGE_PUTEX or api_hash == HASH_NEO_STORAGE_PUT or api_hash == HASH_ANT_STORAGE_PUT:
            # 1000 units per started KiB of key + value
            l1 = len(self.CurrentContext.EvaluationStack.Peek(1).GetByteArray())
            l2 = len(self.CurrentContext.EvaluationStack.Peek(2).GetByteArray())
            return (int((l1 + l2 - 1) / 1024) + 1) * 1000

        return 1

    @staticmethod
    def Run(snapshot, script, container=None, exit_on_error=False, gas=Fixed8.Zero(), test_mode=True, wb=None):
        """
        Runs a script in a test invoke environment.

        Args:
            snapshot: blockchain state snapshot to execute against.
            script (bytes): the script to run; hex-encoded unless ``wb`` is set.
            container (neo.Core.TX.Transaction): [optional] the transaction to use as the script container
            exit_on_error (bool): whether the VM should halt on faults.
            gas (Fixed8): extra GAS to supply beyond the free allowance.
            test_mode (bool): when True, GAS is not charged.
            wb: when truthy, ``script`` is treated as raw bytes instead of hex.

        Returns:
            ApplicationEngine
        """
        engine = ApplicationEngine(TriggerType.Application, container, snapshot, gas, test_mode)

        # maybe not the best solution
        # but one for now: scripts arrive hex-encoded unless wb supplied raw bytes
        if not wb:
            _script = binascii.unhexlify(script)
        else:
            _script = script

        engine.LoadScript(_script)
        engine.Execute()

        for event in engine._Service.events_to_dispatch:
            events.emit(event.event_type, event)

        return engine
| {
"content_hash": "16e22765df2570cc1571ff09abd44f6f",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 170,
"avg_line_length": 38.40886699507389,
"alnum_prop": 0.6456329357445171,
"repo_name": "hal0x2328/neo-python",
"id": "a7cb982ef1ae65b528ddef38d6dd04a6fb0e9f9b",
"size": "7797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/SmartContract/ApplicationEngine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2059"
},
{
"name": "Makefile",
"bytes": "1898"
},
{
"name": "Python",
"bytes": "1758220"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
} |
'''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Mimics the PyCrypto hash interface: SHA256.new(data) returns a
    # hashlib sha256 object supporting .digest().
    new = hashlib.sha256
if str != bytes:
    # Python 3.x: shadow ord/chr so the byte-level code below works on
    # both major versions.
    def ord(c):
        # Iterating bytes on Python 3 already yields ints, so this is a no-op.
        return c
    def chr(n):
        # Produce a single-byte bytes object instead of a one-char str.
        return bytes( (n,) )
# The 58-character Bitcoin alphabet (no 0, O, I, l to avoid confusion).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars

def b58encode(v):
    """Encode v, a string of bytes, to base58.

    Works on both Python 2 (str input, iteration yields 1-char strings)
    and Python 3 (bytes input, iteration yields ints).
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        byte_val = c if isinstance(c, int) else ord(c)
        long_value += (256 ** i) * byte_val

    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result

    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s.
    # BUGFIX: the elements of a Python 3 bytes object are ints, so the old
    # comparison against the string '\0' never matched and the leading
    # '1' padding was silently dropped.
    nPad = 0
    for c in v:
        if (c if isinstance(c, int) else ord(c)) == 0:
            nPad += 1
        else:
            break

    return (__b58chars[0] * nPad) + result
def b58decode(v, length = None):
    """Decode base58 string v into a string of bytes.

    Returns None when *length* is given and the decoded payload does not
    have exactly that many bytes.
    """
    long_value = 0
    for power, digit in enumerate(reversed(v)):
        long_value += __b58chars.find(digit) * (__b58base ** power)

    result = bytes()
    while long_value >= 256:
        long_value, remainder = divmod(long_value, 256)
        result = chr(remainder) + result
    result = chr(long_value) + result

    # Leading '1' characters re-expand to leading zero bytes.
    pad_count = 0
    for digit in v:
        if digit != __b58chars[0]:
            break
        pad_count += 1
    result = chr(0) * pad_count + result

    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return 32-bit checksum based on SHA256 (first 4 bytes of double-SHA256)."""
    inner = SHA256.new(v).digest()
    return SHA256.new(inner).digest()[:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum appended."""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """Decode a base58 string, verify and strip the 4-byte checksum.

    Returns the payload bytes, or None if decoding fails or the checksum
    does not match.
    """
    result = b58decode(v)
    if result is None:
        return None
    # Fix: the original computed the checksum twice and left the first
    # result in an unused local (h3); compute it once.
    payload, check = result[:-4], result[-4:]
    if check == checksum(payload):
        return payload
    return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    addr = b58decode_chk(strAddress)
    # a valid address decodes to exactly 21 bytes: 1 version byte + 20-byte hash
    if addr is None or len(addr) != 21:
        return None
    return ord(addr[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # Fix: compare integer values with ==, not 'is'; identity of small ints
    # is a CPython implementation detail (SyntaxWarning on Python 3.8+).
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| {
"content_hash": "876d477188b6fd813970c9a89a07028f",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 97,
"avg_line_length": 28.096153846153847,
"alnum_prop": 0.5859000684462696,
"repo_name": "PCATLMarketing/sovereigncoin",
"id": "44dc6fa79c9afc1f505cbde73747ed27ccd16b8b",
"size": "2922",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/testgen/base58.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "105281"
},
{
"name": "C++",
"bytes": "2507350"
},
{
"name": "CSS",
"bytes": "1216"
},
{
"name": "IDL",
"bytes": "14266"
},
{
"name": "JavaScript",
"bytes": "70"
},
{
"name": "Nu",
"bytes": "275"
},
{
"name": "Objective-C",
"bytes": "2846"
},
{
"name": "PHP",
"bytes": "2270"
},
{
"name": "Perl",
"bytes": "14937"
},
{
"name": "Python",
"bytes": "72015"
},
{
"name": "Shell",
"bytes": "13971"
},
{
"name": "TypeScript",
"bytes": "5396370"
}
],
"symlink_target": ""
} |
import unittest
from datetime import timedelta
from airflow import DAG, models
from airflow.sensors.time_delta_sensor import TimeDeltaSensor
from airflow.utils.timezone import datetime
# Fixed execution date used for deterministic test DAG runs.
DEFAULT_DATE = datetime(2015, 1, 1)
# Empty dag folder so the DagBag only loads the bundled example DAGs.
DEV_NULL = '/dev/null'
TEST_DAG_ID = 'unit_tests'
class TestTimedeltaSensor(unittest.TestCase):
    """Tests for airflow.sensors.time_delta_sensor.TimeDeltaSensor."""

    def setUp(self):
        """Create a DagBag and a fresh test DAG for each test."""
        self.dagbag = models.DagBag(
            dag_folder=DEV_NULL, include_examples=True)
        self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG(TEST_DAG_ID, default_args=self.args)

    def test_timedelta_sensor(self):
        """Run a 2-second TimeDeltaSensor task once; op.run raises on failure."""
        op = TimeDeltaSensor(
            task_id='timedelta_sensor_check',
            delta=timedelta(seconds=2),
            dag=self.dag)
        op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
| {
"content_hash": "2b8473486c8aebbd3debb295c4e88c28",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 84,
"avg_line_length": 33.16,
"alnum_prop": 0.6803377563329313,
"repo_name": "wileeam/airflow",
"id": "c06694ee0b81dc8dc09db4425b3b6c774d35fb2a",
"size": "1616",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sensors/test_timedelta_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148281"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9763694"
},
{
"name": "Shell",
"bytes": "221331"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
'''
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import unittest
import tempfile
import json
from pandas import DataFrame
from denovoFilter.change_last_base_sites import change_conserved_last_base_consequence
from tests.compare_dataframes import CompareTables
class TestChangeLastBaseSites(CompareTables):
    """Tests for change_conserved_last_base_consequence."""

    def test_change_last_base_sites(self):
        """Check conversion of consequences for sites at the last base of exons."""
        last_base_sites = tempfile.NamedTemporaryFile(mode='w')
        json.dump([['1', 10], ['2', 10]], last_base_sites)
        last_base_sites.flush()

        # Three variants: one whose consequence must be converted, one at a
        # non-matching location, and one at a matching location that is not
        # a SNV, so it must be left untouched.
        table = DataFrame({'person_stable_id': ['a', 'b', 'c'],
            'chrom': ['1', '2', '2'],
            'pos': [10, 1, 10],
            'ref': ['A', 'G', 'GG'],
            'alt': ['C', 'T', 'G'],
            'consequence': ['synonymous_variant', 'synonymous_variant', 'frameshift_variant'],
            })

        # expected output only differs in the first consequence
        expected = table.copy()
        expected['consequence'] = ['conserved_exon_terminus_variant',
            'synonymous_variant', 'frameshift_variant']

        self.compare_tables(
            change_conserved_last_base_consequence(table, last_base_sites.name),
            expected)
| {
"content_hash": "7689db62c61d83e4ba087cb48f132ec8",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 113,
"avg_line_length": 44.719298245614034,
"alnum_prop": 0.6939976461357396,
"repo_name": "jeremymcrae/denovoFilter",
"id": "a6f691cb80056e0639e230a33537a89bb2ee078f",
"size": "2549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_change_last_base_sites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113137"
}
],
"symlink_target": ""
} |
from buildbot import config
class ConfigErrorsMixin(object):
    """Mixin with assertion helpers for buildbot ConfigErrors objects.

    Intended to be mixed into a unittest.TestCase: it relies on the host
    class providing fail() and assertEqual().
    """

    def assertConfigError(self, errors, substr_or_re):
        """Assert that *errors* holds exactly one error matching
        *substr_or_re* (a substring, or any object with a .search method
        such as a compiled regex)."""
        if len(errors.errors) > 1:
            self.fail("too many errors: %s" % (errors.errors,))
        elif len(errors.errors) < 1:
            self.fail("expected error did not occur")
        elif isinstance(substr_or_re, str):
            if substr_or_re not in errors.errors[0]:
                self.fail("non-matching error: %s" % (errors.errors,))
        else:
            if not substr_or_re.search(errors.errors[0]):
                self.fail("non-matching error: %s" % (errors.errors,))

    def assertRaisesConfigError(self, substr_or_re, fn):
        """Assert that calling *fn* raises ConfigErrors matching *substr_or_re*."""
        try:
            fn()
        # Fix: 'except X, e' is Python-2-only syntax; 'except X as e' is
        # valid on Python 2.6+ and on Python 3 (PEP 3110).
        except config.ConfigErrors as e:
            self.assertConfigError(e, substr_or_re)
        else:
            self.fail("ConfigErrors not raised")

    def assertNoConfigErrors(self, errors):
        """Assert that *errors* contains no errors at all."""
        self.assertEqual(errors.errors, [])
| {
"content_hash": "5e91f874dbd300de7772bd956c095b4d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 70,
"avg_line_length": 36.53846153846154,
"alnum_prop": 0.5915789473684211,
"repo_name": "denny820909/builder",
"id": "3747977e8dd7d3ba01f94a4808d5b1ead5f7eef2",
"size": "1656",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/util/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
import sys
import inspect
# True when running under Python 2; selects the compat shims defined below.
PY2 = sys.version_info[0] == 2
def with_metaclass(meta, *bases):
    """Create a base class that applies *meta* as metaclass to subclasses.

    Taken from flask/six: a dummy metaclass intercepts the first subclass
    creation and re-invokes *meta* with the real *bases*.
    """
    class _Shim(meta):
        def __new__(mcs, name, _shim_bases, namespace):
            # Discard the shim base and build the class from *bases* instead.
            return meta(name, bases, namespace)
    return type.__new__(_Shim, 'temporary_class', (), {})
if PY2:
    # Python 2: native unicode/basestring types and izip_longest.
    text_type = unicode
    string_type = basestring
    from itertools import izip_longest as zip_longest

    def with_str_method(cls):
        """Class decorator that handles __str__ compat between py2 and py3."""
        # In python2, the __str__ should be __unicode__
        # and __str__ should return bytes.
        cls.__unicode__ = cls.__str__

        def __str__(self):
            return self.__unicode__().encode('utf-8')
        cls.__str__ = __str__
        return cls

    def with_repr_method(cls):
        """Class decorator that handle __repr__ with py2 and py3."""
        # This is almost the same thing as with_str_method *except*
        # it uses the unicode_escape encoding. This also means we need to be
        # careful encoding the input multiple times, so we only encode
        # if we get a unicode type.
        original_repr_method = cls.__repr__

        def __repr__(self):
            original_repr = original_repr_method(self)
            if isinstance(original_repr, text_type):
                original_repr = original_repr.encode('unicode_escape')
            return original_repr
        cls.__repr__ = __repr__
        return cls

    def get_methods(cls):
        # On py2, methods of a class are recognised by inspect.ismethod.
        for name, method in inspect.getmembers(cls,
                                               predicate=inspect.ismethod):
            yield name, method
else:
    # Python 3: str covers both roles, zip_longest exists natively.
    text_type = str
    string_type = str
    from itertools import zip_longest

    def with_str_method(cls):
        # In python3, we don't need to do anything, we return a str type.
        return cls

    def with_repr_method(cls):
        # No-op on python3: __repr__ already returns str.
        return cls

    def get_methods(cls):
        # On py3, unbound class methods are plain functions (isfunction).
        for name, method in inspect.getmembers(cls,
                                               predicate=inspect.isfunction):
            yield name, method
| {
"content_hash": "6bae5600aa84f0e6f89b19fd9fd0f916",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 32.56923076923077,
"alnum_prop": 0.5710911667453944,
"repo_name": "Drudenhaus/aws-ec2rescue-linux",
"id": "2ed0fe78792602e21bc9e84a11cec5284ccc42b4",
"size": "2117",
"binary": false,
"copies": "8",
"ref": "refs/heads/develop",
"path": "lib/jmespath/compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "701"
},
{
"name": "Makefile",
"bytes": "4878"
},
{
"name": "Python",
"bytes": "4761897"
},
{
"name": "Shell",
"bytes": "5229"
}
],
"symlink_target": ""
} |
"""
Management class for Pool-related functions (join, eject, etc).
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import exception
from nova.i18n import _
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
# Module-level logger and global nova configuration handle.
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class ResourcePool(object):
    """Implements resource pool operations.

    Manages joining and ejecting XenServer hosts to/from the hypervisor
    pool backing a nova host aggregate. The host running the master
    compute performs the XenAPI operations; other hosts forward requests
    to the master over RPC.
    """

    def __init__(self, session, virtapi):
        # Cache the identity (hostname, address, uuid) of the XenServer
        # host this compute service is running on.
        host_rec = session.host.get_record(session.host_ref)
        self._host_name = host_rec['hostname']
        self._host_addr = host_rec['address']
        self._host_uuid = host_rec['uuid']
        self._session = session
        self._virtapi = virtapi
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error):
        """Undo aggregate operation when pool error raised.

        :param op: callable applied to *host* to roll back the operation.
        :param set_error: when True, mark the aggregate as errored first.
        """
        try:
            if set_error:
                metadata = {pool_states.KEY: pool_states.ERROR}
                aggregate.update_metadata(metadata)
            op(host)
        except Exception:
            LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable '
                            'state during operation on %(host)s'),
                          {'aggregate_id': aggregate.id, 'host': host})

    def add_to_aggregate(self, context, aggregate, host, slave_info=None):
        """Add a compute host to an aggregate.

        Only acts on hypervisor-pool aggregates. The first host becomes the
        pool master; later hosts are joined as slaves, either directly when
        this process is the master, or by forwarding the request via RPC.

        :param slave_info: credentials dict for the joining slave
            (as produced by _create_slave_info on the slave).
        """
        if not pool_states.is_hv_pool(aggregate.metadata):
            return

        if CONF.xenserver.independent_compute:
            raise exception.NotSupportedWithOption(
                operation='adding to a XenServer pool',
                option='CONF.xenserver.independent_compute')

        # reject the operation while the pool is in a transitional or
        # terminal state
        invalid = {pool_states.CHANGING: _('setup in progress'),
                   pool_states.DISMISSED: _('aggregate deleted'),
                   pool_states.ERROR: _('aggregate in error')}

        if (aggregate.metadata[pool_states.KEY] in invalid.keys()):
            raise exception.InvalidAggregateActionAdd(
                aggregate_id=aggregate.id,
                reason=invalid[aggregate.metadata[pool_states.KEY]])

        if (aggregate.metadata[pool_states.KEY] == pool_states.CREATED):
            aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})

        if len(aggregate.hosts) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate.id, aggregate.name)
            # save metadata so that we can find the master again
            metadata = {'master_compute': host,
                        host: self._host_uuid,
                        pool_states.KEY: pool_states.ACTIVE}
            aggregate.update_metadata(metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = aggregate.metadata['master_compute']
            if master_compute == CONF.host and master_compute != host:
                # this is the master -> do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate.id, host,
                                 slave_info.get('compute_uuid'),
                                 slave_info.get('url'), slave_info.get('user'),
                                 slave_info.get('passwd'))
                metadata = {host: slave_info.get('xenhost_uuid'), }
                aggregate.update_metadata(metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                slave_info = self._create_slave_info()

                self.compute_rpcapi.add_aggregate_host(
                    context, host, aggregate, master_compute, slave_info)

    def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
        """Remove a compute host from an aggregate.

        Mirrors add_to_aggregate: the master ejects slaves directly;
        non-masters forward the request; removing the master itself is only
        allowed when it is the last host in the pool.
        """
        slave_info = slave_info or dict()
        if not pool_states.is_hv_pool(aggregate.metadata):
            return

        invalid = {pool_states.CREATED: _('no hosts to remove'),
                   pool_states.CHANGING: _('setup in progress'),
                   pool_states.DISMISSED: _('aggregate deleted')}
        if aggregate.metadata[pool_states.KEY] in invalid.keys():
            raise exception.InvalidAggregateActionDelete(
                aggregate_id=aggregate.id,
                reason=invalid[aggregate.metadata[pool_states.KEY]])

        master_compute = aggregate.metadata['master_compute']
        if master_compute == CONF.host and master_compute != host:
            # this is the master -> instruct it to eject a host from the pool
            host_uuid = aggregate.metadata[host]
            self._eject_slave(aggregate.id,
                              slave_info.get('compute_uuid'), host_uuid)
            aggregate.update_metadata({host: None})
        elif master_compute == host:
            # Remove master from its own pool -> destroy pool only if the
            # master is on its own, otherwise raise fault. Destroying a
            # pool made only by master is fictional
            if len(aggregate.hosts) > 1:
                # NOTE: this could be avoided by doing a master
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateActionDelete(
                    aggregate_id=aggregate.id,
                    reason=_('Unable to eject %s '
                             'from the pool; pool not empty')
                    % host)
            self._clear_pool(aggregate.id)
            aggregate.update_metadata({'master_compute': None, host: None})
        elif master_compute and master_compute != host:
            # A master exists -> forward pool-eject request to master
            slave_info = self._create_slave_info()

            self.compute_rpcapi.remove_aggregate_host(
                context, host, aggregate.id, master_compute, slave_info)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(aggregate_id=aggregate.id,
                                           action='remove_from_aggregate',
                                           reason=_('Unable to eject %s '
                                                    'from the pool; No master found')
                                           % host)

    def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd):
        """Joins a slave into a XenServer resource pool.

        Runs on the master: calls the xenhost plugin on the slave with the
        master's address and credentials so the slave can pool-join.
        """
        try:
            args = {'compute_uuid': compute_uuid,
                    'url': url,
                    'user': user,
                    'password': passwd,
                    'force': jsonutils.dumps(CONF.xenserver.use_join_force),
                    'master_addr': self._host_addr,
                    'master_user': CONF.xenserver.connection_username,
                    'master_pass': CONF.xenserver.connection_password, }
            self._session.call_plugin('xenhost.py', 'host_join', args)
        except self._session.XenAPI.Failure as e:
            LOG.error("Pool-Join failed: %s", e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=_('Unable to join %s '
                                                    'in the pool') % host)

    def _eject_slave(self, aggregate_id, compute_uuid, host_uuid):
        """Eject a slave from a XenServer resource pool."""
        try:
            # shutdown nova-compute; if there are other VMs running, e.g.
            # guest instances, the eject will fail. That's a precaution
            # to deal with the fact that the admin should evacuate the host
            # first. The eject wipes out the host completely.
            vm_ref = self._session.VM.get_by_uuid(compute_uuid)
            self._session.VM.clean_shutdown(vm_ref)

            host_ref = self._session.host.get_by_uuid(host_uuid)
            self._session.pool.eject(host_ref)
        except self._session.XenAPI.Failure as e:
            LOG.error("Pool-eject failed: %s", e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=six.text_type(e.details))

    def _init_pool(self, aggregate_id, aggregate_name):
        """Set the name label of a XenServer pool."""
        try:
            pool_ref = self._session.pool.get_all()[0]
            self._session.pool.set_name_label(pool_ref, aggregate_name)
        except self._session.XenAPI.Failure as e:
            LOG.error("Unable to set up pool: %s.", e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=six.text_type(e.details))

    def _clear_pool(self, aggregate_id):
        """Clear the name label of a XenServer pool."""
        try:
            pool_ref = self._session.pool.get_all()[0]
            self._session.pool.set_name_label(pool_ref, '')
        except self._session.XenAPI.Failure as e:
            LOG.error("Pool-set_name_label failed: %s", e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=six.text_type(e.details))

    def _create_slave_info(self):
        """XenServer specific info needed to join the hypervisor pool."""
        # replace the address from the xenapi connection url
        # because this might be 169.254.0.1, i.e. xenapi
        # NOTE: password in clear is not great, but it'll do for now
        sender_url = swap_xapi_host(
            CONF.xenserver.connection_url, self._host_addr)

        return {
            "url": sender_url,
            "user": CONF.xenserver.connection_username,
            "passwd": CONF.xenserver.connection_password,
            "compute_uuid": vm_utils.get_this_vm_uuid(None),
            "xenhost_uuid": self._host_uuid,
        }
def swap_xapi_host(url, host_addr):
    """Replace the XenServer address present in 'url' with 'host_addr'."""
    original_host = urlparse.urlparse(url).hostname
    return url.replace(original_host, '%s' % host_addr)
| {
"content_hash": "112b9573f1f79c4cf97cd3690e397f30",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 79,
"avg_line_length": 48.346666666666664,
"alnum_prop": 0.559477845192131,
"repo_name": "phenoxim/nova",
"id": "5487d4fb8f79d334c95fa7b72c277c233e739312",
"size": "11533",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/virt/xenapi/pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16289098"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "282020"
}
],
"symlink_target": ""
} |
"""
Base classes for device interfaces.
:Device: The base class for all devices. This defines the interface that must be
implemented by all devices and therefore any workload and instrumentation
can always rely on.
:AndroidDevice: Implements most of the :class:`Device` interface, and extends it
with a number of Android-specific methods.
:BigLittleDevice: Subclasses :class:`AndroidDevice` to implement big.LITTLE-specific
runtime parameters.
:SimpleMulticoreDevice: Subclasses :class:`AndroidDevice` to implement homogeneous cores
device runtime parameters.
"""
import os
import imp
import string
from collections import OrderedDict
from contextlib import contextmanager
from wlauto.core.extension import Extension, ExtensionMeta, AttributeCollection, Parameter
from wlauto.core.extension_loader import ExtensionLoader
from wlauto.exceptions import DeviceError, ConfigError
from wlauto.utils.types import list_of_integers, list_of, caseless_string
__all__ = ['RuntimeParameter', 'CoreParameter', 'Device', 'DeviceMeta']
class RuntimeParameter(object):
    """
    A runtime parameter together with the getter and setter methods used
    to read and write its value.
    """

    def __init__(self, name, getter, setter,
                 getter_args=None, setter_args=None,
                 value_name='value', override=False):
        """
        :param name: the name of the parameter.
        :param getter: the getter method which returns the value of this parameter.
        :param setter: the setter method which sets the value of this parameter. The setter
                       always expects to be passed one argument when it is called.
        :param getter_args: keyword arguments to be used when invoking the getter.
        :param setter_args: keyword arguments to be used when invoking the setter.
        :param value_name: the name under which the value is handed to the setter.
        :param override: A ``bool`` that specifies whether a parameter of the same name further up the
                         hierarchy should be overridden. If this is ``False`` (the default), an exception
                         will be raised by the ``AttributeCollection`` instead.
        """
        self.name = name
        self.getter = getter
        self.getter_args = getter_args or {}
        self.setter = setter
        self.setter_args = setter_args or {}
        self.value_name = value_name
        self.override = override

    def __str__(self):
        return self.name

    __repr__ = __str__
class CoreParameter(RuntimeParameter):
    """A runtime parameter that will get expanded into a RuntimeParameter for each core type."""

    def get_runtime_parameters(self, core_names):
        """Expand this template into one RuntimeParameter per distinct core.

        Each ``$core`` placeholder in the name, getter and setter templates
        is substituted with the core name.

        :param core_names: iterable of core names; duplicates are collapsed.
        """
        params = []
        for core in set(core_names):
            name = string.Template(self.name).substitute(core=core)
            getter = string.Template(self.getter).substitute(core=core)
            setter = string.Template(self.setter).substitute(core=core)
            # Fix: dict(base, core=core) works on both Python 2 and 3;
            # the previous ``dict.items() + [...]`` concatenation breaks on
            # Python 3, where items() returns a view, not a list.
            getargs = dict(self.getter_args, core=core)
            setargs = dict(self.setter_args, core=core)
            params.append(RuntimeParameter(name, getter, setter, getargs,
                                           setargs, self.value_name, self.override))
        return params
class DynamicModuleSpec(dict):
    """Specification for a dynamically-loaded device module, stored as a
    single-entry dict mapping the module name to its parameters."""

    @property
    def name(self):
        # The module name is the single key of this dict.
        return self.keys()[0]

    def __init__(self, *args, **kwargs):
        """Accept either a bare module name (string), a one-entry dict
        mapping name -> params, or keyword arguments forming that dict.

        :raises ValueError: for more than one positional argument, or a
            value that is neither a string nor a one-entry dict.
        """
        dict.__init__(self)
        if args:
            if len(args) > 1:
                raise ValueError(args)
            value = args[0]
        else:
            value = kwargs
        # NOTE(review): basestring/iteritems are Python-2-only names; this
        # module appears to target Python 2.
        if isinstance(value, basestring):
            self[value] = {}
        elif isinstance(value, dict) and len(value) == 1:
            for k, v in value.iteritems():
                self[k] = v
        else:
            raise ValueError(value)
class DeviceMeta(ExtensionMeta):
    # Propagate runtime-parameter and dynamic-module specifications down
    # the Device class hierarchy, merged into AttributeCollections.
    to_propagate = ExtensionMeta.to_propagate + [
        ('runtime_parameters', RuntimeParameter, AttributeCollection),
        ('dynamic_modules', DynamicModuleSpec, AttributeCollection),
    ]
class Device(Extension):
    """
    Base class for all devices supported by Workload Automation. Defines
    the interface the rest of WA uses to interact with devices.

    :name: Unique name used to identify the device.
    :platform: The name of the device's platform (e.g. ``Android``) this may
               be used by workloads and instrumentation to assess whether they
               can run on the device.
    :working_directory: a string of the directory which is
                        going to be used by the workloads on the device.
    :binaries_directory: a string of the binary directory for
                         the device.
    :has_gpu: Should be ``True`` if the device has a separate GPU, and
              ``False`` if graphics processing is done on a CPU.

              .. note:: Pretty much all devices currently on the market
                        have GPUs, however this may not be the case for some
                        development boards.

    :path_module: The name of one of the modules implementing the os.path
                  interface, e.g. ``posixpath`` or ``ntpath``. You can provide
                  your own implementation rather than relying on one of the
                  standard library modules, in which case you need to specify
                  the *full* path to your module. e.g. '/home/joebloggs/mypathimp.py'
    :parameters: A list of RuntimeParameter objects. The order of the objects
                 is very important as the setters and getters will be called
                 in the order the RuntimeParameter objects were inserted.
    :active_cores: This should be a list of all the currently active cpus in
                   the device in ``'/sys/devices/system/cpu/online'``. The
                   returned list should be read from the device at the time
                   of the read request.
    """
    # NOTE(review): Python-2-style metaclass declaration; has no effect on
    # Python 3 (which would need `class Device(Extension, metaclass=DeviceMeta)`).
    __metaclass__ = DeviceMeta

    parameters = [
        Parameter('core_names', kind=list_of(caseless_string), mandatory=True, default=None,
                  description="""
                  This is a list of all cpu cores on the device with each
                  element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
                  order of the cores must match the order they are listed in
                  ``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
                  be an A7 core, and ``'cpu2'`` an A15.'
                  """),
        Parameter('core_clusters', kind=list_of_integers, mandatory=True, default=None,
                  description="""
                  This is a list indicating the cluster affinity of the CPU cores,
                  each element correponding to the cluster ID of the core coresponding
                  to it's index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
                  cluster 0, while cpu2 is on cluster 1. If this is not specified, this
                  will be inferred from ``core_names`` if possible (assuming all cores with
                  the same name are on the same cluster).
                  """),
    ]

    runtime_parameters = []

    # Dynamic modules are loaded or not based on whether the device supports
    # them (established at runtime by the module probing the device).
    dynamic_modules = []

    # These must be overwritten by subclasses.
    name = None
    platform = None
    default_working_directory = None
    has_gpu = None
    path_module = None
    active_cores = None

    def __init__(self, **kwargs):  # pylint: disable=W0613
        super(Device, self).__init__(**kwargs)
        if not self.path_module:
            raise NotImplementedError('path_module must be specified by the deriving classes.')
        # Resolve bare module names (e.g. 'posixpath') against the standard
        # library directory; full paths (ending in .py) are used as given.
        libpath = os.path.dirname(os.__file__)
        modpath = os.path.join(libpath, self.path_module)
        if not modpath.lower().endswith('.py'):
            modpath += '.py'
        try:
            # Expose the loaded os.path implementation as self.path.
            self.path = imp.load_source('device_path', modpath)
        except IOError:
            raise DeviceError('Unsupported path module: {}'.format(self.path_module))

    def validate(self):
        # pylint: disable=access-member-before-definition,attribute-defined-outside-init
        if self.core_names and not self.core_clusters:
            # Infer cluster IDs from core names: cores sharing a name are
            # assumed to be on the same cluster, numbered by first appearance.
            self.core_clusters = []
            clusters = []
            for cn in self.core_names:
                if cn not in clusters:
                    clusters.append(cn)
                self.core_clusters.append(clusters.index(cn))
        if len(self.core_names) != len(self.core_clusters):
            raise ConfigError('core_names and core_clusters are of different lengths.')

    def initialize(self, context):
        """
        Initialization that is performed at the beginning of the run (after the device has
        been connected).

        Probes each declared dynamic module and installs only those the device
        supports.
        """
        loader = ExtensionLoader()
        for module_spec in self.dynamic_modules:
            module = self._load_module(loader, module_spec)
            # Dynamic modules must expose a probe(device) method so support
            # can be established at run time.
            if not hasattr(module, 'probe'):
                message = 'Module {} does not have "probe" attribute; cannot be loaded dynamically'
                raise ValueError(message.format(module.name))
            if module.probe(self):
                self.logger.debug('Installing module "{}"'.format(module.name))
                self._install_module(module)
            else:
                self.logger.debug('Module "{}" is not supported by the device'.format(module.name))

    def reset(self):
        """
        Initiate rebooting of the device.

        Added in version 2.1.3.
        """
        raise NotImplementedError()

    def boot(self, *args, **kwargs):
        """
        Perform the steps necessary to boot the device to the point where it is ready
        to accept other commands.

        Changed in version 2.1.3: no longer expected to wait until boot completes.
        """
        raise NotImplementedError()

    def connect(self, *args, **kwargs):
        """
        Establish a connection to the device that will be used for subsequent commands.

        Added in version 2.1.3.
        """
        raise NotImplementedError()

    def disconnect(self):
        """ Close the established connection to the device. """
        raise NotImplementedError()

    def ping(self):
        """
        This must return successfully if the device is able to receive commands, or must
        raise :class:`wlauto.exceptions.DeviceUnresponsiveError` if the device cannot respond.
        """
        raise NotImplementedError()

    def get_runtime_parameter_names(self):
        """Return the names of all (core-expanded) runtime parameters."""
        return [p.name for p in self._expand_runtime_parameters()]

    def get_runtime_parameters(self):
        """ returns the runtime parameters that have been set. """
        # pylint: disable=cell-var-from-loop
        runtime_parameters = OrderedDict()
        for rtp in self._expand_runtime_parameters():
            # Write-only parameters (no getter) are skipped.
            if not rtp.getter:
                continue
            getter = getattr(self, rtp.getter)
            rtp_value = getter(**rtp.getter_args)
            runtime_parameters[rtp.name] = rtp_value
        return runtime_parameters

    def set_runtime_parameters(self, params):
        """
        The parameters are taken from the keyword arguments and are specific to
        a particular device. See the device documentation.

        :raises ConfigError: if a parameter name is not recognised.
        """
        runtime_parameters = self._expand_runtime_parameters()
        # Matching is case-insensitive on both sides.
        rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}
        # NOTE(review): iteritems() and the dict(items() + [...]) below are
        # Python-2-only constructs.
        params = OrderedDict((k.lower(), v) for k, v in params.iteritems() if v is not None)
        expected_keys = rtp_map.keys()
        if not set(params.keys()) <= set(expected_keys):
            unknown_params = list(set(params.keys()).difference(set(expected_keys)))
            raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))
        for param in params:
            rtp = rtp_map[param]
            setter = getattr(self, rtp.setter)
            # Invoke the setter with its configured args plus the new value
            # under the parameter's value_name key.
            args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
            setter(**args)

    def capture_screen(self, filepath):
        """Captures the current device screen into the specified file in a PNG format."""
        raise NotImplementedError()

    def get_properties(self, output_path):
        """Captures and saves the device configuration properties version and
        any other relevant information. Return them in a dict"""
        raise NotImplementedError()

    def listdir(self, path, **kwargs):
        """ List the contents of the specified directory. """
        raise NotImplementedError()

    def push_file(self, source, dest):
        """ Push a file from the host file system onto the device. """
        raise NotImplementedError()

    def pull_file(self, source, dest):
        """ Pull a file from device system onto the host file system. """
        raise NotImplementedError()

    def delete_file(self, filepath):
        """ Delete the specified file on the device. """
        raise NotImplementedError()

    def file_exists(self, filepath):
        """ Check if the specified file or directory exist on the device. """
        raise NotImplementedError()

    def get_pids_of(self, process_name):
        """ Returns a list of PIDs of the specified process name. """
        raise NotImplementedError()

    def kill(self, pid, as_root=False):
        """ Kill the process with the specified PID. """
        raise NotImplementedError()

    def killall(self, process_name, as_root=False):
        """ Kill all running processes with the specified name. """
        raise NotImplementedError()

    def install(self, filepath, **kwargs):
        """ Install the specified file on the device. What "install" means is device-specific
        and may possibly also depend on the type of file."""
        raise NotImplementedError()

    def uninstall(self, filepath):
        """ Uninstall the specified file on the device. What "uninstall" means is device-specific
        and may possibly also depend on the type of file."""
        raise NotImplementedError()

    def execute(self, command, timeout=None, **kwargs):
        """
        Execute the specified command on the device and return the output.

        :param command: Command to be executed on the device.
        :param timeout: If the command does not return after the specified time,
                        execute() will abort with an error. If there is no timeout for
                        the command, this should be set to 0 or None.

        Other device-specific keyword arguments may also be specified.

        :returns: The stdout output from the command.
        """
        raise NotImplementedError()

    def set_sysfile_value(self, filepath, value, verify=True):
        """
        Write the specified value to the specified file on the device
        and verify that the value has actually been written.

        :param filepath: The file to be modified.
        :param value: The value to be written to the file. Must be
                      an int or a string convertable to an int.
        :param verify: Specifies whether the value should be verified, once written.

        Should raise DeviceError if it could not write the value.
        """
        raise NotImplementedError()

    def get_sysfile_value(self, sysfile, kind=None):
        """
        Get the contents of the specified sysfile.

        :param sysfile: The file whose contents will be returned.
        :param kind: The type of value to be expected in the sysfile. This can
                     be any Python callable that takes a single str argument.
                     If not specified or is None, the contents will be returned
                     as a string.
        """
        raise NotImplementedError()

    def start(self):
        """
        This gets invoked before an iteration is started and is intended to help the
        device manage any internal supporting functions.
        """
        pass

    def stop(self):
        """
        This gets invoked after iteration execution has completed and is intended to help the
        device manage any internal supporting functions.
        """
        pass

    def __str__(self):
        return 'Device<{}>'.format(self.name)

    __repr__ = __str__

    def _expand_runtime_parameters(self):
        # CoreParameter templates expand to one parameter per core type; plain
        # RuntimeParameters pass through unchanged.
        expanded_params = []
        for param in self.runtime_parameters:
            if isinstance(param, CoreParameter):
                expanded_params.extend(param.get_runtime_parameters(self.core_names))  # pylint: disable=no-member
            else:
                expanded_params.append(param)
        return expanded_params

    @contextmanager
    def _check_alive(self):
        # Wraps device operations: if anything fails, first ping() so an
        # unresponsive device surfaces as DeviceUnresponsiveError rather than
        # the (less informative) original exception.
        try:
            yield
        except Exception as e:
            self.ping()
            raise e
| {
"content_hash": "c7b0f983ac8e7b5e36ed101841a64511",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 115,
"avg_line_length": 39.235023041474655,
"alnum_prop": 0.6116983791402396,
"repo_name": "freedomtan/workload-automation",
"id": "f0338a487be7fca23f81ddab905d3231ecddcb95",
"size": "17614",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wlauto/core/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35633"
},
{
"name": "HTML",
"bytes": "8402"
},
{
"name": "Java",
"bytes": "92698"
},
{
"name": "JavaScript",
"bytes": "6578"
},
{
"name": "Makefile",
"bytes": "430"
},
{
"name": "Python",
"bytes": "1208710"
},
{
"name": "Shell",
"bytes": "23385"
},
{
"name": "VimL",
"bytes": "901"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the unused ``problems_status`` field from the ``user`` model."""

    dependencies = [
        ('account', '0003_auto_20150915_2025'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='problems_status',
        ),
    ]
| {
"content_hash": "9b27e78a59c43b7bf3c8f237f2f8c821",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 19.705882352941178,
"alnum_prop": 0.5940298507462687,
"repo_name": "wangmingjob/OnlineJudge",
"id": "64f8ae03913cb40e15cbc80b32966b0245c8edc1",
"size": "359",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "account/migrations/0004_remove_user_problems_status.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46937"
},
{
"name": "HTML",
"bytes": "151050"
},
{
"name": "JavaScript",
"bytes": "119114"
},
{
"name": "Python",
"bytes": "312924"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""
Tests
-----
This module contains all the tests related to the app.
"""
#!/usr/bin/env python3
import os
import pegasus
import unittest
import tempfile
from bs4 import BeautifulSoup
class PegasusTestCase(unittest.TestCase):
    """End-to-end tests for the pegasus Flask app, driven through its test client."""

    def setUp(self):
        """
        Set up the test environment:
        1. Create a database temp file
        2. Enable testing
        3. Disable csrf protection
        4. Initialize the database
        """
        self.db_fd, pegasus.app.config['DATABASE'] = tempfile.mkstemp()
        pegasus.app.config['TESTING'] = True
        pegasus.app.config['CSRF_ENABLED'] = False
        self.app = pegasus.app.test_client()
        with pegasus.app.app_context():
            pegasus.init_db()

    def tearDown(self):
        """Close the test environment (release and remove the temp database)."""
        os.close(self.db_fd)
        os.unlink(pegasus.app.config['DATABASE'])

    # --- request helpers -------------------------------------------------

    def register(self, name, username, password, email):
        """Register a new user"""
        return self.app.post('/register', data = dict(
            name = name,
            username = username,
            password = password,
            email = email
        ), follow_redirects = True)

    def get_csrf(self):
        """Get current CSRF token (if needed for a test)"""
        rv = self.app.get('/')
        soup = BeautifulSoup(rv.data, 'html.parser')
        tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})
        return tag['value']

    def login(self, username, password):
        """Login into a registered account"""
        return self.app.post('/login', data = dict(
            username = username,
            password = password
        ), follow_redirects = True)

    def logout(self):
        """Logout from current login"""
        return self.app.get('/logout', follow_redirects = True)

    def create(self, title):
        """Create a new board while logged in"""
        return self.app.post('/new-board', data = dict(
            title = title
        ), follow_redirects = True)

    def show_board(self, boardID):
        """Show board with a certain ID"""
        return self.app.get('/board/' + boardID, follow_redirects = True)

    def edit_profile(self, name, username, email):
        """Edit logged in user's profile (without password)"""
        return self.app.post('/_editProfile', data = dict(
            name = name,
            username = username,
            email = email
        ), follow_redirects = True)

    def change_password(self, old_password, new_password):
        """Edit logged in user's password"""
        data = dict(password = new_password)
        # The 'old-password' key contains a dash, so it cannot be a dict() kwarg.
        data['old-password'] = old_password
        return self.app.post('/_changePassword', data = data, follow_redirects = True)

    # --- the actual test -------------------------------------------------

    def test_basic_ops(self):
        """
        Sequence of tests divided into categories:
        1. Registration
            a. Be able to register a user
            b. Be able to log out
            c. Get an error when registering an existing username
            d. Get an error when registering an existing email
        2. Login
            a. Get an error when the wrong password is entered
            b. Get an error when a non-existing username is entered
            c. Log in successfully
        3. Edit Profile
            a. Be able to edit name, username, and/or email if no conflict
            b. Get an error if a username exists, regardless of capitals
            c. Get an error if an email exists, regardless of capitals
            d. Get an error if both an email and a username exist, regardless of capitals
            e. Be able to change the password
            f. Get an error if the old password entered is incorrect
        4. Board Creation
            a. Be able to create a board when logged in.
            b. Be able to show that board.
            c. Get a 404 error if attempting to show a board that does not exist
        """
        success = 'OK'
        # 1. Register
        prefix = '[REGISTRATION]: '
        rv = self.register('Scott', 'scott', 'tiger123', 'scott@tiger.org')
        assert b'Successfully registered!' in rv.data
        rv = self.logout()
        assert b'You go bye bye :(' in rv.data
        rv = self.register('Scott', 'scott', 'tiger111', 'scott2@tiger.org')
        assert b'Username already in use' in rv.data
        rv = self.register('Scott', 'scott2', 'tiger113', 'scott@tiger.org')
        assert b'Email already in use' in rv.data
        ## Create another user for other tests
        self.register('Tammy', 'tammy', 'catfish122', 'tammy@catfish.org')
        self.logout()
        print(prefix + success)
        # 2. Login
        prefix = '[LOGIN]: '
        rv = self.login('scott', 'tiger111')
        assert b'Invalid password' in rv.data
        rv = self.login('notscott', 'tiger123')
        assert b'Invalid username' in rv.data
        rv = self.login('scott', 'tiger123')
        assert b'Hey there!' in rv.data
        print(prefix + success)
        # 3. Edit Profile
        prefix = '[EDIT PROFILE]: '
        rv = self.edit_profile('NOT SCOTT OK', 'scott', 'scott@tiger.org')
        assert b'None' in rv.data
        rv = self.edit_profile('NOT SCOTT OK', 'tamMy', 'scott@tiger.org')
        assert b'Username is not available' in rv.data
        rv = self.edit_profile('NOT SCOTT OK', 'scott', 'tammy@CATFISH.org')
        assert b'Email is not available' in rv.data
        rv = self.edit_profile('NOT SCOTT OK', 'tammy', 'tammy@catfish.org')
        assert b'Email is not available. Username is not available.' in rv.data
        rv = self.change_password('tiger123', 'tiger')
        assert b'None' in rv.data
        rv = self.change_password('nottiger', 'tiger123')
        assert b'Old password you entered is incorrect.' in rv.data
        print(prefix + success)
        # 4. Board Create/Show
        prefix = '[BOARD CREATION]: '
        rv = self.create('New Board')
        assert b'Board successfully created!' in rv.data
        rv = self.show_board('1')
        assert b'New Board' in rv.data
        rv = self.show_board('2')
        assert rv.status == '404 NOT FOUND'
        print(prefix + success)
if __name__ == '__main__':
    # Run the suite when this file is executed directly.
    unittest.main()
"content_hash": "905c56c3a422565c24837610252c4ee0",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 89,
"avg_line_length": 37.51807228915663,
"alnum_prop": 0.5862235067437379,
"repo_name": "blaringsilence/pegasus",
"id": "885bafb451a208570e208e52532880ead1af82c8",
"size": "6228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_pegasus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5939"
},
{
"name": "HTML",
"bytes": "47288"
},
{
"name": "JavaScript",
"bytes": "98100"
},
{
"name": "Python",
"bytes": "59876"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
    from ansible.modules.network.fortios import fortios_log_syslogd4_filter
except ImportError:
    # Skip this whole test module when the fortios module under test cannot
    # be imported (e.g. missing optional dependencies).
    pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Patch the Connection class used by the module under test for every test."""
    patched_connection = mocker.patch(
        'ansible.modules.network.fortios.fortios_log_syslogd4_filter.Connection')
    return patched_connection
# NOTE(review): this passes the fixture *function* itself (not a mocked
# connection instance) to FortiOSHandler. The tests below only assert on the
# patched FortiOSHandler.set method, so the connection is never exercised --
# but confirm this is intentional.
fos_instance = FortiOSHandler(connection_mock)
def test_log_syslogd4_filter_creation(mocker):
    """A successful POST marks the result changed and translates snake_case
    input keys to the dash-separated keys the FortiOS API expects."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_syslogd4_filter': {
            'anomaly': 'enable',
            'dns': 'enable',
            'filter': 'test_value_5',
            'filter_type': 'include',
            'forward_traffic': 'enable',
            'gtp': 'enable',
            'local_traffic': 'enable',
            'multicast_traffic': 'enable',
            'netscan_discovery': 'test_value_11,',
            'netscan_vulnerability': 'test_value_12,',
            'severity': 'emergency',
            'sniffer_traffic': 'enable',
            'ssh': 'enable',
            'voip': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_log_syslogd4_filter.fortios_log_syslogd4(input_data, fos_instance)

    # Underscored input keys must be sent to the API in dashed form.
    expected_data = {
        'anomaly': 'enable',
        'dns': 'enable',
        'filter': 'test_value_5',
        'filter-type': 'include',
        'forward-traffic': 'enable',
        'gtp': 'enable',
        'local-traffic': 'enable',
        'multicast-traffic': 'enable',
        'netscan-discovery': 'test_value_11,',
        'netscan-vulnerability': 'test_value_12,',
        'severity': 'emergency',
        'sniffer-traffic': 'enable',
        'ssh': 'enable',
        'voip': 'enable'
    }

    set_method_mock.assert_called_with('log.syslogd4', 'filter', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_log_syslogd4_filter_creation_fails(mocker):
    """An API error (HTTP 500) is reported as an error and not as a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_syslogd4_filter': {
            'anomaly': 'enable',
            'dns': 'enable',
            'filter': 'test_value_5',
            'filter_type': 'include',
            'forward_traffic': 'enable',
            'gtp': 'enable',
            'local_traffic': 'enable',
            'multicast_traffic': 'enable',
            'netscan_discovery': 'test_value_11,',
            'netscan_vulnerability': 'test_value_12,',
            'severity': 'emergency',
            'sniffer_traffic': 'enable',
            'ssh': 'enable',
            'voip': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_log_syslogd4_filter.fortios_log_syslogd4(input_data, fos_instance)

    expected_data = {
        'anomaly': 'enable',
        'dns': 'enable',
        'filter': 'test_value_5',
        'filter-type': 'include',
        'forward-traffic': 'enable',
        'gtp': 'enable',
        'local-traffic': 'enable',
        'multicast-traffic': 'enable',
        'netscan-discovery': 'test_value_11,',
        'netscan-vulnerability': 'test_value_12,',
        'severity': 'emergency',
        'sniffer-traffic': 'enable',
        'ssh': 'enable',
        'voip': 'enable'
    }

    set_method_mock.assert_called_with('log.syslogd4', 'filter', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_log_syslogd4_filter_idempotent(mocker):
    """A 404 on DELETE means the object was already absent: no error, no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_syslogd4_filter': {
            'anomaly': 'enable',
            'dns': 'enable',
            'filter': 'test_value_5',
            'filter_type': 'include',
            'forward_traffic': 'enable',
            'gtp': 'enable',
            'local_traffic': 'enable',
            'multicast_traffic': 'enable',
            'netscan_discovery': 'test_value_11,',
            'netscan_vulnerability': 'test_value_12,',
            'severity': 'emergency',
            'sniffer_traffic': 'enable',
            'ssh': 'enable',
            'voip': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_log_syslogd4_filter.fortios_log_syslogd4(input_data, fos_instance)

    expected_data = {
        'anomaly': 'enable',
        'dns': 'enable',
        'filter': 'test_value_5',
        'filter-type': 'include',
        'forward-traffic': 'enable',
        'gtp': 'enable',
        'local-traffic': 'enable',
        'multicast-traffic': 'enable',
        'netscan-discovery': 'test_value_11,',
        'netscan-vulnerability': 'test_value_12,',
        'severity': 'emergency',
        'sniffer-traffic': 'enable',
        'ssh': 'enable',
        'voip': 'enable'
    }

    set_method_mock.assert_called_with('log.syslogd4', 'filter', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_log_syslogd4_filter_filter_foreign_attributes(mocker):
    """Attributes not in the module's schema are stripped before the API call."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_syslogd4_filter': {
            'random_attribute_not_valid': 'tag',
            'anomaly': 'enable',
            'dns': 'enable',
            'filter': 'test_value_5',
            'filter_type': 'include',
            'forward_traffic': 'enable',
            'gtp': 'enable',
            'local_traffic': 'enable',
            'multicast_traffic': 'enable',
            'netscan_discovery': 'test_value_11,',
            'netscan_vulnerability': 'test_value_12,',
            'severity': 'emergency',
            'sniffer_traffic': 'enable',
            'ssh': 'enable',
            'voip': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_log_syslogd4_filter.fortios_log_syslogd4(input_data, fos_instance)

    # 'random_attribute_not_valid' must not appear in the payload.
    expected_data = {
        'anomaly': 'enable',
        'dns': 'enable',
        'filter': 'test_value_5',
        'filter-type': 'include',
        'forward-traffic': 'enable',
        'gtp': 'enable',
        'local-traffic': 'enable',
        'multicast-traffic': 'enable',
        'netscan-discovery': 'test_value_11,',
        'netscan-vulnerability': 'test_value_12,',
        'severity': 'emergency',
        'sniffer-traffic': 'enable',
        'ssh': 'enable',
        'voip': 'enable'
    }

    set_method_mock.assert_called_with('log.syslogd4', 'filter', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| {
"content_hash": "3b6f5304d9764749373c14bbc64b3c2b",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 133,
"avg_line_length": 35.60669456066945,
"alnum_prop": 0.5841363102232667,
"repo_name": "thaim/ansible",
"id": "5e16cc4804941e68a944b5b00cd5d39bf3bced1a",
"size": "9206",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/fortios/test_fortios_log_syslogd4_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import webapp2
from pages import author_list
class AuthorPage(webapp2.RequestHandler):
    """The amcgee user home page of the GiR App Labs at AAMU app."""

    def get(self):
        """HTTP GET handler for the amcgee Users page: returns a plain-text greeting."""
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write("Hi, I'm Anthony!")
# Register this handler under the author's username on the shared author list.
author_list.Page.add_author('amcgee3', AuthorPage)
| {
"content_hash": "06bd5c34ed51ca7efa558a09afebd6c1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 23.58823529411765,
"alnum_prop": 0.6758104738154613,
"repo_name": "GIR-at-AAMU/gir_app_labs_at_aamu",
"id": "d27aa10ccb282cd08064de9796a6a90297d6a521",
"size": "1010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pages/authors/amcgee3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "957"
},
{
"name": "HTML",
"bytes": "6033"
},
{
"name": "Python",
"bytes": "43441"
}
],
"symlink_target": ""
} |
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
class EmailBackend(ModelBackend):
    """Authentication backend that lets users log in with their email address.

    The ``username`` credential is matched case-insensitively against
    ``User.email``; if no user has that email, authentication falls back to
    the standard ``ModelBackend`` username-based lookup.
    """

    def authenticate(self, username=None, password=None):
        """Return the matching ``User`` on success, ``None`` otherwise.

        :param username: email address to look up (or username for the fallback).
        :param password: plain-text password to verify.
        """
        # Keep the try body minimal: only the lookup can raise DoesNotExist.
        # NOTE(review): User.MultipleObjectsReturned will propagate if email
        # addresses are not unique -- confirm uniqueness is enforced.
        try:
            user = User.objects.get(email__iexact=username)
        except User.DoesNotExist:
            # No user with this email: fall back to username-based auth.
            return super(EmailBackend, self).authenticate(username, password)
        if user.check_password(password):
            return user
        # Wrong password: return None without falling back, matching the
        # original behavior (the email did match a user).
        return None
| {
"content_hash": "655117270d35903184ecfcdcd37d5ee4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 40.90909090909091,
"alnum_prop": 0.6888888888888889,
"repo_name": "paulcwatts/django-auth-utils",
"id": "56221b13b6c6de29a0be077de868cb52452ab7b3",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auth_utils/backends.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8014"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# Generic type variable and the shape of the optional `cls` response
# transformer callback accepted by the operations below.
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared serializer for building requests; validation is left to the service.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, vm_scale_set_name: str, vmss_extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PUT request for creating or updating a VM scale set extension."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, "str"),
        "vmssExtensionName": _SERIALIZER.url("vmss_extension_name", vmss_extension_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
    resource_group_name: str, vm_scale_set_name: str, vmss_extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for removing a VM scale set extension."""
    query_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", query_params.pop("api-version", "2019-03-01"))  # type: str

    # Expand the resource path template with serialized path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}",
    )  # pylint: disable=line-too-long
    path_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, "str"),
        "vmssExtensionName": _SERIALIZER.url("vmss_extension_name", vmss_extension_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    request_url = _format_url_section(url_template, **path_arguments)

    # Only the api-version query parameter is sent on DELETE.
    query_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    return HttpRequest(method="DELETE", url=request_url, params=query_params, **kwargs)
def build_get_request(
    resource_group_name: str,
    vm_scale_set_name: str,
    vmss_extension_name: str,
    subscription_id: str,
    *,
    expand: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for retrieving a single VM scale set extension."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, "str"),
        "vmssExtensionName": _SERIALIZER.url("vmss_extension_name", vmss_extension_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    # $expand is optional and only sent when supplied by the caller.
    if expand is not None:
        _params["$expand"] = _SERIALIZER.query("expand", expand, "str")
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(
    resource_group_name: str, vm_scale_set_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists all extensions of a VM scale set.

    Path arguments are serialized into the URL template and the api-version
    becomes a query parameter; remaining ``kwargs`` go to :class:`HttpRequest`.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
    accept = _headers.pop("Accept", "application/json")
    # URL: callers may override the template via "template_url".
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions",
    )  # pylint: disable=line-too-long
    _url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        vmScaleSetName=_SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
    )
    # Query parameters.
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Headers.
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class VirtualMachineScaleSetExtensionsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.compute.v2019_03_01.ComputeManagementClient`'s
        :attr:`virtual_machine_scale_set_extensions` attribute.
    """
    # Alias so callers can reach the generated models namespace from here.
    models = _models
    def __init__(self, *args, **kwargs):
        # Internal wiring: accepts (client, config, serializer, deserializer)
        # either positionally or by keyword; positional values win when given.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        vmss_extension_name: str,
        extension_parameters: Union[_models.VirtualMachineScaleSetExtension, IO],
        **kwargs: Any
    ) -> _models.VirtualMachineScaleSetExtension:
        """Send the initial PUT of the create-or-update long-running operation
        and deserialize the immediate 200/201 response body.

        :raises ~azure.core.exceptions.HttpResponseError: on any other status.
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineScaleSetExtension]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # A raw stream/bytes body is sent as-is; a model object is serialized
        # to JSON first.
        if isinstance(extension_parameters, (IO, bytes)):
            _content = extension_parameters
        else:
            _json = self._serialize.body(extension_parameters, "VirtualMachineScaleSetExtension")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            vmss_extension_name=vmss_extension_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the same body shape.
        if response.status_code == 200:
            deserialized = self._deserialize("VirtualMachineScaleSetExtension", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("VirtualMachineScaleSetExtension", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}"}  # type: ignore
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        vmss_extension_name: str,
        extension_parameters: _models.VirtualMachineScaleSetExtension,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineScaleSetExtension]:
        """The operation to create or update an extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set where the extension should be create or
         updated. Required.
        :type vm_scale_set_name: str
        :param vmss_extension_name: The name of the VM scale set extension. Required.
        :type vmss_extension_name: str
        :param extension_parameters: Parameters supplied to the Create VM scale set Extension
         operation. Required.
        :type extension_parameters:
         ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineScaleSetExtension or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        vmss_extension_name: str,
        extension_parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineScaleSetExtension]:
        """The operation to create or update an extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set where the extension should be create or
         updated. Required.
        :type vm_scale_set_name: str
        :param vmss_extension_name: The name of the VM scale set extension. Required.
        :type vmss_extension_name: str
        :param extension_parameters: Parameters supplied to the Create VM scale set Extension
         operation. Required.
        :type extension_parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineScaleSetExtension or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        vmss_extension_name: str,
        extension_parameters: Union[_models.VirtualMachineScaleSetExtension, IO],
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineScaleSetExtension]:
        """The operation to create or update an extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set where the extension should be create or
         updated. Required.
        :type vm_scale_set_name: str
        :param vmss_extension_name: The name of the VM scale set extension. Required.
        :type vmss_extension_name: str
        :param extension_parameters: Parameters supplied to the Create VM scale set Extension
         operation. Is either a model type or a IO type. Required.
        :type extension_parameters:
         ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineScaleSetExtension or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineScaleSetExtension]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                vm_scale_set_name=vm_scale_set_name,
                vmss_extension_name=vmss_extension_name,
                extension_parameters=extension_parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialization callback invoked when the LRO reaches a
            # terminal state.
            deserialized = self._deserialize("VirtualMachineScaleSetExtension", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Resolve the polling strategy: True -> ARM default, False -> no
        # polling, anything else -> caller-supplied polling method.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}"}  # type: ignore
    def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, vm_scale_set_name: str, vmss_extension_name: str, **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the delete long-running operation.

        Returns ``None`` (or ``cls(...)`` when a custom callback is given);
        raises :class:`HttpResponseError` for statuses other than 200/202/204.
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        request = build_delete_request(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            vmss_extension_name=vmss_extension_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}"}  # type: ignore
    @distributed_trace
    def begin_delete(
        self, resource_group_name: str, vm_scale_set_name: str, vmss_extension_name: str, **kwargs: Any
    ) -> LROPoller[None]:
        """The operation to delete the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set where the extension should be deleted.
         Required.
        :type vm_scale_set_name: str
        :param vmss_extension_name: The name of the VM scale set extension. Required.
        :type vmss_extension_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                vm_scale_set_name=vm_scale_set_name,
                vmss_extension_name=vmss_extension_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete produces no body; only the optional cls callback runs.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}"}  # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        vmss_extension_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> _models.VirtualMachineScaleSetExtension:
        """The operation to get the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set containing the extension. Required.
        :type vm_scale_set_name: str
        :param vmss_extension_name: The name of the VM scale set extension. Required.
        :type vmss_extension_name: str
        :param expand: The expand expression to apply on the operation. Default value is None.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualMachineScaleSetExtension or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineScaleSetExtension]
        request = build_get_request(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            vmss_extension_name=vmss_extension_name,
            subscription_id=self._config.subscription_id,
            expand=expand,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("VirtualMachineScaleSetExtension", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}"}  # type: ignore
    @distributed_trace
    def list(
        self, resource_group_name: str, vm_scale_set_name: str, **kwargs: Any
    ) -> Iterable["_models.VirtualMachineScaleSetExtension"]:
        """Gets a list of all extensions in a VM scale set.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set containing the extension. Required.
        :type vm_scale_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualMachineScaleSetExtension or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineScaleSetExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-03-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.VirtualMachineScaleSetExtensionListResult]
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build from the URL template. Later pages: reuse the
            # service-supplied next_link, re-applying the client api-version.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    vm_scale_set_name=vm_scale_set_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("VirtualMachineScaleSetExtensionListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; raise on anything other than HTTP 200.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions"}  # type: ignore
| {
"content_hash": "aa26bb3b3dd5bbe41f8eb234c98d2f47",
"timestamp": "",
"source": "github",
"line_count": 693,
"max_line_length": 234,
"avg_line_length": 47.544011544011546,
"alnum_prop": 0.6515418234794221,
"repo_name": "Azure/azure-sdk-for-python",
"id": "4b1f054c85b052cdd16fcc8226013491a7038ee8",
"size": "33448",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/operations/_virtual_machine_scale_set_extensions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    # Creates the Category model (created/modified via django-model-utils, an
    # auto-generated slug), adjusts MetaCategory's display options, and wires
    # Organization.jst / Organization.category foreign keys.
    dependencies = [
        ('organizations', '0002_auto_20161210_1948'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
                ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True, verbose_name='Slug')),
            ],
            options={
                'verbose_name_plural': 'Categories',
                'ordering': ['created'],
                'verbose_name': 'Category',
            },
        ),
        migrations.AlterModelOptions(
            name='metacategory',
            # NOTE(review): 'MetaCategorys' looks like a typo for
            # 'MetaCategories'; correcting it requires a follow-up migration,
            # not an edit to this applied one.
            options={'ordering': ['pk'], 'verbose_name': 'MetaCategory', 'verbose_name_plural': 'MetaCategorys'},
        ),
        migrations.AlterField(
            model_name='organization',
            name='jst',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teryt_tree.JednostkaAdministracyjna', verbose_name='Unit of administrative division'),
        ),
        migrations.AddField(
            model_name='organization',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Category', verbose_name='Category'),
        ),
    ]
| {
"content_hash": "74c40c65bd8df02ccd713cc77514dac1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 171,
"avg_line_length": 43,
"alnum_prop": 0.6188068756319515,
"repo_name": "watchdogpolska/watchdog-kj-kultura",
"id": "1bf2674b91617d8afdbe453ce92156210b662c25",
"size": "2051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watchdog_kj_kultura/organizations/migrations/0003_auto_20161210_2136.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "182971"
},
{
"name": "HTML",
"bytes": "93827"
},
{
"name": "JavaScript",
"bytes": "3024"
},
{
"name": "Python",
"bytes": "168593"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Redefines Blade.number as a 250-char CharField backed by the legacy
    # 'numero' database column.
    dependencies = [
        ('signage', '0005_logentry_signage'),
    ]
    operations = [
        migrations.AlterField(
            model_name='blade',
            name='number',
            field=models.CharField(db_column='numero', max_length=250, verbose_name='Blade Number'),
        ),
    ]
| {
"content_hash": "34f40099622cf7bb0e892642dcc7ecae",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 100,
"avg_line_length": 24,
"alnum_prop": 0.5911458333333334,
"repo_name": "GeotrekCE/Geotrek-admin",
"id": "6086510f20477ea4e950de6b1b190203cf0b9d48",
"size": "435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/signage/migrations/0006_auto_20190306_1555.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46138"
},
{
"name": "Dockerfile",
"bytes": "1816"
},
{
"name": "HTML",
"bytes": "274524"
},
{
"name": "JavaScript",
"bytes": "231326"
},
{
"name": "Makefile",
"bytes": "1909"
},
{
"name": "PLpgSQL",
"bytes": "78024"
},
{
"name": "Python",
"bytes": "3456569"
},
{
"name": "SCSS",
"bytes": "7179"
},
{
"name": "Shell",
"bytes": "14369"
}
],
"symlink_target": ""
} |
"""
Define the names making up the domain specific language
"""
from pyshould.expectation import (
Expectation, ExpectationNot,
ExpectationAll, ExpectationAny,
ExpectationNone, OPERATOR
)
from pyshould.dumper import Dumper
__author__ = "Ivan -DrSlump- Montes"
__email__ = "drslump@pollinimini.net"
__license__ = "MIT"
# Create instances to be used with the overloaded | operator.
# The deferred/factory flags are interpreted by pyshould.expectation;
# each name wraps a different Expectation* variant (not/all/any/none).
should = Expectation(deferred=True, factory=True)
should_not = ExpectationNot(deferred=True, factory=True)
should_all = ExpectationAll(deferred=True, factory=True)
should_any = ExpectationAny(deferred=True, factory=True)
should_none = ExpectationNone(deferred=True, factory=True)
# should_either defaults its chaining operator to OR.
should_either = Expectation(deferred=True, factory=True, def_op=OPERATOR.OR)
# Dumper instance for debugging mocks
dumper = Dumper()
def it(value):
    """Wrap *value* in a plain :class:`Expectation` so matchers can be chained on it."""
    expectation = Expectation(value)
    return expectation
def any_of(value, *args):
    """At least one of the items in *value* should match.

    Accepts either a single iterable (``any_of([a, b])``) or multiple
    positional values (``any_of(a, b)``), which are packed into a tuple.
    Returns an :class:`ExpectationAny` over the items.
    """
    if args:  # idiomatic truthiness check instead of len()
        value = (value,) + args
    return ExpectationAny(value)
def all_of(value, *args):
    """All the items in *value* should match.

    Accepts either a single iterable (``all_of([a, b])``) or multiple
    positional values (``all_of(a, b)``), which are packed into a tuple.
    Returns an :class:`ExpectationAll` over the items.
    """
    if args:  # idiomatic truthiness check instead of len()
        value = (value,) + args
    return ExpectationAll(value)
def none_of(value, *args):
    """None of the items in *value* should match.

    Accepts either a single iterable (``none_of([a, b])``) or multiple
    positional values (``none_of(a, b)``), which are packed into a tuple.
    Returns an :class:`ExpectationNone` over the items.
    """
    if args:  # idiomatic truthiness check instead of len()
        value = (value,) + args
    return ExpectationNone(value)
| {
"content_hash": "fef315f4e2393a46f2ac9cba2e3332f2",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 76,
"avg_line_length": 24.56140350877193,
"alnum_prop": 0.695,
"repo_name": "drslump/pyshould",
"id": "f7b26ef76500c1df8917fbf50b851aecd7f04b24",
"size": "1400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyshould/dsl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "414"
},
{
"name": "Python",
"bytes": "71568"
}
],
"symlink_target": ""
} |
import time
from math import ceil
import appleseed as asr
from ..logger import get_logger
from ..utils import util
logger = get_logger()
class FinalTileCallback(asr.ITileCallback):
"""
The TileCallback is responsible for sending the results of the render back to Blender
"""
    def __init__(self, engine, scene):
        """Store the Blender render engine/scene and precompute the render
        window bounds, tile counts and progress counters."""
        super().__init__()
        self.__engine = engine
        self.__scene = scene
        self.__pass_incremented = False
        self.__render_stats = ["Starting", ""]
        # Compute render resolution.
        (width, height) = util.get_render_resolution(self.__scene)
        # Compute render window.
        if self.__scene.render.use_border:
            # Border values are normalized [0, 1]; Y is flipped here
            # (height - border_y), so min/max are in a top-down image space.
            self.__min_x = int(self.__scene.render.border_min_x * width)
            self.__min_y = height - int(self.__scene.render.border_max_y * height)
            self.__max_x = int(self.__scene.render.border_max_x * width) - 1
            self.__max_y = height - int(self.__scene.render.border_min_y * height) - 1
        else:
            # No border: the window is the full frame (inclusive bounds).
            self.__min_x = 0
            self.__min_y = 0
            self.__max_x = width - 1
            self.__max_y = height - 1
        # Compute number of tiles.
        vertical_tiles = int(ceil((self.__max_y - self.__min_y + 1) / self.__scene.appleseed.tile_size))
        horizontal_tiles = int(ceil((self.__max_x - self.__min_x + 1) / self.__scene.appleseed.tile_size))
        self.__total_tiles = vertical_tiles * horizontal_tiles
        # Compute total pixel count (window area times the number of passes).
        self.__total_passes = scene.appleseed.renderer_passes
        self.__total_pixels = (self.__max_x - self.__min_x + 1) * (self.__max_y - self.__min_y + 1) * self.__total_passes
        self.__time_start = time.time()
        self.__rendered_pixels = 0
        self.__pass_number = 1
        self.__rendered_tiles = 0
    @property
    def render_stats(self):
        """Return the current two-item [status, detail] message pair."""
        return self.__render_stats
def on_tiled_frame_begin(self, frame):
self.__pass_incremented = False
if self.__pass_number == 1:
self.__render_stats = ["appleseed Rendering", "Time Remaining: Unknown"]
def on_tiled_frame_end(self, frame):
if not self.__pass_incremented:
self.__pass_number += 1
self.__pass_incremented = True
self.__rendered_tiles = 0
    def on_tile_begin(self, frame, tile_x, tile_y, thread_index, thread_count):
        """No-op callback; tile bookkeeping happens in :meth:`on_tile_end`."""
        pass
def on_tile_end(self, frame, tile_x, tile_y):
"""
Processes the tile data as it finished
"""
logger.debug("Finished tile %s %s", tile_x, tile_y)
image = frame.image()
properties = image.properties()
# These are the starting pixel locations for the tile
x = tile_x * properties.m_tile_width
y = tile_y * properties.m_tile_height
tile = image.tile(tile_x, tile_y)
# Same as tile size from render settings
tile_w = tile.get_width()
tile_h = tile.get_height()
# Ignore tiles completely outside the render window.
if x > self.__max_x or x + tile_w - 1 < self.__min_x:
logger.debug("Skipping invisible tile")
return True
if y > self.__max_y or y + tile_h - 1 < self.__min_y:
logger.debug("Skipping invisible tile")
return True
# Image-space coordinates of the intersection between the tile and the render window.
ix0 = max(x, self.__min_x)
iy0 = max(y, self.__min_y)
ix1 = min(x + tile_w - 1, self.__max_x)
iy1 = min(y + tile_h - 1, self.__max_y)
# Number of rows and columns to skip in the input tile.
skip_x = ix0 - x
skip_y = iy0 - y
take_x = ix1 - ix0 + 1
take_y = iy1 - iy0 + 1
# Window-space coordinates of the intersection between the tile and the render window.
x0 = ix0 - self.__min_x # left
y0 = self.__max_y - iy1 # bottom
# Update image.
render_view = self.__engine.active_view_get()
result = self.__engine.begin_result(x0,
y0,
take_x,
take_y,
view=render_view)
layer = result.layers[0].passes.find_by_name("Combined", render_view)
pix = self.__get_pixels(image,
tile_x,
tile_y,
take_x,
take_y,
skip_x,
skip_y)
layer.rect = pix
if len(frame.aovs()) > 0:
self.__engine.update_result(result)
for aov in frame.aovs():
model = aov.get_model()
if model not in ("cryptomatte_object_aov", "cryptomatte_material_aov"):
image = aov.get_image()
pixel_buffer = self.__get_pixels(image,
tile_x,
tile_y,
take_x,
take_y,
skip_x,
skip_y)
layer = result.layers[0].passes.find_by_name(self.__map_aovs(aov.get_name()), render_view)
layer.rect = pixel_buffer
self.__engine.update_result(result)
else:
image = aov.get_cryptomatte_image()
pixel_buffer = self.__get_pixels(image,
tile_x,
tile_y,
take_x,
take_y,
skip_x,
skip_y)
crypto_pixels = self.__process_crypto_pixels(pixel_buffer)
for i, pixels in enumerate(crypto_pixels):
layer = result.layers[0].passes.find_by_name(f"{self.__map_aovs(model)}0{i}", render_view)
layer.rect = pixels
self.__engine.update_result(result)
self.__engine.end_result(result)
# Update progress bar.
self.__rendered_pixels += take_x * take_y
self.__engine.update_progress(self.__rendered_pixels / self.__total_pixels)
# Update stats.
seconds_per_pixel = (time.time() - self.__time_start) / self.__rendered_pixels
remaining_seconds = (self.__total_pixels - self.__rendered_pixels) * seconds_per_pixel
self.__rendered_tiles += 1
self.__render_stats = ["appleseed Rendering: Pass %i of %i, Tile %i of %i completed" %
(self.__pass_number,
self.__total_passes,
self.__rendered_tiles,
self.__total_tiles),
"Time Remaining: {0}".format(self.__format_seconds_to_hhmmss(remaining_seconds))]
@staticmethod
def __get_pixels(image, tile_x, tile_y, take_x, take_y, skip_x, skip_y):
tile = image.tile(tile_x, tile_y)
tile_w = tile.get_width()
tile_c = tile.get_channel_count()
floats = tile.get_storage()
pixel_buffer = []
for y in range(take_y - 1, -1, -1):
start_pix = (skip_y + y) * tile_w + skip_x
end_pix = start_pix + take_x
pixel_buffer.extend(floats[p * tile_c:p * tile_c + tile_c] for p in range(start_pix, end_pix))
return pixel_buffer
@staticmethod
def __process_crypto_pixels(pixel_buffer):
layer_1_pixels = list()
layer_2_pixels = list()
layer_3_pixels = list()
for pixel in pixel_buffer:
layer_1_pixels.append(pixel[3:7])
layer_2_pixels.append(pixel[7:11])
layer_3_pixels.append(pixel[11:])
return [layer_1_pixels, layer_2_pixels, layer_3_pixels]
@staticmethod
def __format_seconds_to_hhmmss(seconds):
hours = seconds // (60 * 60)
seconds %= (60 * 60)
minutes = seconds // 60
seconds %= 60
return "%02i:%02i:%02i" % (hours, minutes, seconds)
@staticmethod
def __map_aovs(aov_name):
aov_mapping = {'beauty': "Combined",
'diffuse': "Diffuse",
'screen_space_velocity': "Screen Space Velocity",
'direct_diffuse': "Direct Diffuse",
'indirect_diffuse': "Indirect Diffuse",
'glossy': "Glossy",
'direct_glossy': "Direct Glossy",
'indirect_glossy': "Indirect Glossy",
'normal': "Normal",
'position': "Position",
'uv': "UV",
'pixel_time': "Pixel Time",
'depth': "Z Depth",
'emission': "Emission",
'albedo': "Albedo",
'invalid_samples': "Invalid Samples",
'pixel_sample_count': "Pixel Sample Count",
'pixel_variation': "Pixel Variation",
'npr_shading': "NPR Shading",
'npr_contour': "NPR Contour",
'cryptomatte_object_aov': "CryptoObject",
'cryptomatte_material_aov': "CryptoMaterial"}
return aov_mapping[aov_name]
| {
"content_hash": "b07c1e651fb29785a830839856985076",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 121,
"avg_line_length": 39.483870967741936,
"alnum_prop": 0.478656045751634,
"repo_name": "dictoon/blenderseed",
"id": "71978958693942a01eb8d23a12f870249f80d550",
"size": "11100",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "render/final_tilecallback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15512"
},
{
"name": "Python",
"bytes": "578048"
}
],
"symlink_target": ""
} |
import os
import time
import fasteners
from conans.util.files import load, save
from conans.util.log import logger
class NoLock(object):
    """A no-op context manager used where locking is disabled."""
    def __enter__(self):
        return None
    def __exit__(self, exc_type, exc_val, exc_tb):  # @UnusedVariable
        # Returning None (falsy) propagates any exception, as the original did.
        return None
class SimpleLock(object):
    """Blocking inter-process lock backed by a lock file on disk."""
    def __init__(self, filename):
        # Delegate all locking to fasteners' file-based lock.
        self._lock = fasteners.InterProcessLock(filename, logger=logger)
    def __enter__(self):
        # Blocks until the file lock can be taken.
        self._lock.acquire()
    def __exit__(self, exc_type, exc_val, exc_tb):  # @UnusedVariable
        self._lock.release()
# Seconds to sleep between retries while waiting for the counter lock to free
# up (readers poll a little slower than writers).
READ_BUSY_DELAY = 0.5
WRITE_BUSY_DELAY = 0.25
class Lock(object):
    """Base class for the file-backed reader/writer locks below.

    The reader count lives in "<folder>.count"; access to that counter is
    serialized through the "<folder>.count.lock" file.
    """
    @staticmethod
    def clean(folder):
        """Remove the counter and counter-lock files next to `folder`, if any."""
        for path in (folder + ".count", folder + ".count.lock"):
            if os.path.exists(path):
                os.remove(path)
    def __init__(self, folder, locked_item, output):
        self._count_file = folder + ".count"
        self._count_lock_file = folder + ".count.lock"
        self._locked_item = locked_item
        self._output = output
        # Only print the "locked by another process" notice once.
        self._first_lock = True
    @property
    def files(self):
        """The (counter_file, counter_lock_file) pair used by this lock."""
        return self._count_file, self._count_lock_file
    def _info_locked(self):
        """Tell the user (once) that another conan process holds the lock."""
        if not self._first_lock:
            return
        self._first_lock = False
        self._output.info("%s is locked by another concurrent conan process, wait..."
                          % str(self._locked_item))
        self._output.info("If not the case, quit, and do 'conan remove --locks'")
    def _readers(self):
        """Return the reader count stored on disk, or 0 when unreadable."""
        try:
            return int(load(self._count_file))
        except IOError:
            # Missing counter file means no readers yet.
            return 0
        except (UnicodeEncodeError, ValueError):
            self._output.warn("%s does not contain a number!" % self._count_file)
            return 0
class ReadLock(Lock):
    """Shared (reader) lock: any number of readers, excluded by a writer.

    A writer is signalled by a counter of -1; readers increment the counter
    while it is non-negative.
    """
    def __enter__(self):
        while True:
            with fasteners.InterProcessLock(self._count_lock_file, logger=logger):
                readers = self._readers()
                if readers >= 0:
                    # No writer active: register ourselves and proceed.
                    save(self._count_file, str(readers + 1))
                    return
            # A writer holds the lock; notify once, then poll.
            self._info_locked()
            time.sleep(READ_BUSY_DELAY)
    def __exit__(self, exc_type, exc_val, exc_tb):  # @UnusedVariable
        with fasteners.InterProcessLock(self._count_lock_file, logger=logger):
            save(self._count_file, str(self._readers() - 1))
class WriteLock(Lock):
    """Exclusive (writer) lock: waits until no readers, then stores -1."""
    def __enter__(self):
        while True:
            with fasteners.InterProcessLock(self._count_lock_file, logger=logger):
                if self._readers() == 0:
                    # No readers: mark the counter as writer-held.
                    save(self._count_file, "-1")
                    return
            # Readers (or another writer) are active; notify once, then poll.
            self._info_locked()
            time.sleep(WRITE_BUSY_DELAY)
    def __exit__(self, exc_type, exc_val, exc_tb):  # @UnusedVariable
        with fasteners.InterProcessLock(self._count_lock_file, logger=logger):
            save(self._count_file, "0")
        if exc_type is None:
            return
        # If there was an exception while locking this, might be empty
        # Try to clean up the trailing filelocks
        try:
            os.remove(self._count_file)
            os.remove(self._count_lock_file)
            path = os.path.dirname(self._count_file)
            for _ in range(3):
                try:  # Take advantage that os.rmdir does not delete non-empty dirs
                    os.rmdir(path)
                except Exception:
                    break  # not empty
                path = os.path.dirname(path)
        except Exception:
            pass
| {
"content_hash": "08b7c07bc0ef865bbc399c7a8203bb4e",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 89,
"avg_line_length": 30.625,
"alnum_prop": 0.5488435374149659,
"repo_name": "conan-io/conan",
"id": "cce0a7a1205ce181d83d60c8992fb5e932ed6aa4",
"size": "3675",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/util/locks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
} |
"""
=====================================================
Transform EEG data using current source density (CSD)
=====================================================
This script shows an example of how to use CSD [1]_ [2]_ [3]_.
CSD takes the spatial Laplacian of the sensor signal (derivative in both
x and y). It does what a planar gradiometer does in MEG. Computing these
spatial derivatives reduces point spread. CSD transformed data have a sharper
or more distinct topography, reducing the negative impact of volume conduction.
"""
# Authors: Alex Rockhill <aprockhill206@gmail.com>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
print(__doc__)
# Downloads the MNE sample dataset on first run.
data_path = sample.data_path()
###############################################################################
# Load sample subject data
raw = mne.io.read_raw_fif(data_path + '/MEG/sample/sample_audvis_raw.fif',
                          preload=True)
events = mne.find_events(raw)
# CSD applies to EEG only, so keep EEG (plus EOG/ECG) and drop MEG channels.
raw = raw.pick_types(meg=False, eeg=True, eog=True, ecg=True, stim=False,
                     exclude=raw.info['bads'])
# Apply an average EEG reference before computing CSD.
raw.set_eeg_reference(projection=True).apply_proj()
###############################################################################
# Plot the raw data and CSD-transformed raw data:
raw_csd = mne.preprocessing.compute_current_source_density(raw)
raw.plot()
raw_csd.plot()
###############################################################################
# Also look at the power spectral densities:
raw.plot_psd()
raw_csd.plot_psd()
###############################################################################
# CSD can also be computed on Evoked (averaged) data.
# Here we epoch and average the data so we can demonstrate that.
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
            'visual/right': 4, 'smiley': 5, 'button': 32}
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=.5,
                    preload=True)
evoked = epochs['auditory'].average()
###############################################################################
# First let's look at how CSD affects scalp topography:
times = np.array([-0.1, 0., 0.05, 0.1, 0.15])
evoked_csd = mne.preprocessing.compute_current_source_density(evoked)
evoked.plot_joint(title='Average Reference', show=False)
evoked_csd.plot_joint(title='Current Source Density')
###############################################################################
# CSD has parameters ``stiffness`` and ``lambda2`` affecting smoothing and
# spline flexibility, respectively. Let's see how they affect the solution:
# A 4x4 grid: rows sweep the regularization (lambda2), columns the spline
# stiffness, each cell showing the topography at t=0.1 s.
fig, ax = plt.subplots(4, 4)
fig.subplots_adjust(hspace=0.5)
fig.set_size_inches(10, 10)
for i, lambda2 in enumerate([0, 1e-7, 1e-5, 1e-3]):
    for j, m in enumerate([5, 4, 3, 2]):
        this_evoked_csd = mne.preprocessing.compute_current_source_density(
            evoked, stiffness=m, lambda2=lambda2)
        this_evoked_csd.plot_topomap(
            0.1, axes=ax[i, j], outlines='skirt', contours=4, time_unit='s',
            colorbar=False, show=False)
        ax[i, j].set_title('stiffness=%i\nλ²=%s' % (m, lambda2))
###############################################################################
# References
# ----------
# .. [1] Perrin F, Bertrand O, Pernier J. "Scalp current density mapping:
#        Value and estimation from potential data." IEEE Trans Biomed Eng.
#        1987;34(4):283–288.
# .. [2] Perrin F, Pernier J, Bertrand O, Echallier JF. "Spherical splines
#        for scalp potential and current density mapping."
#        [Corrigenda EEG 02274, EEG Clin. Neurophysiol., 1990, 76, 565]
#        Electroenceph Clin Neurophysiol. 1989;72(2):184–187.
# .. [3] Kayser J, Tenke CE. "On the benefits of using surface Laplacian
#        (Current Source Density) methodology in electrophysiology.
#        Int J Psychophysiol. 2015 Sep; 97(3): 171–173.
| {
"content_hash": "b87c598cded25e120fe2076a631927fa",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 41.21875,
"alnum_prop": 0.5691180187010362,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "e5cddbd49fe276f171459458ebbf80bc51cbb9a5",
"size": "3965",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "0.20/_downloads/8b03332e11f644d89651afa7abf9ddf6/plot_eeg_csd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
} |
"""Tests for core.domain.question_services."""
from __future__ import annotations
import logging
import re
from core import feconf
from core.domain import question_domain
from core.domain import question_fetchers
from core.domain import question_services
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
(question_models,) = models.Registry.import_models([models.NAMES.question])
class QuestionServicesUnitTest(test_utils.GenericTestBase):
"""Test the question services module."""
    def setUp(self):
        """Before each individual test, create dummy users, a topic, three
        skills and three questions (linked to skill_1 and skill_2).
        """
        super(QuestionServicesUnitTest, self).setUp()
        # Register the user accounts used throughout the tests.
        self.signup(self.TOPIC_MANAGER_EMAIL, self.TOPIC_MANAGER_USERNAME)
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.topic_manager_id = self.get_user_id_from_email(
            self.TOPIC_MANAGER_EMAIL)
        self.new_user_id = self.get_user_id_from_email(
            self.NEW_USER_EMAIL)
        self.editor_id = self.get_user_id_from_email(
            self.EDITOR_EMAIL)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.new_user = user_services.get_user_actions_info(self.new_user_id)
        self.editor = user_services.get_user_actions_info(self.editor_id)
        # Create a topic with one subtopic and assign a topic manager to it.
        self.topic_id = topic_fetchers.get_new_topic_id()
        subtopic_1 = topic_domain.Subtopic.create_default_subtopic(
            1, 'Subtopic Title 1')
        subtopic_1.skill_ids = ['skill_id_1']
        subtopic_1.url_fragment = 'sub-one-frag'
        self.save_new_topic(
            self.topic_id, self.admin_id, name='Name',
            description='Description', canonical_story_ids=[],
            additional_story_ids=[], uncategorized_skill_ids=[],
            subtopics=[subtopic_1], next_subtopic_id=2)
        self.set_topic_managers([self.TOPIC_MANAGER_USERNAME], self.topic_id)
        self.topic_manager = user_services.get_user_actions_info(
            self.topic_manager_id)
        # Three skills and three questions; self.question also carries
        # inapplicable skill misconception ids for the related tests.
        self.save_new_skill(
            'skill_1', self.admin_id, description='Skill Description 1')
        self.save_new_skill(
            'skill_2', self.admin_id, description='Skill Description 2')
        self.save_new_skill(
            'skill_3', self.admin_id, description='Skill Description 3')
        self.question_id = question_services.get_new_question_id()
        self.question = self.save_new_question(
            self.question_id, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_1'],
            inapplicable_skill_misconception_ids=[
                'skillid12345-1', 'skillid12345-2'])
        self.question_id_1 = question_services.get_new_question_id()
        self.question_1 = self.save_new_question(
            self.question_id_1, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_2'])
        self.question_id_2 = question_services.get_new_question_id()
        self.question_2 = self.save_new_question(
            self.question_id_2, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_2'])
    def test_get_question_by_id(self):
        """Fetching returns the question; a missing id returns None with
        strict=False and raises with the default strict=True.
        """
        question = question_services.get_question_by_id(self.question_id)
        self.assertEqual(question.id, self.question_id)
        question = question_services.get_question_by_id(
            'question_id', strict=False)
        self.assertIsNone(question)
        with self.assertRaisesRegexp(
            Exception, 'Entity for class QuestionModel with id question_id '
            'not found'):
            question_services.get_question_by_id('question_id')
    def test_get_questions_by_skill_ids_with_fetch_by_difficulty(self):
        """Difficulty-based fetch returns at most the requested count of
        questions linked to the given skills.
        """
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.3)
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id_1, 'skill_2', 0.8)
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id_2, 'skill_2', 0.5)
        questions = question_services.get_questions_by_skill_ids(
            2, ['skill_1', 'skill_2'], True)
        # Sort for a deterministic comparison order.
        questions.sort(key=lambda question: question.last_updated)
        self.assertEqual(len(questions), 2)
        self.assertEqual(questions[0].to_dict(), self.question.to_dict())
        self.assertEqual(questions[1].to_dict(), self.question_2.to_dict())
    def test_get_total_question_count_for_skill_ids(self):
        """Counts are summed across distinct skills and duplicate skill ids
        in the query are de-duplicated.
        """
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.3)
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id_1, 'skill_1', 0.8)
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id_2, 'skill_2', 0.5)
        question_count = (
            question_services.get_total_question_count_for_skill_ids(
                ['skill_1']))
        self.assertEqual(question_count, 2)
        question_count = (
            question_services.get_total_question_count_for_skill_ids(
                ['skill_2']))
        self.assertEqual(question_count, 1)
        question_count = (
            question_services.get_total_question_count_for_skill_ids(
                ['skill_1', 'skill_2']))
        self.assertEqual(question_count, 3)
        question_count = (
            question_services.get_total_question_count_for_skill_ids(
                ['skill_1', 'skill_1']))
        self.assertEqual(question_count, 2)
        question_count = (
            question_services.get_total_question_count_for_skill_ids(
                ['skill_1', 'skill_1', 'skill_2']))
        self.assertEqual(question_count, 3)
    def test_update_question_skill_link_difficulty(self):
        """The stored difficulty can be updated; a non-existent link raises."""
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.3)
        _, merged_question_skill_links = (
            question_services.get_displayable_question_skill_link_details(
                2, ['skill_1'], 0))
        self.assertEqual(
            merged_question_skill_links[0].skill_difficulties, [0.3])
        question_services.update_question_skill_link_difficulty(
            self.question_id, 'skill_1', 0.9)
        _, merged_question_skill_links = (
            question_services.get_displayable_question_skill_link_details(
                2, ['skill_1'], 0))
        self.assertEqual(
            merged_question_skill_links[0].skill_difficulties, [0.9])
        with self.assertRaisesRegexp(
            Exception, 'The given question and skill are not linked.'):
            question_services.update_question_skill_link_difficulty(
                self.question_id, 'skill_10', 0.9)
    def test_get_questions_by_skill_ids_without_fetch_by_difficulty(self):
        """Without difficulty-based fetching, all linked questions up to the
        requested count are returned.
        """
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.3)
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id_1, 'skill_2', 0.8)
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id_2, 'skill_2', 0.5)
        questions = question_services.get_questions_by_skill_ids(
            4, ['skill_1', 'skill_2'], False)
        questions.sort(key=lambda question: question.last_updated)
        self.assertEqual(len(questions), 3)
        self.assertEqual(questions[0].to_dict(), self.question.to_dict())
        self.assertEqual(questions[1].to_dict(), self.question_1.to_dict())
        self.assertEqual(questions[2].to_dict(), self.question_2.to_dict())
    def test_get_questions_by_skill_ids_raise_error_with_high_question_count(
            self):
        """Requesting more than the configured maximum raises an error."""
        with self.assertRaisesRegexp(
            Exception, 'Question count is too high, please limit the question '
            'count to %d.' % feconf.MAX_QUESTIONS_FETCHABLE_AT_ONE_TIME):
            question_services.get_questions_by_skill_ids(
                25, ['skill_1', 'skill_2'], False)
    def test_create_multi_question_skill_links_for_question(self):
        """Linking many skills requires matching id/difficulty list lengths."""
        self.question = self.save_new_question(
            self.question_id, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_1'])
        with self.assertRaisesRegexp(
            Exception, 'Skill difficulties and skill ids should match. '
            'The lengths of the two lists are different.'):
            question_services.link_multiple_skills_for_question(
                self.editor_id, self.question_id, ['skill_1', 'skill_2'],
                [0.5])
        question_services.link_multiple_skills_for_question(
            self.editor_id, self.question_id, ['skill_1', 'skill_2'],
            [0.5, 0.7])
        skill_ids = [skill.id for skill in
                     question_services.get_skills_linked_to_question(
                         self.question_id)]
        self.assertItemsEqual(skill_ids, ['skill_1', 'skill_2'])
    def test_delete_question_skill_link(self):
        """Deleting the last skill link also deletes the question itself."""
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.3)
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_2', 0.3)
        question_services.delete_question_skill_link(
            self.editor_id, self.question_id, 'skill_1')
        skill_ids = [skill.id for skill in
                     question_services.get_skills_linked_to_question(
                         self.question_id)]
        self.assertItemsEqual(skill_ids, ['skill_2'])
        # Removing the final link deletes the now-unlinked question.
        question_services.delete_question_skill_link(
            self.editor_id, self.question_id, 'skill_2')
        question = question_services.get_question_by_id(
            self.question_id, strict=False)
        self.assertIsNone(question)
    def test_linking_same_skill_to_question_twice(self):
        """Re-linking an already linked skill does not create a duplicate."""
        question_id_2 = question_services.get_new_question_id()
        self.save_new_question(
            question_id_2, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_1'])
        skill_ids = [skill.id for skill in
                     question_services.get_skills_linked_to_question(
                         question_id_2)]
        self.assertEqual(len(skill_ids), 1)
        self.assertEqual(skill_ids[0], 'skill_1')
        # Linking skill_1 again leaves the link set unchanged.
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_2, 'skill_1', 0.3)
        skill_ids = [skill.id for skill in
                     question_services.get_skills_linked_to_question(
                         question_id_2)]
        self.assertEqual(len(skill_ids), 1)
        self.assertEqual(skill_ids[0], 'skill_1')
        # A different skill still links normally.
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_2, 'skill_2', 0.3)
        skill_ids = [skill.id for skill in
                     question_services.get_skills_linked_to_question(
                         question_id_2)]
        self.assertEqual(len(skill_ids), 2)
        self.assertItemsEqual(skill_ids, ['skill_1', 'skill_2'])
    def test_create_and_get_question_skill_link(self):
        """End-to-end link creation plus displayable link detail queries,
        including the error cases (unknown question, >3 skills, duplicate
        link).
        """
        question_id_2 = question_services.get_new_question_id()
        # Linking to a question that was never saved fails.
        with self.assertRaisesRegexp(
            Exception,
            re.escape(
                'Entity for class QuestionModel with id %s not found' % (
                    question_id_2))):
            question_services.create_new_question_skill_link(
                self.editor_id, question_id_2, 'skill_1', 0.5)
        self.save_new_question(
            question_id_2, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_1'])
        question_id_3 = question_services.get_new_question_id()
        self.save_new_question(
            question_id_3, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_2'])
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.5)
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_3', 0.8)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_2, 'skill_1', 0.3)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_3, 'skill_2', 0.2)
        question_summaries, merged_question_skill_links = (
            question_services.get_displayable_question_skill_link_details(
                5, ['skill_1', 'skill_2', 'skill_3'], 0))
        # Querying more than three skills at once is rejected.
        with self.assertRaisesRegexp(
            Exception, 'Querying linked question summaries for more than 3 '
            'skills at a time is not supported currently.'):
            question_services.get_displayable_question_skill_link_details(
                5, ['skill_1', 'skill_2', 'skill_3', 'skill_4'], 0)
        question_ids = [summary.id for summary in question_summaries]
        self.assertEqual(len(question_ids), 3)
        self.assertEqual(len(merged_question_skill_links), 3)
        self.assertItemsEqual(
            question_ids, [self.question_id, question_id_2, question_id_3])
        self.assertItemsEqual(
            question_ids, [
                question_skill_link.question_id
                for question_skill_link in merged_question_skill_links])
        # Make sure the correct skill description corresponds to respective
        # question summaries.
        for index, link_object in enumerate(merged_question_skill_links):
            if question_ids[index] == self.question_id:
                self.assertEqual(
                    ['Skill Description 3', 'Skill Description 1'],
                    link_object.skill_descriptions)
                self.assertEqual(
                    [0.8, 0.5], link_object.skill_difficulties)
            elif question_ids[index] == question_id_2:
                self.assertEqual(
                    ['Skill Description 1'], link_object.skill_descriptions)
                self.assertEqual(
                    [0.3], link_object.skill_difficulties)
            else:
                self.assertEqual(
                    ['Skill Description 2'], link_object.skill_descriptions)
                self.assertEqual(
                    [0.2], link_object.skill_difficulties)
        question_summaries, merged_question_skill_links = (
            question_services.get_displayable_question_skill_link_details(
                5, ['skill_1', 'skill_3'], 0))
        question_ids = [summary.id for summary in question_summaries]
        self.assertEqual(len(question_ids), 2)
        self.assertItemsEqual(
            question_ids, [self.question_id, question_id_2])
        # Re-linking an existing (question, skill) pair is an error.
        with self.assertRaisesRegexp(
            Exception, 'The given question is already linked to given skill'):
            question_services.create_new_question_skill_link(
                self.editor_id, self.question_id, 'skill_1', 0.3)
    def test_get_displayable_question_skill_link_details_with_no_skill_ids(
            self):
        """An empty skill id list yields empty summaries and links."""
        question_id = question_services.get_new_question_id()
        self.save_new_question(
            question_id, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_1'])
        question_services.create_new_question_skill_link(
            self.editor_id, question_id, 'skill_1', 0.5)
        question_summaries, merged_question_skill_links = (
            question_services.get_displayable_question_skill_link_details(
                2, [], 0))
        self.assertEqual(question_summaries, [])
        self.assertEqual(merged_question_skill_links, [])
    def test_get_question_skill_links_of_skill(self):
        """Links of a skill are returned as QuestionSkillLink objects, with
        empty lists for unknown or unlinked skills.
        """
        # If the skill id doesn't exist at all, it returns an empty list.
        question_skill_links = (
            question_services.get_question_skill_links_of_skill(
                'non_existent_skill_id', 'Skill Description'))
        self.assertEqual(len(question_skill_links), 0)
        # If no question is linked to the skill yet, it returns an empty list.
        question_skill_links = (
            question_services.get_question_skill_links_of_skill(
                'skill_1', 'Skill Description 1'))
        self.assertEqual(len(question_skill_links), 0)
        question_id_2 = question_services.get_new_question_id()
        self.save_new_question(
            question_id_2, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_1'])
        question_id_3 = question_services.get_new_question_id()
        self.save_new_question(
            question_id_3, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_2'])
        # Setting skill difficulty for self.question_id.
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.5)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_2, 'skill_1', 0.3)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_3, 'skill_2', 0.1)
        # When question ids exist, it returns a list of QuestionSkillLinks.
        question_skill_links = (
            question_services.get_question_skill_links_of_skill(
                'skill_1', 'Skill Description 1'))
        self.assertEqual(len(question_skill_links), 2)
        self.assertTrue(isinstance(
            question_skill_links[0], question_domain.QuestionSkillLink))
        question_ids = [question_skill.question_id for question_skill
                        in question_skill_links]
        self.assertItemsEqual(
            question_ids, [self.question_id, question_id_2])
        for question_skill in question_skill_links:
            if question_skill.question_id == self.question_id:
                self.assertEqual(question_skill.skill_difficulty, 0.5)
    def test_get_question_summaries_by_ids(self):
        """Unknown ids produce None entries, preserving positional order."""
        question_summaries = question_services.get_question_summaries_by_ids([
            self.question_id, 'invalid_question_id'])
        self.assertEqual(len(question_summaries), 2)
        self.assertEqual(question_summaries[0].id, self.question_id)
        self.assertEqual(
            question_summaries[0].question_content,
            feconf.DEFAULT_INIT_STATE_CONTENT_STR)
        self.assertIsNone(question_summaries[1])
def test_delete_question(self):
question_summary_model = question_models.QuestionSummaryModel.get(
self.question_id)
self.assertFalse(question_summary_model is None)
question_services.delete_question(self.editor_id, self.question_id)
with self.assertRaisesRegexp(Exception, (
'Entity for class QuestionModel with id %s not found' % (
self.question_id))):
question_models.QuestionModel.get(self.question_id)
with self.assertRaisesRegexp(Exception, (
'Entity for class QuestionSummaryModel with id %s not found' % (
self.question_id))):
question_models.QuestionSummaryModel.get(self.question_id)
def test_delete_question_marked_deleted(self):
question_models.QuestionModel.delete_multi(
[self.question_id], self.editor_id,
feconf.COMMIT_MESSAGE_QUESTION_DELETED, force_deletion=False)
question_model = question_models.QuestionModel.get_by_id(
self.question_id)
self.assertTrue(question_model.deleted)
question_services.delete_question(
self.editor_id, self.question_id, force_deletion=True)
question_model = question_models.QuestionModel.get_by_id(
self.question_id)
self.assertEqual(question_model, None)
self.assertEqual(
question_models.QuestionSummaryModel.get(
self.question_id, strict=False), None)
def test_delete_question_model_with_deleted_summary_model(self):
question_summary_model = (
question_models.QuestionSummaryModel.get(self.question_id))
question_summary_model.delete()
question_summary_model = (
question_models.QuestionSummaryModel.get(self.question_id, False))
self.assertIsNone(question_summary_model)
question_services.delete_question(
self.editor_id, self.question_id, force_deletion=True)
question_model = question_models.QuestionModel.get_by_id(
self.question_id)
self.assertEqual(question_model, None)
self.assertEqual(
question_models.QuestionSummaryModel.get(
self.question_id, strict=False), None)
    def test_update_question(self):
        """Updating question_state_data stores it and bumps the version."""
        new_question_data = self._create_valid_question_data('DEF')
        change_dict = {
            'cmd': 'update_question_property',
            'property_name': 'question_state_data',
            'new_value': new_question_data.to_dict(),
            'old_value': self.question.question_state_data.to_dict()
        }
        change_list = [question_domain.QuestionChange(change_dict)]
        question_services.update_question(
            self.editor_id, self.question_id, change_list,
            'updated question data')
        question = question_services.get_question_by_id(self.question_id)
        self.assertEqual(
            question.question_state_data.to_dict(), new_question_data.to_dict())
        self.assertEqual(question.version, 2)
    def test_cannot_update_question_with_no_commit_message(self):
        """An update with a None commit message is rejected."""
        new_question_data = self._create_valid_question_data('DEF')
        change_dict = {
            'cmd': 'update_question_property',
            'property_name': 'question_state_data',
            'new_value': new_question_data.to_dict(),
            'old_value': self.question.question_state_data.to_dict()
        }
        change_list = [question_domain.QuestionChange(change_dict)]
        with self.assertRaisesRegexp(
            Exception, 'Expected a commit message, received none.'):
            question_services.update_question(
                self.editor_id, self.question_id, change_list, None)
    def test_cannot_update_question_with_no_change_list(self):
        """An update with an empty change list is rejected."""
        with self.assertRaisesRegexp(
            Exception,
            'Unexpected error: received an invalid change list when trying to '
            'save question'):
            question_services.update_question(
                self.editor_id, self.question_id, [],
                'updated question data')
def test_update_question_language_code(self):
self.assertEqual(self.question.language_code, 'en')
change_dict = {
'cmd': 'update_question_property',
'property_name': 'language_code',
'new_value': 'bn',
'old_value': 'en'
}
change_list = [question_domain.QuestionChange(change_dict)]
question_services.update_question(
self.editor_id, self.question_id, change_list,
'updated question language code')
question = question_services.get_question_by_id(self.question_id)
self.assertEqual(question.language_code, 'bn')
self.assertEqual(question.version, 2)
def test_update_inapplicable_skill_misconception_ids(self):
self.assertEqual(
self.question.inapplicable_skill_misconception_ids,
['skillid12345-1', 'skillid12345-2'])
change_dict = {
'cmd': 'update_question_property',
'property_name': 'inapplicable_skill_misconception_ids',
'new_value': ['skillid12345-1'],
'old_value': []
}
change_list = [question_domain.QuestionChange(change_dict)]
question_services.update_question(
self.editor_id, self.question_id, change_list,
'updated inapplicable_skill_misconception_ids')
question = question_services.get_question_by_id(self.question_id)
self.assertEqual(
question.inapplicable_skill_misconception_ids, ['skillid12345-1'])
self.assertEqual(question.version, 2)
def test_cannot_update_question_with_invalid_change_list(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.error()."""
observed_log_messages.append(msg % args)
logging_swap = self.swap(logging, 'error', _mock_logging_function)
assert_raises_context_manager = self.assertRaisesRegexp(
Exception, '\'str\' object has no attribute \'cmd\'')
with logging_swap, assert_raises_context_manager:
question_services.update_question(
self.editor_id, self.question_id, 'invalid_change_list',
'updated question language code')
self.assertEqual(len(observed_log_messages), 1)
self.assertRegexpMatches(
observed_log_messages[0],
'object has no attribute \'cmd\' %s '
'invalid_change_list' % self.question_id)
    def test_replace_skill_id_for_all_questions(self):
        """Checks that replace_skill_id_for_all_questions moves every link
        from the old skill id to the new one, preserving difficulties, and
        leaves questions linked to other skills untouched.
        """
        # Link self.question and question_id_2 to 'skill_1'; question_id_3
        # to 'skill_2' (which should be unaffected by the replacement).
        question_id_2 = question_services.get_new_question_id()
        self.save_new_question(
            question_id_2, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_1'])
        question_id_3 = question_services.get_new_question_id()
        self.save_new_question(
            question_id_3, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_2'])
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.5)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_2, 'skill_1', 0.3)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_3, 'skill_2', 0.9)
        # Sanity check: two questions are linked to 'skill_1' before the
        # replacement.
        question_skill_links = (
            question_services.get_question_skill_links_of_skill(
                'skill_1', 'Skill Description 1'))
        self.assertEqual(len(question_skill_links), 2)
        question_ids = [question_skill.question_id for question_skill
                        in question_skill_links]
        self.assertItemsEqual(
            question_ids, [self.question_id, question_id_2])
        for question_skill in question_skill_links:
            if question_skill.question_id == self.question_id:
                self.assertEqual(question_skill.skill_difficulty, 0.5)
        question_services.replace_skill_id_for_all_questions(
            'skill_1', 'Description 1', 'skill_3')
        # After replacement, no links remain on 'skill_1' ...
        question_skill_links = (
            question_services.get_question_skill_links_of_skill(
                'skill_1', 'Description 1'))
        self.assertEqual(len(question_skill_links), 0)
        # ... and the same questions (with the same difficulty) are now
        # linked to 'skill_3'.
        question_skill_links = (
            question_services.get_question_skill_links_of_skill(
                'skill_3', 'Skill Description 3'))
        question_ids = [question_skill.question_id for question_skill
                        in question_skill_links]
        self.assertItemsEqual(
            question_ids, [self.question_id, question_id_2])
        for question_skill in question_skill_links:
            if question_skill.question_id == self.question_id:
                self.assertEqual(question_skill.skill_difficulty, 0.5)
        # The questions' own linked_skill_ids are rewritten too.
        questions = question_fetchers.get_questions_by_ids(
            [self.question_id, question_id_2, question_id_3])
        for question in questions:
            if question.id in ([self.question_id, question_id_2]):
                self.assertItemsEqual(question.linked_skill_ids, ['skill_3'])
            else:
                self.assertItemsEqual(question.linked_skill_ids, ['skill_2'])
def test_compute_summary_of_question(self):
question_summary = question_services.compute_summary_of_question(
self.question)
self.assertEqual(question_summary.id, self.question_id)
self.assertEqual(
question_summary.question_content,
feconf.DEFAULT_INIT_STATE_CONTENT_STR)
    def test_get_skills_of_question(self):
        """Checks that get_skills_linked_to_question returns the Skill
        domain objects linked to a question, and raises for an unknown
        question id.
        """
        # If the question id does not exist, an exception is raised.
        with self.assertRaisesRegexp(
            Exception, 'Entity for class QuestionModel with id '
            'non_existent_question_id not found'):
            question_services.get_skills_linked_to_question(
                'non_existent_question_id')
        question_id_2 = question_services.get_new_question_id()
        self.save_new_question(
            question_id_2, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_1'])
        question_id_3 = question_services.get_new_question_id()
        self.save_new_question(
            question_id_3, self.editor_id,
            self._create_valid_question_data('ABC'), ['skill_2'])
        # question_id_2 is linked to both skills; the other questions each
        # have a single link.
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skill_1', 0.5)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_2, 'skill_1', 0.3)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_2, 'skill_2', 0.0)
        question_services.create_new_question_skill_link(
            self.editor_id, question_id_3, 'skill_2', 0.1)
        skills = (
            question_services.get_skills_linked_to_question(
                question_id_2))
        self.assertTrue(isinstance(skills[0], skill_domain.Skill))
        self.assertEqual(len(skills), 2)
        skill_ids = [skill.id for skill in skills]
        self.assertItemsEqual(
            skill_ids, ['skill_1', 'skill_2'])
def test_get_interaction_id_for_question(self):
self.assertEqual(
question_services.get_interaction_id_for_question(
self.question_id), 'TextInput')
with self.assertRaisesRegexp(Exception, 'No questions exists with'):
question_services.get_interaction_id_for_question('fake_q_id')
    def test_untag_deleted_misconceptions_on_no_change_to_skill(self):
        """Checks that untag_deleted_misconceptions is a no-op when none of
        the skill's misconceptions were deleted: the answer groups' tagged
        ids and the question's inapplicable_skill_misconception_ids are
        unchanged.
        """
        misconceptions = [
            skill_domain.Misconception(
                0, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', True),
            skill_domain.Misconception(
                1, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', True),
            skill_domain.Misconception(
                2, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', False),
            skill_domain.Misconception(
                3, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', False),
            skill_domain.Misconception(
                4, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', False)
        ]
        self.save_new_skill(
            'skillid12345', self.admin_id,
            description='Skill with misconceptions',
            misconceptions=misconceptions)
        self.question_id = question_services.get_new_question_id()
        question_state_data = self._create_valid_question_data('state name')
        # Three answer groups, tagged with misconceptions 0, 1 and 2
        # of 'skillid12345' respectively.
        question_state_data.interaction.answer_groups = [
            state_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': '<p>Feedback</p>'
                    },
                    'labelled_as_correct': True,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'rule_specs': [{
                    'inputs': {
                        'x': {
                            'contentId': 'rule_input_3',
                            'normalizedStrSet': ['Test']
                        }
                    },
                    'rule_type': 'Contains'
                }],
                'training_data': [],
                'tagged_skill_misconception_id': 'skillid12345-0'
            }),
            state_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_2',
                        'html': '<p>Feedback</p>'
                    },
                    'labelled_as_correct': True,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'rule_specs': [{
                    'inputs': {
                        'x': {
                            'contentId': 'rule_input_4',
                            'normalizedStrSet': ['Test']
                        }
                    },
                    'rule_type': 'Contains'
                }],
                'training_data': [],
                'tagged_skill_misconception_id': 'skillid12345-1'
            }),
            state_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_0',
                        'html': '<p>Feedback</p>'
                    },
                    'labelled_as_correct': True,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'rule_specs': [{
                    'inputs': {
                        'x': {
                            'contentId': 'rule_input_5',
                            'normalizedStrSet': ['Test']
                        }
                    },
                    'rule_type': 'Contains'
                }],
                'training_data': [],
                'tagged_skill_misconception_id': 'skillid12345-2'
            })
        ]
        # Register the content ids used above in the translations and
        # voiceovers mappings so the state stays internally consistent.
        question_state_data.written_translations.translations_mapping.update({
            'feedback_0': {},
            'feedback_1': {},
            'feedback_2': {},
            'rule_input_3': {},
            'rule_input_4': {},
            'rule_input_5': {}
        })
        question_state_data.recorded_voiceovers.voiceovers_mapping.update({
            'feedback_0': {},
            'feedback_1': {},
            'feedback_2': {},
            'rule_input_3': {},
            'rule_input_4': {},
            'rule_input_5': {}
        })
        question_state_data.next_content_id_index = 5
        # Misconceptions 3 and 4 are marked inapplicable for this question.
        inapplicable_skill_misconception_ids = [
            'skillid12345-3',
            'skillid12345-4'
        ]
        self.question = self.save_new_question(
            self.question_id, self.editor_id,
            question_state_data, ['skillid12345'],
            inapplicable_skill_misconception_ids=(
                inapplicable_skill_misconception_ids))
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skillid12345', 0.5)
        answer_groups = (
            self.question.question_state_data.interaction.answer_groups)
        actual_misconception_ids = [
            answer_group.to_dict()['tagged_skill_misconception_id']
            for answer_group in answer_groups
            if answer_group.to_dict()['tagged_skill_misconception_id']]
        expected_misconception_ids = [
            'skillid12345-0',
            'skillid12345-1',
            'skillid12345-2'
        ]
        self.assertEqual(
            self.question.inapplicable_skill_misconception_ids,
            inapplicable_skill_misconception_ids)
        self.assertEqual(actual_misconception_ids, expected_misconception_ids)
        # Try to untag deleted skill misconceptions when there are no deleted
        # misconceptions.
        question_services.untag_deleted_misconceptions(
            self.editor_id, 'skillid12345',
            'Skill with misconceptions', [])
        # No change when skill misconception ids exist.
        updated_question = question_services.get_question_by_id(
            self.question_id)
        self.assertEqual(
            updated_question.inapplicable_skill_misconception_ids,
            inapplicable_skill_misconception_ids)
        self.assertEqual(actual_misconception_ids, expected_misconception_ids)
    def test_untag_deleted_misconceptions_correctly_on_updating_skill(self):
        """Checks that deleting misconceptions from a skill untags the
        corresponding answer groups and removes the deleted ids from the
        question's inapplicable_skill_misconception_ids.
        """
        misconceptions = [
            skill_domain.Misconception(
                0, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', True),
            skill_domain.Misconception(
                1, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', True),
            skill_domain.Misconception(
                2, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', False),
            skill_domain.Misconception(
                3, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', False),
            skill_domain.Misconception(
                4, 'misconception-name', '<p>description</p>',
                '<p>default_feedback</p>', False)
        ]
        self.save_new_skill(
            'skillid12345', self.admin_id,
            description='Skill with misconceptions',
            misconceptions=misconceptions)
        self.question_id = question_services.get_new_question_id()
        question_state_data = self._create_valid_question_data('state name')
        # Three answer groups, tagged with misconceptions 0, 1 and 2
        # of 'skillid12345' respectively.
        question_state_data.interaction.answer_groups = [
            state_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': '<p>Feedback</p>'
                    },
                    'labelled_as_correct': True,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'rule_specs': [{
                    'inputs': {
                        'x': {
                            'contentId': 'rule_input_3',
                            'normalizedStrSet': ['Test']
                        }
                    },
                    'rule_type': 'Contains'
                }],
                'training_data': [],
                'tagged_skill_misconception_id': 'skillid12345-0'
            }),
            state_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_2',
                        'html': '<p>Feedback</p>'
                    },
                    'labelled_as_correct': True,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'rule_specs': [{
                    'inputs': {
                        'x': {
                            'contentId': 'rule_input_4',
                            'normalizedStrSet': ['Test']
                        }
                    },
                    'rule_type': 'Contains'
                }],
                'training_data': [],
                'tagged_skill_misconception_id': 'skillid12345-1'
            }),
            state_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_0',
                        'html': '<p>Feedback</p>'
                    },
                    'labelled_as_correct': True,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'rule_specs': [{
                    'inputs': {
                        'x': {
                            'contentId': 'rule_input_5',
                            'normalizedStrSet': ['Test']
                        }
                    },
                    'rule_type': 'Contains'
                }],
                'training_data': [],
                'tagged_skill_misconception_id': 'skillid12345-2'
            })
        ]
        # Register the content ids used above in the translations and
        # voiceovers mappings so the state stays internally consistent.
        question_state_data.written_translations.translations_mapping.update({
            'feedback_0': {},
            'feedback_1': {},
            'feedback_2': {},
            'rule_input_3': {},
            'rule_input_4': {},
            'rule_input_5': {}
        })
        question_state_data.recorded_voiceovers.voiceovers_mapping.update({
            'feedback_0': {},
            'feedback_1': {},
            'feedback_2': {},
            'rule_input_3': {},
            'rule_input_4': {},
            'rule_input_5': {}
        })
        question_state_data.next_content_id_index = 5
        # Misconceptions 3 and 4 are marked inapplicable for this question.
        inapplicable_skill_misconception_ids = [
            'skillid12345-3',
            'skillid12345-4'
        ]
        self.question = self.save_new_question(
            self.question_id, self.editor_id,
            question_state_data, ['skillid12345'],
            inapplicable_skill_misconception_ids=(
                inapplicable_skill_misconception_ids))
        question_services.create_new_question_skill_link(
            self.editor_id, self.question_id, 'skillid12345', 0.5)
        answer_groups = (
            self.question.question_state_data.interaction.answer_groups)
        actual_misconception_ids = [
            answer_group.to_dict()['tagged_skill_misconception_id']
            for answer_group in answer_groups
            if answer_group.to_dict()['tagged_skill_misconception_id']]
        expected_misconception_ids = [
            'skillid12345-0',
            'skillid12345-1',
            'skillid12345-2'
        ]
        self.assertEqual(
            self.question.inapplicable_skill_misconception_ids,
            inapplicable_skill_misconception_ids)
        self.assertEqual(actual_misconception_ids, expected_misconception_ids)
        # Delete misconceptions 0, 2 and 4 from the skill.
        change_list = [
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION,
                'misconception_id': 0,
            }),
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION,
                'misconception_id': 2,
            }),
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION,
                'misconception_id': 4,
            })
        ]
        skill_services.update_skill(
            self.editor_id, 'skillid12345',
            change_list, 'Delete misconceptions.')
        self.process_and_flush_pending_tasks()
        # Only misconception 1 stays tagged (0 and 2 were deleted), and only
        # id 3 stays inapplicable (4 was deleted).
        updated_question = question_services.get_question_by_id(
            self.question_id)
        updated_answer_groups = (
            updated_question.question_state_data.interaction.answer_groups)
        actual_misconception_ids = [
            answer_group.to_dict()['tagged_skill_misconception_id']
            for answer_group in updated_answer_groups
            if answer_group.to_dict()['tagged_skill_misconception_id']]
        expected_misconception_ids = ['skillid12345-1']
        actual_inapplicable_skill_misconception_ids = (
            updated_question.inapplicable_skill_misconception_ids)
        expected_inapplicable_skill_misconception_ids = (
            ['skillid12345-3'])
        self.assertEqual(
            actual_inapplicable_skill_misconception_ids,
            expected_inapplicable_skill_misconception_ids)
        self.assertEqual(actual_misconception_ids, expected_misconception_ids)
class QuestionMigrationTests(test_utils.GenericTestBase):
    """Tests that question state data saved at older schema versions is
    migrated to feconf.CURRENT_STATE_SCHEMA_VERSION when the question is
    loaded from its model.
    """

    def test_migrate_question_state_from_v29_to_latest(self):
        """Checks v29 -> latest migration: the answer group's old
        'tagged_misconception_id' field is replaced by
        'tagged_skill_misconception_id' on the migrated domain object.
        """
        # v29-format answer group: note the legacy 'tagged_misconception_id'
        # key.
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {
                    'correct_answer': 'This is the correct answer',
                    'answer_is_exclusive': False,
                    'explanation': {
                        'content_id': 'explanation_1',
                        'html': 'Solution explanation'
                    }
                },
                'id': 'TextInput'
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=29)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        # Loading the model triggers the migration to the latest schema.
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_groups = question.question_state_data.interaction.answer_groups
        self.assertEqual(answer_groups[0].tagged_skill_misconception_id, None)
    def test_migrate_question_state_from_v30_to_latest(self):
        """Checks v30 -> latest migration: existing voiceovers gain a
        'duration_secs' field (defaulting to 0.0) and the mapping is
        extended with entries for the new content ids.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'Test'
                },
                'rule_type': 'Contains'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        # v30-format voiceover: no 'duration_secs' key yet.
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {
                    'content': {
                        'en': {
                            'filename': 'test.mp3',
                            'file_size_bytes': 100,
                            'needs_update': False
                        }
                    }
                }
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {
                    'correct_answer': 'This is the correct answer',
                    'answer_is_exclusive': False,
                    'explanation': {
                        'content_id': 'explanation_1',
                        'html': 'Solution explanation'
                    }
                },
                'id': 'TextInput'
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=30)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        # Loading the model triggers the migration to the latest schema.
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        self.assertEqual(
            question.question_state_data
            .recorded_voiceovers.to_dict(), {
                'voiceovers_mapping': {
                    'ca_placeholder_0': {},
                    'content': {
                        'en': {
                            'filename': 'test.mp3',
                            'file_size_bytes': 100,
                            'needs_update': False,
                            'duration_secs': 0.0}},
                    'rule_input_1': {}}})
    def test_migrate_question_state_from_v31_to_latest(self):
        """Checks v31 -> latest migration: a SetInput interaction with empty
        customization_args gains a 'buttonText' argument defaulting to
        'Add item'.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['A', 'B', 'C']
                },
                'rule_type': 'HasElementsIn'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {},
                'id': 'SetInput'
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=31)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        # Loading the model triggers the migration to the latest schema.
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        cust_args = question.question_state_data.interaction.customization_args
        self.assertEqual(
            cust_args['buttonText'].value.unicode_str,
            'Add item')
    def test_migrate_question_state_from_v32_to_latest(self):
        """Checks v32 -> latest migration: a MultipleChoiceInput interaction
        gains a 'showChoicesInShuffledOrder' customization argument
        defaulting to True, while 'choices' is preserved.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['A']
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'choices': {
                        'value': []
                    }
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {},
                'id': 'MultipleChoiceInput'
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=32)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        # Loading the model triggers the migration to the latest schema.
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        cust_args = question.question_state_data.interaction.customization_args
        self.assertEqual(cust_args['choices'].value, [])
        self.assertEqual(cust_args['showChoicesInShuffledOrder'].value, True)
def test_migrate_question_state_from_v33_to_latest(self):
feedback_html_content = (
'<p>Feedback</p><oppia-noninteractive-math raw_latex-with-value="'
'&quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
answer_group = {
'outcome': {
'dest': 'abc',
'feedback': {
'content_id': 'feedback_1',
'html': feedback_html_content
},
'labelled_as_correct': True,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': ['A']
},
'rule_type': 'Equals'
}],
'training_data': [],
'tagged_skill_misconception_id': None
}
question_state_dict = {
'content': {
'content_id': 'content_1',
'html': 'Question 1'
},
'recorded_voiceovers': {
'voiceovers_mapping': {}
},
'written_translations': {
'translations_mapping': {
'explanation': {}
}
},
'interaction': {
'answer_groups': [answer_group],
'confirmed_unclassified_answers': [],
'customization_args': {
'choices': {
'value': ''
},
'showChoicesInShuffledOrder': {
'value': True
}
},
'default_outcome': {
'dest': None,
'feedback': {
'content_id': 'feedback_1',
'html': 'Correct Answer'
},
'param_changes': [],
'refresher_exploration_id': None,
'labelled_as_correct': True,
'missing_prerequisite_skill_id': None
},
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': 'Hint 1'
}
}],
'solution': {},
'id': 'MultipleChoiceInput'
},
'param_changes': [],
'solicit_answer_details': False,
'classifier_model_id': None
}
expected_feeedback_html_content = (
'<p>Feedback</p><oppia-noninteractive-math math_content-with-val'
'ue="{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;,'
' &quot;svg_filename&quot;: &quot;&quot;}"></oppi'
'a-noninteractive-math>')
question_model = (
question_models.QuestionModel(
id='question_id',
question_state_data=question_state_dict,
language_code='en',
version=0,
linked_skill_ids=['skill_id'],
question_state_data_schema_version=33))
commit_cmd = (
question_domain.QuestionChange({
'cmd': question_domain.CMD_CREATE_NEW
}))
commit_cmd_dicts = [commit_cmd.to_dict()]
question_model.commit(
'user_id_admin', 'question model created', commit_cmd_dicts)
question = question_fetchers.get_question_from_model(question_model)
self.assertEqual(
question.question_state_data_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION)
migrated_answer_group = (
question.question_state_data.interaction.answer_groups[0])
self.assertEqual(
migrated_answer_group.outcome.feedback.html,
expected_feeedback_html_content)
    def test_migrate_question_state_from_v34_to_latest(self):
        """Checks v34 -> latest migration of the deprecated
        MathExpressionInput interaction across four scenarios: it becomes
        MathEquationInput, AlgebraicExpressionInput or
        NumericExpressionInput depending on the rule/solution inputs,
        duplicate rules collapse to a single MatchesExactlyWith rule, and
        answer groups (with their voiceovers/translations entries) that are
        invalid for the new interaction are dropped.
        """
        # Scenario 1: rules contain an equation ('x=y') -> the interaction
        # migrates to MathEquationInput.
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'x+y'
                },
                'rule_type': 'IsMathematicallyEquivalentTo'
            }, {
                'inputs': {
                    'x': 'x=y'
                },
                'rule_type': 'IsMathematicallyEquivalentTo'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {
                    'correct_answer': {
                        'ascii': 'x=y',
                        'latex': 'x=y'
                    },
                    'answer_is_exclusive': False,
                    'explanation': {
                        'html': 'Solution explanation',
                        'content_id': 'content_2'
                    }
                },
                'id': 'MathExpressionInput'
            },
            'next_content_id_index': 3,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=34)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_groups = question.question_state_data.interaction.answer_groups
        self.assertEqual(
            question.question_state_data.interaction.id, 'MathEquationInput')
        # The two legacy rules collapse into one MatchesExactlyWith rule.
        self.assertEqual(len(answer_groups[0].rule_specs), 1)
        self.assertEqual(
            answer_groups[0].rule_specs[0].rule_type, 'MatchesExactlyWith')
        self.assertEqual(
            answer_groups[0].rule_specs[0].inputs, {'x': 'x=y', 'y': 'both'})
        # Scenario 2: algebraic inputs only ('x+y') -> the interaction
        # migrates to AlgebraicExpressionInput.
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'x+y'
                },
                'rule_type': 'IsMathematicallyEquivalentTo'
            }, {
                'inputs': {
                    'x': '1.2 + 3'
                },
                'rule_type': 'IsMathematicallyEquivalentTo'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {
                    'correct_answer': {
                        'ascii': 'x+y',
                        'latex': 'x+y'
                    },
                    'answer_is_exclusive': False,
                    'explanation': {
                        'html': 'Solution explanation',
                        'content_id': 'content_2'
                    }
                },
                'id': 'MathExpressionInput'
            },
            'next_content_id_index': 3,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=34)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_groups = question.question_state_data.interaction.answer_groups
        self.assertEqual(
            question.question_state_data.interaction.id,
            'AlgebraicExpressionInput')
        self.assertEqual(len(answer_groups[0].rule_specs), 1)
        self.assertEqual(
            answer_groups[0].rule_specs[0].rule_type, 'MatchesExactlyWith')
        self.assertEqual(
            answer_groups[0].rule_specs[0].inputs, {'x': 'x+y'})
        # Scenario 3: numeric inputs only -> the interaction migrates to
        # NumericExpressionInput.
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': '1,2 + 3'
                },
                'rule_type': 'IsMathematicallyEquivalentTo'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {
                    'correct_answer': {
                        'ascii': '1.2 + 3',
                        'latex': '1.2 + 3'
                    },
                    'answer_is_exclusive': False,
                    'explanation': {
                        'html': 'Solution explanation',
                        'content_id': 'content_2'
                    }
                },
                'id': 'MathExpressionInput'
            },
            'next_content_id_index': 3,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=34)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_groups = question.question_state_data.interaction.answer_groups
        self.assertEqual(
            question.question_state_data.interaction.id,
            'NumericExpressionInput')
        self.assertEqual(len(answer_groups[0].rule_specs), 1)
        self.assertEqual(
            answer_groups[0].rule_specs[0].rule_type, 'MatchesExactlyWith')
        self.assertEqual(
            answer_groups[0].rule_specs[0].inputs, {'x': '1.2 + 3'})
        # Scenario 4: two answer groups with mixed inputs -> the interaction
        # migrates to AlgebraicExpressionInput, the numeric-only group is
        # dropped, and its content ids are pruned from the voiceovers and
        # translations mappings.
        answer_groups_list = [{
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': 'x+y'
                },
                'rule_type': 'IsMathematicallyEquivalentTo'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }, {
            'outcome': {
                'dest': 'Introduction',
                'feedback': {
                    'content_id': 'feedback_2',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': '1.2 + 3'
                },
                'rule_type': 'IsMathematicallyEquivalentTo'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }]
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {
                    'content_1': {},
                    'feedback_1': {},
                    'feedback_2': {},
                    'feedback_3': {}
                }
            },
            'written_translations': {
                'translations_mapping': {
                    'content_1': {},
                    'feedback_1': {},
                    'feedback_2': {},
                    'feedback_3': {}
                }
            },
            'interaction': {
                'answer_groups': answer_groups_list,
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': 'Introduction',
                    'feedback': {
                        'content_id': 'feedback_3',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': None,
                'id': 'MathExpressionInput'
            },
            'next_content_id_index': 4,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=34)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_groups = question.question_state_data.interaction.answer_groups
        self.assertEqual(
            question.question_state_data.interaction.id,
            'AlgebraicExpressionInput')
        self.assertEqual(len(answer_groups), 1)
        self.assertEqual(
            answer_groups[0].rule_specs[0].rule_type, 'MatchesExactlyWith')
        self.assertEqual(
            answer_groups[0].rule_specs[0].inputs, {'x': 'x+y'})
        # 'feedback_2' (the dropped group's content id) is pruned from both
        # mappings.
        state_data = question.question_state_data
        self.assertEqual(sorted(
            state_data.recorded_voiceovers.voiceovers_mapping.keys()), [
                'content_1', 'feedback_1', 'feedback_3'])
        self.assertEqual(sorted(
            state_data.written_translations.translations_mapping.keys()), [
                'content_1', 'feedback_1', 'feedback_3'])
    def test_migrate_question_state_from_v35_to_latest(self):
        """Checks that v35 question states migrate to the latest schema.

        Exercises four distinct v35 migration behaviors, each on its own
        freshly committed question model:
        1. restructuring of written_translations entries,
        2. renaming PencilCodeEditor's initial_code customization argument
           to initialCode,
        3. population of default values for SubtitledHtml lists, and
        4. migration of plain html lists to SubtitledHtml lists.
        """
        # Test restructuring of written_translations.
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {
                        'en': {
                            'html': '<p>test</p>',
                            'needs_update': True
                        }
                    }
                }
            },
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': None
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = (
            question_models.QuestionModel(
                id='question_id',
                question_state_data=question_state_dict,
                language_code='en',
                version=0,
                linked_skill_ids=['skill_id'],
                question_state_data_schema_version=35))
        commit_cmd = (
            question_domain.QuestionChange({
                'cmd': question_domain.CMD_CREATE_NEW
            }))
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        # Fetching through question_fetchers runs the state migration chain.
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        migrated_translations_mapping = (
            question
            .question_state_data.written_translations.to_dict())
        # The html translation gains an explicit data_format and the html
        # payload is renamed to 'translation'.
        self.assertEqual(
            migrated_translations_mapping,
            {
                'translations_mapping': {
                    'explanation': {
                        'en': {
                            'data_format': 'html',
                            'translation': '<p>test</p>',
                            'needs_update': True
                        }
                    }
                }
            })
        # Test migration of PencilCodeEditor customization argument from
        # initial_code to initialCode.
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'initial_code': {
                        'value': 'code'
                    }
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': 'PencilCodeEditor'
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = (
            question_models.QuestionModel(
                id='question_id',
                question_state_data=question_state_dict,
                language_code='en',
                version=0,
                linked_skill_ids=['skill_id'],
                question_state_data_schema_version=35))
        commit_cmd = (
            question_domain.QuestionChange({
                'cmd': question_domain.CMD_CREATE_NEW
            }))
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        migrated_ca = question.question_state_data.to_dict()['interaction'][
            'customization_args']
        self.assertEqual(
            migrated_ca,
            {
                'initialCode': {
                    'value': 'code'
                }
            })
        # Test population of default value of SubtitledHtml list.
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': 'MultipleChoiceInput'
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = (
            question_models.QuestionModel(
                id='question_id',
                question_state_data=question_state_dict,
                language_code='en',
                version=0,
                linked_skill_ids=['skill_id'],
                question_state_data_schema_version=35))
        commit_cmd = (
            question_domain.QuestionChange({
                'cmd': question_domain.CMD_CREATE_NEW
            }))
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        migrated_ca = question.question_state_data.to_dict()['interaction'][
            'customization_args']
        # Missing MultipleChoiceInput customization args are filled in with
        # a single empty SubtitledHtml choice and a default shuffle flag.
        self.assertEqual(
            migrated_ca,
            {
                'choices': {
                    'value': [{'content_id': 'ca_choices_0', 'html': ''}]
                },
                'showChoicesInShuffledOrder': {'value': True}
            })
        # Test migration of html list to SubtitledHtml list.
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {}
            },
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'choices': {
                        'value': ['one', 'two', 'three']
                    }
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': 'MultipleChoiceInput'
            },
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = (
            question_models.QuestionModel(
                id='question_id',
                question_state_data=question_state_dict,
                language_code='en',
                version=0,
                linked_skill_ids=['skill_id'],
                question_state_data_schema_version=35))
        commit_cmd = (
            question_domain.QuestionChange({
                'cmd': question_domain.CMD_CREATE_NEW
            }))
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        migrated_ca = question.question_state_data.to_dict()['interaction'][
            'customization_args']
        # Each plain html string becomes a SubtitledHtml dict with a fresh
        # ca_choices_<n> content id.
        self.assertEqual(
            migrated_ca,
            {
                'choices': {
                    'value': [{
                        'content_id': 'ca_choices_0',
                        'html': 'one'
                    }, {
                        'content_id': 'ca_choices_1',
                        'html': 'two'
                    }, {
                        'content_id': 'ca_choices_2',
                        'html': 'three'
                    }]
                },
                'showChoicesInShuffledOrder': {'value': True}
            })
    def test_migrate_question_state_from_v36_to_latest(self):
        """Checks that a v36 question state migrates to the latest schema.

        A TextInput rule of type CaseSensitiveEquals should be converted to
        an Equals rule whose input becomes a translatable dict carrying a
        contentId and a normalizedStrSet.
        """
        # Test migration of a CaseSensitiveEquals TextInput rule.
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {}
            },
            'interaction': {
                'answer_groups': [{
                    'outcome': {
                        'dest': None,
                        'feedback': {
                            'content_id': 'feedback_1',
                            'html': 'Correct Answer'
                        },
                        'param_changes': [],
                        'refresher_exploration_id': None,
                        'labelled_as_correct': True,
                        'missing_prerequisite_skill_id': None
                    },
                    'rule_specs': [{
                        'inputs': {'x': 'test'},
                        'rule_type': 'CaseSensitiveEquals'
                    }],
                    'tagged_skill_misconception_id': None,
                    'training_data': []
                }],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'placeholder': {
                        'value': {
                            'content_id': 'ca_placeholder_0',
                            'unicode_str': ''
                        }
                    },
                    'rows': {'value': 1}
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': 'TextInput'
            },
            'next_content_id_index': 2,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = (
            question_models.QuestionModel(
                id='question_id',
                question_state_data=question_state_dict,
                language_code='en',
                version=0,
                linked_skill_ids=['skill_id'],
                question_state_data_schema_version=36))
        commit_cmd = (
            question_domain.QuestionChange({
                'cmd': question_domain.CMD_CREATE_NEW
            }))
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        migrated_rule_spec = (
            question
            .question_state_data
            .interaction.answer_groups[0]
            .rule_specs[0].to_dict())
        # The rule input is wrapped into a translatable string set with a
        # newly allocated rule_input_<n> content id.
        self.assertEqual(
            migrated_rule_spec,
            {
                'inputs': {'x': {
                    'contentId': 'rule_input_2',
                    'normalizedStrSet': ['test']
                }},
                'rule_type': 'Equals'
            })
    def test_migrate_question_state_from_v37_to_latest(self):
        """Checks that a v37 question state migrates to the latest schema.

        An AlgebraicExpressionInput interaction should gain a
        customOskLetters customization argument derived from the variables
        appearing in its rule inputs.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': '((x)^(2))/(2.5)-(alpha)/(beta)'
                },
                'rule_type': 'MatchesExactlyWith'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {},
                'id': 'AlgebraicExpressionInput'
            },
            'next_content_id_index': 3,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=37)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        cust_args = question.question_state_data.interaction.customization_args
        # Greek variable names from the rule input are rendered as their
        # unicode symbols in the on-screen keyboard letter list.
        self.assertEqual(
            cust_args['customOskLetters'].value, ['x', 'α', 'β'])
    def test_migrate_question_state_from_v38_to_latest(self):
        """Checks that a v38 question state migrates to the latest schema.

        A NumericExpressionInput interaction should gain a placeholder
        customization argument with default instructional text.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': '1/2'
                },
                'rule_type': 'MatchesExactlyWith'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {},
                'id': 'NumericExpressionInput'
            },
            'next_content_id_index': 3,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=38)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        cust_args = question.question_state_data.interaction.customization_args
        self.assertEqual(
            cust_args['placeholder'].value.unicode_str,
            'Type an expression here, using only numbers.')
    def test_migrate_question_state_with_text_input_from_v40_to_latest(self):
        """Checks that a v40 TextInput question state migrates to latest.

        The list-valued Equals rule input should become a translatable dict
        (contentId + normalizedStrSet), and next_content_id_index should be
        bumped to account for the new rule_input content id.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'placeholder': {
                        'value': {
                            'content_id': 'ca_placeholder_0',
                            'unicode_str': ''
                        }
                    },
                    'rows': {'value': 1}
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': 'TextInput'
            },
            'next_content_id_index': 4,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=40)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_group = question.question_state_data.interaction.answer_groups[0]
        rule_spec = answer_group.rule_specs[0]
        self.assertEqual(
            rule_spec.inputs['x'],
            {
                'contentId': 'rule_input_4',
                'normalizedStrSet': ['Test']
            })
        # The index advances past the newly assigned rule_input_4 id.
        self.assertEqual(question.question_state_data.next_content_id_index, 5)
    def test_migrate_question_state_with_set_input_from_v40_to_latest(self):
        """Checks that a v40 SetInput question state migrates to latest.

        The list-valued Equals rule input should become a translatable dict
        (contentId + unicodeStrSet), and next_content_id_index should be
        bumped to account for the new rule_input content id.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'buttonText': {
                        'value': {
                            'content_id': 'ca_buttonText_0',
                            'unicode_str': ''
                        }
                    },
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': 'SetInput'
            },
            'next_content_id_index': 4,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=40)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_group = question.question_state_data.interaction.answer_groups[0]
        rule_spec = answer_group.rule_specs[0]
        # SetInput uses unicodeStrSet (vs normalizedStrSet for TextInput).
        self.assertEqual(
            rule_spec.inputs['x'],
            {
                'contentId': 'rule_input_4',
                'unicodeStrSet': ['Test']
            })
        self.assertEqual(question.question_state_data.next_content_id_index, 5)
    def test_migrate_question_state_from_v41_with_item_selection_input_interaction_to_latest(self):  # pylint: disable=line-too-long
        """Checks that a v41 ItemSelectionInput state migrates to latest.

        Rule inputs and the solution's correct_answer change from raw html
        choice strings to the corresponding choice content ids.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['<p>Choice 1</p>', '<p>Choice 2</p>']
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_2',
                            'html': '<p>Choice 1</p>'
                        }, {
                            'content_id': 'ca_choices_3',
                            'html': '<p>Choice 2</p>'
                        }]
                    },
                    'maxAllowableSelectionCount': {'value': 2},
                    'minAllowableSelectionCount': {'value': 1}
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': ['<p>Choice 1</p>'],
                    'explanation': {
                        'content_id': 'solution',
                        'html': 'This is <i>solution</i> for state1'
                    }
                },
                'id': 'ItemSelectionInput'
            },
            'next_content_id_index': 4,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=41)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_group = question.question_state_data.interaction.answer_groups[0]
        solution = question.question_state_data.interaction.solution
        rule_spec = answer_group.rule_specs[0]
        self.assertEqual(
            rule_spec.inputs['x'],
            ['ca_choices_2', 'ca_choices_3'])
        self.assertEqual(
            solution.correct_answer, ['ca_choices_2'])
    def test_migrate_question_state_from_v41_with_drag_and_drop_sort_input_interaction_to_latest(self):  # pylint: disable=line-too-long
        """Checks that a v41 DragAndDropSortInput state migrates to latest.

        Html choice strings inside all four DragAndDropSortInput rule types
        and in the solution's correct_answer are replaced by the matching
        choice content ids; html with no matching choice becomes
        'invalid_content_id'.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': [['<p>Choice 1</p>', '<p>Choice 2</p>', 'invalid']]
                },
                'rule_type': 'IsEqualToOrdering'
            }, {
                'inputs': {
                    'x': [['<p>Choice 1</p>']]
                },
                'rule_type': 'IsEqualToOrderingWithOneItemAtIncorrectPosition'
            }, {
                'inputs': {
                    'x': '<p>Choice 1</p>',
                    'y': 1
                },
                'rule_type': 'HasElementXAtPositionY'
            }, {
                'inputs': {
                    'x': '<p>Choice 1</p>',
                    'y': '<p>Choice 2</p>'
                },
                'rule_type': 'HasElementXBeforeElementY'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'allowMultipleItemsInSamePosition': {'value': True},
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_2',
                            'html': '<p>Choice 1</p>'
                        }, {
                            'content_id': 'ca_choices_3',
                            'html': '<p>Choice 2</p>'
                        }]
                    }
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {
                    'answer_is_exclusive': True,
                    'correct_answer': [['<p>Choice 1</p>', '<p>Choice 2</p>']],
                    'explanation': {
                        'content_id': 'solution',
                        'html': 'This is <i>solution</i> for state1'
                    }
                },
                'id': 'DragAndDropSortInput'
            },
            'next_content_id_index': 4,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=41)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        answer_group = question.question_state_data.interaction.answer_groups[0]
        solution = question.question_state_data.interaction.solution
        self.assertEqual(
            answer_group.rule_specs[0].inputs['x'],
            [['ca_choices_2', 'ca_choices_3', 'invalid_content_id']])
        self.assertEqual(
            answer_group.rule_specs[1].inputs['x'],
            [['ca_choices_2']])
        self.assertEqual(
            answer_group.rule_specs[2].inputs['x'],
            'ca_choices_2')
        self.assertEqual(
            answer_group.rule_specs[3].inputs,
            {'x': 'ca_choices_2', 'y': 'ca_choices_3'})
        self.assertEqual(
            solution.correct_answer, [['ca_choices_2', 'ca_choices_3']])
    def test_migrate_question_state_from_v42_to_latest(self):
        """Checks that a v42 question state migrates to the latest schema.

        A NumericExpressionInput interaction should gain the
        useFractionForDivision customization argument, defaulting to True.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': '1/2'
                },
                'rule_type': 'MatchesExactlyWith'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'placeholder': {
                        'value': {
                            'content_id': 'ca_placeholder_0',
                            'unicode_str': (
                                'Type an expression here, using only numbers.')
                        }
                    }
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [{
                    'hint_content': {
                        'content_id': 'hint_1',
                        'html': 'Hint 1'
                    }
                }],
                'solution': {},
                'id': 'NumericExpressionInput'
            },
            'next_content_id_index': 3,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=42)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        cust_args = question.question_state_data.interaction.customization_args
        self.assertEqual(
            cust_args['useFractionForDivision'].value, True)
    def test_migrate_question_state_from_v43_to_latest(self):
        """Checks that a v43 question state migrates to the latest schema.

        The migration should add a linked_skill_id field to the state,
        defaulting to None.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'placeholder': {
                        'value': {
                            'content_id': 'ca_placeholder_0',
                            'unicode_str': ''
                        }
                    },
                    'rows': {'value': 1}
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': 'TextInput'
            },
            'next_content_id_index': 4,
            'param_changes': [],
            'solicit_answer_details': False,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=43)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        linked_skill_id = question.question_state_data.linked_skill_id
        self.assertEqual(
            linked_skill_id, None)
    def test_migrate_question_state_from_v44_to_latest(self):
        """Checks that a v44 question state migrates to the latest schema.

        The NumericInput requireNonnegativeInput customization argument
        present in v44 should be preserved through the migration.
        """
        answer_group = {
            'outcome': {
                'dest': 'abc',
                'feedback': {
                    'content_id': 'feedback_1',
                    'html': '<p>Feedback</p>'
                },
                'labelled_as_correct': True,
                'param_changes': [],
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None
            },
            'rule_specs': [{
                'inputs': {
                    'x': ['Test']
                },
                'rule_type': 'Equals'
            }],
            'training_data': [],
            'tagged_skill_misconception_id': None
        }
        question_state_dict = {
            'content': {
                'content_id': 'content_1',
                'html': 'Question 1'
            },
            'recorded_voiceovers': {
                'voiceovers_mapping': {}
            },
            'written_translations': {
                'translations_mapping': {
                    'explanation': {}
                }
            },
            'interaction': {
                'answer_groups': [answer_group],
                'confirmed_unclassified_answers': [],
                'customization_args': {
                    'requireNonnegativeInput': {
                        'value': False
                    },
                    'rows': {'value': 1}
                },
                'default_outcome': {
                    'dest': None,
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Correct Answer'
                    },
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'labelled_as_correct': True,
                    'missing_prerequisite_skill_id': None
                },
                'hints': [],
                'solution': {},
                'id': 'NumericInput'
            },
            'next_content_id_index': 4,
            'param_changes': [],
            'solicit_answer_details': False,
            'card_is_checkpoint': False,
            'linked_skill_id': None,
            'classifier_model_id': None
        }
        question_model = question_models.QuestionModel(
            id='question_id',
            question_state_data=question_state_dict,
            language_code='en',
            version=0,
            linked_skill_ids=['skill_id'],
            question_state_data_schema_version=44)
        commit_cmd = question_domain.QuestionChange({
            'cmd': question_domain.CMD_CREATE_NEW
        })
        commit_cmd_dicts = [commit_cmd.to_dict()]
        question_model.commit(
            'user_id_admin', 'question model created', commit_cmd_dicts)
        question = question_fetchers.get_question_from_model(question_model)
        self.assertEqual(
            question.question_state_data_schema_version,
            feconf.CURRENT_STATE_SCHEMA_VERSION)
        cust_args = question.question_state_data.interaction.customization_args
        self.assertEqual(
            cust_args['requireNonnegativeInput'].value, False)
| {
"content_hash": "8c33afa20556f5c3b7480c7a5a2c3401",
"timestamp": "",
"source": "github",
"line_count": 3220,
"max_line_length": 135,
"avg_line_length": 39.0639751552795,
"alnum_prop": 0.47691316998712097,
"repo_name": "kevinlee12/oppia",
"id": "edbae4be0c716c7daa3c69eec6abf642d372f4e4",
"size": "126411",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/question_services_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "205771"
},
{
"name": "HTML",
"bytes": "1835761"
},
{
"name": "JavaScript",
"bytes": "1182599"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "13670639"
},
{
"name": "Shell",
"bytes": "2239"
},
{
"name": "TypeScript",
"bytes": "13024194"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
# huckle's imports
from . import package
from . import config
from . import hutils
from . import hclinav
import sys
# navigate through the command line sequence for a given cliname
def navigate(argv):
    """Traverse the HCLI document, consuming one argument per hop.

    With no arguments beyond the program name, execution happens at the
    root document; otherwise each token advances the navigator and
    execution happens once the final token has been consumed.
    """
    nav = hclinav.navigator(root=config.url, apiname=config.cliname)
    if len(argv) == 1:
        hclinav.traverse_execution(nav)
    tokens = argv[1:]
    last_index = len(tokens) - 1
    for position, token in enumerate(tokens):
        nav = hclinav.traverse_argument(nav, token)
        if position == last_index:
            hclinav.traverse_execution(nav)
# huckle's minimal set of commands
def cli():
    """Dispatch huckle's built-in commands from sys.argv.

    Recognized invocations:
        huckle cli install <url>
        huckle cli run <cliname> [args...]
        huckle cli ls
        huckle cli rm <cliname>
        huckle cli config <cliname>
        huckle help
        huckle --version
    Anything else prints a short help hint and exits with status 2.
    """
    args = sys.argv
    count = len(args)
    if count > 2:
        if args[1] == "cli":
            subcommand = args[2]
            if subcommand == "install":
                if count > 3:
                    hclinav.pull(args[3])
                else:
                    huckle_help()
            elif subcommand == "run":
                if count > 3:
                    # Load the named CLI's configuration before navigating.
                    config.parse_configuration(args[3])
                    navigate(args[3:])
                else:
                    huckle_help()
            elif subcommand == "ls":
                config.list_clis()
            elif subcommand == "rm":
                if count > 3:
                    config.remove_cli(args[3])
                else:
                    huckle_help()
            elif subcommand == "config":
                if count > 3:
                    config.config_list(args[3])
                else:
                    huckle_help()
            else:
                huckle_help()
        elif args[1] == "help":
            hclinav.display_man_page(config.huckle_manpage_path)
            sys.exit(0)
        else:
            huckle_help()
    elif count == 2:
        if args[1] == "--version":
            show_dependencies()
        elif args[1] == "help":
            hclinav.display_man_page(config.huckle_manpage_path)
            sys.exit(0)
        else:
            huckle_help()
    else:
        huckle_help()
def huckle_help():
    """Print a hint pointing at 'huckle help' to stderr and exit with 2."""
    hutils.eprint("for help, use:\n")
    hutils.eprint("  huckle help")
    sys.exit(2)
# show huckle's version and the version of its dependencies
def show_dependencies():
    """Print huckle's version followed by each dependency as name/version.

    Output format: "huckle/<version> dep1/<ver1> dep2/<ver2> ...".
    """
    dependencies = ""
    for dependency in package.dependencies:
        # Split only on the last '==' so a pinned spec like 'name==1.0'
        # yields its name and version; indexing parts[1] fails loudly on
        # unpinned specs, matching the original behavior's intent.
        parts = dependency.rsplit('==', 1)
        dependencies += " " + parts[0] + "/" + parts[1]
    print("huckle/" + package.__version__ + dependencies)
| {
"content_hash": "9fc93ee65e914d88d4e6d1cbe5563606",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 72,
"avg_line_length": 26.418367346938776,
"alnum_prop": 0.5268443414445731,
"repo_name": "cometaj2/huckle",
"id": "ba993707e3ec5d90f9d126cd5f345ae0d435628b",
"size": "2589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huckle/huckle.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22792"
},
{
"name": "Roff",
"bytes": "1865"
}
],
"symlink_target": ""
} |
"""Unit tests for feed_utility.py."""
from typing import Dict
from feeds import feed_utility
from feeds.tests.fixtures import * # pylint: disable=wildcard-import
def test_flatten_dict() -> None:
    """Test flattening of dict."""
    nested = {"a": {"b": "c"}, 1: {"a": [1]}, "p": {"q": {"r": "s"}}}
    flat = {"a.b": "c", "1.a": [1], "p.q.r": "s"}
    actual = feed_utility.flatten_dict(nested)
    assert actual == flat
def test_defflatten_dict() -> None:
    """Test deflattening of dict."""
    flat = {"a.b": "c", "1.a": [1], "p.q.r": "s"}
    nested = {"a": {"b": "c"}, "1": {"a": [1]}, "p": {"q": {"r": "s"}}}
    actual = feed_utility.deflatten_dict(flat)
    assert actual == nested
def test_swap_with_underscore() -> None:
    """Test converting of camelcase key name to snakecase key name."""
    converted = feed_utility.swap_with_underscore("workdaySettings")
    assert converted == "workday_settings"
def test_get_feed_details(get_flattened_response: Dict[str, str],
                          get_detailed_schema: Dict[str, str]) -> None:
    """Test printing of key-value pair after correlation with schema.

    Args:
      get_flattened_response (dict): Test input data
      get_detailed_schema (dict): Test input data
    """
    expected = (" Feed Settings:\n API Hostname: abc.workday.com\n "
                "Tenant: ID\n")
    details = feed_utility.get_feed_details(
        get_flattened_response, get_detailed_schema.log_type_schema)
    assert details == expected
def test_snake_to_camel() -> None:
    """Test conversion of snakecase string to camelcase string."""
    camel = feed_utility.snake_to_camel("feed_schema")
    assert camel == "feedSchema"
def test_lower_or_none() -> None:
    """Test conversion of string to lowercase string."""
    lowered = feed_utility.lower_or_none("TEST")
    assert lowered == "test"
def test_lower_or_none_none() -> None:
    """Test return of string as None if string is None."""
    result = feed_utility.lower_or_none(None)
    assert result is None
def test_get_labels() -> None:
    """Test printing of key-value pair of labels field."""
    expected = (" Labels:\n k: v\n")
    labels_input = {"labels": [{"key": "k", "value": "v"}]}
    assert feed_utility.get_labels(labels_input) == expected
def test_namespace() -> None:
    """Test printing of namespace field."""
    expected = (" Namespace: sample_namespace\n")
    namespace_input = {"namespace": "sample_namespace"}
    assert feed_utility.get_namespace(namespace_input) == expected
def test_get_feed_display_name() -> None:
    """Test feed display name if exist in feed dictionary."""
    expected = "\n Display Name: Dummy feed display name"
    feed = {"displayName": "Dummy feed display name"}
    assert feed_utility.get_feed_display_name(feed) == expected
def test_get_feed_display_name_none() -> None:
    """Test feed display name if not exist in feed dictionary."""
    result = feed_utility.get_feed_display_name({})
    assert not result
| {
"content_hash": "eb1da0041c09c26b4253ea88c5e027a1",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 80,
"avg_line_length": 34.81927710843374,
"alnum_prop": 0.6359861591695501,
"repo_name": "chronicle/cli",
"id": "40c8f504dc7de2d711aab311cea9ec89f5925984",
"size": "3466",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "feeds/feed_utility_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "303761"
}
],
"symlink_target": ""
} |
import boto3
import uuid
import sys
import getopt
# boto3 classic-ELB client shared by all operations below.
elbclient = boto3.client('elb')

def usage ():
    # Print the command synopsis (Python 2 print statement).
    print "elb_helper.py -l <LB name> -i <instance> <-d[eregister]|-r[egister]|-s[status]>"

# Require at least "-l <name> -i <id>" plus one action flag: six argv entries
# counting the script name.  Anything shorter gets usage and a non-zero exit.
if(len(sys.argv) < 6):
    usage()
    sys.exit(2)
#Let's print the status of our instances
def printinstances( loadblancer, instance ):
try:
list_elb_resp = elbclient.describe_load_balancers(LoadBalancerNames=[loadblancer])
for list_instance in (list_elb_resp['LoadBalancerDescriptions'][0]['Instances' ]):
if (instance[0]['InstanceId']==list_instance['InstanceId']):
print ('Instance {1} registered with load balancer {0}'.format(loadblancer,list_instance['InstanceId']))
return;
print ('Instance {1} IS NOT registered with load balancer {0}'.format(loadblancer,list_instance['InstanceId']))
return;
except Exception as e:
print e
try:
    # Short options: l/i take a value; r/d/s are bare action flags.
    opts, args = getopt.getopt(sys.argv[1:], 'l:i:rds', ['loadbalancer=', 'instance=', 'help', 'r|d|s'])
except getopt.GetoptError:
    usage()
    sys.exit(2)

# NOTE(review): options are handled in command-line order, so -l and -i must
# appear *before* -r/-d/-s; otherwise LB / InstanceID are unbound (NameError).
for opt, arg in opts:
    if opt in ('-h', '--help'):
        usage()
        sys.exit(2)
    elif opt in ('-l', '--loadbalancer'):
        LB = arg
    elif opt in ('-i', '--instance'):
        # boto3 expects a list of {'InstanceId': ...} dicts.
        InstanceID=[ {'InstanceId': arg } ]
    elif opt in ('-r', '--register'):
        response = elbclient.register_instances_with_load_balancer(
            LoadBalancerName=LB,
            Instances= InstanceID
        )
    elif opt in ('-d', '--deregister'):
        response = elbclient.deregister_instances_from_load_balancer(
            LoadBalancerName=LB,
            Instances= InstanceID
        )
    elif opt in ('-s', '--status'):
        printinstances(LB,InstanceID)
    else:
        usage()
        sys.exit(2)
| {
"content_hash": "d16d12c46cf3edd9edaaf48f29d53252",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 121,
"avg_line_length": 30.964912280701753,
"alnum_prop": 0.6130311614730878,
"repo_name": "ceizner/codechallenge",
"id": "d7cba5bde528467c4631bbc1551ba5fc34a00ac7",
"size": "1805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elb_helper.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1805"
}
],
"symlink_target": ""
} |
from google.cloud import datacatalog_v1beta1
def sample_search_catalog():
    """Issue a sample SearchCatalog request and print every result page item."""
    catalog_client = datacatalog_v1beta1.DataCatalogClient()
    search_request = datacatalog_v1beta1.SearchCatalogRequest(
        query="query_value",
    )
    # search_catalog returns a pager; iterating it walks all result pages.
    for result in catalog_client.search_catalog(request=search_request):
        print(result)

# [END datacatalog_v1beta1_generated_DataCatalog_SearchCatalog_sync]
| {
"content_hash": "ac4842c78666ac13c2b4b8a81c83c5e3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 25.75,
"alnum_prop": 0.7165048543689321,
"repo_name": "googleapis/python-datacatalog",
"id": "8e50383bf7288b6871139a72e53ef0adcf9e554d",
"size": "1911",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/datacatalog_v1beta1_generated_data_catalog_search_catalog_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "3073442"
},
{
"name": "Shell",
"bytes": "30675"
}
],
"symlink_target": ""
} |
"""myBlog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from article import views as article
# URL routes for the blog application.
urlpatterns = [
    url(r'^grappelli/', include('grappelli.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', article.home, name='home'),
    url(r'^index\.html$', article.home, name='index'),
    url(r'^about\.html$', article.about, name='about'),
    url(r'^contact\.html$', article.contact, name='contact'),
    url(r'^blog/article=(?P<pk>\d+)/$', article.blog, name='blog'),
    url(r'^tag/(?P<tag>\w+)/$', article.tag, name='tag'),
    url(r'^category/(?P<cate>\w+)/$', article.category, name='category'),
    # Fix: escape the dot so it matches a literal '.', consistent with the
    # other *.html routes above (the unescaped pattern matched e.g. 'thanksXhtml').
    url(r'^thanks\.html$', article.thanks, name='thanks')
]
| {
"content_hash": "e309d85f3cff53d5cd35cce217713606",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 41.21875,
"alnum_prop": 0.6603487490523123,
"repo_name": "ruter/myBlog",
"id": "ee0eb3c326e42652fdec89b7a6ecbd46a97043ef",
"size": "1343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myBlog/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13411"
},
{
"name": "HTML",
"bytes": "11437"
},
{
"name": "JavaScript",
"bytes": "4310"
},
{
"name": "Python",
"bytes": "17349"
}
],
"symlink_target": ""
} |
import numpy as np
import cv2
cap = cv2.VideoCapture(1)

# Fix: build the SIFT detector once, outside the capture loop.  The original
# constructed a new cv2.SIFT() on every frame, which wasted work per frame
# without changing the detected keypoints.
sift = cv2.SIFT()

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect SIFT keypoints and draw them with size/orientation markers
    kp = sift.detect(gray, None)
    img = cv2.drawKeypoints(gray, kp, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Display the resulting frame
    cv2.imshow('frame', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
"content_hash": "26f975f3c979f68cdebc330ef61e732d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 24.869565217391305,
"alnum_prop": 0.6748251748251748,
"repo_name": "wasit7/cs634",
"id": "9631507f63dd1637cfadb922da8fc47661abb0a1",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2017/test/week10_cv_sift.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2450104"
},
{
"name": "Python",
"bytes": "50308"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
import numpy,appdirs,os,pdb
def run_setup(*script_args):
    """Invoke distutils setup for the dstoolm4 package with the given script args."""
    packages = ['dstoolm4']
    extensions = []
    setup(
        script_args=script_args,
        name='dstoolm4',
        version='4.0',
        description='dstoolm simulator',
        author='ctogle',
        author_email='cogle@vt.edu',
        url='http://github.com/ctogle/dstoolm',
        license='MIT License',
        long_description='''dstoolm simulator''',
        cmdclass={'build_ext': build_ext},
        include_dirs=[numpy.get_include()],
        scripts=[],
        packages=packages,
        ext_modules=extensions,
        py_modules=[])

if __name__ == '__main__':
    run_setup('build', 'install', '--user')
| {
"content_hash": "c9080f7f7929e6e7f7515a1611480e6c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 97,
"avg_line_length": 28.14814814814815,
"alnum_prop": 0.6355263157894737,
"repo_name": "ctogle/dstoolm",
"id": "9d8b59d47d1ac4ecdf3eb4182aa3c67a2193127a",
"size": "779",
"binary": false,
"copies": "2",
"ref": "refs/heads/4.0",
"path": "src/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6920"
}
],
"symlink_target": ""
} |
import django.db.models.deletion
import django_fsm
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the GoogleCalendar model, linked
    # one-to-one to a marketplace Offering, with django-fsm state tracking and
    # error bookkeeping fields.  Do not hand-edit the operations below.

    dependencies = [
        ('marketplace', '0035_offeringpermission'),
        ('google', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='GoogleCalendar',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                ('error_message', models.TextField(blank=True)),
                ('error_traceback', models.TextField(blank=True)),
                (
                    'state',
                    django_fsm.FSMIntegerField(
                        choices=[
                            (5, 'Creation Scheduled'),
                            (6, 'Creating'),
                            (1, 'Update Scheduled'),
                            (2, 'Updating'),
                            (7, 'Deletion Scheduled'),
                            (8, 'Deleting'),
                            (3, 'OK'),
                            (4, 'Erred'),
                        ],
                        # New rows start in 'Creation Scheduled'.
                        default=5,
                    ),
                ),
                (
                    'backend_id',
                    models.CharField(
                        blank=True, db_index=True, max_length=255, null=True
                    ),
                ),
                ('public', models.BooleanField(default=False)),
                (
                    'offering',
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        to='marketplace.Offering',
                    ),
                ),
            ],
            options={
                'verbose_name': 'Google calendar',
                'verbose_name_plural': 'Google calendars',
            },
        ),
    ]
| {
"content_hash": "a0918d7100ee985ed32055fb31907176",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 32.8125,
"alnum_prop": 0.3595238095238095,
"repo_name": "opennode/waldur-mastermind",
"id": "aa92d4b17055352bedb6a9a746f617319e46c02f",
"size": "2150",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_mastermind/google/migrations/0002_googlecalendar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
} |
import httplib as http
from flask import request
from framework import sentry
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from osf.models import AbstractNode as Node, NotificationSubscription
from website.notifications import utils
from website.notifications.constants import NOTIFICATION_TYPES
from website.project.decorators import must_be_valid_project
@must_be_logged_in
def get_subscriptions(auth):
    """Return the formatted user and project subscriptions for the current user."""
    current_user = auth.user
    return utils.format_user_and_project_subscriptions(current_user)
@must_be_logged_in
@must_be_valid_project
def get_node_subscriptions(auth, **kwargs):
    """Return formatted subscription data for a single node (or project)."""
    target = kwargs.get('node') or kwargs['project']
    return utils.format_data(auth.user, [target])
@must_be_logged_in
def get_file_subscriptions(auth, **kwargs):
    """Return the subscription for one file, identified by request query args."""
    query = request.args
    return utils.format_file_subscription(
        auth.user,
        query.get('node_id'),
        query.get('path'),
        query.get('provider'),
    )
@must_be_logged_in
def configure_subscription(auth):
    """Create, update, or remove a notification subscription for the current user.

    Expects a JSON body with ``id`` (a node id, or the user's own id),
    ``event``, ``notification_type`` (one of NOTIFICATION_TYPES or the
    special value 'adopt_parent'), and, for file events, ``path`` and
    ``provider``.
    """
    user = auth.user
    json_data = request.get_json()
    target_id = json_data.get('id')
    event = json_data.get('event')
    notification_type = json_data.get('notification_type')
    path = json_data.get('path')
    provider = json_data.get('provider')
    if not event or (notification_type not in NOTIFICATION_TYPES and notification_type != 'adopt_parent'):
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_long='Must provide an event and notification type for subscription.')
        )

    node = Node.load(target_id)
    # File events embed the (slash-stripped) file path into the event name.
    if 'file_updated' in event and path is not None and provider is not None:
        wb_path = path.lstrip('/')
        event = wb_path + '_file_updated'
    event_id = utils.to_subscription_key(target_id, event)

    if not node:
        # if target_id is not a node it currently must be the current user
        if not target_id == user._id:
            sentry.log_message(
                '{!r} attempted to subscribe to either a bad '
                'id or non-node non-self id, {}'.format(user, target_id)
            )
            raise HTTPError(http.NOT_FOUND)

        # adopt_parent makes no sense for a user-level (non-node) target.
        if notification_type == 'adopt_parent':
            sentry.log_message(
                '{!r} attempted to adopt_parent of a none node id, {}'.format(user, target_id)
            )
            raise HTTPError(http.BAD_REQUEST)
        owner = user
    else:  # node exists
        if not node.has_permission(user, 'read'):
            sentry.log_message('{!r} attempted to subscribe to private node, {}'.format(user, target_id))
            raise HTTPError(http.FORBIDDEN)

        if notification_type != 'adopt_parent':
            owner = node
        else:
            # 'adopt_parent' means: drop the explicit subscription so the
            # parent's setting applies.  File events (event name longer than
            # plain 'file_updated') are allowed even without a parent.
            if 'file_updated' in event and len(event) > len('file_updated'):
                pass
            else:
                parent = node.parent_node
                if not parent:
                    sentry.log_message(
                        '{!r} attempted to adopt_parent of '
                        'the parentless project, {!r}'.format(user, node)
                    )
                    raise HTTPError(http.BAD_REQUEST)

            # If adopt_parent make sure that this subscription is None for the current User
            subscription = NotificationSubscription.load(event_id)
            if not subscription:
                return {}  # We're done here

            subscription.remove_user_from_subscription(user)
            return {}

    subscription = NotificationSubscription.load(event_id)

    if not subscription:
        subscription = NotificationSubscription(_id=event_id, owner=owner, event_name=event)
        subscription.save()

    # Remember that this node has had its notifications explicitly configured.
    if node and node._id not in user.notifications_configured:
        user.notifications_configured[node._id] = True
        user.save()

    subscription.add_user_to_subscription(user, notification_type)

    subscription.save()

    return {'message': 'Successfully subscribed to {} list on {}'.format(notification_type, event_id)}
| {
"content_hash": "aa3dd85b2b68649128cc3aa3ea396c17",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 106,
"avg_line_length": 35.910714285714285,
"alnum_prop": 0.6372451516658378,
"repo_name": "cwisecarver/osf.io",
"id": "1645bd484c9ef580ca591fe058104b0e78f0dd83",
"size": "4022",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/notifications/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144027"
},
{
"name": "HTML",
"bytes": "217501"
},
{
"name": "JavaScript",
"bytes": "1712859"
},
{
"name": "Mako",
"bytes": "622293"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7621431"
}
],
"symlink_target": ""
} |
import connector
import hash
import almanac
import audit
import auth
import cache
import calendar
import chatlog
import conduit
import config
import conpherence
import countdown
import daemon
import dashboard
import dbdatetime
import differential
import diviner
import doorkeeper
import draft
import drydock
import fact
import feed
import file
import flag
import fund
import harbormaster
import herald
import legalpad
import maniphest
import metamta
import meta_data
import nuance
import oauth_server
import owners
import passphrase
import pastebin
import phame
import phlux
import pholio
import phortune
import phragment
import phrequent
import phriction
import policy
import ponder
import project
import releeph
import repository
import search
import slowvote
import system
import token
import user
import worker
import xhpastview
import xhprof
| {
"content_hash": "a631f355f18890d56d9b0ebc99225084",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 19,
"avg_line_length": 14.87719298245614,
"alnum_prop": 0.8643867924528302,
"repo_name": "veblush/PyPhabricatorDb",
"id": "31fd4bc20352a3a9be245580890f354ffd1e67d1",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyphabricatordb/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "50"
},
{
"name": "Python",
"bytes": "252890"
}
],
"symlink_target": ""
} |
import os
from oslo_config import cfg
import heat_integrationtests
# Option definitions for the heat integration test suite.  Credential
# defaults are taken from the standard OS_* environment variables.
IntegrationTestGroup = [

    cfg.StrOpt('username',
               default=os.environ.get('OS_USERNAME'),
               help="Username to use for API requests."),
    cfg.StrOpt('password',
               default=os.environ.get('OS_PASSWORD'),
               help="API key to use when authenticating.",
               secret=True),
    cfg.StrOpt('tenant_name',
               default=(os.environ.get('OS_PROJECT_NAME') or
                        os.environ.get('OS_TENANT_NAME')),
               help="Tenant name to use for API requests."),
    cfg.StrOpt('auth_url',
               default=os.environ.get('OS_AUTH_URL'),
               help="Full URI of the OpenStack Identity API (Keystone), v2"),
    cfg.StrOpt('region',
               default=os.environ.get('OS_REGION_NAME'),
               # Fix: help text read "The region name to us".
               help="The region name to use"),
    cfg.StrOpt('instance_type',
               help="Instance type for tests. Needs to be big enough for a "
                    "full OS plus the test workload"),
    cfg.StrOpt('minimal_instance_type',
               help="Instance type enough for simplest cases."),
    cfg.StrOpt('image_ref',
               help="Name of image to use for tests which boot servers."),
    cfg.StrOpt('keypair_name',
               default=None,
               help="Name of existing keypair to launch servers with."),
    cfg.StrOpt('minimal_image_ref',
               help="Name of minimal (e.g cirros) image to use when "
                    "launching test instances."),
    cfg.StrOpt('auth_version',
               default='v2',
               help="Identity API version to be used for authentication "
                    "for API tests."),
    cfg.BoolOpt('disable_ssl_certificate_validation',
                default=False,
                help="Set to True if using self-signed SSL certificates."),
    cfg.IntOpt('build_interval',
               default=4,
               help="Time in seconds between build status checks."),
    cfg.IntOpt('build_timeout',
               default=1200,
               help="Timeout in seconds to wait for a stack to build."),
    cfg.StrOpt('network_for_ssh',
               default='heat-net',
               help="Network used for SSH connections."),
    cfg.StrOpt('fixed_network_name',
               default='heat-net',
               help="Visible fixed network name "),
    cfg.StrOpt('floating_network_name',
               default='public',
               help="Visible floating network name "),
    cfg.StrOpt('boot_config_env',
               default=('heat_integrationtests/scenario/templates'
                        '/boot_config_none_env.yaml'),
               help="Path to environment file which defines the "
                    "resource type Heat::InstallConfigAgent. Needs to "
                    "be appropriate for the image_ref."),
    cfg.StrOpt('fixed_subnet_name',
               default='heat-subnet',
               help="Visible fixed sub-network name "),
    cfg.IntOpt('ssh_timeout',
               default=300,
               help="Timeout in seconds to wait for authentication to "
                    "succeed."),
    cfg.IntOpt('ip_version_for_ssh',
               default=4,
               help="IP version used for SSH connections."),
    cfg.IntOpt('ssh_channel_timeout',
               default=60,
               help="Timeout in seconds to wait for output from ssh "
                    "channel."),
    cfg.IntOpt('tenant_network_mask_bits',
               default=28,
               help="The mask bits for tenant ipv4 subnets"),
    cfg.BoolOpt('skip_scenario_tests',
                default=False,
                help="Skip all scenario tests"),
    cfg.BoolOpt('skip_functional_tests',
                default=False,
                help="Skip all functional tests"),
    cfg.ListOpt('skip_functional_test_list',
                help="List of functional test class or class.method "
                     "names to skip ex. AutoscalingGroupTest,"
                     "InstanceGroupBasicTest.test_size_updates_work"),
    cfg.ListOpt('skip_scenario_test_list',
                help="List of scenario test class or class.method "
                     "names to skip ex. NeutronLoadBalancerTest, "
                     "CeilometerAlarmTest.test_alarm"),
    cfg.ListOpt('skip_test_stack_action_list',
                help="List of stack actions in tests to skip "
                     "ex. ABANDON, ADOPT, SUSPEND, RESUME"),
    cfg.IntOpt('volume_size',
               default=1,
               help='Default size in GB for volumes created by volumes tests'),
    cfg.IntOpt('connectivity_timeout',
               default=120,
               help="Timeout in seconds to wait for connectivity to "
                    "server."),
    cfg.IntOpt('sighup_timeout',
               default=30,
               # Fix: the two implicitly-concatenated fragments previously
               # rendered as "childprocess" (missing separator space).
               help="Timeout in seconds to wait for adding or removing child "
                    "process after receiving of sighup signal")
]
def init_conf(read_conf=True):
    """Build a ConfigOpts for heat_integrationtests, optionally loading its conf file."""
    config_files = None
    if read_conf:
        base_dir = os.path.dirname(
            os.path.realpath(heat_integrationtests.__file__))
        confpath = os.path.join(base_dir, 'heat_integrationtests.conf')
        if os.path.isfile(confpath):
            config_files = [confpath]

    conf = cfg.ConfigOpts()
    conf(args=[], project='heat_integrationtests',
         default_config_files=config_files)
    for opt in IntegrationTestGroup:
        conf.register_opt(opt)
    return conf
def list_opts():
    """Yield (group, opts) pairs for the oslo config generator (None = DEFAULT group)."""
    for group_and_opts in ((None, IntegrationTestGroup),):
        yield group_and_opts
| {
"content_hash": "92530c6782ff0076b5984192153e5668",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 79,
"avg_line_length": 41.22794117647059,
"alnum_prop": 0.5639379347244515,
"repo_name": "takeshineshiro/heat",
"id": "b45a8d8c0fa6c1e99709ecabd43f19ef7426e247",
"size": "6180",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "heat_integrationtests/common/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6735948"
},
{
"name": "Shell",
"bytes": "33316"
}
],
"symlink_target": ""
} |
from .CoordSys import CoordSys
from .geocoords import CSGeo as ModuleCSGeo
class CSGeo(CoordSys, ModuleCSGeo):
    """
    Python manager for geographic coordinate systems.

    Implements `CoordSys`.
    """
    DOC_CONFIG = {
        "cfg": """
            [coordsys]
            # WGS84 (latitude, longitude) coordinate system
            crs_string = EPSG:4326
            space_dim = 2
        """,
    }

    import pythia.pyre.inventory

    # NOTE(review): the sample cfg above shows space_dim = 2 while the
    # inventory default below is 3 -- confirm which is intended.
    crsString = pythia.pyre.inventory.str("crs_string", default="EPSG:4326")
    crsString.meta['tip'] = "String specifying coordinate system (PROJ parameters, EPSG, or WKT). Default is WGS84."

    spaceDim = pythia.pyre.inventory.int("space_dim", default=3)
    spaceDim.meta['tip'] = "Number of dimensions for coordinate system."

    # PUBLIC METHODS /////////////////////////////////////////////////////

    def __init__(self, name="csgeo"):
        """
        Constructor.
        """
        CoordSys.__init__(self, name)
        return

    # PRIVATE METHODS ////////////////////////////////////////////////////

    def _configure(self):
        """
        Setup members using inventory.
        """
        CoordSys._configure(self)
        # Propagate the user-facing inventory settings into the module object.
        ModuleCSGeo.setString(self, self.crsString)
        ModuleCSGeo.setSpaceDim(self, self.spaceDim)
        return

    def _createModuleObj(self):
        """
        Create Python module object.
        """
        ModuleCSGeo.__init__(self)
        return
# FACTORIES ////////////////////////////////////////////////////////////
def coordsys():
    """
    Factory associated with CoordSys.
    """
    geo_cs = CSGeo()
    return geo_cs
# End of file
| {
"content_hash": "611726b7d9248da856173c0df93ea380",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 116,
"avg_line_length": 25.26153846153846,
"alnum_prop": 0.5286236297198539,
"repo_name": "geodynamics/spatialdata",
"id": "cbbd2878ec07bc2de97e6998340041a0b5afdc23",
"size": "2055",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "spatialdata/geocoords/CSGeo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "113500"
},
{
"name": "C++",
"bytes": "718586"
},
{
"name": "M4",
"bytes": "7988"
},
{
"name": "Makefile",
"bytes": "33546"
},
{
"name": "Python",
"bytes": "132716"
},
{
"name": "Shell",
"bytes": "2024"
}
],
"symlink_target": ""
} |
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
    """Format a datetime object for iso8601"""
    iso_fmt = '%Y-%m-%dT%H:%M:%SZ'
    return obj.strftime(iso_fmt)
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'text'``, ``'html'``, ``'text'``
or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url, True))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version, True))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __unicode__(self):
    # Python 2 text protocol: the unicode form of the feed is its full XML.
    return self.to_string()
def __str__(self):
    # Python 2 bytes protocol: UTF-8-encode the XML (py2-only idiom; under
    # Python 3 this would return bytes from __str__, which is invalid).
    return self.to_string().encode('utf-8')
class FeedEntry(object):
    """Represents a single entry in a feed.
    :param title: the title of the entry. Required.
    :param title_type: the type attribute for the title element. One of
    ``'html'``, ``'text'`` or ``'xhtml'``.
    :param content: the content of the entry.
    :param content_type: the type attribute for the content element. One
    of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param summary: a summary of the entry's content.
    :param summary_type: the type attribute for the summary element. One
    of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the entry.
    :param id: a globally unique id for the entry. Must be an URI. If
    not present the URL is used, but one of both is required.
    :param updated: the time the entry was modified the last time. Must
    be a :class:`datetime.datetime` object. Required.
    :param author: the author of the feed. Must be either a string (the
    name) or a dict with name (required) and uri or
    email (both optional). Can be a list of (may be
    mixed, too) strings and dicts, too, if there are
    multiple authors. Required if not every entry has an
    author element.
    :param published: the time the entry was initially published. Must
    be a :class:`datetime.datetime` object.
    :param rights: copyright information for the entry.
    :param rights_type: the type attribute for the rights element. One of
    ``'html'``, ``'text'`` or ``'xhtml'``. Default is
    ``'text'``.
    :param links: additional links. Must be a list of dictionaries with
    href (required) and rel, type, hreflang, title, length
    (all optional)
    :param xml_base: The xml base (url) for this feed item. If not provided
    it will default to the item url.
    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/
    Everywhere where a list is demanded, any iterable can be used.
    """

    def __init__(self, title=None, content=None, feed_url=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.content = content
        self.content_type = kwargs.get('content_type', 'html')
        self.url = kwargs.get('url')
        # fall back to the URL as the id; one of the two is required
        self.id = kwargs.get('id', self.url)
        self.updated = kwargs.get('updated')
        self.summary = kwargs.get('summary')
        self.summary_type = kwargs.get('summary_type', 'html')
        self.author = kwargs.get('author')
        self.published = kwargs.get('published')
        self.rights = kwargs.get('rights')
        self.links = kwargs.get('links', [])
        self.xml_base = kwargs.get('xml_base', feed_url)
        # normalize `author` into a list of {'name': ...} dicts
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, (basestring, dict)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}
        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        if not self.updated:
            raise ValueError('updated is required')

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.title
        )

    def generate(self):
        """Yields pieces of ATOM XML."""
        base = ''
        if self.xml_base:
            base = ' xml:base="%s"' % escape(self.xml_base, True)
        yield u'<entry%s>\n' % base
        yield u' ' + _make_text_block('title', self.title, self.title_type)
        yield u' <id>%s</id>\n' % escape(self.id)
        yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.published:
            yield u' <published>%s</published>\n' % \
                format_iso8601(self.published)
        if self.url:
            # bugfix: href is an XML attribute value, so it must be
            # quote-escaped (pass True), as the feed-level code already does
            yield u' <link href="%s" />\n' % escape(self.url, True)
        for author in self.author:
            yield u' <author>\n'
            yield u' <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u' <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield u' <email>%s</email>\n' % escape(author['email'])
            yield u' </author>\n'
        for link in self.links:
            yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
                (k, escape(link[k], True)) for k in link)
        if self.summary:
            yield u' ' + _make_text_block('summary', self.summary,
                                          self.summary_type)
        if self.content:
            yield u' ' + _make_text_block('content', self.content,
                                          self.content_type)
        yield u'</entry>\n'

    def to_string(self):
        """Convert the feed item into a unicode object."""
        return u''.join(self.generate())

    def __unicode__(self):
        return self.to_string()

    def __str__(self):
        return self.to_string().encode('utf-8')
| {
"content_hash": "dd749451c0c97735b2a74c61700cd503",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 78,
"avg_line_length": 44.707602339181285,
"alnum_prop": 0.539241334205363,
"repo_name": "Glottotopia/aagd",
"id": "306fcf55ade9aef3df9fd964eb073874a6791838",
"size": "15315",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moin/local/moin/MoinMoin/support/werkzeug/contrib/atom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.template.defaultfilters import slugify
from exceptions import IllegalMove, SameLevelMove, WrongLevelMove
# Maximum slug length; must stay in sync with the SlugField max_length below.
slug_length = 50
class LocationBase(models.Model):
    """Abstract base for the location hierarchy (GlobalRegion > Country >
    StateProvince > RegionDistrict > Locality, per the subclasses below).

    Each non-top subclass defines a ``parent`` ForeignKey with a
    ``children`` reverse relation; museum objects reference nodes via a
    ``museumobject_set`` reverse relation (used by merge/move below).
    """
    name = models.CharField(max_length=255)
    # regenerated from `name` on every save()
    slug = models.SlugField(help_text='Unique identifier. May be used in URLs.', max_length=slug_length)
    description = models.CharField(max_length=255, blank=True)
    gn_name = models.CharField(max_length=100,
        help_text="GeoNames Name", blank=True)
    gn_id = models.CharField(max_length=20,
        help_text="GeoNames ID", blank=True)
    latitude = models.FloatField(blank=True, null=True)
    longitude = models.FloatField(blank=True, null=True)

    class Meta:
        ordering = ['name']
        abstract = True

    def __unicode__(self):
        return self.name

    def save(self, *args, **kwargs):
        # derive the slug from the name, truncated to the field length
        self.slug = slugify(self.name)[:slug_length]
        super(LocationBase, self).save(*args, **kwargs)

    def get_kml_coordinates(self):
        # KML coordinate order is longitude,latitude,altitude
        return "%s,%s,0" % (self.longitude, self.latitude)

    @models.permalink
    def get_absolute_url(self):
        contenttype = ContentType.objects.get_for_model(self).model
        return ('view_geoloc', [str(contenttype), str(self.id)])

    def get_parents(self):
        """Return ancestors top-down, ending at the immediate parent;
        empty list at the top level (no ``parent`` attribute)."""
        if hasattr(self, 'parent'):
            parent = self.parent
            return parent.get_parents() + [parent]
        else:
            return []

    def moveto_parent(self, new_parent):
        """Re-parent this node under *new_parent* after validating the
        level; returns the surviving node (self, or a merge target)."""
        self._validate_move(new_parent)
        return self._perform_move(new_parent)

    def _validate_move(self, new_parent):
        """Raise IllegalMove/SameLevelMove/WrongLevelMove unless
        *new_parent* is exactly one hierarchy level above this node."""
        if not hasattr(self, 'parent'):
            # Top level of tree, cannot move
            raise IllegalMove()
        if type(self) == type(new_parent):
            # Parent cannot be of same type
            raise SameLevelMove
        # NOTE(review): _meta.get_field_by_name / field.rel.to were removed
        # in newer Django (1.10+); confirm the pinned Django version.
        parent_field = self._meta.get_field_by_name('parent')[0]
        req_parent_type = parent_field.rel.to
        if req_parent_type != type(new_parent):
            # new_parent is wrong type for this class
            raise WrongLevelMove

    def _perform_move(self, new_parent):
        # Check for conflicting children and merge if they exist
        if hasattr(new_parent, 'children') and \
           new_parent.children.filter(slug=self.slug):
            to_merge = new_parent.children.get(slug=self.slug)
            return self.merge(to_merge, self)
        else:
            # Simple move
            self.parent = new_parent
            self.save()
            # Update museumobjects
            field_changes = calc_field_changes(self)
            self.museumobject_set.update(**field_changes)
            return self

    @staticmethod
    def merge(target, old):
        """Fold *old* (and its whole subtree) into *target*, repointing
        museum objects, then delete *old*. Returns *target*."""
        if hasattr(old, 'children'):
            # Deal with all the children of old
            targets_children = [child.slug for child in target.children.all()]
            for child in old.children.all():
                if child.slug in targets_children:
                    # Need to merge
                    match = target.children.get(slug=child.slug)
                    LocationBase.merge(match, child)
                else:
                    # Simply move child
                    child.parent = target
                    child.save()
                    changes = calc_field_changes(target)
                    child.museumobject_set.update(**changes)
        # now that old has no children
        # Actually merge the two
        changes = calc_field_changes(target)
        old.museumobject_set.update(**changes)
        if old.museumobject_set.exists():
            # NOTE(review): bare Exception with no message; a specific
            # exception type would make this failure diagnosable
            raise Exception
        else:
            old.delete()
        return target
def find_mo_field_name(element):
    """Name of the MuseumObject FK field that points at *element*'s model."""
    model = element._meta.concrete_model
    return model.museumobject_set.related.field.name
def calc_field_changes(element):
    """
    Walk up the tree of geo-locations, finding the new parents
    These will be set onto all the museumobjects.
    """
    changes = {find_mo_field_name(element): element.id}
    if hasattr(element, 'parent'):
        changes.update(calc_field_changes(element.parent))
    return changes
class GlobalRegion(LocationBase):
    """Top level of the location hierarchy; the only level with no parent."""
    # icon shown on the browse page
    icon_path = models.CharField(max_length=255, blank=True,
        help_text="Relative path to icon")
    icon_title = models.CharField(max_length=255, blank=True,
        help_text="Icon title, displayed on browse page")

    class Meta(LocationBase.Meta):
        pass
class Country(LocationBase):
    """Second level: a country inside a GlobalRegion."""
    parent = models.ForeignKey(GlobalRegion, related_name='children',
        verbose_name='Global region', on_delete=models.PROTECT)

    class Meta(LocationBase.Meta):
        verbose_name_plural = 'countries'
        # slug only needs to be unique among siblings
        unique_together = ('parent', 'slug')
class StateProvince(LocationBase):
    """Third level: a state or province inside a Country."""
    parent = models.ForeignKey(Country, related_name='children',
        verbose_name='Country', on_delete=models.PROTECT)

    class Meta(LocationBase.Meta):
        # slug only needs to be unique among siblings
        unique_together = ('parent', 'slug')
class RegionDistrict(LocationBase):
    """Fourth level: a region/district inside a StateProvince."""
    parent = models.ForeignKey(StateProvince, related_name='children',
        verbose_name='State/province', on_delete=models.PROTECT)

    class Meta(LocationBase.Meta):
        # slug only needs to be unique among siblings
        unique_together = ('parent', 'slug')
class Locality(LocationBase):
    """Fifth (leaf) level: a locality inside a RegionDistrict."""
    parent = models.ForeignKey(RegionDistrict, related_name='children',
        verbose_name='Region/district', on_delete=models.PROTECT)

    class Meta(LocationBase.Meta):
        verbose_name_plural = 'localities'
        # slug only needs to be unique among siblings
        unique_together = ('parent', 'slug')
class Place(models.Model):
    """A flat, free-form place record (country/region/name) with optional
    GeoNames geocoding — unlike the LocationBase hierarchy above."""
    country = models.CharField(max_length=30, blank=True)
    region = models.CharField(max_length=40, blank=True)
    australian_state = models.CharField(max_length=20, blank=True)
    name = models.CharField(max_length=150)
    # NOTE(review): the concatenated help_text is missing a space
    # ("manuallymoved") and has a typo ("it's" -> "its"); changing it would
    # alter a runtime string, so it is only flagged here.
    is_corrected = models.BooleanField(default=False,
        help_text="Has someone manually"
        "moved the marker to it's correct location.")
    gn_name = models.CharField(max_length=100,
        help_text="GeoNames Name", blank=True)
    gn_id = models.CharField(max_length=20,
        help_text="GeoNames ID", blank=True)
    latitude = models.FloatField(blank=True, null=True)
    longitude = models.FloatField(blank=True, null=True)

    class Meta:
        ordering = ["id"]

    def __unicode__(self):
        return ' > '.join([self.country, self.region, self.name])

    @models.permalink
    def get_absolute_url(self):
        return ('place_detail', [str(self.id)])

    def get_geonames_url(self):
        """GeoNames page URL for this place, or False when not geocoded."""
        if self.gn_id:
            return "http://www.geonames.org/%s" % self.gn_id
        else:
            return False

    def get_kml_coordinates(self):
        # KML coordinate order is longitude,latitude,altitude
        return "%s,%s,0" % (self.longitude, self.latitude)

    def geocode_net(self, force=False):
        """
        Lookup the latitude and longitude of this place with GeoNames
        Place must be saved after use. Set `force` to re-lookup the location.
        Can take a few seconds to return, since this uses a network request.
        """
        if self.gn_id and not force:
            return
        # imported lazily so the model module has no hard geocoder dependency
        from utils import geocoders
        geonames = geocoders.GeoNamesWithId()
        place, geonameId, (lat, lng) = geonames.geocode('%s, %s' %
            (self.name, self.country,),
            exactly_one=False)[0]
        self.gn_name = place
        self.gn_id = geonameId
        self.latitude = lat
        self.longitude = lng

    @staticmethod
    def autocomplete_search_fields():
        # lookups used by the admin autocomplete widget
        return ("country__icontains", "region__icontains",
                "australian_state__icontains", "name__icontains")
class Region(models.Model):
    """A named region with a description; independent of the hierarchy above."""
    name = models.CharField(max_length=60, unique=True)
    description = models.CharField(max_length=200)

    def __unicode__(self):
        return self.name
| {
"content_hash": "432cbd597f863e93d63e5641043d55d9",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 104,
"avg_line_length": 32.764227642276424,
"alnum_prop": 0.61712158808933,
"repo_name": "uq-eresearch/uqam",
"id": "b6888f1be7ed3b99dac3739aad49bb86178ca787",
"size": "8060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "location/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "117676"
},
{
"name": "HTML",
"bytes": "108660"
},
{
"name": "JavaScript",
"bytes": "977528"
},
{
"name": "Python",
"bytes": "1297328"
},
{
"name": "Shell",
"bytes": "24566"
}
],
"symlink_target": ""
} |
from airflow.ti_deps.deps.dag_ti_slots_available_dep import DagTISlotsAvailableDep
from airflow.ti_deps.deps.dag_unpaused_dep import DagUnpausedDep
from airflow.ti_deps.deps.dagrun_exists_dep import DagrunRunningDep
from airflow.ti_deps.deps.exec_date_after_start_date_dep import ExecDateAfterStartDateDep
from airflow.ti_deps.deps.not_running_dep import NotRunningDep
from airflow.ti_deps.deps.not_skipped_dep import NotSkippedDep
from airflow.ti_deps.deps.runnable_exec_date_dep import RunnableExecDateDep
from airflow.ti_deps.deps.valid_state_dep import ValidStateDep
from airflow.ti_deps.deps.task_concurrency_dep import TaskConcurrencyDep
from airflow.utils.state import State
class DepContext:
    """Describes which dependencies must be evaluated for a task instance
    in a given execution context, plus context state the dependency
    classes may consult.

    A concrete context (e.g. a hypothetical SomeRunContext) supplies a
    set of deps such as "infrastructure slots are available" or "the
    task's own upstream requirements are met".

    :param deps: context-specific dependencies a task instance must
        satisfy to run in this context
    :type deps: set(airflow.ti_deps.deps.base_ti_dep.BaseTIDep)
    :param flag_upstream_failed: hack that triggers upstream_failed state
        creation while checking runnability; kept for compatibility even
        though it gives this otherwise-pure class a side effect
    :type flag_upstream_failed: bool
    :param ignore_all_deps: ignore every ignorable dependency; overrides
        the other ignore_* flags
    :type ignore_all_deps: bool
    :param ignore_depends_on_past: ignore depends_on_past of DAGs (e.g.
        for backfills)
    :type ignore_depends_on_past: bool
    :param ignore_in_retry_period: ignore the retry period
    :type ignore_in_retry_period: bool
    :param ignore_in_reschedule_period: ignore the reschedule period
    :type ignore_in_reschedule_period: bool
    :param ignore_task_deps: ignore task-specific dependencies such as
        depends_on_past and trigger rule
    :type ignore_task_deps: bool
    :param ignore_ti_state: ignore the task instance's previous
        failure/success
    :type ignore_ti_state: bool
    """

    def __init__(
            self,
            deps=None,
            flag_upstream_failed=False,
            ignore_all_deps=False,
            ignore_depends_on_past=False,
            ignore_in_retry_period=False,
            ignore_in_reschedule_period=False,
            ignore_task_deps=False,
            ignore_ti_state=False):
        # a falsy deps argument (None or empty) becomes a fresh empty set
        self.deps = deps if deps else set()
        self.flag_upstream_failed = flag_upstream_failed
        self.ignore_all_deps = ignore_all_deps
        self.ignore_depends_on_past = ignore_depends_on_past
        self.ignore_in_retry_period = ignore_in_retry_period
        self.ignore_in_reschedule_period = ignore_in_reschedule_period
        self.ignore_task_deps = ignore_task_deps
        self.ignore_ti_state = ignore_ti_state
# In order to be able to get queued a task must have one of these states
QUEUEABLE_STATES = {
    State.FAILED,
    State.NONE,
    State.QUEUED,
    State.SCHEDULED,
    State.SKIPPED,
    State.UPSTREAM_FAILED,
    State.UP_FOR_RETRY,
    State.UP_FOR_RESCHEDULE,
}

# Dependencies that must be met for a task instance to be queued
# (e.g. by a backfill): not already running, not skipped, a runnable
# execution date, and a queueable state.
QUEUE_DEPS = {
    NotRunningDep(),
    NotSkippedDep(),
    RunnableExecDateDep(),
    ValidStateDep(QUEUEABLE_STATES),
}

# Dependencies that need to be met for a given task instance to be able to get run by an
# executor. Extends QUEUE_DEPS with resource-availability dependencies.
RUN_DEPS = QUEUE_DEPS | {
    DagTISlotsAvailableDep(),
    TaskConcurrencyDep(),
}

# TODO(aoen): SCHEDULER_DEPS is not coupled to actual execution in any way and
# could easily be modified or removed from the scheduler causing this dependency to become
# outdated and incorrect. This coupling should be created (e.g. via a dag_deps analog of
# ti_deps that will be used in the scheduler code) to ensure that the logic here is
# equivalent to the logic in the scheduler.

# Dependencies that need to be met for a given task instance to get scheduled by the
# scheduler, then queued by the scheduler, then run by an executor.
SCHEDULER_DEPS = RUN_DEPS | {
    DagrunRunningDep(),
    DagUnpausedDep(),
    ExecDateAfterStartDateDep(),
}
| {
"content_hash": "97430dbb563462511eaba42f19962e5e",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 90,
"avg_line_length": 43.549549549549546,
"alnum_prop": 0.7277616880430285,
"repo_name": "r39132/airflow",
"id": "4ec74a7e1e0a5d982a5f6778d8cf4e82d979215b",
"size": "5646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/ti_deps/dep_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2012 by Adam Ewing (adam.ewing@gmail.com)
Released under the MIT license, see LICENSE.txt
"""
import re
import os
import subprocess
import tempfile
import pysam
import sys
import gzip
import argparse
import peakparser
from string import maketrans
def checkfile(fname):
    """Exit the program with a message if *fname* cannot be opened for reading.

    Fixes vs. original: the opened file handle is closed instead of leaked,
    and print uses the parenthesized form (valid in both Python 2 and 3).
    """
    try:
        f = open(fname)
        f.close()  # only checking readability; close immediately
    except IOError:
        print("can't find file: " + fname)
        sys.exit()
# parse TCGA filename
def getTypeFromTCGA(fname):
    """Infer the sample type from a TCGA-style filename.

    The fourth dash-delimited field of a TCGA barcode is the sample code:
    codes starting with '0' are tumor, '1' is normal. Returns 'CANCER',
    'NORMAL', or None when the name doesn't parse (fixes the original's
    bare ``except`` by catching only the expected IndexError).
    """
    try:
        sample = os.path.basename(fname).split('-')[3]
    except IndexError:
        # not a TCGA-style name (fewer than 4 dash-separated fields)
        return None
    if sample.startswith('0'):
        return 'CANCER'
    if sample.startswith('1'):
        return 'NORMAL'
    return None
# some people choose to use reference genomes with chromosome names that don't begin in 'chr'
# since we do use 'chr', we need to detect those cases
def chromNameUsesPrefix(bam):
    """True if any reference sequence name in *bam* starts with 'chr'."""
    return any(name.startswith("chr") for name in bam.references)
# fix for the most common variation on chromosome names (leaving out the 'chr')
def fixChrName(name):
    """Return *name* with a 'chr' prefix, adding it only when missing.

    Fixes vs. original: the old test only inspected the first character
    ('c'), so any contig merely starting with 'c' (e.g. 'c6_apd') was
    left unprefixed; it also raised IndexError on an empty string.
    """
    if name.startswith('chr'):
        return name
    return 'chr' + name
# average base quality over an interval
def avgQual(qstring, start, end, zeroChar):
    """Mean quality over qstring[start:end]; *zeroChar* is the character
    encoding quality zero. The interval is clamped to the string bounds."""
    start = max(start, 0)
    end = min(end, len(qstring))
    offset = ord(zeroChar)
    total = sum(ord(ch) - offset for ch in qstring[start:end])
    return total / (end - start)
# capitalize 1-indexed seq between start and end
def capSeq(seq, start, end):
    """Return *seq* lower-cased except positions start..end (1-indexed,
    inclusive), which are upper-cased.

    Uses slicing instead of a per-character list; also tolerates an *end*
    beyond the sequence length (the original raised IndexError there).
    """
    lowered = seq.lower()
    return lowered[:start - 1] + lowered[start - 1:end].upper() + lowered[end:]
# runs bwa stdsw to align two sequences
def bwastdsw(queryName, query, refName, ref, queryIsFile=False, refIsFile=False):
    """Align *query* against *ref* by shelling out to `bwa stdsw`.

    Inputs may be raw sequences (written to temp FASTA files) or, when
    queryIsFile/refIsFile is set, paths to existing FASTA files. Parses
    the 4-line records of bwa's output (header, target seq, match string,
    query seq) into BwastdswAlignResult objects and returns the single
    best one (longest alignment with >90% identity), or None if bwa
    produced no alignments.
    """
    qfileName = ''
    rfileName = ''
    if queryIsFile:
        qfileName = query
    else:
        # delete=False: the path must survive for bwa; unlinked manually below
        qfile = tempfile.NamedTemporaryFile(delete=False)
        qfile.write(">%s\n%s\n" % (queryName, query))
        qfile.close()
        qfileName = qfile.name
    if refIsFile:
        rfileName = ref
    else:
        rfile = tempfile.NamedTemporaryFile(delete=False)
        rfile.write(">%s\n%s\n" % (refName, ref))
        rfile.close()
        rfileName = rfile.name
    # can use the -f option to only consider forward strand since bwa prints aligned seqs
    # in the forward direction
    args = ['bwa', 'stdsw', rfileName, qfileName]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
    # fnum tracks the position within each 4-line record of bwa output:
    # 0 = '>' header, 1 = target sequence, 2 = match string, 3 = query sequence
    fnum = 0
    alignments = []
    for pline in p.stdout.readlines():
        if re.search("^>", pline):
            fnum = 0
            col = pline.strip().split("\t")
            alignments.append(BwastdswAlignResult(col))
            alignments[-1].queryLen = len(query) # FIXME make this work with filename input
            alignments[-1].targetLen = len(ref)
            fnum += 1
        elif fnum == 1:
            alignments[-1].targetSeq = pline.strip()
            fnum += 1
        elif fnum == 2:
            alignments[-1].matchString = pline.strip()
            fnum += 1
        elif fnum == 3:
            alignments[-1].querySeq = pline.strip()
            fnum += 1
    p.stdout.close()
    p.kill()
    p.wait()
    # remove only the temp files we created ourselves
    if not queryIsFile:
        os.unlink(qfileName)
    if not refIsFile:
        os.unlink(rfileName)
    if len(alignments) == 0:
        return None
    if len(alignments) == 1:
        return alignments[0]
    # more than 1 alignment, pick the 'best'
    bestaln = alignments[0]
    for align in alignments:
        if align.alnLength() > bestaln.alnLength() and align.pctID() > 90:
            bestaln = align
    return bestaln
def fetchRegion(bamFile,refGenome,maxReadLen,chr,start,end,gene,zeroChar,minClipQual,usechr=False):
    """Scan the BAM window chr:start-end (padded by maxReadLen) for
    soft-clipped reads, realign each clipped read to the local reference
    with bwastdsw(), and collect supporting reads/alignments into a
    ClippedCluster, whose breaks are then assigned.
    """
    maxReadLen = int(maxReadLen)
    start = int(start)
    end = int(end)
    if not usechr or not chromNameUsesPrefix(bamFile):
        # NOTE(review): lstrip('chr') strips any leading run of the
        # characters c/h/r, so a contig like 'hs37d5' becomes 's37d5' —
        # confirm chromosome naming before trusting this normalization.
        chr = chr.lstrip('chr')
    regionRefSeq = refGenome.fetch(reference=chr, start=start-maxReadLen, end=end+maxReadLen)
    cluster = ClippedCluster(chr,start,end,gene,maxReadLen)
    for read in bamFile.fetch(chr, start-maxReadLen, end+maxReadLen):
        if not read.is_unmapped and not read.is_duplicate:
            cliplen = read.rlen - read.qlen # minimum soft clipping: if rlen < qlen --> bases were soft-clipped
            if cliplen > 10:
                refseq = ''  # NOTE(review): never used below
                leftclip = 0
                rightclip = 0
                if (read.qstart == cliplen): # left-clipped
                    leftclip = cliplen
                if (read.qstart == 0): # right-clipped
                    rightclip = cliplen
                if (read.qstart > 0 and read.qstart < cliplen):
                    # clipped on both ends: split cliplen between the sides
                    leftclip = cliplen - read.qstart
                    rightclip = cliplen - leftclip
                breakside = ''
                breakloc = 0
                clipqual = 0
                if (leftclip > rightclip-10): # 10 is arbitrary
                    breakside = 'L'
                    breakloc = read.pos
                    clipqual = avgQual(read.qual,0,leftclip,zeroChar)
                elif (rightclip > leftclip-10):
                    breakside = 'R'
                    breakloc = read.pos + (read.rlen - rightclip)
                    clipqual = avgQual(read.qual,rightclip,read.rlen,zeroChar)
                else:
                    breakside = 'A' # ambiguous
                # only keep reads whose clipped bases are decent quality
                if (clipqual >= int(minClipQual)):
                    align = bwastdsw('query',read.seq,'target',regionRefSeq)
                    if align:
                        # convert alignment coords back to genome coords
                        breakLeft = start - maxReadLen + align.targetStart
                        breakRight = start - maxReadLen + align.targetEnd
                        # keep only breaks landing within 10 bp of the window
                        if (breakLeft >= start-10 and breakLeft <= end+10) or (breakRight >= start-10 and breakRight <= end+10):
                            cluster.aligns.append(align)
                            cluster.reads.append(read)
    cluster.assignBreaks()
    return cluster;
def mergeClusters(cl1, cl2, txList):
    """Merge two ClippedClusters covering the same window into a new one.

    If either cluster is empty the other is TE-annotated (mapTx) and
    returned as-is; otherwise both clusters' reads and alignments are
    combined, breaks reassigned, and TEs mapped.

    Fix vs. original: the merged cluster used to alias cl1's lists
    (``new.aligns = cl1.aligns``) and then append cl2's entries onto
    them, mutating *cl1* in place; fresh concatenated lists are built
    instead.

    Raises ValueError when the two clusters describe different locations.
    """
    if not cl1.hasReads():
        cl2.mapTx(txList)
        return cl2
    if not cl2.hasReads():
        cl1.mapTx(txList)
        return cl1
    if (cl1.chr == cl2.chr and cl1.start == cl2.start and cl1.end == cl2.end
            and cl1.gene == cl2.gene and cl1.maxrdln == cl2.maxrdln):
        new = ClippedCluster(cl1.chr, cl1.start, cl1.end, cl1.gene, cl1.maxrdln)
        # concatenate into new lists; do not mutate the inputs
        new.aligns = cl1.aligns + cl2.aligns
        new.reads = cl1.reads + cl2.reads
        new.assignBreaks()
        new.mapTx(txList)
        new.type = cl1.type + "," + cl2.type
        return new
    else:
        raise ValueError('cannot merge clusters that have different locations')
class ClippedCluster:
    """A cluster of soft-clipped reads over a candidate insertion window.

    Holds reads (pysam AlignedRead) and their local realignments
    (BwastdswAlignResult) for the window chr:start-end, derives the
    best-supported left/right breakpoint positions, and (via mapTx) which
    transposable-element family the clipped tails align to.
    """

    def __init__(self, chr, start, end, gene, maxReadLen):
        self.chr = chr
        self.start = int(start)
        self.end = int(end)
        self.gene = gene
        self.maxrdln = maxReadLen
        self.aligns = []  # BwastdswAlignResult objects
        self.reads = []   # pysam AlignedRead objects
        # assigned by functions:
        self.assign = False  # has assignBreaks() been run?
        self.lgood = False   # is left break good? (set in bestBreakLeft())
        self.rgood = False   # is right break good? (set in bestBreakRight())
        self.lbest = 0       # best guess for left break
        self.rbest = 0       # best guess for right break
        self.lbreaks = []    # candidate left break positions
        self.rbreaks = []    # candidate right break positions
        self.type = ''
        self.teqlen = []     # TE query lengths, one per read
        self.tetype = []     # TE family per read ('None' when no call)
        self.testart = []    # TE start per read (positions within the TE)
        self.teend = []      # TE end per read (positions within the TE)
        self.testr = []      # TE orientation per read
        self.minTEQueryLen = 15  # minimum TE-aligned length for a call to be valid

    def hasReads(self):
        """True once any reads have been added to the cluster."""
        return len(self.reads) > 0

    def mapTx(self, txs):
        """Align each read's clipped portion to the transcripts in *txs*
        (via partialMapTx) and record the TE family/coords/orientation.
        Reads with no alignment, or whose TE-aligned portion is shorter
        than minTEQueryLen, are recorded as 'None'."""
        for i in range(len(self.reads)):
            teAlign = partialMapTx(txs,
                                   self.gene,
                                   self.reads[i].seq,
                                   self.aligns[i].queryStart,
                                   self.aligns[i].queryEnd)
            if teAlign is not None and teAlign.queryLen >= self.minTEQueryLen:
                self.tetype.append(teAlign.targetName)
                self.testart.append(teAlign.targetStart)
                self.teend.append(teAlign.targetEnd)
                self.testr.append(teAlign.queryStr)
                self.teqlen.append(teAlign.queryLen)
            else:
                # no alignment, or TE-aligned portion too short to trust
                self.tetype.append('None')
                self.testart.append(0)
                self.teend.append(0)
                self.testr.append('.')
                self.teqlen.append(0 if teAlign is None else teAlign.queryLen)

    def assignBreaks(self):
        """Collect candidate break positions from each read's alignment,
        then pick the best-supported left/right breaks."""
        for i in range(len(self.reads)):
            leftmargin = self.aligns[i].queryStart - 1
            rightmargin = self.reads[i].rlen - self.aligns[i].queryEnd
            # a read votes for the side where its clip is, provided the
            # opposite margin is small (<= 10 bp)
            if leftmargin > rightmargin and rightmargin <= 10:
                self.lbreaks.append(self.start - self.maxrdln + self.aligns[i].targetStart)
            if leftmargin < rightmargin and leftmargin <= 10:
                self.rbreaks.append(self.start - self.maxrdln + self.aligns[i].targetEnd)
        self.assign = True
        if len(self.lbreaks) > 0:
            self.bestBreakLeft()
        if len(self.rbreaks) > 0:
            self.bestBreakRight()
        self.retryBreaks()

    @staticmethod
    def _modalPosition(positions):
        """Return (position, count) of the most frequent entry in *positions*."""
        counts = {}
        for pos in positions:
            counts[pos] = counts.get(pos, 0) + 1
        bestPos = 0
        bestCount = 0
        for pos, count in counts.items():
            if count > bestCount:
                bestCount = count
                bestPos = pos
        return bestPos, bestCount

    def bestBreakLeft(self):
        """Accept the modal left-break position when a strict majority of
        the left-clipped reads agree on it."""
        if not self.assign:
            self.assignBreaks()
        pos, count = self._modalPosition(self.lbreaks)
        if float(count) / float(len(self.lbreaks)) > 0.5:
            self.lgood = True
            self.lbest = int(pos)

    def bestBreakRight(self):
        """Accept the modal right-break position when a strict majority of
        the right-clipped reads agree on it."""
        if not self.assign:
            self.assignBreaks()
        pos, count = self._modalPosition(self.rbreaks)
        if float(count) / float(len(self.rbreaks)) > 0.5:
            self.rgood = True
            self.rbest = int(pos)

    @staticmethod
    def _tsdCandidate(positions, anchor, rightOfAnchor):
        """Most common entry of *positions* lying a TSD-sized distance
        (2-50 bp) from *anchor*, on the side given by *rightOfAnchor*.
        Returns (position, count, ties) where *ties* counts other
        candidates sharing the best count."""
        counts = {}
        for pos in positions:
            dist = (pos - anchor) if rightOfAnchor else (anchor - pos)
            if 2 <= dist <= 50:
                counts[pos] = counts.get(pos, 0) + 1
        bestPos = 0
        bestCount = 0
        ties = 0
        for pos, count in counts.items():
            if count > bestCount:
                bestCount = count
                bestPos = pos
                ties = 0
            elif count == bestCount:
                ties += 1
        return bestPos, bestCount, ties

    def retryBreaks(self):
        """Look for breaks that aren't majority-supported but do indicate
        a target-site duplication (TSD) relative to the accepted break.

        Bugfixes vs. the original: positions were compared against the
        boolean flags (self.lgood/self.rgood) instead of the break
        positions (self.lbest/self.rbest), and the best candidate was
        counted as tied with itself, so the unique-best condition could
        never pass and this method was effectively a no-op."""
        if self.lgood and not self.rgood:
            # look for a right break a TSD-length to the right of the left break
            pos, count, ties = self._tsdCandidate(self.rbreaks, self.lbest, True)
            if count > 0 and ties == 0:
                self.rgood = True
                sys.stderr.write("better break found for " + self.chr + ":" + str(self.start) + "-" + str(self.end) + "\n")
                self.rbest = int(pos)
        if self.rgood and not self.lgood:
            # look for a left break a TSD-length to the left of the right break
            pos, count, ties = self._tsdCandidate(self.lbreaks, self.rbest, False)
            if count > 0 and ties == 0:
                self.lgood = True
                sys.stderr.write("better break found for " + self.chr + ":" + str(self.start) + "-" + str(self.end) + "\n")
                self.lbest = int(pos)

    def majorityTE(self):
        """Return the most frequently identified TE family, counting only
        reads whose TE-aligned portion met minTEQueryLen; 'None' when no
        reads carry a TE call."""
        tecount = {}
        for te, qlen in zip(self.tetype, self.teqlen):
            # only count reads with long enough TE seq for alignment
            if qlen >= self.minTEQueryLen:
                tecount[te] = tecount.get(te, 0) + 1
        if len(self.tetype) == 0:
            return 'None'
        majTE = self.tetype[0]
        maxcount = 0
        for te, count in tecount.items():
            if count > maxcount:
                majTE = te
                maxcount = count
        return majTE

    def outstring(self):
        """One-line, tab-separated summary of the cluster's break calls."""
        output = "\t".join((str(self.lgood), str(self.rgood), str(self.lbest), str(self.rbest),
                            str(len(self.lbreaks)), str(len(self.rbreaks)), str(len(self.reads)),
                            self.type, self.majorityTE()))
        return output

    def infodump(self):
        """Verbose multi-line dump of each read's alignment and TE call
        (for debugging)."""
        output = ''
        for i in range(len(self.reads)):
            rbreak = self.start - self.maxrdln + self.aligns[i].targetEnd
            lbreak = self.start - self.maxrdln + self.aligns[i].targetStart
            outseq = capSeq(self.reads[i].seq, self.aligns[i].queryStart, self.aligns[i].queryEnd)
            output += ("%s tr=%d tl=%d lr=%d ll=%d te=%s,%s,%s,%s"
                       % (outseq, rbreak, lbreak, self.aligns[i].queryStart, self.aligns[i].queryEnd,
                          self.tetype[i], str(self.testart[i]), str(self.teend[i]), self.testr[i]) + "\n")
        output += ("leftgood=%s rightgood=%s leftbreak=%d rightbreak=%d type=%s"
                   % (self.lgood, self.rgood, self.lbest, self.rbest, self.type) + "\n")
        return output
class BwastdswAlignResult:
    """One alignment record parsed from `bwa stdsw` output.

    Constructed from the tab-split '>' header line; the sequence fields
    (targetSeq, matchString, querySeq) and the lengths are filled in
    afterwards by the caller as subsequent output lines arrive.
    """

    def __init__(self, col):
        self.targetName = col[0].lstrip('>')
        self.queryStart = int(col[1])
        self.queryEnd = int(col[2])
        self.queryName = col[3].lstrip('>')
        self.queryStr = col[4]
        self.targetStart = int(col[5])
        self.targetEnd = int(col[6])
        self.cigarString = col[8]
        # populated later from the remaining output lines
        self.queryLen = 0
        self.targetLen = 0
        self.targetSeq = ''
        self.querySeq = ''
        self.matchString = ''

    def pctID(self):
        """Percent identity: '|' characters mark matching columns."""
        matches = self.matchString.count('|')
        return float(matches) / float(len(self.querySeq)) * 100

    def alnLength(self):
        """Length of the aligned target segment."""
        return len(self.targetSeq)

    def __str__(self):
        return ("queryName=%s targetName=%s queryStart=%d queryEnd=%d queryStr=%s targetStart=%d targetEnd=%d pctID=%f queryLen=%d targetLen=%d"
                % (self.queryName, self.targetName, self.queryStart, self.queryEnd, self.queryStr, self.targetStart, self.targetEnd, self.pctID(),
                   self.queryLen, self.targetLen))
class TranscriptSeq:
    """All transcript sequences known for one gene."""

    def __init__(self, genename):
        self.gene = genename
        self.txs = []   # transcript numbers
        self.seqs = []  # one sequence per transcript
        self.junc = []  # junction strings, parallel to txs

    def __str__(self):
        lines = [self.gene]
        for num, seq in zip(self.txs, self.seqs):
            lines.append(str(num) + "\t" + seq)
        return "\n".join(lines) + "\n"
def fastahash(infile):
    """Parse a gzipped FASTA of transcript sequences into a dict mapping
    gene name -> TranscriptSeq.

    Header lines are dot-separated; the last three fields are taken as
    gene name, transcript number and junction string
    (``>....gene.txnum.juncstr``).
    """
    f = gzip.open(infile,'r')
    tx = {}
    gene = ''
    seq = ''
    for line in f:
        if re.search("^>",line):
            if gene:
                tx[gene].seqs.append(seq) # finish last gene
            fields = line.strip().strip(">").split('.')
            juncstr = fields[-1]
            txnum = fields[-2]
            gene = fields[-3]
            if tx.has_key(gene):
                tx[gene].txs.append(int(txnum))
                tx[gene].junc.append(juncstr)
            else:
                tx[gene] = TranscriptSeq(gene)
                tx[gene].txs.append(int(txnum))
                tx[gene].junc.append(juncstr)
            seq = ''
        else:
            # sequence lines are concatenated until the next header
            seq += line.strip()
    f.close()
    # NOTE(review): raises KeyError on an input with no headers (gene stays
    # '') — confirm inputs are never empty before relying on this.
    tx[gene].seqs.append(seq)
    return tx
def partialMapTx(txs, queryName, querySeq, excludeStart, excludeEnd):
    """
    Maps the parts of querySeq outside (excludeStart, excludeEnd) against
    every transcript of gene *queryName* in *txs*; returns the best
    alignment (a BwastdswAlignResult) or None.

    Fixes vs. original:
    - ``align.pctID > 90`` compared the bound method object itself to an
      int (always True under Python 2), so the identity filter never ran;
      the method is now called: ``align.pctID() > 90``.
    - the right-hand slice ``querySeq[excludeEnd:len(querySeq)-1]``
      silently dropped the final base; the full tail is used.
    """
    excludeStart = int(excludeStart)
    excludeEnd = int(excludeEnd)
    results = []
    if queryName in txs:
        for txseq in txs[queryName].seqs:
            leftQuery = querySeq[0:excludeStart]
            rightQuery = querySeq[excludeEnd:]
            leftAlign = bwastdsw(queryName, leftQuery, queryName, txseq)
            rightAlign = bwastdsw(queryName, rightQuery, queryName, txseq)
            # keep only confident (>90% identity), long-enough alignments
            for align in (leftAlign, rightAlign):
                if align is not None and align.pctID() > 90 and len(align.querySeq) >= 15:
                    results.append(align)
    bestPctID = 0
    bestResult = None
    for result in results:
        if result.pctID() > bestPctID:
            bestResult = result
            bestPctID = result.pctID()
    return bestResult
def main(args):
    """Driver: for each categorized call file (cancer-only, normal-only,
    germline, uncategorized), fetch the supporting reads for every call
    region from both BAMs, merge the clusters, and write one
    *breaks.tab.txt output file per category.
    """
    peakparser.checkOutDir(args.outBaseName,args.outDirName)
    configPath = args.outDirName + "/" + args.outBaseName + "/" + args.configFileName
    checkfile(configPath)
    configDict = peakparser.readConfig(configPath,args.outBaseName,args.outDirName)
    # load hash of TranscriptSeq objects (genename --> multiple transcripts)
    # will need to pass to partialMapTx() later
    sys.stderr.write("loading " + args.mrnaFastaFile + "...\n")
    checkfile(args.mrnaFastaFile)
    txs = fastahash(args.mrnaFastaFile)
    cancerBamFile = ''
    normalBamFile = ''
    refGenomeFile = args.refGenomeFile
    # Expected outputs of the upstream categorization step.
    cancerCallsFile = args.outDirName + "/" + args.outBaseName + "/canceronly.tab.txt"
    normalCallsFile = args.outDirName + "/" + args.outBaseName + "/normalonly.tab.txt"
    germCallsFile = args.outDirName + "/" + args.outBaseName + "/germline.tab.txt"
    otherCallsFile = args.outDirName + "/" + args.outBaseName + "/uncategorized.tab.txt"
    # fix if unmerged
    if not configDict.has_key('bamFileName1'):
        configDict['bamFileName1'] = configDict['bamFileName']
        configDict['bamFileName2'] = configDict['bamFileName']
    # Classify the two BAMs as cancer vs. normal from their TCGA filenames.
    bamType1 = getTypeFromTCGA(configDict['bamFileName1'])
    bamType2 = getTypeFromTCGA(configDict['bamFileName2'])
    print ("bamfile1=%s bamFile2=%s bamType1=%s bamType2=%s"
           % (configDict['bamFileName1'], configDict['bamFileName2'], bamType1, bamType2))
    if bamType1 != bamType2 and bamType1 != None and bamType2 != None:
        if bamType1 == 'CANCER':
            if bamType2 != 'NORMAL':
                raise NameError('bam1 is cancer but bam2 is not normal')
            cancerBamFile = configDict['bamFileName1']
            normalBamFile = configDict['bamFileName2']
        if bamType2 == 'CANCER':
            if bamType1 != 'NORMAL':
                raise NameError('bam2 is cancer but bam1 is not normal')
            cancerBamFile = configDict['bamFileName2']
            normalBamFile = configDict['bamFileName1']
    else:
        print 'cannot determine bamfile cancer/normal from filenames in config.txt, defaulting to normal.'
        normalBamFile = configDict['bamFileName1']
        cancerBamFile = configDict['bamFileName2']
    checkfile(cancerBamFile)
    checkfile(normalBamFile)
    checkfile(normalCallsFile)
    checkfile(cancerCallsFile)
    checkfile(germCallsFile)
    checkfile(otherCallsFile)
    checkfile(refGenomeFile)
    cancerBam = pysam.Samfile(cancerBamFile, 'rb') # rb = read, binary
    normalBam = pysam.Samfile(normalBamFile, 'rb') # rb = read, binary
    cancerCalls = open(cancerCallsFile, 'r')
    normalCalls = open(normalCallsFile, 'r')
    germCalls = open(germCallsFile, 'r')
    otherCalls = open(otherCallsFile, 'r')
    refGenome = pysam.Fastafile(refGenomeFile)
    cancerBreaksOut = open(args.outDirName + "/" + args.outBaseName + "/cancerbreaks.tab.txt", 'w')
    normalBreaksOut = open(args.outDirName + "/" + args.outBaseName + "/normalbreaks.tab.txt", 'w')
    germBreaksOut = open(args.outDirName + "/" + args.outBaseName + "/germlinebreaks.tab.txt", 'w')
    otherBreaksOut = open(args.outDirName + "/" + args.outBaseName + "/uncategorizedbreaks.tab.txt", 'w')
    # Parallel tuples: category name, its input calls, and its output file.
    callSetListNames = ('cancer', 'normal', 'germ','other')
    callSetListInFiles = (cancerCalls, normalCalls, germCalls, otherCalls)
    callSetListOutFiles = (cancerBreaksOut, normalBreaksOut, germBreaksOut, otherBreaksOut)
    for i in range(len(callSetListNames)):
        for line in callSetListInFiles[i]:
            # Tab-separated call record: chrom, start, end, ..., gene (col 8).
            col = line.strip().split("\t")
            chr = col[0]
            start = int(col[1])
            end = int(col[2])
            gene = col[7]
            # Pull supporting reads for this region from both BAMs.
            cancerCluster = fetchRegion(cancerBam,refGenome,int(args.maxReadLen),chr,start,end,gene,args.zeroChar,int(args.minClipQual),args.usechr)
            cancerCluster.type='CANCER'
            normalCluster = fetchRegion(normalBam,refGenome,int(args.maxReadLen),chr,start,end,gene,args.zeroChar,int(args.minClipQual),args.usechr)
            normalCluster.type='NORMAL'
            mergeCluster = mergeClusters(cancerCluster,normalCluster,txs)
            clusterout = mergeCluster.outstring()
            infodumpout = mergeCluster.infodump()
            # Echo the input line with the merged-cluster summary appended.
            callSetListOutFiles[i].write(line.strip("\n") + "\t" + clusterout + "\n" + infodumpout + "\n")
        callSetListInFiles[i].close()
        callSetListOutFiles[i].close()
if __name__ == '__main__':
    # commandline args
    parser = argparse.ArgumentParser(description='parse the output of discordant.py')
    parser.add_argument('-c', '--config', dest='configFileName', default='config.txt',
                        help='config file left by discordant.py')
    parser.add_argument('-o', '--outbasename', dest='outBaseName', required=True,
                        help='basename for output files')
    parser.add_argument('-d', '--outdirname', dest='outDirName', default='output',
                        help='output directory')
    # NOTE(review): '-e-' looks like a typo for '-e'; left unchanged because
    # existing invocations may depend on the current spelling -- confirm.
    parser.add_argument('-e-', '--eltfile', dest='eltFile', default='sumEltList.txt',
                        help='list of element families to include')
    parser.add_argument('-l', '--maxReadLen', dest='maxReadLen', default=100,
                        help='max read length in basepairs (default 100 bp)')
    parser.add_argument('-z', '--zerochar', dest='zeroChar', default='#',
                        help='for fastq quality scores, the character corresponding to zero (default #)')
    parser.add_argument('-g', '--refgenome', dest='refGenomeFile', required=True,
                        help='ref genome in fasta format, indexed with samtools faidx')
    parser.add_argument('-q', '--minclipqual', dest='minClipQual', default=30,
                        help='minimum avg. quality cutoff for trimmed region (default 30)')
    parser.add_argument('-m', '--mrnafile', dest='mrnaFastaFile', required=True,
                        help='directory of FASTA files with TE reference sequences in them, plus a config.txt file with ref names')
    parser.add_argument('--usechr', action="store_true", default=False,
                        help='set if reference genome uses "chr" prefix (default=False)')
    args = parser.parse_args()
    main(args)
| {
"content_hash": "113a938c698fce90980e5c0c12c37495",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 148,
"avg_line_length": 38.525835866261396,
"alnum_prop": 0.5704536489151874,
"repo_name": "adamewing/GRIPper",
"id": "1a2e1a0bb0a35368a061eec20643d5c25e364a61",
"size": "25373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/pinpoint.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97585"
}
],
"symlink_target": ""
} |
""" A memento for a workbench window. """
# Enthought library imports.
from traits.api import Any, Dict, HasTraits, Str, Tuple
class WorkbenchWindowMemento(HasTraits):
    """ A memento for a workbench window.

    Captures the restorable state of a window: the active perspective,
    the editor-area layout, per-perspective layout mementos, and the
    window geometry (position/size).
    """

    # The Id of the active perspective.
    active_perspective_id = Str

    # The memento for the editor area.
    editor_area_memento = Any

    # Mementos for each perspective that has been seen.
    #
    # The keys are the perspective Ids, the values are the toolkit-specific
    # mementos.
    perspective_mementos = Dict(Str, Any)

    # The position of the window.
    position = Tuple

    # The size of the window.
    size = Tuple

    # Any extra data the toolkit implementation may want to keep.
    toolkit_data = Any()

#### EOF ######################################################################
| {
"content_hash": "6c79e5fffd491eb864fbed1de52a4416",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6148325358851675,
"repo_name": "pankajp/pyface",
"id": "3b3ec350682e87ac0ea4364a148e2b6cde9469ae",
"size": "836",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pyface/workbench/workbench_window_memento.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13515"
},
{
"name": "Python",
"bytes": "2279955"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
# Package version for op_robot_tests.
version = '2.3'

# Runtime dependencies of the test suite.
REQUIREMENTS = [
    # -*- Extra requirements: -*-
    'Faker',
    'Pillow',
    'PyYAML',
    'barbecue',
    'chromedriver',
    'dateutils',
    'dpath',
    'haversine',
    'iso8601',
    'jsonpath-rw',
    'munch',
    'parse',
    'pytz',
    'robotframework',
    'robotframework-selenium2library',
    'selenium < 3.0.dev0',
]

setup(
    name='op_robot_tests',
    version=version,
    description="",
    long_description="""\
""",
    # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[],
    keywords='',
    author='',
    author_email='',
    url='',
    license='',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    include_package_data=True,
    zip_safe=False,
    install_requires=REQUIREMENTS,
    entry_points={
        'console_scripts': [
            'op_tests = op_robot_tests.runner:runner',
        ],
    },
)
| {
"content_hash": "ebfbf50942fd328a442c14fa90da0673",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 96,
"avg_line_length": 25.046511627906977,
"alnum_prop": 0.4911792014856082,
"repo_name": "bubanoid/robot_tests",
"id": "9af4480dbc3ac36a55dadfdc4afc80735cbf3ced",
"size": "1077",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "67191"
},
{
"name": "RobotFramework",
"bytes": "507062"
}
],
"symlink_target": ""
} |
"""Models defining a product's material properties"""
| {
"content_hash": "5d311cdeb73841746b3bd1b5f29e3e48",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 53,
"avg_line_length": 54,
"alnum_prop": 0.7592592592592593,
"repo_name": "byteweaver/django-eca-catalogue",
"id": "1c89c30972135e10c5b3b1fc231ce0ab9aa462b4",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eca_catalogue/material/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12053"
}
],
"symlink_target": ""
} |
from collections import Counter
import StringIO
import gzip
import hashlib
import mailbox
import os
import random
import re
import string
import sys
import tempfile
import urlparse
import requests
import bs4
def normalize_email(email):
    """Canonicalize an address: turn the obfuscated ' at ' into '@' and
    lowercase it.  Falsy input (None, '') yields None."""
    if not email:
        return None
    return email.replace(' at ', '@').lower()
class MailArchive(object):
def __init__(self, base_url):
self.base_url = base_url
self._archives = None
def cache_path(self, url):
md5 = hashlib.md5(url).hexdigest()
filename = urlparse.urlparse(url).path.split('/')[-1]
return 'cache/%s-%s' % (md5, filename)
def cache_url(self, url, contents):
open(self.cache_path(url), 'wb').write(contents)
def is_cached(self, url):
return os.path.exists(self.cache_path(url))
def download_archive(self, url):
r = requests.get(url)
return gzip.GzipFile(fileobj=StringIO.StringIO(r.content)).read()
def download(self):
r = requests.get(self.base_url)
soup = bs4.BeautifulSoup(r.text)
rel_archive_urls = [t['href'] for t in soup.find_all('a') if t['href'].endswith('.gz')]
abs_urls = [urlparse.urljoin(self.base_url, rel_url) for rel_url in rel_archive_urls]
for abs_url in abs_urls:
if not self.is_cached(abs_url):
arch = self.download_archive(abs_url)
self.cache_url(abs_url, arch)
#print abs_url, len(arch)
# else:
# print "%s is cached" % abs_url
open(self.cache_path(self.base_url), 'w').write('\n'.join(abs_urls))
def archives(self):
'''Returns all the mailbox.mbox objects for all months.'''
if not self._archives:
urls = open(self.cache_path(self.base_url))
self._archives = []
for url in urls:
cache_path = self.cache_path(url.strip())
if os.path.exists(cache_path):
self._archives.append(mailbox.mbox(cache_path))
return self._archives
def authors(self):
authors = set()
for archive in self.archives():
for message in archive:
if message.get('from'):
authors.add(normalize_email(message.get('from')))
else:
print "Message has no from field. Fields: %r" % message.keys()
return list(authors)
def messages_by_author(self, email_address):
messages = []
for archive in self.archives():
for message in archive:
from_email = normalize_email(message.get('from'))
if from_email and from_email.startswith(email_address):
messages.append(message)
return messages
class Message(object):
    """Wraps a mailbox message and extracts its substantive body text."""
    def __init__(self, msg_obj):
        self.msg_obj = msg_obj

    def body(self):
        """Payload text minus quoted lines ('>'), blank lines, 'On ...'
        lead-ins, and everything after the attachment separator."""
        marker = '-------------- next part --------------'
        kept = []
        for raw in self.msg_obj.get_payload().split('\n'):
            if raw == marker:
                break
            if raw.startswith('>') or raw == '' or raw.startswith('On '):
                continue
            kept.append(raw)
        return '\n'.join(kept)
# TODO(scotty): make a class
def choice(words):
    """Return a random element of *words*.

    The original referenced ``random.seed`` without calling it (a no-op
    expression statement) and then re-implemented random.choice via
    randint; delegate to the standard library instead.
    """
    return random.choice(words)
def test_sentence_substrings(sentence, text, n=6):
words = string.split(sentence)
groups = [words[i:i+n] for i in range(0, len(words), n)]
for group in groups:
group = " ".join(group)
if group in text:
return False
return True
def run(text):
    """Generate up to 50 novel sentences from *text* using a second-order
    Markov chain and return them as one string.

    Returns None when *text* contains no sentence-ending word (nothing to
    seed the generator with).
    """
    # Strip parenthesized asides before tokenizing.
    text = re.sub(r'\([^)]*\)', '', text)
    words = string.split(text)
    arr = []  # NOTE(review): never used after this point
    end_sentence = []  # word pairs whose second word ends a sentence
    dict = {}  # (prev2, prev1) -> list of observed following words
    prev1 = ''
    prev2 = ''
    for word in words:
        if prev1 != '' and prev2 != '':
            key = (prev2, prev1)
            if dict.has_key(key):
                dict[key].append(word)
            else:
                dict[key] = [word]
            # A pair ending in ./?/! can terminate a generated sentence.
            if re.match("[\.\?\!]", prev1[-1:]):
                end_sentence.append(key)
        prev2 = prev1
        prev1 = word
    if end_sentence == []:
        return
    key = ()  # empty key forces the else-branch to pick a starting pair
    count = 50  # sentences still to generate
    max_attempts = 50000  # hard cap so degenerate chains terminate
    gtext = ""
    sentence = []
    attempts = 0
    while 1:
        if dict.has_key(key):
            # Walk the chain: append a random successor, slide the window.
            word = choice(dict[key])
            sentence.append(word)
            key = (key[1], word)
            if key in end_sentence:
                sentence_str = " ".join(sentence)
                attempts += 1
                # check if the beginning of sentence occurs in the text
                if sentence_str[:15] not in gtext and sentence_str not in text and test_sentence_substrings(sentence_str, text):
                    gtext += sentence_str + " "
                    count = count - 1
                sentence = []
                key = choice(end_sentence)
                if count <= 0 or attempts >= max_attempts:
                    break
        else:
            # Dead end (or initial state): restart from a sentence boundary.
            key = choice(end_sentence)
    return gtext
def main(url, email):
    """Download/cache the archive at *url*, then print the list of authors
    and a Markov-generated pastiche of the posts written by *email*."""
    if not os.path.exists('cache/'):
        os.mkdir('cache/')
    mail_archive = MailArchive(url)
    mail_archive.download()
    print mail_archive.authors()
    messages = mail_archive.messages_by_author(email)
    # Concatenate the de-quoted bodies of all of the author's posts.
    text = '\n\n'.join([Message(m).body() for m in messages])
    print run(text)
if __name__ == '__main__':
    # Usage: python mailmark.py <archive-base-url> <author-email>
    main(sys.argv[1], sys.argv[2])
| {
"content_hash": "0ee6210def741149323c5afa3ab32785",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 128,
"avg_line_length": 29.342245989304814,
"alnum_prop": 0.537452159650082,
"repo_name": "scottyallen/mailmark",
"id": "cf77ac18dc8ca20204898f9a0067faf9aac956ad",
"size": "5487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5487"
}
],
"symlink_target": ""
} |
from trac.core import *
from trac.resource import Resource
from trac.web.api import IRequestHandler
from trac.web.chrome import chrome_info_script, web_context
from trac.wiki.api import WikiSystem
from trac.wiki.formatter import format_to
class WikiRenderer(Component):
    """Wiki text renderer."""

    implements(IRequestHandler)

    is_valid_default_handler = False

    # IRequestHandler methods

    def match_request(self, req):
        return req.path_info == '/wiki_render'

    def process_request(self, req):
        # Allow all POST requests (with a valid __FORM_TOKEN, ensuring that
        # the client has at least some permission). Additionally, allow GET
        # requests from TRAC_ADMIN for testing purposes.
        if req.method != 'POST':
            req.perm.require('TRAC_ADMIN')

        args = req.args
        resource = Resource(args.get('realm', WikiSystem.realm),
                            id=args.get('id'),
                            version=args.getint('version'))

        # Optional rendering flags, passed through only when present.
        options = {}
        if 'escape_newlines' in args:
            options['escape_newlines'] = args.getbool('escape_newlines',
                                                      False)
        if 'shorten' in args:
            options['shorten'] = args.getbool('shorten', False)

        context = web_context(req, resource)
        rendered = format_to(self.env, args.get('flavor'), context,
                             args.get('text', ''), **options)
        req.send((rendered + chrome_info_script(req)).encode('utf-8'))
| {
"content_hash": "84180aaa3bb08d86a509436774d4a050",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 36.18604651162791,
"alnum_prop": 0.6330334190231363,
"repo_name": "walty8/trac",
"id": "e64328538f65941142034231431fa7a06bdfb7a9",
"size": "2048",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "trac/wiki/web_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3268"
},
{
"name": "CSS",
"bytes": "72839"
},
{
"name": "HTML",
"bytes": "362545"
},
{
"name": "JavaScript",
"bytes": "88713"
},
{
"name": "Makefile",
"bytes": "19481"
},
{
"name": "PowerShell",
"bytes": "12638"
},
{
"name": "Python",
"bytes": "3953800"
},
{
"name": "Shell",
"bytes": "9573"
}
],
"symlink_target": ""
} |
from class_type import ClassType
class FileClass(ClassType):
    """Class type named "file" (see ClassType for shared behavior)."""
    def __init__(self):
        super().__init__("file")

# Module-level singletons: the class object and an instance derived from it.
FILE_CLASS = FileClass()
FILE_TYPE = FILE_CLASS.instance()
| {
"content_hash": "5bdb9fa811bf084d077496c805495f26",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 33,
"avg_line_length": 18.1,
"alnum_prop": 0.6519337016574586,
"repo_name": "PiJoules/python-type-inference",
"id": "07edd44c3602dce5e8acc4f73773db418f3da382",
"size": "181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "builtin_types/file_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104822"
}
],
"symlink_target": ""
} |
"""
Cobbler Tops
============
Cobbler Tops is a master tops subsystem used to look up mapping information
from Cobbler via its API. The same cobbler.* parameters are used for both
the Cobbler tops and Cobbler pillar modules.
.. code-block:: yaml
master_tops:
cobbler: {}
cobbler.url: https://example.com/cobbler_api #default is http://localhost/cobbler_api
cobbler.user: username # default is no username
cobbler.password: password # default is no password
Module Documentation
====================
"""
import logging
import xmlrpc.client
# Set up logging
log = logging.getLogger(__name__)

# Default values for the cobbler.* master-config options described in the
# module docstring; top() reads its connection settings from here.
__opts__ = {
    "cobbler.url": "http://localhost/cobbler_api",
    "cobbler.user": None,
    "cobbler.password": None,
}
def top(**kwargs):
    """
    Look up top data in Cobbler for a minion.
    """
    cfg_url = __opts__["cobbler.url"]
    cfg_user = __opts__["cobbler.user"]
    cfg_password = __opts__["cobbler.password"]
    minion_id = kwargs["opts"]["id"]

    log.info("Querying cobbler for information for %r", minion_id)
    try:
        server = xmlrpc.client.Server(cfg_url, allow_none=True)
        if cfg_user:
            server.login(cfg_user, cfg_password)
        data = server.get_blended_data(None, minion_id)
    except Exception:  # pylint: disable=broad-except
        # Treat any failure to reach/query cobbler as "no top data".
        log.exception("Could not connect to cobbler.")
        return {}
    return {data["status"]: data["mgmt_classes"]}
| {
"content_hash": "d2e871f8bfd4e5dae85cb30fcb43f5bd",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 87,
"avg_line_length": 24.946428571428573,
"alnum_prop": 0.6420901932712957,
"repo_name": "saltstack/salt",
"id": "7a5d66662ab4f10f12a180c35d2ab7816115e14b",
"size": "1397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/tops/cobbler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
import clr
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
clr.AddReference("RevitAPI")
import Autodesk
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
def tolist(obj1):
    """Pass iterables through unchanged; wrap anything else in a list."""
    return obj1 if hasattr(obj1, "__iter__") else [obj1]
# Dynamo inputs: IN[0] host elements, IN[1] family types, IN[2] points.
host = UnwrapElement(tolist(IN[0]))
ftype = UnwrapElement(tolist(IN[1]))
fpts = UnwrapElement(tolist(IN[2]))
OUT = []
strt = Autodesk.Revit.DB.Structure.StructuralType.NonStructural
# When a single type/host is supplied, it is reused for every point.
ftp_len = len(ftype) == 1
hst_len = len(host) == 1
TransactionManager.Instance.EnsureInTransaction(doc)
for i in xrange(len(fpts) ):
    # Convert the Dynamo point to a Revit XYZ.
    p = fpts[i].ToXyz(True)
    j = 0 if ftp_len else i
    k = 0 if hst_len else i
    try:
        # Ensure the family type is active before placing an instance.
        if not ftype[j].IsActive : ftype[j].Activate()
        level = doc.GetElement(host[k].LevelId)
        nf = doc.Create.NewFamilyInstance(p,ftype[j],host[k],level,strt)
        OUT.append(nf.ToDSType(False))
    except:
        # Placement failed for this point; keep output aligned with inputs.
        OUT.append(None)
TransactionManager.Instance.TransactionTaskDone()
"content_hash": "9b855d20f96fc649774de7384ce05e33",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.7664930555555556,
"repo_name": "dimven/SpringNodes",
"id": "4364c4f900b15c7bf3a31f04fa31fd5f777f354b",
"size": "1222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/FamilyInstance.ByHostAndPoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126626"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import unittest
import PRESUBMIT
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from PRESUBMIT_test_mocks import MockOutputApi, MockChange
class MockInputApi(object):
    """ Mocked input api for unit testing of presubmit.

    Stands in for the real presubmit input API so tests can control the
    file system view and the set of changed files.
    """

    def __init__(self):
        self.re = re
        self.os_path = os.path
        self.files = []
        self.is_committing = False

    def AffectedFiles(self):
        return self.files

    def AffectedSourceFiles(self):
        return self.files

    def ReadFile(self, f):
        """ Returns the mock contents of f if they've been defined. """
        matches = (af.NewContents() for af in self.files
                   if af.LocalPath() == f)
        return next(matches, None)
class MockFile(object):
    """Mock file object so that presubmit can invoke file system operations
    against in-memory contents instead of the real tree.
    """

    def __init__(self, local_path, new_contents):
        self._local_path = local_path
        self._new_contents = new_contents
        # (1-based line number, line) pairs, mirroring the real API.
        self._changed_contents = list(enumerate(new_contents, 1))

    def ChangedContents(self):
        return self._changed_contents

    def NewContents(self):
        return self._new_contents

    def LocalPath(self):
        return self._local_path

    def AbsoluteLocalPath(self):
        return self._local_path
# Format string used as the contents of a mock sync.proto in order to
# test presubmit parsing of EntitySpecifics definition in that file.
# Note: deliberately includes blank and '//comment' lines.
MOCK_PROTOFILE_CONTENTS = ('\n'
  'message EntitySpecifics {\n'
  '//comment\n'
  '\n'
  'optional AutofillSpecifics autofill = 123;\n'
  'optional AppSpecifics app = 456;\n'
  'optional AppSettingSpecifics app_setting = 789;\n'
  'optional ExtensionSettingSpecifics extension_setting = 910;\n'
  '//comment\n'
  '}\n')

# Format string used as the contents of a mock model_type.cc
# in order to test presubmit parsing of the ModelTypeInfoMap in that file.
# The %s placeholder receives the entry under test; APP_SETTINGS is a fixed
# first entry.
MOCK_MODELTYPE_CONTENTS =('\n'
  'const ModelTypeInfo kModelTypeInfoMap[] = {\n'
  '{APP_SETTINGS, "APP_SETTING", "app_settings", "App settings",\n'
  'sync_pb::EntitySpecifics::kAppSettingFieldNumber, 13},\n'
  '%s\n'
  '};\n')
class ModelTypeInfoChangeTest(unittest.TestCase):
  """Unit testing class that contains tests for sync/PRESUBMIT.py.

  Each test feeds one kModelTypeInfoMap entry (via _testChange) through the
  presubmit check and asserts on the warnings it produces.
  """

  def test_ValidChangeMultiLine(self):
    """A well-formed entry spanning two lines produces no warnings."""
    results = self._testChange('{APPS, "APP", "apps", "Apps",\n'
     'sync_pb::EntitySpecifics::kAppFieldNumber, 12},')
    self.assertEqual(0, len(results))

  def testValidChangeToleratesPluralization(self):
    """Singular model type string vs. plural enum name is accepted."""
    results = self._testChange('{APPS, "APP", "apps", "App",\n'
     'sync_pb::EntitySpecifics::kAppFieldNumber, 12},')
    self.assertEqual(0, len(results))

  def testValidChangeGrandfatheredEntry(self):
    """Legacy entry with empty strings and -1 field number is accepted."""
    results = self._testChange('{PROXY_TABS, "", "", "Tabs", -1, 25},')
    self.assertEqual(0, len(results))

  def testInvalidChangeMismatchedNotificationType(self):
    """Notification type not matching the enum name is flagged."""
    results = self._testChange('{AUTOFILL, "AUTOFILL_WRONG", "autofill",\n'
     '"Autofill",sync_pb::EntitySpecifics::kAutofillFieldNumber, 6},')
    self.assertEqual(1, len(results))
    self.assertTrue('notification type' in results[0].message)

  def testInvalidChangeInconsistentModelType(self):
    """Model type string inconsistent with the other fields is flagged."""
    results = self._testChange('{AUTOFILL, "AUTOFILL", "autofill",\n'
     '"Autofill Extra",sync_pb::EntitySpecifics::kAutofillFieldNumber, 6},')
    self.assertEqual(1, len(results))
    self.assertTrue('model type string' in results[0].message)

  def testInvalidChangeNotTitleCased(self):
    """Model type string that is not Title Cased is flagged."""
    results = self._testChange('{AUTOFILL, "AUTOFILL", "autofill",\n'
     '"autofill",sync_pb::EntitySpecifics::kAutofillFieldNumber, 6},')
    self.assertEqual(1, len(results))
    self.assertTrue('title' in results[0].message)

  def testInvalidChangeInconsistentRootTag(self):
    """Root tag inconsistent with the model type is flagged."""
    results = self._testChange('{AUTOFILL, "AUTOFILL", "autofill root",\n'
     '"Autofill",sync_pb::EntitySpecifics::kAutofillFieldNumber, 6},')
    self.assertEqual(1, len(results))
    self.assertTrue('root tag' in results[0].message)

  def testInvalidChangeDuplicatedValues(self):
    """An entry duplicating the fixed APP_SETTINGS entry is flagged per field."""
    results = self._testChange('{APP_SETTINGS, "APP_SETTING",\n'
      '"app_settings", "App settings",\n'
      'sync_pb::EntitySpecifics::kAppSettingFieldNumber, 13},\n')
    self.assertEqual(6, len(results))
    self.assertTrue('APP_SETTINGS' in results[0].message)

  def testBlacklistedRootTag(self):
    """A reserved root tag is rejected."""
    results = self._testChange('{EXTENSION_SETTING, "EXTENSION_SETTING",\n'
      '"_mts_schema_descriptor","Extension Setting",\n'
      'sync_pb::EntitySpecifics::kExtensionSettingFieldNumber, 6},')
    self.assertEqual(2, len(results))
    self.assertTrue('_mts_schema_descriptor' in results[0].message)
    self.assertTrue("blacklist" in results[0].message)

  def _testChange(self, modeltype_literal):
    # Build a mock change containing sync.proto and model_type.cc with the
    # entry under test spliced in, then run the commit-time presubmit check.
    mock_input_api = MockInputApi()
    mock_input_api.files = [
      MockFile(os.path.abspath('./protocol/sync.proto'),
           MOCK_PROTOFILE_CONTENTS),
      MockFile(os.path.abspath('./syncable/model_type.cc'),
           MOCK_MODELTYPE_CONTENTS % (modeltype_literal))
    ]
    return PRESUBMIT.CheckChangeOnCommit(mock_input_api, MockOutputApi())
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| {
"content_hash": "3df262af65f2778ee688bc63f6078ac6",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 35.91034482758621,
"alnum_prop": 0.6982907624351834,
"repo_name": "google-ar/WebARonARCore",
"id": "0fd049de560336c32bc1f1b1e73f0f59cd1d10cb",
"size": "5374",
"binary": false,
"copies": "3",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "components/sync/PRESUBMIT_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import os
from dotenv import load_dotenv
# Load environment variables from the .env file next to this module before
# Config's class body reads them via os.getenv.
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
    """Application settings, sourced from environment variables (.env)."""
    # DB
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL')
    # fix MySQL - https://www.pythonanywhere.com/forums/topic/2465/
    SQLALCHEMY_POOL_RECYCLE = 280
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # end fix
    # TG
    TELEGRAM_BOT_RELEASE_TOKEN = os.getenv('TELEGRAM_BOT_RELEASE_TOKEN')
    BOT_NAME = os.getenv('BOT_NAME')
    # Idiom fix: the comparison already yields a bool, so the redundant
    # "True if ... else False" ternary is dropped.
    IS_THREADED_BOT = os.getenv('IS_THREADED_BOT') == "True"
    # YANDEX
    YANDEX_API_KEY = os.getenv('YANDEX_API_KEY')
    # FLASK
    BASE_DOMAIN = os.getenv('BASE_DOMAIN')
    SECRET_KEY = os.getenv('SECRET_KEY')
    OTHER_SECRET_KEY = os.getenv('OTHER_SECRET_KEY')
| {
"content_hash": "a95a050dd8962af37bb8c9a0b35d1847",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.6721105527638191,
"repo_name": "EeOneDown/spbu4u",
"id": "d4deea6d80919b41a17f7812f0d6bc4b8b11a702",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "217"
},
{
"name": "Dockerfile",
"bytes": "159"
},
{
"name": "HTML",
"bytes": "2552"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "212945"
},
{
"name": "Shell",
"bytes": "481"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from collections import namedtuple
from datetime import datetime, timedelta
import pytz
from casexml.apps.case.dbaccessors import get_open_case_docs_in_domain
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.xml import V2
import uuid
from xml.etree import ElementTree
from corehq.apps.app_manager.const import USERCASE_TYPE
from corehq.apps.domain.models import Domain
from corehq.apps.es.domains import DomainES
from corehq.apps.es import filters
from corehq.apps.hqcase.utils import submit_case_blocks, get_case_by_domain_hq_user_id
from corehq.feature_previews import CALLCENTER
from corehq.util.couch_helpers import paginate_view
from corehq.util.quickcache import quickcache
from corehq.util.timezones.conversions import UserTime
from dimagi.utils.couch import CriticalSection
class DomainLite(namedtuple('DomainLite', 'name default_timezone cc_case_type')):
    @property
    def midnights(self):
        """Two datetimes, sorted ascending: midnight in this domain's
        timezone on either side of the current date.
        """
        zone = pytz.timezone(self.default_timezone)
        utc_now = datetime.utcnow()
        utc_midnight = utc_now.replace(hour=0, minute=0, second=0, microsecond=0)
        local_midnight = UserTime(utc_midnight, zone).server_time().done()
        step = timedelta(days=(1 if local_midnight < utc_now else -1))
        return sorted([local_midnight, local_midnight + step])
# Lightweight pairing of a call center case id with the hq_user_id stored
# on that case.
CallCenterCase = namedtuple('CallCenterCase', 'case_id hq_user_id')
def sync_user_case(commcare_user, case_type, owner_id, copy_user_data=True):
    """
    Each time a CommCareUser is saved this method gets called and creates or updates
    a case associated with the user with the user's details.

    This is also called to create user cases when the usercase is used for the
    first time.
    """
    # Serialize concurrent syncs of the same (case_type, user) pair.
    with CriticalSection(['user_case_%s_for_%s' % (case_type, commcare_user._id)]):
        domain = commcare_user.project

        def valid_element_name(name):
            # Case properties become XML elements on submission; probe each
            # candidate name with a throwaway element.
            try:
                ElementTree.fromstring('<{}/>'.format(name))
                return True
            except ElementTree.ParseError:
                return False

        # remove any keys that aren't valid XML element names
        fields = {k: v for k, v in commcare_user.user_data.items() if valid_element_name(k)} if copy_user_data else {}

        # language or phone_number can be null and will break
        # case submission
        fields.update({
            'name': commcare_user.name or commcare_user.raw_username,
            'username': commcare_user.raw_username,
            'email': commcare_user.email,
            'language': commcare_user.language or '',
            'phone_number': commcare_user.phone_number or ''
        })

        case = get_case_by_domain_hq_user_id(domain.name, commcare_user._id, case_type)
        # The case is closed when the user is deleted or deactivated.
        close = commcare_user.to_be_deleted() or not commcare_user.is_active
        caseblock = None
        if case:
            # Only submit an update when something actually differs from the
            # existing case (closed state, type, name, or any other field).
            props = dict(case.dynamic_case_properties())

            changed = close != case.closed
            changed = changed or case.type != case_type
            changed = changed or case.name != fields['name']

            if not changed:
                for field, value in fields.items():
                    if field != 'name' and props.get(field) != value:
                        changed = True
                        break

            if changed:
                caseblock = CaseBlock(
                    create=False,
                    case_id=case._id,
                    version=V2,
                    case_type=case_type,
                    close=close,
                    update=fields
                )
        else:
            # New case: record the owning HQ user id for later lookup.
            fields['hq_user_id'] = commcare_user._id
            caseblock = CaseBlock(
                create=True,
                case_id=uuid.uuid4().hex,
                owner_id=owner_id,
                user_id=owner_id,
                version=V2,
                case_type=case_type,
                update=fields
            )

        if caseblock:
            casexml = ElementTree.tostring(caseblock.as_xml())
            submit_case_blocks(casexml, domain.name)
def sync_call_center_user_case(user):
    """Sync the user's call center case when the domain has the call
    center feature enabled; otherwise do nothing."""
    domain = user.project
    if not (domain and domain.call_center_config.enabled):
        return
    config = domain.call_center_config
    sync_user_case(user, config.case_type, config.case_owner_id)
def sync_usercase(user):
    """Sync the usercase (owned by the user itself, without copying user
    data) when the domain has usercases enabled; otherwise do nothing."""
    domain = user.project
    if not (domain and domain.usercase_enabled):
        return
    sync_user_case(user, USERCASE_TYPE, user.get_id, copy_user_data=False)
def is_midnight_for_domain(midnight_form_domain, error_margin=15, current_time=None):
    """True when *current_time* (default: utcnow) is at or after
    *midnight_form_domain* and within *error_margin* minutes of it."""
    current_time = current_time or datetime.utcnow()
    elapsed = current_time - midnight_form_domain
    return elapsed.days >= 0 and elapsed < timedelta(minutes=error_margin)
def get_call_center_domains():
    """Return DomainLite tuples for every active, non-snapshot domain with
    the call center feature enabled."""
    query = (
        DomainES()
        .is_active()
        .is_snapshot(False)
        .filter(filters.term('call_center_config.enabled', True))
        .fields(['name', 'default_timezone', 'call_center_config.case_type'])
    )
    return [
        DomainLite(
            name=hit['name'],
            default_timezone=hit['default_timezone'],
            cc_case_type=hit.get('call_center_config.case_type', '')
        )
        for hit in query.run().hits
    ]
def get_call_center_cases(domain_name, case_type, user=None):
    """Return CallCenterCase objects for open cases of *case_type*.

    Only cases carrying a truthy ``hq_user_id`` are included. When *user*
    is given, the search is restricted to that user's owner ids.
    """
    if user:
        case_docs = (
            doc
            for owner_id in user.get_owner_ids()
            for doc in get_open_case_docs_in_domain(domain_name, case_type,
                                                    owner_id=owner_id)
        )
    else:
        case_docs = get_open_case_docs_in_domain(domain_name, case_type)
    return [
        CallCenterCase(case_id=doc['_id'], hq_user_id=doc.get('hq_user_id'))
        for doc in case_docs
        if doc.get('hq_user_id', None)
    ]
@quickcache(['domain'])
def get_call_center_case_type_if_enabled(domain):
    """Return the domain's call center case type, or None when the
    CALLCENTER toggle is off for *domain* (result is cached)."""
    if not CALLCENTER.enabled(domain):
        return None
    return Domain.get_by_name(domain).call_center_config.case_type
| {
"content_hash": "a9a98ce0896ebf63eb374d9321c48a52",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 118,
"avg_line_length": 35.494505494505496,
"alnum_prop": 0.6052631578947368,
"repo_name": "puttarajubr/commcare-hq",
"id": "aacc8bff4c388724027f85dd76debbf80c45b2fb",
"size": "6460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/callcenter/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
import listmixin # Uses recipe 440656
import array
# Public domain
class BitList(listmixin.ListMixin):
    """
    List of bits.
    The constructor takes a list or string containing zeros and ones,
    and creates an object that acts like list().
    This class is memory compact (uses 1 byte per every 8 elements).
    """
    # Bit i of the list is stored little-endian within bytes:
    # byte index i >> 3, bit mask 1 << (i & 7).
    def __init__(self, other=()):
        self.data = array.array('B')
        self.length = len(other)
        if hasattr(other, 'capitalize'):
            # Initialize from string.
            # Any character other than '0' counts as a set bit.
            for i in xrange((len(other) + 7) // 8):
                c = other[i*8:(i+1)*8]
                byte = 0
                for j in xrange(len(c)):
                    if c[j] != '0':
                        byte |= 1<<j
                self.data.append(byte)
        else:
            # Initialize from sequence.
            # Any truthy element counts as a set bit.
            for i in xrange((len(other) + 7) // 8):
                c = other[i*8:(i+1)*8]
                byte = 0
                for j in xrange(len(c)):
                    if c[j]:
                        byte |= 1<<j
                self.data.append(byte)
    def _constructor(self, iterable):
        # ListMixin hook: build a new instance from an iterable.
        return BitList(iterable)
    def __len__(self):
        # Length in bits, not bytes.
        return self.length
    def _get_element(self, i):
        # ListMixin hook: read bit i (returns 0 or 1).
        return (self.data[i>>3]>>(i&7))&1
    def _set_element(self, i, x):
        # ListMixin hook: write bit i. Mirrors the constructor's rules:
        # any truthy value except the string '0' sets the bit.
        index = i>>3
        mask = (1<<(i&7))
        if x and x != '0':
            if not self.data[index] & mask:
                self.data[index] |= mask
        else:
            if self.data[index] & mask:
                self.data[index] ^= mask
    def _resize_region(self, start, end, new_size):
        """
        Resize slice self[start:end] so that it has size new_size.
        """
        old_size = end - start
        if new_size == old_size:
            return
        elif new_size > old_size:
            # Grow: extend the byte store first, then shift the tail right,
            # copying from the highest index down to avoid clobbering.
            delta = new_size - old_size
            self.length += delta
            add_bytes = (self.length + 7) // 8 - len(self.data)
            self.data.extend(array.array('B', [0] * add_bytes))
            for i in xrange(self.length-1, start+new_size-1, -1):
                self._set_element(i, self._get_element(i - delta))
        elif new_size < old_size:
            # Shrink: shift the tail left, then drop now-unused bytes.
            delta = old_size - new_size
            for i in xrange(start+new_size, self.length-delta):
                self._set_element(i, self._get_element(i + delta))
            self.length -= delta
            del_bytes = len(self.data) - (self.length + 7) // 8
            assert del_bytes <= len(self.data)
            del self.data[len(self.data)-del_bytes:]
    def __getstate__(self):
        # Pickle as (packed byte string, bit count).
        return (self.to_binary(), len(self))
    def __setstate__(self, (data, length)):
        # NOTE: tuple parameter unpacking is Python 2-only syntax.
        self.__init__()
        self[:] = BitList.from_binary(data, length)
    def to_binary(self):
        """
        Return base256_binary_str.
        """
        return self.data.tostring()
    def from_binary(base256_binary_str, num_bits):
        """
        Return new BitList from base256_binary_str and number of bits.
        """
        ans = BitList()
        if len(base256_binary_str) != (num_bits+7)//8:
            raise ValueError('invalid length')
        ans.length = int(num_bits)
        ans.data = array.array('B')
        ans.data.fromstring(base256_binary_str)
        return ans
    from_binary = staticmethod(from_binary)
    def set_bit(self, i, x):
        """
        Set bit i to x (extending to the right with zeros if needed).
        """
        i = int(i)
        if i >= len(self):
            self.extend([0] * (i + 1 - len(self)))
        self[i] = x
    def get_bit(self, i):
        """
        Get bit i (or zero if i >= len(self)).
        """
        i = int(i)
        if i >= len(self):
            return 0
        return self[i]
| {
"content_hash": "9b311efbd2174a616d20e85eb50b983e",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 67,
"avg_line_length": 27.24793388429752,
"alnum_prop": 0.5665756748559296,
"repo_name": "ActiveState/code",
"id": "8e96290d2097e26e33411236ff3213023e735077",
"size": "3297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/440658_Memory_compacted_list_of_bits/recipe-440658.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""Support for Home Assistant Cloud binary sensors."""
from __future__ import annotations
import asyncio
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import DISPATCHER_REMOTE_UPDATE, DOMAIN
WAIT_UNTIL_CHANGE = 3
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the cloud binary sensors."""
    if discovery_info is None:
        # This platform is only ever set up via discovery by the cloud
        # integration; ignore direct configuration.
        return
    async_add_entities([CloudRemoteBinary(hass.data[DOMAIN])])
class CloudRemoteBinary(BinarySensorEntity):
    """Connectivity sensor for the Cloud Remote UI connection."""

    _attr_name = "Remote UI"
    _attr_device_class = BinarySensorDeviceClass.CONNECTIVITY
    _attr_should_poll = False
    _attr_unique_id = "cloud-remote-ui-connectivity"
    _attr_entity_category = EntityCategory.DIAGNOSTIC

    def __init__(self, cloud):
        """Store the cloud object and prepare the dispatcher handle."""
        self.cloud = cloud
        self._unsub_dispatcher = None

    @property
    def is_on(self) -> bool:
        """True while the remote UI connection is established."""
        return self.cloud.remote.is_connected

    @property
    def available(self) -> bool:
        """The entity is usable only once a certificate exists."""
        return self.cloud.remote.certificate is not None

    async def async_added_to_hass(self):
        """Subscribe to remote-state updates."""

        async def _delayed_refresh(data):
            """Let the connection state settle, then push the new state."""
            await asyncio.sleep(WAIT_UNTIL_CHANGE)
            self.async_write_ha_state()

        self._unsub_dispatcher = async_dispatcher_connect(
            self.hass, DISPATCHER_REMOTE_UPDATE, _delayed_refresh
        )

    async def async_will_remove_from_hass(self):
        """Disconnect the update dispatcher."""
        unsub = self._unsub_dispatcher
        if unsub is not None:
            self._unsub_dispatcher = None
            unsub()
| {
"content_hash": "ba489f52bb84ee86c4d21e5bcf1f630f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 72,
"avg_line_length": 31.546666666666667,
"alnum_prop": 0.6906170752324599,
"repo_name": "GenericStudent/home-assistant",
"id": "5f4c715c41a763bfa7c862fdce81448177811193",
"size": "2366",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/cloud/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
# address of the remote machine (presumably the SSH/database host) -- confirm
address = ""
# your SSH username
username = ""
# your SSH password
password = ""
# the directory in which data should be stored on the remote machine before loading into database
data_dir = ""
# the port used by the database on the remote machine
port = "4200"
| {
"content_hash": "524c7dd8e490b673a28fb1ab4de91c36",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 97,
"avg_line_length": 29,
"alnum_prop": 0.7279693486590039,
"repo_name": "mmmatthew/floodx_data_preprocessing",
"id": "823e7f836a7014e635805c0652fb1387abb3cd6a",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbconfig(TEMPLATE).py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17846"
}
],
"symlink_target": ""
} |
from google.cloud import memcache_v1
def sample_update_instance():
    """Example: update a Cloud Memorystore for Memcached instance.

    Builds the desired instance state, issues the update request and
    blocks until the long-running operation finishes.
    """
    client = memcache_v1.CloudMemcacheClient()

    # Describe the desired instance state.
    instance = memcache_v1.Instance()
    instance.name = "name_value"
    instance.node_count = 1070
    instance.node_config.cpu_count = 976
    instance.node_config.memory_size_mb = 1505

    request = memcache_v1.UpdateInstanceRequest(instance=instance)

    # update_instance returns a long-running operation; wait for it.
    operation = client.update_instance(request=request)
    print("Waiting for operation to complete...")
    response = operation.result()
    print(response)
# [END memcache_v1_generated_CloudMemcache_UpdateInstance_sync]
| {
"content_hash": "8d4b68ab3d13502a5b5dd3656851102b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 63,
"avg_line_length": 25.06896551724138,
"alnum_prop": 0.7015130674002751,
"repo_name": "googleapis/python-memcache",
"id": "bfe196593045ab9dccf9fae2f44e053183e8f002",
"size": "2116",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/memcache_v1_generated_cloud_memcache_update_instance_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "680016"
},
{
"name": "Shell",
"bytes": "30666"
}
],
"symlink_target": ""
} |
from itsdangerous import URLSafeTimedSerializer
def generate_confirmation_token(email):
    """Return a signed, timestamped token embedding *email*."""
    from app import app
    signer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
    return signer.dumps(email, salt=app.config['SECURITY_PASSWORD_SALT'])
def confirm_token(token, expiration=3600):
    """Validate *token* and return the email address embedded in it.

    Returns None when the token is malformed, has a bad signature, or is
    older than *expiration* seconds.
    """
    from app import app
    serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
    try:
        email = serializer.loads(
            token,
            salt=app.config['SECURITY_PASSWORD_SALT'],
            max_age=expiration
        )
        return email
    except Exception:
        # itsdangerous raises BadSignature / SignatureExpired for invalid or
        # stale tokens. The previous bare ``except:`` also swallowed
        # KeyboardInterrupt and SystemExit; narrow it to Exception.
        return None
| {
"content_hash": "7a2027b57e0c94dcaaf884e56c0f92d9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 27.5,
"alnum_prop": 0.6710743801652893,
"repo_name": "jlopezvi/Consensus",
"id": "9f6a48c18c54694a406e5d8f352aeb02dac6e7f6",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uuid_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "70070"
},
{
"name": "HTML",
"bytes": "52461"
},
{
"name": "JavaScript",
"bytes": "169904"
},
{
"name": "Python",
"bytes": "123778"
}
],
"symlink_target": ""
} |
"""
Tests of the web browser interface (smtweb) using Selenium
"""
from __future__ import print_function
from __future__ import unicode_literals
import os
from time import sleep
from builtins import input
from unittest import SkipTest
try:
from selenium import webdriver
have_selenium = True
except ImportError:
have_selenium = False
from subprocess import PIPE
import sarge
from nose.tools import assert_equal, assert_dict_contains_subset, assert_in
import utils
from utils import (setup as default_setup, teardown as default_teardown,
run, run_test, build_command, assert_file_exists, assert_in_output,
assert_config, assert_label_equal, assert_records, assert_return_code,
edit_parameters, expected_short_list, substitute_labels)
repository = "https://bitbucket.org/apdavison/ircr2013"
#repository = "/Users/andrew/dev/ircr2013"
def modify_script(filename):
    """Return a zero-argument callable that rewrites *filename* in the
    working dir, replacing the combined bubble-size print statement with
    two separately labelled print statements."""
    target = "print(mean_bubble_size, median_bubble_size)"

    def _rewrite():
        path = os.path.join(utils.working_dir, filename)
        with open(path, 'r') as fp:
            lines = fp.readlines()
        with open(path, 'w') as fp:
            for line in lines:
                if target in line:
                    fp.write('print("Mean:", mean_bubble_size)\n')
                    fp.write('print("Median:", median_bubble_size)\n')
                else:
                    fp.write(line)
    return _rewrite
# Each entry is either a callable (executed directly by setup()) or a tuple
# of (description, command[, assertion, expected, ...]) handed to run_test().
setup_steps = [
    ("Get the example code",
     "hg clone %s ." % repository,
     assert_in_output, "updating to branch default"),
    ("Set up a Sumatra project",
     "smt init -d Data -i . -e python -m glass_sem_analysis.py --on-changed=store-diff ProjectGlass",
     assert_in_output, "Sumatra project successfully set up"),
    ("Run the ``glass_sem_analysis.py`` script with Sumatra",
     "smt run -r 'initial run' default_parameters MV_HFV_012.jpg",
     assert_in_output, ("2416.86315789 60.0", "histogram.png")),
    ("Comment on the outcome",
     "smt comment 'works fine'"),
    edit_parameters("default_parameters", "no_filter", "filter_size", 1),
    ("Run with changed parameters and user-defined label",
     "smt run -l example_label -r 'No filtering' no_filter MV_HFV_012.jpg",
     assert_in_output, "phases.png",
     assert_label_equal, "example_label"),
    ("Change parameters from the command line",
     "smt run -r 'Trying a different colourmap' default_parameters MV_HFV_012.jpg phases_colourmap=hot"),
    ("Add another comment",
     "smt comment 'The default colourmap is nicer'"),  # TODO add a comment to an older record (e.g. this colourmap is nicer than 'hot')")
    ("Add tags on the command line",
     build_command("smt tag mytag {0} {1}", "labels")),
    modify_script("glass_sem_analysis.py"),
    ("Run the modified code",
     "smt run -r 'Added labels to output' default_parameters MV_HFV_012.jpg"),
]
def setup():
    """Clone the example project, run the scripted smt steps, then launch
    smtweb and a Selenium-driven Firefox for the UI tests."""
    global server, driver
    if not have_selenium:
        raise SkipTest("Tests require Selenium")
    default_setup()
    for step in setup_steps:
        if callable(step):
            step()
        else:
            print(step[0])  # description
            run_test(*step[1:])
    server = sarge.Command("smtweb -p 8765 --no-browser", cwd=utils.working_dir,
                           stdout=sarge.Capture(), stderr=sarge.Capture())
    # ``async`` became a reserved keyword in Python 3.7, so ``run(async=True)``
    # is a SyntaxError there. Passing the keyword via dict unpacking is
    # syntactically legal and delivers the identical argument to sarge.
    server.run(**{'async': True})
    driver = webdriver.Firefox()
def teardown():
    """Shut down the browser and the smtweb server, then remove the
    temporary project created by default_setup()."""
    driver.close()
    server.terminate()
    default_teardown()
def test_start_page():
    """The home page lists exactly one project and links to its record list."""
    driver.get("http://127.0.0.1:8765")
    # on homepage
    assert_equal(driver.title, "List of projects")
    # assert there is one project, named "ProjectGlass"
    projects = driver.find_elements_by_tag_name("h3")
    assert_equal(len(projects), 1)
    assert_equal(projects[0].text, "ProjectGlass")
    # click on ProjectGlass --> record list
    projects[0].click()
    assert_equal(driver.title, "ProjectGlass: List of records")
    assert_equal(driver.current_url, "http://127.0.0.1:8765/ProjectGlass/")
def test_record_list():
    """The record list shows one row per run, newest first, with the
    expected label/reason/outcome/version cells."""
    driver.get("http://127.0.0.1:8765/ProjectGlass/")
    # assert there are four records
    rows = driver.find_elements_by_tag_name('tr')
    assert_equal(len(rows), 4 + 1) # first row is the header
    column_headers = [elem.text for elem in rows[0].find_elements_by_tag_name('th')]
    # assert the labels are correct and that the reason and outcome fields are correct
    expected_content = substitute_labels([
        {'label': 0, 'outcome': 'works fine', 'reason': 'initial run',
         'version': '6038f9c...', 'main': 'glass_sem_analysis.py'},
        {'label': 1, 'outcome': '', 'reason': 'No filtering'},
        {'label': 2, 'outcome': 'The default colourmap is nicer', 'reason': 'Trying a different colourmap'},
        {'label': 3, 'outcome': '', 'reason': 'Added labels to output', 'version': '6038f9c...*'}])(utils.env)
    # Rows are displayed newest-first, hence reversed() over the expectations.
    for row, expected in zip(rows[1:], reversed(expected_content)):
        cells = row.find_elements_by_tag_name('td')
        label = cells[0].text
        assert_equal(row.get_attribute('id'), label)
        actual = dict((key.lower(), cell.text) for key, cell in zip(column_headers, cells))
        assert_dict_contains_subset(expected, actual)
def test_column_settings_dialog():
    """The cog dialog mirrors the visible columns and can enable all of them."""
    driver.get("http://127.0.0.1:8765/ProjectGlass/")
    # test the column settings dialog
    row0 = driver.find_element_by_tag_name('tr')
    column_headers = [elem.text for elem in row0.find_elements_by_tag_name('th')]
    cog = driver.find_element_by_class_name("glyphicon-cog")
    cog.click()
    sleep(0.5)
    options = driver.find_elements_by_class_name("checkbox")
    displayed_columns = [option.text for option in options if option.find_element_by_tag_name("input").is_selected()]
    assert_equal(displayed_columns, column_headers[1:])  # can't turn off "Label" column
    # turn on all columns
    for option in options:
        checkbox = option.find_element_by_tag_name("input")
        if not checkbox.is_selected():
            checkbox.click()
    apply_button, = [elem for elem in driver.find_elements_by_tag_name("button") if elem.text == "Apply"]
    apply_button.click()
    sleep(0.5)
    # After applying, every known column should be shown in order.
    column_headers = [elem.text for elem in row0.find_elements_by_tag_name('th')]
    assert_equal(column_headers,
                 ["Label", "Date/Time", "Reason", "Outcome", "Input data", "Output data",
                  "Duration", "Processes", "Executable", "Main", "Version", "Arguments", "Tags"])
def test_comparison_view():
    """Comparing requires at least two selected records; selecting two and
    clicking Compare navigates to the comparison page."""
    driver.get("http://127.0.0.1:8765/ProjectGlass/")
    # test that "Compare selected" gives an error message with no records selected
    alert = driver.find_element_by_id("alert")
    assert not alert.is_displayed()
    compare_button, = [elem for elem in driver.find_elements_by_tag_name("button") if "Compare" in elem.text]
    compare_button.click()
    sleep(0.5)
    assert alert.is_displayed()
    assert "Need at least two records to compare" in alert.text
    alert.click()
    sleep(0.5)
    assert not alert.is_displayed()
    # select two records and click on compare selected
    rows = driver.find_elements_by_tag_name('tr')
    target_records = utils.env["labels"][::2]
    for row in rows[1:]:
        if row.get_attribute("id") in target_records:
            row.click()
    # scroll back to the top of the screen
    driver.execute_script("window.scrollTo(0, 0)")
    compare_button.click()
    # assert go to comparison page
    assert_in("compare", driver.current_url)
def test_data_detail_view():
    """The newest record's detail page shows the starred code version and
    the run reason in its definition list."""
    driver.get("http://127.0.0.1:8765/ProjectGlass/")
    rows = driver.find_elements_by_tag_name('tr')
    # First data row is the most recent record (last label created).
    rows[1].find_element_by_tag_name('td').find_element_by_tag_name('a').click()
    assert_equal(driver.current_url, "http://127.0.0.1:8765/ProjectGlass/{}/".format(utils.env["labels"][-1]))
    dl = driver.find_element_by_tag_name('dl')
    # Zip <dt> terms with <dd> values into a plain dict for the checks.
    general_attributes = dict(zip((item.text for item in dl.find_elements_by_tag_name("dt")),
                                  (item.text for item in dl.find_elements_by_tag_name("dd"))))
    assert_equal(general_attributes["Code version:"], '6038f9c500d1* (diff)')
    assert_in("Added labels to output", general_attributes["Reason:"])
if __name__ == '__main__':
    # Run the tests without using Nose.
    setup()
    try:
        test_start_page()
        test_record_list()
        test_column_settings_dialog()
        test_comparison_view()
        test_data_detail_view()
        # test filter by tags
        # test editing reason
        # test "Add outcome" button
        # test deleting records
    except Exception as err:
        print(err)
    # Give the user a chance to inspect the temp project before cleanup.
    response = input("Do you want to delete the temporary directory (default: yes)? ")
    if response not in ["n", "N", "no", "No"]:
        teardown()
    else:
        print("Temporary directory %s not removed" % utils.temporary_dir)
"content_hash": "3550bb7bbb61cde6cd5024a0fbaec49e",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 139,
"avg_line_length": 40.14414414414414,
"alnum_prop": 0.6411579892280072,
"repo_name": "open-research/sumatra",
"id": "54ce96be99439fa28669a83c3a280a9497bb5833",
"size": "8912",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/system/test_webui.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "8168"
},
{
"name": "HTML",
"bytes": "104980"
},
{
"name": "JavaScript",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "581902"
},
{
"name": "R",
"bytes": "3325"
},
{
"name": "Shell",
"bytes": "35759"
},
{
"name": "TeX",
"bytes": "7153"
}
],
"symlink_target": ""
} |
import inspect
import io
import keyword
import pydoc
from collections import namedtuple
from six.moves import range
from pygments.token import Token
from bpython._py3compat import PythonLexer, py3
from bpython.lazyre import LazyReCompile
if not py3:
import types
# Matches a bare Python identifier (used to vet dotted names on Python 2).
_name = LazyReCompile(r'[a-zA-Z_]\w*$')

# Normalised argspec tuple covering both py2 getargspec and py3
# getfullargspec results (see getfuncprops below).
ArgSpec = namedtuple('ArgSpec', ['args', 'varargs', 'varkwargs', 'defaults',
                                 'kwonly', 'kwonly_defaults', 'annotations'])
FuncProps = namedtuple('FuncProps', ['func', 'argspec', 'is_bound_method'])
class AttrCleaner(object):
    """A context manager that tries to make an object not exhibit side-effects
    on attribute lookup.

    It temporarily replaces the object's type's __getattr__ /
    __getattribute__ with inert versions and restores them on exit.
    """
    def __init__(self, obj):
        self.obj = obj
    def __enter__(self):
        """Try to make an object not exhibit side-effects on attribute
        lookup."""
        type_ = type(self.obj)
        __getattribute__ = None
        __getattr__ = None
        # Dark magic:
        # If __getattribute__ doesn't exist on the class and __getattr__ does
        # then __getattr__ will be called when doing
        # getattr(type_, '__getattribute__', None)
        # so we need to first remove the __getattr__, then the
        # __getattribute__, then look up the attributes and then restore the
        # original methods. :-(
        # The upshot being that introspecting on an object to display its
        # attributes will avoid unwanted side-effects.
        if py3 or type_ != types.InstanceType:
            # Old-style Python 2 instances (types.InstanceType) are skipped.
            __getattr__ = getattr(type_, '__getattr__', None)
            if __getattr__ is not None:
                try:
                    setattr(type_, '__getattr__', (lambda *_, **__: None))
                except TypeError:
                    __getattr__ = None
            __getattribute__ = getattr(type_, '__getattribute__', None)
            if __getattribute__ is not None:
                try:
                    setattr(type_, '__getattribute__', object.__getattribute__)
                except TypeError:
                    # XXX: This happens for e.g. built-in types
                    __getattribute__ = None
        # Saved originals (possibly None) for __exit__ to restore.
        self.attribs = (__getattribute__, __getattr__)
        # /Dark magic
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Restore an object's magic methods."""
        type_ = type(self.obj)
        __getattribute__, __getattr__ = self.attribs
        # Dark magic:
        if __getattribute__ is not None:
            setattr(type_, '__getattribute__', __getattribute__)
        if __getattr__ is not None:
            setattr(type_, '__getattr__', __getattr__)
        # /Dark magic
class _Repr(object):
"""
Helper for `fixlongargs()`: Returns the given value in `__repr__()`.
"""
def __init__(self, value):
self.value = value
def __repr__(self):
return self.value
__str__ = __repr__
def parsekeywordpairs(signature):
    """Map keyword-argument names to the source text of their defaults.

    *signature* is the source of a function definition; returns a dict
    {argument_name: default_value_source}.
    """
    tokens = PythonLexer().get_tokens(signature)
    preamble = True
    stack = []
    substack = []
    parendepth = 0
    for token, value in tokens:
        if preamble:
            # Skip everything up to the '(' opening the parameter list.
            if token is Token.Punctuation and value == u"(":
                preamble = False
            continue
        if token is Token.Punctuation:
            if value in [u'(', u'{', u'[']:
                parendepth += 1
            elif value in [u')', u'}', u']']:
                parendepth -= 1
            elif value == ':' and parendepth == -1:
                # End of signature reached
                break
        if ((value == ',' and parendepth == 0) or
            (value == ')' and parendepth == -1)):
            # One complete parameter collected; start the next one.
            stack.append(substack)
            substack = []
            continue
        if value and (parendepth > 0 or value.strip()):
            substack.append(value)
    d = {}
    for item in stack:
        if len(item) >= 3:
            # item looks like [name, '=', default-token, ...].
            d[item[0]] = ''.join(item[2:])
    return d
def fixlongargs(f, argspec):
    """Functions taking default arguments that are references to other objects
    whose str() is too big will cause breakage, so we swap out the object
    itself with the name it was referenced with in the source by parsing the
    source itself !"""
    if argspec[3] is None:
        # No keyword args, no need to do anything
        return
    values = list(argspec[3])
    if not values:
        return
    # Defaults align with the last len(values) positional argument names.
    keys = argspec[0][-len(values):]
    try:
        src = inspect.getsourcelines(f)
    except (IOError, IndexError):
        # IndexError is raised in inspect.findsource(), can happen in
        # some situations. See issue #94.
        return
    signature = ''.join(src[0])
    kwparsed = parsekeywordpairs(signature)
    for i, (key, value) in enumerate(zip(keys, values)):
        if len(repr(value)) != len(kwparsed[key]):
            # repr() differs from the source text: substitute a proxy
            # object whose repr() is the original source expression.
            values[i] = _Repr(kwparsed[key])
    # argspec is a list here (see getfuncprops), so item assignment works.
    argspec[3] = values
getpydocspec_re = LazyReCompile(r'([a-zA-Z_][a-zA-Z0-9_]*?)\((.*?)\)')


def getpydocspec(f, func):
    """Fall back to parsing an argspec out of f's rendered pydoc.

    Returns an ArgSpec or None if no signature matching f's name can be
    found in the documentation.
    """
    try:
        doc = pydoc.getdoc(f)
    except NameError:
        return None

    match = getpydocspec_re.search(doc)
    if match is None:
        return None
    if not hasattr(f, '__name__') or match.groups()[0] != f.__name__:
        return None

    args = list()
    defaults = list()
    varargs = varkwargs = None
    kwonly_args = list()
    kwonly_defaults = dict()
    for raw_arg in match.group(2).split(','):
        raw_arg = raw_arg.strip()
        if raw_arg.startswith('**'):
            varkwargs = raw_arg[2:]
        elif raw_arg.startswith('*'):
            varargs = raw_arg[1:]
        else:
            name, _, default = raw_arg.partition('=')
            if varargs is not None:
                # Anything after *args is keyword-only.
                kwonly_args.append(name)
                if default:
                    kwonly_defaults[name] = default
            else:
                args.append(name)
                if default:
                    defaults.append(default)
    return ArgSpec(args, varargs, varkwargs, defaults, kwonly_args,
                   kwonly_defaults, None)
def getfuncprops(func, f):
    """Return FuncProps(func, argspec, is_bound_method) for callable *f*,
    or None if no argument spec can be determined."""
    # Check if it's a real bound method or if it's implicitly calling __init__
    # (i.e. FooClass(...) and not FooClass.__init__(...) -- the former would
    # not take 'self', the latter would:
    try:
        func_name = getattr(f, '__name__', None)
    except:
        # if calling foo.__name__ would result in an error
        func_name = None
    try:
        is_bound_method = ((inspect.ismethod(f) and f.__self__ is not None)
                           or (func_name == '__init__' and not
                               func.endswith('.__init__')))
    except:
        # if f is a method from a xmlrpclib.Server instance, func_name ==
        # '__init__' throws xmlrpclib.Fault (see #202)
        return None
    try:
        if py3:
            argspec = inspect.getfullargspec(f)
        else:
            argspec = inspect.getargspec(f)
        argspec = list(argspec)
        fixlongargs(f, argspec)
        if len(argspec) == 4:
            # Pad a py2 4-tuple argspec out to the 7 ArgSpec fields.
            argspec = argspec + [list(), dict(), None]
        argspec = ArgSpec(*argspec)
        fprops = FuncProps(func, argspec, is_bound_method)
    except (TypeError, KeyError):
        # inspect failed (e.g. builtins); fall back to parsing the pydoc,
        # guarding attribute access against side-effects.
        with AttrCleaner(f):
            argspec = getpydocspec(f, func)
        if argspec is None:
            return None
        if inspect.ismethoddescriptor(f):
            argspec.args.insert(0, 'obj')
        fprops = FuncProps(func, argspec, is_bound_method)
    return fprops
def is_eval_safe_name(string):
    """Return True if *string* is a dotted chain of plain identifiers
    containing no keywords (i.e. safe to evaluate for lookup)."""
    if py3:
        def _part_ok(part):
            return part.isidentifier() and not keyword.iskeyword(part)
    else:
        def _part_ok(part):
            return _name.match(part) and not keyword.iskeyword(part)
    return all(_part_ok(part) for part in string.split('.'))
def is_callable(obj):
    """Return True if *obj* can be called like a function."""
    return callable(obj)
get_encoding_re = LazyReCompile(r'coding[:=]\s*([-\w.]+)')


def get_encoding(obj):
    """Return the source encoding declared for *obj* (PEP 263), falling
    back to 'ascii' when no coding comment is present."""
    source_lines = inspect.findsource(obj)[0]
    # A coding declaration may only appear on the first two lines.
    for line in source_lines[:2]:
        match = get_encoding_re.search(line)
        if match:
            return match.group(1)
    return 'ascii'
def get_encoding_file(fname):
    """Return the source encoding declared in the file *fname* (PEP 263),
    falling back to 'ascii' when no coding comment is found."""
    with io.open(fname, 'rt', encoding='ascii', errors='ignore') as f:
        # Only the first two lines may carry a coding declaration.
        for _ in (0, 1):
            match = get_encoding_re.search(f.readline())
            if match:
                return match.group(1)
    return 'ascii'
# On Python 3 inspect.getsource already returns text; on Python 2 it
# returns bytes that must be decoded using the file's declared encoding.
if py3:
    def get_source_unicode(obj):
        """Returns a decoded source of object"""
        return inspect.getsource(obj)
else:
    def get_source_unicode(obj):
        """Returns a decoded source of object"""
        return inspect.getsource(obj).decode(get_encoding(obj))
| {
"content_hash": "a3dadd1c99f97f0e99bab07b00dd3bff",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 79,
"avg_line_length": 31.36200716845878,
"alnum_prop": 0.5516571428571428,
"repo_name": "wevial/bpython",
"id": "0dfb4feffd277331cadef41526fff9d5878dd330",
"size": "9936",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bpython/inspection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "514140"
},
{
"name": "Shell",
"bytes": "1209"
}
],
"symlink_target": ""
} |
import unittest
from test import test_support
import os
import subprocess
MacOS = test_support.import_module('MacOS')
TESTFN2 = test_support.TESTFN + '2'
class TestMacOS(unittest.TestCase):
    """Tests for the deprecated MacOS extension module.

    Requires OS X with the /Developer command line tools; tests that need
    those tools are skipped (not silently passed) when they are missing.
    """

    def testGetCreatorAndType(self):
        """GetCreatorAndType returns the type/creator codes set by SetFile."""
        if not os.path.exists('/Developer/Tools/SetFile'):
            # Previously a bare ``return``, which reported a false pass.
            self.skipTest('/Developer/Tools/SetFile is not installed')
        try:
            fp = open(test_support.TESTFN, 'w')
            fp.write('\n')
            fp.close()
            subprocess.call(
                ['/Developer/Tools/SetFile', '-t', 'ABCD', '-c', 'EFGH',
                 test_support.TESTFN])
            cr, tp = MacOS.GetCreatorAndType(test_support.TESTFN)
            # assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(tp, 'ABCD')
            self.assertEqual(cr, 'EFGH')
        finally:
            os.unlink(test_support.TESTFN)

    def testSetCreatorAndType(self):
        """SetCreatorAndType round-trips via MacOS and the GetFileInfo tool."""
        if not os.path.exists('/Developer/Tools/GetFileInfo'):
            self.skipTest('/Developer/Tools/GetFileInfo is not installed')
        try:
            fp = open(test_support.TESTFN, 'w')
            fp.write('\n')
            fp.close()
            MacOS.SetCreatorAndType(test_support.TESTFN,
                                    'ABCD', 'EFGH')
            cr, tp = MacOS.GetCreatorAndType(test_support.TESTFN)
            self.assertEqual(cr, 'ABCD')
            self.assertEqual(tp, 'EFGH')
            # Cross-check through the external GetFileInfo tool.
            data = subprocess.Popen(["/Developer/Tools/GetFileInfo", test_support.TESTFN],
                                    stdout=subprocess.PIPE).communicate()[0]
            tp = None
            cr = None
            for ln in data.splitlines():
                if ln.startswith('type:'):
                    tp = ln.split()[-1][1:-1]
                if ln.startswith('creator:'):
                    cr = ln.split()[-1][1:-1]
            self.assertEqual(cr, 'ABCD')
            self.assertEqual(tp, 'EFGH')
        finally:
            os.unlink(test_support.TESTFN)

    def testOpenRF(self):
        """openrf writes/reads the resource fork, leaving the data fork intact."""
        try:
            fp = open(test_support.TESTFN, 'w')
            fp.write('hello world\n')
            fp.close()
            rfp = MacOS.openrf(test_support.TESTFN, '*wb')
            rfp.write('goodbye world\n')
            rfp.close()
            # Data fork is unchanged...
            fp = open(test_support.TESTFN, 'r')
            data = fp.read()
            fp.close()
            self.assertEqual(data, 'hello world\n')
            # ...while the resource fork holds the new content.
            rfp = MacOS.openrf(test_support.TESTFN, '*rb')
            data = rfp.read(100)
            data2 = rfp.read(100)
            rfp.close()
            self.assertEqual(data, 'goodbye world\n')
            self.assertEqual(data2, '')
        finally:
            os.unlink(test_support.TESTFN)
def test_main():
    # Entry point used by CPython's regrtest framework.
    test_support.run_unittest(TestMacOS)
if __name__ == '__main__':
    test_main()
| {
"content_hash": "bb41ed89886bc5495829e17f12f803f2",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 90,
"avg_line_length": 27.275510204081634,
"alnum_prop": 0.5170220725776281,
"repo_name": "MalloyPower/parsing-python",
"id": "465d3042a20719cdd07057ecc0d6f5023174cfc7",
"size": "2673",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.7/Lib/test/test_macos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import GafferUI
from Qt import QtGui
from Qt import QtWidgets
# The pointer is specified via the C++ Pointer class, but to actually change
# the qt pointer we use this python code which is triggered by Pointer::changedSignal().
__cursorOverridden = False
def __pointerChanged() :
	"""Apply the current Gaffer pointer to Qt's application-wide override cursor."""

	global __cursorOverridden

	app = QtWidgets.QApplication.instance()
	pointer = GafferUI.Pointer.getCurrent()

	if pointer is not None :
		pixmap = GafferUI.Image._qtPixmapFromImagePrimitive( pointer.image() )
		qtCursor = QtGui.QCursor( pixmap, pointer.hotspot().x, pointer.hotspot().y )
		if __cursorOverridden :
			# Already overriding : swap the cursor in place.
			app.changeOverrideCursor( qtCursor )
		else :
			app.setOverrideCursor( qtCursor )
			__cursorOverridden = True
	elif __cursorOverridden :
		app.restoreOverrideCursor()
		__cursorOverridden = False
# Keep the Qt override cursor in sync for the lifetime of the module.
__changedConnection = GafferUI.Pointer.changedSignal().connect( __pointerChanged )
| {
"content_hash": "53c4ede3c857c7ec25a0b4ab23a52d59",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 30.5,
"alnum_prop": 0.7562841530054645,
"repo_name": "appleseedhq/gaffer",
"id": "b22ad28dbcd0ba571b2d701fb91d6837d5d5b4e0",
"size": "2718",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python/GafferUI/_Pointer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39910"
},
{
"name": "C++",
"bytes": "7337901"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7531988"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class OutlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``scattercarpet.marker.colorbar.outlinewidth``."""

    def __init__(
        self,
        plotly_name="outlinewidth",
        parent_name="scattercarpet.marker.colorbar",
        **kwargs
    ):
        # Fill in defaults only when the caller did not supply them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(OutlinewidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "2c64f71f50acde7e70545c89b3608ea1",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 31.11111111111111,
"alnum_prop": 0.5803571428571429,
"repo_name": "plotly/python-api",
"id": "2e3c6b763b584c267792d07d7afbe7ce8ada3cea",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/_outlinewidth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from datetime import timedelta, datetime
from celery import shared_task
from django.db import transaction
from django.db.models import Q, Count
from django.utils import timezone
from bestiary.models import Monster
from .models import DungeonLog, RiftRaidLog, WorldBossLog, StatisticsReport
from .reports.generate import generate_dungeon_log_reports, generate_magic_box_crafting_reports, generate_rift_raid_reports, generate_rift_dungeon_reports, generate_shop_refresh_reports, generate_summon_reports, generate_wish_reports, \
generate_world_boss_dungeon_reports, generate_rune_crafting_reports
from herders.models import Summoner
import time
@shared_task
def generate_all_reports():
    """Run every log-report generator in sequence as a single Celery task."""
    # Kept in the original execution order.
    report_generators = (
        generate_dungeon_log_reports,
        generate_rift_raid_reports,
        generate_rift_dungeon_reports,
        generate_world_boss_dungeon_reports,
        generate_shop_refresh_reports,
        generate_magic_box_crafting_reports,
        generate_wish_reports,
        generate_summon_reports,
        generate_rune_crafting_reports,
    )
    for generate in report_generators:
        generate()
@shared_task
def clean_incomplete_logs():
    """Delete logs older than one day that captured a start event but never a
    result event, and return the per-model delete() results."""
    stale = Q(timestamp__lte=timezone.now() - timedelta(days=1))

    # (model, field that is only populated once a result event arrives)
    incomplete_markers = (
        (DungeonLog, 'success'),
        (RiftRaidLog, 'success'),
        (WorldBossLog, 'grade'),
    )

    result = {}
    for model, result_field in incomplete_markers:
        qs = model.objects.filter(stale, **{f'{result_field}__isnull': True})
        result[model.__name__] = qs.delete()
    return result
def _generate_monster_statistic_report(start_date, monster, server, is_rta, min_box_6stars, profiles):
    """Create one StatisticsReport row for (monster, server, is_rta,
    min_box_6stars) and populate it from the given profiles queryset.

    Runs inside a single transaction; prints simple timing diagnostics.
    """
    with transaction.atomic():
        sr = StatisticsReport.objects.create(
            start_date=start_date,
            monster=monster,
            server=server,
            is_rta=is_rta,
            min_box_6stars=min_box_6stars,
            report={},
        )

        t0 = time.time()
        instances = sr.monsterinstances(profiles, filter_by_date=False)
        print(f"\tMI: {round(time.time() - t0, 2)}")
        print(f"\tMI (Count): {len(instances)}")

        t0 = time.time()
        sr.generate_report(instances)
        print(f"\tR: {round(time.time() - t0, 2)}")
        print(f"\tReport #{sr.pk} for [{len(instances)}] {sr.monster} generated from {start_date} to {timezone.now().date()}")
@shared_task
def generate_statistics_reports():
    """Generate a StatisticsReport for every obtainable awakened monster,
    for each enabled (server, is_rta, min_box_6stars) combination.

    Profiles are limited to consenting summoners updated within the report
    window (trailing 180 days).
    """
    # 180d earlier
    start_date = (timezone.now() - timedelta(days=180)).date()
    # servers = [None] + list(dict(Summoner.SERVER_CHOICES).keys())
    servers = [None]
    monsters = Monster.objects.filter(awaken_level__in=[Monster.AWAKEN_LEVEL_AWAKENED, Monster.AWAKEN_LEVEL_SECOND], obtainable=True)
    # is_rta_options = [False, True]
    is_rta_options = [False]
    # min_box_6stars_list = [0, 50, 100, 200]
    min_box_6stars_list = [0, 200]
    profiles = Summoner.objects\
        .filter(consent_report__isnull=False, last_update__date__gte=start_date)\
        .prefetch_related('monsterinstance')\
        .select_related('monsterinstance__defaultbuild', 'monsterinstance__rtabuild')
    for server in servers:
        server_profiles = profiles
        if server:
            server_profiles = server_profiles.filter(server=server)
        for min_box_6stars in min_box_6stars_list:
            # BUG FIX: derive the box-size filter from the per-server base
            # queryset on every iteration. Previously the filter was applied
            # to the running queryset, so thresholds stacked across loop
            # iterations and a second non-zero threshold would call
            # annotate('stars6') on an already-annotated queryset, which
            # raises a ValueError (alias conflict) in Django.
            profiles_f = server_profiles
            if min_box_6stars:
                profiles_f = server_profiles.annotate(stars6=Count('monsterinstance__stars')).filter(stars6__gte=min_box_6stars).distinct()
            for monster in monsters:
                for is_rta in is_rta_options:
                    print(f"========================================")
                    print(f"\tMonster: {monster}")
                    print(f"\tStart date: {start_date}")
                    print(f"\tServer: {server}")
                    print(f"\tRTA: {is_rta}")
                    print(f"\tMin 6* in box: {min_box_6stars}")
                    print(f"\tProfiles: {profiles_f.count()}")
                    _generate_monster_statistic_report(start_date, monster, server, is_rta, min_box_6stars, profiles_f)
| {
"content_hash": "d7bdea58e3c379167e37330c5377da95",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 236,
"avg_line_length": 42,
"alnum_prop": 0.645983645983646,
"repo_name": "porksmash/swarfarm",
"id": "39261dfa3c56618989795189638638bbcd1dcb68",
"size": "4158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_log/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29358"
},
{
"name": "HTML",
"bytes": "349774"
},
{
"name": "JavaScript",
"bytes": "80827"
},
{
"name": "Python",
"bytes": "932930"
},
{
"name": "Shell",
"bytes": "3018"
}
],
"symlink_target": ""
} |
"""Test for deprecation of qiskit.test.mock module."""
from qiskit.test import QiskitTestCase
class MockModuleDeprecationTest(QiskitTestCase):
    """Test for deprecation of qiskit.test.mock module."""

    def test_deprecated_mock_module(self):
        """Test that the mock module is deprecated."""
        # pylint: disable=unused-import,no-name-in-module
        # Each import path into the deprecated shim must raise a
        # DeprecationWarning: the package itself, the backends subpackage,
        # and a concrete backend module.
        # NOTE(review): module-level warnings typically fire only on first
        # import (modules are cached in sys.modules), so these three checks
        # likely rely on each shim warning independently — confirm before
        # reordering or deduplicating.
        with self.assertWarns(DeprecationWarning):
            from qiskit.test.mock import FakeWashington
        with self.assertWarns(DeprecationWarning):
            from qiskit.test.mock.backends import FakeWashington
        with self.assertWarns(DeprecationWarning):
            from qiskit.test.mock.backends.washington import FakeWashington
| {
"content_hash": "4c6a3ceb669d4f19b6983f2a7bfe6940",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 44.4375,
"alnum_prop": 0.720112517580872,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "d2355c84717bb357054783ed4b4140ad0bbf9543",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/python/mock/test_mock_module_deprecation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
import csv
from os import listdir
from os.path import join
base_dir = '/home/matt/DL/Autonomous_Driving/Udacity/CarND-Behavioral-Cloning/data/IMG'
# NOTE(review): img_files is never used below — kept so the script still fails
# fast if data/IMG/ is missing, but it looks like leftover code.
img_files = listdir('data/IMG/')

# Copy the first 11 rows of the cleaned driving log into a small test log,
# rewriting each image path to an absolute path under base_dir and keeping
# only the steering value (row[1]).
# BUG FIX: csv output files must be opened with newline='' — without it the
# csv module emits an extra blank row after every record on Windows.
with open('data/driving_log_test.csv', 'w', newline='') as wf:
    with open('data/driving_log_clean.csv', 'r') as rf:
        reader = csv.reader(rf)
        writer = csv.writer(wf)
        for i, row in enumerate(reader):
            if i > 10:
                break
            rel_path = row[0].split('/')[-1]
            real_path = join(base_dir, rel_path)
            writer.writerow([real_path, row[1]])
| {
"content_hash": "5d1c42a52116d91a553d6195d2bfd0de",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 87,
"avg_line_length": 33.35294117647059,
"alnum_prop": 0.6102292768959435,
"repo_name": "dyelax/CarND-Behavioral-Cloning",
"id": "d118bbc92b77333a6cadfc1061dca83479ca71c4",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11038"
}
],
"symlink_target": ""
} |
import os
from collections import defaultdict
from helper_functions import sortby, Make_folder_if_not_exists, load_fasta, convert_to_bases, clean_files
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Make Outgroup
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------
def make_out_group(individuals_input, bedfile, vcffiles, outputfile, ancestralfiles, refgenomefiles):
    """Build a sorted outgroup variant table from one or more VCF files.

    For each VCF, streams allele counts for the outgroup individuals through
    bcftools/vcftools, writes one row per kept site (chrom, pos, ref and alt
    allele:count strings, inferred ancestral base) to ``outputfile``, sorted
    by chromosome (via ``sortby``) then position.

    Requires ``bcftools`` and ``vcftools`` on PATH.  ``vcffiles``,
    ``ancestralfiles`` and ``refgenomefiles`` are iterated in lockstep;
    entries of the latter two may be None.
    """
    Make_folder_if_not_exists(outputfile)
    outgroup_individuals = ','.join(individuals_input)
    with open(outputfile + '.unsorted', 'w') as out:
        print('chrom', 'pos', 'ref_allele_info', 'alt_allele_info', 'ancestral_base', sep = '\t', file = out)
        for vcffile, ancestralfile, reffile in zip(vcffiles, ancestralfiles, refgenomefiles):
            if ancestralfile is not None:
                # Per-chromosome ancestral sequence, indexed by 0-based position.
                ancestral_allele = load_fasta(ancestralfile)
            # Restrict to callable regions when a BED file is given.
            if bedfile is not None:
                command = f'bcftools view -s {outgroup_individuals} -T {bedfile} {vcffile} | bcftools norm -m -any | bcftools view -v snps | vcftools --vcf - --counts --stdout'
            else:
                command = f'bcftools view -s {outgroup_individuals} {vcffile} | bcftools norm -m -any | bcftools view -v snps | vcftools --vcf - --counts --stdout'
            print(f'Processing {vcffile}...')
            print('Running command:')
            print(command, '\n\n')
            # 0-based positions already emitted for this VCF (used below to
            # avoid duplicating fixed derived sites from the reference pass).
            variants_seen = defaultdict(int)
            for index, line in enumerate(os.popen(command)):
                if not line.startswith('CHROM'):
                    # vcftools --counts columns: CHROM POS N_ALLELES N_CHR {ALLELE:COUNT}...
                    chrom, pos, _, _, ref_allele_info, alt_allele_info = line.strip().split()
                    ref_allele, ref_count = ref_allele_info.split(':')
                    alt_allele, alt_count = alt_allele_info.split(':')
                    pos, ref_count, alt_count = int(pos), int(ref_count), int(alt_count)
                    # Always include polymorphic sites
                    if alt_count * ref_count > 0:
                        # Default ancestral call: the majority allele.
                        ancestral_base = ref_allele if ref_count > alt_count else alt_allele
                        # Use ancestral base info if available
                        if ancestralfile is not None:
                            ancestral_base_temp = ancestral_allele[pos-1]
                            if ancestral_base_temp in [ref_allele, alt_allele]:
                                ancestral_base = ancestral_base_temp
                        print(chrom, pos, ref_allele_info, alt_allele_info, ancestral_base, sep = '\t', file = out)
                        variants_seen[pos-1] = 1
                    # Fixed sites
                    elif alt_count * ref_count == 0:
                        ancestral_base = ref_allele if ref_count > alt_count else alt_allele
                        # Use ancestral base info if available
                        if ancestralfile is not None:
                            ancestral_base_temp = ancestral_allele[pos-1]
                            if ancestral_base_temp in [ref_allele, alt_allele]:
                                ancestral_base = ancestral_base_temp
                        # Only keep fixed sites where the fixed allele is derived.
                        if ancestral_base == alt_allele:
                            derived_count = ref_count
                        else:
                            derived_count = alt_count
                        if derived_count > 0:
                            print(chrom, pos, ref_allele_info, alt_allele_info, ancestral_base, sep = '\t', file = out)
                            variants_seen[pos-1] = 1
                if index % 100000 == 0:
                    # NOTE(review): on the very first (header) line chrom/pos
                    # are not yet bound — relies on the header being line 0
                    # only when data follows immediately; confirm.
                    print(f'at line {index} at chrom {chrom} and position {pos}')
            # If reference genome is provided then remove positions where the reference and ancestral differ AND which is not found in the outgroup
            if reffile is not None and ancestralfile is not None:
                print('Find fixed derived sites')
                refgenome_allele = load_fasta(reffile)
                for index, (refbase, ancbase) in enumerate(zip(refgenome_allele, ancestral_allele)):
                    if ancbase in 'ACGT' and refbase in 'ACGT':
                        if refbase != ancbase and variants_seen[index] == 0:
                            # NOTE(review): `chrom` here is the last chromosome
                            # seen in the VCF loop above — correct only for
                            # single-chromosome VCFs; verify.
                            print(chrom, index + 1, f'{refbase}:100', f'{ancbase}:0', ancbase, sep = '\t', file = out)
    # Sort outgroup file
    print('Sorting outgroup file')
    positions_to_sort = defaultdict(lambda: defaultdict(str))
    with open(outputfile + '.unsorted') as data, open(outputfile, 'w') as out:
        for line in data:
            if line.startswith('chrom'):
                # Header line passes straight through.
                out.write(line)
            else:
                chrom, pos = line.strip().split()[0:2]
                positions_to_sort[chrom][int(pos)] = line
        for chrom in sorted(positions_to_sort, key=sortby):
            for pos in sorted(positions_to_sort[chrom]):
                line = positions_to_sort[chrom][pos]
                out.write(line)
    # Clean log files generated by vcf and bcf tools
    clean_files(outputfile + '.unsorted')
    clean_files('out.log')
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Make ingroup
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------
def make_ingroup_obs(ingroup_individuals, bedfile, vcffiles, outprefix, outgroupfile, ancestralfiles):
    """Write per-individual observation files of derived/heterozygous SNPs.

    Streams biallelic SNPs for the ingroup individuals through
    bcftools/vcftools, excluding positions already present in
    ``outgroupfile``, and writes one ``{outprefix}.{individual}.txt`` file
    per individual with columns: chrom, pos, ancestral_base, genotype.

    Requires ``bcftools`` and ``vcftools`` on PATH.  ``vcffiles`` and
    ``ancestralfiles`` are iterated in lockstep; ancestral entries may be
    None, in which case only heterozygous sites are emitted.
    """
    # handle output files
    Make_folder_if_not_exists(outprefix)
    outfile_handler = defaultdict(str)
    for individual in ingroup_individuals:
        outfile_handler[individual] = open(f'{outprefix}.{individual}.txt','w')
        print('chrom', 'pos', 'ancestral_base', 'genotype', sep = '\t', file = outfile_handler[individual])
    individuals_for_bcf = ','.join(ingroup_individuals)
    for vcffile, ancestralfile in zip(vcffiles, ancestralfiles):
        if ancestralfile is not None:
            # Per-chromosome ancestral sequence, indexed by 0-based position.
            ancestral_allele = load_fasta(ancestralfile)
        # -m2 -M2 -v snps: keep biallelic SNPs only; exclude outgroup positions.
        if bedfile is not None:
            command = f'bcftools view -m2 -M2 -v snps -s {individuals_for_bcf} -T {bedfile} {vcffile} | vcftools --vcf - --exclude-positions {outgroupfile} --recode --stdout'
        else:
            command = f'bcftools view -m2 -M2 -v snps -s {individuals_for_bcf} {vcffile} | vcftools --vcf - --exclude-positions {outgroupfile} --recode --stdout'
        print('Running command:')
        print(command, '\n\n')
        for index, line in enumerate(os.popen(command)):
            if line.startswith('#CHROM'):
                # Column order of sample genotypes in this VCF.
                # NOTE(review): assumed to appear before any data line; verify.
                individuals_in_vcffile = line.strip().split()[9:]
            if not line.startswith('#'):
                chrom, pos, _, ref_allele, alt_allele = line.strip().split()[0:5]
                pos = int(pos)
                # Keep only the GT field of each sample column.
                genotypes = [x.split(':')[0] for x in line.strip().split()[9:]]
                if ref_allele in 'ACGT' and alt_allele in 'ACGT':
                    for original_genotype, individual in zip(genotypes, individuals_in_vcffile):
                        ref_count = original_genotype.count('0')
                        alt_count = original_genotype.count('1')
                        genotype = convert_to_bases(original_genotype, ref_allele, alt_allele)
                        if ancestralfile is not None:
                            # With ancestral information look for derived alleles
                            ancestral_base = ancestral_allele[pos-1]
                            if ancestral_base in [ref_allele, alt_allele]:
                                derived_count = genotype.count(alt_allele) if ancestral_base == ref_allele else genotype.count(ref_allele)
                                if derived_count > 0:
                                    print(chrom, pos, ancestral_base, genotype, sep = '\t', file = outfile_handler[individual])
                        else:
                            # If no ancestral information is provided only include heterozygous variants
                            if alt_count * ref_count > 0:
                                ancestral_base = ref_allele
                                print(chrom, pos, ancestral_base, genotype, sep = '\t', file = outfile_handler[individual])
            if index % 100000 == 0:
                # NOTE(review): chrom/pos are unbound until the first data
                # line has been parsed; relies on header lines coming first.
                print(f'at line {index} at chrom {chrom} and position {pos}')
    # Clean log files generated by vcf and bcf tools
    clean_files('out.log')
    for individual in ingroup_individuals:
        outfile_handler[individual].close()
| {
"content_hash": "490ad0f61df50a31124f8f644eab504d",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 176,
"avg_line_length": 48.642857142857146,
"alnum_prop": 0.509770699198012,
"repo_name": "LauritsSkov/Introgression-detection",
"id": "8efa6bac79acfc79e1e04cad4ef3f944e8906e10",
"size": "8853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bcf_vcf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60977"
}
],
"symlink_target": ""
} |
def verse(i):
    """Return verse *i* of "99 Bottles of Beer", newline-terminated.

    Verses 0 and 1 have special wording; all other verses pluralize
    "bottle" correctly for the following count.
    """
    if i == 0:
        return ("No more bottles of beer on the wall, no more bottles of beer.\n"
                "Go to the store and buy some more, "
                "99 bottles of beer on the wall.\n")
    if i == 1:
        return ("1 bottle of beer on the wall, 1 bottle of beer.\n"
                "Take it down and pass it around, "
                "no more bottles of beer on the wall.\n")
    # "1 bottle" is the only singular case in the closing line.
    suffix = "" if i - 1 == 1 else "s"
    return (f"{i} bottles of beer on the wall, {i} bottles of beer.\n"
            f"Take one down and pass it around, "
            f"{i-1} bottle{suffix} of beer on the wall.\n")
def song(start, stop=0):
    """Return the verses from *start* down to *stop* (inclusive), with a
    blank line after every verse."""
    parts = []
    for number in range(start, stop - 1, -1):
        parts.append(verse(number))
        parts.append('\n')
    return ''.join(parts)
| {
"content_hash": "c5777e45771858e6051e68185acf9b31",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 82,
"avg_line_length": 42.8235294117647,
"alnum_prop": 0.5357142857142857,
"repo_name": "always-waiting/exercism-python",
"id": "02b82a1b0f80ac319e52cacfb7e1a37f3a029ad7",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beer-song/beer_song1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148045"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
class DisableWalletTest (NavCoinTestFramework):
    """Functional test: a node started with -disablewallet must still
    validate addresses and mine via generatetoaddress."""

    def __init__(self):
        super().__init__()
        # Single node, fresh chain, no wallet loaded.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def setup_network(self, split=False):
        # Start the one node with the wallet disabled.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
        self.is_network_split = False
        self.sync_all()

    def run_test (self):
        # Check regression: https://github.com/navcoin/navcoin/issues/6963#issuecomment-154548880
        # validateaddress must work without a wallet: the first address is
        # foreign/invalid for this chain, the second is valid.
        x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
        assert(x['isvalid'] == False)
        x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
        assert(x['isvalid'] == True)
        # Checking mining to an address without a wallet
        try:
            self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
        except JSONRPCException as e:
            # Mining may fail for environment-specific reasons, but NOT for
            # any of these wallet-related ones — hence the "not in" checks.
            assert("Invalid address" not in e.error['message'])
            assert("ProcessNewBlock, block not accepted" not in e.error['message'])
            assert("Couldn't create new block" not in e.error['message'])
        try:
            # Mining to an invalid address must be rejected outright.
            self.nodes[0].generatetoaddress(1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
            raise AssertionError("Must not mine to invalid address!")
        except JSONRPCException as e:
            assert("Invalid address" in e.error['message'])
if __name__ == '__main__':
DisableWalletTest ().main ()
| {
"content_hash": "0a44bbc268b6720f08d5bdcca4085e12",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 97,
"avg_line_length": 40.1025641025641,
"alnum_prop": 0.65153452685422,
"repo_name": "navcoindev/navcoin-core",
"id": "e8fe7907e9df32219758ced692074a480252bce6",
"size": "1820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/disablewallet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3655915"
},
{
"name": "C++",
"bytes": "4954999"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "176582"
},
{
"name": "Makefile",
"bytes": "105930"
},
{
"name": "Objective-C",
"bytes": "3771"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "946426"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3792"
},
{
"name": "Shell",
"bytes": "426873"
}
],
"symlink_target": ""
} |
import sys
import string
import json
blink_protocol_path = sys.argv[1]
browser_protocol_path = sys.argv[2]
output_cc_path = sys.argv[3]
output_h_path = sys.argv[4]
header = """\
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// Generated by
// content/public/browser/devtools_protocol_handler_generator.py from
// third_party/WebKit/Source/devtools/protocol.json and
// content/browser/devtools/browser_protocol.json
"""
template_h = string.Template(header + """\
#ifndef CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_
#define CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_
#include "content/browser/devtools/protocol/devtools_protocol_client.h"
namespace content {
class DevToolsProtocolDispatcher;
namespace devtools {
extern const char kProtocolVersion[];
bool IsSupportedProtocolVersion(const std::string& version);
template<typename T>
base::Value* CreateValue(const T& param) {
return new base::FundamentalValue(param);
}
template<class T>
base::Value* CreateValue(scoped_ptr<T>& param) {
return param.release();
}
template<class T>
base::Value* CreateValue(scoped_refptr<T> param) {
return param->ToValue().release();
}
template<typename T>
base::Value* CreateValue(const std::vector<T> param) {
base::ListValue* result = new base::ListValue();
for (auto& item : param) {
result->Append(CreateValue(item));
}
return result;
}
template<>
base::Value* CreateValue(const std::string& param);
${types}\
} // namespace devtools
class DevToolsProtocolDispatcher {
public:
using Notifier = DevToolsProtocolClient::RawMessageCallback;
using CommandHandler =
base::Callback<bool(int, scoped_ptr<base::DictionaryValue>)>;
explicit DevToolsProtocolDispatcher(const Notifier& notifier);
~DevToolsProtocolDispatcher();
CommandHandler FindCommandHandler(const std::string& method);
${setters}\
private:
using Response = DevToolsProtocolClient::Response;
using CommandHandlers = std::map<std::string, CommandHandler>;
${methods}\
Notifier notifier_;
DevToolsProtocolClient client_;
CommandHandlers command_handlers_;
${fields}\
};
} // namespace content
#endif // CONTENT_BROWSER_DEVTOOLS_PROTOCOL_DEVTOOLS_PROTOCOL_DISPATCHER_H_
""")
tmpl_typedef = string.Template("""\
namespace ${domain} {
typedef ${param_type} ${declared_name};
} // namespace ${domain}
""")
tmpl_struct = string.Template("""\
namespace ${domain} {
template<int MASK>
struct ${declared_name}Builder
: base::RefCounted<${declared_name}Builder<MASK>> {
public:
enum {
kAllSet = 0,
${fields_enum}\
};
${methods}\
static scoped_refptr<${declared_name}Builder<kNoneSet>> Create() {
return new ${declared_name}Builder<kNoneSet>();
}
scoped_ptr<base::DictionaryValue> ToValue() {
static_assert(MASK == kAllSet, "required properties missing");
return make_scoped_ptr(dict_->DeepCopy());
}
private:
friend struct ${declared_name}Builder<0>;
${declared_name}Builder() : dict_(new base::DictionaryValue()) {
}
template<class T> T* ThisAs() {
static_assert(sizeof(*this) == sizeof(T), "cannot cast");
return reinterpret_cast<T*>(this);
}
scoped_ptr<base::DictionaryValue> dict_;
};
typedef ${declared_name}Builder<0> ${declared_name};
} // namespace ${domain}
""")
tmpl_builder_setter_req = string.Template("""\
scoped_refptr<${declared_name}Builder<MASK & ~k${Param}>>
set_${param}(${pass_type} ${param}) {
static_assert(MASK & k${Param}, "already set");
dict_->Set("${proto_param}", CreateValue(${param}));
return ThisAs<${declared_name}Builder<MASK & ~k${Param}>>();
}
""")
tmpl_builder_setter_opt = string.Template("""\
scoped_refptr<${declared_name}Builder<MASK>>
set_${param}(${pass_type} ${param}) {
dict_->Set("${proto_param}", CreateValue(${param}));
return this;
}
""")
tmpl_builder_enum = string.Template("""\
k${Param} = 1 << ${ordinal},
""")
tmpl_builder_none_set = string.Template("""\
kNoneSet = ${all_fields}
""")
tmpl_named_enum = string.Template("""\
namespace ${domain} {
${values}\
} // namespace ${domain}
""")
tmpl_inline_enum = string.Template("""\
namespace ${domain} {
namespace ${subdomain} {
${values}\
} // namespace ${subdomain}
} // namespace ${domain}
""")
tmpl_enum_value = string.Template("""\
extern const char k${Enum}${Value}[];
""")
tmpl_enum_value_def = string.Template("""\
const char k${Enum}${Value}[] = "${value}";
""")
tmpl_handler = string.Template("""\
namespace ${domain} {
class ${Domain}Handler;
} // namespace domain
""")
tmpl_client = string.Template("""\
namespace ${domain} {
class Client : public DevToolsProtocolClient {
public:
explicit Client(const RawMessageCallback& raw_message_callback);
~Client() override;
${methods}\
};
} // namespace ${domain}
""")
tmpl_event = string.Template("""\
void ${Command}(
scoped_refptr<${Command}Params> params);
""")
tmpl_response = string.Template("""\
void Send${Command}Response(
DevToolsCommandId command_id,
scoped_refptr<${Command}Response> params);
""")
tmpl_setter = string.Template("""\
void Set${Domain}Handler(
devtools::${domain}::${Domain}Handler* ${domain}_handler);
""")
tmpl_callback = string.Template("""\
bool On${Domain}${Command}(
DevToolsCommandId command_id,
scoped_ptr<base::DictionaryValue> params);
""")
tmpl_field = string.Template("""\
devtools::${domain}::${Domain}Handler* ${domain}_handler_;
""")
template_cc = string.Template(header + """\
#include "content/browser/devtools/protocol/devtools_protocol_handler.h"
#include "base/bind.h"
#include "base/strings/string_number_conversions.h"
${includes}\
namespace content {
DevToolsProtocolDispatcher::DevToolsProtocolDispatcher(
const Notifier& notifier)
: notifier_(notifier),
client_(notifier),
${fields_init} {
}
DevToolsProtocolDispatcher::~DevToolsProtocolDispatcher() {
}
DevToolsProtocolDispatcher::CommandHandler
DevToolsProtocolDispatcher::FindCommandHandler(const std::string& method) {
CommandHandlers::iterator it = command_handlers_.find(method);
return it == command_handlers_.end() ? CommandHandler() : it->second;
}
${methods}\
namespace devtools {
const char kProtocolVersion[] = "${major}.${minor}";
bool IsSupportedProtocolVersion(const std::string& version) {
std::vector<std::string> tokens;
Tokenize(version, ".", &tokens);
int major, minor;
return tokens.size() == 2 &&
base::StringToInt(tokens[0], &major) && major == ${major} &&
base::StringToInt(tokens[1], &minor) && minor <= ${minor};
}
template<>
base::Value* CreateValue(const std::string& param) {
return new base::StringValue(param);
}
${types}\
} // namespace devtools
} // namespace content
""")
tmpl_include = string.Template("""\
#include "content/browser/devtools/protocol/${domain}_handler.h"
""")
tmpl_field_init = string.Template("${domain}_handler_(nullptr)")
tmpl_setter_impl = string.Template("""\
void DevToolsProtocolDispatcher::Set${Domain}Handler(
devtools::${domain}::${Domain}Handler* ${domain}_handler) {
DCHECK(!${domain}_handler_);
${domain}_handler_ = ${domain}_handler;
${initializations}\
}
""")
tmpl_register = string.Template("""\
command_handlers_["${Domain}.${command}"] =
base::Bind(
&DevToolsProtocolDispatcher::On${TargetDomain}${Command},
base::Unretained(this));
""")
tmpl_init_client = string.Template("""\
${domain}_handler_->SetClient(make_scoped_ptr(
new devtools::${domain}::Client(notifier_)));
""")
tmpl_callback_impl = string.Template("""\
bool DevToolsProtocolDispatcher::On${Domain}${Command}(
DevToolsCommandId command_id,
scoped_ptr<base::DictionaryValue> params) {
${prep}\
Response response = ${domain}_handler_->${Command}(${args});
scoped_ptr<base::DictionaryValue> protocol_response;
if (client_.SendError(command_id, response))
return true;
if (response.IsFallThrough())
return false;
scoped_ptr<base::DictionaryValue> result(new base::DictionaryValue());
${wrap}\
client_.SendSuccess(command_id, result.Pass());
return true;
}
""")
tmpl_wrap = string.Template("""\
result->Set("${proto_param}", devtools::CreateValue(out_${param}));
""")
tmpl_callback_async_impl = string.Template("""\
bool DevToolsProtocolDispatcher::On${Domain}${Command}(
DevToolsCommandId command_id,
scoped_ptr<base::DictionaryValue> params) {
${prep}\
Response response = ${domain}_handler_->${Command}(${args});
if (client_.SendError(command_id, response))
return true;
return !response.IsFallThrough();
}
""")
tmpl_prep_req = string.Template("""\
${raw_type} in_${param}${init};
if (!params || !params->Get${Type}("${proto_param}", &in_${param})) {
client_.SendError(command_id, Response::InvalidParams("${proto_param}"));
return true;
}
""")
tmpl_prep_req_list = string.Template("""\
base::ListValue* list_${param} = nullptr;
if (!params || !params->GetList("${proto_param}", &list_${param})) {
client_.SendError(command_id, Response::InvalidParams("${proto_param}"));
return true;
}
std::vector<${item_type}> in_${param};
for (base::ListValue::const_iterator it =
list_${param}->begin(); it != list_${param}->end(); ++it) {
${item_raw_type} item;
if (!(*it)->GetAs${ItemType}(&item)) {
client_.SendError(command_id, Response::InvalidParams("${proto_param}"));
return true;
}
in_${param}.push_back(${item_pass});
}
""")
tmpl_prep_opt = string.Template("""\
${raw_type} in_${param}${init};
bool ${param}_found = params && params->Get${Type}(
"${proto_param}",
&in_${param});
""")
tmpl_prep_output = string.Template("""\
${param_type} out_${param}${init};
""")
tmpl_arg_name = string.Template("in_${param}")
tmpl_arg_req = string.Template("${param_pass}")
tmpl_arg_opt = string.Template(
"${param}_found ? ${param_pass} : nullptr")
tmpl_object_pass = string.Template(
"make_scoped_ptr<base::DictionaryValue>(${name}->DeepCopy())")
tmpl_client_impl = string.Template("""\
namespace ${domain} {
Client::Client(const RawMessageCallback& raw_message_callback)
: DevToolsProtocolClient(raw_message_callback) {
}
Client::~Client() {
}
${methods}\
} // namespace ${domain}
""")
tmpl_event_impl = string.Template("""\
void Client::${Command}(
scoped_refptr<${Command}Params> params) {
SendNotification("${Domain}.${command}",
params->ToValue().Pass());
}
""")
tmpl_response_impl = string.Template("""\
void Client::Send${Command}Response(
DevToolsCommandId command_id,
scoped_refptr<${Command}Response> params) {
SendSuccess(command_id, params->ToValue().Pass());
}
""")
tmpl_typename = string.Template("devtools::${domain}::${declared_name}")
def Capitalize(s):
  """Return *s* with only its first character upper-cased (empty-safe)."""
  if not s:
    return s
  return s[0].upper() + s[1:]
def Uncamelcase(s):
  """Convert CamelCase to snake_case, keeping acronym runs together.

  An underscore is inserted before an upper-case letter only when it starts
  or ends a run of capitals (i.e. its neighbour is lower-case), so
  "DOMStorage" becomes "dom_storage" and "CSS" becomes "css".
  """
  pieces = []
  for i, ch in enumerate(s):
    if ch.isupper():
      next_is_lower = i + 1 < len(s) and s[i + 1].islower()
      prev_is_lower = i > 0 and s[i - 1].islower()
      if i > 0 and (next_is_lower or prev_is_lower):
        pieces.append("_")
      pieces.append(ch.lower())
    else:
      pieces.append(ch)
  return "".join(pieces)
# Global registry "Domain.TypeId" -> type JSON, plus accumulators that the
# Declare*/Resolve* helpers below append generated C++ fragments to.
types = {}
# Load both protocol descriptions (paths come from sys.argv above).
blink_protocol = json.loads(open(blink_protocol_path, "r").read())
browser_protocol = json.loads(open(browser_protocol_path, "r").read())
type_decls = []
type_impls = []
handler_methods = []
handler_method_impls = []
domain_maps = []
redirects = {}
all_domains = blink_protocol["domains"] + browser_protocol["domains"]
# Index every declared type by its fully qualified "Domain.TypeId" name.
for json_domain in all_domains:
    if "types" in json_domain:
        for json_type in json_domain["types"]:
            types["%s.%s" % (json_domain["domain"], json_type["id"])] = json_type
def DeclareStruct(json_properties, mapping):
  """Emit a C++ builder struct declaration for an object type.

  Appends a tmpl_struct instantiation to the global type_decls list, with one
  setter per property.  Required properties additionally get a bitmask enum
  entry so ToValue() can statically assert that all of them were set.
  """
  methods = []
  fields_enum = []
  enum_items = []
  req_fields_num = 0
  for json_prop in json_properties:
    prop_map = mapping.copy()
    prop_map["proto_param"] = json_prop["name"]
    prop_map["param"] = Uncamelcase(json_prop["name"])
    prop_map["Param"] = Capitalize(json_prop["name"])
    prop_map["subdomain"] = Uncamelcase(prop_map["declared_name"])
    # declared_name is removed so ResolveType can derive a nested type name,
    # then restored afterwards for the setter templates.
    del prop_map["declared_name"]
    ResolveType(json_prop, prop_map)
    prop_map["declared_name"] = mapping["declared_name"]
    if json_prop.get("optional"):
      methods.append(tmpl_builder_setter_opt.substitute(prop_map))
    else:
      # Required property: setter clears its bit in the "still unset" mask.
      methods.append(tmpl_builder_setter_req.substitute(prop_map))
      enum_items.append("k%s" % prop_map["Param"]);
      fields_enum.append(tmpl_builder_enum.substitute(prop_map,
          ordinal = req_fields_num))
      req_fields_num += 1
  all_fields = "kAllSet"
  if len(enum_items) > 0:
    all_fields = " | ".join(enum_items)
  fields_enum.append(tmpl_builder_none_set.substitute(mapping,
      all_fields = all_fields))
  type_decls.append(tmpl_struct.substitute(mapping,
      methods = "\n".join(methods),
      fields_enum = "".join(fields_enum)))
def DeclareEnum(json, mapping):
  """Emit extern declarations and definitions for an enum's string constants.

  Named enums (mapping has "declared_name") go in the domain namespace;
  anonymous/inline enums are nested under a subdomain namespace.  Appends to
  the global type_decls and type_impls lists.
  """
  values = []
  value_defs = []
  tmpl_enum = tmpl_inline_enum
  if "declared_name" in mapping:
    mapping["Enum"] = mapping["declared_name"]
    tmpl_enum = tmpl_named_enum
  else:
    mapping["Enum"] = Capitalize(mapping["proto_param"])
  for enum_value in json["enum"]:
    values.append(tmpl_enum_value.substitute(mapping,
        Value = Capitalize(enum_value)))
    value_defs.append(tmpl_enum_value_def.substitute(mapping,
        value = enum_value,
        Value = Capitalize(enum_value)))
  type_decls.append(tmpl_enum.substitute(mapping,
      values = "".join(values)))
  type_impls.append(tmpl_enum.substitute(mapping,
      values = "".join(value_defs)))
def ResolveRef(json, mapping):
  """Resolve a "$ref" to its target type and declare that type once.

  A ref without a dot refers to the current domain; "Domain.Type" refs cross
  domains.  The target's C++ declaration (struct, enum, or typedef) is
  emitted only the first time it is seen, tracked via the
  "___type_declared" marker injected into the type's JSON node.
  """
  dot_pos = json["$ref"].find(".")
  if dot_pos == -1:
    domain_name = mapping["Domain"]
    type_name = json["$ref"]
  else:
    domain_name = json["$ref"][:dot_pos]
    type_name = json["$ref"][dot_pos + 1:]
  json_type = types["%s.%s" % (domain_name, type_name)]
  mapping["declared_name"] = Capitalize(type_name)
  mapping["Domain"] = domain_name
  mapping["domain"] = Uncamelcase(domain_name)
  mapping["param_type"] = tmpl_typename.substitute(mapping)
  ResolveType(json_type, mapping)
  if not "___type_declared" in json_type:
    json_type["___type_declared"] = True;
    if (json_type.get("type") == "object") and ("properties" in json_type):
      DeclareStruct(json_type["properties"], mapping)
    else:
      if ("enum" in json_type):
        DeclareEnum(json_type, mapping)
      # Non-struct named types become a plain typedef in their namespace.
      type_decls.append(tmpl_typedef.substitute(mapping))
def ResolveArray(json, mapping):
  # Fills |mapping| for an array-typed parameter. The element type is
  # resolved into a copy of the mapping so its entries don't clobber the
  # list's own entries.
  items_map = mapping.copy()
  ResolveType(json["items"], items_map)
  if items_map["Type"] == "List":
    # TODO(dgozman) Implement this.
    raise Exception("Nested arrays are not implemented")
  mapping["param_type"] = "std::vector<%s>" % items_map["param_type"]
  mapping["Type"] = "List"
  mapping["pass_type"] = "const %s&" % mapping["param_type"]
  mapping["storage_type"] = "std::vector<%s>" % items_map["storage_type"]
  mapping["raw_type"] = mapping["storage_type"]
  # prep_req converts the incoming base::ListValue into the std::vector.
  mapping["prep_req"] = tmpl_prep_req_list.substitute(mapping,
      item_type = items_map["storage_type"],
      item_init = items_map["init"],
      item_raw_type = items_map["raw_type"],
      item_pass = items_map["pass_template"].substitute(name="item", opt=""),
      ItemType = items_map["Type"])
  mapping["arg_out"] = "&out_%s" % mapping["param"]
def ResolveObject(json, mapping):
  # Fills |mapping| for an object-typed parameter. Objects with a declared
  # property list get a generated struct; free-form objects stay as raw
  # base::DictionaryValue.
  mapping["Type"] = "Dictionary"
  mapping["storage_type"] = "scoped_ptr<base::DictionaryValue>"
  mapping["raw_type"] = "base::DictionaryValue*"
  mapping["pass_template"] = tmpl_object_pass
  if "properties" in json:
    if not "declared_name" in mapping:
      # Anonymous struct: synthesize a name from command + parameter and
      # declare it on the spot.
      mapping["declared_name"] = ("%s%s" %
          (mapping["Command"], Capitalize(mapping["proto_param"])))
      mapping["param_type"] = ("scoped_refptr<%s>" %
          tmpl_typename.substitute(mapping))
      DeclareStruct(json["properties"], mapping)
    else:
      mapping["param_type"] = ("scoped_refptr<%s>" %
          tmpl_typename.substitute(mapping))
    mapping["pass_type"] = mapping["param_type"]
    mapping["arg_out"] = "&out_%s" % mapping["param"]
  else:
    # No schema: pass the dictionary through untyped.
    mapping["param_type"] = "base::DictionaryValue"
    mapping["pass_type"] = "scoped_ptr<base::DictionaryValue>"
    mapping["arg_out"] = "out_%s.get()" % mapping["param"]
  mapping["prep_req"] = tmpl_prep_req.substitute(mapping)
def ResolvePrimitive(json, mapping):
  # Fills |mapping| for a primitive (boolean/integer/number/string)
  # parameter: C++ type, Value-reader name, and default initializer.
  jsonrpc_type = json["type"]
  if jsonrpc_type == "boolean":
    mapping["param_type"] = "bool"
    mapping["Type"] = "Boolean"
    mapping["init"] = " = false"
  elif jsonrpc_type == "integer":
    mapping["param_type"] = "int"
    mapping["Type"] = "Integer"
    mapping["init"] = " = 0"
  elif jsonrpc_type == "number":
    mapping["param_type"] = "double"
    mapping["Type"] = "Double"
    mapping["init"] = " = 0.0"
  elif jsonrpc_type == "string":
    mapping["param_type"] = "std::string"
    mapping["pass_type"] = "const std::string&"
    mapping["Type"] = "String"
    if "enum" in json and "declared_name" not in mapping:
      # Inline enum on a string parameter: declare it under a subdomain
      # derived from the command name.
      if "subdomain" not in mapping:
        mapping["subdomain"] = Uncamelcase(mapping["command"])
      DeclareEnum(json, mapping)
  else:
    # Bug fix: this previously referenced the undefined name |json_type|
    # (a NameError at runtime); report the actual unknown type string.
    raise Exception("Unknown type: %s" % jsonrpc_type)
  mapping["storage_type"] = mapping["param_type"]
  mapping["raw_type"] = mapping["param_type"]
  mapping["prep_req"] = tmpl_prep_req.substitute(mapping)
  if jsonrpc_type != "string":
    # Value types are passed by value; strings keep const-ref from above.
    mapping["pass_type"] = mapping["param_type"]
  mapping["arg_out"] = "&out_%s" % mapping["param"]
def ResolveType(json, mapping):
  # Dispatches a JSON type description to the matching resolver and seeds
  # the defaults every resolver may override.
  mapping["init"] = ""
  mapping["pass_template"] = string.Template("${opt}${name}")
  if "$ref" in json:
    ResolveRef(json, mapping)
    return
  if "type" not in json:
    raise Exception("Unknown type at %s.%s %s" %
        (mapping["Domain"], mapping["command"], mapping["proto_param"]))
  jsonrpc_type = json["type"]
  if jsonrpc_type == "array":
    ResolveArray(json, mapping)
  elif jsonrpc_type == "object":
    ResolveObject(json, mapping)
  else:
    ResolvePrimitive(json, mapping)
# Per-domain accumulators for the generated handler's members.
setters = []
fields = []
includes = []
fields_init = []
for json_domain in all_domains:
  domain_map = {}
  domain_map["Domain"] = json_domain["domain"]
  domain_map["domain"] = Uncamelcase(json_domain["domain"])
  initializations = []
  client_methods = []
  client_method_impls = []
  domain_empty = True
  domain_needs_client = False
  if "commands" in json_domain:
    for json_command in json_domain["commands"]:
      # Only commands marked as handled in the browser process are generated.
      if (not ("handlers" in json_command) or
          not ("browser" in json_command["handlers"])):
        continue
      domain_empty = False
      command_map = domain_map.copy()
      command_map["command"] = json_command["name"]
      command_map["Command"] = Capitalize(json_command["name"])
      if "redirect" in json_command:
        # Command registered under this domain but served by another one;
        # stash the registration and emit nothing else for it.
        redirect_domain = json_command["redirect"]
        if not (redirect_domain in redirects):
          redirects[redirect_domain] = []
        command_map["TargetDomain"] = redirect_domain
        redirects[redirect_domain].append(tmpl_register.substitute(command_map))
        continue
      command_map["TargetDomain"] = command_map["Domain"]
      prep = []
      args = []
      if "parameters" in json_command:
        for json_param in json_command["parameters"]:
          param_map = command_map.copy()
          param_map["proto_param"] = json_param["name"]
          param_map["param"] = Uncamelcase(json_param["name"])
          ResolveType(json_param, param_map)
          if json_param.get("optional"):
            if param_map["Type"] in ["List"]:
              # TODO(vkuzkokov) Implement transformation of base::ListValue
              # to std::vector and base::DictonaryValue to struct.
              raise Exception(
                  "Optional array parameters are not implemented")
            prep.append(tmpl_prep_opt.substitute(param_map))
            param_pass = param_map["pass_template"].substitute(
                name=tmpl_arg_name.substitute(param_map),
                opt="&")
            args.append(
                tmpl_arg_opt.substitute(param_map, param_pass=param_pass))
          else:
            prep.append(param_map["prep_req"])
            param_pass = param_map["pass_template"].substitute(
                name=tmpl_arg_name.substitute(param_map),
                opt="")
            args.append(
                tmpl_arg_req.substitute(param_map, param_pass=param_pass))
      if json_command.get("async"):
        # Async commands respond later via a generated client class.
        domain_needs_client = True
        json_returns = []
        if "returns" in json_command:
          json_returns = json_command["returns"]
        command_map["declared_name"] = "%sResponse" % command_map["Command"]
        DeclareStruct(json_returns, command_map)
        # TODO(vkuzkokov) Pass async callback instance similar to how
        # InspectorBackendDispatcher does it. This, however, can work
        # only if Blink and Chrome are in the same repo.
        args.insert(0, "command_id")
        handler_method_impls.append(
            tmpl_callback_async_impl.substitute(command_map,
                prep = "".join(prep),
                args = "\n      " + ",\n      ".join(args)))
        client_methods.append(tmpl_response.substitute(command_map))
        client_method_impls.append(tmpl_response_impl.substitute(command_map))
      else:
        # Synchronous command: return values are written to out-params and
        # wrapped back into the protocol response.
        wrap = []
        if "returns" in json_command:
          for json_param in json_command["returns"]:
            param_map = command_map.copy()
            param_map["proto_param"] = json_param["name"]
            param_map["param"] = Uncamelcase(json_param["name"])
            if json_param.get("optional"):
              # TODO(vkuzkokov) Implement Optional<T> for value types.
              raise Exception("Optional return values are not implemented")
            ResolveType(json_param, param_map)
            prep.append(tmpl_prep_output.substitute(param_map))
            args.append(param_map["arg_out"])
            wrap.append(tmpl_wrap.substitute(param_map))
        args_str = ""
        if len(args) > 0:
          args_str = "\n      " + ",\n      ".join(args)
        handler_method_impls.append(tmpl_callback_impl.substitute(command_map,
            prep = "".join(prep),
            args = args_str,
            wrap = "".join(wrap)))
      initializations.append(tmpl_register.substitute(command_map))
      handler_methods.append(tmpl_callback.substitute(command_map))
  if "events" in json_domain:
    for json_event in json_domain["events"]:
      # Only events raised from the browser process are generated.
      if (not ("handlers" in json_event) or
          not ("browser" in json_event["handlers"])):
        continue
      domain_empty = False
      domain_needs_client = True
      event_map = domain_map.copy()
      event_map["command"] = json_event["name"]
      event_map["Command"] = Capitalize(json_event["name"])
      json_parameters = []
      if "parameters" in json_event:
        json_parameters = json_event["parameters"]
      event_map["declared_name"] = "%sParams" % event_map["Command"]
      DeclareStruct(json_parameters, event_map);
      client_methods.append(tmpl_event.substitute(event_map))
      client_method_impls.append(tmpl_event_impl.substitute(event_map))
  if domain_empty:
    continue
  # Emit the per-domain handler plumbing.
  type_decls.append(tmpl_handler.substitute(domain_map))
  setters.append(tmpl_setter.substitute(domain_map))
  fields.append(tmpl_field.substitute(domain_map))
  includes.append(tmpl_include.substitute(domain_map))
  fields_init.append(tmpl_field_init.substitute(domain_map))
  if domain_needs_client:
    type_decls.append(tmpl_client.substitute(domain_map,
        methods = "".join(client_methods)))
    initializations.append(tmpl_init_client.substitute(domain_map))
    type_impls.append(tmpl_client_impl.substitute(domain_map,
        methods = "\n".join(client_method_impls)))
  domain_map["initializations"] = "".join(initializations)
  domain_maps.append(domain_map)
# Second pass: append the registrations redirected into each domain.
for domain_map in domain_maps:
  domain = domain_map["Domain"]
  if domain in redirects:
    domain_map["initializations"] += "".join(redirects[domain])
  handler_method_impls.append(tmpl_setter_impl.substitute(domain_map))
# Emit the generated files: header first, then the implementation.
# Improvement: use context managers so the files are flushed and closed even
# if template substitution raises (the originals were never closed on error).
with open(output_h_path, "w") as output_h_file:
  output_h_file.write(template_h.substitute({},
      types = "\n".join(type_decls),
      setters = "".join(setters),
      methods = "".join(handler_methods),
      fields = "".join(fields)))
with open(output_cc_path, "w") as output_cc_file:
  output_cc_file.write(template_cc.substitute({},
      major = blink_protocol["version"]["major"],
      minor = blink_protocol["version"]["minor"],
      includes = "".join(sorted(includes)),
      fields_init = ",\n      ".join(fields_init),
      methods = "\n".join(handler_method_impls),
      types = "\n".join(type_impls)))
| {
"content_hash": "203c50f0738ff3374ea9d60449a76aa8",
"timestamp": "",
"source": "github",
"line_count": 797,
"max_line_length": 80,
"avg_line_length": 31.158092848180676,
"alnum_prop": 0.6471227801715459,
"repo_name": "guorendong/iridium-browser-ubuntu",
"id": "cc2aa130b6840e6f2585bc825df34929bf91f3dc",
"size": "25018",
"binary": false,
"copies": "3",
"ref": "refs/heads/ubuntu/precise",
"path": "content/browser/devtools/protocol/devtools_protocol_handler_generator.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "256197"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "15445429"
},
{
"name": "C++",
"bytes": "276628399"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "867238"
},
{
"name": "Emacs Lisp",
"bytes": "3348"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "7777"
},
{
"name": "HTML",
"bytes": "20250399"
},
{
"name": "Java",
"bytes": "9950308"
},
{
"name": "JavaScript",
"bytes": "13873772"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "179129"
},
{
"name": "Objective-C",
"bytes": "1871766"
},
{
"name": "Objective-C++",
"bytes": "9674498"
},
{
"name": "PHP",
"bytes": "42038"
},
{
"name": "PLpgSQL",
"bytes": "163248"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "474121"
},
{
"name": "Python",
"bytes": "11646662"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1151673"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
import sst
import sst.actions
# Point the suite at the local dev server, then load the front page.
sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT)
sst.actions.go_to('/')
# The page body must use the expected font stack (exact string match).
elem = sst.actions.get_element(tag='body')
sst.actions.assert_css_property(
    elem, 'font-family', 'Ubuntu,Tahoma,sans-serif')
# Same property checked again, this time as a regex fragment match.
elem = sst.actions.get_element(tag='body')
sst.actions.assert_css_property(elem, 'font-family', 'Ubuntu', regex=True)
# Every h2 heading carries the standard left padding.
elems = sst.actions.get_elements(tag='h2')
for elem in elems:
    sst.actions.assert_css_property(elem, 'padding-left', '8px')
# Negative check: the assertion helper must fail on a bogus value.
elems = sst.actions.get_elements(tag='h2')
for elem in elems:
    sst.actions.fails(
        sst.actions.assert_css_property, elem, 'padding-left', 'notfound')
| {
"content_hash": "6b089866be0f8879e3602e87e4943ba7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 30.136363636363637,
"alnum_prop": 0.7073906485671192,
"repo_name": "DramaFever/sst",
"id": "2055b03d407f4496fb0c8bf58d786b1cf0a3ec0c",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sst/selftests/css_props.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2786"
},
{
"name": "CSS",
"bytes": "269"
},
{
"name": "HTML",
"bytes": "17795"
},
{
"name": "Python",
"bytes": "302776"
},
{
"name": "Shell",
"bytes": "6047"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("..")
import unittest
from operator import attrgetter
import datetime
import pymongo
import maroon
from maroon import Model, IntProperty, Property
from mongo import MongoDB
from mock import MockDB
from couch import CouchDB
from models import SimpleModel, FunModel, PersonModel
class TestBasicModelCreationAndAssignment(unittest.TestCase):
    """Exercise saving, updating, merging and querying of maroon models
    against whichever backend the __main__ block configured.

    Improvement: the deprecated ``failUnlessEqual``/``failIfEqual``/
    ``failUnless`` aliases (removed in Python 3.12) are replaced with the
    canonical ``assertEqual``/``assertNotEqual``/``assertTrue``.
    """

    def setUp(self):
        # Fresh, unsaved model instances for each test.
        self.o1 = SimpleModel()
        self.o2 = SimpleModel()
        self.o3 = SimpleModel()

    def test_simple_save(self):
        self.o1.int1 = 44
        self.o1.save()
        # Saving must assign a database id.
        self.assertNotEqual(self.o1._id, None)

    def test_update_object(self):
        # make sure that we replace objects when they are updated
        self.o1._id = "mustafa"
        self.o1.int1 = 1
        self.o1.int2 = 2
        self.o1.save()
        ob = SimpleModel.get_id("mustafa")
        ob.int2 = 3
        ob.save()
        ob = SimpleModel.get_id("mustafa")
        self.assertEqual(3, ob.int2)

    def test_merge(self):
        # merge() folds new fields into an already-stored document.
        f = FunModel(_id='7',enum='red')
        f.save()
        g = FunModel(_id='7',dic={'three':4})
        g.merge()
        ob = FunModel.get_id('7').to_d()
        del ob['ca']
        ob.pop('_rev',None)
        self.assertEqual(ob, dict(_id='7', e='red', d={'three':4}))

    def test_missing_fields(self):
        obj1 = SimpleModel({'_id':'simba','i1':2})
        obj1.save()
        ob = SimpleModel.get_id('simba')
        # Fields never set read back as None.
        self.assertEqual(ob.int2, None)

    def test_set_missing_field(self):
        SimpleModel({'i1':2,'_id':'timon'}).save()
        ob = SimpleModel.get_id('timon')
        ob.int2 = 15
        ob.save()
        ob = SimpleModel.get_id('timon')
        self.assertEqual(ob.int2, 15)

    def test_remove_field(self):
        self.o2._id = "nala"
        self.o2.int1 = 2
        self.o2.int2 = 3
        self.o2.save()
        item = SimpleModel.get_id("nala")
        self.assertEqual(item.int2, 3)
        # Assigning None removes the field when the model is re-saved.
        item.int2 = None
        item.save()
        result = SimpleModel.get_id("nala")
        self.assertEqual(result.int2, None)

    def test_get_all(self):
        for name in ['pumba','zazu','rafiki']:
            m = PersonModel(name=name, age=(10+len(name)))
            m.save()
        people = sorted(PersonModel.get_all(),key=attrgetter('age'))
        self.assertEqual(people[0].name, 'zazu')
        self.assertEqual(people[0].age, 14)
        self.assertEqual(people[1].name, 'pumba')
        self.assertEqual(people[1].age, 15)
        self.assertEqual(people[2].name, 'rafiki')
        self.assertEqual(people[2].age, 16)
        people = list(PersonModel.get_all(limit=2))
        self.assertEqual(len(people), 2)

    def test_fun_model(self):
        dic = {"one":2, 'three':"four", 'five':["six",7]}
        names = ['Shenzi', 'Banzai', 'ed']
        now = datetime.datetime.utcnow()
        fun = FunModel(
            _id="fun",
            enum="red",
            real=3.14,
            dic=dic,
            names=names,
        )
        fun.part=PersonModel(name="scar", age=32)
        fun.save()
        fun = FunModel.get_id("fun")
        self.assertEqual(fun.enum, 'red')
        self.assertEqual(fun.real, 3.14)
        self.assertEqual(fun.dic, dic)
        # The auto-populated creation timestamp must be within a second.
        dt = abs(fun.created-now)
        self.assertTrue(dt.days == 0 and dt.seconds == 0)
        self.assertEqual(fun.names, names)
        self.assertEqual(fun.part.name, "scar")
        self.assertEqual(fun.part.age, 32)
if __name__ == '__main__':
    # First CLI argument selects the backend the suite runs against.
    # NOTE: this file is Python 2 (print statement below).
    db = sys.argv[1]
    models = ('SimpleModel', 'FunModel', 'PersonModel')
    if db=='mongo':
        Model.database = MongoDB(None,'test_maroon', port=2727)
        for m in models:
            # Empty each collection so tests start from a clean slate.
            Model.database[m].remove()
    elif db=='mock':
        Model.database = MockDB()
    elif db=='couch':
        for m in models:
            # One CouchDB database per model class; flush clears it.
            url = 'http://127.0.0.1:5984/'
            cls = locals()[m]
            cls.database = CouchDB(url+'test_maroon_'+m.lower(),True)
            cls.database.flush()
    else:
        print "Usage: ./database_tests.py [mongo|couch|mock]"
    if hasattr(FunModel,'database'):
        # Drop the backend argument so unittest doesn't try to parse it.
        del sys.argv[1]
        unittest.main()
| {
"content_hash": "6dad51ede22bbb4da5dcdf1ba821d9cf",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 71,
"avg_line_length": 31.54074074074074,
"alnum_prop": 0.5657585720995773,
"repo_name": "nod/maroon",
"id": "bb96174153f0771230fc7c07bb6b0916d423245c",
"size": "4281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/database_tests.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "62"
},
{
"name": "Python",
"bytes": "41231"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
} |
"""
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import re
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.safestring import SafeBytes
try:
import pytz
except ImportError:
pytz = None
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
# Some of these import sqlite3, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value):
    """
    Adapt a datetime for SQLite storage, warning when an aware datetime is
    received (SQLite stores naive datetimes in the connection's time zone).
    """
    # Remove this function and rely on the default adapter in Django 2.0.
    if settings.USE_TZ and timezone.is_aware(value):
        # Bug fix: the "%s" placeholder was never interpolated, so the
        # warning printed a literal "%s" instead of the offending datetime.
        warnings.warn(
            "The SQLite database adapter received an aware datetime (%s), "
            "probably from cursor.execute(). Update your code to pass a "
            "naive datetime in the database connection's time zone (UTC by "
            "default)." % value, RemovedInDjango20Warning)
        # This doesn't account for the database connection's timezone,
        # which isn't known. (That's why this adapter is deprecated.)
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return value.isoformat(str(" "))
def decoder(conv_func):
    """Wrap *conv_func* so it receives text rather than bytes.

    The Python sqlite3 interface always hands converters byte strings; the
    returned callable decodes them as UTF-8 before passing them on.
    """
    def _decode_then_convert(raw_bytes):
        return conv_func(raw_bytes.decode('utf-8'))
    return _decode_then_convert
# Converters turn SQLite column text (raw bytes) back into Python values...
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime))
Database.register_converter(str("timestamp"), decoder(parse_datetime))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
# ...and adapters serialize Python values on the way into the database.
Database.register_adapter(datetime.datetime, adapt_datetime_warn_on_aware_datetime)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
    # On Python 2, store byte strings as UTF-8 decoded text.
    Database.register_adapter(str, lambda s: s.decode('utf-8'))
    Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseWrapper(BaseDatabaseWrapper):
    # SQLite implementation of Django's database backend: maps field types,
    # lookup operators and connection management onto the DB-API module
    # imported above as Database (pysqlite2 or stdlib sqlite3).
    vendor = 'sqlite'
    # SQLite doesn't actually support most of these types, but it "does the right
    # thing" given more verbose field definitions, so leave them as is so that
    # schema inspection is more useful.
    data_types = {
        'AutoField': 'integer',
        'BigAutoField': 'integer',
        'BinaryField': 'BLOB',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'real',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer unsigned',
        'PositiveSmallIntegerField': 'smallint unsigned',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    data_types_suffix = {
        'AutoField': 'AUTOINCREMENT',
        'BigAutoField': 'AUTOINCREMENT',
    }
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See http://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
        'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
        'startswith': r"LIKE {} || '%%' ESCAPE '\'",
        'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
        'endswith': r"LIKE '%%' || {} ESCAPE '\'",
        'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
    }
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Wire up the SQLite-specific implementation of each backend facet.
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def get_connection_params(self):
        # Build the kwargs dict handed to Database.connect() from settings.
        settings_dict = self.settings_dict
        if not settings_dict['NAME']:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        kwargs = {
            'database': settings_dict['NAME'],
            'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
        }
        kwargs.update(settings_dict['OPTIONS'])
        # Always allow the underlying SQLite connection to be shareable
        # between multiple threads. The safe-guarding will be handled at a
        # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
        # property. This is necessary as the shareability is disabled by
        # default in pysqlite and it cannot be changed once a connection is
        # opened.
        if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
            warnings.warn(
                'The `check_same_thread` option was provided and set to '
                'True. It will be overridden with False. Use the '
                '`DatabaseWrapper.allow_thread_sharing` property instead '
                'for controlling thread shareability.',
                RuntimeWarning
            )
        kwargs.update({'check_same_thread': False})
        if self.features.can_share_in_memory_db:
            kwargs.update({'uri': True})
        return kwargs
    def get_new_connection(self, conn_params):
        conn = Database.connect(**conn_params)
        # Register the Python implementations of SQL functions that SQLite
        # lacks; the ORM emits calls to these names in generated SQL.
        conn.create_function("django_date_extract", 2, _sqlite_date_extract)
        conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
        conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
        conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
        conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
        conn.create_function("django_time_extract", 2, _sqlite_time_extract)
        conn.create_function("django_time_diff", 2, _sqlite_time_diff)
        conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
        conn.create_function("regexp", 2, _sqlite_regexp)
        conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
        conn.create_function("django_power", 2, _sqlite_power)
        return conn
    def init_connection_state(self):
        # SQLite needs no per-connection initialization.
        pass
    def create_cursor(self):
        return self.connection.cursor(factory=SQLiteCursorWrapper)
    def close(self):
        self.validate_thread_sharing()
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if not self.is_in_memory_db(self.settings_dict['NAME']):
            BaseDatabaseWrapper.close(self)
    def _savepoint_allowed(self):
        # Two conditions are required here:
        # - A sufficiently recent version of SQLite to support savepoints,
        # - Being in a transaction, which can only happen inside 'atomic'.
        # When 'isolation_level' is not None, sqlite3 commits before each
        # savepoint; it's a bug. When it is None, savepoints don't make sense
        # because autocommit is enabled. The only exception is inside 'atomic'
        # blocks. To work around that bug, on SQLite, 'atomic' starts a
        # transaction explicitly rather than simply disable autocommit.
        return self.features.uses_savepoints and self.in_atomic_block
    def _set_autocommit(self, autocommit):
        if autocommit:
            level = None
        else:
            # sqlite3's internal default is ''. It's different from None.
            # See Modules/_sqlite/connection.c.
            level = ''
        # 'isolation_level' is a misleading API.
        # SQLite always runs at the SERIALIZABLE isolation level.
        with self.wrap_database_errors:
            self.connection.isolation_level = level
    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.
        Raises an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provides detailed information about the
        invalid reference in the error message.
        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                    column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0], table_name, column_name, bad_row[1],
                        referenced_table_name, referenced_column_name))
    def is_usable(self):
        # An open SQLite connection never goes stale (no server to drop it).
        return True
    def _start_transaction_under_autocommit(self):
        """
        Start a transaction explicitly in autocommit mode.
        Staying in autocommit mode works around a bug of sqlite3 that breaks
        savepoints when autocommit is disabled.
        """
        self.cursor().execute("BEGIN")
    def is_in_memory_db(self, name):
        return name == ":memory:" or "mode=memory" in force_text(name)
# Matches a %s placeholder not escaped by a preceding % (i.e. not part of %%s).
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """

    def execute(self, query, params=None):
        if params is None:
            # No params: pass the query through untouched.
            return Database.Cursor.execute(self, query)
        return Database.Cursor.execute(self, self.convert_query(query), params)

    def executemany(self, query, param_list):
        return Database.Cursor.executemany(
            self, self.convert_query(query), param_list)

    def convert_query(self, query):
        # Swap un-escaped %s placeholders for ?, then collapse escaped %%.
        converted = FORMAT_QMARK_REGEX.sub('?', query)
        return converted.replace('%%', '%')
def _sqlite_date_extract(lookup_type, dt):
    """Extract one date component for the django_date_extract SQL function."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (TypeError, ValueError):
        return None
    if lookup_type == 'week_day':
        # SQL convention: Sunday=1 .. Saturday=7.
        return (parsed.isoweekday() % 7) + 1
    return getattr(parsed, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    """Truncate a date string to year/month/day precision; None if unparseable."""
    try:
        value = backend_utils.typecast_timestamp(dt)
    except (TypeError, ValueError):
        return None
    if lookup_type == 'year':
        return "%i-01-01" % value.year
    if lookup_type == 'month':
        return "%i-%02i-01" % (value.year, value.month)
    if lookup_type == 'day':
        return "%i-%02i-%02i" % (value.year, value.month, value.day)
    # Unknown lookup types fall through to None, as before.
def _sqlite_datetime_parse(dt, tzname):
    """Parse a datetime string, localizing it to *tzname* when given."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (TypeError, ValueError):
        return None
    if tzname is None:
        return parsed
    return timezone.localtime(parsed, pytz.timezone(tzname))
def _sqlite_datetime_cast_date(dt, tzname):
    """Convert a datetime to an ISO date string in the given timezone."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.date().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
    """Extract one datetime component after timezone conversion."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    if lookup_type != 'week_day':
        return getattr(parsed, lookup_type)
    # SQL convention: Sunday=1 .. Saturday=7.
    return (parsed.isoweekday() % 7) + 1
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    """Truncate a datetime string to the precision named by *lookup_type*."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    # Template and the number of leading components each precision keeps.
    precisions = {
        'year': ("%i-01-01 00:00:00", 1),
        'month': ("%i-%02i-01 00:00:00", 2),
        'day': ("%i-%02i-%02i 00:00:00", 3),
        'hour': ("%i-%02i-%02i %02i:00:00", 4),
        'minute': ("%i-%02i-%02i %02i:%02i:00", 5),
        'second': ("%i-%02i-%02i %02i:%02i:%02i", 6),
    }
    if lookup_type not in precisions:
        return None
    template, count = precisions[lookup_type]
    components = (parsed.year, parsed.month, parsed.day,
                  parsed.hour, parsed.minute, parsed.second)
    return template % components[:count]
def _sqlite_time_extract(lookup_type, dt):
    """Extract one component from a time string; None if unparseable."""
    if dt is None:
        return None
    try:
        value = backend_utils.typecast_time(dt)
    except (TypeError, ValueError):
        return None
    return getattr(value, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
    """
    LHS and RHS can be either:
    - An integer number of microseconds
    - A string representing a timedelta object
    - A string representing a datetime
    """
    # |conn| carries the operator ('+' or '-'), not a connection.
    try:
        if isinstance(lhs, six.integer_types):
            # Microseconds -> fractional-seconds string so parse_duration works.
            lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
        real_lhs = parse_duration(lhs)
        if real_lhs is None:
            # Not a duration string; must be a date/datetime string.
            real_lhs = backend_utils.typecast_timestamp(lhs)
        if isinstance(rhs, six.integer_types):
            rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
        real_rhs = parse_duration(rhs)
        if real_rhs is None:
            real_rhs = backend_utils.typecast_timestamp(rhs)
        if conn.strip() == '+':
            out = real_lhs + real_rhs
        else:
            out = real_lhs - real_rhs
    except (ValueError, TypeError):
        # Unparseable operands yield SQL NULL rather than an error.
        return None
    # typecast_timestamp returns a date or a datetime without timezone.
    # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
    return str(out)
def _sqlite_time_diff(lhs, rhs):
    """Return the difference between two time strings, in microseconds."""
    def as_microseconds(value):
        t = backend_utils.typecast_time(value)
        return ((t.hour * 60 + t.minute) * 60 + t.second) * 1000000 + t.microsecond
    return as_microseconds(lhs) - as_microseconds(rhs)
def _sqlite_timestamp_diff(lhs, rhs):
    """Return the difference between two timestamps, in microseconds."""
    delta = (backend_utils.typecast_timestamp(lhs) -
             backend_utils.typecast_timestamp(rhs))
    return delta.total_seconds() * 1000000
def _sqlite_regexp(re_pattern, re_string):
    """REGEXP implementation for SQLite: True iff the pattern matches."""
    if re_string is None:
        # NULL never matches.
        return False
    return bool(re.search(re_pattern, force_text(re_string)))
def _sqlite_power(x, y):
return x ** y
| {
"content_hash": "9d4ee7329539e7afec8f5cd67b6ee02c",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 117,
"avg_line_length": 39.93052631578947,
"alnum_prop": 0.62993620498761,
"repo_name": "gitaarik/django",
"id": "2330a98a5330fefdfa69833b1f8641632dd9a98a",
"size": "18967",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/db/backends/sqlite3/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52372"
},
{
"name": "HTML",
"bytes": "170531"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11546984"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import os
import pytest
from compose.cli.command import get_config_path_from_options
from compose.config.environment import Environment
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
class TestGetConfigPathFromOptions:
    """Unit tests for get_config_path_from_options()."""

    @staticmethod
    def _env():
        # Build the environment *after* any os.environ patching in the caller.
        return Environment.from_env_file('.')

    def test_path_from_options(self):
        expected = ['one.yml', 'two.yml']
        assert get_config_path_from_options({'--file': expected}, self._env()) == expected

    def test_single_path_from_env(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml'
            assert get_config_path_from_options({}, self._env()) == ['one.yml']

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix separator')
    def test_multiple_path_from_env(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml:two.yml'
            assert get_config_path_from_options({}, self._env()) == ['one.yml', 'two.yml']

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows separator')
    def test_multiple_path_from_env_windows(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml;two.yml'
            assert get_config_path_from_options({}, self._env()) == ['one.yml', 'two.yml']

    def test_multiple_path_from_env_custom_separator(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_PATH_SEPARATOR'] = '^'
            os.environ['COMPOSE_FILE'] = 'c:\\one.yml^.\\semi;colon.yml'
            assert get_config_path_from_options({}, self._env()) == ['c:\\one.yml', '.\\semi;colon.yml']

    def test_no_path(self):
        assert not get_config_path_from_options({}, self._env())

    def test_unicode_path_from_options(self):
        byte_paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
        opts = {'--file': byte_paths}
        assert get_config_path_from_options(opts, self._env()) == ['就吃饭/docker-compose.yml']
| {
"content_hash": "9e6f78e786c3dfc3c16ee70c6f606911",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 104,
"avg_line_length": 43.2962962962963,
"alnum_prop": 0.6372968349016254,
"repo_name": "thaJeztah/compose",
"id": "60638864c37107e4f26880707da68e8b96c5e051",
"size": "2344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/cli/command_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2920"
},
{
"name": "Groovy",
"bytes": "12229"
},
{
"name": "Makefile",
"bytes": "1573"
},
{
"name": "PowerShell",
"bytes": "7139"
},
{
"name": "Python",
"bytes": "1108081"
},
{
"name": "Shell",
"bytes": "34381"
}
],
"symlink_target": ""
} |
import logging
from datetime import datetime
from onadata.libs.utils.viewer_tools import get_client_ip
class Enum(object):
    """Simple enum-like wrapper: exposes keyword constants via attribute
    access, item access, and iteration over the values."""
    __name__ = "Enum"

    def __init__(self, **enums):
        self.enums = enums

    def __getattr__(self, item):
        # Raises KeyError (not AttributeError) for unknown names, matching
        # the original behavior that callers may rely on.
        return self.enums[item]

    def __getitem__(self, item):
        return self.__getattr__(item)

    def __iter__(self):
        # dict.itervalues() exists only on Python 2; iter(values()) yields
        # the same sequence on both Python 2 and Python 3.
        return iter(self.enums.values())
# Registry of audit-log action identifiers recorded by audit_log() below.
Actions = Enum(
    PROFILE_ACCESSED="profile-accessed",
    PUBLIC_PROFILE_ACCESSED="public-profile-accessed",
    PROFILE_SETTINGS_UPDATED="profile-settings-updated",
    USER_LOGIN="user-login",
    USER_LOGOUT="user-logout",
    USER_BULK_SUBMISSION="bulk-submissions-made",
    USER_FORMLIST_REQUESTED="formlist-requested",
    FORM_ACCESSED="form-accessed",
    FORM_PUBLISHED="form-published",
    FORM_UPDATED="form-updated",
    FORM_XLS_DOWNLOADED="form-xls-downloaded",
    FORM_XLS_UPDATED="form-xls-updated",
    FORM_DELETED="form-deleted",
    FORM_CLONED="form-cloned",
    FORM_XML_DOWNLOADED="form-xml-downloaded",
    FORM_JSON_DOWNLOADED="form-json-downloaded",
    FORM_PERMISSIONS_UPDATED="form-permissions-updated",
    FORM_ENTER_DATA_REQUESTED="form-enter-data-requested",
    FORM_MAP_VIEWED="form-map-viewed",
    FORM_DATA_VIEWED="form-data-viewed",
    EXPORT_CREATED="export-created",
    EXPORT_DOWNLOADED="export-downloaded",
    EXPORT_DELETED="export-deleted",
    EXPORT_LIST_REQUESTED="export-list-requested",
    SUBMISSION_CREATED="submission-created",
    SUBMISSION_UPDATED="submission-updated",
    SUBMISSION_DELETED="submission-deleted",
    SUBMISSION_ACCESSED="submission-accessed",
    SUBMISSION_EDIT_REQUESTED="submission-edit-requested",
    BAMBOO_LINK_CREATED="bamboo-link-created",
    BAMBOO_LINK_DELETED="bamboo-link-deleted",
    SMS_SUPPORT_ACTIVATED="sms-support-activated",
    SMS_SUPPORT_DEACTIVATED="sms-support-deactivated",
)
class AuditLogHandler(logging.Handler):
    """Logging handler that persists audit records via a storage model.

    The target model class is configured as a dotted path (e.g.
    "app.module.Model") and resolved lazily on every emit, so the handler can
    be configured before the model's module is importable.
    """

    def __init__(self, model=""):
        super(AuditLogHandler, self).__init__()
        # Dotted path to the model class used to persist records.
        self.model_name = model

    def _format(self, record):
        """Build a plain dict representation of *record* for storage."""
        data = {
            'action': record.formhub_action,
            'user': record.request_username,
            'account': record.account_username,
            'audit': {},
            'msg': record.msg,
            # save as python datetime object
            # to have mongo convert to ISO date and allow queries
            'created_on': datetime.utcfromtimestamp(record.created),
            'levelno': record.levelno,
            'levelname': record.levelname,
            'args': record.args,
            'funcName': record.funcName,
            'msecs': record.msecs,
            'relativeCreated': record.relativeCreated,
            'thread': record.thread,
            'name': record.name,
            'threadName': record.threadName,
            'exc_info': record.exc_info,
            'pathname': record.pathname,
            'exc_text': record.exc_text,
            'lineno': record.lineno,
            'process': record.process,
            'filename': record.filename,
            'module': record.module,
            'processName': record.processName
        }
        if hasattr(record, 'audit') and isinstance(record.audit, dict):
            data['audit'] = record.audit
        return data

    def emit(self, record):
        """Persist *record*; silently drop it if the model cannot be resolved."""
        data = self._format(record)
        # save to mongodb audit_log
        try:
            model = self.get_model(self.model_name)
        except (ImportError, AttributeError, ValueError):
            # Best-effort: a missing or invalid model path (previously hidden
            # by a bare "except:") drops the record rather than letting an
            # audit failure break the calling request.
            pass
        else:
            log_entry = model(data)
            log_entry.save()

    def get_model(self, name):
        """Resolve a dotted path like "pkg.module.Class" to the class object."""
        names = name.split('.')
        mod = __import__('.'.join(names[:-1]), fromlist=names[-1:])
        return getattr(mod, names[-1])
def audit_log(action, request_user, account_user, message, audit, request,
              level=logging.DEBUG):
    """
    Create a log message based on these params

    @param action: Action performed e.g. form-deleted
    @param request_user: User performing the action
    @param account_user: The formhub account the action was performed on
    @param message: The message to be displayed on the log
    @param audit: a dict of key/values of other info pertaining to the action
    e.g. form's id_string, submission uuid
    @param request: HTTP request the action originated from; used to record
    the client IP
    @param level: log level
    @return: None
    """
    logger = logging.getLogger("audit_logger")
    extra = {
        'formhub_action': action,
        # Fall back to str() for user objects with an empty username
        # (e.g. anonymous users).
        'request_username':
        request_user.username if request_user.username else str(request_user),
        'account_username':
        account_user.username if account_user.username else str(account_user),
        'client_ip': get_client_ip(request),
        'audit': audit
    }
    logger.log(level, message, extra=extra)
| {
"content_hash": "2ad2b40dc09cb2bdd334ac1d6409e549",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 34.941605839416056,
"alnum_prop": 0.6266973052015876,
"repo_name": "jomolinare/kobocat",
"id": "2ba75f5e8f7a9deb107e91e2f641e1ac523c8940",
"size": "4787",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "onadata/libs/utils/log.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "74590"
},
{
"name": "HTML",
"bytes": "248806"
},
{
"name": "JavaScript",
"bytes": "904686"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "2372182"
},
{
"name": "Shell",
"bytes": "10002"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateFile(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the UpdateFile Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(UpdateFile, self).__init__(temboo_session, '/Library/Box/Files/UpdateFile')

    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return UpdateFileInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in this Choreo's result-set type."""
        return UpdateFileResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap a running execution in this Choreo's execution type."""
        return UpdateFileChoreographyExecution(session, exec_id, path)
class UpdateFileInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the UpdateFile
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AccessToken(self, value):
        """AccessToken: (required, string) The access token retrieved during the OAuth2 process."""
        super(UpdateFileInputSet, self)._set_input('AccessToken', value)

    def set_AsUser(self, value):
        """AsUser: (optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users."""
        super(UpdateFileInputSet, self)._set_input('AsUser', value)

    def set_Fields(self, value):
        """Fields: (optional, string) A comma-separated list of fields to include in the response."""
        super(UpdateFileInputSet, self)._set_input('Fields', value)

    def set_FileID(self, value):
        """FileID: (required, string) The id of the file to update."""
        super(UpdateFileInputSet, self)._set_input('FileID', value)

    def set_FileObject(self, value):
        """FileObject: (required, json) A JSON object representing the new file information. See documentation for formatting examples."""
        super(UpdateFileInputSet, self)._set_input('FileObject', value)
class UpdateFileResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the UpdateFile Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): parameter name shadows the builtin str; kept unchanged
        # for interface compatibility with generated callers.
        return json.loads(str)

    def get_Response(self):
        """Retrieve the "Response" output: (json) The response from Box."""
        return self._output.get('Response', None)
class UpdateFileChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        """Wrap *response* in an UpdateFileResultSet."""
        return UpdateFileResultSet(response, path)
| {
"content_hash": "6a150947adde65fabfb9e525759908d3",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 182,
"avg_line_length": 41.36842105263158,
"alnum_prop": 0.6870229007633588,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "fbcc726f29db4e3fe105af1467e573693257d3dd",
"size": "4017",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/Box/Files/UpdateFile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.datashare import DataShareManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-datashare
# USAGE
python synchronization_settings_get.py
    Before running the sample, set the values of the client ID, tenant ID, and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more information about how to obtain these values, see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch one synchronization setting via the Data Share API and print it."""
    client = DataShareManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="433a8dfd-e5d5-4e77-ad86-90acdc75eb1a",
    )

    setting = client.synchronization_settings.get(
        resource_group_name="SampleResourceGroup",
        account_name="Account1",
        share_name="Share1",
        synchronization_setting_name="SynchronizationSetting1",
    )
    print(setting)


# x-ms-original-file: specification/datashare/resource-manager/Microsoft.DataShare/stable/2020-09-01/examples/SynchronizationSettings_Get.json
if __name__ == "__main__":
    main()
| {
"content_hash": "35d7c80472a063f0893985920f521814",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 142,
"avg_line_length": 34.94285714285714,
"alnum_prop": 0.7383483237939493,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7b32c5b8a441652415f811dfe54b4e649623d4f6",
"size": "1691",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/datashare/azure-mgmt-datashare/generated_samples/synchronization_settings_get.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os, sys, argparse, time, threading
import traceback
# The path to the ASP python files and tools
basepath = os.path.dirname(os.path.realpath(__file__)) # won't change, unlike syspath
pythonpath = os.path.abspath(basepath + '/../Python') # for dev ASP
libexecpath = os.path.abspath(basepath + '/../libexec') # for packaged ASP
binpath = os.path.abspath(basepath + '/../bin') # for packaged ASP
icebridgepath = os.path.abspath(basepath + '/../IceBridge') # IceBridge tools
toolspath = os.path.abspath(basepath + '/../Tools') # ASP Tools
# Prepend to Python path
sys.path.insert(0, basepath)
sys.path.insert(0, pythonpath)
sys.path.insert(0, libexecpath)
sys.path.insert(0, icebridgepath)
import icebridge_common
import asp_system_utils, asp_alg_utils, asp_geo_utils
asp_system_utils.verify_python_version_is_supported()
# Prepend to system PATH
os.environ["PATH"] = basepath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = pythonpath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = libexecpath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = icebridgepath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = toolspath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = binpath + os.pathsep + os.environ["PATH"]
def checkFileForFlight(path, site, date):
    '''Return True if the site/date pair is contained in the file'''
    with open(path, 'r') as f:
        for line in f:
            # Each valid line is exactly "SITE DATE".
            fields = line.split()
            if len(fields) != 2:
                print('Illegal input line is skipped: ' + line)
                continue
            if (fields[0] == site) and (fields[1] == date):
                return True
    return False
def processAndLog(command, logPath, line, deleteFolder=None):
    '''Run the command and log the result'''
    # NOTE(review): actual command execution is currently disabled (the
    # os.system calls are commented out and replaced by a DEBUG sleep); today
    # this function only prints the command and appends the log line.
    print(command)
    #os.system(command)
    time.sleep(2) # DEBUG
    # TODO: How to check for an error?

    # Append the site/date line to the log file on completion.
    with open(logPath, 'a') as f:
        f.write(line + '\n')
    print('Finished running: ' + command)

    if deleteFolder:
        print('Cleaning up folder: ' + deleteFolder)
        cleanup = 'rm -rf ' + deleteFolder
        #print(cleanup)
        #os.system(cleanup)
def main(argsIn):
    """Fetch and process a list of IceBridge flights.

    The fetch of each flight is overlapped with the processing of the
    previously fetched flight, using one worker thread per stage. Flights
    already listed in the success or failure log are skipped.

    @param argsIn: Command line argument list (see options below).
    @return: None. Progress is appended to the success/failure log files.
    """
    try:
        usage = '''label_images.py <options>'''

        parser = argparse.ArgumentParser(usage=usage)

        parser.add_argument("--input-file", dest="inputFile", required=True,
                            help="Path to a file containing 'SITE DATE' pairs, one per line, to be run.")

        parser.add_argument("--success-file", dest="successFile", required=True,
                            help="Log of completed flights.")

        parser.add_argument("--failure-file", dest="failureFile", required=True,
                            help="Log of failed flights.")

        # BUGFIX: type=int so a value given on the command line is parsed to
        # an integer; previously it stayed a string and broke the
        # "numProcessed >= options.limit" comparison below.
        parser.add_argument("--limit", dest="limit", default=4, type=int,
                            help="Don't process more than this many flights at once.")

        options = parser.parse_args(argsIn)

    except argparse.ArgumentError as msg:
        parser.error(msg)

    # This assumes that we already have the input data archived.
    FETCH_COMMAND = """python ~/repo/StereoPipeline/src/asp/IceBridge/pleiades_manager.py --node-type san --camera-calibration-folder /nobackup/smcmich1/icebridge/calib_files --reference-dem-folder /nobackup/smcmich1/icebridge/reference_dems/ --skip-convert --skip-archive-cameras --skip-batch-gen --skip-process --skip-blend --skip-ortho-gen --skip-check-outputs --skip-report --skip-archive-aligned-cameras --skip-archive-orthos --skip-archive-summary --skip-archive-run --base-dir /nobackup/smcmich1/icebridge"""

    # This assumes that we already have the flight fetched.
    PROCESS_COMMAND = """python ~/repo/StereoPipeline/src/asp/IceBridge/pleiades_manager.py --node-type san --camera-calibration-folder /nobackup/smcmich1/icebridge/calib_files --reference-dem-folder /nobackup/smcmich1/icebridge/reference_dems/ --skip-check-inputs --skip-fetch --skip-convert --skip-archive-cameras --skip-batch-gen --skip-process --skip-blend --skip-ortho-gen --skip-check-outputs --skip-report --skip-archive-aligned-cameras --skip-archive-orthos --skip-archive-summary --skip-archive-run --skip-validate --base-dir /nobackup/smcmich1/icebridge --generate-labels --archive-labels"""

    # Build up the list of flights to process
    tasks = []
    print(options.inputFile)
    with open(options.inputFile, 'r') as f:
        for line in f:
            # Parse the line
            parts = line.split()
            if len(parts) != 2:
                print('ERROR: Illegal input line is skipped: ' + line)
                continue
            site = parts[0]
            date = parts[1]

            # See if we already processed this flight
            if ( checkFileForFlight(options.successFile, site, date) or
                 checkFileForFlight(options.failureFile, site, date) ):
                print('This flight was already processed, skipping...')
                continue

            tasks.append((site, date))

    print('Finished creating the flight list.')

    # Loop through all of the flights to process them
    numProcessed = 0
    lastPair = None
    fetchThread = None
    processThread = None
    for (site, date) in tasks:

        idString = (' --site %s --yyyymmdd %s ' % (site, date))
        fetchCmd = FETCH_COMMAND + idString
        processCmd = PROCESS_COMMAND + idString

        # Launch the fetch job for this flight
        # TODO: Log to both files
        logLine = site+' '+date
        print('Launching FETCH job for ' + logLine)
        fetchThread = threading.Thread(target=processAndLog, args=(fetchCmd, options.successFile, logLine))
        fetchThread.start()

        # Launch the process job for the previous flight (its fetch finished
        # at the end of the previous iteration).
        if lastPair:
            if processThread:
                print('Waiting on the last processing job to complete...')
                processThread.join()
            logLine = lastPair[0]+' '+lastPair[1]
            print('Launching PROCESS job for ' + logLine)
            folder = '/nobackup/smcmich1/icebridge/data/'+lastPair[0]+'_'+lastPair[1]
            processThread = threading.Thread(target=processAndLog, args=(processCmd, options.successFile, logLine, None))
            processThread.start()

        if fetchThread:
            print('Waiting on the last fetch job to complete...')
            fetchThread.join()

        # This pair was fetched this iteration, will be processed next iteration.
        lastPair = (site, date)

        numProcessed += 1
        if numProcessed >= options.limit:
            print('Hit the limit of processed flights!')
            break

    # Process the data from the last fetch
    if lastPair:
        if processThread:
            print('Waiting on the last processing job to complete...')
            processThread.join()
        logLine = lastPair[0]+' '+lastPair[1]
        print('Launching PROCESS job for ' + logLine)
        folder = '/nobackup/smcmich1/icebridge/data/'+lastPair[0]+'_'+lastPair[1]
        processThread = threading.Thread(target=processAndLog, args=(processCmd, options.successFile, logLine, None))
        processThread.start()

    # Make sure everything is finished
    if fetchThread:
        print('Waiting on the last fetch job to complete...')
        fetchThread.join()
    if processThread:
        print('Waiting on the last processing job to complete...')
        processThread.join()

    print('Jobs finished.')

# Run main function if file used from shell
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| {
"content_hash": "938731fa38fbe4917e893fb5561720ef",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 617,
"avg_line_length": 39.0547263681592,
"alnum_prop": 0.6226751592356687,
"repo_name": "NeoGeographyToolkit/StereoPipeline",
"id": "ce9bab5abfc32bac86c5ba0a51cec2f80544a197",
"size": "8747",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/asp/IceBridge/run_multiple_flights.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "850"
},
{
"name": "C++",
"bytes": "4692744"
},
{
"name": "CMake",
"bytes": "59817"
},
{
"name": "Cuda",
"bytes": "3235"
},
{
"name": "GLSL",
"bytes": "830"
},
{
"name": "M4",
"bytes": "98882"
},
{
"name": "MATLAB",
"bytes": "25700"
},
{
"name": "Makefile",
"bytes": "11078"
},
{
"name": "Perl",
"bytes": "8080"
},
{
"name": "Python",
"bytes": "1096967"
},
{
"name": "Shell",
"bytes": "24742"
},
{
"name": "XSLT",
"bytes": "41773"
}
],
"symlink_target": ""
} |
import mysql.connector
from tests_group.group_helper import Group
from tests_contract.contact_helper import Contact
class DbFixture:
    """Thin wrapper around a MySQL connection for reading test fixtures."""

    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        self.connection = mysql.connector.connect(host=host, database=name, user=user, password=password)
        self.connection.autocommit = True

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()

    def get_group_list(self):
        """Return all groups from the group_list table."""
        cursor = self.connection.cursor()
        try:
            cursor.execute('select group_id, group_name, group_header, group_footer from group_list')
            return [Group(id=str(group_id), group_name=group_name,
                          group_header=group_header, group_footer=group_footer)
                    for (group_id, group_name, group_header, group_footer) in cursor]
        finally:
            cursor.close()

    def get_contact_list(self):
        """Return all non-deleted contacts from the addressbook table."""
        cursor = self.connection.cursor()
        try:
            cursor.execute('select id, firstname, lastname, address, home, mobile, work, email, email2, email3, '
                           'phone2 from addressbook where deprecated = "0000-00-00 00:00:00"')
            return [Contact(id=int(row_id), first_name=firstname, last_name=lastname,
                            home=home,
                            mobile=mobile, work=work, email1=email,
                            email2=email2, email3=email3, phone=phone2, address=address)
                    for (row_id, firstname, lastname, address, home, mobile, work,
                         email, email2, email3, phone2) in cursor]
        finally:
            cursor.close()
'''
def get_contact_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute('select id, firstname, middlename, lastname, nickname, title, company, address, home, '
'mobile, work, fax, email, email2, email3, homepage, address2, phone2, notes '
'from addressbook where deprecated = "0000-00-00 00:00:00"')
for row in cursor:
(id, firstname, middlename, lastname, nickname, company, title, address, home, mobile, work, fax,
email, email2, email3, homepage, address2, phone2, notes) = row
list.append(Contact(id = id, first_name=firstname, last_name=lastname,
middle_name=middlename, nickname=nickname,
title=title, company_name=company,
address_name=address, work=work,
fax=fax, home=home,
mobile=mobile, email1=email,
email2=email2, email3=email3, homepage=homepage, address=address2, phone=phone2,
notes=notes))
finally:
cursor.close()
return list''' | {
"content_hash": "52900d83263f997dd9e16d4829ed98e2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 114,
"avg_line_length": 42.647887323943664,
"alnum_prop": 0.5554821664464994,
"repo_name": "werbk/task-7.20",
"id": "e28a7bd7e9ff1a576e54998d8788acdd85c025b0",
"size": "3028",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fixture/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53699"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class HcreateComponent(Himesis):
    # NOTE: generated code — node indices, attribute values, and edge order
    # encode the rule graph and must not be changed by hand.
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule createComponent.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HcreateComponent, self).__init__(name='HcreateComponent', num_nodes=0, edges=[])
        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """createComponent"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'createComponent')
        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        # match class Module() node
        self.add_node()
        self.vs[3]["mm__"] = """Module"""
        self.vs[3]["attr1"] = """+"""
        # match_contains node for class Module()
        self.add_node()
        self.vs[4]["mm__"] = """match_contains"""
        # apply class SwCompToEcuMapping_component() node
        self.add_node()
        self.vs[5]["mm__"] = """SwCompToEcuMapping_component"""
        self.vs[5]["attr1"] = """1"""
        # apply_contains node for class SwCompToEcuMapping_component()
        self.add_node()
        self.vs[6]["mm__"] = """apply_contains"""
        # apply class ComponentPrototype() node
        self.add_node()
        self.vs[7]["mm__"] = """ComponentPrototype"""
        self.vs[7]["attr1"] = """1"""
        # apply_contains node for class ComponentPrototype()
        self.add_node()
        self.vs[8]["mm__"] = """apply_contains"""
        # apply association SwCompToEcuMapping_component--componentPrototype-->ComponentPrototype node
        self.add_node()
        self.vs[9]["attr1"] = """componentPrototype"""
        self.vs[9]["mm__"] = """directLink_T"""
        # Add the edges
        self.add_edges([
            (0,4),  # matchmodel -> match_contains
            (4,3),  # match_contains -> match_class Module()
            (1,6),  # applymodel -> apply_contains
            (6,5),  # apply_contains -> apply_class SwCompToEcuMapping_component()
            (1,8),  # applymodel -> apply_contains
            (8,7),  # apply_contains -> apply_class ComponentPrototype()
            (5,9),  # apply_class SwCompToEcuMapping_component() -> association componentPrototype
            (9,7),  # association componentPrototype -> apply_class ComponentPrototype()
            (0,2),  # matchmodel -> pairedwith
            (2,1)   # pairedwith -> applyModel
        ])
        # Add the attribute equations
        self["equations"] = [((5,'ApplyAttribute'),('constant','solveRef')), ((7,'shortName'),(3,'name')), ((7,'ApplyAttribute'),('constant','solveRef')), ]
| {
"content_hash": "6ef586694430d3846ea4b947c09b93f8",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 156,
"avg_line_length": 33.97872340425532,
"alnum_prop": 0.5203506574827802,
"repo_name": "levilucio/SyVOLT",
"id": "d805610b15bee9cfa02356f0aeb5ba8ae8ac2ac2",
"size": "3194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/transformation_from_ATL/HcreateComponent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import os
import tornado.httpserver
import tornado.process
from core.application import Application
import core.db as db
from core.settings import Settings
from core.util import get_servlet_urlspec
from servlets.api import APIServlet
import ui_modules
# Tornado application serving the push-manager HTTP API.
# NOTE(review): tornado.web is referenced here but only tornado.httpserver and
# tornado.process are imported above — this relies on the submodule being
# loaded transitively (e.g. by core.application); confirm.
api_application = tornado.web.Application(
    # Servlet dispatch rules
    [
        get_servlet_urlspec(APIServlet),
    ],
    # Server settings
    static_path = os.path.join(os.path.dirname(__file__), "static"),
    template_path = os.path.join(os.path.dirname(__file__), "templates"),
    gzip = True,
    login_url = "/login",
    cookie_secret = Settings['cookie_secret'],
    ui_modules = ui_modules,
    autoescape = None,
    )
class PushManagerAPIApp(Application):
    """Push-manager API application: binds the API port, forks worker
    processes, and serves api_application over HTTP."""

    name = "api"

    def start_services(self):
        """Start the HTTP server for the API on the configured address/port."""
        # BUGFIX: tornado.netutil is used below but was never imported in this
        # module; previously this relied on another import having loaded the
        # submodule transitively.
        import tornado.netutil

        # HTTP server (for api)
        sockets = tornado.netutil.bind_sockets(self.port, address=Settings['api_app']['servername'])
        # Fork workers *after* binding so all processes share the sockets.
        tornado.process.fork_processes(Settings['tornado']['num_workers'])
        server = tornado.httpserver.HTTPServer(api_application)
        server.add_sockets(sockets)
if __name__ == '__main__':
    app = PushManagerAPIApp()
    # Initialize the database before the application starts serving.
    db.init_db()
    app.run()
| {
"content_hash": "b742713000a353c8b05c55413a60e554",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 100,
"avg_line_length": 29.536585365853657,
"alnum_prop": 0.6837324525185797,
"repo_name": "pombredanne/pushmanager",
"id": "76b8f4f28de332ba1f7897bd60297404b7ecc0c9",
"size": "1229",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pushmanager_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Integrating embed tables
Revision ID: 1b8856078bd3
Revises: f2b0984f780
Create Date: 2016-10-28 10:58:42.229165
"""
# revision identifiers, used by Alembic.
revision = '1b8856078bd3'
down_revision = 'f2b0984f780'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the SiWaySAMLUsers, EmbedApplications and
    EmbedApplicationTranslation tables together with their indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('SiWaySAMLUsers',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.Unicode(length=255), nullable=False),
    sa.Column('uid', sa.Integer(), nullable=False),
    sa.Column('employee_type', sa.Unicode(length=255), nullable=False),
    sa.Column('full_name', sa.Unicode(length=255), nullable=False),
    sa.Column('short_name', sa.Unicode(length=255), nullable=False),
    sa.Column('school_name', sa.Unicode(length=255), nullable=False),
    sa.Column('group', sa.Unicode(length=255), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_SiWaySAMLUsers_email', 'SiWaySAMLUsers', ['email'], unique=True)
    op.create_table('EmbedApplications',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('url', sa.Unicode(length=255), nullable=False),
    sa.Column('name', sa.Unicode(length=100), nullable=False),
    sa.Column('owner_id', sa.Integer(), nullable=True),
    sa.Column('height', sa.Integer(), nullable=True),
    sa.Column('scale', sa.Integer(), nullable=True),
    sa.Column('identifier', sa.Unicode(length=36), nullable=False),
    sa.Column('creation', sa.DateTime(), nullable=False),
    sa.Column('last_update', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['owner_id'], ['lt_users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_EmbedApplications_creation', 'EmbedApplications', ['creation'], unique=False)
    op.create_index(u'ix_EmbedApplications_identifier', 'EmbedApplications', ['identifier'], unique=True)
    op.create_index(u'ix_EmbedApplications_last_update', 'EmbedApplications', ['last_update'], unique=False)
    op.create_index(u'ix_EmbedApplications_name', 'EmbedApplications', ['name'], unique=False)
    op.create_index(u'ix_EmbedApplications_url', 'EmbedApplications', ['url'], unique=False)
    op.create_table('EmbedApplicationTranslation',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('embed_application_id', sa.Integer(), nullable=True),
    sa.Column('url', sa.Unicode(length=255), nullable=False),
    sa.Column('language', sa.Unicode(length=10), nullable=False),
    sa.ForeignKeyConstraint(['embed_application_id'], ['EmbedApplications.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_EmbedApplicationTranslation_language', 'EmbedApplicationTranslation', ['language'], unique=False)
    op.create_index(u'ix_EmbedApplicationTranslation_url', 'EmbedApplicationTranslation', ['url'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop indexes first, then their tables, newest first."""
    ### commands auto generated by Alembic - please adjust! ###
    # Indexes must go before the table that owns them.
    for index_name in (u'ix_EmbedApplicationTranslation_url',
                       u'ix_EmbedApplicationTranslation_language'):
        op.drop_index(index_name, table_name='EmbedApplicationTranslation')
    op.drop_table('EmbedApplicationTranslation')
    for index_name in (u'ix_EmbedApplications_url',
                       u'ix_EmbedApplications_name',
                       u'ix_EmbedApplications_last_update',
                       u'ix_EmbedApplications_identifier',
                       u'ix_EmbedApplications_creation'):
        op.drop_index(index_name, table_name='EmbedApplications')
    op.drop_table('EmbedApplications')
    op.drop_index(u'ix_SiWaySAMLUsers_email', table_name='SiWaySAMLUsers')
    op.drop_table('SiWaySAMLUsers')
    ### end Alembic commands ###
| {
"content_hash": "c26691f307faadb06819c25a1cc2f14a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 122,
"avg_line_length": 51,
"alnum_prop": 0.7145098039215686,
"repo_name": "go-lab/labmanager",
"id": "323bceb1ccbf95876d9c64c35d39077ff5454a95",
"size": "3825",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "alembic/versions/1b8856078bd3_integrating_embed_tables.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1428"
},
{
"name": "HTML",
"bytes": "151587"
},
{
"name": "JavaScript",
"bytes": "71413"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "505879"
},
{
"name": "Shell",
"bytes": "1807"
}
],
"symlink_target": ""
} |
from flask import render_template, url_for, flash, redirect, session, request, g
from forms import LoginForm
from app import app
@app.route("/")
@app.route("/index")
@app.route("/home")
def index():
    """Serve the landing page at /, /index and /home."""
    context = {"name": "ehigie aito"}
    return render_template("index.html", **context)
@app.route("/login", methods = ["GET", "POST"])
def login():
    """Show the login form; on a valid POST, flash the username and go home."""
    form = LoginForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re)render the form.
        return render_template("login.html", form=form)
    flash(form.username.data)
    return redirect(url_for("index"))
| {
"content_hash": "addecd781b6ff8b940e9cc93de6c4cd0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 80,
"avg_line_length": 29.88235294117647,
"alnum_prop": 0.6692913385826772,
"repo_name": "aitoehigie/reddish",
"id": "29722e322e6282c835b580816c6aa4c62d74fecb",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "794"
},
{
"name": "Python",
"bytes": "1147"
}
],
"symlink_target": ""
} |
import unittest
from client import RebillEwayClient, HOSTED_TEST_URL
from suds import WebFault
# uncomment to enable debugging
#import logging
#logging.basicConfig(level=logging.DEBUG)
#logging.getLogger('suds.client').setLevel(logging.DEBUG)
class ClientTestCase(unittest.TestCase):
    """Integration tests for RebillEwayClient against eWAY's sandbox services.

    Exercises both the rebill (recurring billing) API -- customer and event
    CRUD plus queries -- and the hosted-payments API, using eWAY's published
    test credentials. These tests call the live test endpoints, so they
    require network access. (Python 2 source: uses print statements.)
    """

    def setUp(self):
        # Client bound to the rebill test endpoint (default URL).
        self.rebill_test = RebillEwayClient(test_mode=True, customer_id='87654321', username='test@eway.com.au', password='test123')
        # suds factory objects used as request payload templates.
        self.rebill_customer = self.rebill_test.client.factory.create("CustomerDetails")
        self.rebill_event = self.rebill_test.client.factory.create("RebillEventDetails")
        # Client bound to the hosted-payments test endpoint.
        self.hosted_test = RebillEwayClient(test_mode=True,
                                            customer_id='87654321',
                                            username='test@eway.com.au',
                                            password='test123',
                                            url=HOSTED_TEST_URL)
        self.hosted_customer = self.hosted_test.client.factory.create("CreditCard")

    def test_create_rebill_customer(self):
        # Create a rebill customer from a fully populated suds object.
        self.rebill_customer.CustomerTitle = "Mr."
        self.rebill_customer.CustomerFirstName = "Joe"
        self.rebill_customer.CustomerLastName = "Bloggs"
        self.rebill_customer.CustomerAddress = "test street"
        self.rebill_customer.CustomerSuburb = "Sydney"
        self.rebill_customer.CustomerState = "NSW"
        self.rebill_customer.CustomerCompany = "Test Company"
        self.rebill_customer.CustomerPostCode = "2000"
        self.rebill_customer.CustomerCountry = "au"
        self.rebill_customer.CustomerEmail = "test@eway.com.au"
        self.rebill_customer.CustomerFax = "0267720000"
        self.rebill_customer.CustomerPhone1 = "0267720000"
        self.rebill_customer.CustomerPhone2 = "0404085992"
        self.rebill_customer.CustomerRef = "REF100"
        self.rebill_customer.CustomerJobDesc = "test"
        self.rebill_customer.CustomerComments = "Now!"
        self.rebill_customer.CustomerURL = "http://www.google.com.au"
        new_rebill_customer = self.rebill_test.create_rebill_customer(self.rebill_customer)
        print "create rebill customer", new_rebill_customer
        self.assertEqual(new_rebill_customer.Result, "Success")

    def test_create_rebill_customer_with_kwargs(self):
        # Same creation as above, but via keyword arguments instead of a
        # pre-built suds object.
        new_rebill_customer_with_kwargs = self.rebill_test.create_rebill_customer(
            customerTitle="Mr.",
            customerFirstName="Joe",
            customerLastName="Bloggs",
            customerAddress="test street",
            customerSuburb="Sydney",
            customerState="NSW",
            customerCompany="Test Company",
            customerPostCode="2000",
            customerCountry="au",
            customerEmail="test@eway.com.au",
            customerFax="0267720000",
            customerPhone1="0267720000",
            customerPhone2="0404085992",
            customerRef="REF100",
            customerJobDesc="test",
            customerURL="http://www.google.com.au",
            customerComments="Now!",
        )
        print "create rebill customer with kwargs", new_rebill_customer_with_kwargs
        self.assertEqual(new_rebill_customer_with_kwargs.Result, "Success")

    def test_update_rebill_customer(self):
        # Update an existing rebill customer (hard-coded sandbox id).
        updated_rebill_customer = self.rebill_test.update_rebill_customer(
            RebillCustomerID="17609",
            customerTitle="Mr.",
            customerFirstName="Joe",
            customerLastName="Bloggs",
            customerAddress="test street",
            customerSuburb="Sydney",
            customerState="NSW",
            customerCompany="Test Company",
            customerPostCode="2000",
            customerCountry="au",
            customerEmail="test@eway.com.au",
            customerFax="0267720000",
            customerPhone1="0267720000",
            customerPhone2="0404085992",
            customerRef="REF100",
            customerJobDesc="test",
            customerURL="http://www.google.com.au",
            customerComments="Now!",
        )
        print "update rebill customer", updated_rebill_customer
        self.assertEqual(updated_rebill_customer.Result, "Success")

    def test_delete_rebill_customer(self):
        deleted_rebill_customer = self.rebill_test.delete_rebill_customer("10292")
        print "delete rebill customer", deleted_rebill_customer
        self.assertEqual(deleted_rebill_customer.Result, "Success")

    def test_create_rebill_event(self):
        # Create a recurring billing event from a fully populated suds object.
        # NOTE(review): dates appear to be DD/MM/YYYY -- confirm against the
        # eWAY rebill API documentation.
        self.rebill_event.RebillCustomerID = "60001545"
        self.rebill_event.RebillID = ""
        self.rebill_event.RebillInvRef = "ref123"
        self.rebill_event.RebillInvDesc = "test event"
        self.rebill_event.RebillCCName = "test"
        self.rebill_event.RebillCCNumber = "4444333322221111"
        self.rebill_event.RebillCCExpMonth = "07"
        self.rebill_event.RebillCCExpYear = "12"
        self.rebill_event.RebillInitAmt = "100"
        self.rebill_event.RebillInitDate = "08/06/2011"
        self.rebill_event.RebillRecurAmt = "100"
        self.rebill_event.RebillStartDate = "09/06/2011"
        self.rebill_event.RebillInterval = "1"
        self.rebill_event.RebillIntervalType = "1"
        self.rebill_event.RebillEndDate = "08/07/2011"
        new_rebill_event = self.rebill_test.create_rebill_event(self.rebill_event)
        print "create rebill event", new_rebill_event
        self.assertEqual(new_rebill_event.Result, "Success")

    def test_create_rebill_event_with_kwargs(self):
        new_rebill_event_with_kwargs = self.rebill_test.create_rebill_event(
            RebillCustomerID="60001545",
            RebillInvRef="ref123",
            RebillInvDes="test",
            RebillCCName="test",
            RebillCCNumber="4444333322221111",
            RebillCCExpMonth="07",
            RebillCCExpYear="12",
            RebillInitAmt="100",
            RebillInitDate="08/06/2011",
            RebillRecurAmt="100",
            RebillStartDate="09/06/2011",
            RebillInterval="1",
            RebillIntervalType="1",
            RebillEndDate="08/07/2011"
        )
        print "create rebill event with kwargs", new_rebill_event_with_kwargs
        self.assertEqual(new_rebill_event_with_kwargs.Result, "Success")

    def test_update_rebill_event(self):
        updated_rebill_event = self.rebill_test.update_rebill_event(
            RebillCustomerID="60001545",
            RebillID="80001208",
            RebillInvRef="ref123",
            RebillInvDes="test",
            RebillCCName="test",
            RebillCCNumber="4444333322221111",
            RebillCCExpMonth="07",
            RebillCCExpYear="12",
            RebillInitAmt="100",
            RebillInitDate="08/06/2011",
            RebillRecurAmt="100",
            RebillStartDate="09/06/2011",
            RebillInterval="1",
            RebillIntervalType="1",
            RebillEndDate="08/07/2011"
        )
        print "update rebill event", updated_rebill_event
        self.assertEqual(updated_rebill_event.Result, "Success")

    def test_delete_rebill_event(self):
        deleted_rebill_event = self.rebill_test.delete_rebill_event("10292", "80001208")
        print "delete rebill event", deleted_rebill_event
        self.assertEqual(deleted_rebill_event.Result, "Success")

    # Query endpoints: these only check that *something* came back.
    def test_query_next_transaction(self):
        query_next_transaction_result = self.rebill_test.query_next_transaction("60001545", "80001227")
        print "test_query_next_transaction", query_next_transaction_result
        self.assertFalse(query_next_transaction_result == None)

    def test_query_rebill_customer(self):
        query_rebill_customer_result = self.rebill_test.query_rebill_customer("60001545")
        print "test_query_rebill_customer", query_rebill_customer_result
        self.assertFalse(query_rebill_customer_result == None)

    def test_query_rebill_event(self):
        query_rebill_result = self.rebill_test.query_rebill_event("60001545", "80001227")
        print "test_query_rebill_event", query_rebill_result
        self.assertFalse(query_rebill_result == None)

    def test_query_transactions(self):
        query_transactions_result = self.rebill_test.query_transactions("60001545", "80001208")
        print "test_query_transactions", query_transactions_result
        self.assertFalse(query_transactions_result == None)

    def test_create_hosted_customer(self):
        # Hosted-payments API: create a stored (tokenised) customer.
        self.hosted_customer.Title = "Mr."
        self.hosted_customer.FirstName = "Joe"
        self.hosted_customer.LastName = "Bloggs"
        self.hosted_customer.Address = "test street"
        self.hosted_customer.Suburb = "Sydney"
        self.hosted_customer.State = "NSW"
        self.hosted_customer.Company = "Test Company"
        self.hosted_customer.PostCode = "2000"
        self.hosted_customer.Country = "au"
        self.hosted_customer.Email = "test@eway.com.au"
        self.hosted_customer.Fax = "0267720000"
        self.hosted_customer.Phone = "0267720000"
        self.hosted_customer.Mobile = "0404085992"
        self.hosted_customer.CustomerRef = "REF100"
        self.hosted_customer.JobDesc = "test"
        self.hosted_customer.Comments = "Now!"
        self.hosted_customer.URL = "http://www.google.com.au"
        self.hosted_customer.CCNumber = "4444333322221111"
        self.hosted_customer.CCNameOnCard = "test"
        self.hosted_customer.CCExpiryMonth = "07"
        self.hosted_customer.CCExpiryYear = "12"
        new_hosted_customer_id = self.hosted_test.create_hosted_customer(self.hosted_customer)
        print "create new hosted customer", new_hosted_customer_id
        # Failure mode is a WebFault rather than a Result field here.
        self.assertFalse(isinstance(new_hosted_customer_id, WebFault))

    def test_create_hosted_customer_with_kwargs(self):
        new_hosted_customer_id = self.hosted_test.create_hosted_customer(
            Title="Mr.",
            FirstName="Joe",
            LastName="Bloggs",
            Address="test street",
            Suburb="Sydney",
            State="NSW",
            Company="Test Company",
            PostCode="2000",
            Country="au",
            Email="test@eway.com.au",
            Fax="0267720000",
            Phone="0267720000",
            Mobile="0404085992",
            CustomerRef="REF100",
            JobDesc="test",
            Comments="Now!",
            URL="http://www.google.com.au",
            CCNumber="4444333322221111",
            CCNameOnCard="test",
            CCExpiryMonth="07",
            CCExpiryYear="12"
        )
        print "create new hosted customer with kwargs", new_hosted_customer_id
        self.assertFalse(isinstance(new_hosted_customer_id, WebFault))

    def test_update_hosted_customer(self):
        updated_hosted_customer = self.hosted_test.update_hosted_customer(
            managedCustomerID="9876543211000",
            Title="Mr.",
            FirstName="Joe",
            LastName="Bloggs",
            Address="test street",
            Suburb="Sydney",
            State="NSW",
            Company="Test Company",
            PostCode="2000",
            Country="au",
            Email="test@eway.com.au",
            Fax="0267720000",
            Phone="0267720000",
            Mobile="0404085992",
            CustomerRef="REF100",
            JobDesc="test",
            Comments="Now!",
            URL="http://www.google.com.au",
            CCNumber="4444333322221111",
            CCNameOnCard="test",
            CCExpiryMonth="07",
            CCExpiryYear="12"
        )
        print "update hosted customer", updated_hosted_customer
        self.assertTrue(updated_hosted_customer)

    def test_process_payment(self):
        # Charge a stored hosted customer: (id, amount, invoice ref, desc).
        payment_result = self.hosted_test.process_payment("9876543211000", "100", "test", "test")
        print "test_process_payment", payment_result
        self.assertFalse(isinstance(payment_result, WebFault))

    def test_query_customer(self):
        query_result = self.hosted_test.query_customer("9876543211000")
        print "test_query_customer", query_result
        self.assertFalse(query_result == None)

    def test_query_customer_by_reference(self):
        ref_result = self.hosted_test.query_customer_by_reference("customer reference")
        print "test_query_customer_by_reference", ref_result
        self.assertFalse(ref_result == None)

    def test_query_payment(self):
        query_payment_result = self.hosted_test.query_payment("9876543211000")
        print "test_query_payment", query_payment_result
        self.assertFalse(query_payment_result == None)
if __name__ == '__main__':
unittest.main() | {
"content_hash": "936f736d0f74af3a3b8b08f03e0ab81a",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 190,
"avg_line_length": 71.16724738675958,
"alnum_prop": 0.38673194614443085,
"repo_name": "SimpleTax/merchant",
"id": "207870afca75138f5ea765bd561f928d426cb186",
"size": "20425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "billing/gateways/eway_gateway/eway_api/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9029"
},
{
"name": "Python",
"bytes": "270234"
}
],
"symlink_target": ""
} |
from flask import Flask

# Creates our application.
app = Flask(__name__)
# Development setting; disable before deploying to production.
app.debug = True

# DATABASE SETTINGS

# Imported at the bottom to avoid a circular import: `views` imports `app`
# from this module.
from app import views
| {
"content_hash": "9ebcfbeac448a1af3775baf98ea62a41",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 26,
"avg_line_length": 15,
"alnum_prop": 0.7333333333333333,
"repo_name": "vgm64/worklunch",
"id": "a7aca5447d66a0788eb415c13d092b62c26757fb",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worklunch/app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "551"
},
{
"name": "Python",
"bytes": "1890"
}
],
"symlink_target": ""
} |
"""
Function helpers to do stuff on svg files.
"""
import os
import logging
from docstamp.commands import call_command, which, check_command
import svgutils.transform as sg
# Module-level logger shared by the helpers below.
log = logging.getLogger(__name__)
def replace_chars_for_svg_code(svg_content):
    """ Replace known special characters with their SVG/XML entities.

    Parameters
    ----------
    svg_content: str

    Returns
    -------
    corrected_svg: str
        Corrected SVG content
    """
    # BUG FIX: the replacement table had degenerated into identity mappings
    # (e.g. ('&', '&')), turning this function into a no-op. Restore the
    # actual XML entities. '&' must be escaped FIRST, otherwise the '&'
    # introduced by the other entities would be escaped a second time.
    svg_char = [
        ('&', '&amp;'),
        ('>', '&gt;'),
        ('<', '&lt;'),
        ('"', '&quot;'),
    ]
    result = svg_content
    for c, entity in svg_char:
        result = result.replace(c, entity)
    return result
def _check_svg_file(svg_file):
    """ Return a svgutils SVG object for `svg_file`.

    A path string is parsed from disk; an already-parsed svgutils object is
    returned unchanged.

    Parameters
    ----------
    svg_file: str or svgutils.transform.SVGFigure object
        If a `str`: path to a '.svg' file,
        otherwise a svgutils svg object is expected.

    Returns
    -------
    svgutils svg object

    Raises
    ------
    Exception if the file cannot be read; ValueError for any other type.
    """
    if isinstance(svg_file, sg.SVGFigure):
        return svg_file

    if isinstance(svg_file, str):
        try:
            return sg.fromfile(svg_file)
        except Exception as exc:
            raise Exception('Error reading svg file {}.'.format(svg_file)) from exc

    raise ValueError('Expected `svg_file` to be `str` or `svgutils.SVG`, got {}.'.format(type(svg_file)))
def merge_svg_files(svg_file1, svg_file2, x_coord, y_coord, scale=1):
    """ Merge `svg_file2` into `svg_file1` at position (`x_coord`, `y_coord`)
    with the given `scale`.

    Parameters
    ----------
    svg_file1: str or svgutils svg document object
        Path to a '.svg' file.
    svg_file2: str or svgutils svg document object
        Path to a '.svg' file.
    x_coord: float
        Horizontal axis position of the `svg_file2` content.
    y_coord: float
        Vertical axis position of the `svg_file2` content.
    scale: float
        Scale to apply to `svg_file2` content.

    Returns
    -------
    The `svg_file1` svgutils object with the content of `svg_file2` appended.
    """
    base_svg = _check_svg_file(svg_file1)
    overlay_root = _check_svg_file(svg_file2).getroot()

    base_svg.append([overlay_root])
    # Position/scale the appended content after insertion.
    overlay_root.moveto(x_coord, y_coord, scale=scale)
    return base_svg
def rsvg_export(input_file, output_file, dpi=90, rsvg_binpath=None):
    """ Convert an SVG file to PDF by calling `rsvg-convert` (unicode-safe).

    Parameters
    ----------
    input_file: str
        Path to the input file
    output_file: str
        Path to the output file
    dpi: int
        Output resolution, applied to both axes.
    rsvg_binpath: str
        Path to the `rsvg-convert` command; looked up on PATH when None.

    Returns
    -------
    return_value
        Command call return value
    """
    if not os.path.exists(input_file):
        log.error('File {} not found.'.format(input_file))
        raise IOError((0, 'File not found.', input_file))

    if rsvg_binpath is None:
        rsvg_binpath = which('rsvg-convert')
    check_command(rsvg_binpath)

    arguments = [
        "-f pdf",
        "-o {}".format(output_file),
        "--dpi-x {}".format(dpi),
        "--dpi-y {}".format(dpi),
        input_file,
    ]
    return call_command(rsvg_binpath, arguments)
| {
"content_hash": "7972291f5008821f442082bd5c77746f",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 105,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.593892694063927,
"repo_name": "PythonSanSebastian/docstamp",
"id": "f40869589a3583a0e5190b90a538900d06efe49d",
"size": "3504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docstamp/svg_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2027"
},
{
"name": "Python",
"bytes": "59634"
}
],
"symlink_target": ""
} |
"""Base class for request handlers that display charts."""
import json
from dashboard import layered_cache
from dashboard.common import request_handler
from dashboard.common import namespaced_stored_object
# The revision info (stored in datastore) is a dict mapping of revision type,
# which should be a string starting with "r_", to a dict of properties for
# that revision, including "name" and "url".
# Key used to fetch that dict via namespaced_stored_object.Get below.
_REVISION_INFO_KEY = 'revision_info'
class ChartHandler(request_handler.RequestHandler):
  """Base class for requests which display a chart."""

  def RenderHtml(self, template_file, template_values, status=200):
    """Fills in template values for pages that show charts."""
    template_values.update(self._GetChartValues())
    # Serialize revision_info so templates can embed it as a JSON string.
    revision_info = template_values['revision_info']
    template_values['revision_info'] = json.dumps(revision_info)
    return super(ChartHandler, self).RenderHtml(
        template_file, template_values, status)

  def GetDynamicVariables(self, template_values, request_path=None):
    """Adds chart-related values to template_values in place."""
    chart_values = self._GetChartValues()
    template_values.update(chart_values)
    super(ChartHandler, self).GetDynamicVariables(
        template_values, request_path)

  def _GetChartValues(self):
    """Returns the values shared by every chart page."""
    values = {}
    values['revision_info'] = (
        namespaced_stored_object.Get(_REVISION_INFO_KEY) or {})
    values['warning_message'] = layered_cache.Get('warning_message')
    values['warning_bug'] = layered_cache.Get('warning_bug')
    return values
| {
"content_hash": "7a6dade41a1b08ac9e24c9a38942fcdb",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 80,
"avg_line_length": 38.583333333333336,
"alnum_prop": 0.7249820014398848,
"repo_name": "benschmaus/catapult",
"id": "c90a1534400f5d022c0b7b52d396dfbb9cf9a596",
"size": "1552",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dashboard/dashboard/chart_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43486"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "58279"
},
{
"name": "HTML",
"bytes": "11801772"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6141932"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
'''
website
:copyright: (c) 2013-2015 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
import os
from flask.helpers import send_from_directory
from trytond.model import ModelSQL, fields
from trytond.pool import Pool, PoolMeta
from nereid import current_app, route, render_template, request, jsonify
from trytond.pyson import Eval, Not
# Python 2 module-level metaclass: registers the classes below in the
# Tryton pool.
__metaclass__ = PoolMeta
__all__ = ['WebShop', 'BannerCategory', 'Banner', 'Article', 'Website']

#: Get the static folder. The static folder also
#: goes into the site packages
STATIC_FOLDER = os.path.join(
    os.path.abspath(
        os.path.dirname(__file__)
    ), 'static'
)
class WebShop(ModelSQL):
    "website"
    __name__ = "nereid.webshop"

    # NOTE(review): declared @classmethod but the first parameter is named
    # `self` -- it receives the class object. Works, but the name is
    # misleading; confirm before renaming since the route decorator wraps it.
    @classmethod
    @route("/static-webshop/<path:filename>", methods=["GET"])
    def send_static_file(self, filename):
        """Function used internally to send static files from the static
        folder to the browser.
        """
        cache_timeout = current_app.get_send_file_max_age(filename)
        return send_from_directory(
            STATIC_FOLDER, filename,
            cache_timeout=cache_timeout
        )
class BannerCategory:
    """Collection of related Banners"""
    __name__ = 'nereid.cms.banner.category'

    @staticmethod
    def check_xml_record(records, values):
        # Permit editing of records created from XML data (Tryton normally
        # forbids modifying XML-created records); see Article for rationale.
        return True
class Banner:
    """Banner for CMS"""
    __name__ = 'nereid.cms.banner'

    @staticmethod
    def check_xml_record(records, values):
        # Permit editing of records created from XML data (Tryton normally
        # forbids modifying XML-created records).
        return True
class Article:
    "CMS Articles"
    __name__ = 'nereid.cms.article'

    @staticmethod
    def check_xml_record(records, values):
        """The webshop module creates a bunch of commonly used articles on
        webshops. Since tryton does not allow records created via XML to be
        edited, this method explicitly allows users to modify the articles
        created by the module.
        """
        return True
class Website:
    "Nereid Website"
    __name__ = 'nereid.website'

    # Root menu item from which the site's CMS navigation is built.
    cms_root_menu = fields.Many2One(
        'nereid.cms.menuitem', "CMS root menu", ondelete='RESTRICT',
        select=True,
    )
    # Optional site-wide banner message: the text is read-only unless the
    # toggle is on, and required when it is.
    show_site_message = fields.Boolean('Show Site Message')
    site_message = fields.Char(
        'Site Message',
        states={
            'readonly': Not(Eval('show_site_message', False)),
            'required': Eval('show_site_message', False)
        },
        depends=['show_site_message']
    )

    @classmethod
    @route('/sitemap', methods=["GET"])
    def render_sitemap(cls):
        """
        Return the sitemap.

        Renders 'sitemap.jinja' with the root product tree nodes.
        """
        Node = Pool().get('product.tree_node')

        # Search for nodes, sort by sequence.
        nodes = Node.search([
            ('parent', '=', None),
        ], order=[
            ('sequence', 'ASC'),
        ])

        return render_template('sitemap.jinja', nodes=nodes)

    @classmethod
    def auto_complete(cls, phrase):
        """
        Customizable method which returns a list of dictionaries
        according to the search query. The search service used can
        be modified in downstream modules.
        The front-end expects a jsonified list of dictionaries. For example,
        a downstream implementation of this method could return -:
        [
            ...
            {
                "value": "<suggestion string>"
            }, {
                "value": "Nexus 6"
            }
            ...
        ]
        """
        # Base implementation offers no suggestions; downstream modules
        # are expected to override.
        return []

    @classmethod
    @route('/search-auto-complete')
    def search_auto_complete(cls):
        """
        Handler for auto-completing search.

        Reads the partial query from request arg 'q' and returns
        {"results": [...]} as JSON.
        """
        return jsonify(results=cls.auto_complete(
            request.args.get('q', '')
        ))

    @classmethod
    @route('/search')
    def quick_search(cls):
        """
        Downstream implementation of quick_search().
        TODO:
        * Add article search.
        """
        return super(Website, cls).quick_search()
| {
"content_hash": "5e78b9a632d88e80623840b454d63d10",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 76,
"avg_line_length": 26.03921568627451,
"alnum_prop": 0.5893574297188755,
"repo_name": "aroraumang/nereid-webshop",
"id": "d07d5e23a656edc65b97e89796facedfe267689b",
"size": "4008",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "webshop.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52547"
},
{
"name": "Gettext Catalog",
"bytes": "80308"
},
{
"name": "HTML",
"bytes": "187456"
},
{
"name": "JavaScript",
"bytes": "28047"
},
{
"name": "Python",
"bytes": "112507"
}
],
"symlink_target": ""
} |
import re
import json
import base64
from time import sleep
from io import BytesIO
from googleapiclient import discovery
from googleapiclient.http import MediaIoBaseUpload
from googleapiclient.errors import HttpError
from google.oauth2.service_account import Credentials
from starthinker.config import UI_SERVICE
# Matches a string that is itself a JSON object (service-account credentials
# passed inline rather than as a file path).
RE_CREDENTIALS_JSON = re.compile(r'^\s*\{.*\}\s*$', re.DOTALL)
def _credentials_storage_service():
    """Build a Cloud Storage v1 client authorized via UI_SERVICE credentials."""
    if RE_CREDENTIALS_JSON.match(UI_SERVICE):
        # UI_SERVICE contains the service-account JSON inline.
        account_info = json.loads(UI_SERVICE)
        credentials = Credentials.from_service_account_info(account_info)
    else:
        # UI_SERVICE is a path to a service-account key file.
        credentials = Credentials.from_service_account_file(UI_SERVICE)
    return discovery.build('storage', 'v1', credentials=credentials)
def _credentials_retry(job, retries=3, wait=1):
try:
return job.execute()
except HttpError as e:
if e.resp.status == 429 and retries > 0:
sleep(wait)
return _credentials_retry(job, retries - 1, wait * 2)
else:
raise
def credentials_storage_get(cloud_path):
    """Fetch and decode credentials stored at 'bucket:object' in Cloud Storage."""
    bucket, filename = cloud_path.split(':', 1)
    download = _credentials_storage_service().objects().get_media(
        bucket=bucket, object=filename)
    payload = _credentials_retry(download)
    # Stored form is base64-encoded JSON.
    return json.loads(base64.b64decode(payload.decode()).decode())
def credentials_storage_put(cloud_path, credentials):
    """Write *credentials* (base64-encoded JSON) to 'bucket:object' in Cloud Storage."""
    bucket, filename = cloud_path.split(':', 1)
    encoded = base64.b64encode(json.dumps(credentials).encode())
    media = MediaIoBaseUpload(BytesIO(encoded), mimetype='text/json')
    upload = _credentials_storage_service().objects().insert(
        bucket=bucket, name=filename, media_body=media)
    _credentials_retry(upload)
| {
"content_hash": "80716a0aeff27580dee792c3589104d7",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 31.18,
"alnum_prop": 0.7357280307889673,
"repo_name": "google/starthinker",
"id": "32683e02c7a1c0eea545236aa22de81f80fd6a87",
"size": "2301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starthinker/util/auth_storage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "89775"
},
{
"name": "Jupyter Notebook",
"bytes": "1088964"
},
{
"name": "Python",
"bytes": "2356647"
},
{
"name": "Shell",
"bytes": "89492"
}
],
"symlink_target": ""
} |
import configparser
from datetime import datetime
import os
import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col, substring, lit, when, avg
from pyspark.sql import functions as F
from pyspark.sql.functions import input_file_name
import random
from pyspark.sql.types import *
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek, date_format
from pyspark import SparkContext, SparkConf, SQLContext
from google.cloud import storage
import sys
# Reading the arguments and storing them in variables
# Expected CLI: <project> <data_set> <bucket> <user_name>
# NOTE(review): `project` and `data_set` are read but never used below.
project=sys.argv[1]
data_set=sys.argv[2]
BUCKET_NAME=sys.argv[3]
user_name=sys.argv[4]
# Input 1: per-user parquet output of the previous (customer/threshold) step.
input_data1="gs://"+BUCKET_NAME+"/cell-tower-anomaly-detection/output_data/"+user_name+"_customer_threshold_join/part*"
# Input 2: raw telecom churn CSV.
input_data2="gs://"+BUCKET_NAME+"/cell-tower-anomaly-detection/01-datasets/telecom_customer_churn_data.csv"
output_data="gs://"+BUCKET_NAME+"/cell-tower-anomaly-detection/output_data"

# Building the Spark Session
spark =SparkSession.builder.appName("cell_tower_performance_dataset-exploration").getOrCreate()

#Reading the input datasets
custDF1 = spark.read.format("parquet") .option("header", True) .option("inferschema",True) .load(input_data1)
custDF1.printSchema()
telecomDF1 = spark.read.format("csv") .option("header", True) .option("inferschema",True) .load(input_data2)
telecomDF1.printSchema()

# Cleaning the telecome performance dataset
# Keep only the mean-usage/churn columns plus the customer id.
telecom_table = telecomDF1.selectExpr("roam_Mean","change_mou","drop_vce_Mean","drop_dat_Mean","blck_vce_Mean","blck_dat_Mean","plcd_vce_Mean","plcd_dat_Mean","comp_vce_Mean","comp_dat_Mean","peak_vce_Mean","peak_dat_Mean","mou_peav_Mean","mou_pead_Mean","opk_vce_Mean","opk_dat_Mean","mou_opkv_Mean","mou_opkd_Mean","drop_blk_Mean","callfwdv_Mean","callwait_Mean","churn","months","uniqsubs","actvsubs","area","dualband","forgntvl","Customer_ID")
# Strip a prefix from Customer_ID and keep only ids > 1000.
telecomDF2=telecom_table.withColumn('customer_ID', substring('Customer_ID', 4,7))
telecomDF2=telecomDF2.withColumn('customer_ID_index', telecomDF2.customer_ID>1000)
telecomDF2.createOrReplaceTempView("telecom")
telecomDF3 = spark.sql('''select * from telecom where customer_ID_index = 'true' ''')
telecomDF3=telecomDF3.drop(telecomDF3.customer_ID_index)
telecomDF3.createOrReplaceTempView("Telecome_Mean")
# Cap each customer at 6 rows using a ROW_NUMBER window.
telecomDF4 = spark.sql('''select * from (SELECT *, ROW_NUMBER() OVER(PARTITION BY customer_ID ORDER BY customer_ID) AS Rank FROM Telecome_Mean) as Service_Rank where Rank between 1 and 6 ''')
telecomDF4=telecomDF4.drop(telecomDF4.Rank)
telecomDF4.show(truncate=False)

# Joining the customer threshold data with the telecom performance data
custDF2 = custDF1.join(telecomDF4, custDF1.customerID == telecomDF4.customer_ID, "inner").drop(telecomDF4.customer_ID).drop(telecomDF4.churn)
custDF2.show(truncate=False)

#Writing the output data to BigQuery
# NOTE(review): the comment says BigQuery but the write below targets GCS
# parquet -- confirm which is intended.
custDF2.write.parquet(os.path.join(output_data, user_name+"_customer_threshold_service_join"), mode = "overwrite")
| {
"content_hash": "a1fe71c9ce345979ed279656ca8d869a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 447,
"avg_line_length": 55.351851851851855,
"alnum_prop": 0.7648042823686851,
"repo_name": "GoogleCloudPlatform/serverless-spark-workshop",
"id": "4a608c962ad82321e640ae8a480875a721f0b817",
"size": "3565",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cell-tower-anomaly-detection-dataproc-ui/00-scripts/customer_threshold_services_join.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "6886"
},
{
"name": "Jupyter Notebook",
"bytes": "37685"
},
{
"name": "Python",
"bytes": "156230"
},
{
"name": "Shell",
"bytes": "12868"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-jgcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from jgcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    Raises AssertionError when a matched object violates `expected`, or
    when no object matches `to_match` at all.
    """
    num_matched = 0
    for item in object_array:
        # Evaluate every filter key (no short-circuit) so a missing key
        # raises KeyError exactly as a direct lookup would.
        mismatched = [key for key, value in to_match.items() if item[key] != value]
        if mismatched:
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s"%(str(to_match)))
def run_test(nodes):
    """Exercise listtransactions on a two-node network.

    Expects nodes[0] and nodes[1] to be connected JSON-RPC proxies with
    spendable balances (set up by main()).
    """
    # Simple send, 0 to 1:
    txid = nodes[0].sendtoaddress(nodes[1].getnewaddress(), 0.1)
    sync_mempools(nodes)
    # Unconfirmed: sender lists a "send", receiver a "receive".
    check_array_result(nodes[0].listtransactions(),
                       {"txid":txid},
                       {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
    check_array_result(nodes[1].listtransactions(),
                       {"txid":txid},
                       {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
    # mine a block, confirmations should change:
    nodes[0].setgenerate(True, 1)
    sync_blocks(nodes)
    check_array_result(nodes[0].listtransactions(),
                       {"txid":txid},
                       {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
    check_array_result(nodes[1].listtransactions(),
                       {"txid":txid},
                       {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
    # send-to-self:
    # A self-send is listed twice: once as "send" and once as "receive".
    txid = nodes[0].sendtoaddress(nodes[0].getnewaddress(), 0.2)
    check_array_result(nodes[0].listtransactions(),
                       {"txid":txid, "category":"send"},
                       {"amount":Decimal("-0.2")})
    check_array_result(nodes[0].listtransactions(),
                       {"txid":txid, "category":"receive"},
                       {"amount":Decimal("0.2")})
    # sendmany from node1: twice to self, twice to node2:
    send_to = { nodes[0].getnewaddress() : 0.11, nodes[1].getnewaddress() : 0.22,
                nodes[0].getaccountaddress("from1") : 0.33, nodes[1].getaccountaddress("toself") : 0.44 }
    txid = nodes[1].sendmany("", send_to)
    sync_mempools(nodes)
    # Each of the four outputs must show up with the right category,
    # amount and (where set) account on the appropriate node.
    check_array_result(nodes[1].listtransactions(),
                       {"category":"send","amount":Decimal("-0.11")},
                       {"txid":txid} )
    check_array_result(nodes[0].listtransactions(),
                       {"category":"receive","amount":Decimal("0.11")},
                       {"txid":txid} )
    check_array_result(nodes[1].listtransactions(),
                       {"category":"send","amount":Decimal("-0.22")},
                       {"txid":txid} )
    check_array_result(nodes[1].listtransactions(),
                       {"category":"receive","amount":Decimal("0.22")},
                       {"txid":txid} )
    check_array_result(nodes[1].listtransactions(),
                       {"category":"send","amount":Decimal("-0.33")},
                       {"txid":txid} )
    check_array_result(nodes[0].listtransactions(),
                       {"category":"receive","amount":Decimal("0.33")},
                       {"txid":txid, "account" : "from1"} )
    check_array_result(nodes[1].listtransactions(),
                       {"category":"send","amount":Decimal("-0.44")},
                       {"txid":txid, "account" : ""} )
    check_array_result(nodes[1].listtransactions(),
                       {"category":"receive","amount":Decimal("0.44")},
                       {"txid":txid, "account" : "toself"} )
def main():
    """Entry point: stand up a two-node test chain, run the
    listtransactions tests, and tear everything down.

    Exits with status 0 on success, 1 on any failure.
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave jgcoinds and test.* datadir on exit or error")
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing jgcoind/jgcoin-cli (default: %default%)")
    parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    (options, args) = parser.parse_args()

    # Make the freshly built daemons visible on PATH before starting nodes.
    os.environ['PATH'] = options.srcdir + ":" + os.environ['PATH']

    check_json_precision()

    success = False
    nodes = []
    try:
        print("Initializing test directory " + options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)

        nodes = start_nodes(2, options.tmpdir)
        connect_nodes(nodes[1], 0)
        sync_blocks(nodes)

        run_test(nodes)

        success = True

    except AssertionError as e:
        # BUG FIX: exceptions have no ``.message`` attribute on Python 3
        # (and it was deprecated since 2.6); use str(e) instead.
        print("Assertion failed: " + str(e))
    except Exception as e:
        print("Unexpected exception caught during testing: " + str(e))
        traceback.print_tb(sys.exc_info()[2])

    if not options.nocleanup:
        print("Cleaning up")
        stop_nodes(nodes)
        wait_jgcoinds()
        shutil.rmtree(options.tmpdir)

    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)


if __name__ == '__main__':
    main()
| {
"content_hash": "f81ebffbef495c95cdacb351ff2ff121",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 105,
"avg_line_length": 39.04054054054054,
"alnum_prop": 0.5579785392869505,
"repo_name": "cptecdfi/jgcoins",
"id": "c24f497047b581baeef16aa5b091e43a51fb1e9e",
"size": "6073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/listtransactions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "174415"
},
{
"name": "C++",
"bytes": "2899586"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "754"
},
{
"name": "Objective-C",
"bytes": "6260"
},
{
"name": "Python",
"bytes": "103661"
},
{
"name": "Shell",
"bytes": "41817"
},
{
"name": "TypeScript",
"bytes": "10305448"
}
],
"symlink_target": ""
} |
"""Support for Roku binary sensors."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from rokuecp.models import Device as RokuDevice
from homeassistant.components.binary_sensor import (
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .entity import RokuEntity
@dataclass
class RokuBinarySensorEntityDescriptionMixin:
    """Mixin for required keys."""

    # Callable that extracts this sensor's boolean state from a RokuDevice;
    # may return None when the state is unknown.
    value_fn: Callable[[RokuDevice], bool | None]
# Combines the standard binary-sensor description fields with the required
# ``value_fn`` supplied by the mixin.
@dataclass
class RokuBinarySensorEntityDescription(
    BinarySensorEntityDescription, RokuBinarySensorEntityDescriptionMixin
):
    """Describes a Roku binary sensor entity."""
# Descriptions of every binary sensor exposed for a Roku device.  Each
# entry's ``value_fn`` reads the matching flag from the device info object.
BINARY_SENSORS: tuple[RokuBinarySensorEntityDescription, ...] = (
    RokuBinarySensorEntityDescription(
        key="headphones_connected",
        name="Headphones Connected",
        icon="mdi:headphones",
        value_fn=lambda device: device.info.headphones_connected,
    ),
    RokuBinarySensorEntityDescription(
        key="supports_airplay",
        name="Supports AirPlay",
        icon="mdi:cast-variant",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.info.supports_airplay,
    ),
    RokuBinarySensorEntityDescription(
        key="supports_ethernet",
        name="Supports Ethernet",
        icon="mdi:ethernet",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.info.ethernet_support,
    ),
    RokuBinarySensorEntityDescription(
        key="supports_find_remote",
        name="Supports Find Remote",
        icon="mdi:remote",
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda device: device.info.supports_find_remote,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Create the Roku binary sensor entities for a config entry."""
    coordinator = hass.data[DOMAIN][entry.entry_id]
    serial_number = coordinator.data.info.serial_number

    # One entity per static sensor description.
    sensors = [
        RokuBinarySensorEntity(
            device_id=serial_number,
            coordinator=coordinator,
            description=sensor_description,
        )
        for sensor_description in BINARY_SENSORS
    ]
    async_add_entities(sensors)
class RokuBinarySensorEntity(RokuEntity, BinarySensorEntity):
    """A single Roku binary sensor backed by the update coordinator."""

    entity_description: RokuBinarySensorEntityDescription

    @property
    def is_on(self) -> bool | None:
        """Evaluate the description's value function on the latest device data."""
        device = self.coordinator.data
        return self.entity_description.value_fn(device)
| {
"content_hash": "18bd7e6ad3c0294d825cf6b76c302a40",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 73,
"avg_line_length": 30.473118279569892,
"alnum_prop": 0.7155963302752294,
"repo_name": "toddeye/home-assistant",
"id": "5b6da073dd189fb1be35b0d2b783ecc6147802c1",
"size": "2834",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/roku/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""The security groups extension."""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from webob import exc
from nova.api.openstack.api_version_request \
import MAX_PROXY_API_SUPPORT_VERSION
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import security_groups as \
schema_security_groups
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova.network.security_group import openstack_driver
from nova.policies import security_groups as sg_policies
from nova.virt import netutils
LOG = logging.getLogger(__name__)  # module-level logger

ALIAS = 'os-security-groups'  # extension alias used for resource routing
ATTRIBUTE_NAME = 'security_groups'  # server-body attribute handled here
def _authorize_context(req):
    """Pull the nova request context and enforce the base policy on it."""
    ctxt = req.environ['nova.context']
    ctxt.can(sg_policies.BASE_POLICY_NAME)
    return ctxt
class SecurityGroupControllerBase(object):
    """Base class for Security Group controllers."""

    def __init__(self):
        # The configured backend may be nova-network or neutron; hand the
        # same driver to the compute API so both stay consistent.
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)

    def _format_security_group_rule(self, context, rule, group_rule_data=None):
        """Return a security group rule in desired API response format.

        If group_rule_data is passed in that is used rather than querying
        for it.

        Returns None when the rule's source group was deleted concurrently;
        callers are expected to skip such rules.
        """
        sg_rule = {}
        sg_rule['id'] = rule['id']
        sg_rule['parent_group_id'] = rule['parent_group_id']
        sg_rule['ip_protocol'] = rule['protocol']
        sg_rule['from_port'] = rule['from_port']
        sg_rule['to_port'] = rule['to_port']
        sg_rule['group'] = {}
        sg_rule['ip_range'] = {}
        if rule['group_id']:
            try:
                source_group = self.security_group_api.get(
                    context, id=rule['group_id'])
            except exception.SecurityGroupNotFound:
                # NOTE(arosen): There is a possible race condition that can
                # occur here if two api calls occur concurrently: one that
                # lists the security groups and another one that deletes a
                # security group rule that has a group_id before the
                # group_id is fetched. To handle this if
                # SecurityGroupNotFound is raised we return None instead
                # of the rule and the caller should ignore the rule.
                LOG.debug("Security Group ID %s does not exist",
                          rule['group_id'])
                return
            sg_rule['group'] = {'name': source_group.get('name'),
                                'tenant_id': source_group.get('project_id')}
        elif group_rule_data:
            sg_rule['group'] = group_rule_data
        else:
            sg_rule['ip_range'] = {'cidr': rule['cidr']}
        return sg_rule

    def _format_security_group(self, context, group):
        """Return a security group dict in API response format."""
        security_group = {}
        security_group['id'] = group['id']
        security_group['description'] = group['description']
        security_group['name'] = group['name']
        security_group['tenant_id'] = group['project_id']
        security_group['rules'] = []
        for rule in group['rules']:
            formatted_rule = self._format_security_group_rule(context, rule)
            # None means the rule's source group vanished (see above); skip.
            if formatted_rule:
                security_group['rules'] += [formatted_rule]
        return security_group

    def _from_body(self, body, key):
        """Extract *key* from a request body, raising 400 if it is absent."""
        if not body:
            raise exc.HTTPBadRequest(
                explanation=_("The request body can't be empty"))
        value = body.get(key, None)
        if value is None:
            raise exc.HTTPBadRequest(
                explanation=_("Missing parameter %s") % key)
        return value
class SecurityGroupController(SecurityGroupControllerBase, wsgi.Controller):
    """The Security group API controller for the OpenStack API."""

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @extensions.expected_errors((400, 404))
    def show(self, req, id):
        """Return data about the given security group.

        Maps invalid ids to 400 and missing groups to 404.
        """
        context = _authorize_context(req)
        try:
            id = self.security_group_api.validate_id(id)
            security_group = self.security_group_api.get(context, None, id,
                                                         map_exception=True)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())

        return {'security_group': self._format_security_group(context,
                                                              security_group)}

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @extensions.expected_errors((400, 404))
    @wsgi.response(202)
    def delete(self, req, id):
        """Delete a security group.  Responds 202 with no body on success."""
        context = _authorize_context(req)
        try:
            id = self.security_group_api.validate_id(id)
            security_group = self.security_group_api.get(context, None, id,
                                                         map_exception=True)
            self.security_group_api.destroy(context, security_group)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @extensions.expected_errors(404)
    def index(self, req):
        """Returns a list of security groups for the caller's project."""
        context = _authorize_context(req)

        search_opts = {}
        search_opts.update(req.GET)

        project_id = context.project_id
        raw_groups = self.security_group_api.list(context,
                                                  project=project_id,
                                                  search_opts=search_opts)

        limited_list = common.limited(raw_groups, req)
        result = [self._format_security_group(context, group)
                  for group in limited_list]

        # FIX: sorted() already returns a new list, so the previous
        # list(sorted(...)) wrapper was redundant (flake8-comprehensions C413).
        return {'security_groups':
                sorted(result, key=lambda k: (k['tenant_id'], k['name']))}

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @extensions.expected_errors((400, 403))
    def create(self, req, body):
        """Creates a new security group.

        Validation failures map to 400, quota violations to 403.
        """
        context = _authorize_context(req)

        security_group = self._from_body(body, 'security_group')

        group_name = security_group.get('name', None)
        group_description = security_group.get('description', None)

        try:
            self.security_group_api.validate_property(group_name, 'name', None)
            self.security_group_api.validate_property(group_description,
                                                      'description', None)
            group_ref = self.security_group_api.create_security_group(
                context, group_name, group_description)
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        except exception.SecurityGroupLimitExceeded as exp:
            raise exc.HTTPForbidden(explanation=exp.format_message())

        return {'security_group': self._format_security_group(context,
                                                              group_ref)}

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @extensions.expected_errors((400, 404))
    def update(self, req, id, body):
        """Update a security group's name and/or description."""
        context = _authorize_context(req)
        try:
            id = self.security_group_api.validate_id(id)
            security_group = self.security_group_api.get(context, None, id,
                                                         map_exception=True)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())

        security_group_data = self._from_body(body, 'security_group')
        group_name = security_group_data.get('name', None)
        group_description = security_group_data.get('description', None)

        try:
            self.security_group_api.validate_property(group_name, 'name', None)
            self.security_group_api.validate_property(group_description,
                                                      'description', None)
            group_ref = self.security_group_api.update_security_group(
                context, security_group, group_name, group_description)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())

        return {'security_group': self._format_security_group(context,
                                                              group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase,
                                   wsgi.Controller):
    """Controller for creating and deleting individual security group rules."""

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @extensions.expected_errors((400, 403, 404))
    def create(self, req, body):
        """Create a new rule inside an existing security group.

        Validation failures map to 400, missing groups to 404 and quota
        violations to 403.
        """
        context = _authorize_context(req)

        sg_rule = self._from_body(body, 'security_group_rule')

        try:
            parent_group_id = self.security_group_api.validate_id(
                sg_rule.get('parent_group_id'))
            security_group = self.security_group_api.get(context, None,
                                                         parent_group_id,
                                                         map_exception=True)
            new_rule = self._rule_args_to_dict(context,
                to_port=sg_rule.get('to_port'),
                from_port=sg_rule.get('from_port'),
                ip_protocol=sg_rule.get('ip_protocol'),
                cidr=sg_rule.get('cidr'),
                group_id=sg_rule.get('group_id'))
        except (exception.Invalid, exception.InvalidCidr) as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())

        if new_rule is None:
            msg = _("Not enough parameters to build a valid rule.")
            raise exc.HTTPBadRequest(explanation=msg)

        new_rule['parent_group_id'] = security_group['id']

        if 'cidr' in new_rule:
            # Reject a /0 prefix on anything other than the all-zero network.
            net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
            if net not in ('0.0.0.0', '::') and prefixlen == '0':
                msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
                raise exc.HTTPBadRequest(explanation=msg)

        group_rule_data = None
        try:
            if sg_rule.get('group_id'):
                # Pre-fetch the source group's data so the formatter does not
                # have to query for it again.
                source_group = self.security_group_api.get(
                    context, id=sg_rule['group_id'])
                group_rule_data = {'name': source_group.get('name'),
                                   'tenant_id': source_group.get('project_id')}

            security_group_rule = (
                self.security_group_api.create_security_group_rule(
                    context, security_group, new_rule))
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.SecurityGroupLimitExceeded as exp:
            raise exc.HTTPForbidden(explanation=exp.format_message())

        formatted_rule = self._format_security_group_rule(context,
                                                          security_group_rule,
                                                          group_rule_data)
        return {"security_group_rule": formatted_rule}

    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           ip_protocol=None, cidr=None, group_id=None):
        """Build an ingress-rule dict from request arguments.

        A group_id takes precedence over a cidr: when it is present the rule
        is a group-ingress rule, otherwise a cidr-ingress rule.
        """
        if group_id is not None:
            group_id = self.security_group_api.validate_id(group_id)
            # check if groupId exists
            self.security_group_api.get(context, id=group_id)
            return self.security_group_api.new_group_ingress_rule(
                group_id, ip_protocol, from_port, to_port)
        else:
            cidr = self.security_group_api.parse_cidr(cidr)
            return self.security_group_api.new_cidr_ingress_rule(
                cidr, ip_protocol, from_port, to_port)

    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @extensions.expected_errors((400, 404, 409))
    @wsgi.response(202)
    def delete(self, req, id):
        """Delete the rule with the given id from its parent group."""
        context = _authorize_context(req)

        try:
            id = self.security_group_api.validate_id(id)
            rule = self.security_group_api.get_rule(context, id)
            group_id = rule['parent_group_id']
            security_group = self.security_group_api.get(context, None,
                                                         group_id,
                                                         map_exception=True)
            self.security_group_api.remove_rules(context, security_group,
                                                 [rule['id']])
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.NoUniqueMatch as exp:
            raise exc.HTTPConflict(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
class ServerSecurityGroupController(SecurityGroupControllerBase):
    """Lists the security groups attached to a given server."""

    @extensions.expected_errors(404)
    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = _authorize_context(req)

        self.security_group_api.ensure_default(context)

        instance = common.get_instance(self.compute_api, context, server_id)

        try:
            groups = self.security_group_api.get_instance_security_groups(
                context, instance, True)
        except (exception.SecurityGroupNotFound,
                exception.InstanceNotFound) as exp:
            msg = exp.format_message()
            raise exc.HTTPNotFound(explanation=msg)

        result = [self._format_security_group(context, group)
                  for group in groups]

        # FIX: sorted() already returns a new list, so the previous
        # list(sorted(...)) wrapper was redundant (flake8-comprehensions C413).
        return {'security_groups':
                sorted(result, key=lambda k: (k['tenant_id'], k['name']))}
class SecurityGroupActionController(wsgi.Controller):
    """Server actions for adding/removing a security group by name."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, self).__init__(*args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)

    def _parse(self, body, action):
        """Extract and validate the security group name from an action body."""
        try:
            body = body[action]
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise exc.HTTPBadRequest(explanation=msg)

        return group_name

    def _invoke(self, method, context, id, group_name):
        # Resolve the instance and apply the add/remove operation to it.
        instance = common.get_instance(self.compute_api, context, id)
        method(context, instance, group_name)

    @extensions.expected_errors((400, 404, 409))
    @wsgi.response(202)
    @wsgi.action('addSecurityGroup')
    def _addSecurityGroup(self, req, id, body):
        """Handle the addSecurityGroup server action."""
        context = req.environ['nova.context']
        context.can(sg_policies.BASE_POLICY_NAME)

        group_name = self._parse(body, 'addSecurityGroup')
        try:
            return self._invoke(self.security_group_api.add_to_instance,
                                context, id, group_name)
        except (exception.SecurityGroupNotFound,
                exception.InstanceNotFound) as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.NoUniqueMatch as exp:
            raise exc.HTTPConflict(explanation=exp.format_message())
        except (exception.SecurityGroupCannotBeApplied,
                exception.SecurityGroupExistsForInstance) as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())

    @extensions.expected_errors((400, 404, 409))
    @wsgi.response(202)
    @wsgi.action('removeSecurityGroup')
    def _removeSecurityGroup(self, req, id, body):
        """Handle the removeSecurityGroup server action."""
        context = req.environ['nova.context']
        context.can(sg_policies.BASE_POLICY_NAME)

        group_name = self._parse(body, 'removeSecurityGroup')

        try:
            return self._invoke(self.security_group_api.remove_from_instance,
                                context, id, group_name)
        except (exception.SecurityGroupNotFound,
                exception.InstanceNotFound) as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.NoUniqueMatch as exp:
            raise exc.HTTPConflict(explanation=exp.format_message())
        except exception.SecurityGroupNotExistsForInstance as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
class SecurityGroupsOutputController(wsgi.Controller):
    """Decorates server show/create/detail responses with security groups."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())

    def _extend_servers(self, req, servers):
        """Attach security group info to each server dict in *servers*."""
        # TODO(arosen) this function should be refactored to reduce duplicate
        # code and use get_instance_security_groups instead of get_db_instance.
        # FIX: idiomatic emptiness check (was: `if not len(servers):`).
        if not servers:
            return
        key = "security_groups"
        context = req.environ['nova.context']
        if not context.can(sg_policies.BASE_POLICY_NAME, fatal=False):
            return

        if not openstack_driver.is_neutron_security_groups():
            for server in servers:
                instance = req.get_db_instance(server['id'])
                groups = instance.get(key)
                if groups:
                    server[ATTRIBUTE_NAME] = [{"name": group.name}
                                              for group in groups]
        else:
            # If method is a POST we get the security groups intended for an
            # instance from the request. The reason for this is if using
            # neutron security groups the requested security groups for the
            # instance are not in the db and have not been sent to neutron yet.
            if req.method != 'POST':
                sg_instance_bindings = (
                    self.security_group_api
                    .get_instances_security_groups_bindings(context,
                                                            servers))
                for server in servers:
                    groups = sg_instance_bindings.get(server['id'])
                    if groups:
                        server[ATTRIBUTE_NAME] = groups
            # In this section of code len(servers) == 1 as you can only POST
            # one server in an API request.
            else:
                # try converting to json
                req_obj = jsonutils.loads(req.body)
                # Add security group to server, if no security group was in
                # request add default since that is the group it is part of
                servers[0][ATTRIBUTE_NAME] = req_obj['server'].get(
                    ATTRIBUTE_NAME, [{'name': 'default'}])

    def _show(self, req, resp_obj):
        # Only responses that actually carry a single server get extended.
        if 'server' in resp_obj.obj:
            self._extend_servers(req, [resp_obj.obj['server']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroups(extensions.V21APIExtensionBase):
    """Security group support."""
    name = "SecurityGroups"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        """This extension registers resources only, no controller extensions."""
        return []

    def get_resources(self):
        """Expose the three security-group resources of this extension."""
        return [
            extensions.ResourceExtension(ALIAS,
                                         SecurityGroupController()),
            extensions.ResourceExtension(
                ALIAS,
                controller=ServerSecurityGroupController(),
                parent=dict(member_name='server', collection_name='servers')),
            extensions.ResourceExtension(
                'os-security-group-rules',
                controller=SecurityGroupRulesController()),
        ]
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(server_dict, create_kwargs, body_deprecated_param):
    """Copy the requested security group names from the server body into
    the create kwargs, dropping empty names and duplicates.
    """
    requested = server_dict.get(ATTRIBUTE_NAME)
    if requested is None:
        return
    unique_names = {sg['name'] for sg in requested if sg.get('name')}
    create_kwargs['security_groups'] = list(unique_names)
def get_server_create_schema(version):
    """Return the server-create request schema for the given API version."""
    is_legacy_v20 = version == '2.0'
    return (schema_security_groups.server_create_v20 if is_legacy_v20
            else schema_security_groups.server_create)
| {
"content_hash": "0bcad187084c43e993be664bdac1d079",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 79,
"avg_line_length": 43.36801541425819,
"alnum_prop": 0.587035720632664,
"repo_name": "rajalokan/nova",
"id": "8e1d2e0d59a6d083be046103b2c659f95d640020",
"size": "23182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/security_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
} |
"""
__IsolHasNoMatchIsolated_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: gehan
Modified: Fri Oct 11 15:08:32 2013
____________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from LHS import *
from MT_pre__Distributable import *
from graph_MT_pre__Distributable import *
from graph_LHS import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def IsolHasNoMatchIsolated_MDL(self, rootNode, MT_pre__GM2AUTOSAR_MMRootNode=None, MoTifRuleRootNode=None):
    """Auto-generated AToM3 loader for the IsolHasNoMatchIsolated rule model.

    Populates *rootNode* with the rule's objects (an LHS container and one
    MT_pre__Distributable pattern node).  The optional ASG root nodes, when
    supplied, receive the recorded model-level attributes.  Generated code:
    do not hand-edit the logic.
    """
    # --- Generating attributes code for ASG MT_pre__GM2AUTOSAR_MM ---
    if( MT_pre__GM2AUTOSAR_MMRootNode ):
        # author
        MT_pre__GM2AUTOSAR_MMRootNode.author.setValue('Annonymous')
        # description
        MT_pre__GM2AUTOSAR_MMRootNode.description.setValue('\n')
        MT_pre__GM2AUTOSAR_MMRootNode.description.setHeight(15)
        # name
        MT_pre__GM2AUTOSAR_MMRootNode.name.setValue('')
        MT_pre__GM2AUTOSAR_MMRootNode.name.setNone()
    # --- ASG attributes over ---

    # --- Generating attributes code for ASG MoTifRule ---
    if( MoTifRuleRootNode ):
        # author
        MoTifRuleRootNode.author.setValue('Annonymous')
        # description
        MoTifRuleRootNode.description.setValue('\n')
        MoTifRuleRootNode.description.setHeight(15)
        # name
        MoTifRuleRootNode.name.setValue('IsolHasNoMatchIsolated')
    # --- ASG attributes over ---

    # obj50: the LHS pattern container, carrying the rule's match constraint.
    self.obj50=LHS(self)
    self.obj50.isGraphObjectVisual = True

    if(hasattr(self.obj50, '_setHierarchicalLink')):
        self.obj50._setHierarchicalLink(False)

    # constraint
    self.obj50.constraint.setValue('if PreNode(\'1\')[\'cardinality\']==\'1\':\n return True\nreturn False\n')
    self.obj50.constraint.setHeight(15)

    self.obj50.graphClass_= graph_LHS
    if self.genGraphics:
        new_obj = graph_LHS(60.0,40.0,self.obj50)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
        new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj50.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj50)
    self.globalAndLocalPostcondition(self.obj50, rootNode)
    self.obj50.postAction( rootNode.CREATE )

    # obj51: the pattern node (match label '1') inside the LHS.
    self.obj51=MT_pre__Distributable(self)
    self.obj51.isGraphObjectVisual = True

    if(hasattr(self.obj51, '_setHierarchicalLink')):
        self.obj51._setHierarchicalLink(False)

    # MT_pivotOut__
    self.obj51.MT_pivotOut__.setValue('')
    self.obj51.MT_pivotOut__.setNone()

    # MT_subtypeMatching__
    self.obj51.MT_subtypeMatching__.setValue(('True', 0))
    self.obj51.MT_subtypeMatching__.config = 0

    # MT_pre__classtype
    self.obj51.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj51.MT_pre__classtype.setHeight(15)

    # MT_pivotIn__
    self.obj51.MT_pivotIn__.setValue('')
    self.obj51.MT_pivotIn__.setNone()

    # MT_label__
    self.obj51.MT_label__.setValue('1')

    # MT_pre__cardinality
    self.obj51.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj51.MT_pre__cardinality.setHeight(15)

    # MT_pre__name
    self.obj51.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj51.MT_pre__name.setHeight(15)

    self.obj51.graphClass_= graph_MT_pre__Distributable
    if self.genGraphics:
        new_obj = graph_MT_pre__Distributable(140.0,80.0,self.obj51)
        new_obj.DrawObject(self.UMLmodel)
        self.UMLmodel.addtag_withtag("MT_pre__Distributable", new_obj.tag)
        new_obj.layConstraints = dict() # Graphical Layout Constraints
        new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj51.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj51)
    self.globalAndLocalPostcondition(self.obj51, rootNode)
    self.obj51.postAction( rootNode.CREATE )

    # Connections for obj50 (graphObject_: Obj0) of type LHS
    self.drawConnections(
)
    # Connections for obj51 (graphObject_: Obj1) of type MT_pre__Distributable
    self.drawConnections(
)
# AToM3 loader hook: the function this generated model file exports.
newfunction = IsolHasNoMatchIsolated_MDL

# Meta-models that must be loaded before this model can be instantiated.
loadedMMName = ['MT_pre__GM2AUTOSAR_MM_META', 'MoTifRule_META']

# Version of the ATOM3 tool that generated this file.
atom3version = '0.3'
| {
"content_hash": "8d82ed72c7283ea6686ef96870a116dd",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 625,
"avg_line_length": 43.87074829931973,
"alnum_prop": 0.6410296169948829,
"repo_name": "levilucio/SyVOLT",
"id": "dae256a1ed925bac8472799a0e39cd6be0a926a6",
"size": "6449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/Properties/positive/models/IsolHasNoMatchIsolated_MDL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""Tests for drums_rnn_create_dataset."""
# internal imports
import tensorflow as tf
import magenta
from magenta.models.drums_rnn import drums_rnn_create_dataset
from magenta.models.shared import events_rnn_model
from magenta.pipelines import drum_pipelines
from magenta.pipelines import pipelines_common
from magenta.protobuf import music_pb2
FLAGS = tf.app.flags.FLAGS  # module-level handle to TensorFlow command-line flags
class DrumsRNNPipelineTest(tf.test.TestCase):
    """End-to-end check of the drums RNN dataset-creation pipeline."""

    def setUp(self):
        # Minimal config: only the encoder/decoder matters for this test.
        self.config = events_rnn_model.EventSequenceRnnConfig(
            None,
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.MultiDrumOneHotEncoding()),
            magenta.common.HParams())

    def testDrumsRNNPipeline(self):
        # A 4/4, 120 qpm sequence carrying a single drum track.
        sequence = magenta.common.testing_lib.parse_test_proto(
            music_pb2.NoteSequence,
            """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        magenta.music.testing_lib.add_track_to_sequence(
            sequence, 0,
            [(36, 100, 0.00, 2.0), (40, 55, 2.1, 5.0), (44, 80, 3.6, 5.0),
             (41, 45, 5.1, 8.0), (64, 100, 6.6, 10.0), (55, 120, 8.1, 11.0),
             (39, 110, 9.6, 9.7), (53, 99, 11.1, 14.1), (51, 40, 12.6, 13.0),
             (55, 100, 14.1, 15.0), (54, 90, 15.6, 17.0), (60, 100, 17.1, 18.0)],
            is_drum=True)

        # Run the same stages by hand that the pipeline should perform.
        quantized_sequence = pipelines_common.Quantizer(
            steps_per_quarter=4).transform(sequence)[0]
        drum_track = drum_pipelines.DrumsExtractor(
            min_bars=7, gap_bars=1.0).transform(quantized_sequence)[0]
        encoder = magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MultiDrumOneHotEncoding())
        encoded_track = encoder.encode(drum_track)

        pipeline_inst = drums_rnn_create_dataset.get_pipeline(self.config,
                                                              eval_ratio=0.0)
        result = pipeline_inst.transform(sequence)

        self.assertEqual({'training_drum_tracks': [encoded_track],
                          'eval_drum_tracks': []}, result)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    tf.test.main()
| {
"content_hash": "830f7735c256953d7725103730b91c67",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 35.5,
"alnum_prop": 0.6333333333333333,
"repo_name": "YoshikawaMasashi/magenta",
"id": "11c2d01b627b8befd830a48c5d71b517e3c03a7a",
"size": "2725",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magenta/models/drums_rnn/drums_rnn_create_dataset_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12668"
},
{
"name": "HTML",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "43259"
},
{
"name": "Jupyter Notebook",
"bytes": "2115912"
},
{
"name": "Protocol Buffer",
"bytes": "12931"
},
{
"name": "Python",
"bytes": "1243942"
},
{
"name": "Shell",
"bytes": "8783"
}
],
"symlink_target": ""
} |
from ..external.qt.QtCore import (QAbstractItemModel, QModelIndex,
QObject, Qt, QTimer, Signal, QSize)
from ..external.qt.QtGui import (QFont, QTreeView, QItemSelectionModel,
QAbstractItemView, QStyledItemDelegate)
from .qtutil import layer_icon
from .mime import LAYERS_MIME_TYPE, PyMimeData
from ..core.decorators import memoize
from ..core import message as m
from ..core.hub import HubListener
from .. import core
from .widgets.style_dialog import StyleDialog
# Row index of the "Data" branch under the tree's root item.
DATA_IDX = 0
# Row index of the "Subsets" branch under the tree's root item.
SUBSET_IDX = 1
def full_edit_factory(item, pos):
    """Open the full style editor (label editable) for *item* at *pos*."""
    StyleDialog.dropdown_editor(item, pos)
def restricted_edit_factory(item, pos):
    """Open the style editor for *item* at *pos* with label editing disabled."""
    StyleDialog.dropdown_editor(item, pos, edit_label=False)
class Item(object):
    """Base node of the data-collection tree.

    Subclasses override the class attributes and/or methods to control
    editing, selectability, and appearance of each tree row.
    """
    edit_factory = None        # callable opening a style editor, or None
    glue_data = None           # underlying glue object, if any
    flags = Qt.ItemIsEnabled   # Qt item flags for this row
    tooltip = None             # hover text, or None

    def font(self):
        """Return the font for this row (plain by default)."""
        return QFont()

    def icon(self):
        """Return the row icon (none by default)."""
        return None

    @property
    def label(self):
        """Display text; backed by the ``_label`` attribute."""
        return self._label
class DataCollectionItem(Item):
    """Invisible root node with exactly two children: Data and Subsets."""

    def __init__(self, dc):
        self.dc = dc
        self.row = 0
        self.column = 0
        self.parent = None
        self._label = ''
        self.children_count = 2

    @memoize
    def child(self, row):
        """Return the branch item for *row*, or None for any other row."""
        branch_types = {DATA_IDX: DataListItem, SUBSET_IDX: SubsetListItem}
        branch = branch_types.get(row)
        return branch(self.dc, self) if branch is not None else None
class DataListItem(Item):
    """Branch item listing one child per Data in the collection."""

    def __init__(self, dc, parent):
        self.dc = dc
        self.parent = parent
        self.row = DATA_IDX
        self.column = 0
        self._label = 'Data'

    @memoize
    def child(self, row):
        """Return the DataItem at *row*, or None past the end."""
        if row >= len(self.dc):
            return None
        return DataItem(self.dc, row, self)

    @property
    def children_count(self):
        return len(self.dc)

    def font(self):
        """Branch headers are rendered bold."""
        bold = QFont()
        bold.setBold(True)
        return bold
class DataItem(Item):
    """Leaf wrapping a single Data object; draggable, full style editing."""
    edit_factory = full_edit_factory
    flags = (Qt.ItemIsSelectable | Qt.ItemIsEnabled |
             Qt.ItemIsDragEnabled)

    def __init__(self, dc, row, parent):
        self.dc = dc
        self.row = row
        self.parent = parent
        self.column = 0
        self.children_count = 0

    @property
    def data(self):
        """The wrapped Data object, looked up lazily by row."""
        return self.dc[self.row]

    @property
    def glue_data(self):
        return self.data

    @property
    def label(self):
        return self.data.label

    @label.setter
    def label(self, value):
        # Editing the tree label renames the underlying Data object.
        self.data.label = value

    @property
    def style(self):
        return self.data.style

    def icon(self):
        return layer_icon(self.data)
class SubsetListItem(Item):
    """Branch item listing one child per SubsetGroup."""

    def __init__(self, dc, parent):
        self.dc = dc
        self.parent = parent
        self.row = SUBSET_IDX
        self._label = 'Subsets'
        self.column = 0

    @memoize
    def child(self, row):
        """Return the SubsetGroupItem at *row*, or None past the end."""
        if row >= len(self.dc.subset_groups):
            return None
        return SubsetGroupItem(self.dc, row, self)

    @property
    def children_count(self):
        return len(self.dc.subset_groups)

    def font(self):
        """Branch headers are rendered bold."""
        bold = QFont()
        bold.setBold(True)
        return bold
class SubsetGroupItem(Item):
    """Node wrapping a SubsetGroup; editable label, one child per subset."""
    edit_factory = full_edit_factory
    flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable

    def __init__(self, dc, row, parent):
        self.parent = parent
        self.dc = dc
        self.row = row
        self.column = 0

    @property
    def subset_group(self):
        return self.dc.subset_groups[self.row]

    @property
    def glue_data(self):
        return self.subset_group

    @property
    def label(self):
        return self.subset_group.label

    @label.setter
    def label(self, value):
        self.subset_group.label = value

    @property
    def tooltip(self):
        """Describe the selection, or report an empty subset."""
        state = self.subset_group.subset_state
        # Deliberate exact-type check: a bare SubsetState means "empty".
        if type(state) == core.subset.SubsetState:
            return "Empty subset"
        atts = [a for a in state.attributes
                if isinstance(a, core.ComponentID)]
        if len(atts) > 0:
            lbl = ', '.join(a.label for a in atts)
            return "Selection on %s" % lbl

    @property
    def style(self):
        return self.subset_group.style

    @property
    def children_count(self):
        return len(self.subset_group.subsets)

    @memoize
    def child(self, row):
        return SubsetItem(self.dc, self.subset_group, row, self)

    def icon(self):
        return layer_icon(self.subset_group)
class SubsetItem(Item):
    """Leaf wrapping one Subset of a SubsetGroup; draggable, restricted
    (no-label) style editing."""
    edit_factory = restricted_edit_factory
    flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled

    def __init__(self, dc, subset_group, subset_idx, parent):
        # Fix: the original assigned self.parent twice; once suffices.
        # NOTE(review): `dc` is accepted for signature parity with the other
        # items but is never stored or used here.
        self.parent = parent
        self.subset_group = subset_group
        self.row = subset_idx
        self.children_count = 0
        self.column = 0

    @property
    def subset(self):
        return self.subset_group.subsets[self.row]

    @property
    def label(self):
        return self.subset.verbose_label

    def icon(self):
        return layer_icon(self.subset)

    @property
    def style(self):
        return self.subset.style

    @property
    def glue_data(self):
        return self.subset
class DataCollectionModel(QAbstractItemModel, HubListener):
    """Qt item model presenting a glue DataCollection as a two-branch tree.

    Row 0 of the root holds Data entries, row 1 holds SubsetGroups (with
    their per-data subsets as grandchildren). The model subscribes to the
    hub so collection changes rebuild the tree.
    """
    new_item = Signal(QModelIndex)

    def __init__(self, data_collection, parent=None):
        QAbstractItemModel.__init__(self, parent)
        HubListener.__init__(self)
        self.data_collection = data_collection
        self.root = DataCollectionItem(data_collection)
        self._items = {}  # map hashes of Model pointers to model items
        # without this reference, PySide clobbers instance
        # data of model items
        self.register_to_hub(self.data_collection.hub)
        self.setSupportedDragActions(Qt.CopyAction)

    def index(self, row, column, parent=QModelIndex()):
        """Return the index at (row, column) under *parent*; single column."""
        if column != 0:
            return QModelIndex()
        if not parent.isValid():
            parent_item = self.root
        else:
            parent_item = self._get_item(parent)
        if parent_item is None:
            return QModelIndex()
        child_item = parent_item.child(row)
        if child_item:
            return self._make_index(row, column, child_item)
        else:
            return QModelIndex()

    def _get_item(self, index):
        """Map a QModelIndex back to its Item via the pointer registry."""
        if not index.isValid():
            return None
        return self._items.get(id(index.internalPointer()), None)

    def _make_index(self, row, column, item):
        """Create an index for *item*, registering it so it stays alive."""
        if item is not None:
            result = self.createIndex(row, column, item)
            self._items[id(result.internalPointer())] = item
            assert result.internalPointer() is item
            return result
        return self.createIndex(row, column)

    def to_indices(self, items):
        """Translate a list of Data, Subset, or SubsetGroups
        to a list of indices"""
        result = []
        for item in items:
            if isinstance(item, core.Data):
                idx = self.data_index(list(self.data_collection).index(item))
            elif isinstance(item, core.SubsetGroup):
                idx = self.subsets_index(
                    self.data_collection.subset_groups.index(item))
            elif isinstance(item, core.subset_group.GroupedSubset):
                grp = item.group
                idx = self.subsets_index(
                    self.data_collection.subset_groups.index(grp))
                row = list(self.data_collection).index(item.data)
                # BUG FIX: the column argument was omitted, so the group
                # index was passed as `column` and index() always returned
                # an invalid QModelIndex for GroupedSubsets.
                idx = self.index(row, 0, idx)
            else:
                raise NotImplementedError(type(item))
            result.append(idx)
        return result

    def flags(self, index=QModelIndex()):
        if not index.isValid():
            return Qt.NoItemFlags
        return self._get_item(index).flags

    def data(self, index, role):
        """Dispatch Qt data roles to the per-role helpers below."""
        if not index.isValid():
            return
        dispatch = {
            Qt.DisplayRole: self._display_data,
            Qt.FontRole: self._font_data,
            Qt.DecorationRole: self._icon_data,
            Qt.UserRole: self._user_data,
            Qt.ToolTipRole: self._tooltip_data}
        if role in dispatch:
            return dispatch[role](index)

    def setData(self, index, value, role=Qt.EditRole):
        """Rename the underlying object; False if the item isn't editable."""
        if role != Qt.EditRole:
            return False
        try:
            self._get_item(index).label = value
            return True
        except AttributeError:
            # Item has a read-only label property (no setter).
            return False

    def _tooltip_data(self, index):
        tooltip = self._get_item(index).tooltip
        return tooltip

    def _user_data(self, index):
        return self._get_item(index)

    def _display_data(self, index):
        return self._get_item(index).label

    def _font_data(self, index):
        item = self._get_item(index)
        return item.font()

    def _icon_data(self, index):
        return self._get_item(index).icon()

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        return ''

    def data_index(self, data_number=None):
        """
        Fetch the QModelIndex for a given data index,
        or the index for the parent data item

        :param data_number: position of data set to fetch, or None
        """
        base = self.index(DATA_IDX, 0)
        if data_number is None:
            return base
        return self.index(data_number, 0, base)

    def subsets_index(self, subset_number=None):
        """
        Fetch the QModelIndex for a given subset,
        or the index for the parent subset item

        :param subset_number: position of subset group to fetch, or None
        """
        base = self.index(SUBSET_IDX, 0)
        assert isinstance(self._get_item(base), SubsetListItem)
        if subset_number is None:
            return base
        return self.index(subset_number, 0, base)

    def rowCount(self, index=QModelIndex()):
        item = self._get_item(index)
        if item is None:
            return self.root.children_count
        return item.children_count

    def parent(self, index=None):
        if index is None:  # overloaded QObject.parent()
            return QObject.parent(self)
        item = self._get_item(index)
        if item is None:
            return QModelIndex()
        # NOTE(review): Qt convention expects the parent's own row/column
        # here (item.parent.row); using the child's row may yield
        # inconsistent ancestor indices — verify against upstream before
        # changing, since the tree has shallow fixed depth.
        return self._make_index(item.row, item.column, item.parent)

    def columnCount(self, index):
        return 1

    def register_to_hub(self, hub):
        """Subscribe to collection/subset changes so the tree refreshes."""
        for msg in [m.DataCollectionDeleteMessage,
                    m.SubsetDeleteMessage]:
            hub.subscribe(self, msg, lambda x: self.invalidate())
        hub.subscribe(self, m.DataCollectionAddMessage, self._on_add_data)
        hub.subscribe(self, m.SubsetCreateMessage, self._on_add_subset)

    def _on_add_data(self, message):
        self.invalidate()
        idx = self.data_index(len(self.data_collection) - 1)
        self.new_item.emit(idx)

    def _on_add_subset(self, message):
        self.invalidate()
        idx = self.subsets_index(len(self.data_collection.subset_groups) - 1)
        self.new_item.emit(idx)

    def invalidate(self):
        """Rebuild the entire item tree and notify attached views."""
        self.root = DataCollectionItem(self.data_collection)
        self._items.clear()
        self.reset()
        self.layoutChanged.emit()

    def glue_data(self, indices):
        """ Given a list of indices, return a list of all selected
        Data, Subset, and SubsetGroup objects.
        """
        items = [self._get_item(idx) for idx in indices]
        items = [item.glue_data for item in items]
        return items

    def mimeData(self, indices):
        data = self.glue_data(indices)
        result = PyMimeData(data, **{LAYERS_MIME_TYPE: data})
        self._mime = result  # hold reference to prevent segfault
        return result

    def mimeTypes(self):
        return [LAYERS_MIME_TYPE]
class DataCollectionView(QTreeView):
    """Tree view over a DataCollectionModel with drag support and
    double-click style editing."""
    selection_changed = Signal()

    def __init__(self, parent=None):
        super(DataCollectionView, self).__init__(parent)
        self.doubleClicked.connect(self._edit)

        # this keeps the full-row of the selection bar in-sync
        self.pressed.connect(lambda x: self.viewport().update())

        # only edit label on model.new_item
        self.setItemDelegate(LabeledDelegate())
        self.setEditTriggers(self.NoEditTriggers)

        # Periodic repaint so externally-made style changes become visible.
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.viewport().update)
        self._timer.start(1000)

    def selected_layers(self):
        """Return the glue objects for the currently selected rows."""
        idxs = self.selectedIndexes()
        return self._model.glue_data(idxs)

    def set_selected_layers(self, layers):
        """Select the rows corresponding to *layers*.

        Fix: removed an unused ``sm = self.selectionModel()`` local.
        """
        idxs = self._model.to_indices(layers)
        self.select_indices(*idxs)

    def select_indices(self, *indices):
        sm = self.selectionModel()
        sm.clearSelection()
        for idx in indices:
            sm.select(idx, sm.Select)

    def set_data_collection(self, data_collection):
        """Create a fresh model for *data_collection* and wire up signals."""
        self._model = DataCollectionModel(data_collection)
        self.setModel(self._model)

        sm = QItemSelectionModel(self._model)
        sm.selectionChanged.connect(lambda *args:
                                    self.selection_changed.emit())
        self.setSelectionModel(sm)

        self.setRootIsDecorated(False)
        self.setExpandsOnDoubleClick(False)
        self.expandToDepth(0)
        self._model.layoutChanged.connect(lambda: self.expandToDepth(0))
        self._model.layoutChanged.connect(self.selection_changed.emit)
        self._model.new_item.connect(self.select_indices)
        self._model.new_item.connect(self.edit_label)

        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setDragEnabled(True)
        self.setDropIndicatorShown(True)
        self.setDragDropMode(QAbstractItemView.DragOnly)

    def edit_label(self, index):
        """Start inline label editing, if the item allows it."""
        if not (self._model.flags(index) & Qt.ItemIsEditable):
            return
        self.edit(index)

    def _edit(self, index):
        """Open the item's style editor just below its row."""
        item = self._model.data(index, role=Qt.UserRole)
        if item is None or item.edit_factory is None:
            return
        rect = self.visualRect(index)
        pos = self.mapToGlobal(rect.bottomLeft())
        pos.setY(pos.y() + 1)
        # edit_factory is a plain function stored on the class, so
        # attribute access binds `item` as its first argument; `pos`
        # arrives as the second.
        item.edit_factory(pos)
class LabeledDelegate(QStyledItemDelegate):
    """ Add placeholder text to default delegate """

    def setEditorData(self, editor, index):
        """Pre-fill the editor with the display label, fully selected."""
        super(LabeledDelegate, self).setEditorData(editor, index)
        text = index.model().data(index, role=Qt.DisplayRole)
        editor.selectAll()
        editor.setText(text)
# Manual smoke test: show a DataCollectionView over a toy DataCollection.
if __name__ == "__main__":
    from glue.qt import get_qapp
    from glue.external.qt.QtGui import QTreeView
    from glue.core import Data, DataCollection

    app = get_qapp()
    collection = DataCollection()
    collection.append(Data(label='w'))
    view = DataCollectionView()
    view.set_data_collection(collection)
    view.show()
    view.raise_()
    collection.extend([Data(label='x', x=[1, 2, 3]),
                       Data(label='y', y=[1, 2, 3]),
                       Data(label='z', z=[1, 2, 3])])
    app.exec_()
| {
"content_hash": "1360667a59a31cb33ed0d41dffd7164d",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 77,
"avg_line_length": 28.32276119402985,
"alnum_prop": 0.6036492984651868,
"repo_name": "JudoWill/glue",
"id": "ab881840a6fb3420c32bb0fec04947d57390cec0",
"size": "15211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/qt/data_collection_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "2352"
},
{
"name": "Python",
"bytes": "1387891"
},
{
"name": "Shell",
"bytes": "1968"
}
],
"symlink_target": ""
} |
import pathlib
import hashlib
import subprocess
import multiprocessing
import os
import re
import sys
# key for performing a natural sort
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
    """Sort key that compares digit runs numerically ("fig2" < "fig10")."""
    return [int(piece) if piece.isdigit() else piece.lower()
            for piece in _nsre.split(s)]
# exception to handle asymptote errors
class AsyError(Exception):
    """Raised when asy produced output (asy signals errors via output,
    not exit codes).

    Attributes:
        path:   full path of the failing .asy file
        file:   basename of that path, for user-facing messages
        stdout: captured combined stdout/stderr of asy
        stderr: reserved; callers in this script pass output via stdout
    """

    def __init__(self, path="", stdout="", stderr=""):
        # Fix: pass a message to Exception so str(e) is meaningful
        # (previously args were never set, making str(e) == "").
        super().__init__(stdout or path)
        self.path = path
        self.file = pathlib.Path(path).name
        self.stdout = stdout
        self.stderr = stderr
# Chunk size (64 KiB) used when reading files for hashing.
BLOCKSIZE = 65536
# build a hash for every file in the passed path
def load_hashes(path):
    """Read the tab-separated ``<file>\\t<md5>`` manifest at *path*.

    Entries whose rendered ``<stem>_0.pdf`` no longer exists are dropped,
    forcing those sources to be re-rendered. A missing manifest yields {}.
    """
    hashes = {}
    try:
        with open(path, 'r') as manifest:
            for line in manifest:
                fields = line.strip().split('\t')
                # only keep the entry if the corresponding pdf is still there
                rendered = pathlib.Path(fields[0][:-4] + "_0.pdf")
                if rendered.exists():
                    hashes[fields[0]] = fields[1]
    except FileNotFoundError:
        return {}
    return hashes
# write out the hashes
def write_hashes(path, files):
    """Persist the filename->md5 mapping, one tab-separated pair per line."""
    with open(path, 'w') as manifest:
        manifest.writelines(
            "{file}\t{hash}\n".format(file=name, hash=digest)
            for name, digest in files.items())
# run asy on a given file
def run_asy(path):
    """Render one .asy file; raise AsyError if asy printed anything."""
    proc = subprocess.run(['asy', '-q', str(path)], stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, encoding="utf-8")
    # since asy doesn't respect return codes for stderr, we have to assume
    # that any output means an error.
    if len(proc.stdout) > 0:
        raise AsyError(str(path), proc.stdout)
    return [path, proc.stdout]
# run asy on all valid files in path, will return the number of files processed
def process(files_path, hashes_path, verbose=True):
    """Hash every .asy file in *files_path* and run asy on new/changed ones.

    Compares md5 digests against the manifest at *hashes_path*, renders
    only changed files (in parallel when there are several and more than
    one CPU), rewrites the manifest, and returns the number of files
    rendered. Returns 0 immediately when no .asy files exist.
    """
    p = pathlib.Path(files_path)
    files = {}
    # loop over all asymptote files in the directory
    for fp in p.glob("*.asy"):
        # hash the file, reading it in as binary
        md5 = hashlib.md5()
        with fp.open('rb') as f:
            while True:
                chunk = f.read(BLOCKSIZE)
                if len(chunk) == 0:
                    break
                md5.update(chunk)
        # add digest to dict (renamed from `hash`, which shadowed the builtin)
        files[str(fp.resolve())] = md5.hexdigest()

    # if there were no asymptote files, then exit
    if len(files) == 0:
        return 0

    # load the previous hashes
    old_hashes = load_hashes(hashes_path)

    # to track changed and unchanged files
    changed = []

    # Not the most efficient way, but the output is nicer to read
    for f in sorted(files, key=natural_sort_key):
        fn = pathlib.Path(f).name
        digest = files[f]
        # separate the files into changed and unchanged
        if f in old_hashes:
            if digest == old_hashes[f]:
                if verbose:
                    print("Unchanged: {0}".format(fn))
            else:
                if verbose:
                    print(" Changed: {0}".format(fn))
                changed.append(f)
        else:
            if verbose:
                print(" New: {0}".format(fn))
            changed.append(f)

    # process the changed files
    output = []
    if verbose:
        if len(changed) == 1:
            print("Processing 1 file... ", end='', flush=True)
        else:
            print("Processing {0} files... ".format(len(changed)), end='', flush=True)
    if len(changed) <= 1 or os.cpu_count() == 1:
        # with only one item, no need to build a worker pool
        for f in changed:
            output.append(run_asy(f))
    else:
        if verbose:
            print("Using {0} subprocesses... ".format(os.cpu_count()), end='', flush=True)
        # create the pool and set it to work on the changed files
        with multiprocessing.Pool(os.cpu_count()) as pool:
            output = pool.map(run_asy, changed)
    if verbose:
        print("Done.")

    # update the hash file
    write_hashes(hashes_path, files)
    return len(changed)
# if this file is ran, then it will just process all .asy files in the current directory
if __name__ == "__main__":
    try:
        process(".", "asy.hashes")
        sys.exit(0)
    except AsyError as e:
        # Presumably a spurious LaTeX warning that does not indicate a real
        # failure — only report other output. (confirm against asy behavior)
        if "Cannot write to texput.log" not in e.stdout:
            print("\nError in {0}:\n{1}".format(e.file, e.stdout))
        sys.exit(1)
| {
"content_hash": "50db32324713b97382581a826b70f2b6",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 118,
"avg_line_length": 24.81045751633987,
"alnum_prop": 0.6485774499473129,
"repo_name": "weinels/LaTeX",
"id": "ffb9fc147180fa4e4e11582347d31bd2ca8aaf94",
"size": "3796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/asy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3796"
},
{
"name": "Shell",
"bytes": "14806"
},
{
"name": "TeX",
"bytes": "46192"
}
],
"symlink_target": ""
} |
# Module maturity/support metadata consumed by Ansible's doc tooling.
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'core',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: win_file
version_added: "1.9.2"
short_description: Creates, touches or removes files or directories.
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
Unlike M(file), does not modify ownership, permissions or manipulate links.
notes:
- See also M(win_copy), M(win_template), M(copy), M(template), M(assemble)
requirements: [ ]
author: "Jon Hawkesworth (@jhawkesworth)"
options:
path:
description:
- 'path to the file being managed. Aliases: I(dest), I(name)'
required: true
default: []
aliases: ['dest', 'name']
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior. If C(absent),
directories will be recursively deleted, and files will be removed.
If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
required: false
default: file
choices: [ file, directory, touch, absent ]
'''
# Raw string fix: the Windows paths contain "\t" and "\f", which a normal
# triple-quoted string interprets as tab/formfeed, corrupting the examples
# shown by ansible-doc.
EXAMPLES = r'''
- name: Create a file
  win_file:
    path: C:\temp\foo.conf
    state: file

- name: Touch a file (creates if not present, updates modification time if present)
  win_file:
    path: C:\temp\foo.conf
    state: touch

- name: Remove a file, if present
  win_file:
    path: C:\temp\foo.conf
    state: absent

- name: Create directory structure
  win_file:
    path: C:\temp\folder\subfolder
    state: directory

- name: Remove directory structure
  win_file:
    path: C:\temp
    state: absent
'''
| {
"content_hash": "61f4a4275d7a866da09d69a108e11cf8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 87,
"avg_line_length": 31.107692307692307,
"alnum_prop": 0.658753709198813,
"repo_name": "nwiizo/workspace_2017",
"id": "82c5510c3cfb55df1315b745ce9279f6f20f14b6",
"size": "2790",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "ansible-modules-core/windows/win_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from datetime import datetime, timedelta
from airflow.models import DAG
from airflow.operators.empty import EmptyOperator
# Baseline execution date shared by the test DAGs below.
DEFAULT_DATE = datetime(2016, 1, 1)

# DAG tests backfill with pooled tasks
# Previously backfill would queue the task but never run it
# dag1's start_date lies one day in the future, so the scheduler should not
# create runs for it yet.
dag1 = DAG(dag_id="test_start_date_scheduling", start_date=datetime.utcnow() + timedelta(days=1))
dag1_task1 = EmptyOperator(task_id="dummy", dag=dag1, owner="airflow")

# dag2 is schedulable now, but task dummy1 carries its own start_date three
# days after the DAG's, while dummy2 inherits the DAG start_date.
dag2 = DAG(dag_id="test_task_start_date_scheduling", start_date=DEFAULT_DATE)
dag2_task1 = EmptyOperator(
    task_id="dummy1", dag=dag2, owner="airflow", start_date=DEFAULT_DATE + timedelta(days=3)
)
dag2_task2 = EmptyOperator(task_id="dummy2", dag=dag2, owner="airflow")
| {
"content_hash": "f820c356677bd00c4e9ea86af58c7d11",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 97,
"avg_line_length": 38.89473684210526,
"alnum_prop": 0.7564276048714479,
"repo_name": "apache/airflow",
"id": "2729dfaf8230ddf63e0445a3b0e56df5a43eeca5",
"size": "1526",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/dags/test_scheduler_dags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import re
import os
import pickle
import sys
import vim # pylint: disable=F0401
# Insert the taskwiki package on the python path. The plugin root is read
# from the vimscript variable s:plugin_path set by the loader.
BASE_DIR = vim.eval("s:plugin_path")
sys.path.insert(0, os.path.join(BASE_DIR, 'taskwiki'))
import errors
# Handle exceptions without traceback, if they're TaskWikiException
def output_exception(exception_type, value, tb):
    """Excepthook: print TaskWikiExceptions plainly, defer otherwise."""
    if exception_type is not errors.TaskWikiException:
        sys.__excepthook__(exception_type, value, tb)
    else:
        print(unicode(value), file=sys.stderr)

sys.excepthook = output_exception
import cache
import sort
import util
import viewport
# Initialize the cache. NOTE: this rebinds the module name `cache` to a
# TaskCache instance; all later code uses the instance.
cache = cache.TaskCache()

# Check the necessary dependencies first — presumably verifies taskwarrior/
# vimwiki availability; confirm in util.enforce_dependencies.
util.enforce_dependencies(cache)
class WholeBuffer(object):
    """Buffer-wide synchronisation between vimwiki text and TaskWarrior."""

    @staticmethod
    def update_from_tw():
        """
        Updates all the incomplete tasks in the vimwiki file if the info
        from TW is different.
        """
        cache.reset()
        cache.load_tasks()
        # TaskWarrior is authoritative here: buffer content yields to it
        cache.load_vwtasks(buffer_has_authority=False)
        cache.load_viewports()
        cache.update_vwtasks_from_tasks()
        cache.update_vwtasks_in_buffer()
        cache.evaluate_viewports()

    @staticmethod
    def update_to_tw():
        """
        Updates all tasks that differ from their TaskWarrior representation.
        """
        cache.reset()
        cache.load_tasks()
        cache.load_vwtasks()
        cache.load_viewports()
        cache.save_tasks()
        cache.update_vwtasks_in_buffer()
        cache.evaluate_viewports()
class SelectedTasks(object):
    """Actions applied to the vimwiki tasks on the currently selected lines."""

    def __init__(self):
        # Reset cache, otherwise old line content may be used
        cache.reset()

        # Find relevant TaskWarrior instance
        self.tw = cache.get_relevant_tw()

        # Load the tasks on the selected lines, skipping non-task lines
        candidates = [cache.vwtask[i] for i in util.selected_line_numbers()]
        self.tasks = [t for t in candidates if t is not None]
        if not self.tasks:
            print("No tasks selected.")

    def annotate(self, annotation):
        """Attach an annotation, prompting for one if none was given."""
        if not annotation:
            with util.current_line_highlighted():
                annotation = util.get_input("Enter annotation: ")

        for vwtask in self.tasks:
            vwtask.task.add_annotation(annotation)
            print("Task \"{0}\" annotated.".format(vwtask['description']))

    def done(self):
        """Complete the selected tasks and refresh their buffer lines."""
        # Multiple VimwikiTasks might refer to the same task, so make sure
        # we do not complete one task twice
        for task in set(vwtask.task for vwtask in self.tasks):
            task.done()

        # Update the lines in the buffer
        for vwtask in self.tasks:
            vwtask.update_from_task()
            vwtask.update_in_buffer()
            print("Task \"{0}\" completed.".format(vwtask['description']))

    def info(self):
        """Show `task info` for the first selected task in a split."""
        for vwtask in self.tasks:
            out = util.tw_execute_safely(self.tw, [vwtask.uuid, 'info'])
            if out:
                util.show_in_split(out, name='info', activate_cursorline=True)
                break  # Show only one task

    def edit(self):
        """Open interactive `task edit` for each selected task."""
        for vwtask in self.tasks:
            vim.command('! task {0} edit'.format(vwtask.uuid))

    def link(self):
        """Annotate each selected task with a link back to this wiki file."""
        path = util.get_absolute_filepath()
        for vwtask in self.tasks:
            vwtask.task.add_annotation("wiki: {0}".format(path))
            print("Task \"{0}\" linked.".format(vwtask['description']))

    def grid(self):
        """Open the TW grid scoped to the closest viewport's filter."""
        port = viewport.ViewPort.find_closest(cache)
        if port:
            vim.command("TW rc:{0} rc.context: {1}"
                        .format(port.tw.taskrc_location, port.raw_filter))
        else:
            print("No viewport detected.", file=sys.stderr)

    def delete(self):
        """Delete the selected tasks and remove their buffer lines."""
        # Delete the tasks in TaskWarrior
        # Multiple VimwikiTasks might refer to the same task, so make sure
        # we do not delete one task twice
        for task in set(vwtask.task for vwtask in self.tasks):
            task.delete()

        # Remove the lines in the buffer
        for vwtask in self.tasks:
            cache.remove_line(vwtask['line_number'])
            print("Task \"{0}\" deleted.".format(vwtask['description']))

    def modify(self, modstring):
        """Apply a TaskWarrior modification string to the selection."""
        # If no modstring was passed as argument, ask the user interactively
        if not modstring:
            with util.current_line_highlighted():
                modstring = util.get_input("Enter modifications: ")

        # We might have two same tasks in the range, make sure we do not
        # pass the same uuid twice
        uuids = list(set(vwtask.task['uuid'] for vwtask in self.tasks))

        # Generate the arguments from the modstring, modify all tasks at once
        args = util.tw_modstring_to_args(modstring)
        output = util.tw_execute_safely(self.tw, uuids + ['mod'] + args)

        # Update the touched tasks in buffer, if needed
        cache.load_tasks()
        cache.update_vwtasks_from_tasks()
        cache.update_vwtasks_in_buffer()

        # Output the feedback from TW
        if output:
            print(output[-1])

    def start(self):
        """Start the selected tasks and refresh their buffer lines."""
        # Multiple VimwikiTasks might refer to the same task, so make sure
        # we do not start one task twice
        for task in set(vwtask.task for vwtask in self.tasks):
            task.start()

        # Update the lines in the buffer
        for vwtask in self.tasks:
            vwtask.update_from_task()
            vwtask.update_in_buffer()
            print("Task \"{0}\" started.".format(vwtask['description']))

    def stop(self):
        """Stop the selected tasks and refresh their buffer lines."""
        # Multiple VimwikiTasks might refer to the same task, so make sure
        # we do not stop one task twice
        for task in set(vwtask.task for vwtask in self.tasks):
            task.stop()

        # Update the lines in the buffer
        for vwtask in self.tasks:
            vwtask.update_from_task()
            vwtask.update_in_buffer()
            print("Task \"{0}\" stopped.".format(vwtask['description']))

    def sort(self, sortstring):
        """Reorder the selected task lines in the buffer."""
        sort.TaskSorter(cache, self.tasks, sortstring).execute()
class Mappings(object):
    """Handlers behind taskwiki's key mappings."""

    @staticmethod
    def task_info_or_vimwiki_follow_link():
        """On a task line show its info; on a viewport inspect it;
        otherwise fall back to the default VimwikiFollowLink."""
        position = util.get_current_line_number()
        if cache.vwtask[position] is not None:
            SelectedTasks().info()
            return
        port = viewport.ViewPort.from_line(position, cache)
        if port is not None:
            Meta().inspect_viewport()
        else:
            vim.command('VimwikiFollowLink')
class Meta(object):
    """Editor-integration helpers: viewport inspection, Tagbar, colors."""

    def inspect_viewport(self):
        """Show a diagnostic split describing the viewport under the cursor."""
        position = util.get_current_line_number()
        port = viewport.ViewPort.from_line(position, cache)

        # Hidden viewports keep no loaded state; rebuild the cache first.
        if port.meta.get('visible') is False:
            cache.reset()
            cache.load_vwtasks()
            cache.load_tasks()

        template = (
            "ViewPort inspection:\n"
            "--------------------\n"
            "Name: {0}\n"
            "Filter used: {1}\n"
            "Defaults used: {2}\n"
            "Ordering used: {3}\n"
            "Matching taskwarrior tasks: {4}\n"
            "Displayed tasks: {5}\n"
            "Tasks to be added: {6}\n"
            "Tasks to be deleted: {7}\n"
        )

        if port is not None:
            # Load the tasks under the viewport
            port.load_tasks()
            to_add, to_del = port.get_tasks_to_add_and_del()

            # Fill in the interesting info in the template
            report = template.format(
                port.name,
                port.raw_filter,
                port.raw_defaults,
                port.sort,
                len(port.matching_tasks),
                len(port.tasks),
                ', '.join(map(unicode, to_add)),
                ', '.join(map(unicode, to_del)),
            )

            # Show in the split
            util.show_in_split(report.splitlines(),
                               activate_cursorline=True)

    def integrate_tagbar(self):
        """Register a vimwiki ctags configuration if Tagbar is installed."""
        if vim.eval('exists(":Tagbar")') != '2':
            return
        vim.vars['tagbar_type_vimwiki'] = {
            'ctagstype': 'default',
            'kinds': ['h:header', 'i:inside', 'v:viewport'],
            'sro': '&&&',
            'kind2scope': {'h': 'header', 'v': 'viewport'},
            'sort': 0,
            'ctagsbin': os.path.join(BASE_DIR, 'extra/vwtags.py'),
            'ctagsargs': 'default'
        }

    def set_proper_colors(self):
        """Define highlight groups, optionally sourcing from TW config."""
        tw_color_counterparts = {
            'TaskWikiTaskActive': 'color.active',
            'TaskWikiTaskCompleted': 'color.completed',
            'TaskWikiTaskDeleted': 'color.deleted',
        }

        taskwiki_native_colors = {
            'TaskWikiTaskActive': 'Type',
            'TaskWikiTaskCompleted': 'Comment',
            'TaskWikiTaskDeleted': 'Error',
            'TaskWikiTaskPriority': 'Error',
        }

        # If tw support is enabled, try to find definition in TW first
        if vim.vars.get('taskwiki_source_tw_colors'):
            tw = cache.get_relevant_tw()
            for syntax, option in tw_color_counterparts.items():
                tw_def = tw.config.get(option)
                if tw_def:
                    vim_def = util.convert_colorstring_for_vim(tw_def)
                    vim.command('hi def {0} {1}'.format(syntax, vim_def))

        # Define taskwiki (native) color. This can be overriden by user
        # by using :hi <group name> <color> command.
        for syntax, group in taskwiki_native_colors.items():
            vim.command('hi def link {0} {1}'.format(syntax, group))
class Split(object):
    """Base class for `task <report>` commands shown in a vim split.

    Subclasses set `command` and presentation flags; `execute` runs the
    report and displays its output.
    """
    command = None
    split_name = None
    colorful = False
    maxwidth = False
    maxheight = False
    vertical = False
    cursorline = True
    size = None
    tw_extra_args = []

    def __init__(self, args):
        self.args = self._process_args(args)
        self.split_name = self.split_name or self.command
        self.tw = cache.get_relevant_tw()

    def _process_args(self, args):
        """Turn the raw argument string into a TW filter argument list."""
        tw_args = util.tw_modstring_to_args(args)

        # A lone 'global' argument means: apply no filter at all
        if tw_args == ['global']:
            return []

        # A non-empty filter is used as-is
        if tw_args != []:
            return tw_args

        # Otherwise fall back to the closest viewport's filter, if any
        port = viewport.ViewPort.find_closest(cache)
        return port.taskfilter if port is not None else []

    @property
    def full_args(self):
        """Complete TW argument vector: filter + command + extras."""
        return self.args + [self.command] + self.tw_extra_args

    def execute(self):
        """Run the report and show its output in a split."""
        if self.colorful:
            output = util.tw_execute_colorful(self.tw, self.full_args,
                                              allow_failure=False,
                                              maxwidth=self.maxwidth,
                                              maxheight=self.maxheight)
        else:
            output = util.tw_execute_safely(self.tw, self.full_args)

        util.show_in_split(
            output,
            size=self.size,
            name=self.split_name,
            vertical=self.vertical,
            activate_cursorline=self.cursorline,
        )
class CallbackSplitMixin(object):
    """Mixin for splits where pressing <CR> invokes `callback()` on the
    tasks that were selected before the split opened."""
    split_cursorline = False

    def __init__(self, args):
        super(CallbackSplitMixin, self).__init__(args)
        self.selected = SelectedTasks()

    def execute(self):
        super(CallbackSplitMixin, self).execute()

        # Close the split if the user leaves it
        vim.command('au BufLeave <buffer> :bwipe')

        # SREMatch objecets cannot be pickled
        cache.line.clear()

        # We can't save the current instance in vim variable
        # so save the pickled version
        vim.current.buffer.vars['taskwiki_callback'] = pickle.dumps(self)

        # Remap <CR> to calling the callback and wiping the buffer
        vim.command(
            "nnoremap <silent> <buffer> <enter> :py "
            "callback = pickle.loads("
            "vim.current.buffer.vars['taskwiki_callback']); "
            "callback.callback(); "
            "vim.command('bwipe') <CR>"
        )

        # Show cursorline in split if required
        if self.split_cursorline:
            vim.current.window.options['cursorline'] = True

    def callback(self):
        """Subclasses must implement the <CR> action."""
        raise NotImplementedError("No callback defined.")
class SplitProjects(Split):
    """`task projects` report in a vertical split."""
    command = 'projects'
    vertical = True
class ChooseSplitProjects(CallbackSplitMixin, SplitProjects):
    """Project chooser: <CR> on a row assigns that project to the
    selected tasks."""
    split_cursorline = True

    def get_selected_project(self):
        """Rebuild the dotted project name from the indented report,
        walking upward and collecting each strictly-less-indented parent."""
        project_re = re.compile(r'^(?P<indent>\s*)(?P<name>[^\s]+)\s+[0-9]+$')
        parts = []
        current_indent = None

        def indented_less(indent):
            return current_indent is None or len(indent) < current_indent

        for line in util.get_lines_above():
            match = project_re.match(line)
            if match and indented_less(match.group('indent')):
                current_indent = len(match.group('indent'))
                parts.append(match.group('name'))

        # Properly handle selected (none)
        if parts == ['(none)']:
            parts = []

        parts.reverse()
        return '.'.join(parts)

    def callback(self):
        project = self.get_selected_project()
        self.selected.modify("project:{0}".format(project))
class SplitSummary(Split):
    """Show `task summary` (colorized) in a vertical split."""
    command = 'summary'
    vertical = True
    colorful = True
class SplitBurndownDaily(Split):
    """Show the daily burndown chart in a split."""
    command = 'burndown.daily'
    colorful = True
    maxwidth = True
class SplitBurndownWeekly(Split):
    """Show the weekly burndown chart in a split."""
    command = 'burndown.weekly'
    colorful = True
    maxwidth = True
class SplitBurndownMonthly(Split):
    """Show the monthly burndown chart in a split."""
    command = 'burndown.monthly'
    colorful = True
    maxwidth = True
class SplitCalendar(Split):
    """Show the taskwarrior calendar in a split."""

    command = 'calendar'
    colorful = True
    maxwidth = True

    # Task calendar does not take filter and in general uses
    # command-suffix syntax
    def __init__(self, args):
        self.args = []
        self.tw_extra_args = util.tw_modstring_to_args(args)
        self.split_name = self.split_name or self.command
        self.tw = cache.get_relevant_tw()
class SplitGhistoryMonthly(Split):
    """Show the monthly graphical history in a split."""
    command = 'ghistory.monthly'
    colorful = True
    maxwidth = True
class SplitGhistoryAnnual(Split):
    """Show the annual graphical history in a split."""
    command = 'ghistory.annual'
    colorful = True
    maxwidth = True
class SplitHistoryMonthly(Split):
    """Show the monthly history table in a vertical split."""
    command = 'history.monthly'
    colorful = True
    vertical = True
class SplitHistoryAnnual(Split):
    """Show the annual history table in a vertical split."""
    command = 'history.annual'
    colorful = True
    vertical = True
class SplitStats(Split):
    """Show `task stats` in a vertical split."""
    command = 'stats'
    colorful = True
    vertical = True
class SplitTags(Split):
    """Show `task tags` in a vertical split."""
    command = 'tags'
    colorful = True
    vertical = True
class ChooseSplitTags(CallbackSplitMixin, SplitTags):
    """Tags split where <CR> adds the tag to the selected tasks."""

    split_cursorline = True

    def get_selected_tag(self):
        """Return the tag name on the current line or raise if none matches."""
        tag_re = re.compile(r'^(?P<name>[^\s]+)\s+[0-9]+$')
        match = tag_re.match(vim.current.line)
        if match:
            return match.group('name')
        else:
            raise errors.TaskWikiException("No tag selected.")

    def callback(self):
        tag = self.get_selected_tag()
        self.selected.modify("+{0}".format(tag))
if __name__ == '__main__':
    # Entry point when sourced from vim: refresh the buffer from
    # taskwarrior and (re)apply editor integrations.
    WholeBuffer.update_from_tw()
    Meta().integrate_tagbar()
    Meta().set_proper_colors()
| {
"content_hash": "692c248e64522e9739ba6095962abd02",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 94,
"avg_line_length": 30.70689655172414,
"alnum_prop": 0.586062761245243,
"repo_name": "Spirotot/taskwiki",
"id": "dd9f6d032e042758baae07c34ec09df1cc598dd4",
"size": "16029",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "taskwiki/taskwiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153087"
},
{
"name": "VimL",
"bytes": "7533"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add the PostReport and TopicReport models; repoint Post.topic."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='PostReport',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True)),
                # NOTE(review): auto_now=True refreshes this on every save;
                # for a creation timestamp auto_now_add=True was probably
                # intended — confirm before editing migration history.
                ('creation_date', models.DateTimeField(auto_now=True, verbose_name='Data de cria\xe7\xe3o')),
                ('modification_date', models.DateTimeField(auto_now=True, verbose_name='Data de modifica\xe7\xe3o')),
                ('post', models.ForeignKey(related_name='reports', verbose_name='Post', to='core.Post')),
                ('user', models.ForeignKey(related_name='post_reports', verbose_name='Usu\xe1rio', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Postagem reportada',
                'verbose_name_plural': 'Postagens reportadas',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TopicReport',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True)),
                ('creation_date', models.DateTimeField(auto_now=True, verbose_name='Data de cria\xe7\xe3o')),
                ('modification_date', models.DateTimeField(auto_now=True, verbose_name='Data de modifica\xe7\xe3o')),
                # NOTE(review): field is named 'post' but targets core.Topic;
                # looks intentional for interface parity, verify with callers.
                ('post', models.ForeignKey(related_name='reports', verbose_name='Post', to='core.Topic')),
                ('user', models.ForeignKey(related_name='topic_reports', verbose_name='Usu\xe1rio', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'T\xf3pico reportado',
                'verbose_name_plural': 'T\xf3picos reportados',
            },
            bases=(models.Model,),
        ),
        migrations.AlterField(
            model_name='post',
            name='topic',
            field=models.ForeignKey(related_name='posts', verbose_name='T\xf3pico', to='core.Topic'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "c45a4fe8485fe98b0b14b7d04634cfce",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 130,
"avg_line_length": 46.0377358490566,
"alnum_prop": 0.578688524590164,
"repo_name": "HigorSilvaRosa/ForumGeolocalizado",
"id": "61c79b333b870df0f651c35c7bbd4b90a85f9c31",
"size": "2464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/migrations/0002_auto_20141118_2258.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49549"
},
{
"name": "JavaScript",
"bytes": "138774"
},
{
"name": "Python",
"bytes": "20167"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create Perfil and SolicitudColaboracion."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Perfil',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tel', models.CharField(blank=True, max_length=10, null=True)),
                ('fecha_nacimiento', models.DateField(blank=True, null=True)),
                ('photo', models.ImageField(blank=True, null=True, upload_to='colaboradores/%Y/%m')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='SolicitudColaboracion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                ('licenciatura_leyes', models.CharField(choices=[('si', 'Si'), ('no', 'No')], max_length=2)),
                ('telefono', models.CharField(blank=True, max_length=10, null=True)),
                ('fecha_nacimiento', models.DateField(blank=True, null=True)),
                # NOTE(review): auto_now=True updates on every save; a
                # request timestamp usually wants auto_now_add=True.
                ('fecha_solicitud', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| {
"content_hash": "d96570655994adcf9ade9b28710d9cb6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 121,
"avg_line_length": 41.578947368421055,
"alnum_prop": 0.5943037974683544,
"repo_name": "SurielRuano/Orientador-Legal",
"id": "d01f3a93cba32f3cefad516eaaab29a4b076443e",
"size": "1651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colaboradores/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "194202"
},
{
"name": "HTML",
"bytes": "62553"
},
{
"name": "JavaScript",
"bytes": "106997"
},
{
"name": "Python",
"bytes": "30652"
}
],
"symlink_target": ""
} |
import threading
import uuid
from nova.compute import flavors
from nova.compute import utils as compute_utils
import nova.image
import time
from nova.openstack.common import log as logging
from nova import volume
from nova import objects
from nova.openstack.common import uuidutils
from nova import block_device
from nova.compute import task_states
import os.path
import traceback
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import vm_states
from nova.compute import power_state
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import network
import eventlet
from eventlet import greenthread
# Module-level logger for this API extension.
LOG = logging.getLogger(__name__)

# States usable in resetState action
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
def authorize(context, action_name):
    """Enforce the 'admin_actions:<action_name>' policy against *context*."""
    action = 'admin_actions:%s' % action_name
    checker = extensions.extension_authorizer('compute', action)
    checker(context)
class AdminActionsController(wsgi.Controller):
    """Admin-only server actions (pause, migrate, lock, backup, ...).

    Every handler authorizes the caller against the matching
    'admin_actions:<action>' policy, delegates to the compute API and maps
    nova exceptions onto webob HTTP errors.  All handlers return 202 on
    success.
    """

    def __init__(self, ext_mgr, *args, **kwargs):
        super(AdminActionsController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.ext_mgr = ext_mgr

    # TODO(bcwaldon): These action names should be prefixed with 'os-'

    @wsgi.action('pause')
    def _pause(self, req, id, body):
        """Permit Admins to pause the server."""
        ctxt = req.environ['nova.context']
        authorize(ctxt, 'pause')
        server = common.get_instance(self.compute_api, ctxt, id,
                                     want_objects=True)
        try:
            self.compute_api.pause(ctxt, server)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'pause')
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            # Unexpected failure: log the traceback, answer 422.
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::pause %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('unpause')
    def _unpause(self, req, id, body):
        """Permit Admins to unpause the server."""
        ctxt = req.environ['nova.context']
        authorize(ctxt, 'unpause')
        server = common.get_instance(self.compute_api, ctxt, id,
                                     want_objects=True)
        try:
            self.compute_api.unpause(ctxt, server)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'unpause')
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::unpause %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('suspend')
    def _suspend(self, req, id, body):
        """Permit admins to suspend the server."""
        context = req.environ['nova.context']
        authorize(context, 'suspend')
        server = common.get_instance(self.compute_api, context, id,
                                     want_objects=True)
        try:
            self.compute_api.suspend(context, server)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'suspend')
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("compute.api::suspend %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('resume')
    def _resume(self, req, id, body):
        """Permit admins to resume the server from suspend."""
        context = req.environ['nova.context']
        authorize(context, 'resume')
        server = common.get_instance(self.compute_api, context, id,
                                     want_objects=True)
        try:
            self.compute_api.resume(context, server)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'resume')
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("compute.api::resume %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('migrate')
    def _migrate(self, req, id, body):
        """Permit admins to migrate a server to a new host.

        When an 'az' is supplied in the body, a cross-availability-zone
        migration is started in a background MigrateThread; otherwise a
        normal cold migration (resize without flavor change) is done.
        """
        param_dict = body.get('migrate')
        no_sys_vol = param_dict.get('no_sys_vol', False)
        az = param_dict.get('az')
        boot_system_volume = not no_sys_vol
        context = req.environ['nova.context']
        authorize(context, 'migrate')
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        # NOTE(review): this lookup result is never used below
        # (_check_migrate_conditions re-queries it); kept as-is.
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance['uuid'])
        if az is not None:
            availability_zone = instance.availability_zone
            checkResut = self._check_migrate_conditions(context, az, instance,
                                                        boot_system_volume)
            if checkResut is False:
                if 'vcloud' in az:
                    msg = _("The vm can't migrate to the az")
                    raise exc.HTTPBadRequest(explanation=msg)
                if 'aws' in az:
                    msg = _("The vm can only migrate data volume to the az")
                    raise exc.HTTPBadRequest(explanation=msg)
                if 'aws' in availability_zone:
                    msg = _("The vm can only migrate data volume from the az")
                    raise exc.HTTPBadRequest(explanation=msg)
            if az == availability_zone:
                msg = _("The target azone can't be the same one.")
                raise exc.HTTPBadRequest(explanation=msg)
            # Cross-AZ migration runs asynchronously; 202 is returned
            # before the thread finishes.
            migrateThread = MigrateThread(context, instance, az,
                                          boot_system_volume)
            migrateThread.start()
        else:
            host = None
            if self.ext_mgr.is_loaded('os-migrate-host'):
                migrate_body = body.get('migrate')
                host = migrate_body.get('host') if migrate_body else None
            LOG.debug("Going to try to cold migrate %(uuid)s to %(host)s",
                      {"uuid": instance["uuid"],
                       "host": (host or "another host")})
            try:
                # Cold migrate == resize with no new flavor.
                self.compute_api.resize(req.environ['nova.context'], instance)
            except exception.QuotaError as error:
                raise exc.HTTPForbidden(explanation=error.format_message())
            except exception.InstanceIsLocked as e:
                raise exc.HTTPConflict(explanation=e.format_message())
            except exception.InstanceInvalidState as state_error:
                common.raise_http_conflict_for_instance_invalid_state(
                    state_error, 'migrate')
            except exception.InstanceNotFound as e:
                raise exc.HTTPNotFound(explanation=e.format_message())
            except exception.NoValidHost as e:
                raise exc.HTTPBadRequest(explanation=e.format_message())
            except Exception as e:
                LOG.exception(_LE("Error in migrate %s"), e)
                raise exc.HTTPBadRequest()
        return webob.Response(status_int=202)

    def _check_migrate_conditions(self, context, az, instance,
                                  boot_system_volume):
        """Return False when the requested cross-AZ migration is forbidden.

        AWS zones can neither give nor receive system (boot) volumes.
        """
        can_migrate = True
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance['uuid'])
        if len(bdms) > 1 and 'vcloud' in az:
            # can_migrate = False
            # modify by luqitao 2015/8/18
            can_migrate = True
        if boot_system_volume and 'aws' in az:
            can_migrate = False
        availability_zone = instance.availability_zone
        if boot_system_volume and 'aws' in availability_zone:
            can_migrate = False
        return can_migrate

    @wsgi.action('resetNetwork')
    def _reset_network(self, req, id, body):
        """Permit admins to reset networking on a server."""
        context = req.environ['nova.context']
        authorize(context, 'resetNetwork')
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.reset_network(context, instance)
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::reset_network %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('injectNetworkInfo')
    def _inject_network_info(self, req, id, body):
        """Permit admins to inject network info into a server."""
        context = req.environ['nova.context']
        authorize(context, 'injectNetworkInfo')
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.inject_network_info(context, instance)
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::inject_network_info %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('lock')
    def _lock(self, req, id, body):
        """Lock a server instance."""
        context = req.environ['nova.context']
        authorize(context, 'lock')
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.lock(context, instance)
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::lock %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('unlock')
    def _unlock(self, req, id, body):
        """Unlock a server instance."""
        context = req.environ['nova.context']
        authorize(context, 'unlock')
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.unlock(context, instance)
        except exception.PolicyNotAuthorized as e:
            raise webob.exc.HTTPForbidden(explanation=e.format_message())
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::unlock %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)

    @wsgi.action('createBackup')
    def _create_backup(self, req, id, body):
        """Backup a server instance.

        Images now have an `image_type` associated with them, which can be
        'snapshot' or the backup type, like 'daily' or 'weekly'.

        If the image_type is backup-like, then the rotation factor can be
        included and that will cause the oldest backups that exceed the
        rotation factor to be deleted.
        """
        context = req.environ["nova.context"]
        authorize(context, 'createBackup')
        entity = body["createBackup"]

        try:
            image_name = entity["name"]
            backup_type = entity["backup_type"]
            rotation = entity["rotation"]
        except KeyError as missing_key:
            msg = _("createBackup entity requires %s attribute") % missing_key
            raise exc.HTTPBadRequest(explanation=msg)
        except TypeError:
            msg = _("Malformed createBackup entity")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            rotation = int(rotation)
        except ValueError:
            msg = _("createBackup attribute 'rotation' must be an integer")
            raise exc.HTTPBadRequest(explanation=msg)
        if rotation < 0:
            msg = _("createBackup attribute 'rotation' must be greater "
                    "than or equal to zero")
            raise exc.HTTPBadRequest(explanation=msg)

        props = {}
        metadata = entity.get('metadata', {})
        common.check_img_metadata_properties_quota(context, metadata)
        try:
            props.update(metadata)
        except ValueError:
            msg = _("Invalid metadata")
            raise exc.HTTPBadRequest(explanation=msg)

        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            image = self.compute_api.backup(context, instance, image_name,
                                            backup_type, rotation,
                                            extra_properties=props)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'createBackup')

        resp = webob.Response(status_int=202)

        # build location of newly-created image entity if rotation is not zero
        if rotation > 0:
            image_id = str(image['id'])
            image_ref = os.path.join(req.application_url, 'images', image_id)
            resp.headers['Location'] = image_ref

        return resp

    @wsgi.action('os-migrateLive')
    def _migrate_live(self, req, id, body):
        """Permit admins to (live) migrate a server to a new host."""
        context = req.environ["nova.context"]
        authorize(context, 'migrateLive')
        try:
            block_migration = body["os-migrateLive"]["block_migration"]
            disk_over_commit = body["os-migrateLive"]["disk_over_commit"]
            host = body["os-migrateLive"]["host"]
        except (TypeError, KeyError):
            msg = _("host, block_migration and disk_over_commit must "
                    "be specified for live migration.")
            raise exc.HTTPBadRequest(explanation=msg)

        try:
            block_migration = strutils.bool_from_string(block_migration,
                                                        strict=True)
            disk_over_commit = strutils.bool_from_string(disk_over_commit,
                                                         strict=True)
        except ValueError as err:
            raise exc.HTTPBadRequest(explanation=six.text_type(err))

        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.live_migrate(context, instance, block_migration,
                                          disk_over_commit, host)
        except (exception.NoValidHost,
                exception.ComputeServiceUnavailable,
                exception.InvalidHypervisorType,
                exception.InvalidCPUInfo,
                exception.UnableToMigrateToSelf,
                exception.DestinationHypervisorTooOld,
                exception.InvalidLocalStorage,
                exception.InvalidSharedStorage,
                exception.HypervisorUnavailable,
                exception.InstanceNotRunning,
                exception.ComputeHostNotFound,
                exception.MigrationPreCheckError) as ex:
            raise exc.HTTPBadRequest(explanation=ex.format_message())
        except exception.InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'os-migrateLive')
        except Exception:
            if host is None:
                msg = _("Live migration of instance %s to another host "
                        "failed") % id
            else:
                msg = _("Live migration of instance %(id)s to host %(host)s "
                        "failed") % {'id': id, 'host': host}
            LOG.exception(msg)
            # Return messages from scheduler
            raise exc.HTTPBadRequest(explanation=msg)
        return webob.Response(status_int=202)

    @wsgi.action('os-resetState')
    def _reset_state(self, req, id, body):
        """Permit admins to reset the state of a server."""
        context = req.environ["nova.context"]
        authorize(context, 'resetState')

        # Identify the desired state from the body
        try:
            state = state_map[body["os-resetState"]["state"]]
        except (TypeError, KeyError):
            msg = _("Desired state must be specified. Valid states "
                    "are: %s") % ', '.join(sorted(state_map.keys()))
            raise exc.HTTPBadRequest(explanation=msg)

        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            # State is written directly to the DB, bypassing the compute
            # manager; task_state is cleared as part of the reset.
            instance.vm_state = state
            instance.task_state = None
            instance.save(admin_state_reset=True)
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::resetState %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)
class MigrateThread(threading.Thread):
    def __init__(self, context, instance, availability_zone,
                 migrate_system_volume):
        """Prepare a background cross-AZ migration for *instance*.

        :param availability_zone: name of the target availability zone
        :param migrate_system_volume: whether the boot (system) volume is
            moved as well, or only the data volumes
        """
        threading.Thread.__init__(self)
        self.context = context
        self.instance = instance
        #self.flavor_id = flavor_id
        self.availability_zone = availability_zone
        self.migrate_system_volume = migrate_system_volume
        self.compute_api = compute.API()
        self.host_api = compute.HostAPI()
        self.image_api = nova.image.API()
        #self.ext_mgr = ext_mgr
        self.volume_api = volume.API()
        self.network_api = network.API()
    def _get_power_state(self, context, instance):
        """Retrieve the power state for the given instance.

        NOTE(review): despite the name, this returns instance.vm_state
        (compared against vm_states.* by the callers), not the hypervisor
        power state.
        """
        LOG.debug('Checking state', instance=instance)
        return instance.vm_state
def _convert_volume_type(self,context,availability_zone,origin_volume_type):
""" convert different azone's volume type"""
# volume_type_dist = {'az01.shenzhen--fusionsphere': None, 'az02.hangzhou--fusionsphere': 'lvm',
# 'az11.shenzhen--vcloud': None, 'az31.singapore--aws': None, 'az32.singapore--aws': None}
# if availability_zone is not None:
# return volume_type_dist.get(availability_zone, None)
volume_type = None
origin_volume_type_list=[]
if origin_volume_type is None:
return volume_type
else:
try:
origin_volume_type_list = origin_volume_type.split('@')
volume_type = origin_volume_type_list[0]+'@'+availability_zone
except Exception as e :
LOG.warning('convert volume type error, %s ' %e.message)
return volume_type
    def _delete_tmp_image(self, image_uuid, volume_dist_for_image_id):
        """ delete the created image during the migrate """
        # The boot image is only temporary when the system volume itself
        # was migrated through glance.
        if self.migrate_system_volume and image_uuid is not None:
            self.image_api.delete(self.context, image_uuid)
        # Per-volume images are always temporary.
        if volume_dist_for_image_id:
            for image_id in volume_dist_for_image_id.values():
                LOG.debug('delete the tmp image %s' % image_id)
                self.image_api.delete(self.context, image_id)
    def _upload_volume_to_image(self, volume_ids, volume_dict_for_boot_index):
        """ upload the volume to glance """
        # Returns {volume_id: image_id} for every uploaded volume.  The
        # system volume (boot_index == 0) is skipped unless
        # migrate_system_volume is set; data volumes are uploaded with the
        # 'vgw_url' container format.
        volume_dist_for_image_id = {}
        for volume_id in volume_ids:
            if volume_dict_for_boot_index[volume_id] == 0 and self.migrate_system_volume is False:
                continue
            else:
                container_format = 'bare'
                if volume_dict_for_boot_index[volume_id] == 0 and self.migrate_system_volume is True:
                    pass
                else:
                    # NOTE(review): region_info_list is computed but unused.
                    region_info_list = self.availability_zone.split('.')
                    container_format = 'vgw_url'
                    #if self.availability_zone =='az01.shenzhen--fusionsphere':
                    #if 'fusionsphere' in self.availability_zone:
                    #    container_format='fs_vgw_url'
                    #elif self.availability_zone =='az11.shenzhen--vcloud':
                    # elif 'vcloud' in self.availability_zone:
                    #    container_format='vcloud_vgw_url'
                    #else:
                    #    container_format='aws_vgw_url'
                response = self.volume_api.upload_to_image(self.context,
                                                           volume_id,
                                                           True,
                                                           volume_id + '_' + self.availability_zone,
                                                           container_format,
                                                           'qcow2')
                image_uuid_of_volume = response[1]['os-volume_upload_image']['image_id']
                volume_dist_for_image_id[volume_id] = image_uuid_of_volume
        return volume_dist_for_image_id
    def _upload_data_volume_to_image(self, data_volume_info_dict):
        """upload data volume to image"""
        # Returns {volume_id: image_id}; all data volumes are uploaded
        # with the 'vgw_url' container format and force=True.
        LOG.debug("begin upload data_volumes to image")
        volume_dist_for_image_id = {}
        for volume_id in data_volume_info_dict.keys():
            container_format = 'vgw_url'
            #if 'fusionsphere' in self.availability_zone:
            #    container_format='fs_vgw_url'
            #elif 'vcloud' in self.availability_zone:
            #    container_format='vcloud_vgw_url'
            #else:
            #    container_format='aws_vgw_url'
            LOG.debug('upload the data volume %s to image ,the image name is %s' % (volume_id, volume_id))
            response = self.volume_api.upload_to_image(self.context,
                                                       volume_id,
                                                       True,
                                                       volume_id + '_' + self.availability_zone,
                                                       container_format,
                                                       'qcow2')
            image_uuid_of_volume = response[1]['os-volume_upload_image']['image_id']
            volume_dist_for_image_id[volume_id] = image_uuid_of_volume
        return volume_dist_for_image_id
    def _delete_volume_after_migrate(self, data_volume_info_dict):
        """Delete each source volume once it becomes 'available' again.

        Polls every second for up to ~30 minutes per volume; gives up
        silently (no deletion) on error/deleting states or on timeout.
        """
        if data_volume_info_dict:
            for volume_id in data_volume_info_dict.keys():
                volume = self.volume_api.get(self.context, volume_id)
                query_volume_status = 1800
                if volume:
                    volume_status = volume.get('status')
                    if volume_status == 'error' \
                            or volume_status == 'deleting' \
                            or volume_status == 'error_deleting':
                        return
                    while volume_status != 'available':
                        time.sleep(1)
                        volume = self.volume_api.get(self.context, volume_id)
                        if volume:
                            volume_status = volume.get('status')
                        query_volume_status = query_volume_status - 1
                        if query_volume_status == 0 and volume_status != 'available':
                            return
                    self.volume_api.delete(self.context, volume_id)
    #copy from servers for migrate
    def _get_requested_networks(self, requested_networks):
        """Create a list of requested networks from the networks attribute."""
        networks = []
        network_uuids = []
        # NOTE(review): the loop variable shadows the module-level
        # 'network' import within this method.
        for network in requested_networks:
            request = objects.NetworkRequest()
            try:
                try:
                    request.port_id = network.get('port', None)
                except ValueError:
                    msg = _("Bad port format: port uuid is "
                            "not in proper format "
                            "(%s)") % network.get('port')
                    raise exc.HTTPBadRequest(explanation=msg)
                if request.port_id:
                    request.network_id = None
                    # NOTE(review): 'utils' is not imported in this module
                    # (only compute_utils is); this branch looks like it
                    # would raise NameError — verify against the original
                    # servers.py this was copied from.
                    if not utils.is_neutron():
                        # port parameter is only for neutron v2.0
                        msg = _("Unknown argument : port")
                        raise exc.HTTPBadRequest(explanation=msg)
                else:
                    request.network_id = network['uuid']

                if (not request.port_id and not
                        uuidutils.is_uuid_like(request.network_id)):
                    br_uuid = request.network_id.split('-', 1)[-1]
                    if not uuidutils.is_uuid_like(br_uuid):
                        msg = _("Bad networks format: network uuid is "
                                "not in proper format "
                                "(%s)") % request.network_id
                        raise exc.HTTPBadRequest(explanation=msg)

                # fixed IP address is optional
                # if the fixed IP address is not provided then
                # it will use one of the available IP address from the network
                try:
                    request.address = network.get('fixed_ip', None)
                except ValueError:
                    msg = _("Invalid fixed IP address (%s)") % request.address
                    raise exc.HTTPBadRequest(explanation=msg)

                if (request.network_id and
                        request.network_id in network_uuids):
                    expl = (_("Duplicate networks"
                              " (%s) are not allowed") %
                            request.network_id)
                    raise exc.HTTPBadRequest(explanation=expl)
                network_uuids.append(request.network_id)
                networks.append(request)
            except KeyError as key:
                expl = _('Bad network format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)

        return objects.NetworkRequestList(objects=networks)
    def _stop_instance(self, instance):
        """Stop *instance* and block until it reaches the STOPPED vm_state.

        Polls every second for up to ~10 minutes; raises HTTPBadRequest on
        ERROR state or timeout.
        """
        current_power_state = self._get_power_state(self.context, instance)
        if current_power_state != vm_states.STOPPED:
            self.compute_api.stop(self.context, instance)
            query_vm_status_count = 600
            instance = common.get_instance(self.compute_api, self.context,
                                           instance.uuid, want_objects=True)
            current_power_state = self._get_power_state(self.context, instance)
            while current_power_state != vm_states.STOPPED:
                time.sleep(1)
                instance = common.get_instance(self.compute_api, self.context,
                                               instance.uuid, want_objects=True)
                current_power_state = self._get_power_state(self.context, instance)
                if current_power_state == vm_states.ERROR:
                    msg = _("stop instance failed when migrating vm")
                    raise exc.HTTPBadRequest(explanation=msg)
                query_vm_status_count = query_vm_status_count - 1
                if query_vm_status_count == 0 and current_power_state != vm_states.STOPPED:
                    msg = _("stop instance failed when migrating vm")
                    raise exc.HTTPBadRequest(explanation=msg)
def _create_target_volume(self,volume_dict_for_image_id):
""" create the target volume and return the mapping of source_volume and target_volume"""
LOG.info('begin create target volume')
source_target_vol_mapping={}
if volume_dict_for_image_id:
for volume_id in volume_dict_for_image_id.keys():
image_id_of_volume = volume_dict_for_image_id.get(volume_id)
image = self.image_api.get(self.context,image_id_of_volume)
query_image_status_count=1800
LOG.info('query the image %s status of the voluem %s' %(image_id_of_volume,volume_id))
while image['status'] != 'active':
time.sleep(2)
image = self.image_api.get(self.context,image_id_of_volume)
if image['status'] == 'error':
msg = _("migrate vm failed.")
raise exc.HTTPBadRequest(explanation=msg)
query_cascaded_image_status_count = query_image_status_count-1
if query_cascaded_image_status_count == 0 and image['status'] != 'active':
msg = _("migrate vm failed.")
raise exc.HTTPBadRequest(explanation=msg)
LOG.info('create target volume using the image %s' %image_id_of_volume)
volume = self.volume_api.get(self.context, volume_id)
size= volume.get('size')
origin_volume_type= volume.get('volume_type_id')
volume_type = self._convert_volume_type(self.context,self.availability_zone,origin_volume_type)
volume_name = volume.get('display_name')
metadata ={'readonly':'False','attached_mode':'rw'}
target_volume = self.volume_api.create(self.context,size,volume_name,None,image_id=image['id'],volume_type=volume_type,
metadata=metadata,availability_zone=self.availability_zone)
source_target_vol_mapping[volume_id]=target_volume
return source_target_vol_mapping
    def _check_volume_status(self, source_target_vol_mapping):
        """Wait until every newly-created target volume is 'available'.

        Polls every 2 seconds for up to ~1 hour per volume; raises
        HTTPBadRequest on error state or timeout.
        """
        if source_target_vol_mapping:
            for target_volume in source_target_vol_mapping.values():
                query_volume_status_count = 1800
                volume_id = target_volume['id']
                volume = self.volume_api.get(self.context, volume_id)
                while volume.get('status') != 'available':
                    time.sleep(2)
                    volume = self.volume_api.get(self.context, volume_id)
                    if volume.get('status') == 'error':
                        msg = _("migrate vm failed.")
                        raise exc.HTTPBadRequest(explanation=msg)
                    query_volume_status_count = query_volume_status_count - 1
                    if query_volume_status_count == 0 and volume.get('status') != 'available':
                        msg = _("migrate vm failed.")
                        raise exc.HTTPBadRequest(explanation=msg)
def _mount_data_volume(self,instance,source_target_vol_mapping,data_volume_info_dict):
if source_target_vol_mapping:
for source_vol_id in source_target_vol_mapping.keys():
target_volume= source_target_vol_mapping.get(source_vol_id)
device_name =data_volume_info_dict.get(source_vol_id)['mount_point']
self.compute_api.attach_volume(self.context,instance,target_volume['id'],device=device_name)
for target_volume in source_target_vol_mapping.values():
query_volume_status_count=1800
volume_id = target_volume['id']
volume = self.volume_api.get(self.context, volume_id)
while volume.get('status') != 'in-use':
time.sleep(2)
volume = self.volume_api.get(self.context, volume_id)
if volume.get('status') == 'error':
msg = _("migrate vm failed.")
raise exc.HTTPBadRequest(explanation=msg)
query_volume_status_count = query_volume_status_count-1
if query_volume_status_count==0 and volume.get('status') != 'available':
msg = _("migrate vm failed.")
raise exc.HTTPBadRequest(explanation=msg)
def _create_instance(self,inst_type, boot_image_uuid,display_name=None, display_description=None,
key_name=None, metadata=None,access_ip_v4=None, access_ip_v6=None,injected_files=None,
admin_password=None, min_count=1, max_count=1, requested_networks=None, security_group=None,
user_data=None, availability_zone=None, config_drive=None, block_device_mapping=None,
auto_disk_config=None,scheduler_hints=None, legacy_bdm=True,check_server_group_quota=None):
while True:
time.sleep(3)
try:
(instances, resv_id) = self.compute_api.create(self.context,
inst_type,
boot_image_uuid,
display_name=display_name,
display_description=display_description,
key_name=key_name,
metadata=metadata,
access_ip_v4=access_ip_v4,
access_ip_v6=access_ip_v6,
injected_files=injected_files,
admin_password=admin_password,
min_count=min_count,
max_count=max_count,
requested_networks=requested_networks,
security_group=security_group,
user_data=user_data,
availability_zone=availability_zone,
config_drive=config_drive,
block_device_mapping=block_device_mapping,
auto_disk_config=auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm,
check_server_group_quota=check_server_group_quota)
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
readable = traceback.format_exc()
LOG.exception('migrate exception10:%s', readable)
continue
raise exc.HTTPConflict(explanation=error.format_message())
except exception.FixedIpAlreadyInUse as error:
readable = traceback.format_exc()
LOG.exception('migrate exception11:%s', readable)
continue
raise exc.HTTPBadRequest(explanation=error.format_message())
break
return instances
def _create_bdm(self,source_target_vol_mapping,volume_dict_for_boot_index,volume_dict_for_mountpoint):
block_device_mapping=[]
bdm=None
if source_target_vol_mapping:
for source_vol_id in source_target_vol_mapping.keys():
target_volume= source_target_vol_mapping.get(source_vol_id)
bdm_dict={'boot_index':volume_dict_for_boot_index.get(source_vol_id),'uuid': target_volume['id'],'source_type':'volume','delete_on_termination':False,
'volume_id':target_volume['id'], 'destination_type':'volume','device_name':volume_dict_for_mountpoint.get(source_vol_id)}
block_device_mapping.append(bdm_dict)
bdm = [ block_device.BlockDeviceDict.from_api(bdm_dict)
for bdm_dict in block_device_mapping]
return bdm
def _get_instance_port_info(self,instance):
#get the interface-port info
search_opts = {'device_id': instance['uuid']}
try:
data = self.network_api.list_ports(self.context, **search_opts)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("Network driver does not support this function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
ports = data.get('ports', [])
return ports
#copy end
def run(self):
    """Migrate this server by rebuilding it in the target availability zone.

    High-level flow (all synchronous, polling-based):
      1. capture the source server's config (ports, floating IPs, flavor,
         metadata, networks, block devices)
      2. stop the source server and detach its data volumes
      3. obtain a bootable image (reuse the original image, or snapshot
         the server / upload the system volume to glance)
      4. delete the source server and boot a replacement from that image
      5. recreate the data volumes in the target AZ, attach them, and
         re-associate the floating IPs

    Raises webob HTTPBadRequest on any timeout or error; the polling
    loops below all share the same "migrate vm failed." message.
    """
    LOG.error('begin time of migrate is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        self.context, self.instance['uuid'])
    is_boot_from_image = False
    # image id uploaded per data volume, filled by _upload_data_volume_to_image
    volume_dict_for_image_id ={}
    #add system_volume
    system_volume=None
    # source volume id -> {'boot_index', 'mount_point'}
    data_volume_info_dict={}
    # NOTE(review): stays None — data volumes are attached after boot
    # instead of being passed as a BDM to the create call below.
    block_device_mapping = None
    volume_ids = []  # NOTE(review): never used below
    system_volume_image_id = None
    #step1 get the source instance info
    instance = common.get_instance(self.compute_api, self.context, self.instance.uuid, want_objects=True)
    #get the interface-port info
    ports = self._get_instance_port_info(instance)
    #get floatingip
    # fixed ip address -> neutron floatingips response for that port
    floatingIp_fixIp_map ={}
    for port in ports:
        search_opts={}
        search_opts['port_id'] = port['id']
        floating_ip = self.network_api.list_floatingips(self.context,**search_opts)
        if floating_ip:
            if floating_ip.get('floatingips',None):
                # only the port's first fixed IP is considered
                floatingIp_fixIp_map[port['fixed_ips'][0].get('ip_address')]=floating_ip
    # Capture everything needed to recreate an equivalent server later.
    access_ip_v4 = instance.access_ip_v4
    access_ip_v6 = instance.access_ip_v6
    min_count = 1
    max_count = 1
    name=instance.display_name
    key_name = None
    metadata = instance.metadata
    injected_files = []
    security_group=instance.security_groups
    user_data=instance.user_data
    flavor_id = instance.system_metadata['instance_type_flavorid']
    scheduler_hints = {}
    #check_server_group_quota = \
    #    self.ext_mgr.is_loaded('os-server-group-quotas')
    check_server_group_quota=True
    # Rebuild the requested-networks spec so the new server reuses the
    # same networks and fixed IPs as the source server.
    requested_networks = []
    nw_info = compute_utils.get_nw_info_for_instance(instance)
    for vif in nw_info:
        net_uuid = vif['network']['id']
        net_ip = vif['network']['subnets'][0]['ips'][0]['address']
        requested_networks.append({'fixed_ip':net_ip, 'uuid':net_uuid})
    requested_networks = self._get_requested_networks(requested_networks)
    # Detach every port so its fixed IP can be claimed by the new server.
    for port in ports:
        LOG.debug('begin detach the port %s of instance %s' %(port['id'],instance.uuid))
        self.compute_api.detach_interface(self.context,instance, port_id=port['id'])
        time.sleep(2)
        LOG.debug('end detach the port %s of instance %s' %(port['id'],instance.uuid))
    # Classify the block devices: boot disk (index 0) vs data volumes.
    for bdm in bdms:
        if bdm.image_id is not None and bdm.boot_index == 0 and bdm.destination_type =='local':
            # booted from a glance image onto local disk
            is_boot_from_image =True
            system_volume_image_id = bdm.image_id
        if bdm.volume_id is not None:
            if bdm.boot_index == 0:
                # booted from a cinder volume; remember its source image
                volume = self.volume_api.get(self.context, bdm.volume_id)
                system_volume = volume
                volume_image_metadata = volume.get('volume_metadata')
                system_volume_image_id = volume_image_metadata['image_id']
            else:
                volume_info={'boot_index':bdm.boot_index,'mount_point':bdm.device_name}
                data_volume_info_dict[bdm.volume_id]=volume_info
    #step2 stop the instance
    LOG.error('begin time of stop instance is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    self._stop_instance(instance)
    LOG.error('end time of stop instance is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    instance = common.get_instance(self.compute_api, self.context, self.instance.uuid, want_objects=True)
    #detach volume
    for volume_id in data_volume_info_dict.keys():
        volume = self.volume_api.get(self.context, volume_id)
        self.compute_api.detach_volume(self.context,instance,volume)
    #judge volume detach
    # Wait (2s interval, 1800 polls each) until every data volume is free.
    for volume_id in data_volume_info_dict.keys():
        query_volume_status_count = 1800
        volume = self.volume_api.get(self.context, volume_id)
        while volume.get('status') != 'available':
            time.sleep(2)
            volume = self.volume_api.get(self.context, volume_id)
            if volume.get('status') == 'error':
                msg = _("migrate vm failed.")
                raise exc.HTTPBadRequest(explanation=msg)
            query_volume_status_count = query_volume_status_count-1
            if query_volume_status_count==0 and volume.get('status') != 'available':
                msg = _("migrate vm failed.")
                raise exc.HTTPBadRequest(explanation=msg)
    #get the image of target vm
    boot_image_uuid = None
    if is_boot_from_image:
        if self.migrate_system_volume is False:
            # reuse the original glance image; local disk contents are lost
            boot_image_uuid = system_volume_image_id
        else:
            # snapshot the (stopped) server into a temporary glance image
            tmp_image_name = "%s@%s" % (uuid.uuid1(), self.instance.uuid)
            instance = common.get_instance(self.compute_api, self.context, self.instance.uuid, want_objects=True)
            image_meta = self.compute_api.snapshot(self.context, instance, name=tmp_image_name, extra_properties=None)
            query_image_status_count=1800
            filters = {'name':tmp_image_name}
            imagelist = self.image_api.get_all(self.context,filters=filters)
            image = imagelist[0]
            while image['status'] != 'active':
                time.sleep(1)
                imagelist = self.image_api.get_all(self.context,filters=filters)
                image = imagelist[0]
                #image_uuid = image['id']
                #image = self.image_api.get(self.context,image_uuid )
                if image['status'] =='error':
                    msg = _("migrate vm failed.")
                    raise exc.HTTPBadRequest(explanation=msg)
                query_image_status_count = query_image_status_count-1
                if query_image_status_count == 0 and image['status'] != 'active':
                    msg = _("migrate vm failed.")
                    raise exc.HTTPBadRequest(explanation=msg)
            boot_image_uuid =image['id']
    else:
        if self.migrate_system_volume is False:
            boot_image_uuid = system_volume_image_id
        else :
            # boot volume: upload it to glance (force=True, qcow2/bare) and
            # use the resulting image to boot the replacement server
            response = self.volume_api.upload_to_image(self.context,
                                                       system_volume['id'],
                                                       True,
                                                       system_volume['id'],
                                                       'bare',
                                                       'qcow2')
            image_id_of_volume = response[1]['os-volume_upload_image']['image_id']
            image = self.image_api.get(self.context,image_id_of_volume)
            query_image_status_count=1800
            LOG.info('query the image %s status of the voluem %s' %(image_id_of_volume,system_volume['id']))
            while image['status'] != 'active':
                time.sleep(2)
                image = self.image_api.get(self.context,image_id_of_volume)
                if image['status'] == 'error':
                    msg = _("migrate vm failed.")
                    raise exc.HTTPBadRequest(explanation=msg)
                # BUG: query_image_status_count itself is never decremented,
                # so query_cascaded_image_status_count is always 1799 and the
                # timeout below can never fire — this loop can spin forever.
                query_cascaded_image_status_count = query_image_status_count-1
                if query_cascaded_image_status_count == 0 and image['status'] != 'active':
                    msg = _("migrate vm failed.")
                    raise exc.HTTPBadRequest(explanation=msg)
            boot_image_uuid = image_id_of_volume
    LOG.error('begin time of delete instance is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    #update the instance metadata the metadata use for vcloud delete vm
    self.compute_api.update_instance_metadata(self.context,instance,{'quick_delete_once': 'True'},delete=False)
    self.compute_api.delete(self.context,instance)
    LOG.error('end time of delete instance is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    _get_inst_type = flavors.get_flavor_by_flavor_id
    inst_type = _get_inst_type(flavor_id, ctxt=self.context,
                               read_deleted="no")
    # Boot the replacement with the captured config (data volumes are
    # attached afterwards, hence block_device_mapping=None here).
    instances=self._create_instance(inst_type,boot_image_uuid, display_name=name,
                                    display_description=name,key_name=key_name,metadata=metadata,
                                    access_ip_v4=access_ip_v4,access_ip_v6=access_ip_v6,injected_files=injected_files,
                                    admin_password=None, min_count=min_count,max_count=max_count, requested_networks=requested_networks,
                                    security_group=security_group, user_data=user_data,availability_zone=self.availability_zone,
                                    config_drive=None, block_device_mapping=block_device_mapping, auto_disk_config=None,
                                    scheduler_hints=scheduler_hints, legacy_bdm=True,check_server_group_quota=check_server_group_quota)
    # Recreate the data volumes in the target AZ from uploaded images.
    volume_dict_for_image_id = self._upload_data_volume_to_image(data_volume_info_dict)
    source_target_vol_mapping = self._create_target_volume(volume_dict_for_image_id)
    #mount volume and reboot
    instance_new =None
    if instances is not None and len(instances) == 1:
        instance_new = instances[0]
        query_new_vm_status_count=1200
        # Wait for the replacement server to become ACTIVE (2s interval).
        while instance_new.vm_state != 'active':
            time.sleep(2)
            instance_new = common.get_instance(self.compute_api, self.context, instance_new.uuid,
                                               want_objects=True)
            if instance_new.vm_state == 'error' :
                LOG.error("bulid instance failed")
                msg = _("migrate vm failed.")
                raise exc.HTTPBadRequest(explanation=msg)
            query_new_vm_status_count =query_new_vm_status_count-1
            if query_new_vm_status_count ==0 and instance_new.vm_state != 'active':
                msg = _("migrate vm failed.")
                raise exc.HTTPBadRequest(explanation=msg)
        instances[0].task_state = task_states.MIGRATING
        instances[0].save()
    # NOTE(review): if the create returned no/multiple instances,
    # instance_new is still None here and the calls below would raise.
    try:
        self._check_volume_status(source_target_vol_mapping)
        self._mount_data_volume(instance_new, source_target_vol_mapping, data_volume_info_dict)
        # Re-associate each floating IP with the matching fixed IP.
        if floatingIp_fixIp_map:
            for ip_address in floatingIp_fixIp_map.keys():
                self.network_api.associate_floating_ip(self.context,instance_new,floatingIp_fixIp_map.get(ip_address)['floatingips'][0]['floating_ip_address'],ip_address)
        instance_new.task_state = None
        instance_new.save()
        if source_target_vol_mapping:
            # reboot so the guest picks up the newly attached data volumes
            LOG.error('begin time of reboot instance is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            self.compute_api.reboot(self.context, instance_new, 'SOFT')
        #step 9 delete the image
        LOG.debug('begin clear the image and volume')
        self._delete_tmp_image(boot_image_uuid, volume_dict_for_image_id)
        #step 10 delete the volume
        self._delete_volume_after_migrate(data_volume_info_dict)
    except Exception as e:
        # NOTE(review): e.message is Python-2-only; on Python 3 this line
        # itself raises AttributeError.  Also note the exception is
        # swallowed — the migration is reported finished even on failure.
        LOG.error('exception occur during migrating,the expeciont %s' %e.message)
        instance_new.task_state = None
        instance_new.save()
        # instance_new.task_state = None
        # instance_new.save()
        time.sleep(2)
        instance_new = common.get_instance(self.compute_api, self.context, instance_new.uuid,
                                           want_objects=True)
        # Best-effort wait for the server to settle back to ACTIVE;
        # query_new_vm_status_count carries over from the loop above.
        while instance_new.vm_state != 'active':
            time.sleep(2)
            instance_new = common.get_instance(self.compute_api, self.context, instance_new.uuid,
                                               want_objects=True)
            if instance_new.vm_state == 'error' :
                LOG.error("bulid instance failed")
                msg = _("migrate vm failed.")
                raise exc.HTTPBadRequest(explanation=msg)
            query_new_vm_status_count =query_new_vm_status_count-1
            if query_new_vm_status_count ==0 and instance_new.vm_state != 'active':
                msg = _("migrate vm failed.")
                raise exc.HTTPBadRequest(explanation=msg)
    LOG.error('end time of migrate is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
class Admin_actions(extensions.ExtensionDescriptor):
    """Enable admin-only server actions

    Actions include: pause, unpause, suspend, resume, migrate,
    resetNetwork, injectNetworkInfo, lock, unlock, createBackup
    """

    name = "AdminActions"
    alias = "os-admin-actions"
    namespace = "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1"
    updated = "2011-09-20T00:00:00Z"

    def get_controller_extensions(self):
        """Attach AdminActionsController to the 'servers' resource."""
        controller_ext = extensions.ControllerExtension(
            self, 'servers', AdminActionsController(self.ext_mgr))
        return [controller_ext]
| {
"content_hash": "e5cad11b079d85b8d27327fdb3691890",
"timestamp": "",
"source": "github",
"line_count": 1070,
"max_line_length": 174,
"avg_line_length": 49.899065420560746,
"alnum_prop": 0.5490710218759365,
"repo_name": "Hybrid-Cloud/badam",
"id": "2b40deb265f7333464f6f14d5af074251c523327",
"size": "54009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascading/nova/api/openstack/compute/contrib/admin_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "29372474"
},
{
"name": "Shell",
"bytes": "17334"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a free-text "About Me" field to the custom user model."""

    # Must be applied after the UserProfile migration of the users app.
    dependencies = [
        ('users', '0007_userprofile'),
    ]

    operations = [
        # blank=True makes the field optional in forms; max_length caps
        # the bio at 255 characters.
        migrations.AddField(
            model_name='user',
            name='about',
            field=models.CharField(max_length=255, verbose_name='About Me', blank=True),
        ),
    ]
| {
"content_hash": "b42f541749ee1e3ea61b2f2edeebf008",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 88,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.5945273631840796,
"repo_name": "yrchen/CommonRepo",
"id": "0489135384c01f5e0587116f297123dd303d31ed",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "commonrepo/users/migrations/0008_user_about.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "209557"
},
{
"name": "JavaScript",
"bytes": "3462"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "336120"
},
{
"name": "Shell",
"bytes": "4523"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
'''
Extension to Python Markdown for Embedded Audio
Basic Example:
>>> import markdown
>>> text = """[podcast]http://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3[/podcast]"""
>>> html = markdown.markdown(text, [PodcastExtension()])
>>> print(html)
<p><audio src="http://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3"></audio></p>
'''
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
PODCAST_RE = r'\[podcast\](?P<url>.+)\[/podcast\]'
class PodcastPattern(Pattern):
    """Inline pattern turning [podcast]url[/podcast] markers into an
    HTML5 <audio controls> element with an mp3 <source> child."""

    def __init__(self, pattern, configs):
        # configs is accepted for API symmetry but not used.
        Pattern.__init__(self, pattern)

    def handleMatch(self, m):
        audio_elem = etree.Element('audio')
        audio_elem.set('controls', '')
        source_elem = etree.SubElement(audio_elem, 'source')
        source_elem.set('type', 'audio/mpeg')
        source_elem.set('src', m.group('url').strip())
        return audio_elem
class PodcastExtension(Extension):
    """Markdown extension that registers the podcast inline pattern."""

    def __init__(self, configs=None):
        # BUG FIX: the original used a mutable default argument ({}) and
        # raised TypeError when explicitly passed None — which is exactly
        # what makeExtension() forwards by default.
        # set extension defaults
        self.config = {}
        # Override defaults with user settings (key/value pairs)
        for key, value in (configs or ()):
            self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals):
        """Hook the podcast pattern into the given Markdown instance."""
        podcast_md_pattern = PodcastPattern(PODCAST_RE, self.getConfigs())
        podcast_md_pattern.md = md
        # register before the strong/em patterns so the tag wins
        md.inlinePatterns.add('podcast', podcast_md_pattern, "<not_strong")
        md.registerExtension(self)
def makeExtension(configs=None):
    """Entry point used by markdown.markdown(..., extensions=[...]).

    BUG FIX: forward an empty mapping instead of None so that
    PodcastExtension's config loop does not raise TypeError when this
    factory is called with no arguments.
    """
    return PodcastExtension(configs or {})
if __name__ == '__main__':
    import doctest
    # Run the module's doctests.  Combine option flags with bitwise OR:
    # they are bit flags, and "+" only happens to work while the two
    # flags share no bits.
    doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE |
                                 doctest.REPORT_NDIFF))
| {
"content_hash": "fcfb385bd5157750c523d5b1d6bf7399",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 101,
"avg_line_length": 30.524590163934427,
"alnum_prop": 0.6541353383458647,
"repo_name": "damianavila/nikola",
"id": "be8bb6be12cb27a9ebae161dd0a16d00763bc709",
"size": "3143",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nikola/plugins/compile/markdown/mdx_podcast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "315959"
},
{
"name": "JavaScript",
"bytes": "147595"
},
{
"name": "Python",
"bytes": "505175"
},
{
"name": "Shell",
"bytes": "663"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.