text stringlengths 4 1.02M | meta dict |
|---|---|
import pulsar
from pulsar.schema import *
import sys
# Schema record with two numeric fields; mirrors the record produced by the
# companion producer example in the same docker test image.
class ReceiveExample(Record):
    x = Integer()
    y = Long()
# Usage: consumer_schema.py <service_url> <topic>
service_url = sys.argv[1]
topic = sys.argv[2]
client = pulsar.Client(service_url)
# Subscribe with a JSON schema so received payloads are decoded into
# ReceiveExample instances.
consumer = client.subscribe(
    topic=topic,
    subscription_name="my-subscription",
    schema=JsonSchema(ReceiveExample)
)
# Block until one message arrives, then verify the field values the
# producer example is expected to have sent.
msg = consumer.receive()
obj = msg.value()
assert obj.x == 1
assert obj.y == 2
client.close()
| {
"content_hash": "a18b3fc80610bb087eb771c8b7f94f62",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 56,
"avg_line_length": 17,
"alnum_prop": 0.5983772819472617,
"repo_name": "nkurihar/pulsar",
"id": "64efcbecead7366f93390f46b2efffc10127075a",
"size": "1305",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/docker-images/latest-version-image/python-examples/consumer_schema.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2170"
},
{
"name": "C",
"bytes": "145811"
},
{
"name": "C++",
"bytes": "1327528"
},
{
"name": "CMake",
"bytes": "23019"
},
{
"name": "CSS",
"bytes": "31825"
},
{
"name": "Dockerfile",
"bytes": "26393"
},
{
"name": "Go",
"bytes": "109755"
},
{
"name": "Groovy",
"bytes": "20767"
},
{
"name": "HCL",
"bytes": "13762"
},
{
"name": "HTML",
"bytes": "133834"
},
{
"name": "Java",
"bytes": "13217208"
},
{
"name": "JavaScript",
"bytes": "80337"
},
{
"name": "Makefile",
"bytes": "2322"
},
{
"name": "Python",
"bytes": "442677"
},
{
"name": "Ruby",
"bytes": "20575"
},
{
"name": "Shell",
"bytes": "163519"
},
{
"name": "Smarty",
"bytes": "1042"
}
],
"symlink_target": ""
} |
from Child import Child
from Node import Node # noqa: I201
ATTRIBUTE_NODES = [
# token-list -> token? token-list?
Node('TokenList', kind='SyntaxCollection',
element='Token'),
# token-list -> token token-list?
Node('NonEmptyTokenList', kind='SyntaxCollection',
element='Token', omit_when_empty=True),
Node('CustomAttribute', kind='Syntax',
description='''
A custom `@` attribute.
''',
children=[
Child('AtSignToken', kind='AtSignToken',
description='The `@` sign.'),
Child('AttributeName', kind='Type', classification='Attribute',
description='The name of the attribute.'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('ArgumentList', kind='FunctionCallArgumentList',
collection_element_name='Argument', is_optional=True),
Child('RightParen', kind='RightParenToken',
is_optional=True),
]),
# attribute -> '@' identifier '('?
# ( identifier
# | string-literal
# | integer-literal
# | availability-spec-list
# | specialize-attr-spec-list
# | implements-attr-arguments
# | named-attribute-string-argument
# )? ')'?
Node('Attribute', kind='Syntax',
description='''
An `@` attribute.
''',
children=[
Child('AtSignToken', kind='AtSignToken',
description='The `@` sign.'),
Child('AttributeName', kind='Token', classification='Attribute',
description='The name of the attribute.'),
Child('LeftParen', kind='LeftParenToken', is_optional=True,
description='''
If the attribute takes arguments, the opening parenthesis.
'''),
Child('Argument', kind='Syntax', is_optional=True,
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('String', kind='StringLiteralToken'),
Child('Integer', kind='IntegerLiteralToken'),
Child('Availability', kind='AvailabilitySpecList'),
Child('SpecializeArguments',
kind='SpecializeAttributeSpecList'),
Child('ObjCName', kind='ObjCSelector'),
Child('ImplementsArguments',
kind='ImplementsAttributeArguments'),
Child('NamedAttributeString',
kind='NamedAttributeStringArgument'),
Child('OpaqueReturnTypeOfArguments',
kind='OpaqueReturnTypeOfAttributeArguments'),
], description='''
The arguments of the attribute. In case the attribute \
takes multiple arguments, they are gather in the \
appropriate takes first.
'''),
Child('RightParen', kind='RightParenToken', is_optional=True,
description='''
If the attribute takes arguments, the closing parenthesis.
'''),
# TokenList to gather remaining tokens of invalid attributes
# FIXME: Remove this recovery option entirely
Child('TokenList', kind='TokenList',
collection_element_name='Token', is_optional=True),
]),
# attribute-list -> attribute attribute-list?
Node('AttributeList', kind='SyntaxCollection',
omit_when_empty=True,
element='Syntax', element_name='Attribute',
element_choices=[
'Attribute',
'CustomAttribute',
]),
# The argument of '@_specialize(...)'
# specialize-attr-spec-list -> labeled-specialize-entry
# specialize-spec-attr-list?
# | generic-where-clause
# specialize-spec-attr-list?
Node('SpecializeAttributeSpecList', kind='SyntaxCollection',
description='''
A collection of arguments for the `@_specialize` attribute
''',
element='Syntax', element_name='SpecializeAttribute',
element_choices=[
'LabeledSpecializeEntry',
'GenericWhereClause',
]),
# Representation of e.g. 'exported: true,'
# labeled-specialize-entry -> identifier ':' token ','?
Node('LabeledSpecializeEntry', kind='Syntax',
description='''
A labeled argument for the `@_specialize` attribute like \
`exported: true`
''',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='IdentifierToken',
description='The label of the argument'),
Child('Colon', kind='ColonToken',
description='The colon separating the label and the value'),
Child('Value', kind='Token',
description='The value for this argument'),
Child('TrailingComma', kind='CommaToken',
is_optional=True, description='''
A trailing comma if this argument is followed by another one
'''),
]),
# The argument of '@_dynamic_replacement(for:)' or '@_private(sourceFile:)'
# named-attribute-string-arg -> 'name': string-literal
Node('NamedAttributeStringArgument', kind='Syntax',
description='''
The argument for the `@_dynamic_replacement` or `@_private` \
attribute of the form `for: "function()"` or `sourceFile: \
"Src.swift"`
''',
children=[
Child('NameTok', kind='Token',
description='The label of the argument'),
Child('Colon', kind='ColonToken',
description='The colon separating the label and the value'),
Child('StringOrDeclname', kind='Syntax', node_choices=[
Child('String', kind='StringLiteralToken'),
Child('Declname', kind='DeclName'),
]),
]),
Node('DeclName', kind='Syntax', children=[
Child('DeclBaseName', kind='Token', description='''
The base name of the protocol\'s requirement.
''',
token_choices=[
'IdentifierToken',
'PrefixOperatorToken',
'PostfixOperatorToken',
'SpacedBinaryOperatorToken',
'UnspacedBinaryOperatorToken',
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True, description='''
The argument labels of the protocol\'s requirement if it \
is a function requirement.
'''),
]),
# The argument of '@_implements(...)'
# implements-attr-arguments -> simple-type-identifier ','
# (identifier | operator) decl-name-arguments
Node('ImplementsAttributeArguments', kind='Syntax',
description='''
The arguments for the `@_implements` attribute of the form \
`Type, methodName(arg1Label:arg2Label:)`
''',
children=[
Child('Type', kind='SimpleTypeIdentifier', description='''
The type for which the method with this attribute \
implements a requirement.
'''),
Child('Comma', kind='CommaToken',
description='''
The comma separating the type and method name
'''),
Child('DeclBaseName', kind='Syntax', description='''
The base name of the protocol\'s requirement.
''',
node_choices=[
Child('Identifier', kind='IdentifierToken'),
Child('Operator', kind='PrefixOperatorToken'),
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True, description='''
The argument labels of the protocol\'s requirement if it \
is a function requirement.
'''),
]),
# objc-selector-piece -> identifier? ':'?
Node('ObjCSelectorPiece', kind='Syntax',
description='''
A piece of an Objective-C selector. Either consisiting of just an \
identifier for a nullary selector, an identifier and a colon for a \
labeled argument or just a colon for an unlabeled argument
''',
children=[
Child('Name', kind='IdentifierToken', is_optional=True),
Child('Colon', kind='ColonToken', is_optional=True),
]),
# objc-selector -> objc-selector-piece objc-selector?
Node('ObjCSelector', kind='SyntaxCollection', element='ObjCSelectorPiece'),
# opaque-return-type-of-attr-arguments -> string-literal ','
# integer-literal
Node('OpaqueReturnTypeOfAttributeArguments', kind='Syntax',
description='''
The argument for the `@_opaqueReturnTypeOf` type attribute of the \
form `<mangled name>, <index number>`.
''',
children=[
Child('MangledName', kind='StringLiteralToken', description='''
The mangled name of the opaque function/property which the
the type represents.
'''),
Child('Comma', kind='CommaToken'),
Child('Index', kind='IntegerLiteralToken', description='''
The index of the return type.
'''),
]),
]
| {
"content_hash": "594ebff073509fe16c2559846c672c67",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 79,
"avg_line_length": 44.47767857142857,
"alnum_prop": 0.5250426578339857,
"repo_name": "sschiau/swift",
"id": "750810beb10efc6b3e15bb27a23513e19a5de3a9",
"size": "9963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/gyb_syntax_support/AttributeNodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12280"
},
{
"name": "C",
"bytes": "229930"
},
{
"name": "C++",
"bytes": "33692032"
},
{
"name": "CMake",
"bytes": "539717"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2438"
},
{
"name": "Emacs Lisp",
"bytes": "57288"
},
{
"name": "LLVM",
"bytes": "70517"
},
{
"name": "MATLAB",
"bytes": "2576"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "422082"
},
{
"name": "Objective-C++",
"bytes": "248166"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "1582780"
},
{
"name": "Roff",
"bytes": "3495"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "220027"
},
{
"name": "Swift",
"bytes": "29360898"
},
{
"name": "Vim script",
"bytes": "16701"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
} |
"""Pytest plugins globally available.
`conftest.py` file is automatically detected by Pytest and register
plugins (hooks and fixtures) common to all tests.
See: https://docs.pytest.org/en/latest/writing_plugins.html
"""
from typing import Iterator, Type
import pytest
from tensorflow_datasets import testing
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import visibility
from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
from tensorflow_datasets.testing import setup_teardown
# Global setup/teardown
@pytest.fixture(scope='session', autouse=True)
def activate_eager():
  """Session-wide autouse fixture that switches TF into v2 (eager) behavior."""
  tf.compat.v1.enable_v2_behavior()
# Register fixtures which are automatically applied once when tests start.
@pytest.fixture(scope='session', autouse=True)
def disable_community_datasets():
  """Restrict `tfds.list_builders` to public (non-community) datasets."""
  # Kokoro pytest runs execute without absl.app, so the default visibility
  # is never set automatically; pin it explicitly to TFDS public datasets.
  visibility.set_availables([visibility.DatasetType.TFDS_PUBLIC])
# Re-export every fixture defined in `setup_teardown` as a session-scoped,
# autouse fixture of this conftest. Pytest only discovers fixtures bound to
# module attributes, hence the explicit assignment into globals():
# https://stackoverflow.com/questions/27064004/splitting-a-conftest-py-file-into-several-smaller-conftest-like-parts/65035367#65035367
_globals = globals()
for fixture_fn in setup_teardown.GLOBAL_FIXTURES:
  fixture_name = fixture_fn.__name__
  if fixture_name in _globals:
    raise ValueError(f'{fixture_name} already in module.')
  _globals[fixture_name] = pytest.fixture(
      scope='session', autouse=True)(fixture_fn)
del _globals  # Do not modify module globals beyond this point
# Fixtures globally available
@pytest.fixture
def mock_fs() -> Iterator[testing.MockFs]:
  """Yield a virtual file system patched over the `tf.io.gfile` API."""
  with testing.MockFs() as virtual_fs:
    yield virtual_fs
def _make_dataset(
    tmp_path_factory: pytest.TempPathFactory,
    builder_cls: Type[dataset_builder.DatasetBuilder],
) -> dataset_builder.DatasetBuilder:
  """Build and prepare `builder_cls` once inside a fresh session tmp dir."""
  data_dir = tmp_path_factory.mktemp(f'global_{builder_cls.__name__}')
  prepared = builder_cls(data_dir=data_dir)
  prepared.download_and_prepare()
  return prepared
@pytest.fixture(scope='session')
def dummy_mnist(
    tmp_path_factory: pytest.TempPathFactory) -> dataset_builder.DatasetBuilder:
  """Session-scoped, pre-generated dummy MNIST dataset builder."""
  return _make_dataset(tmp_path_factory, testing.DummyMnist)
@pytest.fixture(scope='session')
def dummy_dataset(
    tmp_path_factory: pytest.TempPathFactory) -> dataset_builder.DatasetBuilder:
  """Session-scoped, pre-generated dummy dataset builder."""
  return _make_dataset(tmp_path_factory, testing.DummyDataset)
| {
"content_hash": "5728e79d1661b550902c12af1f1c0595",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 136,
"avg_line_length": 34.05813953488372,
"alnum_prop": 0.7603277569136224,
"repo_name": "tensorflow/datasets",
"id": "46cb05e06eeb2793b5dbc85747200cc134cc2791",
"size": "3541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_datasets/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "13369"
},
{
"name": "NewLisp",
"bytes": "13940"
},
{
"name": "Perl",
"bytes": "520"
},
{
"name": "Python",
"bytes": "5398856"
},
{
"name": "Roff",
"bytes": "22095"
},
{
"name": "Ruby",
"bytes": "25669"
},
{
"name": "Shell",
"bytes": "3895"
},
{
"name": "Smalltalk",
"bytes": "20604"
},
{
"name": "TeX",
"bytes": "759"
}
],
"symlink_target": ""
} |
from tspapi import API
from tspapi import Source
from tspapi import Sender
from tspapi import Measurement
import tspapi.event
from unittest import TestCase
from tspapi import RawEvent
from datetime import datetime
import random
import os
import sys
import json
# Make the test-utilities module next to this file importable regardless of
# the directory pytest is invoked from.
_path = os.path.dirname(__file__)
sys.path.append(_path)
from api_test_utils import TestUtils
class RawEventTest(TestCase):
    """Tests for RawEvent construction, repr, JSON serialization, timestamp
    parsing, and the event-related ``API`` helper methods.

    NOTE(review): the ``test_create_*`` methods call ``self.api.event_create``,
    which presumably performs live network requests — confirm they are meant
    to run outside an integration environment.
    """

    def setUp(self):
        # Fresh API client for every test.
        self.api = API()

    def test_default_constructor(self):
        # A bare RawEvent should have every attribute unset.
        raw_event = RawEvent()
        self.assertIsNone(raw_event.created_at)
        self.assertIsNone(raw_event.event_id)
        self.assertIsNone(raw_event.fingerprint_fields)
        self.assertIsNone(raw_event.id)
        self.assertIsNone(raw_event.message)
        self.assertIsNone(raw_event.properties)
        self.assertIsNone(raw_event.received_at)
        self.assertIsNone(raw_event.sender)
        self.assertIsNone(raw_event.severity)
        self.assertIsNone(raw_event.source)
        self.assertIsNone(raw_event.status)
        self.assertIsNone(raw_event.tags)
        self.assertIsNone(raw_event.tenant_id)
        self.assertIsNone(raw_event.title)

    def test_constructor_args(self):
        # NOTE(review): strftime('%s') is a non-standard (glibc) directive —
        # works on Linux/macOS but is not portable.
        created_at = int(datetime.now().strftime('%s'))
        event_id = random.randrange(1, 1000000000)
        fingerprint_fields = '@title'
        id = random.randrange(1, 1000000000)
        raw_event = RawEvent(
            created_at=created_at,
            event_id=event_id,
            fingerprint_fields=fingerprint_fields,
        )
        self.assertEqual(created_at, raw_event.created_at)

    def test_repr_(self):
        # Builds the exact expected __repr__ string field by field.
        created_at = int(datetime.now().strftime('%s'))
        event_id = random.randrange(1, 1000000000)
        fingerprint_fields = '@title'
        id = random.randrange(1, 1000000000)
        event_class = 'CHANGE'
        message = TestUtils.random_string(32)
        properties = {"foo": "bar", "color": "red"}
        received_at = int(datetime.now().strftime('%s'))
        sender = TestUtils.random_string(10)
        severity = 'INFO'
        source = Source(ref=TestUtils.random_string(10), _type='host', name='foobar')
        status = 'OPEN'
        tags = {"foo": "bar", "color": "red"}
        tenant_id = random.randrange(1, 10000000)
        title = TestUtils.random_string(16)
        raw_event = RawEvent(
            created_at=created_at,
            event_id=event_id,
            event_class=event_class,
            fingerprint_fields=fingerprint_fields,
            id=id,
            message=message,
            properties=properties,
            received_at=received_at,
            sender=sender,
            severity=severity,
            source=source,
            status=status,
            tags=tags,
            tenant_id=tenant_id,
            title=title
        )
        expected = []
        # NOTE(review): the extra `event_id` argument to format() below is
        # unused (the format string has only one placeholder) — harmless.
        expected.append("RawEvent(created_at={0}".format(created_at, event_id))
        expected.append(", event_id='{0}'".format(event_id))
        expected.append(", event_class='{0}'".format(event_class))
        expected.append(", fingerprint_fields='{0}'".format(fingerprint_fields))
        expected.append(", id='{0}'".format(id))
        expected.append(", message='{0}'".format(message))
        expected.append(", properties={0}".format(properties))
        expected.append(", source='{0}'".format(source))
        expected.append(", sender='{0}'".format(sender))
        expected.append(", severity='{0}'".format(severity))
        expected.append(", status='{0}'".format(status))
        expected.append(", tags='{0}'".format(tags))
        expected.append(", tenant_id={0}".format(tenant_id))
        expected.append(", title='{0}')".format(title))
        expected = "".join(expected)
        self.assertEqual(expected, raw_event.__repr__())

    def test_create_event(self):
        source = Source(ref='localhost', _type='host', name='bubba')
        self.api.event_create(title='Hello World', fingerprint_fields=['@title'], source=source)

    def test_create_event_with_date(self):
        source = Source(ref='localhost', _type='host', name='bubba')
        dt = datetime.now()
        self.api.event_create(created_at=dt, title='Hello World', fingerprint_fields=['@title'], source=source)

    def test_create_event_with_finger_print_fields(self):
        fingerprint_fields = ['@message']
        source = Source(ref='localhost', _type='host', name='bubba')
        message = 'hello' + TestUtils.random_string(6)
        dt = datetime.now()
        self.api.event_create(message=message, created_at=dt, title='Hello World', fingerprint_fields=fingerprint_fields, source=source)

    def test_create_event_with_properties(self):
        source = Source(ref='localhost', _type='host', name='bubba')
        title = 'sending tags'
        properties = {"foo": "bar"}
        self.api.event_create(title=title, fingerprint_fields=['@title'], source=source, properties=properties)

    def test_create_event_with_class(self):
        source = Source(ref='localhost', _type='host', name='bubba')
        title = 'Event class'
        event_class = 'MyClass'
        self.api.event_create(title=title, fingerprint_fields=['@title'], source=source, event_class=event_class)

    def test_create_event_with_sender(self):
        source = Source(ref='localhost', _type='host', name='bubba')
        sender = Sender(ref='localhost', _type='host', name='bubba')
        self.api.event_create(title='Hello World', fingerprint_fields=['@title'], source=source, sender=sender)

    def test_create_bad_source(self):
        # A plain string source must be rejected with ValueError.
        try:
            ref = 'Hello World'
            self.api.event_create(title='Hello World', fingerprint_fields=['@title'], source=ref)
            self.assertTrue(False)
        except ValueError:
            pass

    def test_create_bad_sender(self):
        # A plain string sender must be rejected with ValueError.
        try:
            source = Source(ref='localhost', _type='host', name='bubba')
            ref = 'Hello World'
            self.api.event_create(title='Hello World', fingerprint_fields=['@title'], source=source, sender=ref)
            self.assertTrue(False)
        except ValueError:
            pass

    def test_event_get(self):
        events = self.api.event_list()
        for event in events:
            print(event)

    def test_to_json(self):
        # Round-trips a RawEvent through the custom JSON serializer and
        # compares against a hand-written expected document.
        ref = 'device'
        _type = 'blah'
        name = 'hello'
        properties = {'red': 1, 'blue': 'foo', 'green': 1.0}
        source = Source(ref=ref, _type=_type, name=name, properties=properties)
        event = RawEvent(title='Hello World', fingerprint_fields=['@title'], source=source)
        output = json.dumps(event, sort_keys=True, default=tspapi.event.serialize_instance)
        expected = '{"source": {"name": "hello", "properties": {"blue": "foo", "green": 1.0, "red": 1}, ' + \
                   '"ref": "device", "type": "blah"}, "title": "Hello World"}'
        self.assertEqual(expected, output)

    def test_parse_date_datetime(self):
        d = datetime.now()
        expected = int(d.strftime('%s'))
        timestamp = Measurement.parse_timestamp(d)
        self.assertEqual(expected, timestamp)

    def test_parse_date_epoch(self):
        expected = int(datetime.now().strftime('%s'))
        timestamp = Measurement.parse_timestamp(expected)
        self.assertEqual(expected, timestamp)

    def test_parse_date_ymd(self):
        s = '2015-06-30'
        timestamp = Measurement.parse_timestamp(s)
        expected = int(datetime(2015, 6, 30).strftime('%s'))
        self.assertEqual(expected, timestamp)

    def test_parse_date_ymd_hms24(self):
        s = '2014-06-30 14:27:16'
        timestamp = Measurement.parse_timestamp(s)
        expected = int(datetime(2014, 6, 30, 14, 27, 16).strftime('%s'))
        self.assertEqual(expected, timestamp)

    def test_parse_date_ymd_hms(self):
        # 12-hour clock with AM/PM suffix.
        s = '2014-06-30 02:27:16PM'
        timestamp = Measurement.parse_timestamp(s)
        expected = int(datetime(2014, 6, 30, 14, 27, 16).strftime('%s'))
        self.assertEqual(expected, timestamp)

    def test_parse_date_bad_date_format(self):
        # Unparseable input must raise ValueError.
        try:
            s = 'foobar'
            timestamp = Measurement.parse_timestamp(s)
            self.assertTrue(False)
        except ValueError:
            pass
| {
"content_hash": "2f748c122284ec2143e7c521003cf498",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 136,
"avg_line_length": 38.943396226415096,
"alnum_prop": 0.6107073643410853,
"repo_name": "jdgwartney/pulse-api-python",
"id": "3f369fa54fcef8683878ea8dc2d39e1e4b2913e5",
"size": "8866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/tspapi/event_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "404"
},
{
"name": "Python",
"bytes": "87720"
}
],
"symlink_target": ""
} |
import greenlet
from .hub import get_hub
from ..timeout import Timeout
__all__ = ['gyield', 'trampoline']
def gyield(switch_back=True):
    """Cooperatively yield control to other greenlets.

    Suspends the current greenlet by switching to the hub so other greenlets
    may run.

    - With `switch_back` True (the default), this greenlet is rescheduled for
      the beginning of the next event loop iteration, before the loop polls
      for I/O and runs I/O callbacks. This is the intended use the vast
      majority of the time.
    - With `switch_back` False, the hub never resumes this greenlet on its
      own (use with caution). This is useful when some other greenlet — not
      the hub — is responsible for switching back, e.g. the Event class,
      which switches to its waiters when the event is ready.

    :param bool switch_back: automatically switch back to this greenlet on the
        next event loop cycle
    """
    this_greenlet = greenlet.getcurrent()
    hub = get_hub()
    if switch_back:
        # Queue our own resume before handing control to the hub.
        hub.schedule_call_now(this_greenlet.switch)
    hub.switch()
def trampoline(fd, evtype, timeout=None, timeout_exc=Timeout):
    """Jump from the current greenlet to the hub and wait until the given file descriptor is ready
    for I/O, or the specified timeout elapses

    If the specified `timeout` elapses before the socket is ready to read or write, `timeout_exc`
    will be raised instead of :func:`trampoline()` returning normally.

    When the specified file descriptor is ready for I/O, the hub internally calls the callback to
    switch back to the current (this) greenlet.

    Conditions:

    - must not be called from the hub greenlet (can be called from any other greenlet)
    - `evtype` must be either :attr:`~guv.const.READ` or :attr:`~guv.const.WRITE` (not possible to
      watch for both simultaneously)

    :param int fd: file descriptor
    :param int evtype: either the constant :attr:`~guv.const.READ` or :attr:`~guv.const.WRITE`
    :param float timeout: (optional) maximum time to wait in seconds
    :param Exception timeout_exc: (optional) timeout Exception class
    """
    #: :type: AbstractHub
    hub = get_hub()
    current = greenlet.getcurrent()
    assert hub is not current, 'do not call blocking functions from the mainloop'
    assert isinstance(fd, int)
    timer = None
    if timeout is not None:
        def _timeout(exc):
            # timeout has passed: throw the timeout exception into the
            # greenlet that is blocked in hub.switch() below
            current.throw(exc)
        timer = hub.schedule_call_global(timeout, _timeout, timeout_exc)
    try:
        # add a watcher for this file descriptor
        listener = hub.add(evtype, fd, current.switch, current.throw)
        # switch to the hub; returns when the watcher (or timeout) fires
        try:
            return hub.switch()
        finally:
            # always detach the watcher, even if hub.switch() raised
            # log.debug('(trampoline finally) remove listener for fd: {}'.format(fd))
            hub.remove(listener)
    finally:
        # cancel any pending timeout timer so it cannot fire after return
        if timer is not None:
            timer.cancel()
| {
"content_hash": "0fdedc9964379d8ef89c398d784df682",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 100,
"avg_line_length": 37.775,
"alnum_prop": 0.6813368630046327,
"repo_name": "veegee/guv",
"id": "fa741912753bb7dd6041bbbcb5ecb9ea8af84ee9",
"size": "3022",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "guv/hubs/switch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4634"
},
{
"name": "Python",
"bytes": "308368"
}
],
"symlink_target": ""
} |
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import SGDClassifier
from sklearn.cross_validation import KFold
import numpy as np
import cPickle
import logging
# File-based logger for the training run.
logger = logging.getLogger('train')
handler = logging.FileHandler('train.log')
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Load Feature Matrix
feature_matrix = cPickle.load(open('2013-04-20 183207/features.dat', 'r'))
# Load Target Data
logger.info("Converting to CSR Matrix to make life easier...")
feature_matrix = feature_matrix.tocsr()
print feature_matrix.shape
# Hyperparameter grid searched for every target.
parameters = {'loss': ('hinge', 'huber'), 'penalty': ('l2', 'l1', 'elasticnet')}
# Train one binary classifier (any damage/casualty > 0) per target column.
for target in ['property_damages', 'direct_deaths', 'indirect_deaths', 'direct_injuries',
               'indirect_injuries', 'crop_damages']:
    # NOTE(review): these two accumulators are never updated or read —
    # candidates for removal.
    avg_train_score = 0
    avg_test_score = 0
    target_data_file = "targets_%s.dat" % target
    logger.info("Starting to train a model to predict %s..." % target.replace('_', ' '))
    target_matrix = cPickle.load(open('2013-04-20 183207/' + target_data_file, 'r'))
    # Binarize: 1 if any damage/casualties were reported, else 0.
    target_matrix = [1 if i > 0 else 0 for i in target_matrix]
    target_matrix = np.array(target_matrix)
    kf = KFold(len(target_matrix), n_folds=3, indices=True, shuffle=True)
    sgdc = SGDClassifier(shuffle=True, n_jobs=-1, n_iter=300, verbose=3)
    gs = GridSearchCV(sgdc, parameters, n_jobs=-1, cv=kf, verbose=1, refit=False)
    gs.fit(feature_matrix, target_matrix)
    logger.info(gs.grid_scores_)
    logger.info(gs.best_score_)
logger.info(gs.best_params_) | {
"content_hash": "4bbfe3cbc1f4df6085c6ef066dbe3481",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 89,
"avg_line_length": 37.43181818181818,
"alnum_prop": 0.7043108682452944,
"repo_name": "alexrkopp/weather-severity-model",
"id": "48fec81dd7e1f497b230e2c382cf11b0d0a2d9e7",
"size": "1647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/train_classifiers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "146533"
}
],
"symlink_target": ""
} |
import json
import requests
import time
class APIProblem(Exception):
    """Exception type reserved for Riot API request problems."""
# NOTE(review): hard-coded API key committed to source — should be rotated
# and loaded from the environment or a config file instead.
api_key = "19c03581-6d29-43ff-8390-a99ef89132ac"
base_url = "https://na.api.pvp.net"
region = "na"
# Summoner accounts whose match data this script aggregates.
accounts = [{"real_name": "Jesse", "name": "iRemyxed"},
            {"real_name": "Mike", "name": "Shichao"},
            {"real_name": "Ray", "name": "Fliedliceman"},
            {"real_name": "Richard", "name": "Lightningfax"}]
# keep track of rates
class MyRequest:
    """Thin wrapper around ``requests.get`` that throttles call volume.

    After every 10 GET requests the wrapper sleeps for 10 seconds and resets
    its counter, to stay under the Riot API rate limit.
    """

    def __init__(self, base_url):
        # Number of requests issued since the last throttle pause.
        self.requests = 0
        self.base_url = base_url

    def get(self, url, params):
        """Issue a rate-limited GET for ``self.base_url + url``."""
        if self.requests == 10:
            time.sleep(10)
            self.requests = 0
        # Bug fix: previously read the module-level ``base_url`` global,
        # silently ignoring the value stored on this instance.
        r = requests.get(self.base_url + url, params=params)
        self.requests += 1
        return r
# Module-wide rate-limited HTTP client shared by all fetch helpers below.
my_request = MyRequest(base_url)
def fetch_summoner_by_name(names):
    """ Returns dict of all users' summoner object"""
    # Accept either one name (a string) or a list of names.
    if isinstance(names, basestring):
        names = [names]
    query = {"api_key": api_key}
    endpoint = "/api/lol/{region}/v1.4/summoner/by-name/{summonerNames}".format(
        region=region, summonerNames=",".join(names))
    return my_request.get(endpoint, query).json()
def fetch_match_history(account_id):
    # Pull the full RANKED_SOLO_5x5 match history for one summoner, paging
    # 15 games at a time; each page is reversed so matches accumulate
    # oldest-first. Python 2 trailing-comma prints keep the request line
    # and its "N games returned" suffix on one line.
    params = {"api_key": api_key, "rankedQueues": "RANKED_SOLO_5x5", "beginIndex": 0}
    url = "/api/lol/{region}/v2.2/matchhistory/{summonerId}".format(region=region, summonerId=account_id)
    print "Making request at game index {0} ".format(params["beginIndex"]),
    r = my_request.get(url, params)
    data = r.json()["matches"]
    data.reverse()
    matches = data
    print "{0} games returned".format(len(data))
    # keep looping until we run out of games (a short page ends the history)
    while len(data) >= 15:
        params["beginIndex"] += 15
        print "Making request at game index {0} ".format(params["beginIndex"]),
        r = my_request.get(url, params)
        data = r.json()["matches"]
        data.reverse()
        matches += data
        print "{0} games returned".format(len(data))
    return matches
def fetch_matchlist(account_id):
    # Collect only the match ids (not full match objects) for one summoner,
    # paging through RANKED_SOLO_5x5 games 15 at a time.
    params = {"api_key": api_key, "rankedQueues": "RANKED_SOLO_5x5", "beginIndex": 0}
    url = "/api/lol/{region}/v2.2/matchhistory/{summonerId}".format(region=region, summonerId=account_id)
    match_list = []
    print "Making request at game index {0} ".format(params["beginIndex"]),
    r = my_request.get(url, params)
    data = r.json()["matches"]
    data.reverse()
    for i in range(0, len(data)):
        match = data[i]
        match_list.append(match["matchId"])
    print "{0} games returned".format(len(data))
    # keep looping until we run out of games (a short page ends the history)
    while len(data) >= 15:
        params["beginIndex"] += 15
        print "Making request at game index {0} ".format(params["beginIndex"]),
        r = my_request.get(url, params)
        data = r.json()["matches"]
        data.reverse()
        for i in range(0, len(data)):
            match = data[i]
            match_list.append(match["matchId"])
        print "{0} games returned".format(len(data))
    # De-duplicate ids; note set() discards the chronological ordering
    # built up above.
    match_list = list(set(match_list))
    return match_list
def fetch_match_info(match_id):
    """Fetch one match and return its per-participant stat objects.

    Each participant dict is augmented with the overall ``matchDuration``
    so downstream consumers don't need the enclosing match object.
    """
    params = {"api_key": api_key, "includeTimeline": True}
    url = "/api/lol/{region}/v2.2/match/{matchId}".format(region=region, matchId=match_id)
    r = my_request.get(url, params)
    # Cleanup: removed the unused timeline/frames/events locals and the large
    # commented-out (triple-quoted) pagination block that previously followed.
    body = r.json()
    match_duration = body["matchDuration"]
    participants = body["participants"]
    for participant in participants:
        participant["matchDuration"] = match_duration
    return participants
def fetch_rune_info(match_id):
params = {"api_key": api_key, "includeTimeline": True}
url = "/api/lol/{region}/v2.2/match/{matchId}".format(region=region, matchId=match_id)
rune_info = {}
r = my_request.get(url, params)
#data = r.json()["timeline"]
#frames = data["frames"]
participants = r.json()["participants"]
for participant in participants:
if "runes" in participant:
flatlist = []
runelist = participant["runes"]
for object in runelist:
for i in range(0, object["rank"]):
flatlist.append(object["runeId"])
rune_info[participant["championId"]] = flatlist
print rune_info
return rune_info
def most_common(lst):
    """Return the element of *lst* that occurs most often.

    Ties are broken by whichever candidate is seen first while iterating
    the set of distinct elements (same rule as max() with a key).
    """
    winner = None
    winner_count = -1
    for candidate in set(lst):
        occurrences = lst.count(candidate)
        if occurrences > winner_count:
            winner, winner_count = candidate, occurrences
    return winner
if __name__ == "__main__":
    # Build a per-champion rune consensus from the locally cached match list
    # and write it to rune_info.json.
    account_data = fetch_summoner_by_name([user["name"] for user in accounts])
    match_list = []
    raw_rune_info = {}  # championId -> list of per-match flat rune lists
    rune_info = {}      # championId -> most common rune string
    with open("data/match_list.json") as infile:
        match_list = json.load(infile)
    for match_id in match_list:
        match_runes = fetch_rune_info(match_id)
        if match_runes is not None:
            for champion in match_runes:
                if champion in raw_rune_info:
                    raw_rune_info[champion].append(match_runes[champion])
                else:
                    # Start a list of per-match rune lists. (The original
                    # stored the first flat list directly, so later appends
                    # mixed ints and lists in the same container.)
                    raw_rune_info[champion] = [match_runes[champion]]
    for champion in raw_rune_info:
        champ_runes = raw_rune_info[champion]
        flat_champ_runes = []
        for rune_list in champ_runes:
            # Canonicalise one match's runes as a sorted, concatenated id
            # string. The string is rebuilt per match (previously it
            # accumulated across matches) and iteration is over the list
            # itself (previously range() was passed the list — a TypeError).
            rune_string = ""
            for rune_id in sorted(rune_list):
                rune_string += str(rune_id)
            flat_champ_runes.append(rune_string)
        # The modal rune string is taken as that champion's rune page.
        rune_info[champion] = most_common(flat_champ_runes)
    with open("rune_info.json", "w") as outfile:
        json.dump(rune_info, outfile, indent=4)
| {
"content_hash": "56bd610b7338083e47fe09638eb1aaf6",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 120,
"avg_line_length": 31.27310924369748,
"alnum_prop": 0.5857853016256885,
"repo_name": "rkgibson2/hackfest-project",
"id": "9b95a06d9ce7e29d9736f1b49545c5048aa8a461",
"size": "7443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LoLAPI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5351"
},
{
"name": "JavaScript",
"bytes": "35236"
},
{
"name": "Python",
"bytes": "11543"
}
],
"symlink_target": ""
} |
import paramiko
from select import select
import re
import datetime
_JUNOS_PROMPT = '> '
_SHELL_PROMPT = '(%|#)\s'
_SELECT_WAIT = 0.1
_RECVSZ = 1024
class StartShell(object):
    """
    Junos shell execution utility. This utility is written to
    support the "context manager" design pattern. For example::

        def _ssh_exec(self, command):
            with StartShell(self._dev) as sh:
                got = sh.run(command)
            return got

    """

    def __init__(self, nc, timeout=30):
        """
        Utility Constructor

        :param Device nc: The Device object

        :param int timeout:
            Timeout value in seconds to wait for expected string/pattern.
        """
        self._nc = nc
        self.timeout = timeout

    def wait_for(self, this=_SHELL_PROMPT, timeout=0):
        """
        Wait for the result of the command, expecting **this** prompt.

        :param str this: expected string/pattern.

        :param int timeout:
            Timeout value in seconds to wait for expected string/pattern.
            If not specified defaults to self.timeout.

        :returns: resulting string of data in a list
        :rtype: list

        .. warning:: need to add a timeout safeguard
        """
        chan = self._chan
        got = []
        timeout = timeout or self.timeout
        # Convert the relative timeout into an absolute deadline.
        timeout = datetime.datetime.now()+datetime.timedelta(
            seconds=timeout)
        while timeout > datetime.datetime.now():
            # _SELECT_WAIT bounds each select() call so the deadline above is
            # re-checked regularly even when the channel stays silent.
            rd, wr, err = select([chan], [], [], _SELECT_WAIT)
            if rd:
                data = chan.recv(_RECVSZ)
                # paramiko may hand back bytes; normalise to text, replacing
                # undecodable sequences rather than raising.
                if isinstance(data, bytes):
                    data = data.decode('utf-8', 'replace')
                got.append(data)
                # Stop once the expected pattern terminates the latest chunk.
                if re.search(r'{0}\s?$'.format(this), data):
                    break
        return got

    def send(self, data):
        """
        Send the command **data** followed by a newline character.

        :param str data: the data to write out onto the shell.
        :returns: result of SSH channel send
        """
        self._chan.send(data)
        self._chan.send('\n')

    def open(self):
        """
        Open an ssh-client connection and issue the 'start shell' command to
        drop into the Junos shell (csh). This process opens a
        :class:`paramiko.SSHClient` instance.
        """
        junos = self._nc
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        # NOTE(review): auto-accepting unknown host keys trades security for
        # convenience; acceptable only against trusted hosts.
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Tuple-index trick below: junos._port is used only when connecting
        # to 'localhost' (e.g. a forwarded port), otherwise standard port 22.
        client.connect(hostname=junos.hostname,
                       port=(22, junos._port)[junos.hostname == 'localhost'],
                       username=junos._auth_user,
                       password=junos._auth_password,
                       )
        chan = client.invoke_shell()
        self._client = client
        self._chan = chan

        # We may land either in the Junos CLI ('>') or directly in a shell
        # ('%'/'#'); if it is the CLI, drop down into csh.
        got = self.wait_for(r'(%|>|#)')
        if got[-1].endswith(_JUNOS_PROMPT):
            self.send('start shell')
            self.wait_for(_SHELL_PROMPT)

    def close(self):
        """ Close the SSH client channel """
        self._chan.close()
        self._client.close()

    def run(self, command, this=_SHELL_PROMPT, timeout=0):
        """
        Run a shell command and wait for the response. The return is a
        tuple. The first item is True/False if exit-code is 0. The second
        item is the output of the command.

        :param str command: the shell command to execute
        :param str this: the exected shell-prompt to wait for

        :param int timeout:
            Timeout value in seconds to wait for expected string/pattern (this).
            If not specified defaults to self.timeout. This timeout is specific
            to individual run call.

        :returns: (last_ok, result of the executed shell command (str) )

        .. note:: as a *side-effect* this method will set the ``self.last_ok``
                  property. This property is set to ``True`` if ``$?`` is
                  "0"; indicating the last shell command was successful else
                  False
        """
        timeout = timeout or self.timeout
        # run the command and capture the output
        self.send(command)
        got = ''.join(self.wait_for(this, timeout))
        self.last_ok = False
        if this != _SHELL_PROMPT:
            # Custom prompt: "ok" just means the expected pattern terminated
            # the output (i.e. wait_for did not merely time out).
            self.last_ok = re.search(r'{0}\s?$'.format(this), got) is not None
        elif re.search(r'{0}\s?$'.format(_SHELL_PROMPT), got) is not None:
            # use $? to get the exit code of the command
            self.send('echo $?')
            rc = ''.join(self.wait_for(_SHELL_PROMPT))
            # NOTE(review): success test is "a '0' appears after index 0 of
            # the echoed output" — fragile if the prompt or echo contains a
            # '0'; confirm before relying heavily on last_ok.
            self.last_ok = rc.find('0') > 0
        return (self.last_ok, got)

    # -------------------------------------------------------------------------
    # CONTEXT MANAGER
    # -------------------------------------------------------------------------

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, exc_ty, exc_val, exc_tb):
        self.close()
| {
"content_hash": "4c2d6e3b7b3851aafca3e7cc1ccb0fbe",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 79,
"avg_line_length": 32.928104575163395,
"alnum_prop": 0.5398967844382692,
"repo_name": "spidercensus/py-junos-eznc",
"id": "07956c0ee06f68aadbaf4c05f1558c2004e1e5f0",
"size": "5038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/jnpr/junos/utils/start_shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Pascal",
"bytes": "408"
},
{
"name": "Puppet",
"bytes": "2263"
},
{
"name": "Python",
"bytes": "689193"
},
{
"name": "Ruby",
"bytes": "4840"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Creates the models backing the DSF Core Awards: cohorts (groups of
    # recipients approved together by the DSF Board) and the individual
    # award recipients belonging to a cohort.

    dependencies = [
        ('foundation', '0001_initial'),
    ]

    operations = [
        # The cohort model must be created first: CoreAward below holds a
        # foreign key to it.
        migrations.CreateModel(
            name='CoreAwardCohort',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text="Name for the group being inducted, e.g. 'Q1 2021'", max_length=255, unique=True)),
                ('description', models.TextField(blank=True)),
                ('cohort_date', models.DateField(help_text='Date this cohort was approved by the DSF Board')),
            ],
        ),
        migrations.CreateModel(
            name='CoreAward',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('recipient', models.CharField(help_text="Recipient's name", max_length=1023, unique=True)),
                ('link', models.URLField(blank=True, help_text='Optional link for this recipient', null=True)),
                ('description', models.TextField(blank=True, help_text='Optional one-paragraph description/bio of why this person received the award')),
                # Deleting a cohort cascades to its recipients.
                ('cohort', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipients', to='foundation.CoreAwardCohort')),
            ],
            options={
                'ordering': ['recipient'],
            },
        ),
    ]
| {
"content_hash": "22f7d2a48e5a6ea79372b09af21cf059",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 152,
"avg_line_length": 46.44117647058823,
"alnum_prop": 0.5953134895503484,
"repo_name": "django/djangoproject.com",
"id": "8f28a285fd6b584de1de613c1cab915971508d2f",
"size": "1629",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "foundation/migrations/0002_coreaward_coreawardcohort.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "1510"
},
{
"name": "CSS",
"bytes": "6637"
},
{
"name": "CoffeeScript",
"bytes": "24188"
},
{
"name": "Dockerfile",
"bytes": "1102"
},
{
"name": "HTML",
"bytes": "237354"
},
{
"name": "JavaScript",
"bytes": "817965"
},
{
"name": "Makefile",
"bytes": "1594"
},
{
"name": "PostScript",
"bytes": "937444"
},
{
"name": "Procfile",
"bytes": "36"
},
{
"name": "Python",
"bytes": "672841"
},
{
"name": "Ruby",
"bytes": "19821"
},
{
"name": "SCSS",
"bytes": "111983"
},
{
"name": "Shell",
"bytes": "1356"
},
{
"name": "Smalltalk",
"bytes": "407"
}
],
"symlink_target": ""
} |
import math
import random
import numpy as np
import tensorflow as tf
from tensorforce import TensorforceError, util
from tensorforce.core import parameter_modules, TensorSpec, tf_function, tf_util
from tensorforce.core.layers import Layer, NondeterministicLayer
class Activation(Layer):
    """
    Activation layer (specification key: `activation`).

    Applies a fixed elementwise nonlinearity to its float input tensor.

    Args:
        nonlinearity ('crelu' | 'elu' | 'leaky-relu' | 'none' | 'relu' | 'selu' | 'sigmoid' |
            'softmax' | 'softplus' | 'softsign' | 'swish' | 'tanh'): Nonlinearity
            (<span style="color:#C00000"><b>required</b></span>).
        name (string): Layer name
            (<span style="color:#00C000"><b>default</b></span>: internally chosen).
        input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
    """

    def __init__(self, *, nonlinearity, name=None, input_spec=None):
        super().__init__(name=name, input_spec=input_spec)

        # Validate the nonlinearity name up front.
        valid_names = (
            'crelu', 'elu', 'leaky-relu', 'none', 'relu', 'selu', 'sigmoid', 'softmax', 'softplus',
            'softsign', 'swish', 'tanh'
        )
        if nonlinearity not in valid_names:
            raise TensorforceError.value(
                name='activation', argument='nonlinearity', value=nonlinearity
            )
        self.nonlinearity = nonlinearity

        self.architecture_kwargs['nonlinearity'] = nonlinearity

    def default_input_spec(self):
        # Accepts a float tensor of any shape.
        return TensorSpec(type='float', shape=None)

    @tf_function(num_args=1)
    def apply(self, *, x):
        # Dispatch on the configured nonlinearity; each branch returns the
        # transformed tensor directly.
        kind = self.nonlinearity
        if kind == 'none':
            return x
        elif kind == 'crelu':
            return tf.nn.crelu(features=x)
        elif kind == 'elu':
            return tf.nn.elu(features=x)
        elif kind == 'leaky-relu':
            # TODO: make alpha public argument
            return tf.nn.leaky_relu(features=x, alpha=0.2)
        elif kind == 'relu':
            return tf.nn.relu(features=x)
        elif kind == 'selu':
            return tf.nn.selu(features=x)
        elif kind == 'sigmoid':
            return tf.sigmoid(x=x)
        elif kind == 'softmax':
            return tf.nn.softmax(logits=x)
        elif kind == 'softplus':
            return tf.nn.softplus(features=x)
        elif kind == 'softsign':
            return tf.nn.softsign(features=x)
        elif kind == 'swish':
            # https://arxiv.org/abs/1710.05941
            return tf.sigmoid(x=x) * x
        else:
            # Only 'tanh' remains after constructor validation.
            return tf.nn.tanh(x=x)
class Dropout(NondeterministicLayer):
    """
    Dropout layer (specification key: `dropout`).

    Randomly zeroes input elements at the configured rate; acts as a no-op
    when evaluating deterministically or when the rate is zero.

    Args:
        rate (parameter, 0.0 <= float < 1.0): Dropout rate
            (<span style="color:#C00000"><b>required</b></span>).
        name (string): Layer name
            (<span style="color:#00C000"><b>default</b></span>: internally chosen).
        input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
    """

    def __init__(self, *, rate, name=None, input_spec=None):
        super().__init__(name=name, input_spec=input_spec)

        # Dropout rate, registered as a parameter submodule (may be scheduled).
        self.rate = self.submodule(
            name='rate', module=rate, modules=parameter_modules, dtype='float', min_value=0.0,
            max_value=1.0
        )

        self.architecture_kwargs['rate'] = str(rate)

    def default_input_spec(self):
        # Accepts a float tensor of any shape.
        return TensorSpec(type='float', shape=None)

    @tf_function(num_args=2)
    def apply(self, *, x, deterministic):
        # Statically a no-op when the rate is the constant 0.0.
        if self.rate.is_constant(value=0.0):
            return x

        rate = self.rate.value()
        zero = tf_util.constant(value=0.0, dtype='float')
        # Skip dropout when evaluating deterministically or when the dynamic
        # rate happens to be zero.
        skip_dropout = tf.math.logical_or(x=deterministic, y=tf.math.equal(x=rate, y=zero))
        return tf.cond(
            pred=skip_dropout,
            true_fn=lambda: x,
            false_fn=lambda: tf.nn.dropout(x=x, rate=rate)
        )
class Function(Layer):
    """
    Custom TensorFlow function layer (specification key: `function`).

    Args:
        function (callable[x -> x] | str): TensorFlow function, or string expression with argument
            "x", e.g. "(x+1.0)/2.0"
            (<span style="color:#C00000"><b>required</b></span>).
        output_spec (specification): Output tensor specification containing type and/or shape
            information (<span style="color:#00C000"><b>default</b></span>: same as input).
        l2_regularization (float >= 0.0): Scalar controlling L2 regularization
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        name (string): Layer name
            (<span style="color:#00C000"><b>default</b></span>: internally chosen).
        input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
    """

    # (requires function as first argument)
    def __init__(
        self, function, output_spec=None, l2_regularization=None, name=None, input_spec=None
    ):
        super().__init__(l2_regularization=l2_regularization, name=name, input_spec=input_spec)

        self.function = function

        # Optional explicit output spec; None means "same as input"
        # (see output_spec() below).
        if output_spec is None:
            self._output_spec = None
        else:
            self._output_spec = TensorSpec(**output_spec)

        self.architecture_kwargs['function'] = str(function)
        if l2_regularization is not None:
            self.architecture_kwargs['l2_regularization'] = str(l2_regularization)

    def output_spec(self):
        # Fall back to the inherited (input-derived) spec unless one was
        # explicitly supplied at construction.
        if self._output_spec is None:
            return super().output_spec()
        else:
            return self._output_spec

    @tf_function(num_args=1)
    def apply(self, *, x):
        if isinstance(self.function, str):
            # SECURITY NOTE(review): eval() executes arbitrary Python from the
            # layer specification string. Globals are empty and only x/math/
            # np/random/tf are exposed, but this is only safe while specs come
            # from trusted configuration — never pass untrusted input here.
            x = eval(self.function, dict(), dict(x=x, math=math, np=np, random=random, tf=tf))
        else:
            # Callable form: apply the user-provided TensorFlow function.
            x = self.function(x)
        return x
class Reshape(Layer):
    """
    Reshape layer (specification key: `reshape`).

    Reshapes the non-batch dimensions of its input to a new shape with the
    same total number of elements.

    Args:
        shape (<i>int | iter[int]</i>): New shape
            (<span style="color:#C00000"><b>required</b></span>).
        name (string): Layer name
            (<span style="color:#00C000"><b>default</b></span>: internally chosen).
        input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
    """

    def __init__(self, *, shape, name=None, input_spec=None):
        super().__init__(name=name, input_spec=input_spec)

        # Normalize to a tuple, accepting a bare int as a one-dimensional shape.
        self.shape = (shape,) if isinstance(shape, int) else tuple(shape)

        self.architecture_kwargs['reshape'] = str(self.shape)

    def output_spec(self):
        spec = super().output_spec()

        # The target shape must preserve the total number of elements.
        if spec.size != util.product(xs=self.shape):
            raise TensorforceError.value(name='Reshape', argument='shape', value=self.shape)
        spec.shape = self.shape

        return spec

    @tf_function(num_args=1)
    def apply(self, *, x):
        # Leading -1 keeps the batch dimension dynamic.
        return tf.reshape(tensor=x, shape=((-1,) + self.shape))
| {
"content_hash": "720c266f296a8d4e52c9695fb4d9988f",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 99,
"avg_line_length": 33.604651162790695,
"alnum_prop": 0.5815916955017301,
"repo_name": "reinforceio/tensorforce",
"id": "25a98b645110f4647fbe38b794816c2e5da221c1",
"size": "7909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorforce/core/layers/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "869657"
}
],
"symlink_target": ""
} |
# Skills this skill depends on — presumably loaded before this one by the
# skill framework; verify against the loader.
DEPENDS = [ 'base', 'dialog' ]
import television
def get_data(k):
    """Delegate data generation for this skill to the shared television module.

    :param k: the object handed through to ``television.get_data``.
    :returns: whatever the delegate returns (the original dropped the
        delegate's return value; propagating it is backward compatible
        for callers that ignored the previous ``None``).
    """
    return television.get_data(k)
| {
"content_hash": "5438f7abeb226059381a85ca0247e252",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 33,
"avg_line_length": 14.142857142857142,
"alnum_prop": 0.6262626262626263,
"repo_name": "gooofy/zamia-ai",
"id": "f085b4df4a9dd82ab9e209b8c8f31e4d143b3437",
"size": "743",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zamiaai/skills/television/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "463"
},
{
"name": "Prolog",
"bytes": "23478934"
},
{
"name": "Python",
"bytes": "8963389"
},
{
"name": "Shell",
"bytes": "923"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration for the raw-HTML CMS plugin: creates a plugin model
    # whose only own field is the HTML body, linked one-to-one to the
    # django-cms CMSPlugin base row.

    dependencies = [
        ('cms', '0010_migrate_use_structure'),
    ]

    operations = [
        migrations.CreateModel(
            name='RawHtmlPlugin',
            fields=[
                # parent_link=True makes this the multi-table-inheritance link
                # to cms.CMSPlugin, doubling as the primary key.
                ('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True, serialize=False)),
                ('body', models.TextField(verbose_name='HTML')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
| {
"content_hash": "a159f71e728a7b6354f9e7a4dd7dceb8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 148,
"avg_line_length": 27.541666666666668,
"alnum_prop": 0.5491679273827534,
"repo_name": "makukha/cmsplugin-raw-html",
"id": "548994faa7bdf2ccedc7ecc3d1da81d3238e70cf",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_raw_html/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11"
},
{
"name": "Python",
"bytes": "2738"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``choropleth.colorbar.outlinecolor``."""

    def __init__(
        self, plotly_name="outlinecolor", parent_name="choropleth.colorbar", **kwargs
    ):
        # Default the edit type unless the caller overrides it, then forward
        # everything to the base color validator.
        kwargs.setdefault("edit_type", "colorbars")
        super(OutlinecolorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "c988cd9d9aec5200747c2c5b979ba65b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 34,
"alnum_prop": 0.6266968325791855,
"repo_name": "plotly/plotly.py",
"id": "c3820715c1558f0ff4c70c5dc426f970f7d39e84",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choropleth/colorbar/_outlinecolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import logging
import multiprocessing
import os
import random
import subprocess
import time
import math
from threading import Thread
from threading import Semaphore, Lock, Condition
from Queue import Queue, Empty
from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
logger = logging.getLogger(__name__)
class SingleMachineBatchSystem(AbstractBatchSystem):
    """
    The interface for running jobs on a single machine, runs all the jobs you give it as they come in, but in parallel.

    Resource accounting: CPU is modelled as a semaphore holding
    maxCpus / minCpu slots of minCpu each; memory is a byte counter guarded
    by a condition variable. Worker threads consume job tuples from
    inputQueue and produce (jobID, exitCode) results on outputQueue.
    """

    # Physical core count of this machine; hard upper bound for maxCpus.
    numCores = multiprocessing.cpu_count()

    def __init__(self, config, maxCpus, maxMemory, maxDisk, badWorker=False):
        assert type(maxCpus) == int
        if maxCpus > self.numCores:
            logger.warn('Limiting maxCpus to CPU count of system (%i).', self.numCores)
            maxCpus = self.numCores
        AbstractBatchSystem.__init__(self, config, maxCpus, maxMemory, maxDisk)
        assert self.maxCpus >= 1
        assert self.maxMemory >= 1
        # The scale allows the user to apply a factor to each task's CPU requirement, thereby squeezing more tasks
        # onto each core (scale < 1) or stretching tasks over more cores (scale > 1).
        self.scale = float(config.attrib['scale'])
        # The minimal fractional CPU. Tasks with a smaller CPU requirement will be rounded up to this value. One
        # important invariant of this class is that each worker thread represents a CPU requirement of minCpu,
        # meaning that we can never run more than numCores / minCpu jobs concurrently. With minCpu set to .1,
        # a task with cpu=1 will occupy 10 workers. One of these workers will be blocked on the Popen.wait() call for
        # the worker.py child process, the others will be blocked on the acquiring the CPU semaphore.
        self.minCpu = 0.1
        # Number of worker threads that will be started
        self.numWorkers = int(self.maxCpus / self.minCpu)
        # A counter to generate batchjob IDs and a lock to guard it
        self.jobIndex = 0
        self.jobIndexLock = Lock()
        # A dictionary mapping IDs of submitted jobs to those jobs
        self.jobs = {}
        # A queue of jobs waiting to be executed. Consumed by the workers.
        self.inputQueue = Queue()
        # A queue of finished jobs. Produced by the workers.
        self.outputQueue = Queue()
        # A dictionary mapping IDs of currently running jobs to their Info objects
        self.runningJobs = {}
        # The list of worker threads
        self.workerThreads = []
        # A semaphore representing available CPU in units of minCpu
        self.cpuSemaphore = Semaphore(self.numWorkers)
        # A counter representing failed acquisitions of the semaphore, also in units of minCpu, and a lock to guard it
        self.cpuOverflow = 0
        self.cpuOverflowLock = Lock()
        # A lock to work around the lack of thread-safety in Python's subprocess module
        self.popenLock = Lock()
        # A counter representing available memory in bytes
        self.memoryPool = self.maxMemory
        # A condition object used to guard it (a semphore would force us to acquire each unit of memory individually)
        self.memoryCondition = Condition()
        logger.info('Setting up the thread pool with %i workers, '
                    'given a minimum CPU fraction of %f '
                    'and a maximum CPU value of %i.', self.numWorkers, self.minCpu, maxCpus)
        # NOTE(review): badWorker takes (inputQueue, outputQueue) but threads
        # are started with inputQueue only — see the FIXME on badWorker.
        self.workerFn = self.badWorker if badWorker else self.worker
        for i in xrange(self.numWorkers):
            worker = Thread(target=self.workerFn, args=(self.inputQueue,))
            self.workerThreads.append(worker)
            worker.start()

    # The input queue is passed as an argument because the corresponding attribute is reset to None in shutdown()
    def worker(self, inputQueue):
        # Main loop of each worker thread: pull a job tuple, acquire memory
        # then CPU slots, run the job, and release everything in reverse
        # order via the nested finally blocks.
        while True:
            args = inputQueue.get()
            # A None sentinel (enqueued by shutdown()) terminates the thread.
            if args is None:
                logger.debug('Received queue sentinel.')
                break
            jobCommand, jobID, jobCpu, jobMem, jobDisk = args
            try:
                numThreads = int(jobCpu / self.minCpu)
                logger.debug('Acquiring %i bytes of memory from pool of %i.', jobMem, self.memoryPool)
                # Block on the condition variable until enough memory is free.
                self.memoryCondition.acquire()
                while jobMem > self.memoryPool:
                    logger.debug('Waiting for memory condition to change.')
                    self.memoryCondition.wait()
                    logger.debug('Memory condition changed.')
                self.memoryPool -= jobMem
                self.memoryCondition.release()
                try:
                    logger.debug('Attempting to acquire %i threads for %i cpus submitted', numThreads, jobCpu)
                    numThreadsAcquired = 0
                    # Acquire first thread blockingly
                    logger.debug('Acquiring semaphore blockingly.')
                    self.cpuSemaphore.acquire(blocking=True)
                    try:
                        numThreadsAcquired += 1
                        logger.debug('Semaphore acquired.')
                        while numThreadsAcquired < numThreads:
                            # Optimistically and non-blockingly acquire remaining threads. For every failed attempt
                            # to acquire a thread, atomically increment the overflow instead of the semaphore such
                            # any thread that later wants to release a thread, can do so into the overflow,
                            # thereby effectively surrendering that thread to this batchjob and not into the semaphore.
                            # That way we get to start a batchjob with as many threads as are available, and later grab
                            # more as they become available.
                            if not self.cpuSemaphore.acquire(blocking=False):
                                with self.cpuOverflowLock:
                                    self.cpuOverflow += 1
                            numThreadsAcquired += 1
                        logger.info("Executing command: '%s'.", jobCommand)
                        # Popen creation is serialized; see popenLock comment
                        # in __init__.
                        with self.popenLock:
                            popen = subprocess.Popen(jobCommand, shell=True)
                        info = Info(time.time(), popen, kill_intended=False)
                        self.runningJobs[jobID] = info
                        try:
                            statusCode = popen.wait()
                            if 0 != statusCode:
                                # Suppress the error log for SIGKILL (-9) that
                                # we delivered ourselves via killBatchJobs().
                                if statusCode != -9 or not info.kill_intended:
                                    logger.error("Got exit code %i (indicating failure) from command '%s'.", statusCode, jobCommand )
                        finally:
                            self.runningJobs.pop(jobID)
                    finally:
                        logger.debug('Releasing %i threads.', numThreadsAcquired)
                        # Pay back the overflow first; only the remainder goes
                        # into the semaphore.
                        with self.cpuOverflowLock:
                            if self.cpuOverflow > 0:
                                if self.cpuOverflow > numThreadsAcquired:
                                    self.cpuOverflow -= numThreadsAcquired
                                    numThreadsAcquired = 0
                                else:
                                    numThreadsAcquired -= self.cpuOverflow
                                    self.cpuOverflow = 0
                        for i in xrange(numThreadsAcquired):
                            self.cpuSemaphore.release()
                finally:
                    logger.debug('Releasing %i memory back to pool', jobMem)
                    self.memoryCondition.acquire()
                    self.memoryPool += jobMem
                    self.memoryCondition.notifyAll()
                    self.memoryCondition.release()
            finally:
                # noinspection PyProtectedMember
                value = self.cpuSemaphore._Semaphore__value
                logger.debug('Finished batchjob. CPU semaphore value (approximate): %i, overflow: %i', value, self.cpuOverflow)
                # The job is reported as finished even if it failed; the exit
                # code posted here is always 0 — callers learn the real exit
                # status only from the error log above.
                self.outputQueue.put((jobID, 0))
        logger.info('Exiting worker thread normally.')

    # FIXME: Remove or fix badWorker to be compliant with new thread management.
    def badWorker(self, inputQueue, outputQueue):
        """
        This is used to test what happens if we fail and restart jobs
        """
        # Pipe the output to dev/null (it is caught by the worker and will be reported if there is an error)
        fnull = open(os.devnull, 'w')
        while True:
            args = inputQueue.get()
            # Case where we are reducing threads for max number of CPUs
            if args is None:
                inputQueue.task_done()
                return
            command, jobID, threadsToStart = args
            # Run to first calculate the runtime..
            process = subprocess.Popen(command, shell=True, stdout=fnull, stderr=fnull)
            # Randomly kill roughly half of the jobs mid-run to simulate
            # failures.
            if random.choice((False, True)):
                time.sleep(random.random())
                process.kill()
                process.wait()
                outputQueue.put((jobID, 1, threadsToStart))
            else:
                process.wait()
                outputQueue.put((jobID, process.returncode, threadsToStart))
            inputQueue.task_done()

    def issueBatchJob(self, command, memory, cpu, disk):
        """
        Adds the command and resources to a queue to be run.
        """
        # Round cpu to minCpu and apply scale
        cpu = math.ceil(cpu * self.scale / self.minCpu) * self.minCpu
        assert cpu <= self.maxCpus, \
            'batchjob is requesting {} cpu, which is greater than {} available on the machine. Scale currently set ' \
            'to {} consider adjusting batchjob or scale.'.format(cpu, multiprocessing.cpu_count(), self.scale)
        assert cpu >= self.minCpu
        assert memory <= self.maxMemory, 'batchjob requests {} mem, only {} total available.'.format(memory, self.maxMemory)
        self.checkResourceRequest(memory, cpu, disk)
        logger.debug("Issuing the command: %s with memory: %i, cpu: %i, disk: %i" % (command, memory, cpu, disk))
        # Generate a unique job ID under the lock; the dict/queue updates
        # below are keyed by it.
        with self.jobIndexLock:
            jobID = self.jobIndex
            self.jobIndex += 1
        self.jobs[jobID] = command
        self.inputQueue.put((command, jobID, cpu, memory, disk))
        return jobID

    def killBatchJobs(self, jobIDs):
        """
        As jobs are already run, this method has no effect.
        """
        logger.debug('Killing jobs: {}'.format(jobIDs))
        for id in jobIDs:
            if id in self.runningJobs:
                info = self.runningJobs[id]
                # Mark the kill as intentional so the worker suppresses the
                # failure log for the resulting -9 exit code.
                info.kill_intended = True
                os.kill(info.popen.pid, 9)
                # Busy-wait until the worker removes the job from
                # runningJobs (in its popen.wait() finally block).
                while id in self.runningJobs:
                    pass

    def getIssuedBatchJobIDs(self):
        """
        Just returns all the jobs that have been run, but not yet returned as updated.
        """
        return self.jobs.keys()

    def getRunningBatchJobIDs(self):
        """
        Return empty map
        """
        # Maps each running job ID to its elapsed wall-clock runtime.
        currentJobs = {}
        for jobID, info in self.runningJobs.iteritems():
            startTime = info.time
            currentJobs[jobID] = time.time() - startTime
        return currentJobs

    def shutdown(self):
        """
        Cleanly terminate worker threads. Add sentinels to inputQueue equal to maxThreads. Join all worker threads.
        """
        for i in xrange(self.numWorkers):
            self.inputQueue.put(None)
        # Remove reference to inputQueue (raises exception if inputQueue is used after method call)
        self.inputQueue = None
        for thread in self.workerThreads:
            thread.join()

    def getUpdatedBatchJob(self, maxWait):
        """
        Returns a map of the run jobs and the return value of their processes.
        """
        # Wait up to maxWait seconds for a finished job; None when none
        # completed in time.
        try:
            i = self.outputQueue.get(timeout=maxWait)
        except Empty:
            return None
        jobID, exitValue = i
        self.jobs.pop(jobID)
        logger.debug("Ran jobID: %s with exit value: %i" % (jobID, exitValue))
        self.outputQueue.task_done()
        return jobID, exitValue

    @classmethod
    def getRescueBatchJobFrequency(cls):
        """
        This should not really occur, wihtout an error. To exercise the system we allow it every 90 minutes.
        """
        return 5400
class Info(object):
    """Bookkeeping record for one running batch job."""

    def __init__(self, time, popen, kill_intended):
        # Submission timestamp, the job's subprocess handle, and whether a
        # SIGKILL delivered to it was requested deliberately.
        self.time, self.popen, self.kill_intended = time, popen, kill_intended
| {
"content_hash": "fc0e79bd532174561224d87f8ba11578",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 133,
"avg_line_length": 46.94795539033457,
"alnum_prop": 0.5819146409058517,
"repo_name": "BD2KGenomics/toil-old",
"id": "ce012ab4531d14f7a4e012c35fe8d52fbd4d5c57",
"size": "13774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/toil/batchSystems/singleMachine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "425"
},
{
"name": "Python",
"bytes": "427306"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.utils.module_loading import import_string
# Optional setting mapping context-variable names to dotted import paths of
# translation model classes (resolved via import_string in translator()).
DJANGO_TRANSLATOR_MODELS = getattr(settings, 'DJANGO_TRANSLATOR_MODELS', {})
class Translator(object):
    """Lazy translation lookup proxy.

    Any attribute read that is not found on the instance is treated as a
    translation key and resolved against ``model_class`` via __getattr__.
    """

    def __init__(self, request, model_class=None):
        # Imported here rather than at module level — presumably to avoid a
        # circular import at app load time; confirm before moving it.
        from translator.models import Translation
        self.request = request
        # Fall back to the default Translation model when none is supplied.
        self.model_class = model_class or Translation

    def __getattr__(self, item):
        # Only invoked for attributes NOT found normally, so 'request' and
        # 'model_class' set in __init__ never reach this path.
        from translator.util import get_translation_for_key
        return get_translation_for_key(item=item, model_class=self.model_class)
def translator(request):
    """Template context processor exposing translator lookup objects.

    Always provides ``translator`` (bound to the default model); additionally
    exposes one Translator per DJANGO_TRANSLATOR_MODELS entry under its
    configured context-variable name.
    """
    context = {'translator': Translator(request)}
    for variable_name, model_class_path in DJANGO_TRANSLATOR_MODELS.items():
        # Resolve the dotted path and bind a Translator for that model.
        context[variable_name] = Translator(request, import_string(model_class_path))
    return context
| {
"content_hash": "3293f981c735eb3e58a6a2d429df38ba",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 32.65384615384615,
"alnum_prop": 0.7126030624263839,
"repo_name": "dreipol/django-translator",
"id": "23a0977f8de9af9ceb738e9278d6501162f335cf",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "translator/context_processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8267"
}
],
"symlink_target": ""
} |
"""
A connection to the VMware vCenter platform.
"""
import re
from oslo.config import cfg
from oslo.vmware import api
from oslo.vmware import vim
import suds
from nova import exception
from nova.i18n import _, _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
# Configuration options for connecting nova-compute to a VMware vCenter
# endpoint; registered below under the [vmware] group.
vmwareapi_opts = [
    cfg.StrOpt('host_ip',
               help='Hostname or IP address for connection to VMware VC '
                    'host.'),
    cfg.IntOpt('host_port',
               default=443,
               help='Port for connection to VMware VC host.'),
    cfg.StrOpt('host_username',
               help='Username for connection to VMware VC host.'),
    cfg.StrOpt('host_password',
               help='Password for connection to VMware VC host.',
               secret=True),
    cfg.MultiStrOpt('cluster_name',
                    help='Name of a VMware Cluster ComputeResource.'),
    cfg.StrOpt('datastore_regex',
               help='Regex to match the name of a datastore.'),
    cfg.FloatOpt('task_poll_interval',
                 default=0.5,
                 help='The interval used for polling of remote tasks.'),
    cfg.IntOpt('api_retry_count',
               default=10,
               help='The number of times we retry on failures, e.g., '
                    'socket error, etc.'),
    cfg.IntOpt('vnc_port',
               default=5900,
               help='VNC starting port'),
    cfg.IntOpt('vnc_port_total',
               default=10000,
               help='Total number of VNC ports'),
    cfg.BoolOpt('use_linked_clone',
                default=True,
                help='Whether to use linked clone'),
    cfg.StrOpt('wsdl_location',
               help='Optional VIM Service WSDL Location '
                    'e.g http://<server>/vimService.wsdl. '
                    'Optional over-ride to default location for bug '
                    'work-arounds')
]

CONF = cfg.CONF
CONF.register_opts(vmwareapi_opts, 'vmware')

# Seconds to pause between retried API calls — presumably consumed by retry
# logic elsewhere in the driver; not referenced in this file section.
TIME_BETWEEN_API_CALL_RETRIES = 1.0
# Deprecation stub: the real ESX driver was deleted during the
# Icehouse -> Juno transition, but operator configuration files may still
# reference this class.  Instantiating it merely logs a warning so those
# configurations keep working.
class VMwareESXDriver(driver.ComputeDriver):
    """The ESX host connection object."""

    def __init__(self, virtapi, read_only=False, scheme="https"):
        # NOTE(review): super().__init__ is deliberately not called here in
        # the original; the stub never becomes a functional driver.
        self._do_deprecation_warning()

    def _do_deprecation_warning(self):
        # Warning text kept verbatim from the original driver.
        LOG.warn(_LW('The VMware ESX driver is now deprecated and has been '
                     'removed in the Juno release. The VC driver will remain '
                     'and continue to be supported.'))
class VMwareVCDriver(driver.ComputeDriver):
    """The VC host connection object."""
    capabilities = {
        "has_imagecache": True,
        "supports_recreate": False,
    }
    # The vCenter driver includes API that acts on ESX hosts or groups
    # of ESX hosts in clusters or non-cluster logical-groupings.
    #
    # vCenter is not a hypervisor itself, it works with multiple
    # hypervisor host machines and their guests. This fact can
    # subtly alter how vSphere and OpenStack interoperate.
    def __init__(self, virtapi, scheme="https"):
        super(VMwareVCDriver, self).__init__(virtapi)
        # Fail fast on incomplete connection configuration.
        if (CONF.vmware.host_ip is None or
            CONF.vmware.host_username is None or
            CONF.vmware.host_password is None):
            raise Exception(_("Must specify host_ip, host_username and "
                              "host_password to use vmwareapi.VMwareVCDriver"))
        self._datastore_regex = None
        if CONF.vmware.datastore_regex:
            try:
                self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
            except re.error:
                raise exception.InvalidInput(reason=
                    _("Invalid Regular Expression %s")
                    % CONF.vmware.datastore_regex)
        self._session = VMwareAPISession(scheme=scheme)
        # TODO(hartsocks): back-off into a configuration test module.
        if CONF.vmware.use_linked_clone is None:
            raise error_util.UseLinkedCloneConfigurationFault()
        # Get the list of clusters to be used
        self._cluster_names = CONF.vmware.cluster_name
        self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session,
                                                      self._cluster_names)
        if not self.dict_mors:
            raise exception.NotFound(_("All clusters specified %s were not"
                                       " found in the vCenter")
                                     % self._cluster_names)
        # Check if there are any clusters that were specified in the nova.conf
        # but are not in the vCenter, for missing clusters log a warning.
        clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
        missing_clusters = set(self._cluster_names) - set(clusters_found)
        if missing_clusters:
            LOG.warn(_LW("The following clusters could not be found in the "
                         "vCenter %s") % list(missing_clusters))
        # The _resources is used to maintain the vmops, volumeops and vcstate
        # objects per cluster
        self._resources = {}
        self._resource_keys = set()
        self._virtapi = virtapi
        self._update_resources()
        # The following initialization is necessary since the base class does
        # not use VC state.
        # NOTE(review): dict key order is arbitrary on Python 2, so the
        # "first" cluster picked here is whichever the dict happens to
        # yield first -- confirm callers do not depend on which one it is.
        first_cluster = self._resources.keys()[0]
        self._vmops = self._resources.get(first_cluster).get('vmops')
        self._volumeops = self._resources.get(first_cluster).get('volumeops')
        self._vc_state = self._resources.get(first_cluster).get('vcstate')
    def init_host(self, host):
        # Lazily (re)establish the vSphere session if one is not open yet.
        vim = self._session.vim
        if vim is None:
            self._session._create_session()
    def cleanup_host(self, host):
        # NOTE(hartsocks): we lean on the init_host to force the vim object
        # to not be None.
        vim = self._session.vim
        service_content = vim.service_content
        session_manager = service_content.sessionManager
        try:
            vim.client.service.Logout(session_manager)
        except suds.WebFault:
            LOG.debug("No vSphere session was open during cleanup_host.")
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        # Intentionally a no-op for the VC driver.
        pass
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """resume guest state when a host is booted."""
        # Check if the instance is running already and avoid doing
        # anything if it is.
        instances = self.list_instances()
        if instance['uuid'] not in instances:
            LOG.warn(_LW('Instance cannot be found in host, or in an unknown'
                'state.'), instance=instance)
        else:
            state = vm_util.get_vm_state_from_name(self._session,
                instance['uuid'])
            ignored_states = ['poweredon', 'suspended']
            if state.lower() in ignored_states:
                return
        # Instance is not up and could be in an unknown state.
        # Be as absolute as possible about getting it back into
        # a known and running state.
        self.reboot(context, instance, network_info, 'hard',
            block_device_info)
    def list_instance_uuids(self):
        """List VM instance UUIDs."""
        # Only UUID-like names are reported; other VM names are ignored.
        uuids = self._vmops.list_instances()
        return [uuid for uuid in uuids if uuidutils.is_uuid_like(uuid)]
    def list_instances(self):
        """List VM instances from all nodes."""
        instances = []
        nodes = self.get_available_nodes()
        for node in nodes:
            vmops = self._get_vmops_for_compute_node(node)
            instances.extend(vmops.list_instances())
        return instances
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Transfers the disk of a running instance in multiple phases, turning
        off the instance before the end.
        """
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        return _vmops.migrate_disk_and_power_off(context, instance,
                                                 dest, flavor)
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.confirm_migration(migration, instance, network_info)
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Finish reverting a resize, powering back on the instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.finish_revert_migration(context, instance, network_info,
                                       block_device_info, power_on)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Completes a resize, turning on the migrated instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.finish_migration(context, migration, instance, disk_info,
                                network_info, image_meta, resize_instance,
                                block_device_info, power_on)
    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Live migration of an instance to another host."""
        # NOTE(review): this uses the default-cluster vmops (self._vmops)
        # rather than the instance's node -- confirm that is intended.
        self._vmops.live_migration(context, instance, dest,
                                   post_method, recover_method,
                                   block_migration)
    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Clean up destination node after a failed live migration."""
        self.destroy(context, instance, network_info, block_device_info)
    def get_instance_disk_info(self, instance_name, block_device_info=None):
        # Not implemented for the VC driver; implicitly returns None.
        pass
    def get_vnc_console(self, context, instance):
        """Return link to instance's VNC console using vCenter logic."""
        # vCenter does not actually run the VNC service
        # itself. You must talk to the VNC host underneath vCenter.
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        return _vmops.get_vnc_console(instance)
    def _update_resources(self):
        """This method creates a dictionary of VMOps, VolumeOps and VCState.
        The VMwareVMOps, VMwareVolumeOps and VCState object is for each
        cluster/rp. The dictionary is of the form
        {
            domain-1000 : {'vmops': vmops_obj,
                           'volumeops': volumeops_obj,
                           'vcstate': vcstate_obj,
                           'name': MyCluster},
            resgroup-1000 : {'vmops': vmops_obj,
                             'volumeops': volumeops_obj,
                             'vcstate': vcstate_obj,
                             'name': MyRP},
        }
        """
        # Create resource entries for clusters that appeared since the
        # last call, and drop entries for clusters that disappeared.
        added_nodes = set(self.dict_mors.keys()) - set(self._resource_keys)
        for node in added_nodes:
            _volumeops = volumeops.VMwareVolumeOps(self._session,
                                        self.dict_mors[node]['cluster_mor'])
            _vmops = vmops.VMwareVMOps(self._session, self._virtapi,
                                       _volumeops,
                                       self.dict_mors[node]['cluster_mor'],
                                       datastore_regex=self._datastore_regex)
            name = self.dict_mors.get(node)['name']
            nodename = self._create_nodename(node, name)
            _vc_state = host.VCState(self._session, nodename,
                                     self.dict_mors.get(node)['cluster_mor'])
            self._resources[nodename] = {'vmops': _vmops,
                                         'volumeops': _volumeops,
                                         'vcstate': _vc_state,
                                         'name': name,
                                     }
            self._resource_keys.add(node)
        deleted_nodes = (set(self._resource_keys) -
                         set(self.dict_mors.keys()))
        for node in deleted_nodes:
            name = self.dict_mors.get(node)['name']
            nodename = self._create_nodename(node, name)
            del self._resources[nodename]
            self._resource_keys.discard(node)
    def _create_nodename(self, mo_id, display_name):
        """Creates the name that is stored in hypervisor_hostname column.
        The name will be of the form similar to
        domain-1000(MyCluster)
        resgroup-1000(MyResourcePool)
        """
        return mo_id + '(' + display_name + ')'
    def _get_resource_for_node(self, nodename):
        """Gets the resource information for the specific node."""
        resource = self._resources.get(nodename)
        if not resource:
            msg = _("The resource %s does not exist") % nodename
            raise exception.NotFound(msg)
        return resource
    def _get_vmops_for_compute_node(self, nodename):
        """Retrieve vmops object from mo_id stored in the node name.
        Node name is of the form domain-1000(MyCluster)
        """
        resource = self._get_resource_for_node(nodename)
        return resource['vmops']
    def _get_volumeops_for_compute_node(self, nodename):
        """Retrieve volumeops object from mo_id stored in the node name.
        Node name is of the form domain-1000(MyCluster)
        """
        resource = self._get_resource_for_node(nodename)
        return resource['volumeops']
    def _get_vc_state_for_compute_node(self, nodename):
        """Retrieve VCState object from mo_id stored in the node name.
        Node name is of the form domain-1000(MyCluster)
        """
        resource = self._get_resource_for_node(nodename)
        return resource['vcstate']
    def _get_available_resources(self, host_stats):
        # Translate a VCState stats dict into the resource dict expected by
        # the resource tracker.
        return {'vcpus': host_stats['vcpus'],
               'memory_mb': host_stats['host_memory_total'],
               'local_gb': host_stats['disk_total'],
               'vcpus_used': 0,
               'memory_mb_used': host_stats['host_memory_total'] -
                                 host_stats['host_memory_free'],
               'local_gb_used': host_stats['disk_used'],
               'hypervisor_type': host_stats['hypervisor_type'],
                'hypervisor_version': host_stats['hypervisor_version'],
                'hypervisor_hostname': host_stats['hypervisor_hostname'],
                'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
                'supported_instances': jsonutils.dumps(
                    host_stats['supported_instances']),
                'numa_topology': None,
                }
    def get_available_resource(self, nodename):
        """Retrieve resource info.
        This method is called when nova-compute launches, and
        as part of a periodic task.
        :returns: dictionary describing resources
        """
        stats_dict = {}
        vc_state = self._get_vc_state_for_compute_node(nodename)
        if vc_state:
            host_stats = vc_state.get_host_stats(refresh=True)
            # Updating host information
            stats_dict = self._get_available_resources(host_stats)
        else:
            LOG.info(_("Invalid cluster or resource pool"
                       " name : %s") % nodename)
        return stats_dict
    def get_available_nodes(self, refresh=False):
        """Returns nodenames of all nodes managed by the compute service.
        This method is for multi compute-nodes support. If a driver supports
        multi compute-nodes, this method returns a list of nodenames managed
        by the service. Otherwise, this method should return
        [hypervisor_hostname].
        """
        # Refresh the cluster map from vCenter before reporting nodes.
        self.dict_mors = vm_util.get_all_cluster_refs_by_name(
                                self._session,
                                CONF.vmware.cluster_name)
        node_list = []
        self._update_resources()
        for node in self.dict_mors.keys():
            nodename = self._create_nodename(node,
                                          self.dict_mors.get(node)['name'])
            node_list.append(nodename)
        LOG.debug("The available nodes are: %s", node_list)
        return node_list
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create VM instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.spawn(context, instance, image_meta, injected_files,
                     admin_password, network_info, block_device_info)
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach volume storage to VM instance."""
        _volumeops = self._get_volumeops_for_compute_node(instance['node'])
        return _volumeops.attach_volume(connection_info,
                                        instance,
                                        mountpoint)
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach volume storage to VM instance."""
        _volumeops = self._get_volumeops_for_compute_node(instance['node'])
        return _volumeops.detach_volume(connection_info,
                                        instance,
                                        mountpoint)
    def get_volume_connector(self, instance):
        """Return volume connector information."""
        _volumeops = self._get_volumeops_for_compute_node(instance['node'])
        return _volumeops.get_volume_connector(instance)
    def get_host_ip_addr(self):
        """Returns the IP address of the vCenter host."""
        return CONF.vmware.host_ip
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.snapshot(context, instance, image_id, update_task_state)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot VM instance."""
        # NOTE(review): reboot_type and block_device_info are accepted but
        # not forwarded to vmops -- confirm the hard/soft distinction is
        # handled (or intentionally ignored) in vmops.reboot.
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.reboot(instance, network_info)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Destroy VM instance."""
        # Destroy gets triggered when Resource Claim in resource_tracker
        # is not successful. When resource claim is not successful,
        # node is not set in instance. Perform destroy only if node is set
        if not instance['node']:
            return
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.destroy(instance, destroy_disks)
    def pause(self, instance):
        """Pause VM instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.pause(instance)
    def unpause(self, instance):
        """Unpause paused VM instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.unpause(instance)
    def suspend(self, instance):
        """Suspend the specified instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.suspend(instance)
    def resume(self, context, instance, network_info, block_device_info=None):
        """Resume the suspended VM instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.resume(instance)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance."""
        _vmops = self._get_vmops_for_compute_node(instance.node)
        _vmops.rescue(context, instance, network_info, image_meta)
    def unrescue(self, instance, network_info):
        """Unrescue the specified instance."""
        _vmops = self._get_vmops_for_compute_node(instance.node)
        _vmops.unrescue(instance)
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance."""
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.power_off(instance)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.power_on(instance)
    def poll_rebooting_instances(self, timeout, instances):
        """Poll for rebooting instances."""
        for instance in instances:
            _vmops = self._get_vmops_for_compute_node(instance['node'])
            _vmops.poll_rebooting_instances(timeout, [instance])
    def get_info(self, instance):
        """Return info about the VM instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        return _vmops.get_info(instance)
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        data = _vmops.get_diagnostics(instance)
        return data
    def get_instance_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        data = _vmops.get_instance_diagnostics(instance)
        return data
    def host_power_action(self, host, action):
        """Host operations not supported by VC driver.
        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()
    def host_maintenance_mode(self, host, mode):
        """Host operations not supported by VC driver.
        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()
    def set_host_enabled(self, host, enabled):
        """Host operations not supported by VC driver.
        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()
    def get_host_uptime(self, host):
        """Host uptime operation not supported by VC driver."""
        msg = _("Multiple hosts may be managed by the VMWare "
                "vCenter driver; therefore we do not return "
                "uptime for just one host.")
        raise NotImplementedError(msg)
    def inject_network_info(self, instance, nw_info):
        """inject network info for specified instance."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        _vmops.inject_network_info(instance, nw_info)
    def manage_image_cache(self, context, all_instances):
        """Manage the local cache of images."""
        # Running instances per cluster
        cluster_instances = {}
        for instance in all_instances:
            instances = cluster_instances.get(instance['node'])
            if instances:
                instances.append(instance)
            else:
                instances = [instance]
            cluster_instances[instance['node']] = instances
        # Invoke the image aging per cluster
        for resource in self._resources.keys():
            instances = cluster_instances.get(resource, [])
            _vmops = self._get_vmops_for_compute_node(resource)
            _vmops.manage_image_cache(context, instances)
    def instance_exists(self, instance):
        """Efficient override of base instance_exists method."""
        _vmops = self._get_vmops_for_compute_node(instance['node'])
        return _vmops.instance_exists(instance)
    def attach_interface(self, instance, image_meta, vif):
        """Attach an interface to the instance."""
        _vmops = self._get_vmops_for_compute_node(instance.node)
        _vmops.attach_interface(instance, image_meta, vif)
    def detach_interface(self, instance, vif):
        """Detach an interface from the instance."""
        _vmops = self._get_vmops_for_compute_node(instance.node)
        _vmops.detach_interface(instance, vif)
class VMwareAPISession(api.VMwareAPISession):
    """Thin wrapper around oslo.vmware's session object.

    Establishes a session with the VC/ESX host using the [vmware]
    configuration options and routes all API calls through it.
    """
    def __init__(self, host_ip=CONF.vmware.host_ip,
                 host_port=CONF.vmware.host_port,
                 username=CONF.vmware.host_username,
                 password=CONF.vmware.host_password,
                 retry_count=CONF.vmware.api_retry_count,
                 scheme="https"):
        super(VMwareAPISession, self).__init__(
                host=host_ip,
                port=host_port,
                server_username=username,
                server_password=password,
                api_retry_count=retry_count,
                task_poll_interval=CONF.vmware.task_poll_interval,
                scheme=scheme,
                create_session=True,
                wsdl_loc=CONF.vmware.wsdl_location
                )
    def _is_vim_object(self, module):
        """Return True if *module* is a VIM object instance."""
        return isinstance(module, vim.Vim)
    def _call_method(self, module, method, *args, **kwargs):
        """Invoke *method* on *module*, injecting the vim handle when
        the target is not itself a VIM object.
        """
        if self._is_vim_object(module):
            return self.invoke_api(module, method, *args, **kwargs)
        return self.invoke_api(module, method, self.vim, *args, **kwargs)
    def _wait_for_task(self, task_ref):
        """Block until the given remote task completes and return its
        result (the task is polled until it finishes).
        """
        return self.wait_for_task(task_ref)
| {
"content_hash": "2a5499d94f1f72daac20d317d1b20376",
"timestamp": "",
"source": "github",
"line_count": 647,
"max_line_length": 79,
"avg_line_length": 42.244204018547144,
"alnum_prop": 0.5870774184106542,
"repo_name": "vmthunder/nova",
"id": "74dbcb789ef7ddf3889a806b7a27a713ef7e4858",
"size": "28084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/vmwareapi/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Handle parsing of the 2 different config files."""
import yaml
import pprint
import os
from .model import Rule, CommandSet
from . import DEFAULTS_PATH, USER_PATH
def import_default(path):
    """Load the defaults YAML file and expand its per-rule sections.

    The file must contain an "all" mapping of base settings; every other
    mapping in the file is overlaid on a copy of those base settings and
    converted into a Rule.  Non-mapping entries are ignored.

    :param path: filesystem path of the defaults YAML file
    :returns: dict mapping section name -> Rule
    """
    with open(path) as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; prefer yaml.safe_load if these config
        # files are not fully trusted.
        structure = yaml.load(f)
    defaults = structure.pop("all")
    # items() instead of the Python-2-only iteritems(); behaves the same
    # here and keeps the module forward-compatible.
    for k, v in structure.items():
        if isinstance(v, dict):
            copy = defaults.copy()
            copy.update(v)
            structure[k] = copy
    return {k: Rule.from_yaml(**v) for k, v in structure.items()
            if isinstance(v, dict)}
def import_rules(path, defaults):
    """Load the user rules YAML file and build CommandSet objects.

    :param path: filesystem path of the user rules YAML file
    :param defaults: default Rule mapping produced by import_default()
    :returns: dict mapping rule name -> CommandSet
    """
    with open(path) as f:
        # NOTE(review): consider yaml.safe_load for untrusted input.
        structure = yaml.load(f)
    # items() instead of the Python-2-only iteritems() (same behavior).
    return {k: CommandSet.from_yaml(defaults, **v)
            for k, v in structure.items()}
def get_rules():
    """Parse the defaults and user config files and return the rules.

    :returns: dict mapping rule name -> CommandSet
    """
    defaults = import_default(DEFAULTS_PATH)
    return import_rules(USER_PATH, defaults)
| {
"content_hash": "34d7626d0a19b385c782fedce591e5d5",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 26.941176470588236,
"alnum_prop": 0.6004366812227074,
"repo_name": "ssherar/hook",
"id": "316fd369fc30753a35bc88ea85d49b3091dde786",
"size": "916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hook/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6633"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
"""
Classes used to simulate different data sets
"""
class Simulate(object):
    """Simulate several simple data sets: multinomial counts, noisy linear
    data, a noisy sine wave, and uniform random points.

    A seed stored at construction time is applied before every draw so
    that results are reproducible.
    """

    def __init__(self, seed=101):
        """Store the seed used for all subsequent draws.

        seed - integer seed for numpy's global random generator
        """
        self.seed = seed

    def set_rseed(self, seed=None):
        """Seed numpy's global RNG.

        If *seed* is given it replaces the stored seed; otherwise the
        stored seed is reused.
        """
        if seed is not None:
            self.seed = seed
        # BUG FIX: the original passed the raw argument to np.random.seed,
        # so seed=None re-seeded from OS entropy and the stored seed was
        # never honored; seed from self.seed for reproducible draws.
        np.random.seed(self.seed)

    def draw_multinomial(self, seed=None, n=10, m=50):
        """Draw *n* multinomial samples of *m* trials each.

        Returns an (n, 3) integer array; each row sums to m.
        seed - optional override for the stored seed
        """
        # BUG FIX: the original ignored its seed argument.
        self.set_rseed(seed)
        # NOTE(review): these values sum to 1.9; numpy validates only the
        # leading probabilities, so the last category effectively gets the
        # remaining 0.1 mass -- confirm this is intentional.
        proportions = [0.6, 0.3, 1.0]
        return np.random.multinomial(m, proportions, size=n)

    def draw_u(self, M=10, markerIndices=(0, 5, 9), dType='linear'):
        """Validate marker indices against range(M) and return the
        complementary (non-marker) index set.

        NOTE(review): this method looks unfinished in the original -- it
        computed the non-marker set and returned nothing; it now returns
        that set.  dType is currently unused.

        M             - total number of input variables
        markerIndices - iterable of indices that must lie in range(M)
        """
        allIndices = range(M)
        for i in markerIndices:
            if i not in allIndices:
                # ValueError (a subclass of Exception) instead of a bare
                # Exception; message kept verbatim.
                raise ValueError("Index %s not in range(%s)" % (i, M))
        return set(range(M)).difference(set(markerIndices))

    def linear_draw(self, n, w0=-0.3, w1=0.5, sigma=0.2):
        """Draw samples with an approximately linear relationship.

        n     - number of samples
        w0    - intercept coefficient (truth)
        w1    - regression coefficient (truth)
        sigma - error standard deviation (truth)
        """
        self.set_rseed()
        trueX = np.random.uniform(-1, 1, n)
        trueT = w0 + (w1 * trueX)
        return trueX, trueT + np.random.normal(0, sigma, n)

    def sine_draw(self, n, sigma=0.3):
        """Draw samples with a sine wave relationship."""
        self.set_rseed()
        trueX = np.linspace(0, 1.0 * np.pi, n)
        trueT = np.sin(trueX) + np.random.normal(0, sigma, n)
        return trueX, trueT

    def random_draw(self, n):
        """Draw uniform random (x, t) pairs on [0, 1)."""
        self.set_rseed()
        trueX = np.random.uniform(0, 1, n)
        trueT = np.random.uniform(0, 1, n)
        return trueX, trueT

    def plot_all(self, n, figName=None):
        """Plot the linear, sine and random draws side by side.

        n       - number of samples per panel (BUG FIX: the original
                  overwrote this parameter with a hard-coded 50)
        figName - if given, save the figure to this path; otherwise show it
        """
        fig = plt.figure()
        # One panel per simulated data set.
        for pos, draw in enumerate((self.linear_draw, self.sine_draw,
                                    self.random_draw), start=1):
            ax = fig.add_subplot(1, 3, pos)
            x, y = draw(n)
            ax.scatter(x, y)
            ax.set_aspect(1. / ax.get_data_ratio())
        ## save or show the plots
        fig.tight_layout()
        if figName is not None:
            fig.savefig(figName, dpi=200)
        else:
            plt.show()
## used to test
if __name__ == "__main__":
sim = Simulate()
samples = sim.draw_multinomial()
print samples
#sim.draw_u()
#x,y = sim.linear_draw(10)
#print 'x',x
#print 'y',y
#sim.plot_all(10)
| {
"content_hash": "63b53c4278fdb70ef128bcc2ccb40525",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 71,
"avg_line_length": 24.496296296296297,
"alnum_prop": 0.530692470517085,
"repo_name": "ajrichards/htsint",
"id": "1f117fe41531ca3bfdcca0dfac0ea3e3ea28ec0d",
"size": "3307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "htsint/sandbox/Simulate.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "273743"
}
],
"symlink_target": ""
} |
"""Tests for Keras Premade WideNDeep models."""
import numpy as np
import tensorflow.compat.v2 as tf
from keras.engine import input_layer
from keras.engine import sequential
from keras.engine import training
from keras.feature_column import dense_features_v2
from keras.layers import core
from keras.optimizers.optimizer_v2 import gradient_descent
from keras.premade_models import linear
from keras.premade_models import wide_deep
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class WideDeepModelTest(test_combinations.TestCase):
def test_wide_deep_model(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 2))
dnn_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = 0.3 * linear_inp[:, 0] + 0.2 * dnn_inp[:, 1]
wide_deep_model.compile(
optimizer=["sgd", "adam"],
loss="mse",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
wide_deep_model.fit(inputs, output, epochs=5)
self.assertTrue(wide_deep_model.built)
def test_wide_deep_model_backprop(self):
with self.cached_session():
linear_model = linear.LinearModel(
units=1, kernel_initializer="zeros"
)
dnn_model = sequential.Sequential(
[core.Dense(units=1, kernel_initializer="zeros")]
)
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.array([[1.0]])
dnn_inp = np.array([[1.0]])
inputs = [linear_inp, dnn_inp]
output = linear_inp + 2 * dnn_inp
linear_opt = gradient_descent.SGD(learning_rate=0.1)
dnn_opt = gradient_descent.SGD(learning_rate=0.3)
wide_deep_model.compile(
optimizer=[linear_opt, dnn_opt],
loss="mse",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
self.evaluate(tf.compat.v1.global_variables_initializer())
wide_deep_model.fit(inputs, output, epochs=1)
self.assertAllClose(
[[0.6]],
self.evaluate(
wide_deep_model.linear_model.dense_layers[0].kernel
),
)
self.assertAllClose(
[[1.8]],
self.evaluate(wide_deep_model.dnn_model.layers[0].kernel),
)
def test_wide_deep_model_with_single_input(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
inputs = np.random.uniform(low=-5.0, high=5.0, size=(64, 3))
output = 0.3 * inputs[:, 0]
wide_deep_model.compile(
optimizer=["sgd", "adam"],
loss="mse",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
wide_deep_model.fit(inputs, output, epochs=5)
def test_wide_deep_model_with_multi_outputs(self):
inp = input_layer.Input(shape=(1,), name="linear")
l = linear.LinearModel(units=2, use_bias=False)(inp)
l1, l2 = tf.split(l, num_or_size_splits=2, axis=1)
linear_model = training.Model(inp, [l1, l2])
linear_model.set_weights([np.asarray([[0.5, 0.3]])])
h = core.Dense(units=2, use_bias=False)(inp)
h1, h2 = tf.split(h, num_or_size_splits=2, axis=1)
dnn_model = training.Model(inp, [h1, h2])
dnn_model.set_weights([np.asarray([[0.1, -0.5]])])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
inp_np = np.asarray([[1.0]])
out1, out2 = wide_deep_model(inp_np)
# output should be (0.5 + 0.1), and (0.3 - 0.5)
self.assertAllClose([[0.6]], out1)
self.assertAllClose([[-0.2]], out2)
wide_deep_model = wide_deep.WideDeepModel(
linear_model, dnn_model, activation="relu"
)
out1, out2 = wide_deep_model(inp_np)
# output should be relu((0.5 + 0.1)), and relu((0.3 - 0.5))
self.assertAllClose([[0.6]], out1)
self.assertAllClose([[0.0]], out2)
def test_wide_deep_model_with_single_optimizer(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 2))
dnn_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = 0.3 * linear_inp[:, 0] + 0.2 * dnn_inp[:, 1]
wide_deep_model.compile(
optimizer="sgd",
loss="mse",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
wide_deep_model.fit(inputs, output, epochs=5)
self.assertTrue(wide_deep_model.built)
def test_wide_deep_model_as_layer(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1)])
linear_input = input_layer.Input(shape=(3,), name="linear")
dnn_input = input_layer.Input(shape=(5,), name="dnn")
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
wide_deep_output = wide_deep_model((linear_input, dnn_input))
input_b = input_layer.Input(shape=(1,), name="b")
output_b = core.Dense(units=1)(input_b)
model = training.Model(
inputs=[linear_input, dnn_input, input_b],
outputs=[wide_deep_output + output_b],
)
linear_input_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 3))
dnn_input_np = np.random.uniform(low=-5.0, high=5.0, size=(64, 5))
input_b_np = np.random.uniform(low=-5.0, high=5.0, size=(64,))
output_np = (
linear_input_np[:, 0] + 0.2 * dnn_input_np[:, 1] + input_b_np
)
model.compile(
optimizer="sgd",
loss="mse",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
[linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5
)
def test_wide_deep_model_with_sub_model_trained(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(
linear.LinearModel(units=1),
sequential.Sequential([core.Dense(units=1, input_dim=3)]),
)
linear_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 2))
dnn_inp = np.random.uniform(low=-5.0, high=5.0, size=(64, 3))
inputs = [linear_inp, dnn_inp]
output = 0.3 * linear_inp[:, 0] + 0.2 * dnn_inp[:, 1]
linear_model.compile(
optimizer="sgd",
loss="mse",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
dnn_model.compile(
optimizer="adam",
loss="mse",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
linear_model.fit(linear_inp, output, epochs=50)
dnn_model.fit(dnn_inp, output, epochs=50)
wide_deep_model.compile(
optimizer=["sgd", "adam"],
loss="mse",
metrics=[],
run_eagerly=test_utils.should_run_eagerly(),
)
wide_deep_model.fit(inputs, output, epochs=50)
# This test is an example for cases where linear and dnn model accepts
# same raw input and same transformed inputs, i.e., the raw input is
# categorical, and both linear and dnn model accept one hot encoding.
def test_wide_deep_model_with_single_feature_column(self):
vocab_list = ["alpha", "beta", "gamma"]
vocab_val = [0.4, 0.6, 0.9]
data = np.random.choice(vocab_list, size=256)
y = np.zeros_like(data, dtype=np.float32)
for vocab, val in zip(vocab_list, vocab_val):
indices = np.where(data == vocab)
y[indices] = val + np.random.uniform(
low=-0.01, high=0.01, size=indices[0].shape
)
cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
key="symbol", vocabulary_list=vocab_list
)
ind_column = tf.feature_column.indicator_column(cat_column)
dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
linear_model = linear.LinearModel(
use_bias=False, kernel_initializer="zeros"
)
dnn_model = sequential.Sequential([core.Dense(units=1)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
combined = sequential.Sequential([dense_feature_layer, wide_deep_model])
opt = gradient_descent.SGD(learning_rate=0.1)
combined.compile(
opt, "mse", [], run_eagerly=test_utils.should_run_eagerly()
)
combined.fit(x={"symbol": data}, y=y, batch_size=32, epochs=10)
# This test is an example for cases where linear and dnn model accepts
# same raw input but different transformed inputs, i.e,. the raw input is
# categorical, and linear model accepts one hot encoding, while dnn model
# accepts embedding encoding.
    def test_wide_deep_model_with_two_feature_columns(self):
        """Train a WideDeepModel where each branch transforms the input differently.

        The raw input is categorical; the linear branch consumes a one-hot
        (indicator) encoding while the DNN branch consumes an embedding.
        """
        # Synthetic regression target: each vocab entry maps to a fixed value
        # plus a little uniform noise, so the model has a learnable signal.
        vocab_list = ["alpha", "beta", "gamma"]
        vocab_val = [0.4, 0.6, 0.9]
        data = np.random.choice(vocab_list, size=256)
        y = np.zeros_like(data, dtype=np.float32)
        for vocab, val in zip(vocab_list, vocab_val):
            indices = np.where(data == vocab)
            y[indices] = val + np.random.uniform(
                low=-0.01, high=0.01, size=indices[0].shape
            )
        cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
            key="symbol", vocabulary_list=vocab_list
        )
        # Two encodings of the same categorical column, one per branch.
        ind_column = tf.feature_column.indicator_column(cat_column)
        emb_column = tf.feature_column.embedding_column(cat_column, dimension=5)
        linear_feature_layer = dense_features_v2.DenseFeatures([ind_column])
        linear_model = linear.LinearModel(
            use_bias=False, kernel_initializer="zeros"
        )
        combined_linear = sequential.Sequential(
            [linear_feature_layer, linear_model]
        )
        dnn_model = sequential.Sequential([core.Dense(units=1)])
        dnn_feature_layer = dense_features_v2.DenseFeatures([emb_column])
        combined_dnn = sequential.Sequential([dnn_feature_layer, dnn_model])
        wide_deep_model = wide_deep.WideDeepModel(combined_linear, combined_dnn)
        opt = gradient_descent.SGD(learning_rate=0.1)
        wide_deep_model.compile(
            opt, "mse", [], run_eagerly=test_utils.should_run_eagerly()
        )
        # Smoke test: fitting must run end-to-end without raising.
        wide_deep_model.fit(x={"symbol": data}, y=y, batch_size=32, epochs=10)
def test_config(self):
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
config = wide_deep_model.get_config()
cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(config)
self.assertEqual(
linear_model.units, cloned_wide_deep_model.linear_model.units
)
self.assertEqual(
dnn_model.layers[0].units,
cloned_wide_deep_model.dnn_model.layers[0].units,
)
def test_config_with_custom_objects(self):
def my_activation(x):
return x
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
wide_deep_model = wide_deep.WideDeepModel(
linear_model, dnn_model, activation=my_activation
)
config = wide_deep_model.get_config()
cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(
config, custom_objects={"my_activation": my_activation}
)
self.assertEqual(cloned_wide_deep_model.activation, my_activation)
# Run this test module directly via TensorFlow's test runner.
if __name__ == "__main__":
    tf.test.main()
| {
"content_hash": "57753112d2b506a380bfd9cca2c4b36b",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 80,
"avg_line_length": 44.25874125874126,
"alnum_prop": 0.5946437035866645,
"repo_name": "keras-team/keras",
"id": "570a073650ac62cd7bc3d2016446b48afd3843a4",
"size": "13347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/premade_models/wide_deep_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "900"
},
{
"name": "Python",
"bytes": "11342063"
},
{
"name": "Shell",
"bytes": "11489"
},
{
"name": "Starlark",
"bytes": "273139"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import hashlib
import json
class FilePathInfo(object):
    """Give additional info around file path:

    >>> f = FilePathInfo('/path/to/my_file.txt')
    >>> f.file_name
    'my_file.txt'
    >>> f.file_extension
    'txt'
    >>> f.file_name_without_extension
    'my_file'
    >>> f.file_path_without_extension
    '/path/to/my_file'
    >>> f.file_path_without_name
    '/path/to/'
    """
    # Class-level defaults; populated by the file_path setter.
    file_extension = None
    file_name = None
    file_name_without_extension = None
    file_path_without_extension = None
    file_path_without_name = None

    @property
    def file_path(self):
        return self._file_path

    @file_path.setter
    def file_path(self, value):
        """Store the path and derive all the name/extension attributes.

        A falsy value resets every derived attribute to None. Query strings
        (e.g. 'file.txt?v=1') are stripped from the derived name/extension.
        """
        self._file_path = value
        if value:
            # splitext keeps the leading dot on the extension and, for URLs,
            # may drag the query string along (e.g. '.txt?v=1').
            self.file_path_with_name, self.file_extension = os.path.splitext(
                value)
            self.file_extension = self.file_extension.split('?')[0]
            self.file_name = os.path.basename(self._file_path).split('?')[0]
            if self.file_extension:
                # Remove only the *last* occurrence of the extension; a plain
                # str.replace would also mangle paths such as
                # '/backup.txt/my_file.txt'.
                self.file_name_without_extension = ''.join(
                    self.file_name.rsplit(self.file_extension, 1))
                self.file_path_without_extension = ''.join(
                    self.file_path.rsplit(self.file_extension, 1))
            if '.' in self.file_name:
                # Normalized extension without the leading dot ('txt').
                self.file_extension = self.file_name.rsplit('.')[-1]
            # Strip only the trailing file name, not every occurrence of that
            # substring in the directory part.
            self.file_path_without_name = ''.join(
                self.file_path.rsplit(self.file_name, 1))
        else:
            self.file_extension = None
            self.file_name = None
            self.file_name_without_extension = None
            self.file_path_without_extension = None
            self.file_path_without_name = None

    def is_file(self):
        """Return True if the path points at an existing regular file."""
        return os.path.isfile(path=self.file_path)

    def __init__(self, file_path, **kwargs):
        self.file_path = file_path
def get_md5_for_file(file):
    """Get the md5 hash for a file.

    Reads in 64 KiB chunks (a multiple of the md5 block size) so large
    files are hashed without loading them into memory and without the
    per-call overhead of the original 64-byte reads.

    :param file: the file to get the md5 hash for; must be opened in binary
        mode and is consumed from its current position to EOF
    :return: hexadecimal md5 digest string
    """
    md5 = hashlib.md5()
    chunk_size = md5.block_size * 1024  # 64 KiB
    while True:
        data = file.read(chunk_size)
        if not data:
            break
        md5.update(data)
    return md5.hexdigest()
def get_dict_from_json_file(path, encoding='utf-8'):
    """Gets a dict of data from a json file.

    :param path: the absolute path to the file
    :param encoding: the encoding the file is in
    :return: the deserialized JSON content (a dict for a JSON object)
    """
    # json.load streams straight from the file object instead of reading the
    # whole file into memory first and then parsing it.
    with open(path, encoding=encoding) as data_file:
        return json.load(data_file)
| {
"content_hash": "c68d26933746e54ce07ed050acec3044",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 27.567010309278352,
"alnum_prop": 0.5695587135377711,
"repo_name": "InfoAgeTech/django-core",
"id": "eee4b692ced5c8469b108686e2fe492a12906d35",
"size": "2674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_core/utils/file_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "452"
},
{
"name": "Python",
"bytes": "180676"
}
],
"symlink_target": ""
} |
import re
from tests.helpers import BaseApplicationTest
def load_prometheus_metrics(response_bytes):
    """Parse a Prometheus exposition payload into a {metric-key: value} dict.

    Keys are the full ``name{labels}`` byte strings, values the raw integer
    byte strings; a repeated key keeps its last value.
    """
    metric_line = re.compile(rb"(\w+{.+?}) (\d+)")
    return {match.group(1): match.group(2)
            for match in metric_line.finditer(response_bytes)}
class TestMetricsPage(BaseApplicationTest):
    """Checks that the /_metrics endpoint is served and exposes request counters."""

    def test_metrics_page_accessible(self):
        response = self.client.get('/_metrics')
        assert response.status_code == 200

    def test_metrics_page_contents(self):
        response = self.client.get('/_metrics')
        parsed = load_prometheus_metrics(response.data)
        expected_key = (
            b'http_server_requests_total{code="200",host="localhost",method="GET",path="/_metrics"}'
        )
        assert expected_key in parsed
class TestMetricsPageRegistersPageViews(BaseApplicationTest):
    """Checks that page views increment the request counter metric."""

    def test_metrics_page_registers_page_views(self):
        counter_key = (
            b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
        )
        homepage_response = self.client.get('/')
        assert homepage_response.status_code == 200
        parsed = load_prometheus_metrics(self.client.get('/_metrics').data)
        assert counter_key in parsed

    def test_metrics_page_registers_multiple_page_views(self):
        counter_key = (
            b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
        )
        # Snapshot the counter before generating traffic; it may not exist yet.
        before = load_prometheus_metrics(self.client.get('/_metrics').data)
        start_value = int(before.get(counter_key, 0))
        for _ in range(3):
            homepage_response = self.client.get('/')
            assert homepage_response.status_code == 200
        after = load_prometheus_metrics(self.client.get('/_metrics').data)
        end_value = int(after.get(counter_key, 0))
        assert counter_key in after
        assert end_value - start_value == 3
| {
"content_hash": "097709e543ee793c9f2cd498a44bed1f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 100,
"avg_line_length": 35.03508771929825,
"alnum_prop": 0.6529794692038057,
"repo_name": "alphagov/digitalmarketplace-buyer-frontend",
"id": "527c84aee27422ed7071e7ae82a9e7fde0bd9c3a",
"size": "2021",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/digitalmarketplace-apiclient-23.2.0",
"path": "tests/test_metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18169"
},
{
"name": "Dockerfile",
"bytes": "44"
},
{
"name": "HTML",
"bytes": "110965"
},
{
"name": "JavaScript",
"bytes": "35234"
},
{
"name": "Makefile",
"bytes": "2468"
},
{
"name": "Nix",
"bytes": "3768"
},
{
"name": "Python",
"bytes": "481177"
},
{
"name": "Shell",
"bytes": "207"
}
],
"symlink_target": ""
} |
default_app_config = 'home.apps.HomeConfig' | {
"content_hash": "37970b0b9cbcf68f6e077cc9988f6740",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 43,
"avg_line_length": 43,
"alnum_prop": 0.7906976744186046,
"repo_name": "sakset/getyourdata",
"id": "33cabb65b533968ac570d8fd873071b15f0a9e5e",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getyourdata/home/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2791"
},
{
"name": "HTML",
"bytes": "64735"
},
{
"name": "JavaScript",
"bytes": "1519"
},
{
"name": "Python",
"bytes": "218082"
},
{
"name": "Shell",
"bytes": "2722"
}
],
"symlink_target": ""
} |
import os, sys
import mercadopago
import json
def index(req, **kwargs):
    """mod_python handler: search for approved MercadoPago payments.

    :param req: the mod_python request object (not used here)
    :return: a pretty-printed JSON string with the search results
    """
    # NOTE(review): placeholder credentials -- replace with real ones.
    mp = mercadopago.MP("CLIENT_ID", "CLIENT_SECRET")
    # Only approved payments, first page of at most 10 results.
    filters = {
        "status": "approved",
        "offset": 0,
        "limit": 10
    }
    searchResult = mp.search_payment(filters)
    return json.dumps(searchResult, indent=4) | {
"content_hash": "375581b56b932e5c22d66d676df40d3b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 18.764705882352942,
"alnum_prop": 0.6050156739811913,
"repo_name": "matikbird/matikbird.github.io",
"id": "eff71d00216507e8776c43b47e9780f7f3db049d",
"size": "336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portfolio/quay/back_end/payments2/mercadopago/api-mercadopago-master/templates/code-examples-master/payments/search/python/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "121666"
},
{
"name": "HTML",
"bytes": "2376362"
},
{
"name": "Java",
"bytes": "938"
},
{
"name": "JavaScript",
"bytes": "33618"
},
{
"name": "Shell",
"bytes": "24082"
}
],
"symlink_target": ""
} |
# Use PyMySQL as a drop-in replacement for MySQLdb when it is installed;
# deliberately fall through silently otherwise (best-effort shim).
try:
    import pymysql
    pymysql.install_as_MySQLdb()
except ImportError:
    pass
#!/usr/bin/env python
import os
import sys
# Standard Django management entry point (manage.py).
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ironninja5.settings")
    # Imported lazily so DJANGO_SETTINGS_MODULE is set before Django loads.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "688027eb04e491e3ab145db02e63b1c9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 21.25,
"alnum_prop": 0.7058823529411765,
"repo_name": "colinvandermeer/ironninja5",
"id": "77863db71e5f65445d2a5dd0042d95028f3272a6",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47013"
},
{
"name": "HTML",
"bytes": "6511"
},
{
"name": "JavaScript",
"bytes": "90276"
},
{
"name": "Python",
"bytes": "10740"
}
],
"symlink_target": ""
} |
"""Tests for monascastatsd.py."""
import collections
import socket
import time
import unittest
import monascastatsd as mstatsd
from unittest import mock
import six
from six.moves import range
class FakeSocket(object):
    """In-memory socket stand-in that records sent payloads in FIFO order."""

    def __init__(self):
        # Queue of payloads in the order they were "sent".
        self.payloads = collections.deque()

    def send(self, payload):
        # Record the payload instead of writing to a real socket.
        self.payloads.append(payload)

    def recv(self):
        # Return the oldest queued payload, or None when nothing is queued.
        if not self.payloads:
            return None
        return self.payloads.popleft()

    def __repr__(self):
        return str(self.payloads)
class BrokenSocket(FakeSocket):
    """Fake socket whose send() always fails, for exercising error handling."""
    def send(self, payload):
        """Always raise socket.error instead of recording the payload."""
        raise socket.error("Socket error")
class TestMonascaStatsd(unittest.TestCase):
    """Unit tests for the monascastatsd client: counters, gauges, timers,
    sampling, buffering and error handling, all against a FakeSocket so the
    emitted statsd payloads can be inspected directly."""
    def setUp(self):
        # Route every metric through a fake socket so payloads are recorded
        # in memory instead of being sent over UDP.
        conn = mstatsd.Connection()
        conn.socket = FakeSocket()
        self.client = mstatsd.Client(connection=conn, dimensions={'env': 'test'})
    def recv(self, metric_obj):
        """Pop the next payload recorded on the metric's fake socket."""
        return metric_obj._connection.socket.recv()
    @mock.patch('monascastatsd.client.Connection')
    def test_client_set_host_port(self, connection_mock):
        """Explicit host/port must be forwarded to the Connection."""
        mstatsd.Client(host='foo.bar', port=5213)
        connection_mock.assert_called_once_with(host='foo.bar',
                                                port=5213,
                                                max_buffer_size=50)
    @mock.patch('monascastatsd.client.Connection')
    def test_client_default_host_port(self, connection_mock):
        """Default construction targets localhost:8125."""
        mstatsd.Client()
        connection_mock.assert_called_once_with(host='localhost',
                                                port=8125,
                                                max_buffer_size=50)
    def test_counter(self):
        """increment/decrement and the +=/-= operators emit '|c' payloads."""
        counter = self.client.get_counter(name='page.views')
        counter.increment()
        self.assertEqual(six.b("page.views:1|c|#{'env': 'test'}"),
                         self.recv(counter))
        counter += 1
        self.assertEqual(six.b("page.views:1|c|#{'env': 'test'}"),
                         self.recv(counter))
        counter.increment(11)
        self.assertEqual(six.b("page.views:11|c|#{'env': 'test'}"),
                         self.recv(counter))
        counter += 11
        self.assertEqual(six.b("page.views:11|c|#{'env': 'test'}"),
                         self.recv(counter))
        counter.decrement()
        self.assertEqual(six.b("page.views:-1|c|#{'env': 'test'}"),
                         self.recv(counter))
        counter -= 1
        self.assertEqual(six.b("page.views:-1|c|#{'env': 'test'}"),
                         self.recv(counter))
        counter.decrement(12)
        self.assertEqual(six.b("page.views:-12|c|#{'env': 'test'}"),
                         self.recv(counter))
        counter -= 12
        self.assertEqual(six.b("page.views:-12|c|#{'env': 'test'}"),
                         self.recv(counter))
    def test_counter_with_dimensions(self):
        """Per-call dimensions merge with per-counter and client dimensions."""
        counter = self.client.get_counter('counter_with_dims',
                                          dimensions={'date': '10/24', 'time': '23:00'})
        counter.increment(dimensions={'country': 'canada', 'color': 'red'})
        result = self.recv(counter)
        if isinstance(result, bytes):
            result = result.decode('utf-8')
        self.assertRegexpMatches(result, "counter_with_dims:1|c|#{")
        self.assertRegexpMatches(result, "'country': 'canada'")
        self.assertRegexpMatches(result, "'date': '10/24'")
        self.assertRegexpMatches(result, "'color': 'red'")
        self.assertRegexpMatches(result, "'env': 'test'")
        self.assertRegexpMatches(result, "'time': '23:00'")
        counter += 1
        result = self.recv(counter)
        if isinstance(result, bytes):
            result = result.decode('utf-8')
        self.assertRegexpMatches(result, "counter_with_dims:1|c|#{")
        self.assertRegexpMatches(result, "'date': '10/24'")
        self.assertRegexpMatches(result, "'env': 'test'")
        self.assertRegexpMatches(result, "'time': '23:00'")
    def test_gauge(self):
        """A gauge send emits a '|g' payload with the client dimensions."""
        gauge = self.client.get_gauge('gauge')
        gauge.send('metric', 123.4)
        result = self.recv(gauge)
        if isinstance(result, bytes):
            result = result.decode('utf-8')
        assert result == "gauge.metric:123.4|g|#{'env': 'test'}"
    def test_gauge_with_dimensions(self):
        """Per-send gauge dimensions are merged into the payload."""
        gauge = self.client.get_gauge('gauge')
        gauge.send('gt', 123.4,
                   dimensions={'country': 'china',
                               'age': 45,
                               'color': 'blue'})
        result = self.recv(gauge)
        if isinstance(result, bytes):
            result = result.decode('utf-8')
        self.assertRegexpMatches(result, "gauge.gt:123.4|g|#{")
        self.assertRegexpMatches(result, "'country': 'china'")
        self.assertRegexpMatches(result, "'age': 45")
        self.assertRegexpMatches(result, "'color': 'blue'")
        self.assertRegexpMatches(result, "'env': 'test'")
    def test_sample_rate(self):
        """sample_rate=0 drops everything; 0.3 keeps roughly 30% of sends."""
        counter = self.client.get_counter('sampled_counter')
        counter.increment(sample_rate=0)
        assert not self.recv(counter)
        for _ in range(10000):
            counter.increment(sample_rate=0.3)
        # Statistical bound: ~3000 of 10000 payloads, +/-150.
        self.assert_almost_equal(3000,
                                 len(self.client.connection.socket.payloads),
                                 150)
        self.assertEqual(six.b("sampled_counter:1|c|@0.3|#{'env': 'test'}"), self.recv(counter))
    def test_samples_with_dimensions(self):
        """Sampled sends with dimensions must not raise."""
        gauge = self.client.get_gauge()
        for _ in range(100):
            gauge.send('gst',
                       23,
                       dimensions={'status': 'sampled'},
                       sample_rate=0.9)
    def test_timing(self):
        """timing() reports the given value as a gauge payload."""
        timer = self.client.get_timer()
        timer.timing('t', 123)
        self.assertEqual(six.b("t:123|g|#{'env': 'test'}"), self.recv(timer))
    def test_time(self):
        """The time() context manager measures the elapsed wall time."""
        timer = self.client.get_timer()
        with timer.time('t'):
            time.sleep(2)
        packet = self.recv(timer)
        if isinstance(packet, bytes):
            packet = packet.decode("utf-8")
        name_value, type_, dimensions = packet.split('|')
        name, value = name_value.split(':')
        self.assertEqual('g', type_)
        self.assertEqual('t', name)
        self.assert_almost_equal(2.0, float(value), 0.1)
        self.assertEqual("{'env': 'test'}", dimensions.lstrip('#'))
    def test_timed(self):
        """The timed() decorator preserves the wrapped function and times it."""
        timer = self.client.get_timer()
        @timer.timed('timed.test')
        def func(a, b, c=1, d=1):
            """docstring."""
            time.sleep(0.5)
            return (a, b, c, d)
        self.assertEqual('func', func.__name__)
        self.assertEqual('docstring.', func.__doc__)
        result = func(1, 2, d=3)
        # Assert it handles args and kwargs correctly.
        self.assertEqual(result, (1, 2, 1, 3))
        packet = self.recv(timer)
        if isinstance(packet, bytes):
            packet = packet.decode("utf-8")
        name_value, type_, dimensions = packet.split('|')
        name, value = name_value.split(':')
        self.assertEqual('g', type_)
        self.assertEqual('timed.test', name)
        self.assert_almost_equal(0.5, float(value), 0.1)
        self.assertEqual("{'env': 'test'}", dimensions.lstrip('#'))
    def test_socket_error(self):
        """Send errors on the socket are swallowed rather than propagated."""
        self.client.connection.socket = BrokenSocket()
        self.client.get_gauge().send('no error', 1)
        assert True, 'success'
        self.client.connection.socket = FakeSocket()
    def test_batched(self):
        """Buffered metrics are flushed as one newline-joined payload."""
        self.client.connection.open_buffer()
        gauge = self.client.get_gauge('site')
        gauge.send('views', 123)
        timer = self.client.get_timer('site')
        timer.timing('timer', 123)
        self.client.connection.close_buffer()
        self.assertEqual(six.b("site.views:123|g|#{'env': 'test'}\n"
                               "site.timer:123|g|#{'env': 'test'}"),
                         self.recv(gauge))
    def test_context_manager(self):
        """Connection as a context manager buffers and flushes on exit."""
        fake_socket = FakeSocket()
        with mstatsd.Connection() as conn:
            conn.socket = fake_socket
            client = mstatsd.Client(name='ContextTester', connection=conn)
            client.get_gauge('page').send('views', 123)
            client.get_timer('page').timing('timer', 12)
        self.assertEqual(six.b('ContextTester.page.views:123|g\nContextTester.page.timer:12|g'),
                         fake_socket.recv())
    def test_batched_buffer_autoflush(self):
        """The buffer auto-flushes once max_buffer_size (50) is reached."""
        fake_socket = FakeSocket()
        with mstatsd.Connection() as conn:
            conn.socket = fake_socket
            client = mstatsd.Client(name='BufferedTester', connection=conn)
            counter = client.get_counter('mycounter')
            for _ in range(51):
                counter.increment()
        self.assertEqual(six.b('\n'.join(['BufferedTester.mycounter:1|c' for _ in range(50)])),
                         fake_socket.recv())
        self.assertEqual(six.b('BufferedTester.mycounter:1|c'), fake_socket.recv())
    @staticmethod
    def assert_almost_equal(a, b, delta):
        """Assert that a and b differ by at most delta."""
        assert 0 <= abs(a - b) <= delta, "%s - %s not within %s" % (a,
                                                                    b,
                                                                    delta)
# Allow running this test module directly with `python test_monascastatsd.py`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f340dfd51c26f3b7f3ffcd69d0ea4a4f",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 99,
"avg_line_length": 34.8639705882353,
"alnum_prop": 0.5443425076452599,
"repo_name": "stackforge/monasca-statsd",
"id": "d8692c8915ea7ef3a06e6a8cfb7ec836a608e023",
"size": "11668",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_monascastatsd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33940"
},
{
"name": "Ruby",
"bytes": "144"
}
],
"symlink_target": ""
} |
header_top = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>
"""
header_bottom= """
</title>
<!-- jQuery (necessary for Bootstrap's JavaScript plugins) -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js" integrity="sha256-Sk3nkD6mLTMOF0EOpNtsIry+s1CsaqQC1rVLTAy+0yc= sha512-K1qjQ+NcF2TYO/eI3M6v8EiNYZfA95pQumfvcVrTHtwQVDG+aHRqLi/ETn2uB+1JqwYqVG3LIvdm9lj6imS/pQ==" crossorigin="anonymous"></script>
<!-- Latest compiled and minified CSS -->
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet" integrity="sha256-MfvZlkHCEqatNoGiOXveE8FIwMzZg4W85qfrfIFBfYc= sha512-dTfge/zgoMYpP7QbHy4gWMEGsbsdZeCXz7irItjcC3sPUFtf0kuFbDz/ixG7ArTxmDjLXDmezHubeNikyKGVyQ==" crossorigin="anonymous">
</head>
<body>
<div class="container">
<!-- Static navbar -->
<nav class="navbar navbar-default">
<div class="container-fluid">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="audit.html">RAMAS</a>
</div>
<div id="navbar" class="navbar-collapse collapse">
<ul class="nav navbar-nav">
<li class="active"><a href="audit.html">Home</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">Platforms<span class="caret"></span></a>
<ul class="dropdown-menu">
"""
header_targets = """
</ul>
</li>
</ul>
</div><!--/.nav-collapse -->
</div><!--/.container-fluid -->
</nav>
"""
def header_html(title, targets):
    """Build the shared HTML page header.

    :param title: text placed inside the page's <title> element
    :param targets: iterable of platform names; each becomes a dropdown
        menu entry linking to '<name>.html'
    :return: the assembled header markup as a single string
    """
    # Build the <li> entries with str.join instead of repeated string
    # concatenation, which is quadratic in the number of targets.
    target_list = ''.join(
        '<li><a href="' + target + '.html">' + target + '</a></li>'
        for target in targets
    )
    header = header_top + title + \
        header_bottom + target_list + \
        header_targets
    return header
| {
"content_hash": "b3f56531ee8cc9bbe2423184ad552718",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 292,
"avg_line_length": 46.51851851851852,
"alnum_prop": 0.5931528662420382,
"repo_name": "tiagolb/CSF",
"id": "10e74d751bdfe54cd96a152bb5a3c09ec4ee52e3",
"size": "2536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/outputs/html_assets/header.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96423"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from golfr.find_grid_points.filter_points import group_points
from os import listdir
from os.path import abspath, basename, join, dirname
import sys, traceback
import pandas as pd
import cv2
def test_filter_points():
    """Smoke test: group_points should run on the saved fixture data.

    Loads the unfiltered centroid candidates plus the horizontal/vertical
    line images from filter_pnts_data/ and checks that group_points
    completes without raising.

    :return: True on success, None on failure (legacy behaviour kept)
    """
    data_dir = abspath(join(dirname(__file__), 'filter_pnts_data/'))
    df = pd.read_csv(join(data_dir, 'pnts_unfiltered.csv'), index_col=False)
    centroids = df.values  # .tolist()
    hori_lines = cv2.imread(join(data_dir, 'hori_lines.jpg'), 1)
    vert_lines = cv2.imread(join(data_dir, 'vert_lines.jpg'), 1)
    try:
        group_points(centroids, vert_lines, hori_lines)
    except Exception:
        # Catch only real errors; the previous bare `except:` also swallowed
        # KeyboardInterrupt and SystemExit.
        print('Exception: couldn\'t group points')
        print('-' * 60)
        traceback.print_exc(file=sys.stdout)
        print('-' * 60)
        print('test FAILED')
        return
    print('test not necessarily failed')
    return True
# Allow running the smoke test directly as a script.
if __name__ == '__main__':
    test_filter_points()
| {
"content_hash": "c2f52ad8cf15429283e56bae78722723",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 29.5,
"alnum_prop": 0.6514830508474576,
"repo_name": "joshshep/golfr",
"id": "50a6ad2c0506aedce7a31f8bc959b72bb4c8fcb7",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_filter_points.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26943"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
__author__ = "William Dabney"
from rlpy.Domains import GridWorld
from rlpy.Agents import LSPI
from rlpy.Representations import Tabular
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import os
def make_experiment(exp_id=1, path="./Results/Temp"):
    """
    Each file specifying an experimental setup should contain a
    make_experiment function which returns an instance of the Experiment
    class with everything set up.

    @param exp_id: number used to seed the random number generators
    @param path: output directory where logs and results are stored
    """
    # Experiment variables: run length and how often performance is checked.
    opt = {}
    opt["path"] = path
    opt["exp_id"] = exp_id
    opt["max_steps"] = 10000
    opt["num_policy_checks"] = 10
    opt["checks_per_policy"] = 50
    # Logging
    # Domain: a 4x5 GridWorld map with 30% action noise.
    # MAZE = '/Domains/GridWorldMaps/1x3.txt'
    maze = os.path.join(GridWorld.default_map_dir, '4x5.txt')
    domain = GridWorld(maze, noise=0.3)
    opt["domain"] = domain
    # Representation: exact tabular values, one feature per state.
    representation = Tabular(domain)
    # Policy: epsilon-greedy exploration over the learned values.
    policy = eGreedy(representation, epsilon=0.1)
    # Agent: LSPI with the experiment's step budget and a 1000-sample batch.
    opt["agent"] = LSPI(policy, representation, domain.discount_factor,
                        opt["max_steps"], 1000)
    experiment = Experiment(**opt)
    return experiment
# Run a single experiment interactively when invoked as a script.
if __name__ == '__main__':
    path = "./Results/Temp/{domain}/{agent}/{representation}/"
    experiment = make_experiment(1, path=path)
    experiment.run(visualize_steps=False,  # show each learning step?
                   visualize_learning=True,  # show policy/value during learning?
                   visualize_performance=True)  # show performance runs?
    experiment.plot()
    experiment.save()
| {
"content_hash": "add38f1c2c0cf1f9e309bd963aabfb36",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 30.714285714285715,
"alnum_prop": 0.6728682170542636,
"repo_name": "rlpy/rlpy",
"id": "c7073ea93361d3f3ca0d50f1996b5fa9e31b7107",
"size": "1958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/gridworld/lspi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "117712"
},
{
"name": "C++",
"bytes": "1601"
},
{
"name": "PLSQL",
"bytes": "787682"
},
{
"name": "Python",
"bytes": "1215456"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
# Copyright 2011, Nexenta Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for copy functionality."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import base64
from collections import namedtuple
import csv
import datetime
import errno
import gzip
from hashlib import md5
import json
import logging
import mimetypes
from operator import attrgetter
import os
import pickle
import random
import re
import shutil
import six
import stat
import subprocess
import tempfile
import textwrap
import time
import traceback
import six
from six.moves import xrange
from six.moves import range
from apitools.base.protorpclite import protojson
from boto import config
import crcmod
import gslib
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import CloudApi
from gslib.cloud_api import EncryptionException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.cloud_api import ResumableDownloadException
from gslib.cloud_api import ResumableUploadAbortException
from gslib.cloud_api import ResumableUploadException
from gslib.cloud_api import ResumableUploadStartOverException
from gslib.cloud_api import ServiceException
from gslib.commands.compose import MAX_COMPOSE_ARITY
from gslib.commands.config import DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE
from gslib.commands.config import DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD
from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_COMPONENT_SIZE
from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS
from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD
from gslib.commands.config import DEFAULT_GZIP_COMPRESSION_LEVEL
from gslib.cs_api_map import ApiSelector
from gslib.daisy_chain_wrapper import DaisyChainWrapper
from gslib.exception import CommandException
from gslib.exception import HashMismatchException
from gslib.file_part import FilePart
from gslib.parallel_tracker_file import GenerateComponentObjectPrefix
from gslib.parallel_tracker_file import ReadParallelUploadTrackerFile
from gslib.parallel_tracker_file import ValidateParallelCompositeTrackerData
from gslib.parallel_tracker_file import WriteComponentToParallelUploadTrackerFile
from gslib.parallel_tracker_file import WriteParallelUploadTrackerFile
from gslib.progress_callback import FileProgressCallbackHandler
from gslib.progress_callback import ProgressCallbackWithTimeout
from gslib.resumable_streaming_upload import ResumableStreamingJsonUploadWrapper
from gslib.storage_url import ContainsWildcard
from gslib.storage_url import GenerationFromUrlAndString
from gslib.storage_url import IsCloudSubdirPlaceholder
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.thread_message import FileMessage
from gslib.thread_message import RetryableErrorMessage
from gslib.tracker_file import DeleteDownloadTrackerFiles
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import ENCRYPTION_UPLOAD_TRACKER_ENTRY
from gslib.tracker_file import GetDownloadStartByte
from gslib.tracker_file import GetTrackerFilePath
from gslib.tracker_file import GetUploadTrackerData
from gslib.tracker_file import RaiseUnwritableTrackerFileException
from gslib.tracker_file import ReadOrCreateDownloadTrackerFile
from gslib.tracker_file import SERIALIZATION_UPLOAD_TRACKER_ENTRY
from gslib.tracker_file import TrackerFileType
from gslib.tracker_file import WriteDownloadComponentTrackerFile
from gslib.utils import parallelism_framework_util
from gslib.utils import text_util
from gslib.utils.boto_util import GetJsonResumableChunkSize
from gslib.utils.boto_util import GetMaxRetryDelay
from gslib.utils.boto_util import GetNumRetries
from gslib.utils.boto_util import ResumableThreshold
from gslib.utils.boto_util import UsingCrcmodExtension
from gslib.utils.cloud_api_helper import GetCloudApiInstance
from gslib.utils.cloud_api_helper import GetDownloadSerializationData
from gslib.utils.constants import DEFAULT_FILE_BUFFER_SIZE
from gslib.utils.constants import MIN_SIZE_COMPUTE_LOGGING
from gslib.utils.constants import UTF8
from gslib.utils.encryption_helper import CryptoKeyType
from gslib.utils.encryption_helper import CryptoKeyWrapperFromKey
from gslib.utils.encryption_helper import FindMatchingCSEKInBotoConfig
from gslib.utils.encryption_helper import GetEncryptionKeyWrapper
from gslib.utils.hashing_helper import Base64EncodeHash
from gslib.utils.hashing_helper import CalculateB64EncodedMd5FromContents
from gslib.utils.hashing_helper import CalculateHashesFromContents
from gslib.utils.hashing_helper import CHECK_HASH_IF_FAST_ELSE_FAIL
from gslib.utils.hashing_helper import CHECK_HASH_NEVER
from gslib.utils.hashing_helper import ConcatCrc32c
from gslib.utils.hashing_helper import GetDownloadHashAlgs
from gslib.utils.hashing_helper import GetUploadHashAlgs
from gslib.utils.hashing_helper import HashingFileUploadWrapper
from gslib.utils.metadata_util import ObjectIsGzipEncoded
from gslib.utils.parallelism_framework_util import AtomicDict
from gslib.utils.parallelism_framework_util import CheckMultiprocessingAvailableAndInit
from gslib.utils.parallelism_framework_util import PutToQueueWithTimeout
from gslib.utils.posix_util import ATIME_ATTR
from gslib.utils.posix_util import ConvertDatetimeToPOSIX
from gslib.utils.posix_util import GID_ATTR
from gslib.utils.posix_util import MODE_ATTR
from gslib.utils.posix_util import MTIME_ATTR
from gslib.utils.posix_util import ParseAndSetPOSIXAttributes
from gslib.utils.posix_util import UID_ATTR
from gslib.utils.system_util import CheckFreeSpace
from gslib.utils.system_util import GetFileSize
from gslib.utils.system_util import GetStreamFromFileUrl
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.translation_helper import AddS3MarkerAclToObjectMetadata
from gslib.utils.translation_helper import CopyObjectMetadata
from gslib.utils.translation_helper import DEFAULT_CONTENT_TYPE
from gslib.utils.translation_helper import ObjectMetadataFromHeaders
from gslib.utils.translation_helper import PreconditionsFromHeaders
from gslib.utils.translation_helper import S3MarkerAclFromObjectMetadata
from gslib.utils.unit_util import DivideAndCeil
from gslib.utils.unit_util import HumanReadableToBytes
from gslib.utils.unit_util import MakeHumanReadable
from gslib.utils.unit_util import SECONDS_PER_DAY
from gslib.utils.unit_util import TEN_MIB
from gslib.wildcard_iterator import CreateWildcardIterator
# Python 3 has no separate `long` type; alias it to `int` so code that still
# references `long` works under both interpreters.
if six.PY3:
  long = int

# pylint: disable=g-import-not-at-top
if IS_WINDOWS:
  import msvcrt

# Declare copy_helper_opts as a global because namedtuple isn't aware of
# assigning to a class member (which breaks pickling done by multiprocessing).
# For details see
# http://stackoverflow.com/questions/16377215/how-to-pickle-a-namedtuple-instance-correctly
# pylint: disable=global-at-module-level
global global_copy_helper_opts

# In-memory map of local files that are currently opened for write. Used to
# ensure that if we write to the same file twice (say, for example, because the
# user specified two identical source URLs), the writes occur serially.
global open_files_map, open_files_lock
open_files_map = AtomicDict(
    manager=(parallelism_framework_util.top_level_manager
             if CheckMultiprocessingAvailableAndInit().is_available else None))

# We don't allow multiple processes on Windows, so using a process-safe lock
# would be unnecessary.
open_files_lock = parallelism_framework_util.CreateLock()

# For debugging purposes; if True, files and objects that fail hash validation
# will be saved with the below suffix appended.
_RENAME_ON_HASH_MISMATCH = False
_RENAME_ON_HASH_MISMATCH_SUFFIX = '_corrupt'

# Prefix embedded in the names of temporary component objects created during
# parallel composite uploads (see _PartitionFile below).
PARALLEL_UPLOAD_TEMP_NAMESPACE = (
    '/gsutil/tmp/parallel_composite_uploads/for_details_see/gsutil_help_cp/')

# Static salt mixed into hashed component object names to avoid colliding with
# user object names (see the explanation within the string itself).
PARALLEL_UPLOAD_STATIC_SALT = u"""
PARALLEL_UPLOAD_SALT_TO_PREVENT_COLLISIONS.
The theory is that no user will have prepended this to the front of
one of their object names and then done an MD5 hash of the name, and
then prepended PARALLEL_UPLOAD_TEMP_NAMESPACE to the front of their object
name. Note that there will be no problems with object name length since we
hash the original name.
"""

# When uploading a file, get the following fields in the response for
# filling in command output and manifests.
UPLOAD_RETURN_FIELDS = [
    'crc32c',
    'customerEncryption',
    'etag',
    'generation',
    'md5Hash',
    'size',
]
# Bundles the per-component arguments handed to command.Apply() in the
# parallel composite upload case. A full apitools Object() is not carried here
# because apitools objects are not picklable; content_type stands in for it.
#
#   filename: String name of file.
#   file_start: start byte of file (may be in the middle of a file for
#       partitioned files).
#   file_length: length of upload (may not be the entire length of a file for
#       partitioned files).
#   src_url: FileUrl describing the source file.
#   dst_url: CloudUrl describing the destination component file.
#   canned_acl: canned_acl to apply to the uploaded file/component.
#   content_type: content-type for final object, used for setting content-type
#       of components and final object.
#   tracker_file: tracker file for this component.
#   tracker_file_lock: tracker file lock for tracker file(s).
#   encryption_key_sha256: SHA256 of the encryption key used for this upload.
#   gzip_encoded: Whether to use gzip transport encoding for the upload.
PerformParallelUploadFileToObjectArgs = namedtuple(
    'PerformParallelUploadFileToObjectArgs', [
        'filename',
        'file_start',
        'file_length',
        'src_url',
        'dst_url',
        'canned_acl',
        'content_type',
        'tracker_file',
        'tracker_file_lock',
        'encryption_key_sha256',
        'gzip_encoded',
    ])
# Bundles the per-component arguments for a sliced object download.
PerformSlicedDownloadObjectToFileArgs = namedtuple(
    'PerformSlicedDownloadObjectToFileArgs', [
        'component_num',
        'src_url',
        'src_obj_metadata_json',
        'dst_url',
        'download_file_name',
        'start_byte',
        'end_byte',
        'decryption_key',
    ])
# Encapsulates the values returned by _PerformSlicedDownloadObjectToFile.
#
#   component_num: Component number.
#   crc32c: CRC32C hash value (integer) of the downloaded bytes.
#   bytes_transferred: number of bytes transferred, potentially less than the
#       component size if the download was resumed.
#   component_total_size: number of bytes corresponding to the whole component
#       size, potentially more than bytes_transferred if the download was
#       resumed.
#   server_encoding: Content-encoding string if it was detected that the
#       server sent encoded bytes during transfer, None otherwise.
PerformSlicedDownloadReturnValues = namedtuple(
    'PerformSlicedDownloadReturnValues', [
        'component_num',
        'crc32c',
        'bytes_transferred',
        'component_total_size',
        'server_encoding',
    ])
# TODO: Refactor this file to be less cumbersome. In particular, some of the
# different paths (e.g., uploading a file to an object vs. downloading an
# object to a file) could be split into separate files.

# Chunk size to use while zipping/unzipping gzip files.
GZIP_CHUNK_SIZE = 8192

# Indicates that all files should be gzipped, in _UploadFileToObject
GZIP_ALL_FILES = 'GZIP_ALL_FILES'

# Number of bytes to wait before updating a sliced download component tracker
# file.
TRACKERFILE_UPDATE_THRESHOLD = TEN_MIB

# 150 MiB; size threshold related to suggesting parallel composite uploads.
PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD = 150 * 1024 * 1024

# S3 requires special Multipart upload logic (that we currently don't implement)
# for files > 5GiB in size.
S3_MAX_UPLOAD_SIZE = 5 * 1024 * 1024 * 1024

# TODO: Create a message class that serializes posting this message once
# through the UI's global status queue.
global suggested_sliced_transfers, suggested_sliced_transfers_lock
# Shared map (process-safe when multiprocessing is available) used to record
# that the sliced-transfer suggestion has already been made.
suggested_sliced_transfers = AtomicDict(
    manager=(parallelism_framework_util.top_level_manager
             if CheckMultiprocessingAvailableAndInit().is_available else None))
suggested_sliced_transfers_lock = parallelism_framework_util.CreateLock()

# TODO(KMS, Compose): Remove this once we support compose across CMEK-encrypted
# components, making such parallel composite uploads possible.
global bucket_metadata_pcu_check, bucket_metadata_pcu_check_lock
# When considering whether we should perform a parallel composite upload to a
# gs bucket, we check if the bucket metadata contains a defaultKmsKeyName. If
# so, we don't do a pcu. Additionally, we only want to perform this check once,
# hence the lock.
#
# Becomes True or False once populated. If we ever allow multiple destination
# arguments to cp, this could become a dict of bucket name -> bool.
bucket_metadata_pcu_check = None
bucket_metadata_pcu_check_lock = parallelism_framework_util.CreateLock()
class FileConcurrencySkipError(Exception):
  """Signals that a file was skipped because a concurrent copy of the same
  file was already in progress (see open_files_map)."""
def _RmExceptionHandler(cls, e):
"""Simple exception handler to allow post-completion status."""
cls.logger.error(str(e))
def _ParallelCopyExceptionHandler(cls, e):
"""Simple exception handler to allow post-completion status."""
cls.logger.error(str(e))
cls.op_failure_count += 1
cls.logger.debug('\n\nEncountered exception while copying:\n%s\n',
traceback.format_exc())
def _PerformParallelUploadFileToObject(cls, args, thread_state=None):
  """Function argument to Apply for performing parallel composite uploads.

  Uploads one component (a byte range of the source file) to its temporary
  destination object and records the component in the parallel upload tracker
  file.

  Args:
    cls: Calling Command class.
    args: PerformParallelUploadFileToObjectArgs tuple describing the target.
    thread_state: gsutil Cloud API instance to use for the operation.

  Returns:
    Tuple from _UploadFileToObject for the successfully uploaded component
    (element 2 is the uploaded component's destination, which is also written
    to the tracker file).
  """
  fp = FilePart(args.filename, args.file_start, args.file_length)
  gsutil_api = GetCloudApiInstance(cls, thread_state=thread_state)
  with fp:
    # We take many precautions with the component names that make collisions
    # effectively impossible. Specifying preconditions will just allow us to
    # reach a state in which uploads will always fail on retries.
    preconditions = None

    # Fill in content type if one was provided.
    dst_object_metadata = apitools_messages.Object(
        name=args.dst_url.object_name,
        bucket=args.dst_url.bucket_name,
        contentType=args.content_type)

    try:
      if global_copy_helper_opts.canned_acl:
        # No canned ACL support in JSON, force XML API to be used for
        # upload/copy operations.
        orig_prefer_api = gsutil_api.prefer_api
        gsutil_api.prefer_api = ApiSelector.XML
      ret = _UploadFileToObject(args.src_url,
                                fp,
                                args.file_length,
                                args.dst_url,
                                dst_object_metadata,
                                preconditions,
                                gsutil_api,
                                cls.logger,
                                cls,
                                _ParallelCopyExceptionHandler,
                                gzip_exts=None,
                                allow_splitting=False,
                                is_component=True,
                                gzip_encoded=args.gzip_encoded)
    finally:
      # Restore the API preference even if the upload failed, so this
      # thread-local API instance isn't left pinned to XML.
      if global_copy_helper_opts.canned_acl:
        gsutil_api.prefer_api = orig_prefer_api

  # ret[2] is the uploaded component's destination; persist it so an
  # interrupted composite upload can be resumed without re-uploading it.
  component = ret[2]
  WriteComponentToParallelUploadTrackerFile(
      args.tracker_file,
      args.tracker_file_lock,
      component,
      cls.logger,
      encryption_key_sha256=args.encryption_key_sha256)
  return ret
# Options shared by CopyHelper and the copy-related functionality in
# CpCommand, RsyncCommand, and the Command base class.
CopyHelperOpts = namedtuple(
    'CopyHelperOpts',
    'perform_mv no_clobber daisy_chain read_args_from_stdin print_ver '
    'use_manifest preserve_acl canned_acl skip_unsupported_objects '
    'test_callback_file dest_storage_class')
# pylint: disable=global-variable-undefined
def CreateCopyHelperOpts(perform_mv=False,
                         no_clobber=False,
                         daisy_chain=False,
                         read_args_from_stdin=False,
                         print_ver=False,
                         use_manifest=False,
                         preserve_acl=False,
                         canned_acl=None,
                         skip_unsupported_objects=False,
                         test_callback_file=None,
                         dest_storage_class=None):
  """Builds and installs the module-global CopyHelperOpts.

  The tuple is the union of options needed by CopyHelper and any copy-related
  functionality in CpCommand, RsyncCommand, or the Command class.

  Returns:
    The newly created CopyHelperOpts (also stored in the module-global
    global_copy_helper_opts).
  """
  global global_copy_helper_opts
  # Positional construction relies on CopyHelperOpts declaring its fields in
  # the same order as this function's parameters.
  global_copy_helper_opts = CopyHelperOpts(perform_mv, no_clobber, daisy_chain,
                                           read_args_from_stdin, print_ver,
                                           use_manifest, preserve_acl,
                                           canned_acl,
                                           skip_unsupported_objects,
                                           test_callback_file,
                                           dest_storage_class)
  return global_copy_helper_opts
# pylint: disable=global-variable-undefined
# pylint: disable=global-variable-not-assigned
def GetCopyHelperOpts():
  """Returns namedtuple holding CopyHelper options.

  Assumes CreateCopyHelperOpts() has populated the module-global
  global_copy_helper_opts (the only place in this module that assigns it).
  """
  global global_copy_helper_opts
  return global_copy_helper_opts
def _SelectDownloadStrategy(dst_url):
  """Picks the download strategy appropriate for the destination.

  Special destinations (os.devnull, character devices) get the one-shot
  strategy; all other destinations use the resumable strategy.

  Args:
    dst_url: Destination StorageUrl.

  Returns:
    gsutil Cloud API DownloadStrategy.
  """
  special_destination = False
  if dst_url.IsFileUrl():
    # Check the name explicitly first because os.stat doesn't work on 'nul'
    # in Windows.
    if dst_url.object_name == os.devnull:
      special_destination = True
    try:
      if stat.S_ISCHR(os.stat(dst_url.object_name).st_mode):
        special_destination = True
    except OSError:
      pass

  if special_destination:
    return CloudApi.DownloadStrategy.ONE_SHOT
  return CloudApi.DownloadStrategy.RESUMABLE
def InsistDstUrlNamesContainer(exp_dst_url, have_existing_dst_container,
                               command_name):
  """Raises unless the destination URL names a container.

  Acceptable containers include directory, bucket, bucket subdir, and
  non-existent bucket subdir.

  Args:
    exp_dst_url: Wildcard-expanded destination StorageUrl.
    have_existing_dst_container: bool indicator of whether exp_dst_url names a
        container (directory, bucket, or existing bucket subdir).
    command_name: Name of command making call. May not be the same as the
        calling class's self.command_name in the case of commands implemented
        atop other commands (like mv command).

  Raises:
    CommandException: if the URL being checked does not name a container.
  """
  if exp_dst_url.IsFileUrl():
    # A local destination must be a directory.
    not_a_container = not exp_dst_url.IsDirectory()
  else:
    # A bare bucket only fails if it doesn't already exist as a container.
    not_a_container = (exp_dst_url.IsCloudUrl() and exp_dst_url.IsBucket() and
                       not have_existing_dst_container)
  if not_a_container:
    raise CommandException('Destination URL must name a directory, bucket, '
                           'or bucket\nsubdirectory for the multiple '
                           'source form of the %s command.' % command_name)
def _ShouldTreatDstUrlAsBucketSubDir(have_multiple_srcs, dst_url,
have_existing_dest_subdir,
src_url_names_container,
recursion_requested):
"""Checks whether dst_url should be treated as a bucket "sub-directory".
The decision about whether something constitutes a bucket "sub-directory"
depends on whether there are multiple sources in this request and whether
there is an existing bucket subdirectory. For example, when running the
command:
gsutil cp file gs://bucket/abc
if there's no existing gs://bucket/abc bucket subdirectory we should copy
file to the object gs://bucket/abc. In contrast, if
there's an existing gs://bucket/abc bucket subdirectory we should copy
file to gs://bucket/abc/file. And regardless of whether gs://bucket/abc
exists, when running the command:
gsutil cp file1 file2 gs://bucket/abc
we should copy file1 to gs://bucket/abc/file1 (and similarly for file2).
Finally, for recursive copies, if the source is a container then we should
copy to a container as the target. For example, when running the command:
gsutil cp -r dir1 gs://bucket/dir2
we should copy the subtree of dir1 to gs://bucket/dir2.
Note that we don't disallow naming a bucket "sub-directory" where there's
already an object at that URL. For example it's legitimate (albeit
confusing) to have an object called gs://bucket/dir and
then run the command
gsutil cp file1 file2 gs://bucket/dir
Doing so will end up with objects gs://bucket/dir, gs://bucket/dir/file1,
and gs://bucket/dir/file2.
Args:
have_multiple_srcs: Bool indicator of whether this is a multi-source
operation.
dst_url: StorageUrl to check.
have_existing_dest_subdir: bool indicator whether dest is an existing
subdirectory.
src_url_names_container: bool indicator of whether the source URL
is a container.
recursion_requested: True if a recursive operation has been requested.
Returns:
bool indicator.
"""
if have_existing_dest_subdir:
return True
if dst_url.IsCloudUrl():
return (have_multiple_srcs or
(src_url_names_container and recursion_requested))
def _ShouldTreatDstUrlAsSingleton(src_url_names_container, have_multiple_srcs,
have_existing_dest_subdir, dst_url,
recursion_requested):
"""Checks that dst_url names a single file/object after wildcard expansion.
It is possible that an object path might name a bucket sub-directory.
Args:
src_url_names_container: Bool indicator of whether the source for the
operation is a container (bucket, bucket subdir, or directory).
have_multiple_srcs: Bool indicator of whether this is a multi-source
operation.
have_existing_dest_subdir: bool indicator whether dest is an existing
subdirectory.
dst_url: StorageUrl to check.
recursion_requested: True if a recursive operation has been requested.
Returns:
bool indicator.
"""
if recursion_requested and src_url_names_container:
return False
if dst_url.IsFileUrl():
return not dst_url.IsDirectory()
else: # dst_url.IsCloudUrl()
return (not have_multiple_srcs and not have_existing_dest_subdir and
dst_url.IsObject())
def ConstructDstUrl(src_url,
                    exp_src_url,
                    src_url_names_container,
                    have_multiple_srcs,
                    exp_dst_url,
                    have_existing_dest_subdir,
                    recursion_requested,
                    preserve_posix=False):
  """Constructs the destination URL for a given exp_src_url/exp_dst_url pair.

  Uses context-dependent naming rules that mimic Linux cp and mv behavior.

  Args:
    src_url: Source StorageUrl to be copied.
    exp_src_url: Single StorageUrl from wildcard expansion of src_url.
    src_url_names_container: True if src_url names a container (including the
        case of a wildcard-named bucket subdir (like gs://bucket/abc,
        where gs://bucket/abc/* matched some objects).
    have_multiple_srcs: True if this is a multi-source request. This can be
        true if src_url wildcard-expanded to multiple URLs or if there were
        multiple source URLs in the request.
    exp_dst_url: the expanded StorageUrl requested for the cp destination.
        Final written path is constructed from this plus a context-dependent
        variant of src_url.
    have_existing_dest_subdir: bool indicator whether dest is an existing
        subdirectory.
    recursion_requested: True if a recursive operation has been requested.
    preserve_posix: True if preservation of posix attributes has been
        requested.

  Returns:
    StorageUrl to use for copy.

  Raises:
    CommandException if destination object name not specified for
    source and source is a stream, or if POSIX attribute preservation is
    requested for a stream destination.
  """
  # POSIX attributes (mtime, uid, etc.) cannot be applied to a stream.
  if (exp_dst_url.IsFileUrl() and exp_dst_url.IsStream() and preserve_posix):
    raise CommandException('Cannot preserve POSIX attributes with a stream.')

  if _ShouldTreatDstUrlAsSingleton(src_url_names_container, have_multiple_srcs,
                                   have_existing_dest_subdir, exp_dst_url,
                                   recursion_requested):
    # We're copying one file or object to one file or object.
    return exp_dst_url

  if exp_src_url.IsFileUrl() and (exp_src_url.IsStream() or
                                  exp_src_url.IsFifo()):
    # A stream/fifo source has no name to derive a destination object name
    # from, so the destination must already name the object.
    if have_existing_dest_subdir:
      raise CommandException('Destination object name needed when '
                             'source is a stream')
    return exp_dst_url

  if not recursion_requested and not have_multiple_srcs:
    # We're copying one file or object to a subdirectory. Append final comp
    # of exp_src_url to exp_dst_url.
    src_final_comp = exp_src_url.object_name.rpartition(src_url.delim)[-1]
    return StorageUrlFromString('%s%s%s' % (exp_dst_url.url_string.rstrip(
        exp_dst_url.delim), exp_dst_url.delim, src_final_comp))

  # Else we're copying multiple sources to a directory, bucket, or a bucket
  # "sub-directory".

  # Ensure exp_dst_url ends in delim char if we're doing a multi-src copy or
  # a copy to a directory. (The check for copying to a directory needs
  # special-case handling so that the command:
  #   gsutil cp gs://bucket/obj dir
  # will turn into file://dir/ instead of file://dir -- the latter would cause
  # the file "dirobj" to be created.)
  # Note: need to check have_multiple_srcs or src_url.names_container()
  # because src_url could be a bucket containing a single object, named
  # as gs://bucket.
  if ((have_multiple_srcs or src_url_names_container or
       (exp_dst_url.IsFileUrl() and exp_dst_url.IsDirectory())) and
      not exp_dst_url.url_string.endswith(exp_dst_url.delim)):
    exp_dst_url = StorageUrlFromString(
        '%s%s' % (exp_dst_url.url_string, exp_dst_url.delim))

  # Making naming behavior match how things work with local Linux cp and mv
  # operations depends on many factors, including whether the destination is a
  # container, the plurality of the source(s), and whether the mv command is
  # being used:
  # 1. For the "mv" command that specifies a non-existent destination subdir,
  #    renaming should occur at the level of the src subdir, vs appending that
  #    subdir beneath the dst subdir like is done for copying. For example:
  #      gsutil rm -r gs://bucket
  #      gsutil cp -r dir1 gs://bucket
  #      gsutil cp -r dir2 gs://bucket/subdir1
  #      gsutil mv gs://bucket/subdir1 gs://bucket/subdir2
  #    would (if using cp naming behavior) end up with paths like:
  #      gs://bucket/subdir2/subdir1/dir2/.svn/all-wcprops
  #    whereas mv naming behavior should result in:
  #      gs://bucket/subdir2/dir2/.svn/all-wcprops
  # 2. Copying from directories, buckets, or bucket subdirs should result in
  #    objects/files mirroring the source directory hierarchy. For example:
  #      gsutil cp dir1/dir2 gs://bucket
  #    should create the object gs://bucket/dir2/file2, assuming dir1/dir2
  #    contains file2).
  #    To be consistent with Linux cp behavior, there's one more wrinkle when
  #    working with subdirs: The resulting object names depend on whether the
  #    destination subdirectory exists. For example, if gs://bucket/subdir
  #    exists, the command:
  #      gsutil cp -r dir1/dir2 gs://bucket/subdir
  #    should create objects named like gs://bucket/subdir/dir2/a/b/c. In
  #    contrast, if gs://bucket/subdir does not exist, this same command
  #    should create objects named like gs://bucket/subdir/a/b/c.
  # 3. Copying individual files or objects to dirs, buckets or bucket subdirs
  #    should result in objects/files named by the final source file name
  #    component. Example:
  #      gsutil cp dir1/*.txt gs://bucket
  #    should create the objects gs://bucket/f1.txt and gs://bucket/f2.txt,
  #    assuming dir1 contains f1.txt and f2.txt.
  recursive_move_to_new_subdir = False
  if (global_copy_helper_opts.perform_mv and recursion_requested and
      src_url_names_container and not have_existing_dest_subdir):
    # Case 1. Handle naming rules for bucket subdir mv. Here we want to
    # line up the src_url against its expansion, to find the base to build
    # the new name. For example, running the command:
    #   gsutil mv gs://bucket/abcd gs://bucket/xyz
    # when processing exp_src_url=gs://bucket/abcd/123
    # exp_src_url_tail should become /123
    # Note: mv.py code disallows wildcard specification of source URL.
    recursive_move_to_new_subdir = True
    exp_src_url_tail = (exp_src_url.url_string[len(src_url.url_string):])
    dst_key_name = '%s/%s' % (exp_dst_url.object_name.rstrip('/'),
                              exp_src_url_tail.strip('/'))
  elif src_url_names_container and (exp_dst_url.IsCloudUrl() or
                                    exp_dst_url.IsDirectory()):
    # Case 2. Container copy to a destination other than a file.
    # Build dst_key_name from subpath of exp_src_url past
    # where src_url ends. For example, for src_url=gs://bucket/ and
    # exp_src_url=gs://bucket/src_subdir/obj, dst_key_name should be
    # src_subdir/obj.
    src_url_path_sans_final_dir = GetPathBeforeFinalDir(src_url, exp_src_url)
    dst_key_name = exp_src_url.versionless_url_string[len(
        src_url_path_sans_final_dir):].lstrip(src_url.delim)
    # Handle case where dst_url is a non-existent subdir.
    if not have_existing_dest_subdir:
      dst_key_name = dst_key_name.partition(src_url.delim)[-1]
    # Handle special case where src_url was a directory named with '.' or
    # './', so that running a command like:
    #   gsutil cp -r . gs://dest
    # will produce obj names of the form gs://dest/abc instead of
    # gs://dest/./abc.
    if dst_key_name.startswith('.%s' % os.sep):
      dst_key_name = dst_key_name[2:]
  else:
    # Case 3. Name the destination after the final source path component.
    dst_key_name = exp_src_url.object_name.rpartition(src_url.delim)[-1]

  if (not recursive_move_to_new_subdir and
      (exp_dst_url.IsFileUrl() or _ShouldTreatDstUrlAsBucketSubDir(
          have_multiple_srcs, exp_dst_url, have_existing_dest_subdir,
          src_url_names_container, recursion_requested))):
    # Prepend the destination subdir/directory path to the key name.
    if exp_dst_url.object_name and exp_dst_url.object_name.endswith(
        exp_dst_url.delim):
      dst_key_name = '%s%s%s' % (exp_dst_url.object_name.rstrip(
          exp_dst_url.delim), exp_dst_url.delim, dst_key_name)
    else:
      delim = exp_dst_url.delim if exp_dst_url.object_name else ''
      dst_key_name = '%s%s%s' % (exp_dst_url.object_name or '', delim,
                                 dst_key_name)

  new_exp_dst_url = exp_dst_url.Clone()
  # Translate source delimiters (e.g. os.sep for local paths) into the
  # destination's delimiter.
  new_exp_dst_url.object_name = dst_key_name.replace(src_url.delim,
                                                     exp_dst_url.delim)
  return new_exp_dst_url
def _CreateDigestsFromDigesters(digesters):
b64enc = base64.encodestring if six.PY2 else base64.encodebytes
digests = {}
if digesters:
for alg in digesters:
digests[alg] = b64enc(
digesters[alg].digest()).rstrip(b'\n').decode('ascii')
return digests
def _CreateDigestsFromLocalFile(status_queue, algs, file_name, src_url,
                                src_obj_metadata):
  """Creates a base64 CRC32C and/or MD5 digest from file_name.

  Args:
    status_queue: Queue for posting progress messages for UI/Analytics.
    algs: List of algorithms to compute ('md5' and/or 'crc32c').
    file_name: File to digest.
    src_url: StorageUrl for local object. Used to track progress.
    src_obj_metadata: Metadata of source object; its size field drives the
        progress callback.

  Returns:
    Dict of algorithm name : base 64 encoded digest
  """
  hash_dict = {}
  if 'md5' in algs:
    hash_dict['md5'] = md5()
  if 'crc32c' in algs:
    hash_dict['crc32c'] = crcmod.predefined.Crc('crc-32c')
  with open(file_name, 'rb') as fp:
    # Stream the file once through all requested digesters, posting progress
    # updates to the status queue as bytes are hashed.
    CalculateHashesFromContents(fp,
                                hash_dict,
                                callback_processor=ProgressCallbackWithTimeout(
                                    src_obj_metadata.size,
                                    FileProgressCallbackHandler(
                                        status_queue,
                                        src_url=src_url,
                                        operation_name='Hashing').call))
  digests = {}
  for alg_name, digest in six.iteritems(hash_dict):
    # Convert each raw digest to the base64 form used throughout gsutil.
    digests[alg_name] = Base64EncodeHash(digest.hexdigest())
  return digests
def _CheckCloudHashes(logger, src_url, dst_url, src_obj_metadata,
                      dst_obj_metadata):
  """Validates integrity of two cloud objects copied via daisy-chain.

  Compares the source object's digests against the destination object's
  digests for every algorithm present on both sides.

  Args:
    logger: for outputting log messages.
    src_url: CloudUrl for source cloud object.
    dst_url: CloudUrl for destination cloud object.
    src_obj_metadata: Cloud Object metadata for object being downloaded from.
    dst_obj_metadata: Cloud Object metadata for object being uploaded to.

  Raises:
    HashMismatchException: if source and destination digests don't match.
  """
  # See hack comment in _CheckHashes.
  download_hashes = {}
  upload_hashes = {}
  if src_obj_metadata.md5Hash:
    download_hashes['md5'] = six.ensure_binary(src_obj_metadata.md5Hash)
  if src_obj_metadata.crc32c:
    download_hashes['crc32c'] = six.ensure_binary(src_obj_metadata.crc32c)
  if dst_obj_metadata.md5Hash:
    upload_hashes['md5'] = six.ensure_binary(dst_obj_metadata.md5Hash)
  if dst_obj_metadata.crc32c:
    upload_hashes['crc32c'] = six.ensure_binary(dst_obj_metadata.crc32c)

  checked_one = False
  for alg, upload_b64_digest in six.iteritems(upload_hashes):
    download_b64_digest = download_hashes.get(alg)
    if download_b64_digest is None:
      # The source didn't supply this algorithm; nothing to compare.
      continue
    if six.PY3 and isinstance(download_b64_digest, str):
      download_b64_digest = download_b64_digest.encode('ascii')
    logger.debug('Comparing source vs destination %s-checksum for %s. (%s/%s)',
                 alg, dst_url, download_b64_digest, upload_b64_digest)
    if download_b64_digest != upload_b64_digest:
      raise HashMismatchException(
          '%s signature for source object (%s) doesn\'t match '
          'destination object digest (%s). Object (%s) will be deleted.' %
          (alg, download_b64_digest, upload_b64_digest, dst_url))
    checked_one = True

  if not checked_one:
    # One known way this can currently happen is when downloading objects
    # larger than 5 GiB from S3 (for which the etag is not an MD5).
    logger.warn(
        'WARNING: Found no hashes to validate object downloaded from %s and '
        'uploaded to %s. Integrity cannot be assured without hashes.', src_url,
        dst_url)
def _CheckHashes(logger,
                 obj_url,
                 obj_metadata,
                 file_name,
                 digests,
                 is_upload=False):
  """Validates integrity by comparing cloud digest to local digest.

  Args:
    logger: for outputting log messages.
    obj_url: CloudUrl for cloud object.
    obj_metadata: Cloud Object being downloaded from or uploaded to.
    file_name: Local file name on disk being downloaded to or uploaded from
        (used only for logging).
    digests: Computed Digests for the object.
    is_upload: If true, comparing for an uploaded object (controls logging).

  Raises:
    HashMismatchException: if cloud digests don't match local digests.
  """
  # Hack below.
  # I cannot track down all of the code paths that get here, so I finally
  # gave up and opted to convert all of the hashes to str. I know that they
  # *should* be bytes, but the path of least resistance led to str.
  # Not a nice thing, but for now it makes tests pass...
  # Update: Since the _CheckCloudHashes function above needs to be changed
  # as well, I am going to make the executive decision that hashes are
  # bytes - here as well. It's what the hash and base64 PY3 libs return,
  # and should be the native format for these things.
  local_hashes = digests
  cloud_hashes = {}
  if obj_metadata.md5Hash:
    md5_b64_digest = six.ensure_binary(obj_metadata.md5Hash)
    cloud_hashes['md5'] = md5_b64_digest.rstrip(b'\n')
  if obj_metadata.crc32c:
    crc32c_b64_hash = six.ensure_binary(obj_metadata.crc32c)
    cloud_hashes['crc32c'] = crc32c_b64_hash.rstrip(b'\n')

  checked_one = False
  for alg in local_hashes:
    if alg not in cloud_hashes:
      # Only compare algorithms present on both sides.
      continue

    local_b64_digest = six.ensure_binary(local_hashes[alg])
    cloud_b64_digest = cloud_hashes[alg]
    logger.debug('Comparing local vs cloud %s-checksum for %s. (%s/%s)', alg,
                 file_name, local_b64_digest, cloud_b64_digest)
    if local_b64_digest != cloud_b64_digest:
      raise HashMismatchException(
          '%s signature computed for local file (%s) doesn\'t match '
          'cloud-supplied digest (%s). %s (%s) will be deleted.' %
          (alg, local_b64_digest, cloud_b64_digest, 'Cloud object'
           if is_upload else 'Local file', obj_url if is_upload else file_name))
    checked_one = True
  if not checked_one:
    if is_upload:
      logger.warn(
          'WARNING: Found no hashes to validate object uploaded to %s. '
          'Integrity cannot be assured without hashes.', obj_url)
    else:
      # One known way this can currently happen is when downloading objects larger
      # than 5 GB from S3 (for which the etag is not an MD5).
      logger.warn(
          'WARNING: Found no hashes to validate object downloaded to %s. '
          'Integrity cannot be assured without hashes.', file_name)
def IsNoClobberServerException(e):
  """Checks to see if the server attempted to clobber a file.

  In this case we specified via a precondition that we didn't want the file
  clobbered.

  Args:
    e: The Exception that was generated by a failed copy operation

  Returns:
    bool indicator - True indicates that the server did attempt to clobber
        an existing file.
  """
  if isinstance(e, PreconditionException):
    return True
  # The XML API surfaces precondition failures as resumable upload errors
  # carrying an HTTP 412 status in the message.
  return isinstance(e, ResumableUploadException) and '412' in e.message
def CheckForDirFileConflict(exp_src_url, dst_url):
  """Checks whether copying exp_src_url into dst_url is not possible.

  This happens if a directory exists in local file system where a file needs
  to go or vice versa. Example: if the file "./x" exists and you try to do:

    gsutil cp gs://mybucket/x/y .

  the request can't succeed because it requires a directory where the file x
  exists.

  Note that no corresponding restriction exists for buckets: the flat
  namespace semantics for buckets doesn't prohibit such cases the way
  hierarchical file systems do. For example, if a bucket contains an object
  called gs://bucket/dir and you run `gsutil cp file1 file2 gs://bucket/dir`,
  you'll end up with objects gs://bucket/dir, gs://bucket/dir/file1, and
  gs://bucket/dir/file2.

  Args:
    exp_src_url: Expanded source StorageUrl.
    dst_url: Destination StorageUrl.

  Raises:
    CommandException: if errors encountered.
  """
  if dst_url.IsCloudUrl():
    # The problem can only happen for file destination URLs.
    return

  dst_path = dst_url.object_name
  parent_dir = os.path.dirname(dst_path)
  if os.path.isfile(parent_dir):
    raise CommandException('Cannot retrieve %s because a file exists '
                           'where a directory needs to be created (%s).' %
                           (exp_src_url.url_string, parent_dir))
  if os.path.isdir(dst_path):
    raise CommandException('Cannot retrieve %s because a directory exists '
                           '(%s) where the file needs to be created.' %
                           (exp_src_url.url_string, dst_path))
def _PartitionFile(fp,
                   file_size,
                   src_url,
                   content_type,
                   canned_acl,
                   dst_bucket_url,
                   random_prefix,
                   tracker_file,
                   tracker_file_lock,
                   encryption_key_sha256=None,
                   gzip_encoded=False):
  """Partitions a file into FilePart objects to be uploaded and later composed.

  These objects, when composed, will match the original file. This entails
  splitting the file into parts, naming and forming a destination URL for each
  part, and also providing the PerformParallelUploadFileToObjectArgs
  corresponding to each part.

  Args:
    fp: The file object to be partitioned.
    file_size: The size of fp, in bytes.
    src_url: Source FileUrl from the original command.
    content_type: content type for the component and final objects.
    canned_acl: The user-provided canned_acl, if applicable.
    dst_bucket_url: CloudUrl for the destination bucket
    random_prefix: The randomly-generated prefix used to prevent collisions
        among the temporary component names.
    tracker_file: The path to the parallel composite upload tracker file.
    tracker_file_lock: The lock protecting access to the tracker file.
    encryption_key_sha256: Encryption key SHA256 for use in this upload, if
        any.
    gzip_encoded: Whether to use gzip transport encoding for the upload.

  Returns:
    dst_args: Dict mapping temporary component object name to the
        PerformParallelUploadFileToObjectArgs for uploading that component.
  """
  # Component size comes from boto config, falling back to the default.
  parallel_composite_upload_component_size = HumanReadableToBytes(
      config.get('GSUtil', 'parallel_composite_upload_component_size',
                 DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE))
  (num_components, component_size) = _GetPartitionInfo(
      file_size, MAX_COMPOSE_ARITY, parallel_composite_upload_component_size)

  dst_args = {}  # Arguments to create commands and pass to subprocesses.
  file_names = []  # Used for the 2-step process of forming dst_args.
  for i in range(num_components):
    # "Salt" the object name with something a user is very unlikely to have
    # used in an object name, then hash the extended name to make sure
    # we don't run into problems with name length. Using a deterministic
    # naming scheme for the temporary components allows users to take
    # advantage of resumable uploads for each component.
    encoded_name = six.ensure_binary(PARALLEL_UPLOAD_STATIC_SALT + fp.name)
    content_md5 = md5()
    content_md5.update(encoded_name)
    digest = content_md5.hexdigest()
    temp_file_name = (random_prefix + PARALLEL_UPLOAD_TEMP_NAMESPACE + digest +
                      '_' + str(i))
    tmp_dst_url = dst_bucket_url.Clone()
    tmp_dst_url.object_name = temp_file_name

    if i < (num_components - 1):
      # Every component except possibly the last is the same size.
      file_part_length = component_size
    else:
      # The last component just gets all of the remaining bytes.
      file_part_length = (file_size - ((num_components - 1) * component_size))
    offset = i * component_size

    func_args = PerformParallelUploadFileToObjectArgs(
        fp.name, offset, file_part_length, src_url, tmp_dst_url, canned_acl,
        content_type, tracker_file, tracker_file_lock, encryption_key_sha256,
        gzip_encoded)
    file_names.append(temp_file_name)
    dst_args[temp_file_name] = func_args

  return dst_args
def _GetComponentNumber(component):
"""Gets component number from component CloudUrl.
Used during parallel composite upload.
Args:
component: CloudUrl representing component.
Returns:
component number
"""
return int(component.object_name[component.object_name.rfind('_') + 1:])
def _DoParallelCompositeUpload(fp,
                               src_url,
                               dst_url,
                               dst_obj_metadata,
                               canned_acl,
                               file_size,
                               preconditions,
                               gsutil_api,
                               command_obj,
                               copy_exception_handler,
                               logger,
                               gzip_encoded=False):
  """Uploads a local file to a cloud object using parallel composite upload.

  The file is partitioned into parts, and then the parts are uploaded in
  parallel, composed to form the original destination object, and deleted.

  Args:
    fp: The file object to be uploaded.
    src_url: FileUrl representing the local file.
    dst_url: CloudUrl representing the destination file.
    dst_obj_metadata: apitools Object describing the destination object.
    canned_acl: The canned acl to apply to the object, if any.
    file_size: The size of the source file in bytes.
    preconditions: Cloud API Preconditions for the final object.
    gsutil_api: gsutil Cloud API instance to use.
    command_obj: Command object (for calling Apply).
    copy_exception_handler: Copy exception handler (for use in Apply).
    logger: logging.Logger for outputting log messages.
    gzip_encoded: Whether to use gzip transport encoding for the upload.

  Returns:
    Elapsed upload time, uploaded Object with generation, crc32c, and size
    fields populated.
  """
  start_time = time.time()
  dst_bucket_url = StorageUrlFromString(dst_url.bucket_url_string)
  api_selector = gsutil_api.GetApiSelector(provider=dst_url.scheme)
  encryption_keywrapper = GetEncryptionKeyWrapper(config)
  encryption_key_sha256 = (encryption_keywrapper.crypto_key_sha256
                           if encryption_keywrapper else None)
  # Determine which components, if any, have already been successfully
  # uploaded (recorded in the tracker file by a previous, interrupted run).
  tracker_file_name = GetTrackerFilePath(dst_url,
                                         TrackerFileType.PARALLEL_UPLOAD,
                                         api_selector, src_url)
  (existing_enc_key_sha256, existing_prefix,
   existing_components) = (ReadParallelUploadTrackerFile(
       tracker_file_name, logger))
  # Ensure that the tracker data is still valid (encryption keys match) and
  # perform any necessary cleanup.
  (existing_prefix, existing_components) = ValidateParallelCompositeTrackerData(
      tracker_file_name, existing_enc_key_sha256, existing_prefix,
      existing_components, encryption_key_sha256, dst_bucket_url, command_obj,
      logger, _DeleteTempComponentObjectFn, _RmExceptionHandler)
  encryption_key_sha256 = (encryption_key_sha256.decode('ascii')
                           if encryption_key_sha256 is not None else None)
  # Reuse the prior run's component-name prefix so that partially uploaded
  # components can be found again; otherwise generate a fresh one.
  random_prefix = (existing_prefix if existing_prefix is not None else
                   GenerateComponentObjectPrefix(
                       encryption_key_sha256=encryption_key_sha256))
  # Create (or overwrite) the tracker file for the upload.
  WriteParallelUploadTrackerFile(tracker_file_name,
                                 random_prefix,
                                 existing_components,
                                 encryption_key_sha256=encryption_key_sha256)
  # Protect the tracker file within calls to Apply.
  tracker_file_lock = parallelism_framework_util.CreateLock()
  # Dict to track component info so we may align FileMessage values
  # before and after the operation.
  components_info = {}
  # Get the set of all components that should be uploaded.
  dst_args = _PartitionFile(fp,
                            file_size,
                            src_url,
                            dst_obj_metadata.contentType,
                            canned_acl,
                            dst_bucket_url,
                            random_prefix,
                            tracker_file_name,
                            tracker_file_lock,
                            encryption_key_sha256=encryption_key_sha256,
                            gzip_encoded=gzip_encoded)
  (components_to_upload, existing_components,
   existing_objects_to_delete) = (FilterExistingComponents(
       dst_args, existing_components, dst_bucket_url, gsutil_api))
  # Assign a start message to each different component type
  for component in components_to_upload:
    components_info[component.dst_url.url_string] = (
        FileMessage.COMPONENT_TO_UPLOAD, component.file_length)
    PutToQueueWithTimeout(
        gsutil_api.status_queue,
        FileMessage(component.src_url,
                    component.dst_url,
                    time.time(),
                    size=component.file_length,
                    finished=False,
                    component_num=_GetComponentNumber(component.dst_url),
                    message_type=FileMessage.COMPONENT_TO_UPLOAD))
  for component in existing_components:
    # existing_components entries are (CloudUrl, size) pairs.
    component_str = component[0].versionless_url_string
    components_info[component_str] = (FileMessage.EXISTING_COMPONENT,
                                      component[1])
    PutToQueueWithTimeout(
        gsutil_api.status_queue,
        FileMessage(src_url,
                    component[0],
                    time.time(),
                    finished=False,
                    size=component[1],
                    component_num=_GetComponentNumber(component[0]),
                    message_type=FileMessage.EXISTING_COMPONENT))
  for component in existing_objects_to_delete:
    PutToQueueWithTimeout(
        gsutil_api.status_queue,
        FileMessage(src_url,
                    component,
                    time.time(),
                    finished=False,
                    message_type=FileMessage.EXISTING_OBJECT_TO_DELETE))
  # In parallel, copy all of the file parts that haven't already been
  # uploaded to temporary objects.
  cp_results = command_obj.Apply(
      _PerformParallelUploadFileToObject,
      components_to_upload,
      copy_exception_handler, ('op_failure_count', 'total_bytes_transferred'),
      arg_checker=gslib.command.DummyArgChecker,
      parallel_operations_override=command_obj.ParallelOverrideReason.SLICE,
      should_return_results=True)
  uploaded_components = []
  for cp_result in cp_results:
    # Each Apply result is a tuple whose third element is the component URL.
    uploaded_components.append(cp_result[2])
  components = uploaded_components + [i[0] for i in existing_components]
  if len(components) == len(dst_args):
    # Only try to compose if all of the components were uploaded successfully.
    # Sort the components so that they will be composed in the correct order.
    components = sorted(components, key=_GetComponentNumber)
    request_components = []
    for component_url in components:
      src_obj_metadata = (
          apitools_messages.ComposeRequest.SourceObjectsValueListEntry(
              name=component_url.object_name))
      if component_url.HasGeneration():
        # NOTE(review): `long` is presumably shimmed to `int` on Python 3
        # elsewhere in this module — confirm before touching.
        src_obj_metadata.generation = long(component_url.generation)
      request_components.append(src_obj_metadata)
    composed_object = gsutil_api.ComposeObject(
        request_components,
        dst_obj_metadata,
        preconditions=preconditions,
        provider=dst_url.scheme,
        fields=['crc32c', 'generation', 'size'],
        encryption_tuple=encryption_keywrapper)
    try:
      # Make sure only to delete things that we know were successfully
      # uploaded (as opposed to all of the objects that we attempted to
      # create) so that we don't delete any preexisting objects, except for
      # those that were uploaded by a previous, failed run and have since
      # changed (but still have an old generation lying around).
      objects_to_delete = components + existing_objects_to_delete
      command_obj.Apply(
          _DeleteTempComponentObjectFn,
          objects_to_delete,
          _RmExceptionHandler,
          arg_checker=gslib.command.DummyArgChecker,
          parallel_operations_override=command_obj.ParallelOverrideReason.SLICE)
      # Assign an end message to each different component type
      for component in components:
        component_str = component.versionless_url_string
        try:
          PutToQueueWithTimeout(
              gsutil_api.status_queue,
              FileMessage(src_url,
                          component,
                          time.time(),
                          finished=True,
                          component_num=_GetComponentNumber(component),
                          size=components_info[component_str][1],
                          message_type=components_info[component_str][0]))
        except:  # pylint: disable=bare-except
          # Fall back to a minimal message if the component was not tracked.
          PutToQueueWithTimeout(
              gsutil_api.status_queue,
              FileMessage(src_url, component, time.time(), finished=True))
      for component in existing_objects_to_delete:
        PutToQueueWithTimeout(
            gsutil_api.status_queue,
            FileMessage(src_url,
                        component,
                        time.time(),
                        finished=True,
                        message_type=FileMessage.EXISTING_OBJECT_TO_DELETE))
    except Exception:  # pylint: disable=broad-except
      # If some of the delete calls fail, don't cause the whole command to
      # fail. The copy was successful iff the compose call succeeded, so
      # reduce this to a warning.
      # NOTE(review): logger.warn is deprecated in favor of logger.warning.
      logger.warn(
          'Failed to delete some of the following temporary objects:\n' +
          '\n'.join(dst_args.keys()))
    finally:
      # The upload is complete (or cleanup was attempted); remove the tracker.
      with tracker_file_lock:
        DeleteTrackerFile(tracker_file_name)
  else:
    # Some of the components failed to upload. In this case, we want to exit
    # without deleting the objects.
    raise CommandException(
        'Some temporary components were not uploaded successfully. '
        'Please retry this upload.')
  elapsed_time = time.time() - start_time
  return elapsed_time, composed_object
def _ShouldDoParallelCompositeUpload(logger,
                                     allow_splitting,
                                     src_url,
                                     dst_url,
                                     file_size,
                                     gsutil_api,
                                     canned_acl=None,
                                     kms_keyname=None):
  """Determines whether parallel composite upload strategy should be used.

  Args:
    logger: for outputting log messages.
    allow_splitting: If false, then this function returns false.
    src_url: FileUrl corresponding to a local file.
    dst_url: CloudUrl corresponding to destination cloud object.
    file_size: The size of the source file, in bytes.
    gsutil_api: CloudApi that may be used to check if the destination bucket
        has any metadata attributes set that would discourage us from using
        parallel composite uploads.
    canned_acl: Canned ACL to apply to destination object, if any.
    kms_keyname: Cloud KMS key name to encrypt destination, if any.

  Returns:
    True iff a parallel upload should be performed on the source file.
  """
  # TODO(KMS, Compose): Until we ensure service-side that we have efficient
  # compose functionality over objects with distinct KMS encryption keys (CMEKs)
  # or distinct CSEKs, don't utilize parallel composite uploads.
  if kms_keyname:
    return False
  global suggested_sliced_transfers, suggested_sliced_transfers_lock
  parallel_composite_upload_threshold = HumanReadableToBytes(
      config.get('GSUtil', 'parallel_composite_upload_threshold',
                 DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD))
  # Every predicate except the size threshold; reused below both for the
  # one-time suggestion message and for the final decision.
  all_factors_but_size = (
      allow_splitting  # Don't split the pieces multiple times.
      and not src_url.IsStream()  # We can't partition streams.
      and not src_url.IsFifo()  # We can't partition fifos.
      and dst_url.scheme == 'gs'  # Compose is only for gs.
      and not canned_acl)  # TODO: Implement canned ACL support for compose.
  # Since parallel composite uploads are disabled by default, make user aware of
  # them.
  # TODO: Once compiled crcmod is being distributed by major Linux distributions
  # remove this check.
  if (all_factors_but_size and parallel_composite_upload_threshold == 0 and
      file_size >= PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD):
    # Emit the suggestion at most once per process (guarded by a lock since
    # this may run from multiple threads).
    with suggested_sliced_transfers_lock:
      if not suggested_sliced_transfers.get('suggested'):
        logger.info('\n'.join(
            textwrap.wrap(
                '==> NOTE: You are uploading one or more large file(s), which '
                'would run significantly faster if you enable parallel composite '
                'uploads. This feature can be enabled by editing the '
                '"parallel_composite_upload_threshold" value in your .boto '
                'configuration file. However, note that if you do this large files '
                'will be uploaded as '
                '`composite objects <https://cloud.google.com/storage/docs/composite-objects>`_,'  # pylint: disable=line-too-long
                'which means that any user who downloads such objects will need to '
                'have a compiled crcmod installed (see "gsutil help crcmod"). This '
                'is because without a compiled crcmod, computing checksums on '
                'composite objects is so slow that gsutil disables downloads of '
                'composite objects.')) + '\n')
        suggested_sliced_transfers['suggested'] = True
  if not (all_factors_but_size and parallel_composite_upload_threshold > 0 and
          file_size >= parallel_composite_upload_threshold):
    return False
  # TODO(KMS, Compose): Once GCS supports compose operations over
  # CMEK-encrypted objects, remove this check and return the boolean result of
  # the predicate above, minus the top-level "not" operator.
  #
  # To avoid unnecessary API calls, we only perform this check once we're sure
  # we'd otherwise do a parallel composite upload. To prevent gsutil from
  # attempting parallel composite uploads to a bucket with its defaultKmsKeyName
  # metadata attribute set, we check once for this attribute. We then cache that
  # result so that each copy operation can check there, rather than having to
  # do its own duplicate API call to check for this.
  #
  # Pre-emptive check; while this is susceptible to race conditions at the start
  # of this gsutil invocation, and not reliable in the case where we see
  # bucket_metadata_pcu_check has not been populated yet, it helps to avoid
  # the slowdown of acquiring a lock in the case where the variable HAS been
  # populated.
  global bucket_metadata_pcu_check, bucket_metadata_pcu_check_lock
  if bucket_metadata_pcu_check is not None:
    return bucket_metadata_pcu_check
  with bucket_metadata_pcu_check_lock:
    # Check again once we've attained the lock; it's possible that between the
    # time we checked above and now, another thread released the lock and
    # populated bucket_metadata_pcu_check.
    if bucket_metadata_pcu_check is not None:
      return bucket_metadata_pcu_check
    try:
      bucket = gsutil_api.GetBucket(dst_url.bucket_name,
                                    provider=dst_url.scheme,
                                    fields=['id', 'encryption'])
      # A bucket-level default KMS key disables parallel composite uploads.
      if bucket.encryption and bucket.encryption.defaultKmsKeyName:
        bucket_metadata_pcu_check = False
      else:
        bucket_metadata_pcu_check = True
    except ServiceException:
      # Treat an API call failure as if we checked and there was no key.
      bucket_metadata_pcu_check = True
    return bucket_metadata_pcu_check
def ExpandUrlToSingleBlr(url_str,
                         gsutil_api,
                         project_id,
                         treat_nonexistent_object_as_subdir=False,
                         logger=None):
  """Expands wildcard if present in url_str.

  Args:
    url_str: String representation of requested url.
    gsutil_api: gsutil Cloud API instance to use.
    project_id: project ID to use (for iterators).
    treat_nonexistent_object_as_subdir: indicates if should treat a non-existent
        object as a subdir.
    logger: logging.Logger instance to use for output. If None, the root Logger
        will be used.

  Returns:
    (exp_url, have_existing_dst_container)
    where exp_url is a StorageUrl
    and have_existing_dst_container is a bool indicating whether
    exp_url names an existing directory, bucket, or bucket subdirectory.
    In the case where we match a subdirectory AND an object, the
    object is returned.

  Raises:
    CommandException: if url_str matched more than 1 URL.
  """
  # Fix: logging.Logger() requires a positional `name` argument and would
  # raise TypeError; logging.getLogger() returns the root logger, matching
  # the documented fallback behavior.
  logger = logger or logging.getLogger()
  # Handle wildcarded url case.
  if ContainsWildcard(url_str):
    blr_expansion = list(
        CreateWildcardIterator(url_str,
                               gsutil_api,
                               project_id=project_id,
                               logger=logger))
    if len(blr_expansion) != 1:
      raise CommandException('Destination (%s) must match exactly 1 URL' %
                             url_str)
    blr = blr_expansion[0]
    # BLR is either an OBJECT, PREFIX, or BUCKET; the latter two represent
    # directories.
    return (StorageUrlFromString(blr.url_string), not blr.IsObject())
  storage_url = StorageUrlFromString(url_str)
  # Handle non-wildcarded URL.
  if storage_url.IsFileUrl():
    return (storage_url, storage_url.IsDirectory())
  # At this point we have a cloud URL.
  if storage_url.IsBucket():
    return (storage_url, True)
  # For object/prefix URLs, there are four cases that indicate the destination
  # is a cloud subdirectory; these are always considered to be an existing
  # container. Checking each case allows gsutil to provide Unix-like
  # destination folder semantics, but requires up to three HTTP calls, noted
  # below.
  # Case 1: If a placeholder object ending with '/' exists.
  if IsCloudSubdirPlaceholder(storage_url):
    return (storage_url, True)
  # Get version of object name without trailing slash for matching prefixes
  prefix = storage_url.object_name.rstrip('/')
  # HTTP call to make an eventually consistent check for a matching prefix,
  # _$folder$, or empty listing.
  expansion_empty = True
  list_iterator = gsutil_api.ListObjects(storage_url.bucket_name,
                                         prefix=prefix,
                                         delimiter='/',
                                         provider=storage_url.scheme,
                                         fields=['prefixes', 'items/name'])
  for obj_or_prefix in list_iterator:
    # To conserve HTTP calls for the common case, we make a single listing
    # that covers prefixes and object names. Listing object names covers the
    # _$folder$ case and the nonexistent-object-as-subdir case. However, if
    # there are many existing objects for which the target URL is an exact
    # prefix, this listing could be paginated and span multiple HTTP calls.
    # If this case becomes common, we could heurestically abort the
    # listing operation after the first page of results and just query for the
    # _$folder$ object directly using GetObjectMetadata.
    # TODO: currently the ListObjects iterator yields objects before prefixes,
    # because ls depends on this iteration order for proper display. We could
    # save up to 1ms in determining that a destination is a prefix if we had a
    # way to yield prefixes first, but this would require poking a major hole
    # through the abstraction to control this iteration order.
    expansion_empty = False
    if (obj_or_prefix.datatype == CloudApi.CsObjectOrPrefixType.PREFIX and
        obj_or_prefix.data == prefix + '/'):
      # Case 2: If there is a matching prefix when listing the destination URL.
      return (storage_url, True)
    elif (obj_or_prefix.datatype == CloudApi.CsObjectOrPrefixType.OBJECT and
          obj_or_prefix.data.name == storage_url.object_name + '_$folder$'):
      # Case 3: If a placeholder object matching destination + _$folder$
      # exists.
      return (storage_url, True)
  # Case 4: If no objects/prefixes matched, and nonexistent objects should be
  # treated as subdirectories.
  return (storage_url, expansion_empty and treat_nonexistent_object_as_subdir)
def FixWindowsNaming(src_url, dst_url):
  """Translates Windows pathnames to cloud pathnames.

  Rewrites the destination URL built by ConstructDstUrl().

  Args:
    src_url: Source StorageUrl to be copied.
    dst_url: The destination StorageUrl built by ConstructDstUrl().

  Returns:
    StorageUrl to use for copy.
  """
  needs_translation = (src_url.IsFileUrl() and src_url.delim == '\\' and
                       dst_url.IsCloudUrl())
  if not needs_translation:
    return dst_url
  # Replace every literal backslash with a forward slash and rebuild the URL.
  translated = dst_url.url_string.replace('\\', '/')
  return StorageUrlFromString(translated)
def SrcDstSame(src_url, dst_url):
  """Checks if src_url and dst_url represent the same object or file.

  We don't handle anything about hard or symbolic links.

  Args:
    src_url: Source StorageUrl.
    dst_url: Destination StorageUrl.

  Returns:
    Bool indicator.
  """
  both_local = src_url.IsFileUrl() and dst_url.IsFileUrl()
  if both_local:
    # Normalize (e.g. a/b/./c -> a/b/c) so equivalent paths compare equal.
    normalize = os.path.normpath
    return normalize(src_url.object_name) == normalize(dst_url.object_name)
  # Cloud URLs must match on both the full URL string and the generation.
  same_string = src_url.url_string == dst_url.url_string
  same_generation = src_url.generation == dst_url.generation
  return same_string and same_generation
def _LogCopyOperation(logger, src_url, dst_url, dst_obj_metadata):
  """Logs copy operation, including Content-Type if appropriate.

  Args:
    logger: logger instance to use for output.
    src_url: Source StorageUrl.
    dst_url: Destination StorageUrl.
    dst_obj_metadata: Object-specific metadata that should be overidden during
        the copy.
  """
  has_content_type = (dst_url.IsCloudUrl() and dst_obj_metadata and
                      dst_obj_metadata.contentType)
  content_type_msg = (' [Content-Type=%s]' % dst_obj_metadata.contentType
                      if has_content_type else '')
  is_pipe_like = src_url.IsFileUrl() and (src_url.IsStream() or
                                          src_url.IsFifo())
  if is_pipe_like:
    # Streams/fifos have no meaningful path to print.
    source_desc = '<STDIN>' if src_url.IsStream() else 'named pipe'
    logger.info('Copying from %s%s...', source_desc, content_type_msg)
  else:
    logger.info('Copying %s%s...', src_url.url_string, content_type_msg)
# pylint: disable=undefined-variable
def _CopyObjToObjInTheCloud(src_url,
                            src_obj_metadata,
                            dst_url,
                            dst_obj_metadata,
                            preconditions,
                            gsutil_api,
                            decryption_key=None):
  """Performs copy-in-the cloud from specified src to dest object.

  Args:
    src_url: Source CloudUrl.
    src_obj_metadata: Metadata for source object; must include etag and size.
    dst_url: Destination CloudUrl.
    dst_obj_metadata: Object-specific metadata that should be overidden during
        the copy.
    preconditions: Preconditions to use for the copy.
    gsutil_api: gsutil Cloud API instance to use for the copy.
    decryption_key: Base64-encoded decryption key for the source object, if any.

  Returns:
    (elapsed_time, bytes_transferred, dst_url with generation,
    md5 hash of destination) excluding overhead like initial GET.

  Raises:
    CommandException: if errors encountered.
  """
  decryption_keywrapper = CryptoKeyWrapperFromKey(decryption_key)
  encryption_keywrapper = GetEncryptionKeyWrapper(config)
  start_time = time.time()
  progress_callback = FileProgressCallbackHandler(gsutil_api.status_queue,
                                                  src_url=src_url,
                                                  dst_url=dst_url,
                                                  operation_name='Copying').call
  # Tests may inject a canned (pickled) progress callback via a file.
  if global_copy_helper_opts.test_callback_file:
    with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
      progress_callback = pickle.loads(test_fp.read()).call
  dst_obj = gsutil_api.CopyObject(src_obj_metadata,
                                  dst_obj_metadata,
                                  src_generation=src_url.generation,
                                  canned_acl=global_copy_helper_opts.canned_acl,
                                  preconditions=preconditions,
                                  progress_callback=progress_callback,
                                  provider=dst_url.scheme,
                                  fields=UPLOAD_RETURN_FIELDS,
                                  decryption_tuple=decryption_keywrapper,
                                  encryption_tuple=encryption_keywrapper)
  end_time = time.time()
  # Attach the newly created object's generation to the returned URL.
  result_url = dst_url.Clone()
  result_url.generation = GenerationFromUrlAndString(result_url,
                                                     dst_obj.generation)
  # Report completion on the global status queue.
  PutToQueueWithTimeout(
      gsutil_api.status_queue,
      FileMessage(src_url,
                  dst_url,
                  end_time,
                  message_type=FileMessage.FILE_CLOUD_COPY,
                  size=src_obj_metadata.size,
                  finished=True))
  return (end_time - start_time, src_obj_metadata.size, result_url,
          dst_obj.md5Hash)
def _SetContentTypeFromFile(src_url, dst_obj_metadata):
"""Detects and sets Content-Type if src_url names a local file.
Args:
src_url: Source StorageUrl.
dst_obj_metadata: Object-specific metadata that should be overidden during
the copy.
"""
# contentType == '' if user requested default type.
if (dst_obj_metadata.contentType is None and src_url.IsFileUrl() and
not src_url.IsStream() and not src_url.IsFifo()):
# Only do content type recognition if src_url is a file. Object-to-object
# copies with no -h Content-Type specified re-use the content type of the
# source object.
object_name = src_url.object_name
content_type = None
# Streams (denoted by '-') are expected to be 'application/octet-stream'
# and 'file' would partially consume them.
if object_name != '-':
real_file_path = os.path.realpath(object_name)
if config.getbool('GSUtil', 'use_magicfile', False) and not IS_WINDOWS:
try:
p = subprocess.Popen(['file', '-b', '--mime', real_file_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = p.communicate()
p.stdout.close()
p.stderr.close()
if p.returncode != 0 or error:
raise CommandException(
'Encountered error running "file -b --mime %s" '
'(returncode=%d).\n%s' % (real_file_path, p.returncode, error))
# Parse output by removing line delimiter
content_type = output.rstrip()
content_type = six.ensure_str(content_type)
except OSError as e: # 'file' executable may not always be present.
raise CommandException(
'Encountered OSError running "file -b --mime %s"\n%s' %
(real_file_path, e))
else:
content_type = mimetypes.guess_type(real_file_path)[0]
if not content_type:
content_type = DEFAULT_CONTENT_TYPE
dst_obj_metadata.contentType = content_type
# pylint: disable=undefined-variable
def _UploadFileToObjectNonResumable(src_url,
                                    src_obj_filestream,
                                    src_obj_size,
                                    dst_url,
                                    dst_obj_metadata,
                                    preconditions,
                                    gsutil_api,
                                    gzip_encoded=False):
  """Uploads the file using a non-resumable strategy.

  This function does not support component transfers.

  Args:
    src_url: Source StorageUrl to upload.
    src_obj_filestream: File pointer to uploadable bytes.
    src_obj_size (int or None): Size of the source object.
    dst_url: Destination StorageUrl for the upload.
    dst_obj_metadata: Metadata for the target object.
    preconditions: Preconditions for the upload, if any.
    gsutil_api: gsutil Cloud API instance to use for the upload.
    gzip_encoded: Whether to use gzip transport encoding for the upload.

  Returns:
    Elapsed upload time, uploaded Object with generation, md5, and size fields
    populated.
  """
  progress_callback = FileProgressCallbackHandler(
      gsutil_api.status_queue,
      src_url=src_url,
      dst_url=dst_url,
      operation_name='Uploading').call
  # Tests may inject a canned (pickled) progress callback via a file.
  if global_copy_helper_opts.test_callback_file:
    with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
      progress_callback = pickle.loads(test_fp.read()).call
  start_time = time.time()
  encryption_keywrapper = GetEncryptionKeyWrapper(config)
  # Both upload styles share these arguments; only the non-streaming call
  # additionally passes the known source size.
  upload_kwargs = dict(
      object_metadata=dst_obj_metadata,
      canned_acl=global_copy_helper_opts.canned_acl,
      preconditions=preconditions,
      progress_callback=progress_callback,
      encryption_tuple=encryption_keywrapper,
      provider=dst_url.scheme,
      fields=UPLOAD_RETURN_FIELDS,
      gzip_encoded=gzip_encoded)
  if src_url.IsStream() or src_url.IsFifo():
    # TODO: gsutil-beta: Provide progress callbacks for streaming uploads.
    upload_func = gsutil_api.UploadObjectStreaming
  else:
    upload_func = gsutil_api.UploadObject
    upload_kwargs['size'] = src_obj_size
  uploaded_object = upload_func(src_obj_filestream, **upload_kwargs)
  elapsed_time = time.time() - start_time
  return elapsed_time, uploaded_object
# pylint: disable=undefined-variable
def _UploadFileToObjectResumable(src_url,
                                 src_obj_filestream,
                                 src_obj_size,
                                 dst_url,
                                 dst_obj_metadata,
                                 preconditions,
                                 gsutil_api,
                                 logger,
                                 is_component=False,
                                 gzip_encoded=False):
  """Uploads the file using a resumable strategy.

  Args:
    src_url: Source FileUrl to upload.  Must not be a stream.
    src_obj_filestream: File pointer to uploadable bytes.
    src_obj_size (int or None): Size of the source object.
    dst_url: Destination StorageUrl for the upload.
    dst_obj_metadata: Metadata for the target object.
    preconditions: Preconditions for the upload, if any.
    gsutil_api: gsutil Cloud API instance to use for the upload.
    logger: for outputting log messages.
    is_component: indicates whether this is a single component or whole file.
    gzip_encoded: Whether to use gzip transport encoding for the upload.

  Returns:
    Elapsed upload time, uploaded Object with generation, md5, and size fields
    populated.
  """
  tracker_file_name = GetTrackerFilePath(
      dst_url, TrackerFileType.UPLOAD,
      gsutil_api.GetApiSelector(provider=dst_url.scheme))
  encryption_keywrapper = GetEncryptionKeyWrapper(config)
  encryption_key_sha256 = (
      encryption_keywrapper.crypto_key_sha256.decode('ascii')
      if encryption_keywrapper and encryption_keywrapper.crypto_key_sha256 else
      None)

  def _UploadTrackerCallback(serialization_data):
    """Creates a new tracker file for starting an upload from scratch.

    This function is called by the gsutil Cloud API implementation and the
    the serialization data is implementation-specific.

    Args:
      serialization_data: Serialization data used in resuming the upload.
    """
    tracker_file = None
    try:
      tracker_file = open(tracker_file_name, 'w')
      tracker_data = {
          ENCRYPTION_UPLOAD_TRACKER_ENTRY: encryption_key_sha256,
          SERIALIZATION_UPLOAD_TRACKER_ENTRY: str(serialization_data)
      }
      tracker_file.write(json.dumps(tracker_data))
    except IOError as e:
      RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)
    finally:
      if tracker_file:
        tracker_file.close()

  # This contains the upload URL, which will uniquely identify the
  # destination object.
  tracker_data = GetUploadTrackerData(
      tracker_file_name, logger, encryption_key_sha256=encryption_key_sha256)
  if tracker_data:
    logger.info('Resuming upload for %s', src_url.url_string)
  retryable = True
  component_num = _GetComponentNumber(dst_url) if is_component else None
  progress_callback = FileProgressCallbackHandler(
      gsutil_api.status_queue,
      src_url=src_url,
      component_num=component_num,
      dst_url=dst_url,
      operation_name='Uploading').call
  # Tests may inject a canned (pickled) progress callback via a file.
  if global_copy_helper_opts.test_callback_file:
    with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
      progress_callback = pickle.loads(test_fp.read()).call
  start_time = time.time()
  num_startover_attempts = 0
  # This loop causes us to retry when the resumable upload failed in a way that
  # requires starting over with a new upload ID. Retries within a single upload
  # ID within the current process are handled in
  # gsutil_api.UploadObjectResumable, and retries within a single upload ID
  # spanning processes happens if an exception occurs not caught below (which
  # will leave the tracker file in place, and cause the upload ID to be reused
  # the next time the user runs gsutil and attempts the same upload).
  while retryable:
    try:
      uploaded_object = gsutil_api.UploadObjectResumable(
          src_obj_filestream,
          object_metadata=dst_obj_metadata,
          canned_acl=global_copy_helper_opts.canned_acl,
          preconditions=preconditions,
          provider=dst_url.scheme,
          size=src_obj_size,
          serialization_data=tracker_data,
          encryption_tuple=encryption_keywrapper,
          fields=UPLOAD_RETURN_FIELDS,
          tracker_callback=_UploadTrackerCallback,
          progress_callback=progress_callback,
          gzip_encoded=gzip_encoded)
      retryable = False
    except ResumableUploadStartOverException as e:
      logger.info('Caught ResumableUploadStartOverException for upload of %s.' %
                  src_url.url_string)
      # This can happen, for example, if the server sends a 410 response code.
      # In that case the current resumable upload ID can't be reused, so delete
      # the tracker file and try again up to max retries.
      num_startover_attempts += 1
      retryable = (num_startover_attempts < GetNumRetries())
      if not retryable:
        raise
      # If the server sends a 404 response code, then the upload should only
      # be restarted if it was the object (and not the bucket) that was missing.
      try:
        logger.info('Checking that bucket %s exists before retrying upload...' %
                    dst_obj_metadata.bucket)
        gsutil_api.GetBucket(dst_obj_metadata.bucket, provider=dst_url.scheme)
      except AccessDeniedException:
        # Proceed with deleting the tracker file in the event that the bucket
        # exists, but the user does not have permission to view its metadata.
        pass
      except NotFoundException:
        raise
      finally:
        # The old upload ID is unusable either way; drop its tracker file so
        # the next attempt starts with a fresh upload ID.
        DeleteTrackerFile(tracker_file_name)
        logger.info('Deleted tracker file %s for resumable upload of %s before '
                    'retrying.' % (tracker_file_name, src_url.url_string))
      logger.info(
          'Restarting upload of %s from scratch (retry #%d) after exception '
          'indicating we need to start over with a new resumable upload ID: %s'
          % (src_url.url_string, num_startover_attempts, e))
      tracker_data = None
      # Rewind the source stream so the full object is re-sent.
      src_obj_filestream.seek(0)
      # Reset the progress callback handler.
      component_num = _GetComponentNumber(dst_url) if is_component else None
      progress_callback = FileProgressCallbackHandler(
          gsutil_api.status_queue,
          src_url=src_url,
          component_num=component_num,
          dst_url=dst_url,
          operation_name='Uploading').call
      # Report the retryable error to the global status queue.
      PutToQueueWithTimeout(
          gsutil_api.status_queue,
          RetryableErrorMessage(e,
                                time.time(),
                                num_retries=num_startover_attempts))
      # Exponential backoff with jitter, capped at the max retry delay.
      time.sleep(
          min(random.random() * (2**num_startover_attempts),
              GetMaxRetryDelay()))
    except ResumableUploadAbortException:
      retryable = False
      raise
    finally:
      # On success or a non-retryable failure, the tracker file is stale.
      if not retryable:
        DeleteTrackerFile(tracker_file_name)
  end_time = time.time()
  elapsed_time = end_time - start_time
  return (elapsed_time, uploaded_object)
def _SelectUploadCompressionStrategy(object_name,
is_component=False,
gzip_exts=False,
gzip_encoded=False):
"""Selects how an upload should be compressed.
This is a helper function for _UploadFileToObject.
Args:
object_name: The object name of the source FileUrl.
is_component: indicates whether this is a single component or whole file.
gzip_exts: List of file extensions to gzip prior to upload, if any.
If gzip_exts is GZIP_ALL_FILES, gzip all files.
gzip_encoded: Whether to use gzip transport encoding for the upload. Used
in conjunction with gzip_exts for selecting which files will be
encoded. Streaming files compressed is only supported on the JSON GCS
API.
Returns:
A tuple: (If the file should be gzipped locally, if the file should be gzip
transport encoded).
"""
zipped_file = False
gzip_encoded_file = False
fname_parts = object_name.split('.')
# If gzip_encoded and is_component are marked as true, the file was already
# filtered through the original gzip_exts filter and we must compress the
# component via gzip transport encoding.
if gzip_encoded and is_component:
gzip_encoded_file = True
elif (gzip_exts == GZIP_ALL_FILES or
(gzip_exts and len(fname_parts) > 1 and fname_parts[-1] in gzip_exts)):
zipped_file = not gzip_encoded
gzip_encoded_file = gzip_encoded
return zipped_file, gzip_encoded_file
def _ApplyZippedUploadCompression(src_url, src_obj_filestream, src_obj_size,
                                  logger):
  """Compresses a to-be-uploaded local file to save bandwidth.
  This is a helper function for _UploadFileToObject.
  Args:
    src_url: Source FileUrl.
    src_obj_filestream: Read stream of the source file - will be consumed
      and closed.
    src_obj_size (int or None): Size of the source file.
    logger: for outputting log messages.
  Returns:
    StorageUrl path to compressed file, read stream of the compressed file,
    compressed file size.
  Raises:
    CommandException: if the source is a stream/FIFO (unsupported), or if
      there is inadequate temp space to hold the compressed copy.
  """
  # TODO: Compress using a streaming model as opposed to all at once here.
  if src_obj_size is not None and src_obj_size >= MIN_SIZE_COMPUTE_LOGGING:
    logger.info('Compressing %s (to tmp)...', src_url)
  (gzip_fh, gzip_path) = tempfile.mkstemp()
  gzip_fp = None
  try:
    # Check for temp space. Assume the compressed object is at most 2x
    # the size of the object (normally should compress to smaller than
    # the object)
    if src_url.IsStream() or src_url.IsFifo():
      # TODO: Support streaming gzip uploads.
      # https://github.com/GoogleCloudPlatform/gsutil/issues/364
      raise CommandException(
          'gzip compression is not currently supported on streaming uploads. '
          'Remove the compression flag or save the streamed output '
          'temporarily to a file before uploading.')
    if src_obj_size is not None and (CheckFreeSpace(gzip_path) <
                                     2 * int(src_obj_size)):
      raise CommandException('Inadequate temp space available to compress '
                             '%s. See the CHANGING TEMP DIRECTORIES section '
                             'of "gsutil help cp" for more info.' % src_url)
    compression_level = config.getint('GSUtil', 'gzip_compression_level',
                                      DEFAULT_GZIP_COMPRESSION_LEVEL)
    gzip_fp = gzip.open(gzip_path, 'wb', compresslevel=compression_level)
    # Copy the source through the gzip stream in fixed-size chunks to bound
    # memory usage.
    data = src_obj_filestream.read(GZIP_CHUNK_SIZE)
    while data:
      gzip_fp.write(data)
      data = src_obj_filestream.read(GZIP_CHUNK_SIZE)
  finally:
    if gzip_fp:
      gzip_fp.close()
    # mkstemp returned a raw OS-level fd that is separate from gzip_fp's
    # handle; close it as well so the descriptor is not leaked.
    os.close(gzip_fh)
    src_obj_filestream.close()
  gzip_size = os.path.getsize(gzip_path)
  compressed_filestream = open(gzip_path, 'rb')
  return StorageUrlFromString(gzip_path), compressed_filestream, gzip_size
def _DelegateUploadFileToObject(upload_delegate, upload_url, upload_stream,
zipped_file, gzip_encoded_file,
parallel_composite_upload, logger):
"""Handles setup and tear down logic for uploads.
This is a helper function for _UploadFileToObject.
Args:
upload_delegate: Function that handles uploading the file.
upload_url: StorageURL path to the file.
upload_stream: Read stream of the file being uploaded. This will be closed
after the upload.
zipped_file: Flag for if the file is locally compressed prior to calling
this function. If true, the local temporary file is deleted after the
upload.
gzip_encoded_file: Flag for if the file will be uploaded with the gzip
transport encoding. If true, a lock is used to limit resource usage.
parallel_composite_upload: Set to true if this upload represents a
top-level parallel composite upload (not an upload of a component). If
true, resource locking is skipped.
logger: For outputting log messages.
Returns:
The elapsed upload time, the uploaded object.
"""
elapsed_time = None
uploaded_object = None
try:
# Parallel transport compressed uploads use a signifcant amount of memory.
# The number of threads that may run concurrently are restricted as a
# result. Parallel composite upload's don't actually upload data, but
# instead fork for each component and calling _UploadFileToObject
# individually. The parallel_composite_upload flag is false for the actual
# upload invocation.
if gzip_encoded_file and not parallel_composite_upload:
with gslib.command.concurrent_compressed_upload_lock:
elapsed_time, uploaded_object = upload_delegate()
else:
elapsed_time, uploaded_object = upload_delegate()
finally:
if zipped_file:
try:
os.unlink(upload_url.object_name)
# Windows sometimes complains the temp file is locked when you try to
# delete it.
except Exception: # pylint: disable=broad-except
logger.warning(
'Could not delete %s. This can occur in Windows because the '
'temporary file is still locked.', upload_url.object_name)
# In the zipped_file case, this is the gzip stream. When the gzip stream is
# created, the original source stream is closed in
# _ApplyZippedUploadCompression. This means that we do not have to
# explicitly close the source stream here in the zipped_file case.
upload_stream.close()
return elapsed_time, uploaded_object
def _UploadFileToObject(src_url,
                        src_obj_filestream,
                        src_obj_size,
                        dst_url,
                        dst_obj_metadata,
                        preconditions,
                        gsutil_api,
                        logger,
                        command_obj,
                        copy_exception_handler,
                        gzip_exts=None,
                        allow_splitting=True,
                        is_component=False,
                        gzip_encoded=False):
  """Uploads a local file to an object.
  Args:
    src_url: Source FileUrl.
    src_obj_filestream: Read stream of the source file to be read and closed.
    src_obj_size (int or None): Size of the source file.
    dst_url: Destination CloudUrl.
    dst_obj_metadata: Metadata to be applied to the destination object.
    preconditions: Preconditions to use for the copy.
    gsutil_api: gsutil Cloud API to use for the copy.
    logger: for outputting log messages.
    command_obj: command object for use in Apply in parallel composite uploads.
    copy_exception_handler: For handling copy exceptions during Apply.
    gzip_exts: List of file extensions to gzip prior to upload, if any.
      If gzip_exts is GZIP_ALL_FILES, gzip all files.
    allow_splitting: Whether to allow the file to be split into component
      pieces for an parallel composite upload.
    is_component: indicates whether this is a single component or whole file.
    gzip_encoded: Whether to use gzip transport encoding for the upload. Used
      in conjunction with gzip_exts for selecting which files will be
      encoded. Streaming files compressed is only supported on the JSON GCS
      API.
  Returns:
    (elapsed_time, bytes_transferred, dst_url with generation,
    md5 hash of destination) excluding overhead like initial GET.
  Raises:
    CommandException: if errors encountered.
  """
  # Fall back to the configured default content language when the caller
  # did not set one on the destination metadata.
  if not dst_obj_metadata or not dst_obj_metadata.contentLanguage:
    content_language = config.get_value('GSUtil', 'content_language')
    if content_language:
      dst_obj_metadata.contentLanguage = content_language
  upload_url = src_url
  upload_stream = src_obj_filestream
  upload_size = src_obj_size
  zipped_file, gzip_encoded_file = _SelectUploadCompressionStrategy(
      src_url.object_name, is_component, gzip_exts, gzip_encoded)
  # The component's parent already printed this debug message.
  if gzip_encoded_file and not is_component:
    logger.debug('Using compressed transport encoding for %s.', src_url)
  elif zipped_file:
    # Local compression replaces the upload source with a temporary gzip
    # file; url/stream/size all now refer to the compressed copy.
    upload_url, upload_stream, upload_size = _ApplyZippedUploadCompression(
        src_url, src_obj_filestream, src_obj_size, logger)
    dst_obj_metadata.contentEncoding = 'gzip'
    # If we're sending an object with gzip encoding, it's possible it also
    # has an incompressible content type. Google Cloud Storage will remove
    # the top layer of compression when serving the object, which would cause
    # the served content not to match the CRC32C/MD5 hashes stored and make
    # integrity checking impossible. Therefore we set cache control to
    # no-transform to ensure it is served in its original form. The caveat is
    # that to read this object, other clients must then support
    # accept-encoding:gzip.
    if not dst_obj_metadata.cacheControl:
      dst_obj_metadata.cacheControl = 'no-transform'
    elif 'no-transform' not in dst_obj_metadata.cacheControl.lower():
      dst_obj_metadata.cacheControl += ',no-transform'
  if not is_component:
    PutToQueueWithTimeout(
        gsutil_api.status_queue,
        FileMessage(upload_url,
                    dst_url,
                    time.time(),
                    message_type=FileMessage.FILE_UPLOAD,
                    size=upload_size,
                    finished=False))
  elapsed_time = None
  uploaded_object = None
  hash_algs = GetUploadHashAlgs()
  digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
  parallel_composite_upload = _ShouldDoParallelCompositeUpload(
      logger,
      allow_splitting,
      upload_url,
      dst_url,
      src_obj_size,
      gsutil_api,
      canned_acl=global_copy_helper_opts.canned_acl,
      kms_keyname=dst_obj_metadata.kmsKeyName)
  # Streams/FIFOs and files below the resumable threshold skip the
  # resumable upload protocol.
  non_resumable_upload = (
      (0 if upload_size is None else upload_size) < ResumableThreshold() or
      src_url.IsStream() or src_url.IsFifo())
  if ((src_url.IsStream() or src_url.IsFifo()) and
      gsutil_api.GetApiSelector(provider=dst_url.scheme) == ApiSelector.JSON):
    orig_stream = upload_stream
    # Add limited seekable properties to the stream via buffering.
    upload_stream = ResumableStreamingJsonUploadWrapper(
        orig_stream, GetJsonResumableChunkSize())
  if not parallel_composite_upload and len(hash_algs):
    # Parallel composite uploads calculate hashes per-component in subsequent
    # calls to this function, but the composition of the final object is a
    # cloud-only operation.
    wrapped_filestream = HashingFileUploadWrapper(upload_stream, digesters,
                                                 hash_algs, upload_url, logger)
  else:
    wrapped_filestream = upload_stream
  # The three zero-argument closures below capture the chosen stream/metadata
  # so _DelegateUploadFileToObject can run whichever strategy is selected.
  def CallParallelCompositeUpload():
    return _DoParallelCompositeUpload(upload_stream,
                                      upload_url,
                                      dst_url,
                                      dst_obj_metadata,
                                      global_copy_helper_opts.canned_acl,
                                      upload_size,
                                      preconditions,
                                      gsutil_api,
                                      command_obj,
                                      copy_exception_handler,
                                      logger,
                                      gzip_encoded=gzip_encoded_file)
  def CallNonResumableUpload():
    return _UploadFileToObjectNonResumable(upload_url,
                                           wrapped_filestream,
                                           upload_size,
                                           dst_url,
                                           dst_obj_metadata,
                                           preconditions,
                                           gsutil_api,
                                           gzip_encoded=gzip_encoded_file)
  def CallResumableUpload():
    return _UploadFileToObjectResumable(upload_url,
                                        wrapped_filestream,
                                        upload_size,
                                        dst_url,
                                        dst_obj_metadata,
                                        preconditions,
                                        gsutil_api,
                                        logger,
                                        is_component=is_component,
                                        gzip_encoded=gzip_encoded_file)
  # Strategy precedence: parallel composite, then non-resumable, then
  # resumable.
  if parallel_composite_upload:
    delegate = CallParallelCompositeUpload
  elif non_resumable_upload:
    delegate = CallNonResumableUpload
  else:
    delegate = CallResumableUpload
  elapsed_time, uploaded_object = _DelegateUploadFileToObject(
      delegate, upload_url, upload_stream, zipped_file, gzip_encoded_file,
      parallel_composite_upload, logger)
  if not parallel_composite_upload:
    try:
      digests = _CreateDigestsFromDigesters(digesters)
      _CheckHashes(logger,
                   dst_url,
                   uploaded_object,
                   src_url.object_name,
                   digests,
                   is_upload=True)
    except HashMismatchException:
      # Optionally keep the corrupted object under a rename for debugging
      # before deleting the destination.
      if _RENAME_ON_HASH_MISMATCH:
        corrupted_obj_metadata = apitools_messages.Object(
            name=dst_obj_metadata.name,
            bucket=dst_obj_metadata.bucket,
            etag=uploaded_object.etag)
        dst_obj_metadata.name = (dst_url.object_name +
                                 _RENAME_ON_HASH_MISMATCH_SUFFIX)
        gsutil_api.CopyObject(corrupted_obj_metadata,
                              dst_obj_metadata,
                              provider=dst_url.scheme)
      # If the digest doesn't match, delete the object.
      gsutil_api.DeleteObject(dst_url.bucket_name,
                              dst_url.object_name,
                              generation=uploaded_object.generation,
                              provider=dst_url.scheme)
      raise
  result_url = dst_url.Clone()
  result_url.generation = uploaded_object.generation
  result_url.generation = GenerationFromUrlAndString(result_url,
                                                     uploaded_object.generation)
  if not is_component:
    PutToQueueWithTimeout(
        gsutil_api.status_queue,
        FileMessage(upload_url,
                    dst_url,
                    time.time(),
                    message_type=FileMessage.FILE_UPLOAD,
                    size=upload_size,
                    finished=True))
  return (elapsed_time, uploaded_object.size, result_url,
          uploaded_object.md5Hash)
def _GetDownloadFile(dst_url, src_obj_metadata, logger):
  """Creates a new download file, and deletes the file that will be replaced.

  Names and creates a temporary file for this download. Also, if there is an
  existing file at the path where this file will be placed after the download
  is completed, that file will be deleted.

  Args:
    dst_url: Destination FileUrl.
    src_obj_metadata: Metadata from the source object.
    logger: for outputting log messages.

  Returns:
    (download_file_name, need_to_unzip)
    download_file_name: The name of the temporary file to which the object
        will be downloaded.
    need_to_unzip: If true, a temporary zip file was used and must be
        uncompressed as part of validation.
  """
  parent_dir = os.path.dirname(dst_url.object_name)
  if parent_dir and not os.path.exists(parent_dir):
    # Another `gsutil -m cp` worker may create the directory between the
    # existence check and makedirs, so tolerate EEXIST.
    try:
      os.makedirs(parent_dir)
    except OSError as e:
      if e.errno != errno.EEXIST:
        raise
  # Gzip-encoded objects are downloaded to a temp gzip file and unzipped
  # later. If the server sends decompressed bytes for an object stored
  # compressed (double-compressed case), the hash check will fail.
  need_to_unzip = bool(ObjectIsGzipEncoded(src_obj_metadata))
  if need_to_unzip:
    download_file_name = _GetDownloadTempZipFileName(dst_url)
    logger.info('Downloading to temp gzip filename %s', download_file_name)
  else:
    download_file_name = _GetDownloadTempFileName(dst_url)
  # Delete any file already occupying the permanent destination so we don't
  # need disk space for both copies at once.
  if os.path.exists(dst_url.object_name):
    os.unlink(dst_url.object_name)
  # Downloads open the temporary file in r+b mode, which requires it to
  # exist; touch it here.
  with open(download_file_name, 'ab'):
    pass
  return download_file_name, need_to_unzip
def _ShouldDoSlicedDownload(download_strategy, src_obj_metadata,
                            allow_splitting, logger):
  """Determines whether the sliced download strategy should be used.

  Args:
    download_strategy: CloudApi download strategy.
    src_obj_metadata: Metadata from the source object.
    allow_splitting: If false, then this function returns false.
    logger: logging.Logger for log message output.

  Returns:
    True iff a sliced download should be performed on the source file.
  """
  slice_threshold = HumanReadableToBytes(
      config.get('GSUtil', 'sliced_object_download_threshold',
                 DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD))
  component_limit = config.getint(
      'GSUtil', 'sliced_object_download_max_components',
      DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
  # Slicing is only acceptable when integrity can still be verified
  # afterwards: either crc32c can be computed per-slice and concatenated
  # (requires the compiled crcmod extension), or hash checking is disabled.
  hash_policy = config.get('GSUtil', 'check_hashes',
                           CHECK_HASH_IF_FAST_ELSE_FAIL)
  can_hash_slices = src_obj_metadata.crc32c and UsingCrcmodExtension()
  hashing_okay = can_hash_slices or hash_policy == CHECK_HASH_NEVER
  use_slice = (
      allow_splitting and
      download_strategy is not CloudApi.DownloadStrategy.ONE_SHOT and
      component_limit > 1 and hashing_okay and slice_threshold > 0 and
      src_obj_metadata.size >= slice_threshold)
  if (not use_slice and
      src_obj_metadata.size >= PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD and
      not UsingCrcmodExtension() and hash_policy != CHECK_HASH_NEVER):
    # Suggest (once per process) installing compiled crcmod, which would
    # unlock sliced downloads for files this large.
    with suggested_sliced_transfers_lock:
      if not suggested_sliced_transfers.get('suggested'):
        logger.info('\n'.join(
            textwrap.wrap(
                '==> NOTE: You are downloading one or more large file(s), which '
                'would run significantly faster if you enabled sliced object '
                'downloads. This feature is enabled by default but requires that '
                'compiled crcmod be installed (see "gsutil help crcmod").')) +
                    '\n')
        suggested_sliced_transfers['suggested'] = True
  return use_slice
def _PerformSlicedDownloadObjectToFile(cls, args, thread_state=None):
  """Function argument to Apply for performing sliced downloads.
  Args:
    cls: Calling Command class.
    args: PerformSlicedDownloadObjectToFileArgs tuple describing the target.
    thread_state: gsutil Cloud API instance to use for the operation.
  Returns:
    PerformSlicedDownloadReturnValues named-tuple filled with:
    component_num: The component number for this download.
    crc32c: CRC32C hash value (integer) of the downloaded bytes.
    bytes_transferred: The number of bytes transferred, potentially less
                       than the component size if the download was resumed.
    component_total_size: The number of bytes corresponding to the whole
                          component size, potentially more than bytes_transferred
                          if the download was resumed.
  """
  gsutil_api = GetCloudApiInstance(cls, thread_state=thread_state)
  # Deserialize the picklable object metadata.
  src_obj_metadata = protojson.decode_message(apitools_messages.Object,
                                              args.src_obj_metadata_json)
  hash_algs = GetDownloadHashAlgs(cls.logger,
                                  consider_crc32c=src_obj_metadata.crc32c)
  digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
  # Each component always uses the resumable strategy so a partially
  # downloaded byte range can be picked up where it left off.
  (bytes_transferred, server_encoding) = (_DownloadObjectToFileResumable(
      args.src_url,
      src_obj_metadata,
      args.dst_url,
      args.download_file_name,
      gsutil_api,
      cls.logger,
      digesters,
      component_num=args.component_num,
      start_byte=args.start_byte,
      end_byte=args.end_byte,
      decryption_key=args.decryption_key))
  # crc32c stays None when crc32c hashing was not selected above.
  crc32c_val = None
  if 'crc32c' in digesters:
    crc32c_val = digesters['crc32c'].crcValue
  return PerformSlicedDownloadReturnValues(args.component_num, crc32c_val,
                                           bytes_transferred,
                                           args.end_byte - args.start_byte + 1,
                                           server_encoding)
def _MaintainSlicedDownloadTrackerFiles(src_obj_metadata, dst_url,
                                        download_file_name, logger,
                                        api_selector, num_components):
  """Maintains sliced download tracker files in order to permit resumability.
  Reads or creates a sliced download tracker file representing this object
  download. Upon an attempt at cross-process resumption, the contents of the
  sliced download tracker file are verified to make sure a resumption is
  possible and appropriate. In the case that a resumption should not be
  attempted, existing component tracker files are deleted (to prevent child
  processes from attempting resumption), and a new sliced download tracker
  file is created.
  Args:
    src_obj_metadata: Metadata from the source object. Must include etag and
                      generation.
    dst_url: Destination FileUrl.
    download_file_name: Temporary file name to be used for the download.
    logger: for outputting log messages.
    api_selector: The Cloud API implementation used.
    num_components: The number of components to perform this download with.
  """
  assert src_obj_metadata.etag
  tracker_file = None
  # Only can happen if the resumable threshold is set higher than the
  # parallel transfer threshold.
  if src_obj_metadata.size < ResumableThreshold():
    return
  tracker_file_name = GetTrackerFilePath(dst_url,
                                         TrackerFileType.SLICED_DOWNLOAD,
                                         api_selector)
  fp = None
  # Check to see if we should attempt resuming the download.
  try:
    fp = open(download_file_name, 'rb')
    existing_file_size = GetFileSize(fp)
    # A parallel resumption should be attempted only if the destination file
    # size is exactly the same as the source size and the tracker file matches.
    if existing_file_size == src_obj_metadata.size:
      tracker_file = open(tracker_file_name, 'r')
      tracker_file_data = json.load(tracker_file)
      # Resume only when etag, generation, and the slicing plan all match;
      # otherwise the partial bytes on disk may belong to a different object
      # version or component layout.
      if (tracker_file_data['etag'] == src_obj_metadata.etag and
          tracker_file_data['generation'] == src_obj_metadata.generation and
          tracker_file_data['num_components'] == num_components):
        return
      else:
        tracker_file.close()
        logger.warn('Sliced download tracker file doesn\'t match for '
                    'download of %s. Restarting download from scratch.' %
                    dst_url.object_name)
  except (IOError, ValueError) as e:
    # Ignore non-existent file (happens first time a download
    # is attempted on an object), but warn user for other errors.
    if isinstance(e, ValueError) or e.errno != errno.ENOENT:
      logger.warn('Couldn\'t read sliced download tracker file (%s): %s. '
                  'Restarting download from scratch.' %
                  (tracker_file_name, str(e)))
  finally:
    if fp:
      fp.close()
    if tracker_file:
      tracker_file.close()
  # Delete component tracker files to guarantee download starts from scratch.
  DeleteDownloadTrackerFiles(dst_url, api_selector)
  # Create a new sliced download tracker file to represent this download.
  try:
    with open(tracker_file_name, 'w') as tracker_file:
      tracker_file_data = {
          'etag': src_obj_metadata.etag,
          'generation': src_obj_metadata.generation,
          'num_components': num_components,
      }
      tracker_file.write(json.dumps(tracker_file_data))
  except IOError as e:
    RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)
class SlicedDownloadFileWrapper(object):
  """Wraps a file object to be used in GetObjectMedia for sliced downloads.

  Each thread in a sliced object download writes its byte range through one
  of these wrappers. Writes are constrained to [start_byte, end_byte], and
  the component's download tracker file is refreshed periodically so the
  component can be resumed after an interruption, while the downloaded bytes
  are normally written to file.
  """

  def __init__(self, fp, tracker_file_name, src_obj_metadata, start_byte,
               end_byte):
    """Initializes the SlicedDownloadFileWrapper.

    Args:
      fp: The already-open file object to be used for writing in
          GetObjectMedia. Data will be written to file starting at the
          current seek position.
      tracker_file_name: The name of the tracker file for this component.
      src_obj_metadata: Metadata from the source object. Must include etag
          and generation.
      start_byte: The first byte to be downloaded for this parallel component.
      end_byte: The last byte to be downloaded for this parallel component.
    """
    self._orig_fp = fp
    self._tracker_file_name = tracker_file_name
    self._src_obj_metadata = src_obj_metadata
    self._last_tracker_file_byte = None
    self._start_byte = start_byte
    self._end_byte = end_byte

  @property
  def mode(self):
    """Returns the mode of the underlying file descriptor, or None."""
    return getattr(self._orig_fp, 'mode', None)

  def write(self, data):  # pylint: disable=invalid-name
    pos_before_write = self._orig_fp.tell()
    # Every write must land entirely inside this component's byte range.
    assert (self._start_byte <= pos_before_write and
            pos_before_write + len(data) <= self._end_byte + 1)
    text_util.write_to_fd(self._orig_fp, data)
    pos_after_write = self._orig_fp.tell()
    # Checkpoint the tracker file on the first write, after every
    # TRACKERFILE_UPDATE_THRESHOLD new bytes, and at the end of the range.
    needs_checkpoint = (
        self._last_tracker_file_byte is None or
        pos_after_write - self._last_tracker_file_byte >
        TRACKERFILE_UPDATE_THRESHOLD or
        pos_after_write == self._end_byte + 1)
    if needs_checkpoint:
      WriteDownloadComponentTrackerFile(self._tracker_file_name,
                                        self._src_obj_metadata,
                                        pos_after_write)
      self._last_tracker_file_byte = pos_after_write

  def seek(self, offset, whence=os.SEEK_SET):  # pylint: disable=invalid-name
    if whence == os.SEEK_END:
      # "End" means the end of this component's byte range, not of the file.
      self._orig_fp.seek(offset + self._end_byte + 1)
    else:
      self._orig_fp.seek(offset, whence)
    assert self._start_byte <= self._orig_fp.tell() <= self._end_byte + 1

  def tell(self):  # pylint: disable=invalid-name
    return self._orig_fp.tell()

  def flush(self):  # pylint: disable=invalid-name
    self._orig_fp.flush()

  def close(self):  # pylint: disable=invalid-name
    if self._orig_fp:
      self._orig_fp.close()
def _PartitionObject(src_url,
                     src_obj_metadata,
                     dst_url,
                     download_file_name,
                     decryption_key=None):
  """Partitions an object into components to be downloaded.
  Each component is a byte range of the object. The byte ranges
  of the returned components are mutually exclusive and collectively
  exhaustive. The byte ranges are inclusive at both end points.
  Args:
    src_url: Source CloudUrl.
    src_obj_metadata: Metadata from the source object with non-pickleable
                      fields removed.
    dst_url: Destination FileUrl.
    download_file_name: Temporary file name to be used for the download.
    decryption_key: Base64-encoded decryption key for the source object, if any.
  Returns:
    (components_to_download, component_lengths)
    components_to_download: A list of PerformSlicedDownloadObjectToFileArgs
                            to be used in Apply for the sliced download.
    component_lengths: A list of each component's byte length, in the same
                       order as components_to_download.
  """
  sliced_download_component_size = HumanReadableToBytes(
      config.get('GSUtil', 'sliced_object_download_component_size',
                 DEFAULT_SLICED_OBJECT_DOWNLOAD_COMPONENT_SIZE))
  max_components = config.getint('GSUtil',
                                 'sliced_object_download_max_components',
                                 DEFAULT_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
  num_components, component_size = _GetPartitionInfo(
      src_obj_metadata.size, max_components, sliced_download_component_size)
  components_to_download = []
  component_lengths = []
  for i in range(num_components):
    start_byte = i * component_size
    # The final component may be shorter; clamp to the object's last byte.
    end_byte = min((i + 1) * (component_size) - 1, src_obj_metadata.size - 1)
    component_lengths.append(end_byte - start_byte + 1)
    # We need to serialize src_obj_metadata for pickling since it can
    # contain nested classes such as custom metadata.
    src_obj_metadata_json = protojson.encode_message(src_obj_metadata)
    components_to_download.append(
        PerformSlicedDownloadObjectToFileArgs(i, src_url, src_obj_metadata_json,
                                              dst_url, download_file_name,
                                              start_byte, end_byte,
                                              decryption_key))
  return components_to_download, component_lengths
def _DoSlicedDownload(src_url,
                      src_obj_metadata,
                      dst_url,
                      download_file_name,
                      command_obj,
                      logger,
                      copy_exception_handler,
                      api_selector,
                      decryption_key=None,
                      status_queue=None):
  """Downloads a cloud object to a local file using sliced download.
  Byte ranges are decided for each thread/process, and then the parts are
  downloaded in parallel.
  Args:
    src_url: Source CloudUrl.
    src_obj_metadata: Metadata from the source object.
    dst_url: Destination FileUrl.
    download_file_name: Temporary file name to be used for download.
    command_obj: command object for use in Apply in parallel composite uploads.
    logger: for outputting log messages.
    copy_exception_handler: For handling copy exceptions during Apply.
    api_selector: The Cloud API implementation used.
    decryption_key: Base64-encoded decryption key for the source object, if any.
    status_queue: Queue for posting file messages for UI/Analytics.
  Returns:
    (bytes_transferred, crc32c)
    bytes_transferred: Number of bytes transferred from server this call.
    crc32c: a crc32c hash value (integer) for the downloaded bytes, or None if
            crc32c hashing wasn't performed.
  Raises:
    CommandException: if any component failed to download, or if the server
      sent unexpectedly-encoded data for a component.
  """
  # CustomerEncryptionValue is a subclass and thus not pickleable for
  # multiprocessing, but at this point we already have the matching key,
  # so just discard the metadata.
  src_obj_metadata.customerEncryption = None
  components_to_download, component_lengths = _PartitionObject(
      src_url, src_obj_metadata, dst_url, download_file_name, decryption_key)
  num_components = len(components_to_download)
  _MaintainSlicedDownloadTrackerFiles(src_obj_metadata, dst_url,
                                      download_file_name, logger, api_selector,
                                      num_components)
  # Resize the download file so each child process can seek to its start byte.
  with open(download_file_name, 'ab') as fp:
    fp.truncate(src_obj_metadata.size)
  # Assign a start FileMessage to each component
  for (i, component) in enumerate(components_to_download):
    size = component.end_byte - component.start_byte + 1
    download_start_byte = GetDownloadStartByte(src_obj_metadata, dst_url,
                                               api_selector,
                                               component.start_byte, size, i)
    bytes_already_downloaded = download_start_byte - component.start_byte
    PutToQueueWithTimeout(
        status_queue,
        FileMessage(src_url,
                    dst_url,
                    time.time(),
                    size=size,
                    finished=False,
                    component_num=i,
                    message_type=FileMessage.COMPONENT_TO_DOWNLOAD,
                    bytes_already_downloaded=bytes_already_downloaded))
  # Download all components in parallel; results come back unordered.
  cp_results = command_obj.Apply(
      _PerformSlicedDownloadObjectToFile,
      components_to_download,
      copy_exception_handler,
      arg_checker=gslib.command.DummyArgChecker,
      parallel_operations_override=command_obj.ParallelOverrideReason.SLICE,
      should_return_results=True)
  if len(cp_results) < num_components:
    raise CommandException(
        'Some components of %s were not downloaded successfully. '
        'Please retry this download.' % dst_url.object_name)
  # Crc32c hashes have to be concatenated in the correct order.
  cp_results = sorted(cp_results, key=attrgetter('component_num'))
  crc32c = cp_results[0].crc32c
  if crc32c is not None:
    for i in range(1, num_components):
      crc32c = ConcatCrc32c(crc32c, cp_results[i].crc32c, component_lengths[i])
  bytes_transferred = 0
  expect_gzip = ObjectIsGzipEncoded(src_obj_metadata)
  # Assign an end FileMessage to each component
  for cp_result in cp_results:
    PutToQueueWithTimeout(
        status_queue,
        FileMessage(src_url,
                    dst_url,
                    time.time(),
                    size=cp_result.component_total_size,
                    finished=True,
                    component_num=cp_result.component_num,
                    message_type=FileMessage.COMPONENT_TO_DOWNLOAD))
    bytes_transferred += cp_result.bytes_transferred
    server_gzip = (cp_result.server_encoding and
                   cp_result.server_encoding.lower().endswith('gzip'))
    # If the server gzipped any components on the fly, we will have no chance of
    # properly reconstructing the file.
    if server_gzip and not expect_gzip:
      raise CommandException(
          'Download of %s failed because the server sent back data with an '
          'unexpected encoding.' % dst_url.object_name)
  return bytes_transferred, crc32c
def _DownloadObjectToFileResumable(src_url,
                                   src_obj_metadata,
                                   dst_url,
                                   download_file_name,
                                   gsutil_api,
                                   logger,
                                   digesters,
                                   component_num=None,
                                   start_byte=0,
                                   end_byte=None,
                                   decryption_key=None):
  """Downloads an object to a local file using the resumable strategy.
  Args:
    src_url: Source CloudUrl.
    src_obj_metadata: Metadata from the source object.
    dst_url: Destination FileUrl.
    download_file_name: Temporary file name to be used for download.
    gsutil_api: gsutil Cloud API instance to use for the download.
    logger: for outputting log messages.
    digesters: Digesters corresponding to the hash algorithms that will be used
               for validation.
    component_num: Which component of a sliced download this call is for, or
                   None if this is not a sliced download.
    start_byte: The first byte of a byte range for a sliced download.
    end_byte: The last byte of a byte range for a sliced download.
    decryption_key: Base64-encoded decryption key for the source object, if any.
  Returns:
    (bytes_transferred, server_encoding)
    bytes_transferred: Number of bytes transferred from server this call.
    server_encoding: Content-encoding string if it was detected that the server
                     sent encoded bytes during transfer, None otherwise.
  Raises:
    CommandException: if the tracker file's resume point falls outside this
      call's byte range.
  """
  if end_byte is None:
    end_byte = src_obj_metadata.size - 1
  download_size = end_byte - start_byte + 1
  is_sliced = component_num is not None
  api_selector = gsutil_api.GetApiSelector(provider=src_url.scheme)
  server_encoding = None
  # Used for logging
  download_name = dst_url.object_name
  if is_sliced:
    download_name += ' component %d' % component_num
  fp = None
  try:
    fp = open(download_file_name, 'r+b')
    fp.seek(start_byte)
    # NOTE(review): api_selector is recomputed here with the same call as
    # above; the value is identical.
    api_selector = gsutil_api.GetApiSelector(provider=src_url.scheme)
    existing_file_size = GetFileSize(fp)
    tracker_file_name, download_start_byte = ReadOrCreateDownloadTrackerFile(
        src_obj_metadata,
        dst_url,
        logger,
        api_selector,
        start_byte,
        existing_file_size,
        component_num,
    )
    if download_start_byte < start_byte or download_start_byte > end_byte + 1:
      DeleteTrackerFile(tracker_file_name)
      raise CommandException(
          'Resumable download start point for %s is not in the correct byte '
          'range. Deleting tracker file, so if you re-try this download it '
          'will start from scratch' % download_name)
    download_complete = (download_start_byte == start_byte + download_size)
    resuming = (download_start_byte != start_byte) and not download_complete
    if resuming:
      logger.info('Resuming download for %s', download_name)
    elif download_complete:
      logger.info(
          'Download already complete for %s, skipping download but '
          'will run integrity checks.', download_name)
    # This is used for resuming downloads, but also for passing the mediaLink
    # and size into the download for new downloads so that we can avoid
    # making an extra HTTP call.
    serialization_data = GetDownloadSerializationData(
        src_obj_metadata,
        progress=download_start_byte,
        user_project=gsutil_api.user_project)
    if resuming or download_complete:
      # Catch up our digester with the hash data.
      bytes_digested = 0
      total_bytes_to_digest = download_start_byte - start_byte
      hash_callback = ProgressCallbackWithTimeout(
          total_bytes_to_digest,
          FileProgressCallbackHandler(gsutil_api.status_queue,
                                      component_num=component_num,
                                      src_url=src_url,
                                      dst_url=dst_url,
                                      operation_name='Hashing').call)
      while bytes_digested < total_bytes_to_digest:
        bytes_to_read = min(DEFAULT_FILE_BUFFER_SIZE,
                            total_bytes_to_digest - bytes_digested)
        data = fp.read(bytes_to_read)
        bytes_digested += bytes_to_read
        for alg_name in digesters:
          digesters[alg_name].update(six.ensure_binary(data))
        hash_callback.Progress(len(data))
    elif not is_sliced:
      # Delete file contents and start entire object download from scratch.
      fp.truncate(0)
      existing_file_size = 0
    progress_callback = FileProgressCallbackHandler(
        gsutil_api.status_queue,
        start_byte=start_byte,
        override_total_size=download_size,
        src_url=src_url,
        dst_url=dst_url,
        component_num=component_num,
        operation_name='Downloading').call
    if global_copy_helper_opts.test_callback_file:
      with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
        progress_callback = pickle.loads(test_fp.read()).call
    # Sliced components write through a wrapper that checkpoints tracker-file
    # progress and bounds writes to this component's byte range.
    if is_sliced and src_obj_metadata.size >= ResumableThreshold():
      fp = SlicedDownloadFileWrapper(fp, tracker_file_name, src_obj_metadata,
                                     start_byte, end_byte)
    compressed_encoding = ObjectIsGzipEncoded(src_obj_metadata)
    # TODO: With gzip encoding (which may occur on-the-fly and not be part of
    # the object's metadata), when we request a range to resume, it's possible
    # that the server will just resend the entire object, which means our
    # caught-up hash will be incorrect. We recalculate the hash on
    # the local file in the case of a failed gzip hash anyway, but it would
    # be better if we actively detected this case.
    if not download_complete:
      fp.seek(download_start_byte)
      server_encoding = gsutil_api.GetObjectMedia(
          src_url.bucket_name,
          src_url.object_name,
          fp,
          start_byte=download_start_byte,
          end_byte=end_byte,
          compressed_encoding=compressed_encoding,
          generation=src_url.generation,
          object_size=src_obj_metadata.size,
          download_strategy=CloudApi.DownloadStrategy.RESUMABLE,
          provider=src_url.scheme,
          serialization_data=serialization_data,
          digesters=digesters,
          progress_callback=progress_callback,
          decryption_tuple=CryptoKeyWrapperFromKey(decryption_key))
  except ResumableDownloadException as e:
    logger.warning('Caught ResumableDownloadException (%s) for download of %s.',
                   e.reason, download_name)
    raise
  finally:
    if fp:
      fp.close()
  bytes_transferred = end_byte - download_start_byte + 1
  return bytes_transferred, server_encoding
def _DownloadObjectToFileNonResumable(src_url,
                                      src_obj_metadata,
                                      dst_url,
                                      download_file_name,
                                      gsutil_api,
                                      digesters,
                                      decryption_key=None):
  """Downloads an object to a local file using the non-resumable strategy.
  This function does not support component transfers.
  Args:
    src_url: Source CloudUrl.
    src_obj_metadata: Metadata from the source object.
    dst_url: Destination FileUrl.
    download_file_name: Temporary file name to be used for download.
    gsutil_api: gsutil Cloud API instance to use for the download.
    digesters: Digesters corresponding to the hash algorithms that will be used
               for validation.
    decryption_key: Base64-encoded decryption key for the source object, if any.
  Returns:
    (bytes_transferred, server_encoding)
    bytes_transferred: Number of bytes transferred from server this call.
    server_encoding: Content-encoding string if it was detected that the server
        sent encoded bytes during transfer, None otherwise.
  """
  fp = None
  try:
    # Open in binary mode: GetObjectMedia writes raw object bytes to fp.
    # Text mode ('w') would raise TypeError on Python 3 and corrupt binary
    # data on Windows.
    fp = open(download_file_name, 'wb')
    # This is used to pass the mediaLink and the size into the download so that
    # we can avoid making an extra HTTP call.
    serialization_data = GetDownloadSerializationData(
        src_obj_metadata, 0, user_project=gsutil_api.user_project)
    progress_callback = FileProgressCallbackHandler(
        gsutil_api.status_queue,
        src_url=src_url,
        dst_url=dst_url,
        operation_name='Downloading').call
    # Test hook: allows tests to inject a pickled progress callback.
    if global_copy_helper_opts.test_callback_file:
      with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
        progress_callback = pickle.loads(test_fp.read()).call
    server_encoding = gsutil_api.GetObjectMedia(
        src_url.bucket_name,
        src_url.object_name,
        fp,
        generation=src_url.generation,
        object_size=src_obj_metadata.size,
        download_strategy=CloudApi.DownloadStrategy.ONE_SHOT,
        provider=src_url.scheme,
        serialization_data=serialization_data,
        digesters=digesters,
        progress_callback=progress_callback,
        decryption_tuple=CryptoKeyWrapperFromKey(decryption_key))
  finally:
    if fp:
      fp.close()
  # One-shot downloads either transfer the whole object or raise, so the
  # object size is the number of bytes transferred.
  return src_obj_metadata.size, server_encoding
def _DownloadObjectToFile(src_url,
                          src_obj_metadata,
                          dst_url,
                          gsutil_api,
                          logger,
                          command_obj,
                          copy_exception_handler,
                          allow_splitting=True,
                          decryption_key=None,
                          is_rsync=False,
                          preserve_posix=False):
  """Downloads an object to a local file.
  Dispatches to the sliced, one-shot, or resumable download implementation
  based on object size and configuration, then validates hashes and finalizes
  the temporary download file.
  Args:
    src_url: Source CloudUrl.
    src_obj_metadata: Metadata from the source object.
    dst_url: Destination FileUrl.
    gsutil_api: gsutil Cloud API instance to use for the download.
    logger: for outputting log messages.
    command_obj: command object for use in Apply in sliced downloads.
    copy_exception_handler: For handling copy exceptions during Apply.
    allow_splitting: Whether or not to allow sliced download.
    decryption_key: Base64-encoded decryption key for the source object, if any.
    is_rsync: Whether or not the caller is the rsync command.
    preserve_posix: Whether or not to preserve POSIX attributes.
  Returns:
    (elapsed_time, bytes_transferred, dst_url, md5), where time elapsed
    excludes initial GET.
  Raises:
    FileConcurrencySkipError: if this download is already in progress.
    CommandException: if other errors encountered.
  """
  global open_files_map, open_files_lock
  if dst_url.object_name.endswith(dst_url.delim):
    logger.warn('\n'.join(
        textwrap.wrap(
            'Skipping attempt to download to filename ending with slash (%s). This '
            'typically happens when using gsutil to download from a subdirectory '
            'created by the Cloud Console (https://cloud.google.com/console)' %
            dst_url.object_name)))
    return (0, 0, dst_url, '')
  api_selector = gsutil_api.GetApiSelector(provider=src_url.scheme)
  download_strategy = _SelectDownloadStrategy(dst_url)
  sliced_download = _ShouldDoSlicedDownload(download_strategy, src_obj_metadata,
                                            allow_splitting, logger)
  download_file_name, need_to_unzip = _GetDownloadFile(dst_url,
                                                       src_obj_metadata, logger)
  # Ensure another process/thread is not already writing to this file.
  with open_files_lock:
    if open_files_map.get(download_file_name, False):
      raise FileConcurrencySkipError
    open_files_map[download_file_name] = True
  # Set up hash digesters. MD5 cannot be computed incrementally across
  # concurrently-downloaded slices, so it is only considered for
  # non-sliced downloads.
  consider_md5 = src_obj_metadata.md5Hash and not sliced_download
  hash_algs = GetDownloadHashAlgs(logger,
                                  consider_md5=consider_md5,
                                  consider_crc32c=src_obj_metadata.crc32c)
  digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
  # Tracks whether the server used a gzip encoding.
  server_encoding = None
  download_complete = (src_obj_metadata.size == 0)
  bytes_transferred = 0
  start_time = time.time()
  if not download_complete:
    if sliced_download:
      (bytes_transferred,
       crc32c) = (_DoSlicedDownload(src_url,
                                    src_obj_metadata,
                                    dst_url,
                                    download_file_name,
                                    command_obj,
                                    logger,
                                    copy_exception_handler,
                                    api_selector,
                                    decryption_key=decryption_key,
                                    status_queue=gsutil_api.status_queue))
      if 'crc32c' in digesters:
        digesters['crc32c'].crcValue = crc32c
    elif download_strategy is CloudApi.DownloadStrategy.ONE_SHOT:
      bytes_transferred, server_encoding = _DownloadObjectToFileNonResumable(
          src_url,
          src_obj_metadata,
          dst_url,
          download_file_name,
          gsutil_api,
          digesters,
          decryption_key=decryption_key,
      )
    elif download_strategy is CloudApi.DownloadStrategy.RESUMABLE:
      bytes_transferred, server_encoding = _DownloadObjectToFileResumable(
          src_url,
          src_obj_metadata,
          dst_url,
          download_file_name,
          gsutil_api,
          logger,
          digesters,
          decryption_key=decryption_key,
      )
    else:
      # Fixed message: previously the implicit string concatenation produced
      # "chosen forfile" (missing space).
      raise CommandException('Invalid download strategy %s chosen for '
                             'file %s' %
                             (download_strategy, download_file_name))
  end_time = time.time()
  server_gzip = server_encoding and server_encoding.lower().endswith('gzip')
  local_md5 = _ValidateAndCompleteDownload(logger,
                                           src_url,
                                           src_obj_metadata,
                                           dst_url,
                                           need_to_unzip,
                                           server_gzip,
                                           digesters,
                                           hash_algs,
                                           download_file_name,
                                           api_selector,
                                           bytes_transferred,
                                           gsutil_api,
                                           is_rsync=is_rsync,
                                           preserve_posix=preserve_posix)
  with open_files_lock:
    # NOTE(review): open_files_map appears to be a custom dict-like object
    # with a delete() method (plain dicts have no .delete) — it is defined
    # outside this view; confirm against the module globals.
    open_files_map.delete(download_file_name)
  PutToQueueWithTimeout(
      gsutil_api.status_queue,
      FileMessage(src_url,
                  dst_url,
                  message_time=end_time,
                  message_type=FileMessage.FILE_DOWNLOAD,
                  size=src_obj_metadata.size,
                  finished=True))
  return (end_time - start_time, bytes_transferred, dst_url, local_md5)
def _GetDownloadTempZipFileName(dst_url):
"""Returns temporary file name for a temporarily compressed download."""
return '%s_.gztmp' % dst_url.object_name
def _GetDownloadTempFileName(dst_url):
"""Returns temporary download file name for uncompressed downloads."""
return '%s_.gstmp' % dst_url.object_name
def _ValidateAndCompleteDownload(logger,
                                 src_url,
                                 src_obj_metadata,
                                 dst_url,
                                 need_to_unzip,
                                 server_gzip,
                                 digesters,
                                 hash_algs,
                                 download_file_name,
                                 api_selector,
                                 bytes_transferred,
                                 gsutil_api,
                                 is_rsync=False,
                                 preserve_posix=False):
  """Validates and performs necessary operations on a downloaded file.
  Validates the integrity of the downloaded file using hash_algs. If the file
  was compressed (temporarily), the file will be decompressed. Then, if the
  integrity of the file was successfully validated, the file will be moved
  from its temporary download location to its permanent location on disk.
  Args:
    logger: For outputting log messages.
    src_url: StorageUrl for the source object.
    src_obj_metadata: Metadata for the source object, potentially containing
                      hash values.
    dst_url: StorageUrl describing the destination file.
    need_to_unzip: If true, a temporary zip file was used and must be
                   uncompressed as part of validation.
    server_gzip: If true, the server gzipped the bytes (regardless of whether
                 the object metadata claimed it was gzipped).
    digesters: dict of {string, hash digester} that contains up-to-date digests
               computed during the download. If a digester for a particular
               algorithm is None, an up-to-date digest is not available and the
               hash must be recomputed from the local file.
    hash_algs: dict of {string, hash algorithm} that can be used if digesters
               don't have up-to-date digests.
    download_file_name: Temporary file name that was used for download.
    api_selector: The Cloud API implementation used (used tracker file naming).
    bytes_transferred: Number of bytes downloaded (used for logging).
    gsutil_api: Cloud API to use for service and status.
    is_rsync: Whether or not the caller is the rsync function. Used to determine
              if timeCreated should be used.
    preserve_posix: Whether or not to preserve the posix attributes.
  Returns:
    An MD5 of the local file, if one was calculated as part of the integrity
    check.
  """
  # file_name tracks where the (possibly still compressed) data currently
  # lives; final_file_name is where the validated data ultimately belongs.
  final_file_name = dst_url.object_name
  file_name = download_file_name
  digesters_succeeded = True
  for alg in digesters:
    # If we get a digester with a None algorithm, the underlying
    # implementation failed to calculate a digest, so we will need to
    # calculate one from scratch.
    if not digesters[alg]:
      digesters_succeeded = False
      break
  if digesters_succeeded:
    local_hashes = _CreateDigestsFromDigesters(digesters)
  else:
    # Fall back to re-reading the downloaded file to compute the digests.
    local_hashes = _CreateDigestsFromLocalFile(gsutil_api.status_queue,
                                               hash_algs, file_name, src_url,
                                               src_obj_metadata)
  digest_verified = True
  hash_invalid_exception = None
  try:
    _CheckHashes(logger, src_url, src_obj_metadata, final_file_name,
                 local_hashes)
    DeleteDownloadTrackerFiles(dst_url, api_selector)
  except HashMismatchException as e:
    # If an non-gzipped object gets sent with gzip content encoding, the hash
    # we calculate will match the gzipped bytes, not the original object. Thus,
    # we'll need to calculate and check it after unzipping.
    if server_gzip:
      logger.debug('Hash did not match but server gzipped the content, will '
                   'recalculate.')
      digest_verified = False
    elif api_selector == ApiSelector.XML:
      logger.debug(
          'Hash did not match but server may have gzipped the content, will '
          'recalculate.')
      # Save off the exception in case this isn't a gzipped file.
      hash_invalid_exception = e
      digest_verified = False
    else:
      # Genuine mismatch: clean up tracker state and either quarantine or
      # delete the corrupt temporary file before re-raising.
      DeleteDownloadTrackerFiles(dst_url, api_selector)
      if _RENAME_ON_HASH_MISMATCH:
        os.rename(file_name, final_file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
      else:
        os.unlink(file_name)
      raise
  if need_to_unzip or server_gzip:
    # Log that we're uncompressing if the file is big enough that
    # decompressing would make it look like the transfer "stalled" at the end.
    if bytes_transferred > TEN_MIB:
      logger.info('Uncompressing temporarily gzipped file to %s...',
                  final_file_name)
    gzip_fp = None
    try:
      # Downloaded temporarily gzipped file, unzip to file without '_.gztmp'
      # suffix.
      gzip_fp = gzip.open(file_name, 'rb')
      with open(final_file_name, 'wb') as f_out:
        data = gzip_fp.read(GZIP_CHUNK_SIZE)
        while data:
          f_out.write(data)
          data = gzip_fp.read(GZIP_CHUNK_SIZE)
    except IOError as e:
      # In the XML case where we don't know if the file was gzipped, raise
      # the original hash exception if we find that it wasn't.
      if 'Not a gzipped file' in str(e) and hash_invalid_exception:
        # Linter improperly thinks we're raising None despite the above check.
        # pylint: disable=raising-bad-type
        raise hash_invalid_exception
    finally:
      if gzip_fp:
        gzip_fp.close()
    # The compressed temporary file has been expanded into its final
    # location; remove it and treat the final file as the current data.
    os.unlink(file_name)
    file_name = final_file_name
  if not digest_verified:
    try:
      # Recalculate hashes on the unzipped local file.
      local_hashes = _CreateDigestsFromLocalFile(gsutil_api.status_queue,
                                                 hash_algs, file_name, src_url,
                                                 src_obj_metadata)
      _CheckHashes(logger, src_url, src_obj_metadata, final_file_name,
                   local_hashes)
      DeleteDownloadTrackerFiles(dst_url, api_selector)
    except HashMismatchException:
      DeleteDownloadTrackerFiles(dst_url, api_selector)
      if _RENAME_ON_HASH_MISMATCH:
        os.rename(file_name, file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
      else:
        os.unlink(file_name)
      raise
  if file_name != final_file_name:
    # Data is still in a temporary file, so move it to a permanent location.
    if os.path.exists(final_file_name):
      os.unlink(final_file_name)
    os.rename(file_name, final_file_name)
  ParseAndSetPOSIXAttributes(final_file_name,
                             src_obj_metadata,
                             is_rsync=is_rsync,
                             preserve_posix=preserve_posix)
  # Implicitly returns None when no MD5 was among the computed hashes.
  if 'md5' in local_hashes:
    return local_hashes['md5']
def _CopyFileToFile(src_url, dst_url, status_queue=None, src_obj_metadata=None):
  """Copies a local file to a local file.
  Args:
    src_url: Source FileUrl.
    dst_url: Destination FileUrl.
    status_queue: Queue for posting file messages for UI/Analytics.
    src_obj_metadata: An apitools Object that may contain file size, or None.
  Returns:
    (elapsed_time, bytes_transferred, dst_url, md5=None).
  Raises:
    CommandException: if errors encountered.
  """
  source_stream = GetStreamFromFileUrl(src_url)
  destination_path = dst_url.object_name
  # Create the destination's parent directory if it doesn't exist yet.
  parent_dir = os.path.dirname(destination_path)
  if parent_dir and not os.path.exists(parent_dir):
    os.makedirs(parent_dir)
  with open(destination_path, 'wb') as destination_stream:
    start_time = time.time()
    shutil.copyfileobj(source_stream, destination_stream)
  if not src_url.IsStream():
    # Explicitly close the src fp - necessary if it is a fifo.
    source_stream.close()
  end_time = time.time()
  PutToQueueWithTimeout(
      status_queue,
      FileMessage(src_url,
                  dst_url,
                  end_time,
                  message_type=FileMessage.FILE_LOCAL_COPY,
                  size=src_obj_metadata.size if src_obj_metadata else None,
                  finished=True))
  return (end_time - start_time, os.path.getsize(destination_path), dst_url,
          None)
def _DummyTrackerCallback(_):
pass
# pylint: disable=undefined-variable
def _CopyObjToObjDaisyChainMode(src_url,
                                src_obj_metadata,
                                dst_url,
                                dst_obj_metadata,
                                preconditions,
                                gsutil_api,
                                logger,
                                decryption_key=None):
  """Copies from src_url to dst_url in "daisy chain" mode.
  See -D OPTION documentation about what daisy chain mode is.
  Args:
    src_url: Source CloudUrl
    src_obj_metadata: Metadata from source object
    dst_url: Destination CloudUrl
    dst_obj_metadata: Object-specific metadata that should be overidden during
                      the copy.
    preconditions: Preconditions to use for the copy.
    gsutil_api: gsutil Cloud API to use for the copy.
    logger: For outputting log messages.
    decryption_key: Base64-encoded decryption key for the source object, if any.
  Returns:
    (elapsed_time, bytes_transferred, dst_url with generation,
    md5 hash of destination) excluding overhead like initial GET.
  Raises:
    CommandException: if errors encountered.
  """
  # We don't attempt to preserve ACLs across providers because
  # GCS and S3 support different ACLs and disjoint principals.
  if (global_copy_helper_opts.preserve_acl and
      src_url.scheme != dst_url.scheme):
    raise NotImplementedError('Cross-provider cp -p not supported')
  if not global_copy_helper_opts.preserve_acl:
    dst_obj_metadata.acl = []
  # Don't use callbacks for downloads on the daisy chain wrapper because
  # upload callbacks will output progress, but respect test hooks if present.
  progress_callback = None
  if global_copy_helper_opts.test_callback_file:
    with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
      progress_callback = pickle.loads(test_fp.read()).call
  compressed_encoding = ObjectIsGzipEncoded(src_obj_metadata)
  encryption_keywrapper = GetEncryptionKeyWrapper(config)
  start_time = time.time()
  # The wrapper streams the source object's bytes so they can be fed
  # directly into the upload without landing on local disk.
  upload_fp = DaisyChainWrapper(src_url,
                                src_obj_metadata.size,
                                gsutil_api,
                                compressed_encoding=compressed_encoding,
                                progress_callback=progress_callback,
                                decryption_key=decryption_key)
  uploaded_object = None
  if src_obj_metadata.size == 0:
    # Resumable uploads of size 0 are not supported.
    uploaded_object = gsutil_api.UploadObject(
        upload_fp,
        object_metadata=dst_obj_metadata,
        canned_acl=global_copy_helper_opts.canned_acl,
        preconditions=preconditions,
        provider=dst_url.scheme,
        fields=UPLOAD_RETURN_FIELDS,
        size=src_obj_metadata.size,
        encryption_tuple=encryption_keywrapper)
  else:
    # TODO: Support process-break resumes. This will resume across connection
    # breaks and server errors, but the tracker callback is a no-op so this
    # won't resume across gsutil runs.
    # TODO: Test retries via test_callback_file.
    uploaded_object = gsutil_api.UploadObjectResumable(
        upload_fp,
        object_metadata=dst_obj_metadata,
        canned_acl=global_copy_helper_opts.canned_acl,
        preconditions=preconditions,
        provider=dst_url.scheme,
        fields=UPLOAD_RETURN_FIELDS,
        size=src_obj_metadata.size,
        progress_callback=FileProgressCallbackHandler(
            gsutil_api.status_queue,
            src_url=src_url,
            dst_url=dst_url,
            operation_name='Uploading').call,
        tracker_callback=_DummyTrackerCallback,
        encryption_tuple=encryption_keywrapper)
  end_time = time.time()
  try:
    _CheckCloudHashes(logger, src_url, dst_url, src_obj_metadata,
                      uploaded_object)
  except HashMismatchException:
    if _RENAME_ON_HASH_MISMATCH:
      # Quarantine the corrupt object under a suffixed name (server-side
      # copy) instead of deleting it outright, to aid debugging.
      corrupted_obj_metadata = apitools_messages.Object(
          name=dst_obj_metadata.name,
          bucket=dst_obj_metadata.bucket,
          etag=uploaded_object.etag)
      dst_obj_metadata.name = (dst_url.object_name +
                               _RENAME_ON_HASH_MISMATCH_SUFFIX)
      decryption_keywrapper = CryptoKeyWrapperFromKey(decryption_key)
      gsutil_api.CopyObject(corrupted_obj_metadata,
                            dst_obj_metadata,
                            provider=dst_url.scheme,
                            decryption_tuple=decryption_keywrapper,
                            encryption_tuple=encryption_keywrapper)
    # If the digest doesn't match, delete the object.
    gsutil_api.DeleteObject(dst_url.bucket_name,
                            dst_url.object_name,
                            generation=uploaded_object.generation,
                            provider=dst_url.scheme)
    raise
  result_url = dst_url.Clone()
  result_url.generation = GenerationFromUrlAndString(result_url,
                                                     uploaded_object.generation)
  PutToQueueWithTimeout(
      gsutil_api.status_queue,
      FileMessage(src_url,
                  dst_url,
                  end_time,
                  message_type=FileMessage.FILE_DAISY_COPY,
                  size=src_obj_metadata.size,
                  finished=True))
  return (end_time - start_time, src_obj_metadata.size, result_url,
          uploaded_object.md5Hash)
def GetSourceFieldsNeededForCopy(dst_is_cloud,
                                 skip_unsupported_objects,
                                 preserve_acl,
                                 is_rsync=False,
                                 preserve_posix=False,
                                 delete_source=False):
  """Determines the metadata fields needed for a copy operation.
  Computing the required fields up front lets the cp command ask the
  iterator's get/list calls for exactly this metadata, avoiding a separate
  per-object get-metadata HTTP call. The trade-off is that filtering at the
  leaf nodes of iteration (on a remaining wildcard) gets more expensive,
  since more metadata is fetched than the object name alone would require.
  The rsync command makes the opposite trade-off (fast listing with minimal
  metadata plus per-object get-metadata calls) and uses this function to
  decide what those per-object calls must request.
  Args:
    dst_is_cloud: if true, destination is a Cloud URL.
    skip_unsupported_objects: if true, get metadata for skipping unsupported
        object types.
    preserve_acl: if true, get object ACL.
    is_rsync: if true, the calling function is rsync. Determines if metadata is
        needed to verify download.
    preserve_posix: if true, retrieves POSIX attributes into user metadata.
    delete_source: if true, source object will be deleted after the copy
        (mv command).
  Returns:
    List of necessary field metadata field names.
  """
  needed_fields = set()
  if dst_is_cloud:
    # Cloud or daisy-chain copies need every copyable field: a copy could
    # succeed with just bucket and object name, but once any metadata is
    # sent the JSON API expects a complete object resource, and fields like
    # size are wanted for our own progress tracking anyway.
    needed_fields |= {
        'cacheControl',
        'componentCount',
        'contentDisposition',
        'contentEncoding',
        'contentLanguage',
        'contentType',
        'crc32c',
        'customerEncryption',
        'etag',
        'generation',
        'md5Hash',
        'mediaLink',
        'metadata',
        'metageneration',
        'size',
        'storageClass',
        'timeCreated',
    }
    # The ACL is only fetched when it will actually be preserved.
    if preserve_acl:
      needed_fields.add('acl')
  else:
    # Download destination: only what's needed to perform and validate it.
    needed_fields |= {
        'contentEncoding',
        'contentType',
        'crc32c',
        'customerEncryption',
        'etag',
        'generation',
        'md5Hash',
        'mediaLink',
        'size',
    }
    if is_rsync:
      needed_fields.add('metadata/%s' % MTIME_ATTR)
      needed_fields.add('timeCreated')
    if preserve_posix:
      needed_fields |= {
          'metadata/%s' % ATIME_ATTR,
          'metadata/%s' % MTIME_ATTR,
          'metadata/%s' % GID_ATTR,
          'metadata/%s' % MODE_ATTR,
          'metadata/%s' % UID_ATTR,
      }
  if delete_source:
    # mv needs these to warn about early-deletion charges before removing
    # the source object.
    needed_fields |= {'storageClass', 'timeCreated'}
  if skip_unsupported_objects:
    needed_fields.add('storageClass')
  return list(needed_fields)
# Map of (lowercase) storage classes with early deletion charges to their
# minimum lifetime in seconds. Objects in these classes that are deleted
# before the minimum lifetime elapses incur an early deletion charge (see
# WarnIfMvEarlyDeletionChargeApplies).
EARLY_DELETION_MINIMUM_LIFETIME = {
    'nearline': 30 * SECONDS_PER_DAY,
    'coldline': 90 * SECONDS_PER_DAY
}
def WarnIfMvEarlyDeletionChargeApplies(src_url, src_obj_metadata, logger):
  """Warns when deleting a gs:// object could incur an early deletion charge.
  Inspects metadata of Google Cloud Storage objects in storage classes that
  carry early deletion charges (such as Nearline), and warns when operations
  like mv would delete them before their minimum lifetime has elapsed.
  Args:
    src_url: CloudUrl for the source object.
    src_obj_metadata: source object metadata with necessary fields
        (per GetSourceFieldsNeededForCopy).
    logger: logging.Logger for outputting warning.
  """
  # Only gs objects with both creation time and storage class available can
  # be evaluated.
  if src_url.scheme != 'gs':
    return
  if not (src_obj_metadata and src_obj_metadata.timeCreated and
          src_obj_metadata.storageClass):
    return
  storage_class = src_obj_metadata.storageClass.lower()
  min_lifetime_seconds = EARLY_DELETION_MINIMUM_LIFETIME.get(
      storage_class, None)
  if not min_lifetime_seconds:
    return
  # Earliest local time at which deletion would be free of charge.
  earliest_free_delete_time = (
      min_lifetime_seconds +
      ConvertDatetimeToPOSIX(src_obj_metadata.timeCreated))
  if time.time() < earliest_free_delete_time:
    logger.warn(
        'Warning: moving %s object %s may incur an early deletion '
        'charge, because the original object is less than %s '
        'days old according to the local system time.', storage_class,
        src_url.url_string, min_lifetime_seconds // SECONDS_PER_DAY)
def MaybeSkipUnsupportedObject(src_url, src_obj_metadata):
  """Skips unsupported object types if requested.
  Args:
    src_url: CloudUrl for the source object.
    src_obj_metadata: source object metadata with storageClass field
        (per GetSourceFieldsNeededForCopy).
  Raises:
    SkipGlacierError: if skipping a s3 Glacier object.
  """
  # Only S3 sources can hold Glacier objects, and we only skip them when the
  # user asked for it.
  if src_url.scheme != 's3':
    return
  if not global_copy_helper_opts.skip_unsupported_objects:
    return
  if src_obj_metadata.storageClass == 'GLACIER':
    raise SkipGlacierError()
def GetDecryptionCSEK(src_url, src_obj_metadata):
  """Ensures a matching decryption key is available for the source object.
  Args:
    src_url: CloudUrl for the source object.
    src_obj_metadata: source object metadata with optional customerEncryption
        field.
  Raises:
    EncryptionException if the object is encrypted and no matching key is found.
  Returns:
    Base64-encoded decryption key string if the object is encrypted and a
    matching key is found, or None if object is not encrypted.
  """
  encryption_info = src_obj_metadata.customerEncryption
  if not encryption_info:
    # Object is not customer-encrypted; no key needed.
    return None
  matched_key = FindMatchingCSEKInBotoConfig(encryption_info.keySha256, config)
  if not matched_key:
    raise EncryptionException(
        'Missing decryption key with SHA256 hash %s. No decryption key '
        'matches object %s' % (encryption_info.keySha256, src_url))
  return matched_key
# pylint: disable=undefined-variable
# pylint: disable=too-many-statements
def PerformCopy(logger,
                src_url,
                dst_url,
                gsutil_api,
                command_obj,
                copy_exception_handler,
                src_obj_metadata=None,
                allow_splitting=True,
                headers=None,
                manifest=None,
                gzip_exts=None,
                is_rsync=False,
                preserve_posix=False,
                gzip_encoded=False):
  """Performs copy from src_url to dst_url, handling various special cases.
  Args:
    logger: for outputting log messages.
    src_url: Source StorageUrl.
    dst_url: Destination StorageUrl.
    gsutil_api: gsutil Cloud API instance to use for the copy.
    command_obj: command object for use in Apply in parallel composite uploads
        and sliced object downloads.
    copy_exception_handler: for handling copy exceptions during Apply.
    src_obj_metadata: If source URL is a cloud object, source object metadata
        with all necessary fields (per GetSourceFieldsNeededForCopy).
        Required for cloud source URLs. If source URL is a file, an
        apitools Object that may contain file size, or None.
    allow_splitting: Whether to allow the file to be split into component
        pieces for an parallel composite upload or download.
    headers: optional headers to use for the copy operation.
    manifest: optional manifest for tracking copy operations.
    gzip_exts: List of file extensions to gzip, if any.
        If gzip_exts is GZIP_ALL_FILES, gzip all files.
    is_rsync: Whether or not the caller is the rsync command.
    preserve_posix: Whether or not to preserve posix attributes.
    gzip_encoded: Whether to use gzip transport encoding for the upload. Used
        in conjunction with gzip_exts. Streaming files compressed is only
        supported on the JSON GCS API.
  Returns:
    (elapsed_time, bytes_transferred, version-specific dst_url) excluding
    overhead like initial GET.
  Raises:
    ItemExistsError: if no clobber flag is specified and the destination
        object already exists.
    SkipUnsupportedObjectError: if skip_unsupported_objects flag is specified
        and the source is an unsupported type.
    CommandException: if other errors encountered.
  """
  # TODO: Remove elapsed_time as it is currently unused by all callers.
  if headers:
    dst_obj_headers = headers.copy()
  else:
    dst_obj_headers = {}
  # Create a metadata instance for each destination object so metadata
  # such as content-type can be applied per-object.
  # Initialize metadata from any headers passed in via -h.
  dst_obj_metadata = ObjectMetadataFromHeaders(dst_obj_headers)
  # Precondition headers (e.g. x-goog-if-generation-match) are only honored
  # for gs destinations.
  if dst_url.IsCloudUrl() and dst_url.scheme == 'gs':
    preconditions = PreconditionsFromHeaders(dst_obj_headers)
  else:
    preconditions = Preconditions()
  src_obj_filestream = None
  decryption_key = None
  # True for a same-provider cloud-to-cloud copy that can be performed
  # server-side (no daisy chaining through this machine).
  copy_in_the_cloud = False
  if src_url.IsCloudUrl():
    if (dst_url.IsCloudUrl() and src_url.scheme == dst_url.scheme and
        not global_copy_helper_opts.daisy_chain):
      copy_in_the_cloud = True
    if global_copy_helper_opts.perform_mv:
      WarnIfMvEarlyDeletionChargeApplies(src_url, src_obj_metadata, logger)
    MaybeSkipUnsupportedObject(src_url, src_obj_metadata)
    decryption_key = GetDecryptionCSEK(src_url, src_obj_metadata)
    src_obj_size = src_obj_metadata.size
    dst_obj_metadata.contentType = src_obj_metadata.contentType
    if global_copy_helper_opts.preserve_acl and dst_url.IsCloudUrl():
      if src_url.scheme == 'gs' and not src_obj_metadata.acl:
        raise CommandException(
            'No OWNER permission found for object %s. OWNER permission is '
            'required for preserving ACLs.' % src_url)
      dst_obj_metadata.acl = src_obj_metadata.acl
      # Special case for S3-to-S3 copy URLs using
      # global_copy_helper_opts.preserve_acl.
      # dst_url will be verified in _CopyObjToObjDaisyChainMode if it
      # is not s3 (and thus differs from src_url).
      if src_url.scheme == 's3':
        acl_text = S3MarkerAclFromObjectMetadata(src_obj_metadata)
        if acl_text:
          AddS3MarkerAclToObjectMetadata(dst_obj_metadata, acl_text)
  else:  # src_url.IsFileUrl()
    try:
      src_obj_filestream = GetStreamFromFileUrl(src_url)
    except Exception as e:  # pylint: disable=broad-except
      message = 'Error opening file "%s": %s.' % (src_url, str(e))
      if command_obj.continue_on_error:
        command_obj.op_failure_count += 1
        logger.error(message)
        return
      else:
        raise CommandException(message)
    if src_url.IsStream() or src_url.IsFifo():
      # Streams/fifos have no knowable size up front.
      src_obj_size = None
    elif src_obj_metadata and src_obj_metadata.size:
      # Iterator retrieved the file's size, no need to stat it again.
      src_obj_size = src_obj_metadata.size
    else:
      src_obj_size = os.path.getsize(src_url.object_name)
  if global_copy_helper_opts.use_manifest:
    # Set the source size in the manifest.
    manifest.Set(src_url.url_string, 'size', src_obj_size)
  # NOTE(review): 'src_url != "s3"' compares a StorageUrl object to a string
  # and so is always True; this was presumably intended to be
  # src_url.scheme != 's3'. Confirm before relying on the condition.
  if (dst_url.scheme == 's3' and src_url != 's3' and
      src_obj_size is not None and  # Can't compare int to None in py3
      src_obj_size > S3_MAX_UPLOAD_SIZE):
    raise CommandException(
        '"%s" exceeds the maximum gsutil-supported size for an S3 upload. S3 '
        'objects greater than %s in size require multipart uploads, which '
        'gsutil does not support.' %
        (src_url, MakeHumanReadable(S3_MAX_UPLOAD_SIZE)))
  # On Windows, stdin is opened as text mode instead of binary which causes
  # problems when piping a binary file, so this switches it to binary mode.
  if IS_WINDOWS and src_url.IsFileUrl() and src_url.IsStream():
    msvcrt.setmode(GetStreamFromFileUrl(src_url).fileno(), os.O_BINARY)
  if global_copy_helper_opts.no_clobber:
    # There are two checks to prevent clobbering:
    # 1) The first check is to see if the URL
    #    already exists at the destination and prevent the upload/download
    #    from happening. This is done by the exists() call.
    # 2) The second check is only relevant if we are writing to gs. We can
    #    enforce that the server only writes the object if it doesn't exist
    #    by specifying the header below. This check only happens at the
    #    server after the complete file has been uploaded. We specify this
    #    header to prevent a race condition where a destination file may
    #    be created after the first check and before the file is fully
    #    uploaded.
    # In order to save on unnecessary uploads/downloads we perform both
    # checks. However, this may come at the cost of additional HTTP calls.
    if preconditions.gen_match:
      raise ArgumentException('Specifying x-goog-if-generation-match is '
                              'not supported with cp -n')
    else:
      # Generation 0 means "only write if the object does not exist".
      preconditions.gen_match = 0
    if dst_url.IsFileUrl() and os.path.exists(dst_url.object_name):
      raise ItemExistsError()
    elif dst_url.IsCloudUrl():
      try:
        dst_object = gsutil_api.GetObjectMetadata(dst_url.bucket_name,
                                                  dst_url.object_name,
                                                  provider=dst_url.scheme)
      except NotFoundException:
        dst_object = None
      if dst_object:
        raise ItemExistsError()
  if dst_url.IsCloudUrl():
    # Cloud storage API gets object and bucket name from metadata.
    dst_obj_metadata.name = dst_url.object_name
    dst_obj_metadata.bucket = dst_url.bucket_name
    if src_url.IsCloudUrl():
      # Preserve relevant metadata from the source object if it's not already
      # provided from the headers.
      src_obj_metadata.name = src_url.object_name
      src_obj_metadata.bucket = src_url.bucket_name
    else:
      _SetContentTypeFromFile(src_url, dst_obj_metadata)
  # Only set KMS key name if destination provider is 'gs'.
  encryption_keywrapper = GetEncryptionKeyWrapper(config)
  if (encryption_keywrapper and
      encryption_keywrapper.crypto_type == CryptoKeyType.CMEK and
      dst_url.scheme == 'gs'):
    dst_obj_metadata.kmsKeyName = encryption_keywrapper.crypto_key
  if src_obj_metadata:
    # Note that CopyObjectMetadata only copies specific fields. We intentionally
    # do not copy storageClass, as the bucket's default storage class should be
    # used (when copying to a gs:// bucket) unless explicitly overridden.
    CopyObjectMetadata(src_obj_metadata, dst_obj_metadata, override=False)
  if global_copy_helper_opts.dest_storage_class:
    dst_obj_metadata.storageClass = global_copy_helper_opts.dest_storage_class
  _LogCopyOperation(logger, src_url, dst_url, dst_obj_metadata)
  # Dispatch to the appropriate copy implementation based on the
  # source/destination URL combination.
  if src_url.IsCloudUrl():
    if dst_url.IsFileUrl():
      PutToQueueWithTimeout(
          gsutil_api.status_queue,
          FileMessage(src_url,
                      dst_url,
                      time.time(),
                      message_type=FileMessage.FILE_DOWNLOAD,
                      size=src_obj_size,
                      finished=False))
      return _DownloadObjectToFile(src_url,
                                   src_obj_metadata,
                                   dst_url,
                                   gsutil_api,
                                   logger,
                                   command_obj,
                                   copy_exception_handler,
                                   allow_splitting=allow_splitting,
                                   decryption_key=decryption_key,
                                   is_rsync=is_rsync,
                                   preserve_posix=preserve_posix)
    elif copy_in_the_cloud:
      PutToQueueWithTimeout(
          gsutil_api.status_queue,
          FileMessage(src_url,
                      dst_url,
                      time.time(),
                      message_type=FileMessage.FILE_CLOUD_COPY,
                      size=src_obj_size,
                      finished=False))
      return _CopyObjToObjInTheCloud(src_url,
                                     src_obj_metadata,
                                     dst_url,
                                     dst_obj_metadata,
                                     preconditions,
                                     gsutil_api,
                                     decryption_key=decryption_key)
    else:
      PutToQueueWithTimeout(
          gsutil_api.status_queue,
          FileMessage(src_url,
                      dst_url,
                      time.time(),
                      message_type=FileMessage.FILE_DAISY_COPY,
                      size=src_obj_size,
                      finished=False))
      return _CopyObjToObjDaisyChainMode(src_url,
                                         src_obj_metadata,
                                         dst_url,
                                         dst_obj_metadata,
                                         preconditions,
                                         gsutil_api,
                                         logger,
                                         decryption_key=decryption_key)
  else:  # src_url.IsFileUrl()
    if dst_url.IsCloudUrl():
      # The FileMessage for this upload object is inside _UploadFileToObject().
      # This is such because the function may alter src_url, which would prevent
      # us from correctly tracking the new url.
      return _UploadFileToObject(src_url,
                                 src_obj_filestream,
                                 src_obj_size,
                                 dst_url,
                                 dst_obj_metadata,
                                 preconditions,
                                 gsutil_api,
                                 logger,
                                 command_obj,
                                 copy_exception_handler,
                                 gzip_exts=gzip_exts,
                                 allow_splitting=allow_splitting,
                                 gzip_encoded=gzip_encoded)
    else:  # dst_url.IsFileUrl()
      PutToQueueWithTimeout(
          gsutil_api.status_queue,
          FileMessage(src_url,
                      dst_url,
                      time.time(),
                      message_type=FileMessage.FILE_LOCAL_COPY,
                      size=src_obj_size,
                      finished=False))
      result = _CopyFileToFile(src_url,
                               dst_url,
                               status_queue=gsutil_api.status_queue,
                               src_obj_metadata=src_obj_metadata)
      # Need to let _CopyFileToFile return before setting the POSIX attributes.
      if not src_url.IsStream() and not dst_url.IsStream():
        ParseAndSetPOSIXAttributes(dst_url.object_name,
                                   src_obj_metadata,
                                   is_rsync=is_rsync,
                                   preserve_posix=preserve_posix)
      return result
class Manifest(object):
    """Stores the manifest items for the CpCommand class.

    Tracks per-source-URL transfer state in memory and appends one CSV row per
    finished transfer to the manifest file at *path*. A pre-existing manifest
    is parsed so that sources already marked 'OK' or 'skip' can be filtered
    out of a re-run.
    """

    # Header row written to a freshly created manifest file; also documents
    # the column order produced by _WriteRowToManifestFile. (Previously this
    # list was duplicated in the PY2 and PY3 branches of _CreateManifestFile.)
    _CSV_HEADERS = [
        'Source',
        'Destination',
        'Start',
        'End',
        'Md5',
        'UploadId',
        'Source Size',
        'Bytes Transferred',
        'Result',
        'Description',
    ]

    def __init__(self, path):
        # self.items maps source URL -> dict of row values for in-flight
        # transfers.
        self.items = {}
        # Maps source URL -> prior result ('OK' or 'skip') parsed from disk.
        self.manifest_filter = {}
        self.lock = parallelism_framework_util.CreateLock()
        self.manifest_path = os.path.expanduser(path)
        self._ParseManifest()
        self._CreateManifestFile()

    def _ParseManifest(self):
        """Load and parse a manifest file.

        This information will be used to skip any files that have a skip or OK
        status.

        Raises:
          CommandException: if the file exists but cannot be read, or its
              header row lacks the 'Source'/'Result' columns.
        """
        try:
            if os.path.exists(self.manifest_path):
                # Note: we can't use io.open here or CSV reader will become
                # upset: https://stackoverflow.com/a/18449496
                with open(self.manifest_path, 'r') as f:
                    first_row = True
                    reader = csv.reader(f)
                    for row in reader:
                        if first_row:
                            try:
                                source_index = row.index('Source')
                                result_index = row.index('Result')
                            except ValueError:
                                # No header and thus not a valid manifest file.
                                raise CommandException(
                                    'Missing headers in manifest file: %s' %
                                    self.manifest_path)
                        first_row = False
                        source = row[source_index]
                        result = row[result_index]
                        if result in ['OK', 'skip']:
                            # We're always guaranteed to take the last result
                            # of a specific source url.
                            self.manifest_filter[source] = result
        except IOError:
            raise CommandException('Could not parse %s' % self.manifest_path)

    def WasSuccessful(self, src):
        """Returns whether the specified src url was marked as successful."""
        return src in self.manifest_filter

    def _CreateManifestFile(self):
        """Creates the manifest file with a header row if absent or empty.

        Raises:
          CommandException: if the file cannot be created.
        """
        try:
            if ((not os.path.exists(self.manifest_path)) or
                    (os.stat(self.manifest_path).st_size == 0)):
                # The csv module needs text mode with newline='' on Python 3
                # but binary mode on Python 2.
                if six.PY3:
                    f = open(self.manifest_path, 'w', newline='')
                else:
                    f = open(self.manifest_path, 'wb', 1)
                # 'with' guarantees the handle is closed even if the write
                # raises (previously the handle could leak on error).
                with f:
                    csv.writer(f).writerow(self._CSV_HEADERS)
        except IOError:
            raise CommandException('Could not create manifest file.')

    def Set(self, url, key, value):
        """Records *value* under *key* for the transfer identified by *url*.

        Passing None is a no-op so that existing information is never
        clobbered; pass '' instead of None to explicitly zero out a field.
        """
        if value is None:
            # In case we don't have any information to set we bail out here.
            return
        if url in self.items:
            self.items[url][key] = value
        else:
            self.items[url] = {key: value}

    def Initialize(self, source_url, destination_url):
        """Starts tracking a transfer, stamping its start time."""
        # Always use the source_url as the key for the item. This is unique.
        self.Set(source_url, 'source_uri', source_url)
        self.Set(source_url, 'destination_uri', destination_url)
        self.Set(source_url, 'start_time', datetime.datetime.utcnow())

    def SetResult(self, source_url, bytes_transferred, result, description=''):
        """Finalizes a transfer: records the outcome and flushes it to disk."""
        self.Set(source_url, 'bytes', bytes_transferred)
        self.Set(source_url, 'result', result)
        self.Set(source_url, 'description', description)
        self.Set(source_url, 'end_time', datetime.datetime.utcnow())
        self._WriteRowToManifestFile(source_url)
        self._RemoveItemFromManifest(source_url)

    def _WriteRowToManifestFile(self, url):
        """Writes a manifest entry to the manifest file for the url argument."""
        row_item = self.items[url]
        data = [
            row_item['source_uri'],
            row_item['destination_uri'],
            '%sZ' % row_item['start_time'].isoformat(),
            '%sZ' % row_item['end_time'].isoformat(),
            row_item['md5'] if 'md5' in row_item else '',
            row_item['upload_id'] if 'upload_id' in row_item else '',
            str(row_item['size']) if 'size' in row_item else '',
            str(row_item['bytes']) if 'bytes' in row_item else '',
            row_item['result'],
            row_item['description'],
        ]
        data = [six.ensure_str(value) for value in data]
        # Acquire a lock to prevent multiple threads writing to the same file
        # at the same time. This would cause a garbled mess in the manifest.
        with self.lock:
            if IS_WINDOWS and six.PY3:
                f = open(self.manifest_path, 'a', 1, newline='')
            else:
                f = open(self.manifest_path, 'a', 1)  # 1 == line buffered
            # 'with' closes the handle even when writerow raises (previously
            # the handle leaked on error).
            with f:
                csv.writer(f).writerow(data)

    def _RemoveItemFromManifest(self, url):
        # Remove the item from the dictionary since we're done with it and
        # we don't want the dictionary to grow too large in memory for no
        # good reason.
        del self.items[url]
class ItemExistsError(Exception):
    """Signals that an object was skipped because it already exists."""
class SkipUnsupportedObjectError(Exception):
    """Raised for objects skipped because their storage type is unsupported."""

    def __init__(self):
        super(SkipUnsupportedObjectError, self).__init__()
        # Subclasses overwrite this with the concrete storage class name.
        self.unsupported_type = 'Unknown'
class SkipGlacierError(SkipUnsupportedObjectError):
    """Raised for objects skipped because they are stored in GLACIER."""

    def __init__(self):
        super(SkipGlacierError, self).__init__()
        self.unsupported_type = 'GLACIER'
def GetPathBeforeFinalDir(url, exp_src_url):
    """Returns the path section before the final directory component of the URL.

    This handles cases for file system directories, bucket, and bucket
    subdirectories. Example: for gs://bucket/dir/ we'll return 'gs://bucket',
    and for file://dir we'll return file://

    Args:
      url: StorageUrl representing a filesystem directory, cloud bucket or
          bucket subdir.
      exp_src_url: StorageUrl representing the fully expanded object
          to-be-copied; used for resolving cloud wildcards.

    Returns:
      String name of above-described path, sans final path separator.
    """
    sep = url.delim
    if url.IsFileUrl():
        remainder = url.url_string[len('file://'):]
        # A bare name with no separator has no parent directory component.
        if sep not in remainder:
            return 'file://'
        return 'file://%s' % remainder.rstrip(sep).rpartition(sep)[0]
    if url.IsBucket():
        return '%s://' % url.scheme
    # Else it names a bucket subdir: drop the final component, then resolve
    # any wildcards against the expanded source URL.
    parent_path = url.url_string.rstrip(sep).rpartition(sep)[0]
    return ResolveWildcardsInPathBeforeFinalDir(parent_path, exp_src_url)
def ResolveWildcardsInPathBeforeFinalDir(src_url_path_sans_final_dir,
                                         exp_src_url):
    """Returns the path section for a bucket subdir with wildcards resolved.

    This handles cases for bucket subdirectories where the initial source URL
    contains a wildcard. In this case, src_url must be wildcard-expanded
    before calculating the final directory.

    Example:
      A bucket containing:
        gs://bucket/dir1/subdir/foo
        gs://bucket/dir2/subdir/foo
      with source URL gs://bucket/*/subdir and
      src_url_path_sans_final_dir gs://bucket/*
      should yield gs://bucket/dir1 or gs://bucket/dir2 according to the
      expanded source URL.

    Args:
      src_url_path_sans_final_dir: URL string with wildcards representing a
          bucket subdir as computed from GetPathBeforeFinalDir.
      exp_src_url: CloudUrl representing the fully expanded object
          to-be-copied.

    Returns:
      String name of above-described path, sans final path separator.
    """
    if not ContainsWildcard(src_url_path_sans_final_dir):
        return src_url_path_sans_final_dir
    # Substitute each wildcarded path segment with the corresponding segment
    # of the fully expanded source object path.
    wildcard_segments = StorageUrlFromString(
        src_url_path_sans_final_dir).object_name.split('/')
    expanded_segments = exp_src_url.object_name.split('/')
    resolved_segments = []
    for index, segment in enumerate(wildcard_segments):
        if ContainsWildcard(segment):
            # The expanded path is guaranteed to have at least as many path
            # segments as the wildcarded path, so indexing is safe.
            resolved_segments.append(expanded_segments[index])
        else:
            resolved_segments.append(segment)
    final_path_url = exp_src_url.Clone()
    final_path_url.object_name = '/'.join(resolved_segments)
    return final_path_url.url_string
def _GetPartitionInfo(file_size, max_components, default_component_size):
    """Computes the component count and size for a parallel transfer.

    Args:
      file_size: The number of bytes in the file to be partitioned.
      max_components: The maximum number of components that can be composed.
      default_component_size: The size of a component, assuming that
          max_components is infinite.

    Returns:
      Tuple (num_components, component_size). All components share the same
      size except possibly the last, which is smaller whenever file_size is
      not a multiple of num_components.
    """
    # Start from the count implied by the default component size ...
    requested_components = DivideAndCeil(file_size, default_component_size)
    # ... then clamp it into the allowed range [2, max_components].
    num_components = max(min(requested_components, max_components), 2)
    component_size = DivideAndCeil(file_size, num_components)
    return (num_components, component_size)
def _DeleteTempComponentObjectFn(cls, url_to_delete, thread_state=None):
    """Deletes one temporary component object; used with command.Apply.

    Args:
      cls: Command instance holding (or able to create) a Cloud API reference.
      url_to_delete: CloudUrl naming the temporary object to remove.
      thread_state: Optional per-thread Cloud API instance.
    """
    api = GetCloudApiInstance(cls, thread_state)
    try:
        api.DeleteObject(url_to_delete.bucket_name,
                         url_to_delete.object_name,
                         generation=url_to_delete.generation,
                         provider=url_to_delete.scheme)
    except NotFoundException:
        # The temporary object could already be gone if a retry was issued at
        # a lower layer but the original request succeeded. Barring other
        # errors, the top-level command should still report success, so don't
        # raise here.
        pass
def FilterExistingComponents(dst_args, existing_components, bucket_url,
                             gsutil_api):
    """Determines course of action for component objects.

    Given the list of all target objects based on partitioning the file and
    the list of objects that have already been uploaded successfully,
    this function determines which objects should be uploaded, which
    existing components are still valid, and which existing components should
    be deleted.

    Args:
      dst_args: The map of file_name -> PerformParallelUploadFileToObjectArgs
          calculated by partitioning the file.
      existing_components: A list of ObjectFromTracker objects that have been
          uploaded in the past.
      bucket_url: CloudUrl of the bucket in which the components exist.
      gsutil_api: gsutil Cloud API instance to use for retrieving object
          metadata.

    Returns:
      components_to_upload: List of components that need to be uploaded.
      uploaded_components: List of components that have already been
          uploaded and are still valid. Each element of the list contains the
          dst_url for the uploaded component and its size.
      existing_objects_to_delete: List of components that have already been
          uploaded, but are no longer valid and are in a versioned bucket, and
          therefore should be deleted.
    """
    components_to_upload = []
    existing_component_names = [
        component.object_name for component in existing_components
    ]
    # Any partition without a previously uploaded counterpart must be
    # uploaded from scratch.
    for component_name in dst_args:
        if component_name not in existing_component_names:
            components_to_upload.append(dst_args[component_name])

    # Tracks tracker-file entries already dispatched, so duplicate entries
    # for the same object name are treated as stale (see below).
    objects_already_chosen = []

    # Don't reuse any temporary components whose MD5 doesn't match the current
    # MD5 of the corresponding part of the file. If the bucket is versioned,
    # also make sure that we delete the existing temporary version.
    existing_objects_to_delete = []
    uploaded_components = []
    for tracker_object in existing_components:
        if (tracker_object.object_name not in dst_args.keys() or
                tracker_object.object_name in objects_already_chosen):
            # This could happen if the component size has changed. This also
            # serves to handle object names that get duplicated in the tracker
            # file due to people doing things they shouldn't (e.g.,
            # overwriting an existing temporary component in a versioned
            # bucket).
            url = bucket_url.Clone()
            url.object_name = tracker_object.object_name
            url.generation = tracker_object.generation
            existing_objects_to_delete.append(url)
            continue
        dst_arg = dst_args[tracker_object.object_name]
        file_part = FilePart(dst_arg.filename, dst_arg.file_start,
                             dst_arg.file_length)
        # TODO: calculate MD5's in parallel when possible.
        content_md5 = CalculateB64EncodedMd5FromContents(file_part)
        try:
            # Get the MD5 of the currently-existing component.
            dst_url = dst_arg.dst_url
            dst_metadata = gsutil_api.GetObjectMetadata(
                dst_url.bucket_name,
                dst_url.object_name,
                generation=dst_url.generation,
                provider=dst_url.scheme,
                fields=['customerEncryption', 'etag', 'md5Hash'])
            cloud_md5 = dst_metadata.md5Hash
        except Exception:  # pylint: disable=broad-except
            # We don't actually care what went wrong - we couldn't retrieve
            # the object to check the MD5, so just upload it again.
            cloud_md5 = None

        if cloud_md5 != content_md5:
            # Stale component: re-upload it, and schedule deletion of the old
            # version when the bucket is versioned.
            components_to_upload.append(dst_arg)
            objects_already_chosen.append(tracker_object.object_name)
            if tracker_object.generation:
                # If the old object doesn't have a generation (i.e., it isn't
                # in a versioned bucket), then we will just overwrite it
                # anyway.
                invalid_component_with_generation = dst_arg.dst_url.Clone()
                invalid_component_with_generation.generation = tracker_object.generation
                existing_objects_to_delete.append(invalid_component_with_generation)
        else:
            # MD5 matches: the previously uploaded component can be reused.
            url = dst_arg.dst_url.Clone()
            url.generation = tracker_object.generation
            uploaded_components.append((url, dst_arg.file_length))
            objects_already_chosen.append(tracker_object.object_name)

    if uploaded_components:
        logging.info('Found %d existing temporary components to reuse.',
                     len(uploaded_components))

    return (components_to_upload, uploaded_components, existing_objects_to_delete)
| {
"content_hash": "2914a5a20f82e319a6e66ad630af0d6f",
"timestamp": "",
"source": "github",
"line_count": 4335,
"max_line_length": 130,
"avg_line_length": 42.09711649365629,
"alnum_prop": 0.6492703749773961,
"repo_name": "endlessm/chromium-browser",
"id": "97a13962c21a908fa9034c6c8cf9aec02e99fad1",
"size": "182491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/third_party/gsutil/gslib/utils/copy_helper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from zerver.lib.test_classes import WebhookTestCase
from zerver.models import get_realm, get_user
class WordPressHookTests(WebhookTestCase):
    """Exercises the WordPress (HookPress) webhook integration."""

    STREAM_NAME = 'wordpress'
    URL_TEMPLATE = "/api/v1/external/wordpress?api_key={api_key}"
    FIXTURE_DIR_NAME = 'wordpress'

    def verify_fixture_message(self, fixture: str, topic: str,
                               message: str) -> None:
        # Every positive-path test posts a form-encoded fixture and checks
        # the resulting stream message.
        self.send_and_test_stream_message(
            fixture, topic, message,
            content_type="application/x-www-form-urlencoded")

    def test_publish_post(self) -> None:
        self.verify_fixture_message(
            'publish_post',
            u"WordPress Post",
            u"New post published.\n[New Blog Post](http://example.com\n)")

    def test_publish_post_type_not_provided(self) -> None:
        self.verify_fixture_message(
            'publish_post_type_not_provided',
            u"WordPress Post",
            u"New post published.\n[New Blog Post](http://example.com\n)")

    def test_publish_post_no_data_provided(self) -> None:
        # Note: the fixture includes 'hook=publish_post' because it's always
        # added by HookPress.
        self.verify_fixture_message(
            'publish_post_no_data_provided',
            u"WordPress Notification",
            u"New post published.\n[New WordPress Post](WordPress Post URL)")

    def test_publish_page(self) -> None:
        self.verify_fixture_message(
            'publish_page',
            u"WordPress Page",
            u"New page published.\n[New Blog Page](http://example.com\n)")

    def test_user_register(self) -> None:
        self.verify_fixture_message(
            'user_register',
            u"New Blog Users",
            u"New blog user registered.\nName: test_user\nemail: test_user@example.com")

    def test_wp_login(self) -> None:
        self.verify_fixture_message(
            'wp_login',
            u"New Login",
            u"User testuser logged in.")

    def test_unknown_action_no_data(self) -> None:
        # Mimic send_and_test_stream_message() to manually execute a negative
        # test. Otherwise its call to send_json_payload() would assert on the
        # non-success we are testing. The fixture for this test is an empty
        # file, and the expected result is the webhook's error message.
        self.subscribe(self.test_user, self.STREAM_NAME)
        post_params = {'stream_name': self.STREAM_NAME,
                       'content_type': 'application/x-www-form-urlencoded'}
        result = self.client_post(self.url, 'unknown_action', **post_params)
        self.assert_json_error(
            result, "Unknown WordPress webhook action: WordPress Action")

    def test_unknown_action_no_hook_provided(self) -> None:
        # Like test_unknown_action_no_data, except the fixture carries valid
        # blog post params but omits the hook parameter; still an error.
        self.subscribe(self.test_user, self.STREAM_NAME)
        post_params = {'stream_name': self.STREAM_NAME,
                       'content_type': 'application/x-www-form-urlencoded'}
        result = self.client_post(self.url, 'unknown_action', **post_params)
        self.assert_json_error(
            result, "Unknown WordPress webhook action: WordPress Action")

    def get_body(self, fixture_name: str) -> str:
        return self.fixture_data("wordpress", fixture_name, file_type="txt")
| {
"content_hash": "e329315869a3bc9d29efd59ef7316ac1",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 102,
"avg_line_length": 47.02197802197802,
"alnum_prop": 0.6326244449637766,
"repo_name": "mahim97/zulip",
"id": "85927876837bf75b6a0648015d424c9fa0e20fe0",
"size": "4303",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "zerver/webhooks/wordpress/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "299188"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "542463"
},
{
"name": "JavaScript",
"bytes": "1605569"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3510480"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
import torch
from torch.utils.data import Dataset
class MimicDatasetSynthetic(Dataset):
    """Synthetic BERT-style dataset of random token sequences.

    Produces ``num_docs`` examples. Each example is a dict holding a
    LongTensor ``tokens`` row (values in [0, num_vocab]), a random 0/1
    ``masks`` row, an all-zero ``seg_ids`` row (each of length
    ``doc_length``), and a multi-hot float ``label`` of size ``num_classes``.
    """

    def __init__(self, doc_length=512, num_vocab=1_000, num_docs=100, num_classes=10):
        self.doc_length = doc_length
        self.num_vocab = num_vocab
        self.num_docs = num_docs
        self.num_classes = num_classes
        # All tensors are materialized up front and indexed in __getitem__.
        self.docs = self.create_docs(doc_length, num_vocab, num_docs)
        self.masks = self.create_masks(doc_length, num_docs)
        self.segment_ids = self.create_segment_ids(doc_length, num_docs)
        self.labels = self.create_labels(num_classes, num_docs)

    def __repr__(self):
        # The "MimicRandom" name and the trailing space reproduce the
        # historical format exactly.
        return (f"MimicRandom(doc_length={self.doc_length}, "
                f"num_vocab={self.num_vocab}, num_docs={self.num_docs}, "
                f"num_classes={self.num_classes}) ")

    def __len__(self):
        return self.num_docs

    def __getitem__(self, idx):
        return {
            "tokens": self.docs[idx],
            "masks": self.masks[idx],
            "seg_ids": self.segment_ids[idx],
            "label": self.labels[idx],
        }

    def random_doc(self, length, num_vocab):
        """One random token sequence, drawn uniformly from [0, num_vocab]."""
        return torch.LongTensor(length).random_(0, num_vocab + 1)

    def create_docs(self, length, num_vocab, num_docs):
        """Stacks num_docs random token rows into a (num_docs, length) tensor."""
        return torch.stack(
            [self.random_doc(length, num_vocab) for _ in range(num_docs)])

    def random_mask(self, length):
        """One random 0/1 attention-mask row."""
        return torch.LongTensor(length).random_(0, 2)

    def create_masks(self, length, num_docs):
        """Stacks num_docs random mask rows into a (num_docs, length) tensor."""
        return torch.stack([self.random_mask(length) for _ in range(num_docs)])

    def empty_segment_id(self, length):
        """An all-zero segment-id row (single-segment documents)."""
        return torch.zeros(length, dtype=torch.long)

    def create_segment_ids(self, length, num_docs):
        """Stacks num_docs zero rows into a (num_docs, length) tensor."""
        return torch.stack(
            [self.empty_segment_id(length) for _ in range(num_docs)])

    def random_multitask_label(self, num_classes):
        """One random multi-hot label vector with entries in {0., 1.}."""
        return torch.FloatTensor(num_classes).random_(0, 2)

    def create_labels(self, num_classes, num_docs):
        """Stacks num_docs label vectors into a (num_docs, num_classes) tensor."""
        return torch.stack(
            [self.random_multitask_label(num_classes) for _ in range(num_docs)])
| {
"content_hash": "dcba3391550724d9685dbb6f1a165369",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 86,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6129476584022039,
"repo_name": "ECP-CANDLE/Benchmarks",
"id": "bba8890548da1c4c5910b247ecfb8c6f878d5c2b",
"size": "2178",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pilot3/P3B6/random_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1362421"
},
{
"name": "R",
"bytes": "6354"
},
{
"name": "Shell",
"bytes": "6751"
}
],
"symlink_target": ""
} |
"""
A model for testing
"""
from django.db import models
from fields import MACAddressField
class NetworkThingy(models.Model):
    """Minimal test model holding a single MAC address field."""

    mac = MACAddressField()

    def __unicode__(self):
        # Python 2 unicode protocol: display the instance as its MAC address.
        return "%s" % self.mac
| {
"content_hash": "631a56ae3f8439eae9274542869011e6",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 34,
"avg_line_length": 19.545454545454547,
"alnum_prop": 0.6790697674418604,
"repo_name": "jimfunk/django-macaddress",
"id": "06a95e9d1b558361861c7c32432c2d442ef9dde3",
"size": "215",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "macaddress/tests/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3640"
}
],
"symlink_target": ""
} |
from django.test import RequestFactory
from django_th.tests.test_main import MainTest
import requests
from th_wallabag.models import Wallabag
from th_wallabag.forms import WallabagProviderForm, WallabagConsumerForm
from th_wallabag.my_wallabag import ServiceWallabag
from unittest.mock import patch
class WallabagTest(MainTest):
    """Unit tests covering the Wallabag model and its forms."""

    def setUp(self):
        super(WallabagTest, self).setUp()
        self.token = 'AZERTY1234'
        self.trigger_id = 1
        self.data = {'link': 'http://foo.bar/some/thing/else/what/else',
                     'title': 'what else'}

    def test_wallabag(self):
        """Creating a Wallabag tied to a trigger service should succeed."""
        trigger = self.create_triggerservice()
        wallabag = self.create_wallabag(trigger)
        self.assertTrue(isinstance(wallabag, Wallabag))
        self.assertEqual(wallabag.show(), "My Wallabag %s" % wallabag.url)
        self.assertEqual(wallabag.__str__(), "%s" % wallabag.url)

    # Form tests

    def test_valid_provider_form(self):
        """The provider form should validate given a wallabag URL."""
        trigger = self.create_triggerservice()
        wallabag = self.create_wallabag(trigger)
        form = WallabagProviderForm(data={'url': wallabag.url})
        self.assertTrue(form.is_valid())

    def test_valid_consumer_form(self):
        """The consumer form should validate given a wallabag URL."""
        trigger = self.create_triggerservice()
        wallabag = self.create_wallabag(trigger)
        form = WallabagConsumerForm(data={'url': wallabag.url})
        self.assertTrue(form.is_valid())
class ServiceWallabagTest(WallabagTest):
    """Tests for the ServiceWallabag integration, mocking outbound HTTP."""

    def setUp(self):
        super(ServiceWallabagTest, self).setUp()
        # Factory for building fake Django requests in (commented-out) tests.
        self.factory = RequestFactory()
        # self.user = User.objects.get(username='john')

    def test_read_data(self):
        """read_data should GET the entries endpoint with default params."""
        self.create_triggerservice()
        kwargs = dict({'date_triggered': '2013-05-11 13:23:58+00:00',
                       'trigger_id': self.trigger_id,
                       'user': self.user,
                       'model_name': 'Wallabag'})
        self.token = 'AZERTY1234'
        # Expected query parameters for the wallabag entries API call.
        params = dict({'access_token': self.token,
                       'archive': 0,
                       'star': 0,
                       'delete': 0,
                       'sort': 'created',
                       'order': 'desc',
                       'page': 1,
                       'perPage': 30,
                       'tags': []})
        with patch.object(requests, 'get') as mock_read_data:
            mock_read_data.return_value.status_code = 200
            se = ServiceWallabag(self.token)
            se.read_data(**kwargs)
            mock_read_data.assert_called_once_with('http://localhost/api'
                                                   '/entries.json',
                                                   params=params)

    def test_save_data(self):
        """save_data should delegate to the wall() client factory."""
        self.create_triggerservice()
        kwargs = {'title': 'foobar', 'link': 'https://google.com'}
        with patch.object(ServiceWallabag, 'wall') as mock_save_data:
            se = ServiceWallabag()
            se.save_data(self.trigger_id, **kwargs)
            mock_save_data.assert_called_once_with()

    # NOTE(review): the block below is dead code kept as a bare string;
    # presumably disabled pending fixes to the auth/callback flow.
    """
    def test_auth(self):
        self.create_triggerservice()
        request = self.factory.get('/wallabag_callback')
        callback_url = 'http://%s%s' % (request.get_host(),
                                        reverse('wallabag_callback'))
        request.user = self.user
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        request.session = session
        se = ServiceWallabag()
        response = se.auth(request)
        self.assertEqual(response, callback_url)

    def test_callback(self):
        self.create_triggerservice()
        request = self.factory.get('/wallabag_callback')
        request.user = self.user
        se = ServiceWallabag()
        response = se.callback(request, **{})
        self.assertEqual(response, 'wallabag/callback.html')

        # request.user = User.objects.create(id=99)
        # response = se.callback(request, **{})
        # self.assertEqual(response, '/')

    def test_create_entry(self):
        data = dict({'link': ''})
        title = ''
        tags = ''
        se = ServiceWallabag()
        response = se._create_entry(title, data, tags)
        self.assertTrue(type(response) is bool)
    """
| {
"content_hash": "e223474281bc1bdf1f3541efc38d5bbd",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 32.52554744525548,
"alnum_prop": 0.5536355475763016,
"repo_name": "foxmask/django-th",
"id": "c60f75275152eaa7022803f897099e5fd1befbf5",
"size": "4472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "th_wallabag/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1654"
},
{
"name": "Dockerfile",
"bytes": "357"
},
{
"name": "HTML",
"bytes": "188416"
},
{
"name": "JavaScript",
"bytes": "796"
},
{
"name": "Python",
"bytes": "397000"
}
],
"symlink_target": ""
} |
"""
Created on Wed Nov 9 10:40:36 2016
@authors: Danny Neil, iulialexandra
@contact: iulialexandralungu@gmail.com
Utility file used to transform a series of AVI files into an LMDB for
training in Caffe.
Uses categories {paper, rock, scissors, background} for classification.
"""
import os
import os.path
import imageio
import caffe
import lmdb
import os
import argparse
import time
import sys
import numpy as np
# LMDB keys are 10-digit zero-padded decimal strings so that lexicographic
# key ordering matches numeric insertion order.
DB_KEY_FORMAT = "{:0>10d}"
def create_label_files(label_filenames, num_frames, working_dir, labels_dir):
    """Creates one .txt file per AVI movie containing a class label per frame.

    Each output file holds ``num_frames[i]`` lines of the form `` <class>``,
    where the class id is derived from the prefix of the label filename:
    paper=0, scissors=1, rock=2, background=3.

    Parameters
    ----------
    label_filenames: list of filenames, one for each AVI movie,
        where the labels will be saved
    num_frames: list of frame counts, one per AVI movie
    working_dir: working directory, under which the labels directory is made
    labels_dir: name of the subdirectory that receives the label files
    """
    # Filename prefix -> integer class id (replaces four copy-pasted
    # if/loop blocks; at most one prefix matches a given filename).
    class_ids = {'paper': 0, 'scissors': 1, 'rock': 2, 'background': 3}
    labels_dir = os.path.join(working_dir, labels_dir)
    if not os.path.exists(labels_dir):
        os.makedirs(labels_dir)
    for idx, f in enumerate(label_filenames):
        # The category is the part of the filename before the first '_'.
        label = f.split("_")[0]
        # 'with' guarantees the file is closed even on error (the original
        # used explicit open()/close()).
        with open(os.path.join(labels_dir, f), "w+") as fout:
            for prefix, class_id in class_ids.items():
                if label.startswith(prefix):
                    # One " <class>\n" line per frame of the movie.
                    fout.write((" %d\n" % class_id) * num_frames[idx])
def create_workfile(dir_to_walk, working_dir, workfile, labels_dir):
    """Traverses a directory tree and lists all .avi files in a workfile.

    Each workfile line pairs an AVI path with the path of its per-frame label
    file (which this function also creates via create_label_files).

    Parameters
    ----------
    dir_to_walk: directory to traverse in search for .avi files
    working_dir: directory where the workfile will be saved
    workfile: name of the workfile
    labels_dir: directory where the labels files will be saved
    """
    import math  # hoisted out of the inner loop (was re-imported per file)

    recordings = []
    num_frames = []
    label_filenames = []
    for dirpath, dirnames, filenames in os.walk(dir_to_walk):
        for filename in (f for f in filenames if f.endswith(".avi")):
            avi_path = os.path.join(dirpath, filename)
            recordings.append(avi_path)
            label_filenames.append(filename.split('.')[0] + '_label.txt')
            vid = imageio.get_reader(avi_path, 'ffmpeg')
            num_frames.append(vid._meta['nframes'])
            # ffmpeg reports inf when the frame count is unknown; the
            # pipeline cannot handle such movies, so bail out early.
            if math.isinf(vid._meta['nframes']):
                print("The following avi movie has too many frames: ",
                      label_filenames[-1])
                sys.exit()
    create_label_files(label_filenames, num_frames, working_dir, labels_dir)
    # 'with' guarantees the workfile is closed even on error.
    with open(os.path.join(working_dir, workfile), "w+") as fout:
        for idx, rec in enumerate(recordings):
            fout.write(rec + ' ' + os.path.join(working_dir, labels_dir,
                                                label_filenames[idx] + "\n"))
def avi_to_frame_list(avi_filename, gray):
    """Loads an AVI movie as a list of (channels, height, width) frames.

    Parameters
    ----------
    avi_filename: path of the AVI movie to read
    gray: when True, frames are averaged over the channel axis and returned
        with a single channel; otherwise all three RGB channels are kept
    """
    print('Loading {}'.format(avi_filename))
    vid = imageio.get_reader(avi_filename, 'ffmpeg')
    if gray:
        # Move channels first, then collapse them to one by averaging.
        frames = [np.mean(np.moveaxis(frame, 2, 0), axis=0, keepdims=True)
                  for frame in vid.iter_data()]
        print('Loaded grayscale images.')
        return frames
    frames = [np.moveaxis(frame, 2, 0) for frame in vid.iter_data()]
    print('Loaded RGB images.')
    return frames
def label_file_to_labels(label_filename):
    """Reads one integer class label per line from *label_filename*."""
    with open(label_filename, 'r') as label_file:
        # int() tolerates the leading space and trailing newline per line.
        return [int(line) for line in label_file]
def read_data_from_LMDB(read_db, max_to_read=None):
    """Reads entries from an LMDB database into a list.

    Entries are returned in raw (serialized, non-human-readable) form; use
    Caffe's tools to decode them. The database handle is closed on return.

    Parameters
    ----------
    read_db: an open lmdb environment to read from (closed by this function)
    max_to_read: number of entries to read; defaults to all entries

    Returns
    -------
    List of raw entry values in key order.
    """
    data = []
    with read_db.begin() as txn:
        if max_to_read is None:
            max_to_read = txn.stat()['entries']
        cursor = txn.cursor()
        it = cursor.iternext(keys=False, values=True)
        for counter in range(max_to_read):
            if counter % 100000 == 0:
                print("Reading entry {}".format(counter))
            data.append(it.item())
            # next(it) works on both Python 2 and 3; the original it.next()
            # only exists on Python 2 iterators.
            next(it)
    read_db.close()
    return data
def shuffle_LMDB(in_lmdb_name, LMDB_path):
    """Creates a new LMDB whose entries are a random permutation of another.

    The shuffled database is written next to the input one, with its name
    prefixed by ``shuffled_``.
    """
    print("Shuffling database {}".format(in_lmdb_name))
    source_db = lmdb.open(os.path.join(LMDB_path, in_lmdb_name), readonly=True)
    target_db = lmdb.open(os.path.join(LMDB_path,
                                       'shuffled_' + in_lmdb_name))
    with source_db.begin() as source_txn:
        num_entries = source_txn.stat()['entries']
        # Random permutation mapping output index -> source index.
        permutation = np.arange(num_entries)
        np.random.shuffle(permutation)
        with target_db.begin(write=True) as target_txn:
            for out_index in range(num_entries):
                if out_index % 100000 == 0:
                    print(out_index)
                source_key = DB_KEY_FORMAT.format(permutation[out_index])
                entry = source_txn.get(source_key)
                target_txn.put(DB_KEY_FORMAT.format(out_index), entry)
    source_db.close()
    target_db.close()
def write_data_to_lmdb(db, data_in, labels_in, curr_idx):
    """Serializes image/label pairs into an LMDB and returns the next key.

    Parameters
    ----------
    db: LMDB environment to write the data into
    data_in: sequence of image arrays
    labels_in: sequence of integer labels, parallel to data_in
    curr_idx: first integer key to use for the new entries

    Returns
    -------
    The key index following the last written entry.
    """
    with db.begin(write=True) as txn:
        # Index-based iteration (rather than zip) so a labels list shorter
        # than the data still raises, as before.
        for position in range(len(data_in)):
            datum = caffe.io.array_to_datum(
                data_in[position].astype('uint8'),
                label=int(labels_in[position]))
            txn.put(DB_KEY_FORMAT.format(curr_idx), datum.SerializeToString())
            curr_idx += 1
    return curr_idx
def write_categ_lmdb(workfile, categories, LMDB_path, gray):
    """Given a workfile containing the AVI movies and the labels for each
    frame, this function creates an LMDB for each classification category.

    Parameters
    ----------
    workfile: text file with one "<avi_path> <label_path>" pair per line
    categories: list of category names; also used as LMDB directory names
    LMDB_path: directory under which the per-category LMDBs are created
    gray: forwarded to avi_to_frame_list; True loads single-channel frames

    Returns
    -------
    Array with the number of entries written to each category's LMDB.
    """
    # Open databases
    categ_db = []
    for idx, categ in enumerate(categories):
        categ_db.append(
            lmdb.open(os.path.join(LMDB_path, categ), map_size=int(1e12),
                      map_async=True, writemap=True, meminit=False))
    # Per-category write cursor: next free key index in each LMDB.
    curr_idx = np.zeros(len(categories), dtype='int')
    with open(workfile, 'r') as f:
        for line in f.readlines():
            # Load work to do
            avi_file, label_file = line.strip().split(' ')
            # Convert to data
            file_frames = avi_to_frame_list(avi_file, gray)
            file_labels = label_file_to_labels(label_file)
            # Quick check the lengths
            assert len(file_frames) == len(file_labels), \
                'Frames and Labels do not match in length!'
            # Write data in each LMDB corresponding to the .avi category.
            # The category is the prefix of the AVI's base filename,
            # e.g. ".../rock_01.avi" -> "rock".
            db_label = avi_file.split("/")[-1].split("_")[0]
            index = categories.index(db_label)
            curr_idx[index] = write_data_to_lmdb(categ_db[index],
                                                 file_frames, file_labels,
                                                 curr_idx[index])
    return curr_idx
def write_train_test_lmdb(categories, LMDB_path, train_test_split, num_rot):
    """Entire pipeline of train and test LMDB creation.

    Reads the per-category LMDBs created earlier, splits each category into
    train/test (capped at the smallest category so classes stay balanced),
    augments every image with ``num_rot`` successive 90-degree rotations,
    and writes everything into 'train' and 'test' LMDBs under shuffled keys.
    """
    # Shadows the module-level key format on purpose (same value is assumed).
    DB_KEY_FORMAT = "{:0>10d}"
    # Count the entries available in each category LMDB.
    curr_idx = np.zeros(len(categories), dtype='int')
    for c_idx, categ in enumerate(categories):
        categ_db = lmdb.open(os.path.join(LMDB_path, categ), readonly=True)
        with categ_db.begin() as txn:
            curr_idx[c_idx] = txn.stat()['entries']
        categ_db.close()
    # Use the smallest category size so every class contributes equally.
    min_idx = min(curr_idx)
    print("Number of images for each category: ", curr_idx)
    train_idx = int(train_test_split * min_idx)
    print("train samples ", train_idx)
    test_idx = min_idx - train_idx
    print("test samples ", test_idx)
    indices_categ = np.arange(min_idx)
    print(len(categories))
    # Pre-shuffled destination keys: each source image produces
    # (num_rot + 1) output images (original + rotations), so the key space
    # is categories * split_size * (num_rot + 1).
    indices_train_database = np.arange(
        len(categories) * train_idx * (num_rot + 1))
    print("total training samples ", len(indices_train_database))
    np.random.shuffle(indices_train_database)
    indices_test_database = np.arange(
        len(categories) * test_idx * (num_rot + 1))
    print("total test samples", len(indices_test_database))
    np.random.shuffle(indices_test_database)
    # Open databases
    train_db = lmdb.open(os.path.join(LMDB_path, 'train'), map_size=int(1e12),
                         map_async=True, writemap=True, meminit=False)
    test_db = lmdb.open(os.path.join(LMDB_path, 'test'), map_size=int(1e12),
                        map_async=True, writemap=True, meminit=False)
    last = time.time()
    # Iterate over the category LMDBs
    curr_train_idx, curr_test_idx = 0, 0
    for idx, categ in enumerate(categories):
        categ_db = lmdb.open(os.path.join(LMDB_path, categ), readonly=True)
        with categ_db.begin() as categ_txn:
            print("Writing {} samples to database".format(categ))
            # Shuffle which source entries of this category go to train/test.
            np.random.shuffle(indices_categ)
            # Populating training LMDB
            with train_db.begin(write=True) as in_txn_train:
                for i in range(train_idx):
                    if i % 100000 == 0:
                        elapsed = time.time() - last
                        last = time.time()
                        print(
                            "Wrote {} training samples to database in {} "
                            "seconds".format(
                                i * (num_rot + 1),
                                elapsed))
                    # Copy the original image under a shuffled output key.
                    in_idx = indices_categ[i]
                    in_key = DB_KEY_FORMAT.format(in_idx)
                    in_item = categ_txn.get(in_key)
                    out_key = DB_KEY_FORMAT.format(indices_train_database[i * (
                        num_rot + 1) + curr_train_idx])
                    in_txn_train.put(out_key, in_item)
                    # Rotate images by 90 degrees
                    datum = caffe.proto.caffe_pb2.Datum()
                    datum.ParseFromString(in_item)
                    assert datum.channels == 1, "The algorithm currently only " \
                                                "works for 1-channel images"
                    # NOTE(review): np.fromstring is deprecated in modern
                    # numpy in favor of np.frombuffer.
                    flat_x = np.fromstring(datum.data, dtype=np.uint8)
                    x = flat_x.reshape(datum.height, datum.width)
                    y = datum.label
                    for rotation_idx in range(num_rot):
                        # Each iteration adds another 90-degree rotation of
                        # the same image, with the same label.
                        x = np.rot90(x)
                        out_image = np.reshape(x, (
                            datum.channels, datum.height, datum.width))
                        im_dat = caffe.io.array_to_datum(
                            out_image.astype('uint8'),
                            label=int(y))
                        out_key = DB_KEY_FORMAT.format(indices_train_database[
                                                           i * (
                                                               num_rot + 1) +
                                                           curr_train_idx
                                                           + rotation_idx + 1])
                        in_txn_train.put(out_key, im_dat.SerializeToString())
            curr_train_idx += train_idx * (num_rot + 1)
            # Populating test LMDB: same scheme, using the entries after the
            # first `train_idx` shuffled positions of this category.
            with test_db.begin(write=True) as in_txn_test:
                for i in range(test_idx):
                    if i % 100000 == 0:
                        print("Wrote {} testing samples to database".format(
                            i * (num_rot + 1)))
                    in_idx = indices_categ[train_idx + i]
                    in_key = DB_KEY_FORMAT.format(in_idx)
                    in_item = categ_txn.get(in_key)
                    out_key = DB_KEY_FORMAT.format(indices_test_database[i * (
                        num_rot + 1) + curr_test_idx])
                    in_txn_test.put(out_key, in_item)
                    datum = caffe.proto.caffe_pb2.Datum()
                    datum.ParseFromString(in_item)
                    assert datum.channels == 1, "The algorithm currently only " \
                                                "works for 1-channel images"
                    flat_x = np.fromstring(datum.data, dtype=np.uint8)
                    x = flat_x.reshape(datum.height, datum.width)
                    y = datum.label
                    for rotation_idx in range(num_rot):
                        x = np.rot90(x)
                        out_image = np.reshape(x, (
                            datum.channels, datum.height, datum.width))
                        im_dat = caffe.io.array_to_datum(
                            out_image.astype('uint8'),
                            label=int(y))
                        out_key = DB_KEY_FORMAT.format(indices_test_database[
                                                           i * (
                                                               num_rot + 1) +
                                                           curr_test_idx +
                                                           rotation_idx + 1])
                        in_txn_test.put(out_key, im_dat.SerializeToString())
            curr_test_idx += test_idx * (num_rot + 1)
            print('Wrote so far: {} train, {} test.\n'.format(curr_train_idx,
                                                              curr_test_idx))
        categ_db.close()
    test_db.close()
    train_db.close()
if __name__ == '__main__':
    # Command-line entry point: build per-category LMDBs from .avi
    # recordings, then assemble shuffled, rotation-augmented train/test LMDBs.
    parser = argparse.ArgumentParser(description='Turn a list of movies into\
                                     an LMDB.')
    parser.add_argument('--seed', default=42, type=int, help='Initialize the\
                        random seed of the run (for reproducibility).')
    parser.add_argument('--workfile', default='workfile_train.txt',
                        help='File which is a list of movie files\
                        and labels to process,\
                        one per line, separated by four spaces')
    parser.add_argument('--categories', default=['paper', 'scissors', 'rock',
                                                 'background'],
                        help='List of categories used for classification')
    parser.add_argument('--LMDB_path', default='./lmdb_train/',
                        help='Where to write out the LMDB dataset.')
    parser.add_argument('--avi_dir', default='./recordings',
                        help='Where to look for .avi files to add to the LMDB')
    parser.add_argument('--working_dir', default='.',
                        help='Where to create the workfile used for creating '
                             'the LMDB')
    parser.add_argument('--labels_dir', default='labels_train',
                        help='Where to create the labels files')
    parser.add_argument('--train_test_split', default=0.8, type=float,
                        help='Where to split the data (not shuffled).')
    parser.add_argument('--num_rotations', default=3, type=int,
                        help='How many 90 degree rotations to perform for '
                             'each image')
    # NOTE(review): no type= is given, so any string passed on the command
    # line (including "False") is truthy; only the default is a real bool.
    parser.add_argument('--gray', default=True,
                        help='If the input data is grayscale, the output' +
                             ' LMDB images will have only one channel. '
                             'Otherwise' +
                             ' it will have 3 channels.')
    args = parser.parse_args()
    # Set seed
    np.random.seed(args.seed)
    # Make the output directory
    if not os.path.exists(args.LMDB_path):
        os.makedirs(args.LMDB_path)
    # Make the workfile used to create the LMDBs
    create_workfile(args.avi_dir, args.working_dir, args.workfile,
                    args.labels_dir)
    # Create the LMDBs
    start_time = time.time()
    curr_idx = write_categ_lmdb(args.workfile, args.categories,
                                args.LMDB_path, args.gray)
    print("Number of samples for each category:{}".format(curr_idx))
    write_train_test_lmdb(args.categories, args.LMDB_path,
                          args.train_test_split, args.num_rotations)
    print('Finished in {}s.'.format(time.time() - start_time))
"content_hash": "847b1b0295114711178a86be7af1acdf",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 81,
"avg_line_length": 41.75305623471883,
"alnum_prop": 0.5322948995725244,
"repo_name": "NeuromorphicProcessorProject/snn_toolbox",
"id": "271beea3644aea3b579dc1e75ac49b81b1f3652f",
"size": "17101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snntoolbox/datasets/aedat/avi_to_lmdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "700830"
}
],
"symlink_target": ""
} |
from sqlalchemy.sql import func as fn
from sqlalchemy import sql
from ..translate import (
# data types
SqlColumn, SqlColumnAgg,
AggOver,
# transformations
wrap_annotate,
sql_agg,
win_agg,
win_cumul,
sql_not_impl,
# wiring up translator
extend_base,
SqlTranslator
)
from .postgresql import (
PostgresqlColumn,
PostgresqlColumnAgg,
)
from .base import sql_func_rank
# Data ========================================================================

class DuckdbColumn(PostgresqlColumn):
    """Scalar (element-wise) column translations for DuckDB.

    Inherits all behavior from the postgresql dialect.
    """


class DuckdbColumnAgg(PostgresqlColumnAgg, DuckdbColumn):
    """Aggregate column translations for DuckDB, layered on the postgresql
    aggregates plus the DuckDB scalar overrides."""
# Annotations =================================================================
def returns_int(func_names):
    """Re-register each named op for DuckdbColumn with an ``int`` result type.

    Takes the generic SqlColumn implementation of every op in *func_names*
    and registers an annotation-wrapped copy on DuckdbColumn.
    """
    # TODO: MC-NOTE - shift all translations to directly register
    # TODO: MC-NOTE - make an AliasAnnotated class or something, that signals
    # it is using another method, but w/ an updated annotation.
    from siuba.ops import ALL_OPS

    for op_name in func_names:
        op = ALL_OPS[op_name]
        base_impl = op.dispatch(SqlColumn)
        op.register(DuckdbColumn, wrap_annotate(base_impl, result_type="int"))
# Translations ================================================================
def sql_quantile(is_analytic=False):
    """Return a translation function computing a quantile via percentile_cont.

    Parameters
    ----------
    is_analytic: when True, the ordered-set aggregate is wrapped in AggOver
        so it can be used as a window expression.
    """
    # Ordered and theoretical set aggregates
    sa_func = getattr(sql.func, "percentile_cont")

    def f_quantile(codata, col, q, *args):
        if args:
            raise NotImplementedError("Quantile only supports the q argument.")
        if not isinstance(q, (int, float)):
            raise TypeError("q argument must be int or float, but received: %s" %type(q))

        # as far as I can tell, there's no easy way to tell sqlalchemy to render
        # the exact text a dialect would render for a literal (except maybe using
        # literal_column), so use the classic sql.text.
        q_text = sql.text(str(q))

        if is_analytic:
            # BUG FIX: q_text is already a TextClause; wrapping it in
            # sql.text() a second time raises, since sql.text() expects a
            # plain string.
            return AggOver(sa_func(q_text).within_group(col))

        return sa_func(q_text).within_group(col)

    return f_quantile
# scalar ----
# Element-wise overrides that differ from the inherited postgresql dialect.
extend_base(
    DuckdbColumn,
    **{
        # DuckDB uses regexp_matches() for regex containment.
        "str.contains": lambda _, col, re: fn.regexp_matches(col, re),
        "str.title": sql_not_impl(),
    }
)

# These datetime accessors return ints on DuckDB; re-annotate accordingly.
returns_int([
    "dt.day", "dt.dayofyear", "dt.days_in_month",
    "dt.daysinmonth", "dt.hour", "dt.minute", "dt.month",
    "dt.quarter", "dt.second", "dt.week",
    "dt.weekofyear", "dt.year"
])

# window ----
extend_base(
    DuckdbColumn,
    rank = sql_func_rank,
    #quantile = sql_quantile(is_analytic=True),
)

# aggregate ----
extend_base(
    DuckdbColumnAgg,
    quantile = sql_quantile(),
)

# Public translator object for the DuckDB dialect.
translator = SqlTranslator.from_mappings(DuckdbColumn, DuckdbColumnAgg)
| {
"content_hash": "4ba8d01565b5c998091724a3dba62b38",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 89,
"avg_line_length": 25.84259259259259,
"alnum_prop": 0.6005016123253314,
"repo_name": "machow/siuba",
"id": "aa132b10e1131a3b118271c909635d90ed41c69d",
"size": "2791",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "siuba/sql/dialects/duckdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1007"
},
{
"name": "Python",
"bytes": "573788"
}
],
"symlink_target": ""
} |
import wx
from cairis.core.armid import *
from cairis.core.ARM import *
from cairis.core.Borg import Borg
from WeaknessTreatmentDialog import WeaknessTreatmentDialog
__author__ = 'Shamal Faily'
class WeaknessTargetListCtrl(wx.ListCtrl):
  """Report-style list of weakness targets (target name, components, assets).

  Double-clicking a row opens a WeaknessTreatmentDialog so the user can
  record a treatment for that target.
  """
  def __init__(self,parent,winId,cvName):
    """
    :param parent: parent wx window
    :param winId: wx window id
    :param cvName: name of the current (component) view, passed on to the
        treatment dialog
    """
    wx.ListCtrl.__init__(self,parent,winId,size=wx.DefaultSize,style=wx.LC_REPORT)
    b = Borg()
    self.dbProxy = b.dbProxy
    self.theViewName = cvName
    self.theComponents = []
    self.InsertColumn(0,'Target')
    self.SetColumnWidth(0,100)
    self.InsertColumn(1,'Components')
    self.SetColumnWidth(1,250)
    self.InsertColumn(2,'Assets')
    self.SetColumnWidth(2,250)
    # Track the selected row; -1 means no selection.
    self.theSelectedIdx = -1
    self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected)
    self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected)
    self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.onTargetActivated)

  def OnItemSelected(self,evt):
    # Remember which row is selected.
    self.theSelectedIdx = evt.GetIndex()

  def OnItemDeselected(self,evt):
    self.theSelectedIdx = -1

  def onTargetActivated(self,evt):
    # Open the treatment dialog for the activated target; on commit, store
    # the entered treatment back into the target object.
    try:
      targetName = evt.GetLabel()
      target = self.theTargets[targetName]
      dlg = WeaknessTreatmentDialog(self,targetName,self.theViewName,target.requirement(),target.asset(),target.effectiveness())
      if (dlg.ShowModal() == WEAKNESSTREATMENT_BUTTONCOMMIT_ID):
        target.addTreatment(dlg.requirement(),dlg.asset(),dlg.effectiveness(),dlg.rationale())
        self.theTargets[targetName] = target
      dlg.Destroy()
    except KeyError:
      # Activated label not in theTargets; silently ignore.
      return

  def load(self,targets):
    """Populate the list from a dict of target name -> target object."""
    self.theTargets = targets
    for targetKey in targets:
      idx = self.GetItemCount()
      self.InsertStringItem(idx,targetKey)
      target = targets[targetKey]
      self.SetStringItem(idx,1,",".join(target.components()))
      self.SetStringItem(idx,2,",".join(target.templateAssets()))

  def dimensions(self):
    """Return the (possibly treatment-updated) targets dict."""
    return self.theTargets
| {
"content_hash": "84769f333751eb8379b05e6123de62c6",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 130,
"avg_line_length": 34.85454545454545,
"alnum_prop": 0.7104851330203443,
"repo_name": "nathanbjenx/cairis",
"id": "0fdeca18b543e240947669ee15f1f7964a00b692",
"size": "2716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cairis/gui/WeaknessTargetListCtrl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588306"
},
{
"name": "Dockerfile",
"bytes": "829"
},
{
"name": "Gherkin",
"bytes": "1615"
},
{
"name": "HTML",
"bytes": "1664076"
},
{
"name": "JavaScript",
"bytes": "416319"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "PLpgSQL",
"bytes": "1494775"
},
{
"name": "Python",
"bytes": "4006311"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
# Choose .FITS file
filename = '...'

# Load the primary HDU's data array.
hdu = fits.open(filename)
data = hdu[0].data

# display histogram of pixel values (log-scaled counts)
plt.figure()
plt.hist(np.asarray(data).flatten(), 500)
plt.yscale('Log')

# display image, stretched around the median for contrast
plt.figure()
plt.imshow(data, cmap='gray', vmin=np.median(data), vmax=np.median(data) + 500)
plt.colorbar()
# BUG FIX: was plt.title(file) -- `file` is undefined on Python 3
# (NameError); the chosen path lives in `filename`.
plt.title(filename)

plt.show()
| {
"content_hash": "15612a66334cc75c45aaff00ecac41c2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 19.636363636363637,
"alnum_prop": 0.6759259259259259,
"repo_name": "CalebHarada/DCT-photometry",
"id": "1de5c6222f541706f9ee22f0729d9c08c952d4e7",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ViewImage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77661"
}
],
"symlink_target": ""
} |
"""Very basic example of a simulation without a resampler or boundary
conditions"""
from copy import copy
import sys
## Logging
import sys
from eliot import start_action, to_file
to_file(open("_output/file_log.eliot.json", 'ab'))
### Setup Junk
with start_action(action_type="Setup") as setup_cx:
## Application Imports
with start_action(action_type="imports"):
import simtk.openmm as omm
import simtk.unit as unit
from openmm_systems.test_systems import LennardJonesPair
from wepy.resampling.resamplers.resampler import NoResampler
from wepy.runners.openmm import OpenMMRunner, gen_walker_state
from wepy.walker import Walker
from wepy.sim_manager import Manager
from wepy.work_mapper.mapper import Mapper
# from wepy.work_mapper.thread import ThreadMapper
# use a ready made system for OpenMM MD simulation
with start_action(action_type="Instantiate test system"):
test_sys = LennardJonesPair()
with start_action(action_type="Gen Runner"):
integrator = omm.LangevinIntegrator(300.0*unit.kelvin,
1/unit.picosecond,
0.002*unit.picoseconds)
init_state = gen_walker_state(test_sys.positions, test_sys.system, integrator)
runner = OpenMMRunner(test_sys.system, test_sys.topology, integrator,
platform='Reference')
# a trivial resampler which does nothing
with start_action(action_type="Instantiate Resampler"):
resampler = NoResampler()
# Run the simulation
# number of cycles of WE to perform
n_cycles = 1
# the number of MD dynamics steps for each cycle
n_steps = 1000000
steps = [n_steps for i in range(n_cycles)]
# number of parallel simulations
n_walkers = 10
# the work mapper
# work_mapper = ThreadMapper()
work_mapper = Mapper()
# create the initial walkers with equal weights
with start_action(action_type="Init Walkers") as ctx:
init_weight = 1.0 / n_walkers
init_walkers = [Walker(copy(init_state), init_weight) for i in range(n_walkers)]
with start_action(action_type="Init Sim Manager") as ctx:
sim_manager = Manager(
init_walkers,
runner=runner,
resampler=resampler,
work_mapper=work_mapper)
# run the simulation and get the results
with start_action(action_type="Simulation") as ctx:
final_walkers, _ = sim_manager.run_simulation(n_cycles, steps)
| {
"content_hash": "295fbc3875ca5920b8a13f30f52ab139",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 89,
"avg_line_length": 30.738095238095237,
"alnum_prop": 0.65143299767622,
"repo_name": "ADicksonLab/wepy",
"id": "347f01d393f25f93bb8b33c65a0c396fac7963b3",
"size": "2582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jigs/eliot/source/file_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "CSS",
"bytes": "920"
},
{
"name": "Dockerfile",
"bytes": "421"
},
{
"name": "HTML",
"bytes": "5283"
},
{
"name": "Makefile",
"bytes": "581"
},
{
"name": "Python",
"bytes": "1512860"
},
{
"name": "Shell",
"bytes": "7263"
},
{
"name": "TeX",
"bytes": "9643"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
class SpiritGroupConfig(AppConfig):
    """Django application configuration for the Spirit group app."""

    name = 'spirit.group'
    label = 'spirit_group'
    verbose_name = "Spirit Group"
| {
"content_hash": "1292637431313c8f93f5ffb1ff2b3cc7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 39,
"avg_line_length": 20.1,
"alnum_prop": 0.7164179104477612,
"repo_name": "alesdotio/Spirit",
"id": "94eb887e3e8a8872cb50c6de6bf76622486bc361",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spirit/group/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255435"
},
{
"name": "CoffeeScript",
"bytes": "128350"
},
{
"name": "HTML",
"bytes": "203306"
},
{
"name": "JavaScript",
"bytes": "28458"
},
{
"name": "Makefile",
"bytes": "187"
},
{
"name": "Python",
"bytes": "773246"
}
],
"symlink_target": ""
} |
from django import template

register = template.Library()


@register.filter(name='index_at')
def list_value_at_index(list_from_html, index):
    """Return the element of *list_from_html* at position *index*."""
    return list_from_html[index]


@register.filter(name='add')
def add(value_from_html, value_need_add):
    """Return the template value plus the filter argument."""
    return value_from_html + value_need_add


@register.filter(name='format')
def format_list(list_from_html):
    """Join the list's items with '|'.

    BUG FIX: this function was also named ``add``, shadowing the filter
    above at module level.  The registered filter name ('format') is
    unchanged, so templates are unaffected.
    """
    return '|'.join(list_from_html)
| {
"content_hash": "80185b02bf8f7908c1822364fed1b9c5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 47,
"avg_line_length": 20.473684210526315,
"alnum_prop": 0.7146529562982005,
"repo_name": "Guiders/CoreIce",
"id": "a118bd9d9327687a18d6bee69cfdadd793256e3b",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/templatetags/web_tag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "28154"
},
{
"name": "Python",
"bytes": "66896"
}
],
"symlink_target": ""
} |
import logging
import numpy as np
from lsanomaly import LSAnomaly
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def test_example_code(mc_model, example_arrays, check_ndarray, check_ps):
    """Fit the fixture model and verify its predictions and probabilities."""
    x_train, x_test, expected_predict, expected_prob = example_arrays

    mc_model.fit(x_train)

    predictions = mc_model.predict(x_test)
    logger.debug("predict = {}".format(predictions))
    assert predictions == expected_predict

    probabilities = mc_model.predict_proba(x_test)
    logger.debug("probs = {}".format(probabilities))
    # Probabilities must be valid (rows sum to 1) and match the fixture.
    check_ps(probabilities)
    check_ndarray(probabilities, expected_prob)
def test_example_doc(doc_arrays, check_ndarray):
    """Reproduce the documentation example and check both outputs."""
    query_point = np.array([[0]])
    x_train, predict_prob = doc_arrays

    anomaly_model = LSAnomaly(sigma=3, rho=0.1, seed=42)
    anomaly_model.fit(x_train)

    # The documented example point is classified as inlier (label 0.0).
    assert anomaly_model.predict(query_point) == [0.0]

    probabilities = anomaly_model.predict_proba(query_point)
    logger.debug("p = {}".format(probabilities))
    check_ndarray(np.array([[0.7231233, 0.2768767]]), probabilities)
| {
"content_hash": "aab02aeb2192a8b91ab5ffe2efae4272",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 73,
"avg_line_length": 24.951219512195124,
"alnum_prop": 0.6627565982404692,
"repo_name": "lsanomaly/lsanomaly",
"id": "1ecbfda0ce513fd54b31a7d22bee5fe1193c6fb4",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lsanomaly/tests/test_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "254947"
},
{
"name": "Makefile",
"bytes": "254"
},
{
"name": "Python",
"bytes": "60058"
},
{
"name": "TeX",
"bytes": "1771"
}
],
"symlink_target": ""
} |
from mysite import db
class Notebook(db.Model):
    """Notebook record linked to a Status and an owning User."""
    __tablename__ = 'Notebook'

    id = db.Column(db.Integer, primary_key=True)
    nid = db.Column(db.String(32), default='')
    name = db.Column(db.String(32), default='')
    # BUG FIX: SQLAlchemy has no `db.Int`; the integer column type is
    # `db.Integer` (the old line raised AttributeError at import time).
    price = db.Column(db.Integer, default=0)
    state = db.Column(db.Boolean, default=True)

    status_id = db.Column(db.Integer, db.ForeignKey('Status.id'))
    status = db.relationship('Status', backref='notebooks')

    user_id = db.Column(db.Integer, db.ForeignKey('User.id'))
    user = db.relationship('User', backref='notebooks')

    # NOTE: the previous __init__ only delegated to super() with the same
    # arguments, which is the default behavior, so it was removed.
"content_hash": "0e8ce6a2e8d596093c8fccbecb234bfe",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 35.22222222222222,
"alnum_prop": 0.6656151419558359,
"repo_name": "liyigerry/caixiang",
"id": "d49dd882c744f5296466ab2c683c1238663049d4",
"size": "634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/models/notebook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "117825"
},
{
"name": "HTML",
"bytes": "35324"
},
{
"name": "JavaScript",
"bytes": "46042"
},
{
"name": "Python",
"bytes": "31456"
}
],
"symlink_target": ""
} |
from courses.models import Course, FilenameExtension, DefaultTeacher, MarkField, CourseMarkSystem, StudentCourseMark
from django.contrib import admin
class CourseAdmin(admin.ModelAdmin):
    """Admin for Course: listing, filtering, and search configuration."""
    list_display = ('name', 'year',)
    list_filter = ('year__start_year', 'is_active')
    filter_horizontal = ('filename_extensions', 'issue_fields')
    # raw-id widgets avoid rendering every teacher/group in a select box
    raw_id_fields = ('teachers', 'groups')
    search_fields = ('name', 'year__start_year', 'teachers__username', 'groups__name')
class DefaultTeacherAdmin(admin.ModelAdmin):
    """Admin for DefaultTeacher assignments per group/course."""
    list_display = ('teacher', 'group', 'course')
    list_filter = ('group', 'course')
class CourseMarkSystemAdmin(admin.ModelAdmin):
    """Admin for CourseMarkSystem; marks edited via a two-pane widget."""
    filter_horizontal = ('marks',)
class StudentCourseMarkAdmin(admin.ModelAdmin):
    """Admin for per-student course marks; update_time is system-managed."""
    list_display = ('student', 'course', 'mark')
    list_filter = ('student', 'course', 'mark')
    readonly_fields = ('update_time',)
class MarkFieldAdmin(admin.ModelAdmin):
    """Admin for MarkField, showing both display name and numeric value."""
    list_display = ('name', 'name_int')
# Register each model with its admin configuration (FilenameExtension uses
# the default ModelAdmin).
admin.site.register(Course, CourseAdmin)
admin.site.register(FilenameExtension)
admin.site.register(DefaultTeacher, DefaultTeacherAdmin)
admin.site.register(CourseMarkSystem, CourseMarkSystemAdmin)
admin.site.register(MarkField, MarkFieldAdmin)
admin.site.register(StudentCourseMark, StudentCourseMarkAdmin)
| {
"content_hash": "1691939a1619a2241426c038ec04a067",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 116,
"avg_line_length": 34.2972972972973,
"alnum_prop": 0.7344365642237982,
"repo_name": "znick/anytask",
"id": "29f533ae9bb3e5e4621537733fddb4398a3154c9",
"size": "1269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anytask/courses/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89720"
},
{
"name": "Dockerfile",
"bytes": "7709"
},
{
"name": "HTML",
"bytes": "826638"
},
{
"name": "JavaScript",
"bytes": "296467"
},
{
"name": "Less",
"bytes": "7302"
},
{
"name": "Python",
"bytes": "965878"
},
{
"name": "Shell",
"bytes": "30922"
}
],
"symlink_target": ""
} |
import copy
import csv
import logging
import os
import uuid
from django.shortcuts import render
from django.http import HttpResponse
from amas import AMAS
from .forms import GeneTableForm
from core.utils import get_context, get_voucher_codes, get_gene_codes
from create_dataset.utils import CreateDataset
from public_interface.models import Genes
log = logging.getLogger(__name__)
def index(request):
    """Render the gene table page with an empty form."""
    context = get_context(request)
    context["form"] = GeneTableForm()
    return render(request, 'gene_table/index.html', context)
def results(request):
    """On a valid POST, stream the gene table as CSV; otherwise re-render
    the form page."""
    context = get_context(request)

    if request.method == 'POST':
        form = GeneTableForm(request.POST)
        if form.is_valid():
            table = GeneTable(form.cleaned_data)
            return create_excel_file(table.stats)

    context["form"] = GeneTableForm()
    return render(request, 'gene_table/index.html', context)
class GeneTable(object):
    """Builds per-gene alignment statistics (via AMAS) for the gene table."""

    def __init__(self, cleaned_data):
        # Force the dataset-creation options the table requires, then build
        # one FASTA dataset per gene and summarize each with AMAS.
        self.cleaned_data = self.populate_cleaned_data_form(cleaned_data)
        self.voucher_codes = get_voucher_codes(cleaned_data)
        self.gene_codes = get_gene_codes(cleaned_data)
        self.fasta_datasets = self.get_fasta_datasets()
        self.genes_type = self.get_genes_type()
        self.stats = self.get_stats_from_datasets()

    def populate_cleaned_data_form(self, cleaned_data):
        # Overwrite user options with the fixed configuration used for
        # building per-gene FASTA files: nucleotides, all positions, FASTA
        # output, one partition per gene.  Mutates and returns cleaned_data.
        cleaned_data['number_genes'] = None
        cleaned_data['aminoacids'] = False
        cleaned_data['translations'] = False
        cleaned_data['positions'] = ['ALL']
        cleaned_data['file_format'] = 'FASTA'
        cleaned_data['partition_by_positions'] = 'by gene'
        cleaned_data['taxon_names'] = ['CODE', 'GENUS', 'SPECIES']
        cleaned_data['outgroup'] = ''
        return cleaned_data

    def get_fasta_datasets(self):
        # One CreateDataset per gene, each restricted to that single gene.
        fasta_datasets = []

        for gene_code in self.gene_codes:
            cleaned_data = copy.copy(self.cleaned_data)
            cleaned_data['geneset'] = None
            cleaned_data['gene_codes'] = [Genes.objects.get(gene_code=gene_code)]
            fasta = CreateDataset(cleaned_data)
            fasta_datasets.append(fasta)
        return fasta_datasets

    def get_genes_type(self):
        # Map gene_code -> gene_type for every gene in the database.
        genes = Genes.objects.all().values('gene_code', 'gene_type')
        genes_dict = {}
        for gene in genes:
            gene_code = gene['gene_code']
            gene_type = gene['gene_type']
            genes_dict[gene_code] = gene_type
        return genes_dict

    def get_stats_from_datasets(self):
        """These are the stats headers of AMAS v0.2
        """
        # Each dataset is written to a temporary FASTA file (random name)
        # and summarized with AMAS; the positional indices below follow
        # AMAS v0.2's summarize_alignment() output order plus the four
        # base-frequency values appended after it.
        stats = {}
        in_file = self.make_guid() + '.fas'

        for dataset in self.fasta_datasets:
            code = dataset.gene_codes[0]
            with open(in_file, 'w') as handle:
                handle.write(dataset.dataset_str)
            aln = AMAS.DNAAlignment(in_file, 'fasta', 'dna')
            aln_stats = aln.summarize_alignment()
            freq_summary = aln.get_freq_summary()[1][0:4]
            aln_stats += freq_summary
            aln_stats.append(code)

            this_stat = {
                'data_type': self.genes_type[code],
                'number_of_taxa': aln_stats[1],
                'alignment_length': aln_stats[2],
                'total_matrix_cells': aln_stats[3],
                'undetermined_chars': aln_stats[4],
                'missing_percent': aln_stats[5],
                'number_variable_sites': aln_stats[6],
                'proportion_variable_sites': aln_stats[7],
                'parsimony_informative_sites': aln_stats[8],
                'proportion_parsimony_informative': aln_stats[9],
                'freq_a': aln_stats[10],
                'freq_c': aln_stats[11],
                'freq_g': aln_stats[12],
                'freq_t': aln_stats[13],
            }
            stats[code] = this_stat
            # Clean up the temp file; the same name is reused next loop.
            try:
                os.remove(in_file)
            except OSError as e:
                log.error("There is no partition file to remove: {0}".format(e))
        return stats

    def make_guid(self):
        # Random hex string used for the temporary FASTA filename.
        return uuid.uuid4().hex
def create_excel_file(stats):
    # Serialize per-gene stats into a CSV attachment response.
    # (Despite the name, the output format is CSV, not Excel.)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="gene_table.csv"'

    writer = csv.writer(response)
    # NOTE(review): the header declares 13 columns but each data row below
    # appends only 12 values -- 'Introns (n)' and 'Tot. intron length (bp)'
    # are never populated.  Confirm whether this is intentional.
    row = ['Data set', 'Data type', 'Length', 'Dataset completion (%)', 'Variable (%)',
           'Pars. Inf. (%)', 'Conserved (%)', 'Freq. A (%)', 'Freq. T/U (%)', 'Freq. C (%)',
           'Freq. G (%)', 'Introns (n)', 'Tot. intron length (bp)']
    writer.writerow(row)

    for gene in stats:
        this_stats = stats[gene]
        row = [gene]
        row.append(this_stats['data_type'])
        row.append(this_stats['alignment_length'])
        # Completion/variable/conserved are derived from AMAS proportions.
        row.append(100 - float(this_stats['missing_percent']))
        row.append(float(this_stats['proportion_variable_sites']) * 100)
        row.append(this_stats['proportion_parsimony_informative'])
        row.append(100 - (float(this_stats['proportion_variable_sites']) * 100))
        row.append(float(this_stats['freq_a']) * 100)
        row.append(float(this_stats['freq_t']) * 100)
        row.append(float(this_stats['freq_c']) * 100)
        row.append(float(this_stats['freq_g']) * 100)
        writer.writerow(row)
    return response
| {
"content_hash": "79c6f61abd23628d63558b4b4110ab46",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 92,
"avg_line_length": 36,
"alnum_prop": 0.5859433258762118,
"repo_name": "carlosp420/VoSeq",
"id": "e4905d107b3d3f79b39c373c0d997e95e86ca49d",
"size": "5364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gene_table/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19319"
},
{
"name": "HTML",
"bytes": "95764"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "357630"
},
{
"name": "Shell",
"bytes": "11587"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from scopus.classes import Retrieval
from scopus.utils import chained_get, get_id, detect_id_type, get_link,\
listify
class AbstractRetrieval(Retrieval):
    @property
    def abstract(self):
        """The abstract of a document.

        Note: If this is empty, try property description instead.
        """
        # Read directly from the parsed head section of the response.
        return self._head.get('abstracts')
@property
def affiliation(self):
"""A list of namedtuples representing listed affiliations in
the form (id, name, city, country).
Note: Might be empty.
"""
out = []
aff = namedtuple('Affiliation', 'id name city country')
affs = listify(self._json.get('affiliation', []))
for item in affs:
new = aff(id=item.get('@id'), name=item.get('affilname'),
city=item.get('affiliation-city'),
country=item.get('affiliation-country'))
out.append(new)
return out or None
    @property
    def aggregationType(self):
        """Aggregation type of source the document is published in."""
        # e.g. journal vs. conference proceeding (value comes from Scopus).
        return chained_get(self._json, ['coredata', 'prism:aggregationType'])
@property
def authkeywords(self):
"""List of author-provided keywords of the document."""
keywords = self._json.get('authkeywords')
if not keywords:
return None
else:
try:
return [d['$'] for d in keywords['author-keyword']]
except TypeError: # Singleton keyword
return [keywords['author-keyword']['$']]
    @property
    def authorgroup(self):
        """A list of namedtuples representing the article's authors organized
        by affiliation, in the form (affiliation_id, dptid, organization,
        city, postalcode, addresspart, country, auid, indexed_name,
        surname, given_name).
        If "given_name" is not present, fall back to initials.
        Note: Affiliation information might be missing or mal-assigned even
        when it looks correct in the web view. In this case please request
        a correction.
        """
        out = []
        fields = 'affiliation_id dptid organization city postalcode '\
                 'addresspart country auid indexed_name surname given_name'
        auth = namedtuple('Author', fields)
        items = listify(self._head.get('author-group', []))
        index_path = ['preferred-name', 'ce:indexed-name']
        for item in items:
            # Affiliation information
            aff = item.get('affiliation', {})
            try:
                # Multiple affiliation IDs are joined into one string.
                aff_ids = listify(aff['affiliation-id'])
                aff_id = ", ".join([a["@afid"] for a in aff_ids])
            except KeyError:
                aff_id = aff.get("@afid")
            # _get_org is a module-level helper (defined elsewhere in file);
            # presumably it extracts the organization name from `aff`.
            org = _get_org(aff)
            # Author information (might relate to collaborations)
            authors = listify(item.get('author', item.get('collaboration', [])))
            for au in authors:
                try:
                    given = au.get('ce:given-name', au['ce:initials'])
                except KeyError:  # Collaboration
                    given = au.get('ce:text')
                new = auth(affiliation_id=aff_id, organization=org,
                           city=aff.get('city'), dptid=aff.get("@dptid"),
                           postalcode=aff.get('postal-code'),
                           addresspart=aff.get('address-part'),
                           country=aff.get('country'), auid=au.get('@auid'),
                           surname=au.get('ce:surname'), given_name=given,
                           indexed_name=chained_get(au, index_path))
                out.append(new)
        return out or None
    @property
    def authors(self):
        """A list of namedtuples representing the article's authors, in the
        form (auid, indexed_name, surname, given_name, affiliation_id,
        affiliation, city, country).

        Note: The affiliation referred to here is what Scopus' algorithm
        determined as the main affiliation. Property `authorgroup` provides
        all affiliations.
        """
        out = []
        fields = 'auid indexed_name surname given_name affiliation'
        auth = namedtuple('Author', fields)
        for item in chained_get(self._json, ['authors', 'author'], []):
            # Drop empty affiliation entries before collecting their IDs.
            affs = [a for a in listify(item.get('affiliation')) if a]
            if affs:
                aff = [aff.get('@id') for aff in affs]
            else:
                aff = None
            new = auth(auid=item['@auid'], surname=item.get('ce:surname'),
                       indexed_name=item.get('ce:indexed-name'), affiliation=aff,
                       given_name=chained_get(item, ['preferred-name', 'ce:given-name']))
            out.append(new)
        return out or None
@property
def citedby_count(self):
"""Number of articles citing the document."""
cites = chained_get(self._json, ['coredata', 'citedby-count'])
if cites:
cites = int(cites)
return cites
    @property
    def citedby_link(self):
        """URL to Scopus page listing citing documents."""
        # Link index 2 in the coredata link list -- see get_link helper.
        return get_link(self._json, 2)
    @property
    def chemicals(self):
        """List of namedtuples representing chemical entities in the form
        (source, chemical_name, cas_registry_number). In case multiple
        numbers given, they are joined on ";".
        """
        path = ['enhancement', 'chemicalgroup', 'chemicals']
        items = listify(chained_get(self._head, path, []))
        fields = 'source chemical_name cas_registry_number'
        chemical = namedtuple('Chemical', fields)
        out = []
        for item in items:
            for chem in listify(item['chemical']):
                number = chem.get('cas-registry-number')
                try:  # Multiple numbers given
                    num = ";".join([n['$'] for n in number])
                except TypeError:
                    # Single number (or None) -- keep as-is.
                    num = number
                new = chemical(source=item['@source'], cas_registry_number=num,
                               chemical_name=chem['chemical-name'])
                out.append(new)
        return out or None
    @property
    def confcode(self):
        """Code of the conference the document belongs to."""
        # `_confevent` is pre-extracted from the bibrecord head in __init__
        return self._confevent.get('confcode')
@property
def confdate(self):
"""Date range of the conference the document belongs to represented
by two tuples in the form (YYYY, MM, DD).
"""
date = self._confevent.get('confdate', {})
if len(date) > 0:
start = {k: int(v) for k, v in date['startdate'].items()}
end = {k: int(v) for k, v in date['enddate'].items()}
return ((start['@year'], start['@month'], start['@day']),
(end['@year'], end['@month'], end['@day']))
else:
return ((None, None, None), (None, None, None))
@property
def conflocation(self):
"""Location of the conference the document belongs to."""
return chained_get(self._confevent, ['conflocation', 'city-group'])
    @property
    def confname(self):
        """Name of the conference the document belongs to."""
        # `_confevent` is pre-extracted from the bibrecord head in __init__
        return self._confevent.get('confname')
@property
def confsponsor(self):
"""Sponsor(s) of the conference the document belongs to."""
path = ['confsponsors', 'confsponsor']
sponsors = chained_get(self._confevent, path, [])
if len(sponsors) == 0:
return None
if isinstance(sponsors, list):
return [s['$'] for s in sponsors]
return sponsors
@property
def contributor_group(self):
"""List of namedtuples representing contributors compiled by Scopus,
in the form (given_name, initials, surname, indexed_name, role).
"""
path = ['source', 'contributor-group']
items = listify(chained_get(self._head, path, []))
out = []
fields = 'given_name initials surname indexed_name role'
pers = namedtuple('Contributor', fields)
for item in items:
entry = item.get('contributor', {})
new = pers(indexed_name=entry.get('ce:indexed-name'),
role=entry.get('@role'), surname=entry.get('ce:surname'),
given_name=entry.get('ce:given-name'),
initials=entry.get('ce:initials'))
out.append(new)
return out or None
    @property
    def correspondence(self):
        """namedtuple representing the author to whom correspondence should
        be addressed, in the form
        (surname, initials, organization, country, city_group). Multiple
        organizations are joined on semicolon.
        """
        fields = 'surname initials organization country city_group'
        auth = namedtuple('Correspondence', fields)
        corr = self._head.get('correspondence')
        if corr is None:
            return None
        aff = corr.get('affiliation', {})
        try:
            org = aff['organization']
            try:
                # Single organization: unwrap the {'$': name} dict
                org = org['$']
            except TypeError:  # Multiple names given: join on semicolon
                org = "; ".join([d['$'] for d in org])
        except KeyError:
            # No organization information in the affiliation
            org = None
        return auth(surname=corr.get('person', {}).get('ce:surname'),
                    initials=corr.get('person', {}).get('ce:initials'),
                    organization=org, country=aff.get('country'),
                    city_group=aff.get('city-group'))
@property
def coverDate(self):
"""The date of the cover the document is in."""
return chained_get(self._json, ['coredata', 'prism:coverDate'])
@property
def description(self):
"""Return the description of a record.
Note: If this is empty, try property abstract instead.
"""
return chained_get(self._json, ['coredata', 'dc:description'])
@property
def doi(self):
"""DOI of the document."""
return chained_get(self._json, ['coredata', 'prism:doi'])
@property
def eid(self):
"""EID of the document."""
return chained_get(self._json, ['coredata', 'eid'])
@property
def endingPage(self):
"""Ending page."""
return chained_get(self._json, ['coredata', 'prism:endingPage'])
@property
def funding(self):
"""List of namedtuples parsed funding information in the form
(agency string id acronym country).
"""
path = ['item', 'xocs:meta', 'xocs:funding-list', 'xocs:funding']
funds = listify(chained_get(self._json, path, []))
out = []
fund = namedtuple('Funding', 'agency string id acronym country')
for item in funds:
new = fund(agency=item.get('xocs:funding-agency'),
string=item.get('xocs:funding-agency-matched-string'),
id=item.get('xocs:funding-agency-id'),
acronym=item.get('xocs:funding-agency-acronym'),
country=item.get('xocs:funding-agency-country'))
out.append(new)
return out or None
@property
def funding_text(self):
"""The raw text from which Scopus derives funding information."""
path = ['item', 'xocs:meta', 'xocs:funding-list', 'xocs:funding-text']
return chained_get(self._json, path)
@property
def isbn(self):
"""ISBNs belonging to publicationName as tuple of variying length,
(e.g. ISBN-10 or ISBN-13)."""
isbns = listify(chained_get(self._head, ['source', 'isbn'], []))
if len(isbns) == 0:
return None
else:
return tuple((i['$'] for i in isbns))
@property
def issn(self):
"""ISSN belonging to the publicationName.
Note: If E-ISSN is known to Scopus, this returns both
ISSN and E-ISSN in random order separated by blank space.
"""
return chained_get(self._json, ['coredata', 'prism:issn'])
    @property
    def identifier(self):
        """ID of the document (same as EID without "2-s2.0-")."""
        # Delegates to the shared `get_id` helper operating on the raw JSON
        return get_id(self._json)
    @property
    def idxterms(self):
        """List of index terms, or None when absent or in unexpected shape."""
        try:
            terms = listify(self._json.get("idxterms", {}).get('mainterm', []))
        except AttributeError:  # idxterms is empty (not a dict)
            return None
        try:
            # Each term is a {'$': text} dict
            return [d['$'] for d in terms]
        except AttributeError:
            # Entries lack the expected dict shape
            return None
@property
def issueIdentifier(self):
"""Number of the issue the document was published in."""
return chained_get(self._json, ['coredata', 'prism:issueIdentifier'])
@property
def issuetitle(self):
"""Title of the issue the document was published in."""
return chained_get(self._head, ['source', 'issuetitle'])
@property
def language(self):
"""Language of the article."""
return chained_get(self._json, ['language', '@xml:lang'])
@property
def pageRange(self):
"""Page range."""
return chained_get(self._json, ['coredata', 'prism:pageRange'])
@property
def publicationName(self):
"""Name of source the document is published in."""
return chained_get(self._json, ['coredata', 'prism:publicationName'])
@property
def publisher(self):
"""Name of the publisher of the document.
Note: Information provided in the FULL view of the article might be
more complete.
"""
# Return information from FULL view, fall back to other views
full = chained_get(self._head, ['source', 'publisher', 'publishername'])
if full is None:
return chained_get(self._json, ['coredata', 'dc:publisher'])
else:
return full
@property
def publisheraddress(self):
"""Name of the publisher of the document."""
return chained_get(self._head, ['source', 'publisher', 'publisheraddress'])
@property
def refcount(self):
"""Number of references of an article.
Note: Requires the FULL view of the article.
"""
if self._view == "REF":
path = ["references", '@total-references']
else:
path = ['item', 'bibrecord', 'tail', 'bibliography', '@refcount']
return chained_get(self._json, path)
    @property
    def references(self):
        """List of namedtuples representing references listed in the document,
        in the form (position, id, doi, title, authors, authors_auid,
        authors_affiliationid, sourcetitle, publicationyear, volume, issue,
        first, last, citedbycount, type, text, fulltext).
        `position` is the number at which the reference appears in the
        document, `id` is the Scopus ID of the referenced document (EID
        without the "2-s2.0-"), `authors` is a string of the names of the
        authors in the format "Surname1, Initials1; Surname2, Initials2",
        `authors_auid` is a string of the author IDs joined on "; ",
        `authors_affiliationid` is a string of the authors' affiliation IDs
        joined on "; ", `sourcetitle` is the name of the source (e.g. the
        journal), `publicationyear` is the year of the publication as a string,
        `volume` and `issue`, are strings referring to the volume and issue,
        `first` and `last` refer to the page range, `citedbycount` is a string
        for the total number of citations of the cited item, `type` describes
        the parsing status of the reference (resolved or not), `text` is
        Scopus-provided information on the publication, `fulltext` is the text
        the authors used for the reference.
        Note: Requires either the FULL view or REF view of the article. Might
        be empty even if refcount is positive. Specific fields can be empty.
        Author lists (authors, authors_auid, authors_affiliationid) may contain
        duplicates but have been filtered of None's.
        """
        out = []
        fields = 'position id doi title authors authors_auid '\
                 'authors_affiliationid sourcetitle publicationyear volume '\
                 'issue first last citedbycount type text fulltext'
        ref = namedtuple('Reference', fields)
        # The two views nest the reference list differently
        if self._view == "REF":
            path = ['references', 'reference']
        else:
            path = ['item', 'bibrecord', 'tail', 'bibliography', 'reference']
        items = listify(chained_get(self._json, path, []))
        for item in items:
            # REF view stores details directly on the item; FULL view nests
            # them under 'ref-info'
            info = item.get('ref-info', item)
            volisspag = info.get('volisspag', {}) or {}
            if isinstance(volisspag, list):
                volisspag = volisspag[0]
            # Parse author information
            try:  # FULL view parsing
                auth = listify(item['ref-info']['ref-authors']['author'])
                authors = [', '.join([d['ce:surname'], d['ce:initials']])
                           for d in auth]
                auids = None
                affids = None
            except KeyError:  # REF view parsing
                auth = (info.get('author-list') or {}).get('author', [])
                authors = [', '.join(filter(None, [d.get('ce:surname'),
                                                   d.get('ce:given-name')]))
                           for d in auth]
                auids = "; ".join(filter(None, [d.get('@auid') for d in auth]))
                affs = filter(None, [d.get('affiliation') for d in auth])
                affids = "; ".join([aff.get('@id') for aff in affs])
            # Parse IDs; prefer itemid entries, fall back to explicit fields
            try:
                ids = listify(info['refd-itemidlist']['itemid'])
            except KeyError:
                ids = []
            try:
                doi = _select_by_idtype(ids, 'DOI')[0]
            except IndexError:
                doi = info.get('ce:doi')
            try:
                scopus_id = _select_by_idtype(ids, 'SGR')[0]
            except IndexError:
                scopus_id = info.get('scopus-id')
            # Combine information
            new = ref(position=item.get('@id'), id=scopus_id, doi=doi,
                      authors="; ".join(authors), authors_auid=auids or None,
                      authors_affiliationid=affids or None,
                      title=info.get('ref-title', {}).get('ref-titletext', info.get('title')),
                      sourcetitle=info.get('ref-sourcetitle', info.get('sourcetitle')),
                      publicationyear=info.get('ref-publicationyear', {}).get('@first'),
                      volume=volisspag.get('voliss', {}).get('@volume'),
                      issue=volisspag.get('voliss', {}).get('@issue'),
                      first=volisspag.get('pagerange', {}).get('@first'),
                      last=volisspag.get('pagerange', {}).get('@last'),
                      citedbycount=info.get('citedby-count'), type=info.get('type'),
                      text=info.get('ref-text'),
                      fulltext=item.get('ref-fulltext'))
            out.append(new)
        return out or None
    @property
    def scopus_link(self):
        """URL to the document page on Scopus."""
        # Index 1 selects the Scopus page entry in the coredata link array
        return get_link(self._json, 1)
    @property
    def self_link(self):
        """URL to Scopus API page of this document."""
        # Index 0 selects the self entry in the coredata link array
        return get_link(self._json, 0)
@property
def sequencebank(self):
"""List of namedtuples representing biological entities defined or
mentioned in the text, in the form (name, sequence_number, type).
"""
path = ['enhancement', 'sequencebanks', 'sequencebank']
items = listify(chained_get(self._head, path, []))
bank = namedtuple('Sequencebank', 'name sequence_number type')
out = []
for item in items:
numbers = listify(item['sequence-number'])
for number in numbers:
new = bank(name=item['@name'], sequence_number=number['$'],
type=number['@type'])
out.append(new)
return out or None
@property
def source_id(self):
"""Scopus source ID of the document."""
return chained_get(self._json, ['coredata', 'source-id'])
@property
def sourcetitle_abbreviation(self):
"""Abbreviation of the source the document is published in.
Note: Requires the FULL view of the article.
"""
return self._head.get('source', {}).get('sourcetitle-abbrev')
@property
def srctype(self):
"""Aggregation type of source the document is published in (short
version of aggregationType).
"""
return chained_get(self._json, ['coredata', 'srctype'])
@property
def startingPage(self):
"""Starting page."""
return chained_get(self._json, ['coredata', 'prism:startingPage'])
@property
def subject_areas(self):
"""List of namedtuples containing subject areas of the article
in the form (area abbreviation code).
Note: Requires the FULL view of the article.
"""
area = namedtuple('Area', 'area abbreviation code')
path = ['subject-areas', 'subject-area']
out = [area(area=item['$'], abbreviation=item['@abbrev'],
code=item['@code'])
for item in listify(chained_get(self._json, path, []))]
return out or None
@property
def title(self):
"""Title of the document."""
return chained_get(self._json, ['coredata', 'dc:title'])
@property
def url(self):
"""URL to the API view of the document."""
return chained_get(self._json, ['coredata', 'prism:url'])
@property
def volume(self):
"""Volume for the document."""
return chained_get(self._json, ['coredata', 'prism:volume'])
@property
def website(self):
"""Website of publisher."""
path = ['source', 'website', 'ce:e-address', '$']
return chained_get(self._head, path)
    def __init__(self, identifier=None, refresh=False, view='META_ABS',
                 id_type=None):
        """Class to represent the results from retrieval request from the
        Scopus Abstract API.
        Parameters
        ----------
        identifier : str or int
            The identifier of a document. Can be the Scopus EID, the Scopus
            ID, the PII, the Pubmed-ID or the DOI.
        refresh : bool (optional, default=False)
            Whether to refresh the cached file if it exists or not.
        id_type: str (optional, default=None)
            The type of used ID. Allowed values: None, 'eid', 'pii',
            'scopus_id', 'pubmed_id', 'doi'. If the value is None, the
            function tries to infer the ID type itself.
        view : str (optional, default=META_ABS)
            The view of the file that should be downloaded. Allowed values:
            META, META_ABS, REF, FULL, where FULL includes all information
            of META_ABS view and META_ABS includes all information of the
            META view. For details see
            https://dev.elsevier.com/guides/AbstractRetrievalViews.htm.
        Raises
        ------
        ValueError
            If the id_type parameter or the view parameter contains
            invalid entries.
        Notes
        -----
        The files are cached in ~/.scopus/abstract_retrieval/{view}/{identifier}.
        In case a DOI is used as identifier, an underscore replaces the
        forward slash in the filename.
        """
        # Checks
        identifier = str(identifier)
        allowed_views = ('META', 'META_ABS', 'REF', 'FULL')
        if view not in allowed_views:
            raise ValueError('view parameter must be one of ' +
                             ', '.join(allowed_views))
        if id_type is None:
            id_type = detect_id_type(identifier)
        else:
            allowed_id_types = ('eid', 'pii', 'scopus_id', 'pubmed_id', 'doi')
            if id_type not in allowed_id_types:
                raise ValueError('id_type parameter must be one of ' +
                                 ', '.join(allowed_id_types))
        # Load json
        Retrieval.__init__(self, identifier=identifier, id_type=id_type,
                           api='AbstractRetrieval', refresh=refresh, view=view)
        self._json = self._json['abstracts-retrieval-response']
        # Cache frequently used sub-dicts: the bibliographic head and the
        # conference event info consumed by the conf* properties
        self._head = chained_get(self._json, ["item", "bibrecord", "head"], {})
        path = ['source', 'additional-srcinfo', 'conferenceinfo', 'confevent']
        self._confevent = chained_get(self._head, path, {})
    def __str__(self):
        """Return pretty text version of the document.
        Assumes the document is a journal article and was loaded with
        view="META_ABS" or view="FULL".
        """
        # Authors
        if len(self.authors) > 1:
            authors = _list_authors(self.authors)
        else:
            # str() guards against None given_name/surname
            a = self.authors[0]
            authors = str(a.given_name) + ' ' + str(a.surname)
        # All other information
        s = '[[{link}][{eid}]] {auth}, {title}, {jour}, {vol}'.format(
            link=self.scopus_link, eid=self.eid, auth=authors,
            title=self.title, jour=self.publicationName, vol=self.volume)
        if self.issueIdentifier:
            s += '({}), '.format(self.issueIdentifier)
        else:
            s += ', '
        # Page information: full range, start-end pair, or placeholder
        if self.pageRange:
            s += 'pp. {}, '.format(self.pageRange)
        elif self.startingPage:
            s += 'pp. {}-{}, '.format(self.startingPage, self.endingPage)
        else:
            s += '(no pages found) '
        # coverDate[:4] is the publication year
        s += '({}).'.format(self.coverDate[:4])
        if self.doi:
            s += ' https://doi.org/{},'.format(self.doi)
        s += ' {}, cited {} times (Scopus).\n Affiliations:\n '.format(
            self.scopus_link, self.citedby_count)
        s += '\n '.join([aff.name for aff in self.affiliation])
        return s
    def get_bibtex(self):
        """Bibliographic entry in BibTeX format.
        Raises
        ------
        ValueError
            If the item's aggregationType is not Journal.
        """
        if self.aggregationType != 'Journal':
            raise ValueError('Only Journal articles supported.')
        # Item key: first author's surname + year + first and last title words
        year = self.coverDate[0:4]
        first = self.title.split()[0].title()
        last = self.title.split()[-1].title()
        key = ''.join([self.authors[0].surname, year, first, last])
        # Authors
        authors = ' and '.join(["{} {}".format(a.given_name, a.surname)
                                for a in self.authors])
        # Pages: full range, start-end pair, or "-" placeholder
        if self.pageRange:
            pages = self.pageRange
        elif self.startingPage:
            pages = '{}-{}'.format(self.startingPage, self.endingPage)
        else:
            pages = '-'
        # All information (doubled braces escape literal { } in .format)
        bib = "@article{{{key},\n author = {{{auth}}},\n title = "\
              "{{{{{title}}}}},\n journal = {{{jour}}},\n year = "\
              "{{{year}}},\n volume = {{{vol}}},\n number = {{{number}}},"\
              "\n pages = {{{pages}}}".format(
                  key=key, auth=authors, title=self.title, year=year,
                  jour=self.publicationName, vol=self.volume,
                  number=self.issueIdentifier, pages=pages)
        # DOI
        if self.doi:
            bib += ",\n doi = {{{}}}".format(self.doi)
        bib += "}"
        return bib
    def get_html(self):
        """Bibliographic entry in html format."""
        # Author links: each name links to the Scopus author profile
        au_link = ('<a href="https://www.scopus.com/authid/detail.url'
                   '?origin=AuthorProfile&authorId={0}">{1}</a>')
        if len(self.authors) > 1:
            authors = u', '.join([au_link.format(a.auid, a.given_name +
                                                 ' ' + a.surname)
                                  for a in self.authors[0:-1]])
            # Last author is appended with " and " (str() guards None names)
            authors += (u' and ' +
                        au_link.format(self.authors[-1].auid,
                                       (str(self.authors[-1].given_name) +
                                        ' ' +
                                        str(self.authors[-1].surname))))
        else:
            a = self.authors[0]
            authors = au_link.format(a.auid, a.given_name + ' ' + a.surname)
        title = u'<a href="{}">{}</a>'.format(self.scopus_link, self.title)
        # Volume/issue rendered in bold; placeholder when absent
        if self.volume and self.issueIdentifier:
            volissue = u'<b>{}({})</b>'.format(self.volume, self.issueIdentifier)
        elif self.volume:
            volissue = u'<b>{}</b>'.format(self.volume)
        else:
            volissue = 'no volume'
        jlink = '<a href="https://www.scopus.com/source/sourceInfo.url'\
                '?sourceId={}">{}</a>'.format(
                    self.source_id, self.publicationName)
        pages = _parse_pages(self, unicode=True)
        s = "{auth}, {title}, {jour}, {volissue}, {pages}, ({year}).".format(
            auth=authors, title=title, jour=jlink, volissue=volissue,
            pages=pages, year=self.coverDate[:4])
        if self.doi:
            s += ' <a href="https://doi.org/{0}">doi:{0}</a>.'.format(self.doi)
        return s
def get_latex(self):
"""Bibliographic entry in LaTeX format."""
if len(self.authors) > 1:
authors = _list_authors(self.authors)
else:
a = self.authors
authors = ' '.join([a.given_name, a.surname])
if self.volume and self.issueIdentifier:
volissue = '\\textbf{{{}({})}}'.format(self.volume, self.issueIdentifier)
elif self.volume:
volissue = '\\textbf{{{}}}'.format(self.volume)
else:
volissue = 'no volume'
pages = _parse_pages(self)
s = '{auth}, \\textit{{{title}}}, {jour}, {vol}, {pages} ({year}).'.format(
auth=authors, title=self.title, jour=self.publicationName,
vol=volissue, pages=pages, year=self.coverDate[:4])
if self.doi is not None:
s += ' \\href{{https://doi.org/{0}}}{{doi:{0}}}, '.format(self.doi)
s += '\\href{{{0}}}{{scopus:{1}}}.'.format(self.scopus_link, self.eid)
return s
    def get_ris(self):
        """Bibliographic entry in RIS (Research Information System Format)
        format for journal articles.
        Raises
        ------
        ValueError
            If the item's aggregationType is not Journal.
        """
        if self.aggregationType != 'Journal':
            raise ValueError('Only Journal articles supported.')
        # Basic information
        ris = "TY - JOUR\nTI - {title}\nJO - {jour}\nVL - {vol}\n"\
              "DA - {date}\nPY - {year}\nSP - {pages}\n".format(
                  title=self.title, jour=self.publicationName, vol=self.volume,
                  date=self.coverDate, year=self.coverDate[0:4],
                  pages=self.pageRange)
        # Authors: one AU tag per author
        for au in self.authors:
            ris += 'AU - {}\n'.format(au.indexed_name)
        # DOI: both the DO tag and a resolvable URL
        if self.doi is not None:
            ris += 'DO - {0}\nUR - https://doi.org/{0}\n'.format(self.doi)
        # Issue
        if self.issueIdentifier is not None:
            ris += 'IS - {}\n'.format(self.issueIdentifier)
        # End-of-record marker
        ris += 'ER - \n\n'
        return ris
def _get_org(aff):
"""Auxiliary function to extract org information from affiliation
for authorgroup.
"""
try:
org = aff['organization']
if not isinstance(org, str):
try:
org = org['$']
except TypeError: # Multiple names given
org = ', '.join([d['$'] for d in org if d])
except KeyError: # Author group w/o affiliation
org = None
return org
def _list_authors(lst):
"""Format a list of authors (Surname, Firstname and Firstname Surname)."""
authors = ', '.join([' '.join([a.given_name, a.surname]) for a in lst[0:-1]])
authors += ' and ' + ' '.join([lst[-1].given_name, lst[-1].surname])
return authors
def _parse_pages(self, unicode=False):
"""Auxiliary function to parse and format page range of a document."""
if self.pageRange:
pages = 'pp. {}'.format(self.pageRange)
elif self.startingPage:
pages = 'pp. {}-{}'.format(self.startingPage, self.endingPage)
else:
pages = '(no pages found)'
if unicode:
pages = u'{}'.format(pages)
return pages
def _select_by_idtype(lst, selector):
"""Auxiliary function to return items matching a special idtype."""
return [d['$'] for d in lst if d['@idtype'] == selector]
| {
"content_hash": "49f6f6402479889f862bf005c0252035",
"timestamp": "",
"source": "github",
"line_count": 805,
"max_line_length": 88,
"avg_line_length": 40.53416149068323,
"alnum_prop": 0.5520073551946062,
"repo_name": "scopus-api/scopus",
"id": "990598a649e354d8594867471a9e95cd9916518a",
"size": "32630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scopus/abstract_retrieval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133243"
}
],
"symlink_target": ""
} |
# Sphinx configuration for the Rocket documentation.
import sys
import os

# If extensions or autodoc modules live in another directory, extend
# sys.path here (paths relative to this file must be made absolute):
#sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Sphinx extension modules to load (none required for these docs).
extensions = []

# Directories (relative to this one) containing page templates.
templates_path = ['_templates']

# Filename suffix of reST source files.
source_suffix = '.rst'

# Document that acts as the root of the toctree.
master_doc = 'index'

# Project metadata.
project = u'Rocket'
copyright = u'2011, Timothy Farrell'

# The short X.Y version and the full release string (including tags).
version = '1.2'
release = '1.2.4'

# Directories ignored when looking for source files.
exclude_trees = ['_build']

# Pygments style for syntax highlighting.
pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------------

# HTML theme shipped with Sphinx.
html_theme = 'default'

# Static files (such as style sheets) copied after the builtin static
# files, so "default.css" here would overwrite the builtin one.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'Rocketdoc'

# -- Options for LaTeX output --------------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass).
latex_documents = [
  ('index', 'Rocket.tex', u'Rocket Documentation',
   u'Timothy Farrell', 'manual'),
]
| {
"content_hash": "1e9b5e3a63317478222c17d39b1cb889",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 80,
"avg_line_length": 33.2967032967033,
"alnum_prop": 0.6881188118811881,
"repo_name": "zoni/Rocket",
"id": "5aac74b9f7fd23b8f9a8e4ba30d4a554e26f7c8d",
"size": "6488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149483"
},
{
"name": "Shell",
"bytes": "3184"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cobra.core.loading import get_model, get_class
from cobra.forms.fields import UserField
# Resolve the model dynamically so apps overriding it keep working.
DailyDeadline = get_model('workreport', 'DailyDeadline')
# class DailyReportDeadlineForm(forms.ModelForm):
#
# def __init__(self, organization, *args, **kwargs):
# try:
# instance = DailyDeadline.objects.get(organization=organization)
# except DailyDeadline.DoesNotExist:
# deadline = settings.COBRA_WORKREPORT_DAILY_DEADLINE
# instance = DailyDeadline(organization=organization, deadline_time=deadline)
# kwargs['instance'] = instance
#
# super(DailyReportDeadlineForm, self).__init__(*args, **kwargs)
#
# class Meta:
# fields = ('deadline_time',)
# model = DailyDeadline
# widgets = {
# 'deadline_time': forms.TimeInput(format='%H:%M', attrs={'class':'form-control'}),
# } | {
"content_hash": "8dbaf079c4f4c2b4bbb45b69897b61dc",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 95,
"avg_line_length": 34.833333333333336,
"alnum_prop": 0.6641148325358852,
"repo_name": "lyoniionly/django-cobra",
"id": "99d413bb77bd2860365c6f864bc1a93c852cc436",
"size": "1069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cobra/apps/organization/summary/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "745958"
},
{
"name": "HTML",
"bytes": "254436"
},
{
"name": "JavaScript",
"bytes": "2679541"
},
{
"name": "Python",
"bytes": "1440198"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
} |
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Disable the "exactly one space required after comma" message, for the input
# bindings it looks nicer to insert some spaces (see the setup method)
# pylint: disable=bad-whitespace
from __future__ import print_function
from panda3d.core import ModifierButtons, Vec3, PStatClient
from panda3d.core import Point3, CurveFitter
class MovementController(object):
""" This is a helper class, used to controll the camera and enable various
debugging features. It is not really part of the pipeline, but included to
view the demo scenes. """
    def __init__(self, showbase):
        self.showbase = showbase
        # Requested movement along x/y/z; set to -1/0/1 by the key bindings
        # registered in setup()
        self.movement = [0, 0, 0]
        # Current velocity vector (integrated elsewhere — not shown here)
        self.velocity = Vec3(0.0)
        # Requested keyboard-driven rotation for the two HPR axes
        self.hpr_movement = [0, 0]
        self.speed = 0.4
        # Start transform; either the look-at destination or the explicit
        # HPR is applied, depending on `use_hpr`
        self.initial_position = Vec3(0)
        self.initial_destination = Vec3(0)
        self.initial_hpr = Vec3(0)
        # Mouse-look state
        self.mouse_enabled = False
        self.last_mouse_pos = [0, 0]
        self.mouse_sensivity = 0.7
        self.keyboard_hpr_speed = 0.4
        self.use_hpr = False
        # Camera smoothing and bobbing parameters
        self.smoothness = 6.0
        self.bobbing_amount = 1.5
        self.bobbing_speed = 0.5
def set_initial_position(self, pos, target):
""" Sets the initial camera position """
self.initial_position = pos
self.initial_destination = target
self.use_hpr = False
self.reset_to_initial()
def set_initial_position_hpr(self, pos, hpr):
""" Sets the initial camera position """
self.initial_position = pos
self.initial_hpr = hpr
self.use_hpr = True
self.reset_to_initial()
def reset_to_initial(self):
""" Resets the camera to the initial position """
self.showbase.camera.set_pos(self.initial_position)
if self.use_hpr:
self.showbase.camera.set_hpr(self.initial_hpr)
else:
self.showbase.camera.look_at(
self.initial_destination.x, self.initial_destination.y,
self.initial_destination.z)
def set_movement(self, direction, amount):
self.movement[direction] = amount
def set_hpr_movement(self, direction, amount):
self.hpr_movement[direction] = amount
def set_mouse_enabled(self, enabled):
self.mouse_enabled = enabled
def increase_speed(self):
self.speed *= 1.4
def decrease_speed(self):
self.speed *= 0.6
def unbind(self):
""" Unbinds the movement controler and restores the previous state """
raise NotImplementedError()
@property
def clock_obj(self):
""" Returns the global clock object """
return self.showbase.taskMgr.globalClock
def setup(self):
""" Attaches the movement controller and inits the keybindings """
# x
self.showbase.accept("raw-w", self.set_movement, [0, 1])
self.showbase.accept("raw-w-up", self.set_movement, [0, 0])
self.showbase.accept("raw-s", self.set_movement, [0, -1])
self.showbase.accept("raw-s-up", self.set_movement, [0, 0])
# y
self.showbase.accept("raw-a", self.set_movement, [1, -1])
self.showbase.accept("raw-a-up", self.set_movement, [1, 0])
self.showbase.accept("raw-d", self.set_movement, [1, 1])
self.showbase.accept("raw-d-up", self.set_movement, [1, 0])
# z
self.showbase.accept("space", self.set_movement, [2, 1])
self.showbase.accept("space-up", self.set_movement, [2, 0])
self.showbase.accept("shift", self.set_movement, [2, -1])
self.showbase.accept("shift-up", self.set_movement, [2, 0])
# wireframe + debug + buffer viewer
self.showbase.accept("f3", self.showbase.toggle_wireframe)
self.showbase.accept("f11", lambda: self.showbase.win.save_screenshot("screenshot.png"))
self.showbase.accept("j", self.print_position)
# mouse
self.showbase.accept("mouse1", self.set_mouse_enabled, [True])
self.showbase.accept("mouse1-up", self.set_mouse_enabled, [False])
# arrow mouse navigation
self.showbase.accept("arrow_up", self.set_hpr_movement, [1, 1])
self.showbase.accept("arrow_up-up", self.set_hpr_movement, [1, 0])
self.showbase.accept("arrow_down", self.set_hpr_movement, [1, -1])
self.showbase.accept("arrow_down-up", self.set_hpr_movement, [1, 0])
self.showbase.accept("arrow_left", self.set_hpr_movement, [0, 1])
self.showbase.accept("arrow_left-up", self.set_hpr_movement, [0, 0])
self.showbase.accept("arrow_right", self.set_hpr_movement, [0, -1])
self.showbase.accept("arrow_right-up", self.set_hpr_movement, [0, 0])
# increase / decrease speed
self.showbase.accept("+", self.increase_speed)
self.showbase.accept("-", self.decrease_speed)
# disable modifier buttons to be able to move while pressing shift for
# example
self.showbase.mouseWatcherNode.set_modifier_buttons(ModifierButtons())
self.showbase.buttonThrowers[0].node().set_modifier_buttons(ModifierButtons())
# disable pandas builtin mouse control
self.showbase.disableMouse()
# add ourself as an update task which gets executed very early before
# the rendering
self.update_task = self.showbase.addTask(
self.update, "RP_UpdateMovementController", sort=-40)
# Hotkeys to connect to pstats and reset the initial position
self.showbase.accept("1", PStatClient.connect)
self.showbase.accept("3", self.reset_to_initial)
def print_position(self):
""" Prints the camera position and hpr """
pos = self.showbase.cam.get_pos(self.showbase.render)
hpr = self.showbase.cam.get_hpr(self.showbase.render)
print("(Vec3({}, {}, {}), Vec3({}, {}, {})),".format(
pos.x, pos.y, pos.z, hpr.x, hpr.y, hpr.z))
def update(self, task):
""" Internal update method """
delta = self.clock_obj.get_dt()
# Update mouse first
if self.showbase.mouseWatcherNode.has_mouse():
x = self.showbase.mouseWatcherNode.get_mouse_x()
y = self.showbase.mouseWatcherNode.get_mouse_y()
self.current_mouse_pos = (x * self.showbase.camLens.get_fov().x * self.mouse_sensivity,
y * self.showbase.camLens.get_fov().y * self.mouse_sensivity)
if self.mouse_enabled:
diffx = self.last_mouse_pos[0] - self.current_mouse_pos[0]
diffy = self.last_mouse_pos[1] - self.current_mouse_pos[1]
# Don't move in the very beginning
if self.last_mouse_pos[0] == 0 and self.last_mouse_pos[1] == 0:
diffx = 0
diffy = 0
self.showbase.camera.set_h(self.showbase.camera.get_h() + diffx)
self.showbase.camera.set_p(self.showbase.camera.get_p() - diffy)
self.last_mouse_pos = self.current_mouse_pos[:]
# Compute movement in render space
movement_direction = (Vec3(self.movement[1], self.movement[0], 0) *
self.speed * delta * 100.0)
# transform by the camera direction
camera_quaternion = self.showbase.camera.get_quat(self.showbase.render)
translated_direction = camera_quaternion.xform(movement_direction)
# z-force is inddpendent of camera direction
translated_direction.add_z(
self.movement[2] * delta * 40.0 * self.speed)
self.velocity += translated_direction * 0.15
# apply the new position
self.showbase.camera.set_pos(self.showbase.camera.get_pos() + self.velocity)
# transform rotation (keyboard keys)
rotation_speed = self.keyboard_hpr_speed * 100.0
rotation_speed *= delta
self.showbase.camera.set_hpr(
self.showbase.camera.get_hpr() + Vec3(
self.hpr_movement[0], self.hpr_movement[1], 0) * rotation_speed)
# fade out velocity
self.velocity = self.velocity * max(
0.0, 1.0 - delta * 60.0 / max(0.01, self.smoothness))
# bobbing
ftime = self.clock_obj.get_frame_time()
rotation = (ftime % self.bobbing_speed) / self.bobbing_speed
rotation = (min(rotation, 1.0 - rotation) * 2.0 - 0.5) * 2.0
if self.velocity.length_squared() > 1e-5 and self.speed > 1e-5:
rotation *= self.bobbing_amount
rotation *= min(1, self.velocity.length()) / self.speed * 0.5
else:
rotation = 0
self.showbase.camera.set_r(rotation)
return task.cont
def play_motion_path(self, points, point_duration=1.2):
""" Plays a motion path from the given set of points """
fitter = CurveFitter()
for i, (pos, hpr) in enumerate(points):
fitter.add_xyz_hpr(i, pos, hpr)
fitter.compute_tangents(1.0)
curve = fitter.make_hermite()
print("Starting motion path with", len(points), "CVs")
self.showbase.render2d.hide()
self.showbase.aspect2d.hide()
self.curve = curve
self.curve_time_start = self.clock_obj.get_frame_time()
self.curve_time_end = self.clock_obj.get_frame_time() + len(points) * point_duration
self.delta_time_sum = 0.0
self.delta_time_count = 0
self.showbase.addTask(self.camera_motion_update, "RP_CameraMotionPath", sort=-50)
self.showbase.taskMgr.remove(self.update_task)
def camera_motion_update(self, task):
if self.clock_obj.get_frame_time() > self.curve_time_end:
print("Camera motion path finished")
# Print performance stats
avg_ms = self.delta_time_sum / self.delta_time_count
print("Average frame time (ms): {:4.1f}".format(avg_ms * 1000.0))
print("Average frame rate: {:4.1f}".format(1.0 / avg_ms))
self.update_task = self.showbase.addTask(
self.update, "RP_UpdateMovementController", sort=-50)
self.showbase.render2d.show()
self.showbase.aspect2d.show()
return task.done
lerp = (self.clock_obj.get_frame_time() - self.curve_time_start) /\
(self.curve_time_end - self.curve_time_start)
lerp *= self.curve.get_max_t()
pos, hpr = Point3(0), Vec3(0)
self.curve.evaluate_xyz(lerp, pos)
self.curve.evaluate_hpr(lerp, hpr)
self.showbase.camera.set_pos(pos)
self.showbase.camera.set_hpr(hpr)
self.delta_time_sum += self.clock_obj.get_dt()
self.delta_time_count += 1
return task.cont
| {
"content_hash": "741db9e12806f2b2492f67b1bf2407c8",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 99,
"avg_line_length": 40.064625850340136,
"alnum_prop": 0.6265387554121742,
"repo_name": "eswartz/RenderPipeline",
"id": "91ba33401edc40647f6e48515d6fd59c56db0b99",
"size": "11779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpcore/util/movement_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1241"
},
{
"name": "C",
"bytes": "21397"
},
{
"name": "C++",
"bytes": "160537"
},
{
"name": "GLSL",
"bytes": "712004"
},
{
"name": "Groff",
"bytes": "114"
},
{
"name": "Python",
"bytes": "1374140"
}
],
"symlink_target": ""
} |
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
import deferred2 as deferred
from deferred2 import defer, defer_multi, task
import pytest
# Shorthand: class decorator requesting the named fixtures for every test.
uses = pytest.mark.usefixtures
# Shorthand for the datastore model that stores oversized task payloads.
BigPayloadEntity = deferred.ext.big_payloads.BigPayload
def ResolvedFuture(val):
    """Return an ndb future that is already resolved to *val*."""
    future = ndb.Future()
    future.set_result(val)
    return future
# Shared already-resolved future, used as a stub return value for mockito.
FutureNone = ResolvedFuture(None)
@pytest.fixture
def mockito():
    """Yield the mockito module; undo all stubbing after the test."""
    import mockito
    yield mockito
    mockito.unstub()
# Module-level sink the deferred work targets append to; emptied per-test
# by the clear_messages fixture.
messages = []
@pytest.fixture
def clear_messages():
    """Empty the shared ``messages`` list in place before the test runs."""
    messages[:] = []
def work(data):
    """Deferred payload target: record *data* in the module-level list."""
    messages.append(data)
class _Work(object):
    """Holder providing a bound-method variant of the deferred work target."""
    def append_message(self, data):
        messages.append(data)
@pytest.fixture(ids=['function', 'method'],
                params=[lambda: work, lambda: _Work().append_message])
def callable(request, clear_messages):
    # Parametrized so each test runs with both a plain function and a bound
    # method as the deferred target.  NOTE(review): fixture name shadows the
    # builtin ``callable`` inside tests that request it.
    yield request.param()
@uses('clear_messages', 'ndb')
class TestPayloadStores:
    """Payloads are stored inline, in the datastore or in blobstore by size."""
    def testSmall(self, deferreds, callable):
        """A tiny payload travels inside the task itself, no datastore entity."""
        defer(callable, 'A')
        assert BigPayloadEntity.query().fetch() == []
        deferreds.consume()
        assert messages == ['A']
    def testLarge(self, deferreds, callable):
        """A ~100KB payload is parked in a BigPayload datastore entity."""
        blob = 'A' * 100000
        defer(callable, blob)
        stored = BigPayloadEntity.query().get()
        assert stored.large
        deferreds.consume()
        assert messages == [blob]
        assert BigPayloadEntity.query().fetch() == []
    def testHuge(self, deferreds, blobstore, callable):
        """A ~1MB payload goes to blobstore and is cleaned up afterwards."""
        blob = 'A' * 1000000
        defer(callable, blob)
        stored = BigPayloadEntity.query().get()
        assert stored.huge
        deferreds.consume()
        assert messages == [blob]
        assert BigPayloadEntity.query().fetch() == []
        assert blobstore.BlobInfo.all().fetch(limit=None) == []
@uses('clear_messages', 'ndb')
class TestNameMungling:
    """Task names that taskqueue would reject are munged into valid ones."""
    def testHashifyTooLongNames(self, deferreds):
        """A name far beyond the taskqueue length limit must be accepted."""
        defer(work, _name='N' * 1000)
    def testHashifyNonStrings(self, deferreds):
        """Non-string names (e.g. ndb keys) must be converted transparently."""
        key = ndb.Key('User', '1234567', 'Order', 'as2897')
        defer(work, _name=key)
# Base URL the deferred handler is mounted at; the suffix tests build on it.
DEFAULT_URL = deferred._DEFAULT_URL
@uses('clear_messages')
class TestAdditionalCosmeticUrlArguments:
    """_urlsuffix only decorates the task URL; it must not reach the target."""
    def testAddsArgsToTheUrl(self, deferreds):
        # plain string suffix
        task = defer(work, 'A', _urlsuffix='foo')
        assert task.url == DEFAULT_URL + "/foo"
        # ('foo') is NOT a tuple — parens without comma keep it a string
        task = defer(work, 'A', _urlsuffix=('foo'))
        assert task.url == DEFAULT_URL + "/foo"
        # non-string parts (here an int key id) are stringified into the URL
        task = defer(work, 'A',
                     _urlsuffix=('foo', ndb.Key('User', '1234').id()))
        assert task.url == DEFAULT_URL + "/foo/1234"
        task = defer(work, 'A', _urlsuffix=('foo', 'bar'))
        assert task.url == DEFAULT_URL + "/foo/bar"
    def testRemovesArgsBeforeCallingTheDeferred(self, deferreds):
        # the suffix must be stripped before the payload reaches work()
        defer(work, 'A', _urlsuffix=('foo', 'bar'))
        deferreds.consume()
        assert ['A'] == messages
    def testAutoName(self, taskqueue):
        # exploratory output only (Python 2 print statements); no assertions
        defer(work, 'A')
        print work.__module__, work.__name__
        print work.__class__
        inst = _Work()
        meth = inst.append_message
        print inst.__class__
        print inst.__class__.__module__, inst.__class__.__name__
        print meth.__name__
        # 1/0
class TestAutoTransactional:
    """defer() picks the transactional flag from the ambient ndb transaction.

    The mockito stubs double as assertions: queue_multiple_tasks is stubbed
    only for the expected (queue, transactional) combination, so a wrong
    flag would fail the stub lookup.
    """
    def testTransactionalIfInTransaction(self, mockito, ndb):
        # inside a transaction -> transactional=True expected
        mockito.when(deferred.batcher) \
            .queue_multiple_tasks('default', True, mockito.any(list)) \
            .thenReturn(FutureNone)
        ndb.transaction(lambda: defer(work, 'A'))
    def testNotTransactionalIfOutsideTransaction(self, mockito, ndb):
        # no transaction -> transactional=False expected
        mockito.when(deferred.batcher) \
            .queue_multiple_tasks('default', False, mockito.any(list)) \
            .thenReturn(FutureNone)
        defer(work, 'A')
    def testNotTransactionalIfWanted(self, mockito, ndb):
        # explicit opt-out wins even inside a transaction
        mockito.when(deferred.batcher) \
            .queue_multiple_tasks('default', False, mockito.any(list)) \
            .thenReturn(FutureNone)
        ndb.transaction(
            lambda: defer(work, 'A', _transactional=False))
    def testCannotOptinToTransactionalOutsideOfTransaction(self, mockito, ndb):
        with pytest.raises(taskqueue.BadTransactionStateError):
            defer(work, 'A', _transactional=True)
    def testTransactionalMustBeFalseIfNameIsGiven(self, taskqueue, ndb):
        # named tasks cannot be transactional; presumably defer downgrades
        # the flag automatically when _name is given — confirm in deferred2
        ndb.transaction(lambda: defer(work, 'A', _name='a'))
@uses('clear_messages', 'ndb')
class TestCleanDbIfAddingTheTaskFails:
    """Stored payload entities must not leak when enqueueing the task fails."""
    def testTransactionalSpecifiedButNotInTransaction(self, deferreds):
        payload = 'A' * 100000
        with pytest.raises(taskqueue.BadTransactionStateError):
            defer(work, payload, _transactional=True)
        assert BigPayloadEntity.query().fetch() == []
    def testTaskCreationFailsNonTransactional(self, deferreds, monkeypatch):
        def explode(*args, **kwargs):
            raise taskqueue.Error()
        monkeypatch.setattr(taskqueue, 'Task', explode)
        payload = 'A' * 100000
        with pytest.raises(taskqueue.Error):
            defer(work, payload)
        assert BigPayloadEntity.query().fetch() == []
    def testTaskCreationFailsInTransaction(self, deferreds, monkeypatch):
        def explode(*args, **kwargs):
            raise taskqueue.Error()
        monkeypatch.setattr(taskqueue, 'Task', explode)
        payload = 'A' * 100000
        with pytest.raises(taskqueue.Error):
            ndb.transaction(lambda: defer(work, payload))
        assert BigPayloadEntity.query().fetch() == []
@uses('clear_messages', 'ndb')
class TestAddMultipleTasks:
    """defer_multi / defer_multi_async enqueue whole batches of tasks."""
    def testAsync(self, deferreds):
        batch = [deferred.task(work, i) for i in range(10)]
        deferred.defer_multi_async(*batch).get_result()
        deferreds.consume()
        assert sorted(messages) == list(range(10))
    def testSync(self, deferreds):
        batch = [deferred.task(work, i) for i in range(10)]
        deferred.defer_multi(*batch)
        deferreds.consume()
        assert sorted(messages) == list(range(10))
    def testFilterWhenTranformedIntoNone(self, deferreds):
        """A transformer returning None drops the task entirely."""
        queued = deferred.defer_multi(
            deferred.task(work, 'A'),
            transformers=[ndb.tasklet(lambda t, c: None)])
        assert len(queued) == 0
class TestBatching:
    """Tasks handed to defer_multi are grouped into one queue_multiple_tasks
    call per (queue name, transactional) combination."""
    def testDifferentiateTransactional(self, mockito):
        def assertTasksLen(wanted_length):
            # mockito answer: assert the batched task list has the given size
            def _answer(q, tr, tasks):
                assert len(tasks) == wanted_length
                return FutureNone
            return _answer
        # two non-transactional 'Foo' tasks in one batch ...
        mockito.when(deferred.batcher) \
            .queue_multiple_tasks('Foo', False, mockito.any(list)) \
            .thenAnswer(assertTasksLen(2))
        # ... and the transactional one in a batch of its own
        mockito.when(deferred.batcher) \
            .queue_multiple_tasks('Foo', True, mockito.any(list)) \
            .thenAnswer(assertTasksLen(1))
        defer_multi(
            task(work, 'A', _queue='Foo'),
            task(work, 'B', _queue='Foo'),
            task(work, 'A', _queue='Foo', _transactional=True))
    def testDifferentiateQueueName(self, mockito):
        def assertTasksLen(wanted_length):
            # same answer helper as above, duplicated per test on purpose
            def _answer(q, tr, tasks):
                assert len(tasks) == wanted_length
                return FutureNone
            return _answer
        # two tasks for 'Foo', one for 'Bar' -> two separate batches
        mockito.when(deferred.batcher) \
            .queue_multiple_tasks('Foo', False, mockito.any(list)) \
            .thenAnswer(assertTasksLen(2))
        mockito.when(deferred.batcher) \
            .queue_multiple_tasks('Bar', False, mockito.any(list)) \
            .thenAnswer(assertTasksLen(1))
        defer_multi(
            task(work, 'A', _queue='Foo'),
            task(work, 'B', _queue='Foo'),
            task(work, 'A', _queue='Bar'))
@uses('clear_messages', 'ndb')
class TestOneShot:
    """one_shot enqueues a single bootstrap task that fans out the rest."""
    def testEnqueueOneTaskWhichEnqueuesTheRest(self, deferreds):
        batch = [deferred.task(work, i) for i in range(10)]
        deferred.one_shot(*batch)
        assert deferreds.count_tasks() == 1
        deferreds.tick()
        assert deferreds.count_tasks() == 10
        deferreds.consume()
| {
"content_hash": "287d876b457c7daa34cf45086a7192ed",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 79,
"avg_line_length": 26.794117647058822,
"alnum_prop": 0.597755823880961,
"repo_name": "kaste/deferred2",
"id": "07b35d2c28a10549675947b84b7a484efe1ae82f",
"size": "8200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/deferred_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19789"
}
],
"symlink_target": ""
} |
class PID:
    """
    Discrete PID controller with a clamped integrator and output
    saturated to the range [0, 1].
    """
    def __init__(self, P=2.0, I=0.0, D=1.0, Derivator=0, Integrator=0, Integrator_max=500, Integrator_min=-500):
        self.Kp = P
        self.Ki = I
        self.Kd = D
        self.Derivator = Derivator
        self.Integrator = Integrator
        self.Integrator_max = Integrator_max
        self.Integrator_min = Integrator_min
        self.set_point = 0.0
        # -1 marks "fitness not evaluated yet" (used by the GA driver)
        self.fitness = -1
    def update(self, current_value):
        """
        Return the saturated PID output for the given process value.
        """
        self.error = self.set_point - current_value
        # Proportional term
        self.P_value = self.Kp * self.error
        # Derivative term works on the process value, not the error
        self.D_value = self.Kd * (self.Derivator - current_value)
        self.Derivator = current_value
        # Accumulate the error and clamp the integrator (anti-windup)
        self.Integrator = self.Integrator + self.error
        if self.Integrator > self.Integrator_max:
            self.Integrator = self.Integrator_max
        elif self.Integrator < self.Integrator_min:
            self.Integrator = self.Integrator_min
        self.I_value = self.Integrator * self.Ki
        output = self.P_value + self.I_value + self.D_value
        # Saturate the actuator command to [0, 1]
        if output > 1:
            return 1
        if output < 0:
            return 0
        return output
    def setPoint(self, set_point):
        """
        Set the target value and reset the integral/derivative state.
        """
        self.set_point = set_point
        self.Integrator = 0
        self.Derivator = 0
    def setIntegrator(self, Integrator):
        self.Integrator = Integrator
    def setDerivator(self, Derivator):
        self.Derivator = Derivator
    def setKp(self, P):
        self.Kp = P
    def setKi(self, I):
        self.Ki = I
    def setKd(self, D):
        self.Kd = D
    def getPoint(self):
        return self.set_point
    def getError(self):
        return self.error
    def getIntegrator(self):
        return self.Integrator
    def getDerivator(self):
        return self.Derivator
    def setFitness(self, f):
        self.fitness = f
    def getFitness(self):
        return self.fitness
| {
"content_hash": "7fbaefe90614819751aa240e899d9d5d",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 109,
"avg_line_length": 19.75581395348837,
"alnum_prop": 0.6957033549146556,
"repo_name": "Miceuz/pidgenetics",
"id": "54bb2a2699d48d58483c3da62afdb7ef2500b568",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pid.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13050"
}
],
"symlink_target": ""
} |
import minisolvers
import time
def parse_SAT(stream):
    """Parse a SAT instance in standard DIMACS format from *stream*.

    Returns ``[solver, clause_list]`` where *solver* is a MinisatSolver
    loaded with the clauses and *clause_list* is the list of clauses as
    sets of signed literals.
    """
    solver = minisolvers.MinisatSolver()
    clause_list = []
    for raw in stream.readlines():
        line = raw.rstrip()
        # Skip comments, the benchmark's '%'/'0' footer lines and blanks.
        if line == "" or line.startswith(('c', '%', '0')):
            continue
        if line.startswith('p'):
            # Problem line "p cnf <vars> <clauses>": allocate the variables.
            fields = line.split()
            for _ in range(int(fields[2])):
                solver.new_var()
        else:
            # Clause line: signed literals terminated by a 0 sentinel.
            clause = set(int(tok) for tok in line.split() if tok != '0')
            clause_list.append(clause)
            solver.add_clause(clause)
    return [solver, clause_list]
def get_ucp_matrix(m_i, cList):
    """Build the unate covering (UCP) matrix for one minterm.

    *m_i* is a 0/1 assignment vector; *cList* the clause list (sets of
    signed literals).  Returns ``[signed_minterm, matrix]`` where each
    matrix row marks which literals of the minterm satisfy that clause.
    """
    # Convert the 0/1 assignment into signed literal form: bit i becomes
    # +(i+1) when set and -(i+1) when clear.
    signed = [(i + 1) if bit == 1 else -(i + 1) for i, bit in enumerate(m_i)]
    # One row per clause: 1 where the clause contains the literal.
    matrix = [[1 if lit in clause else 0 for lit in signed] for clause in cList]
    return [signed, matrix]
def get_ucp_row(m_i, clause):
    """Return the 0/1 membership row of *m_i*'s literals in *clause*."""
    return [1 if lit in clause else 0 for lit in m_i]
def get_essential_literals_and_modify(m_i, matrix):
    """Extract the essential literals and shrink the UCP matrix.

    Essential literals are the only cover for some clause; every clause
    they cover is removed from the matrix.  Returns
    ``[essential_literal_set, pruned_matrix]``.
    """
    essential_idx = get_essential_literals(matrix)
    covered = get_clauses_covered_I(essential_idx, matrix)
    pruned = get_pruned_matrix(covered, matrix)
    return [set(m_i[idx] for idx in essential_idx), pruned]
# TODO : Parallelizable function
def get_essential_literals(matrix):
    """Return the column indices of essential literals.

    A row (clause) with row-sum 1 is covered by exactly one literal, so
    that literal's column index is essential.
    """
    return set(row.index(1) for row in matrix if sum(row) == 1)
def get_clauses_covered_I(essentialI, matrix):
    """Return the row indices of clauses covered by the essential columns.

    An essential literal covers every clause whose row has a 1 in its
    column, not just the clause that made it essential.
    """
    return set(row_idx for row_idx, row in enumerate(matrix)
               if any(row[col] == 1 for col in essentialI))
def get_pruned_matrix(clauses_covered_I, ucp_matrix):
    """Return a new matrix without the rows listed in *clauses_covered_I*."""
    return [row for idx, row in enumerate(ucp_matrix)
            if idx not in clauses_covered_I]
def prune_implied(matrix):
    """Drop rows (clauses) implied by other rows.

    Assumes no duplicate clauses in the input, as per the DIMACS
    guidelines (two identical rows would imply each other).
    """
    doomed = get_implied_rows(matrix)
    return [row for idx, row in enumerate(matrix) if idx not in doomed]
def elim(matrix, row_elims):
    """Return *matrix* without the rows whose indices are in *row_elims*."""
    return [row for idx, row in enumerate(matrix) if idx not in row_elims]
def get_implied_rows(matrix):
    """Return indices of rows implied (superset-covered) by another row.

    Row *index* is implied by row *i* when every column set in row *i*
    is also set in row *index* (simple boolean implication test, inlined
    from ``implied``).
    """
    redundant = set()
    n = len(matrix)
    for i in range(n):
        base = matrix[i]
        for index in range(n):
            if index == i:
                continue
            other = matrix[index]
            if all(not (b == 1 and o == 0) for b, o in zip(base, other)):
                redundant.add(index)
    return redundant
def implied(index, i, matrix):
    """Return whether row *i* implies row *index*.

    True when every column set to 1 in row *i* is also 1 in row *index*
    (i.e. row *index* is a superset and therefore redundant).
    """
    base = matrix[i]
    other = matrix[index]
    for b, o in zip(base, other):
        if b == 1 and o == 0:
            return False
    return True
def transpose(matrix):
    # zip(*rows) flips rows and columns.  Under Python 2 this yields a list
    # of tuples; callers here only iterate the result once.
    return zip(*matrix)
def get_greedy_cover(m_i, ucp_matrix):
    """Greedy set cover over the remaining UCP rows.

    Repeatedly picks the literal (column) covering the most uncovered
    clauses, removes the clauses it covers, and continues until the
    matrix is exhausted.  Returns the chosen literals of *m_i* as a set.
    """
    chosen = set()
    while ucp_matrix:
        # Column sums = how many remaining clauses each literal satisfies.
        col_sums = [sum(col) for col in zip(*ucp_matrix)]
        best = col_sums.index(max(col_sums))
        chosen.add(best)
        # Drop every clause the chosen literal covers.
        ucp_matrix = [row for row in ucp_matrix if row[best] != 1]
    return set(m_i[idx] for idx in chosen)
def prune_literal(max_val_I, ucp_matrix):
    """Return the matrix without rows covered by column *max_val_I*."""
    return [row for row in ucp_matrix if row[max_val_I] != 1]
def get_cube_cover(minterm, cList):
    """Return ``[cube_cover, blocking_clause]`` for one satisfying minterm.

    The cube cover is the union of the essential literals and a greedy
    cover of the remaining clauses; the blocking clause is its negation,
    used to exclude the whole cube from subsequent SAT calls.
    """
    signed_minterm, ucp = get_ucp_matrix(minterm, cList)
    essential, ucp = get_essential_literals_and_modify(signed_minterm, ucp)
    ucp = prune_implied(ucp)
    cover = essential | get_greedy_cover(signed_minterm, ucp)
    return [cover, set(-lit for lit in cover)]
def print_result(i,Q):
if(i == 1):
print "UNSATISFIABLE"
else:
print "SATISFIABLE"
def get_cur_problem_stream(i):
    """Open the i-th (0-based) uf100-430 benchmark file for reading."""
    path = "input/Random3SAT/uf100-430/uf100-0%d.cnf" % (i + 1)
    return open(path)
def get_all_sat(S, cList):
    """Enumerate a DNF cube cover of all models of solver *S*.

    Each satisfying minterm is generalized to a cube, the cube's blocking
    clause is added to the solver, and the loop repeats until UNSAT.
    Note that the blocking clauses are only added to the solver, not to
    *cList*: keeping the original clause list makes the cover faster but
    (unlike the commented-out alternative) does not produce disjoint
    cubes.  Returns ``[iteration_count, cube_list]``.
    """
    count = 1
    solver = S
    cubes = []
    while solver.solve():
        minterm = list(solver.get_model())
        cover, blocking = get_cube_cover(minterm, cList)
        cubes.append(list(cover))
        solver.add_clause(blocking)
        # For disjoint cubes, the blocking clause would also be appended
        # to cList here.
        count += 1
    return [count, cubes]
'''
Main Program for the SAT Instance algorithm. This function is incharge of
which SAT Benchmark the algorithm is tried upon and produces the required
results
'''
if __name__ == "__main__":
    # Parsing the Input in cnf form and forming a SAT Instance
    # Runs the AllSAT cube-cover procedure over 100 uf100-430 benchmark
    # files (Python 2 print statements).
    for i in range(100):
        print "Current problem : " + str(i)
        stream = get_cur_problem_stream(i)
        [S,cList] = parse_SAT(stream)
        [j,Q] = get_all_sat(S,cList)
        print_result(j,Q)
| {
"content_hash": "88e2886f6f54990e35cbafdfd2408af4",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 80,
"avg_line_length": 33.4453125,
"alnum_prop": 0.6101378182667601,
"repo_name": "tejasnikumbh/AllSAT",
"id": "5c7fcf1cc4a2a00aaba358c0b05825ac29f2e46f",
"size": "8597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "261767"
},
{
"name": "C++",
"bytes": "629844"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "19"
},
{
"name": "Makefile",
"bytes": "17047"
},
{
"name": "Python",
"bytes": "5299856"
},
{
"name": "Shell",
"bytes": "5265"
}
],
"symlink_target": ""
} |
from django import forms
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import FormView
from trix.widgets import TrixEditor
class EditorForm(forms.Form):
    """Single-field test form rendered with the Trix rich-text widget."""
    content = forms.CharField(widget=TrixEditor)
class EditorView(FormView):
    """Render index.html with the Trix-backed EditorForm."""
    form_class = EditorForm
    template_name = 'index.html'
# Test-app routes: Django admin, the trix app's urls, and the demo editor
# page at the site root.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^trix/', include('trix.urls')),
    url(r'^$', EditorView.as_view()),
]
| {
"content_hash": "848f171d3c6028db3a7d95b5eb6024fa",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 48,
"avg_line_length": 22.636363636363637,
"alnum_prop": 0.7168674698795181,
"repo_name": "istrategylabs/django-trix",
"id": "057ea16750cab4cd286e21f1351be3e4df53893c",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15769"
},
{
"name": "HTML",
"bytes": "265"
},
{
"name": "JavaScript",
"bytes": "395"
},
{
"name": "Python",
"bytes": "6060"
}
],
"symlink_target": ""
} |
'''
A list of exceptions that may be raised.
'''
class InvalidCharacterTypeError(Exception):
    """Raised when a character is not a consonant-vowel pair or a vowel."""
class UnexpectedCharacterError(Exception):
    """Raised on a character in the string that we can't deal with."""
| {
"content_hash": "9313fb92ed5bc2539afedc567c273dfc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 62,
"avg_line_length": 18.470588235294116,
"alnum_prop": 0.6592356687898089,
"repo_name": "msikma/kanaconv",
"id": "ac38f07c03cd4047c9df55814e236f343dadea78",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kanaconv/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115293"
}
],
"symlink_target": ""
} |
import allure
@allure.step('step in __init__.py')
def init_step():
    """Intentional no-op: exists only so the allure report records a step
    defined inside a package __init__ module."""
    pass
| {
"content_hash": "ce4bb6efb508004a19078b0a7df543c1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 35,
"avg_line_length": 13,
"alnum_prop": 0.6282051282051282,
"repo_name": "igogorek/allure-python",
"id": "cfd8b09bd3ce2eec238109721c727ecc1911b03f",
"size": "78",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allure-pytest/test/steps/outside/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "23469"
},
{
"name": "Python",
"bytes": "176288"
}
],
"symlink_target": ""
} |
from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeDataBufferFeature(SeaBreezeFeature):
    """Interface stub for the spectrometer's on-device data buffer feature.

    Every method raises NotImplementedError here; protocol-specific
    subclasses are expected to provide the implementations (see the TODO
    comment above this class).
    """
    # Feature name used for registration/lookup in the feature framework.
    identifier = "data_buffer"
    def clear(self) -> None:
        """Remove all spectra currently held in the device buffer."""
        raise NotImplementedError("implement in derived class")
    def remove_oldest_spectra(self, number_of_spectra: int) -> None:
        """Drop the given number of oldest entries from the buffer."""
        raise NotImplementedError("implement in derived class")
    def get_number_of_elements(self) -> int:
        """Return how many spectra the buffer currently holds."""
        raise NotImplementedError("implement in derived class")
    def get_buffer_capacity(self) -> int:
        """Return the currently configured buffer capacity."""
        raise NotImplementedError("implement in derived class")
    def set_buffer_capacity(self, capacity: int) -> None:
        """Set the buffer capacity (expected within the min/max limits)."""
        raise NotImplementedError("implement in derived class")
    def get_buffer_capacity_maximum(self) -> int:
        """Return the largest buffer capacity the device supports."""
        raise NotImplementedError("implement in derived class")
    def get_buffer_capacity_minimum(self) -> int:
        """Return the smallest buffer capacity the device supports."""
        raise NotImplementedError("implement in derived class")
| {
"content_hash": "cb91341b34da162de5a393e3d13443eb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 33.54838709677419,
"alnum_prop": 0.7115384615384616,
"repo_name": "ap--/python-seabreeze",
"id": "42fbc76e1d6280aab0965172a68f9b871d7ac8cb",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/seabreeze/pyseabreeze/features/databuffer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "139445"
},
{
"name": "C++",
"bytes": "792990"
},
{
"name": "Cython",
"bytes": "167272"
},
{
"name": "Python",
"bytes": "220055"
}
],
"symlink_target": ""
} |
"""The following routines are specific to queries to
www.dictionary.com (as of 2003-07-23)"""
def get_def_page(word):
    """Fetch the raw HTML of the definition page for *word* (Python 2 urllib)."""
    import urllib
    query_url = "http://www.dictionary.com/cgi-bin/dict.pl?term=%s" % word
    return urllib.urlopen(query_url).read()
def get_definitions(wlist):
    """Map each word in *wlist* to the list of definitions scraped for it."""
    return dict((word, extract_defs(get_def_page(word))) for word in wlist)
def extract_defs(text):
    """Extract definition strings from a dictionary.com results page.

    The site wraps each definition in <LI>...</LI> list items.  The items
    are captured with a NON-greedy regex — a greedy `(.*)` would fuse
    several definitions appearing on the same line into one entry — and
    then stripped of any remaining internal tags (<ul>, <CITE>, etc.).
    Entries that are empty after tag removal are dropped.
    """
    import re
    clean_defs = []
    # Non-greedy so each <LI>...</LI> pair is matched separately even when
    # several list items share a single line.
    LI_re = re.compile(r'<LI>(.*?)</LI>')
    HTML_re = re.compile(r'<[^>]+>\s*')
    defs = LI_re.findall(text)
    # remove internal tags (and any whitespace directly following them)
    for d in defs:
        clean_d = HTML_re.sub('', d)
        if clean_d:
            clean_defs.append(clean_d)
    return clean_defs
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
if __name__ == "__main__":
    # Smoke test: scrape a few words and dump the resulting dict
    # (Python 2 print statement).
    defdict = get_definitions(['monty','python','language'])
    print defdict
| {
"content_hash": "9f42ee2011bae90fcf2e69bddacb8f95",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 132,
"avg_line_length": 47.375,
"alnum_prop": 0.34074632491519036,
"repo_name": "ActiveState/code",
"id": "5f435117ad419b9cc9136ac964607ce069d5b4a8",
"size": "2653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/211886_Retrieve_word_definitions_online_dictionary/recipe-211886.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import sys
from nose.tools import assert_raises
import textwrap
from StringIO import StringIO
from webassets.bundle import Bundle
from webassets.loaders import PythonLoader, YAMLLoader, LoaderError
from webassets.exceptions import ImminentDeprecationWarning
from nose import SkipTest
from helpers import check_warnings
class TestYAML(object):
    """Tests for YAMLLoader: parsing bundles and environments from YAML."""

    def setup(self):
        # The whole class requires PyYAML; skip every test otherwise.
        try:
            import yaml
        except ImportError:
            raise SkipTest()

    def loader(self, text, filename=None):
        """Build a YAMLLoader reading from an in-memory stream.

        If ``filename`` is given it is attached to the stream so the
        loader can resolve paths relative to the file location.
        """
        io = StringIO(textwrap.dedent(text))
        if filename:
            io.name = filename
        return YAMLLoader(io)

    def test_load_bundles(self):
        """Bundle options, string/list contents and nesting are parsed."""
        bundles = self.loader("""
        standard:
            filters: cssmin,gzip
            output: output.css
            contents:
                - file1
                - file2
        empty-bundle:
        single-content-as-string-bundle:
            contents: only-this
        nested:
            output: nested.css
            filters: cssmin
            contents:
                - cssfile1
                - filters: less
                  contents:
                      - lessfile1
                      - lessfile2
                      - contents:
                            reallynested.css
                      - lessfile3
        """).load_bundles()
        assert len(bundles) == 4
        assert bundles['standard'].output == 'output.css'
        assert len(bundles['standard'].filters) == 2
        assert bundles['standard'].contents == ('file1', 'file2')
        assert bundles['empty-bundle'].contents == ()
        assert bundles['single-content-as-string-bundle'].contents == ('only-this',)
        assert bundles['nested'].output == 'nested.css'
        assert len(bundles['nested'].filters) == 1
        assert len(bundles['nested'].contents) == 2
        # An inline mapping inside "contents" becomes a nested Bundle.
        nested_bundle = bundles['nested'].contents[1]
        assert isinstance(nested_bundle, Bundle)
        assert len(nested_bundle.filters) == 1
        assert len(nested_bundle.contents) == 4
        assert isinstance(nested_bundle.contents[2], Bundle)

    def test_load_recursive_bundles(self):
        """A bundle may reference another bundle by name in its contents."""
        bundles = self.loader("""
        standard:
            filters: cssmin,gzip
            output: output.css
            contents:
                - file1
                - file2
        recursive:
            output: recursive.css
            filters: cssmin
            contents:
                - cssfile1
                - standard
                - cssfile2
        """).load_bundles()
        assert len(bundles) == 2
        # The name "standard" is resolved to the bundle of that name.
        assert bundles['recursive'].contents[1].contents == bundles['standard'].contents
        assert isinstance(bundles['recursive'].contents[1], Bundle)

    def test_empty_files(self):
        """YAML loader can deal with empty files.
        """
        self.loader("""""").load_bundles()
        self.loader("""""").load_environment()

    def test_load_environment(self):
        """Environment options, nested config values and bundles all load."""
        environment = self.loader("""
        url: /foo
        directory: something
        versions: 'timestamp'
        auto_build: true
        url_expire: true
        config:
            compass_bin: /opt/compass
        bundles:
            test:
                output: foo
        """).load_environment()
        assert environment.url == '/foo'
        assert environment.url_expire == True
        assert environment.auto_build == True
        assert environment.config['versions'] == 'timestamp'
        # compass_bin was set lowercase; lookup by uppercase key works.
        assert environment.config['COMPASS_BIN'] == '/opt/compass'
        # Because the loader isn't aware of the file location, the
        # directory is read as-is, relative to cwd rather than the
        # file location.
        assert environment.config['directory'] == 'something'
        # [bug] Make sure the bundles are loaded as well.
        assert len(environment) == 1

    def test_load_environment_no_url_or_directory(self):
        """Check that "url" and "directory" are not required.
        """
        self.loader("""foo: bar""").load_environment()

    def test_load_deprecated_attrs(self):
        """The legacy "expire" option still loads, but emits a warning."""
        with check_warnings(("", ImminentDeprecationWarning)) as w:
            environment = self.loader("""
            url: /foo
            directory: something
            expire: false
            """).load_environment()
            assert environment.url_expire == False

    def test_load_environment_directory_base(self):
        """"directory" is resolved relative to the YAML file's location."""
        environment = self.loader("""
        url: /foo
        directory: ../something
        """, filename='/var/www/project/config/yaml').load_environment()
        # The directory is considered relative to the YAML file location.
        assert environment.directory == '/var/www/project/something'

    def test_load_extra_default(self):
        """[Regression] If no extra= is given, the value defaults to {}"""
        bundles = self.loader("""
        foo:
            output: foo
        """).load_bundles()
        assert bundles['foo'].extra == {}
class TestPython(object):
    """Test the PythonLoader.
    """

    def test_path(self):
        """[bug] Regression test: Python loader does not leave
        sys.path messed up.
        """
        old_path = sys.path[:]
        loader = PythonLoader('sys')
        assert sys.path == old_path

    def test_load_bundles(self):
        """Bundle instances defined as module attributes are collected."""
        import types
        module = types.ModuleType('test')
        module.foo = Bundle('bar')

        loader = PythonLoader(module)
        bundles = loader.load_bundles()
        assert len(bundles) == 1
        # list() so this works on both Python 2 and Python 3, where
        # dict views are not indexable.
        assert list(bundles.values())[0].contents[0] == 'bar'
| {
"content_hash": "593b975b0ff95daa2c15ff846d5c2e98",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 88,
"avg_line_length": 32.47976878612717,
"alnum_prop": 0.5662929346858872,
"repo_name": "torchbox/webassets",
"id": "892e21e3aa98ce979c1684d5e2510aaf07b850f5",
"size": "5619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_loaders.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
from setuptools import setup, find_packages
if __name__ == '__main__':
    # Ship everything except the test packages.
    found_packages = find_packages(exclude=['*tests*'])

    setup(
        name="pyrelic",
        license="GPL",
        version='0.8.0',
        description=u'Python API Wrapper for NewRelic API',
        author=u'Andrew Gross',
        author_email=u'andrew.w.gross@gmail.com',
        include_package_data=True,
        url='https://github.com/andrewgross/pyrelic',
        packages=found_packages,
        install_requires=["six", "requests>=2.5.0"],
        extras_require={
            "tests": [
                "mock==1.0.1",
                "sure==1.2.2",
                "nose==1.2.1",
                "coverage==3.6",
                "httpretty==0.8.3",
            ],
        },
        classifiers=(
            'Development Status :: 3 - Alpha',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
            'Natural Language :: English',
            'Operating System :: Microsoft',
            'Operating System :: POSIX',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2.7',
        ),
    )
| {
"content_hash": "dbe20baff0f7a580d36735bf821c1861",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 89,
"avg_line_length": 33.17142857142857,
"alnum_prop": 0.5193798449612403,
"repo_name": "andrewgross/pyrelic",
"id": "8a0219ee16ba2ce0df78d0fb5c811c3ec1ef3fd1",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1814"
},
{
"name": "Python",
"bytes": "55567"
}
],
"symlink_target": ""
} |
def extractWwwLiteroticaCom(item):
	'''
	Parser for 'www.literotica.com'
	'''
	title = item['title']
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)

	# Skip previews as well as entries carrying no chapter/volume info.
	if "preview" in title.lower() or not (chp or vol):
		return None

	if "WATTT" in item['tags']:
		return buildReleaseMessageWithType(item, "WATTT", vol, chp,
		                                   frag=frag, postfix=postfix)
	return False
| {
"content_hash": "fdf319e667f2a8ecbc0da981329f8d51",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 89,
"avg_line_length": 26,
"alnum_prop": 0.7115384615384616,
"repo_name": "fake-name/ReadableWebProxy",
"id": "5153aa6ebad32a94909ef2a6751a0d17dfce9ccf",
"size": "365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractWwwLiteroticaCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""Create a JACK client that copies input audio directly to the outputs.
This is somewhat modeled after the "thru_client.c" example of JACK 2:
http://github.com/jackaudio/jack2/blob/master/example-clients/thru_client.c
If you have a microphone and loudspeakers connected, this might cause an
acoustical feedback!
"""
import sys
import os
import jack
import threading
# Command line: [client-name [server-name]]; both optional.
argv = iter(sys.argv)
# By default, use script name without extension as client name:
defaultclientname = os.path.splitext(os.path.basename(next(argv)))[0]
clientname = next(argv, defaultclientname)
servername = next(argv, None)  # None selects the default JACK server
client = jack.Client(clientname, servername=servername)

if client.status.server_started:
    print('JACK server started')
if client.status.name_not_unique:
    # JACK appended a suffix to make the requested name unique.
    print(f'unique name {client.name!r} assigned')

# Set by the shutdown callback to unblock the main thread's wait below.
event = threading.Event()
@client.set_process_callback
def process(frames):
    """Realtime callback: copy each input buffer to the paired output."""
    assert len(client.inports) == len(client.outports)
    assert frames == client.blocksize
    for i, o in zip(client.inports, client.outports):
        # Slice assignment writes in place into the JACK output buffer.
        o.get_buffer()[:] = i.get_buffer()
@client.set_shutdown_callback
def shutdown(status, reason):
    """Report the server shutdown and wake up the waiting main thread."""
    print('JACK shutdown!')
    print('status:', status)
    print('reason:', reason)
    event.set()  # unblocks event.wait() in the main with-block
# create two port pairs (stereo in/out)
for number in 1, 2:
    client.inports.register(f'input_{number}')
    client.outports.register(f'output_{number}')

with client:
    # When entering this with-statement, client.activate() is called.
    # This tells the JACK server that we are ready to roll.
    # Our process() callback will start running now.

    # Connect the ports.  You can't do this before the client is activated,
    # because we can't make connections to clients that aren't running.
    # Note the confusing (but necessary) orientation of the driver backend
    # ports: playback ports are "input" to the backend, and capture ports
    # are "output" from it.
    capture = client.get_ports(is_physical=True, is_output=True)
    if not capture:
        raise RuntimeError('No physical capture ports')

    for src, dest in zip(capture, client.inports):
        client.connect(src, dest)

    playback = client.get_ports(is_physical=True, is_input=True)
    if not playback:
        raise RuntimeError('No physical playback ports')

    for src, dest in zip(client.outports, playback):
        client.connect(src, dest)

    print('Press Ctrl+C to stop')
    # Block until the shutdown callback fires or the user interrupts.
    try:
        event.wait()
    except KeyboardInterrupt:
        print('\nInterrupted by user')

# When the above with-statement is left (either because the end of the
# code block is reached, or because an exception was raised inside),
# client.deactivate() and client.close() are called automatically.
| {
"content_hash": "0f454a28efa0ba79e87dbeb737c50385",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 75,
"avg_line_length": 31.511627906976745,
"alnum_prop": 0.7140221402214022,
"repo_name": "spatialaudio/jackclient-python",
"id": "657d7ae052deac94a3108c3b390222e082aadec9",
"size": "2734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/thru_client.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118060"
}
],
"symlink_target": ""
} |
"""Support for running subprocess commands."""
import io
import datetime
import logging
import os
import shlex
import subprocess
import time
from buildtool import (
ensure_dir_exists,
log_embedded_output,
log_timestring,
raise_and_log_error,
timedelta_string,
ExecutionError)
from buildtool.base_metrics import BaseMetricsRegistry
# Directory where error logfiles are copied to
# (check_subprocesses_to_logfile copies the log of a failing command here).
# This is exposed so it can be configured externally since
# this module does not offer encapsulated configuration.
ERROR_LOGFILE_DIR = 'errors'
def start_subprocess(cmd, stream=None, stdout=None, echo=False, **kwargs):
  """Starts a subprocess and returns handle to it.

  Args:
    cmd: [string] The command to run. It is tokenized with shlex unless
       kwargs contains shell=True, in which case it is passed verbatim.
    stream: [file obj] If given, a spawn header is written to it.
    stdout: [file obj] Destination for the child's stdout; defaults to a
       pipe. stderr is always merged into stdout.
    echo: [boolean] If true, log at INFO level instead of DEBUG.
    kwargs: [kwargs] Extra keyword arguments forwarded to subprocess.Popen.

  Returns:
    The subprocess.Popen object, annotated with a start_date attribute
    so that wait_subprocess can report the elapsed time.
  """
  split_cmd = shlex.split(cmd)
  actual_command = cmd if kwargs.get('shell') else split_cmd

  log_level = logging.INFO if echo else logging.DEBUG
  extra_log_info = ''
  if 'cwd' in kwargs:
    extra_log_info += ' in cwd="%s"' % kwargs['cwd']
  logging.log(log_level, 'Running %s%s...', repr(cmd), extra_log_info)

  start_date = datetime.datetime.now()
  if stream:
    stream.write(u'{time} Spawning {cmd!r}{extra}\n----\n\n'.format(
        time=log_timestring(now=start_date), cmd=cmd, extra=extra_log_info))
    stream.flush()

  process = subprocess.Popen(
      actual_command,
      close_fds=True,
      stdout=stdout or subprocess.PIPE,
      stderr=subprocess.STDOUT,  # interleave stderr into stdout
      **kwargs)
  logging.log(log_level, 'Running %s as pid %s', split_cmd[0], process.pid)
  process.start_date = start_date
  time.sleep(0)  # yield this thread
  return process
def wait_subprocess(process, stream=None, echo=False, postprocess_hook=None):
  """Waits for subprocess to finish and returns (final status, stdout).

  This will also consume the remaining output to return it.

  Args:
    process: [subprocess.Popen] Handle as returned by start_subprocess().
    stream: [file obj] If given, output is copied to it as it is read,
       and a completion summary is appended.
    echo: [boolean] If true, log the collected output at INFO level.
    postprocess_hook: [callable] Optional f(retcode, stdout) invoked
       before returning.

  Returns:
    Process exit code, stdout remaining in process prior to this invocation.
    Any previously read output from the process will not be included.
  """
  text_lines = []
  if process.stdout is not None:
    # stdout isn't going to another stream; collect it from the pipe.
    for raw_line in iter(process.stdout.readline, ''):
      if not raw_line:
        break
      decoded_line = raw_line.decode(encoding='utf-8')
      text_lines.append(decoded_line)
      if stream:
        stream.write(decoded_line)
        stream.flush()

  process.wait()
  if stream is None and process.stdout is not None:
    # Close stdout pipe if we didn't give a stream.
    # Otherwise caller owns the stream.
    process.stdout.close()

  # Compute end_date unconditionally: it is also needed for the stream
  # summary below. Previously it was only assigned when the process had
  # a start_date attribute, causing a NameError for processes created
  # outside start_subprocess() when a stream was supplied.
  end_date = datetime.datetime.now()
  if hasattr(process, 'start_date'):
    delta_time_str = timedelta_string(end_date - process.start_date)
  else:
    delta_time_str = 'UNKNOWN'

  returncode = process.returncode
  stdout = ''.join(text_lines)

  if stream:
    stream.write(
        u'\n\n----\n{time} Spawned process completed'
        u' with returncode {returncode} in {delta_time}.\n'
        .format(time=log_timestring(now=end_date), returncode=returncode,
                delta_time=delta_time_str))
    stream.flush()

  if echo:
    logging.info('%s returned %d with output:\n%s',
                 process.pid, returncode, stdout)
  logging.debug('Finished %s with returncode=%d in %s',
                process.pid, returncode, delta_time_str)

  if postprocess_hook:
    postprocess_hook(returncode, stdout)
  return returncode, stdout.strip()
def run_subprocess(cmd, stream=None, echo=False, **kwargs):
  """Runs a command to completion.

  Returns:
    (retcode, stdout) from the spawned process.
  """
  hook = kwargs.pop('postprocess_hook', None)
  child = start_subprocess(cmd, stream=stream, echo=echo, **kwargs)
  return wait_subprocess(child, stream=stream, echo=echo,
                         postprocess_hook=hook)
def check_subprocess(cmd, stream=None, **kwargs):
  """Run_subprocess and raise CalledProcessError if it fails."""
  # pylint: disable=inconsistent-return-statements
  embed_errors = kwargs.pop('embed_errors', True)
  retcode, output = run_subprocess(cmd, stream=stream, **kwargs)
  if retcode == 0:
    # Success: hand back the trimmed output.
    return output.strip()

  # Failure path: log (part of) the output, then raise.
  if embed_errors:
    log_embedded_output(logging.ERROR, 'command output', output)
    logging.error('Command failed. See embedded output above.')
  else:
    # Keep only the tail of long output.
    tail = output.split('\n')[-30:]
    log_embedded_output(logging.ERROR,
                        'Command failed with last %d lines' % len(tail),
                        '\n'.join(tail))

  program = os.path.basename(shlex.split(cmd)[0])
  raise_and_log_error(ExecutionError(program + ' failed.', program=program))
def check_subprocess_sequence(cmd_list, stream=None, **kwargs):
  """Run multiple commands until one fails.

  Returns:
    A list of each result in sequence if all succeeded.
  """
  # check_subprocess raises on the first failure, which aborts the
  # comprehension exactly like an early-exit loop would.
  return [check_subprocess(cmd, stream=stream, **kwargs)
          for cmd in cmd_list]
def run_subprocess_sequence(cmd_list, stream=None, **kwargs):
  """Run multiple commands until one fails.

  Returns:
    A list of (code, output) tuples for each result in sequence.
  """
  return [run_subprocess(cmd, stream=stream, **kwargs)
          for cmd in cmd_list]
def check_subprocesses_to_logfile(what, logfile, cmds, append=False, **kwargs):
  """Wrapper around check_subprocess that logs output to a logfile.

  Args:
    what: [string] For logging purposes, what is the command for.
    logfile: [path] The logfile to write to.
    cmds: [list of string] A list of commands to run.
    append: [boolean] Open the log file as append if true, write new default.
    kwargs: [kwargs] Additional keyword arguments to pass to check_subprocess.
  """
  mode = 'a' if append else 'w'
  how = 'Appending' if append else 'Logging'
  logging.info('%s %s to %s', how, what, logfile)
  ensure_dir_exists(os.path.dirname(logfile))
  with io.open(logfile, mode, encoding='utf-8') as stream:
    try:
      check_subprocess_sequence(
          cmds, stream=stream, embed_errors=False, **kwargs)
    except Exception as ex:
      logging.error('%s failed. Log file [%s] follows:', what, logfile)
      import traceback
      traceback.print_exc()

      # Re-read the whole logfile so the embedded log (and the copy
      # below) contains everything, including prior output if append.
      with io.open(logfile, 'r', encoding='utf-8') as readagain:
        output = readagain.read()
      log_embedded_output(logging.ERROR, logfile, output)
      logging.error('Caught exception %s\n%s failed. See embedded logfile above',
                    ex, what)

      # Keep a copy of the failing logfile for later inspection.
      # Use the configurable ERROR_LOGFILE_DIR rather than a hard-coded
      # 'errors' so the directory just ensured is the one written to.
      ensure_dir_exists(ERROR_LOGFILE_DIR)
      error_path = os.path.join(ERROR_LOGFILE_DIR, os.path.basename(logfile))
      logging.info('Copying error log file to %s', error_path)
      with io.open(error_path, 'w', encoding='utf-8') as f:
        f.write(output)
        f.write(u'\n--------\n')
        f.write(u'Exception caught in parent process:\n%s' % ex)
      raise
def determine_subprocess_outcome_labels(result, labels):
  """For determining outcome labels when timing calls to subprocesses."""
  if result is None:
    # No (retcode, stdout) pair at all -- defer to the generic logic.
    return BaseMetricsRegistry.default_determine_outcome_labels(
        result, labels)

  retcode, _ = result
  outcome_labels = dict(labels)  # do not mutate the caller's dict
  outcome_labels['success'] = retcode == 0
  outcome_labels['exception_type'] = 'BadExitCode'
  return outcome_labels
| {
"content_hash": "e88104e05b62e540f333f878ddb106c9",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 81,
"avg_line_length": 32.917808219178085,
"alnum_prop": 0.6736024413927035,
"repo_name": "ewiseblatt/spinnaker",
"id": "0d74f74a8d72de588be8a7174b28462a508e0b39",
"size": "7806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/buildtool/subprocess_support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1832"
},
{
"name": "Go",
"bytes": "8690"
},
{
"name": "HTML",
"bytes": "614"
},
{
"name": "Jsonnet",
"bytes": "35034"
},
{
"name": "Makefile",
"bytes": "276"
},
{
"name": "Python",
"bytes": "1250457"
},
{
"name": "Shell",
"bytes": "185432"
},
{
"name": "Smarty",
"bytes": "2087"
}
],
"symlink_target": ""
} |
"""Test evaluation of pulses entries.
"""
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from exopy_pulses.pulses.pulse import Pulse
from exopy_pulses.pulses.sequences.base_sequences import RootSequence
from exopy_pulses.pulses.shapes.square_shape import SquareShape
from exopy_pulses.testing.context import DummyContext
@pytest.fixture
def pulse():
    # A Pulse must belong to a root sequence to be evaluated; the dummy
    # context avoids needing any real hardware backend.
    return Pulse(root=RootSequence(context=DummyContext()))
def test_eval_pulse1(pulse):
    """Start/Stop mode: meaningful values evaluate successfully."""
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert pulse.eval_entries(root_ns, local_ns, missing_vars, eval_errors)
    assert not missing_vars
    assert not eval_errors

    # Both namespaces are updated with the computed entries.
    for ns in (root_ns, local_ns):
        assert ns['0_start'] == 2.0
        assert ns['0_stop'] == 3.0
        assert ns['0_duration'] == 1.0

    assert (pulse.start, pulse.stop, pulse.duration) == (2.0, 3.0, 1.0)
    assert_array_equal(pulse.waveform, np.ones(1))
def test_eval_pulse1bis(pulse):
    """Start/Stop mode: a zero-length pulse (stop == start) is valid."""
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '2.0'

    root_ns = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert pulse.eval_entries(root_ns, local_ns, missing_vars, eval_errors)
    assert not missing_vars
    assert not eval_errors

    for ns in (root_ns, local_ns):
        assert ns['0_start'] == 2.0
        assert ns['0_stop'] == 2.0
        assert ns['0_duration'] == 0.0

    assert (pulse.start, pulse.stop, pulse.duration) == (2.0, 2.0, 0.0)
    # Zero duration -> empty waveform.
    assert_array_equal(pulse.waveform, np.ones(0))
def test_eval_pulse_validation_fail(pulse):
    """Start/Stop mode: context validation rejects a non-rectifiable start."""
    pulse.root.context.rectify_time = False
    pulse.def_1 = '1.0*2.1'
    pulse.def_2 = '5.1*{a}/{b} + {c}'

    root_ns = {'a': 2.0, 'b': 10.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_start' in eval_errors
    # Neither namespace gets the rejected entries.
    for ns in (root_ns, local_ns):
        assert '0_start' not in ns
        assert '0_duration' not in ns
def test_eval_pulse2(pulse):
    """Start/Stop mode: a negative start time is rejected."""
    pulse.def_1 = '-1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 2.0, 'b': 10.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_start' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_start' not in ns
        assert '0_duration' not in ns
def test_eval_pulse3(pulse):
    """Start/Stop mode: a stop time of zero is rejected."""
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 0.0, 'b': 10.0, 'c': 0.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_stop' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_stop' not in ns
        assert '0_duration' not in ns
def test_eval_pulse4(pulse):
    """Start/Stop mode: a stop time earlier than start is rejected."""
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 0.0, 'b': 10.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_stop' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_stop' not in ns
        assert '0_duration' not in ns
def test_eval_pulse5(pulse):
    """Start/Duration mode: meaningful values evaluate successfully."""
    pulse.def_mode = 'Start/Duration'
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert pulse.eval_entries(root_ns, local_ns,
                              missing_vars, eval_errors)
    assert not missing_vars
    assert not eval_errors

    for ns in (root_ns, local_ns):
        assert ns['0_start'] == 2.0
        assert ns['0_stop'] == 5.0
        assert ns['0_duration'] == 3.0

    assert (pulse.start, pulse.stop, pulse.duration) == (2.0, 5.0, 3.0)
    assert_array_equal(pulse.waveform, np.ones(3))
def test_eval_pulse5bis(pulse):
    """Start/Duration mode: a zero duration is valid."""
    pulse.def_mode = 'Start/Duration'
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '0'

    root_ns = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert pulse.eval_entries(root_ns, local_ns,
                              missing_vars, eval_errors)
    assert not missing_vars
    assert not eval_errors

    for ns in (root_ns, local_ns):
        assert ns['0_start'] == 2.0
        assert ns['0_stop'] == 2.0
        assert ns['0_duration'] == 0.0

    assert (pulse.start, pulse.stop, pulse.duration) == (2.0, 2.0, 0)
    # Zero duration -> empty waveform.
    assert_array_equal(pulse.waveform, np.ones(0))
def test_eval_pulse6(pulse):
    """Start/Duration mode: a negative start time is rejected."""
    pulse.def_mode = 'Start/Duration'
    pulse.def_1 = '-1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 0.0, 'b': 10.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_start' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_start' not in ns
        assert '0_stop' not in ns
def test_eval_pulse7(pulse):
    """Start/Duration mode: a negative duration is rejected."""
    pulse.def_mode = 'Start/Duration'
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': -1.0, 'b': 10.0, 'c': 0.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_duration' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_duration' not in ns
        assert '0_stop' not in ns
def test_eval_pulse8(pulse):
    """Duration/Stop mode: meaningful values evaluate successfully."""
    pulse.def_mode = 'Duration/Stop'
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert pulse.eval_entries(root_ns, local_ns,
                              missing_vars, eval_errors)
    assert not missing_vars
    assert not eval_errors

    for ns in (root_ns, local_ns):
        assert ns['0_start'] == 1.0
        assert ns['0_stop'] == 3.0
        assert ns['0_duration'] == 2.0

    assert (pulse.start, pulse.stop, pulse.duration) == (1.0, 3.0, 2.0)
    assert_array_equal(pulse.waveform, np.ones(2))
def test_eval_pulse8bis(pulse):
    """Duration/Stop mode: a zero duration is valid."""
    pulse.def_mode = 'Duration/Stop'
    pulse.def_1 = '0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert pulse.eval_entries(root_ns, local_ns,
                              missing_vars, eval_errors)
    assert not missing_vars
    assert not eval_errors

    for ns in (root_ns, local_ns):
        assert ns['0_start'] == 3.0
        assert ns['0_stop'] == 3.0
        assert ns['0_duration'] == 0.0

    assert (pulse.start, pulse.stop, pulse.duration) == (3.0, 3.0, 0.0)
    # Zero duration -> empty waveform.
    assert_array_equal(pulse.waveform, np.ones(0))
def test_eval_pulse9(pulse):
    """Duration/Stop mode: a negative duration is rejected."""
    pulse.def_mode = 'Duration/Stop'
    pulse.def_1 = '-1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 0.0, 'b': 10.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_duration' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_duration' not in ns
        assert '0_start' not in ns
def test_eval_pulse10(pulse):
    """Duration/Stop mode: a stop time of zero is rejected."""
    pulse.def_mode = 'Duration/Stop'
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 0.0, 'b': 10.0, 'c': 0.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_stop' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_stop' not in ns
        assert '0_start' not in ns
def test_eval_pulse11(pulse):
    """Duration/Stop mode: a duration larger than the stop is rejected."""
    pulse.def_mode = 'Duration/Stop'
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 0.0, 'b': 10.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_stop' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_start' not in ns
def test_eval_pulse12(pulse):
    """Evaluation fails and 'd' is reported missing when def_1 uses an
    undefined variable; def_2 still evaluates so the stop entries exist.
    """
    pulse.def_1 = '1.0*2.0*{d}'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_vars = {'a': 2.0, 'b': 10.0, 'c': 1.0}
    missing = set()
    errors = {}
    seq_locals = root_vars.copy()
    assert not pulse.eval_entries(root_vars, seq_locals,
                                  missing, errors)
    assert missing == set('d')
    assert '0_start' not in errors
    assert '0_start' not in root_vars
    assert '0_stop' in root_vars
    assert '0_start' not in seq_locals
    # Bug fix: this read `assert '0_stop', seq_locals`, which asserts a
    # non-empty string literal and therefore always passed.
    assert '0_stop' in seq_locals
def test_eval_pulse13(pulse):
    """Evaluation fails and 'c' is reported missing when def_2 uses an
    undefined variable; def_1 still evaluates so the start entries exist.
    """
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 2.0, 'b': 10.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert missing_vars == {'c'}
    assert '0_stop' not in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_stop' not in ns
        assert '0_start' in ns
def test_eval_pulse14(pulse):
    """An unparsable def_1 formula is reported as an error on 0_start;
    def_2 still evaluates so the stop entries exist.
    """
    pulse.def_1 = '1.0*2.0*zeffer'
    pulse.def_2 = '5.0*{a}/{b} + {c}'

    root_ns = {'a': 2.0, 'b': 10.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_start' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_start' not in ns
        assert '0_stop' in ns
def test_eval_pulse15(pulse):
    """An unparsable def_2 formula is reported as an error on 0_stop;
    def_1 still evaluates so the start entries exist.
    """
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c} + zeffer'

    root_ns = {'a': 2.0, 'b': 10.0, 'c': 1.0}
    local_ns = root_ns.copy()
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(root_ns, local_ns,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_stop' in eval_errors
    for ns in (root_ns, local_ns):
        assert '0_stop' not in ns
        assert '0_start' in ns
def test_eval_pulse16(pulse):
    """Analogical pulse: shape and modulation are evaluated too, and
    clearing cached values resets both of their caches.
    """
    pulse.index = 2
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'
    pulse.shape = SquareShape(amplitude='0.5')
    pulse.kind = 'Analogical'
    pulse.modulation.frequency = '0.0'
    pulse.modulation.phase = '0.0'
    pulse.modulation.kind = 'cos'
    pulse.modulation.activated = True

    namespace = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    missing_vars = set()
    eval_errors = {}

    assert pulse.eval_entries(namespace, namespace,
                              missing_vars, eval_errors)
    assert not missing_vars
    assert not eval_errors
    # cos modulation at zero frequency/phase leaves the amplitude as-is.
    assert_array_equal(pulse.waveform, 0.5*np.ones(1))
    # The shape picks up the pulse's index.
    assert pulse.shape.index == 2

    pulse.clean_cached_values()
    assert not pulse.modulation._cache
    assert not pulse.shape._cache
def test_eval_pulse17(pulse):
    """Analogical pulse: an invalid modulation formula is reported."""
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'
    pulse.shape = SquareShape(amplitude='0.5')
    pulse.kind = 'Analogical'
    pulse.modulation.frequency = '1.0**'
    pulse.modulation.phase = '1.0'
    pulse.modulation.activated = True

    namespace = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(namespace, namespace,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_modulation_frequency' in eval_errors
def test_eval_pulse18(pulse):
    """Analogical pulse: an invalid shape formula is reported."""
    pulse.def_1 = '1.0*2.0'
    pulse.def_2 = '5.0*{a}/{b} + {c}'
    pulse.shape = SquareShape(amplitude='0.5*')
    pulse.kind = 'Analogical'
    pulse.modulation.frequency = '1.0'
    pulse.modulation.phase = '1.0'
    pulse.modulation.activated = True

    namespace = {'a': 2.0, 'b': 5.0, 'c': 1.0}
    missing_vars = set()
    eval_errors = {}

    assert not pulse.eval_entries(namespace, namespace,
                                  missing_vars, eval_errors)
    assert not missing_vars
    assert '0_shape_amplitude' in eval_errors
def test_traversing_pulse(pulse):
    """Traversal yields the pulse, then its modulation, then its shape."""
    assert list(pulse.traverse()) == [pulse]

    pulse.kind = 'Analogical'
    assert list(pulse.traverse()) == [pulse, pulse.modulation]

    pulse.shape = SquareShape()
    assert list(pulse.traverse()) == [pulse, pulse.modulation, pulse.shape]
def test_pulse_view(exopy_qtbot, workbench, pulse, dialog_sleep):
    """Test the view of the Pulse class.

    Checks that the pulse view adds/removes widgets when the kind is
    toggled, a shape is (de)selected and the modulation is (de)activated.
    The expected widget counts are relative to the initial 'Analogical'
    configuration with no shape and no modulation.
    """
    import enaml
    from exopy.testing.util import show_widget
    with enaml.imports():
        from exopy_pulses.pulses.sequences.views.base_sequences_views\
            import RootSequenceView
    pulse.kind = 'Analogical'
    root = pulse.root
    root.add_child_item(0, pulse)
    core = workbench.get_plugin('enaml.workbench.core')
    root_view = RootSequenceView(item=root, core=core)
    # View associated with the pulse inside the root sequence view.
    pulse_view = root_view.view_for(pulse)
    show_widget(exopy_qtbot, root_view)
    exopy_qtbot.wait(dialog_sleep)
    # Test switching between logical and analogical
    # Baseline widget count for the analogical kind.
    widgets_num = len(pulse_view.widgets())
    pulse.kind = 'Logical'
    # Logical pulses have one widget fewer (no shape/modulation controls).
    def assert_widgets():
        assert widgets_num - 1 == len(pulse_view.widgets())
    exopy_qtbot.wait_until(assert_widgets)
    exopy_qtbot.wait(dialog_sleep)
    pulse.kind = 'Analogical'
    # Back to the baseline count.
    def assert_widgets():
        assert widgets_num == len(pulse_view.widgets())
    exopy_qtbot.wait_until(assert_widgets)
    exopy_qtbot.wait(dialog_sleep)
    # Test selecting a shape
    # NOTE(review): widget lookup relies on the exact layout of the view
    # (last container, last child) -- brittle if the enaml view changes.
    shape_select = pulse_view.widgets()[-1].widgets()[-1]
    shape_select.selected = 'exopy_pulses.SquareShape'
    # Selecting a shape adds its editor widget.
    def assert_widgets():
        assert widgets_num + 1 == len(pulse_view.widgets())
    exopy_qtbot.wait_until(assert_widgets)
    exopy_qtbot.wait(dialog_sleep)
    shape_select.selected = ''
    # Deselecting removes it again.
    def assert_widgets():
        assert widgets_num == len(pulse_view.widgets())
    exopy_qtbot.wait_until(assert_widgets)
    exopy_qtbot.wait(dialog_sleep)
    # Test adding a modulation
    mod_check = pulse_view.widgets()[-1].widgets()[0]
    mod_check.checked = True
    # Activating the modulation adds its editor widget.
    def assert_widgets():
        assert widgets_num + 1 == len(pulse_view.widgets())
    exopy_qtbot.wait_until(assert_widgets)
    exopy_qtbot.wait(dialog_sleep)
    mod_check.checked = False
    # Deactivating removes it again.
    def assert_widgets():
        assert widgets_num == len(pulse_view.widgets())
    exopy_qtbot.wait_until(assert_widgets)
    exopy_qtbot.wait(dialog_sleep)
def test_pulse_view2(exopy_qtbot, workbench, pulse):
    """A logical pulse can be displayed from the start."""
    import enaml
    from exopy.testing.util import show_and_close_widget
    with enaml.imports():
        from exopy_pulses.pulses.sequences.views.base_sequences_views\
            import RootSequenceView

    pulse.kind = 'Logical'
    root = pulse.root
    root.add_child_item(0, pulse)
    core = workbench.get_plugin('enaml.workbench.core')
    show_and_close_widget(exopy_qtbot,
                          RootSequenceView(item=root, core=core))
def test_pulse_view3(exopy_qtbot, workbench, pulse):
    """An analogical pulse carrying a shape can be displayed from the start."""
    import enaml
    from exopy.testing.util import show_and_close_widget
    with enaml.imports():
        from exopy_pulses.pulses.sequences.views.base_sequences_views\
            import RootSequenceView

    pulse.kind = 'Analogical'
    pulse.shape = SquareShape()
    root = pulse.root
    root.add_child_item(0, pulse)
    core = workbench.get_plugin('enaml.workbench.core')
    show_and_close_widget(exopy_qtbot,
                          RootSequenceView(item=root, core=core))
| {
"content_hash": "ab7de12a8252b76ad4ec570803003557",
"timestamp": "",
"source": "github",
"line_count": 718,
"max_line_length": 75,
"avg_line_length": 28.040389972144848,
"alnum_prop": 0.5983211642576863,
"repo_name": "Ecpy/ecpy_pulses",
"id": "34d7b7c9351850e1d50f656eda320153ffaaf69e",
"size": "20522",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/pulses/test_pulse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "155"
},
{
"name": "Python",
"bytes": "377974"
},
{
"name": "Shell",
"bytes": "407"
}
],
"symlink_target": ""
} |
""" Plateau Scheduler
Adapts PyTorch plateau scheduler and allows application of noise, warmup.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from .scheduler import Scheduler
class PlateauLRScheduler(Scheduler):
    """Decay the LR by a factor every time the validation loss plateaus.

    Wraps ``torch.optim.lr_scheduler.ReduceLROnPlateau`` and layers two
    extras on top: a linear warmup over the first ``warmup_t`` epochs and
    optional per-epoch LR noise (via the ``Scheduler`` base helpers
    ``_is_apply_noise`` / ``_calculate_noise``).
    """
    def __init__(
            self,
            optimizer,
            decay_rate=0.1,
            patience_t=10,
            verbose=True,
            threshold=1e-4,
            cooldown_t=0,
            warmup_t=0,
            warmup_lr_init=0,
            lr_min=0,
            mode='max',
            noise_range_t=None,
            noise_type='normal',
            noise_pct=0.67,
            noise_std=1.0,
            noise_seed=None,
            initialize=True,
    ):
        """Build the wrapped ReduceLROnPlateau and set up warmup/noise state.

        decay_rate/patience_t/threshold/cooldown_t/mode/lr_min map directly
        onto ReduceLROnPlateau's factor/patience/threshold/cooldown/mode/
        min_lr arguments; the noise_* arguments are forwarded to the base
        Scheduler class.
        """
        super().__init__(
            optimizer,
            'lr',
            noise_range_t=noise_range_t,
            noise_type=noise_type,
            noise_pct=noise_pct,
            noise_std=noise_std,
            noise_seed=noise_seed,
            initialize=initialize,
        )
        # The actual plateau detection and LR decay is delegated to PyTorch.
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            patience=patience_t,
            factor=decay_rate,
            verbose=verbose,
            threshold=threshold,
            cooldown=cooldown_t,
            mode=mode,
            min_lr=lr_min
        )
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        if self.warmup_t:
            # Per-group increment so the LR ramps linearly from
            # warmup_lr_init up to each group's base value over warmup_t.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            # Placeholder slope; only reachable if step() is called with
            # epoch <= 0 (see NOTE in step()).
            self.warmup_steps = [1 for _ in self.base_values]
        # Pre-noise LRs cached so noise is not compounded across epochs.
        self.restore_lr = None
    def state_dict(self):
        # Only the wrapped scheduler's progress needs persisting; warmup is
        # a pure function of the epoch passed to step().
        return {
            'best': self.lr_scheduler.best,
            'last_epoch': self.lr_scheduler.last_epoch,
        }
    def load_state_dict(self, state_dict):
        # 'last_epoch' may be absent in checkpoints from older versions.
        self.lr_scheduler.best = state_dict['best']
        if 'last_epoch' in state_dict:
            self.lr_scheduler.last_epoch = state_dict['last_epoch']
    # override the base class step fn completely
    def step(self, epoch, metric=None):
        """Advance the schedule for `epoch`, using `metric` as the plateau signal."""
        if epoch <= self.warmup_t:
            # NOTE(review): with warmup_t == 0 an epoch-0 call still takes
            # this branch and sets the LR to warmup_lr_init -- confirm this
            # is the intended behaviour.
            lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps]
            super().update_groups(lrs)
        else:
            if self.restore_lr is not None:
                # restore actual LR from before our last noise perturbation before stepping base
                for i, param_group in enumerate(self.optimizer.param_groups):
                    param_group['lr'] = self.restore_lr[i]
                self.restore_lr = None
            self.lr_scheduler.step(metric, epoch)  # step the base scheduler
            if self._is_apply_noise(epoch):
                self._apply_noise(epoch)
    def step_update(self, num_updates: int, metric: float = None):
        # Per-update stepping is a no-op; this scheduler works per epoch.
        return None
    def _apply_noise(self, epoch):
        noise = self._calculate_noise(epoch)
        # apply the noise on top of previous LR, cache the old value so we can restore for normal
        # stepping of base scheduler
        restore_lr = []
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group['lr'])
            restore_lr.append(old_lr)
            new_lr = old_lr + old_lr * noise
            param_group['lr'] = new_lr
        self.restore_lr = restore_lr
    def _get_lr(self, t: int) -> float:
        # The base class' LR computation is never used; step() is overridden.
        assert False, 'should not be called as step is overridden'
| {
"content_hash": "7f5fd8464e5abafe31593ac3d6875ba0",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 97,
"avg_line_length": 32.481818181818184,
"alnum_prop": 0.5549958018471872,
"repo_name": "rwightman/pytorch-image-models",
"id": "9f8271579bbefaf3e9d0322cbe233af638a7433a",
"size": "3573",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "timm/scheduler/plateau_lr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2368284"
},
{
"name": "Shell",
"bytes": "108"
}
],
"symlink_target": ""
} |
""" An ICAP response copy service using a gevent StreamServer. """
import logging
from respmod_copy import Copy
def main():
    """Run the ICAP response-copy service forever on a gevent StreamServer."""
    import gevent.server
    logging.basicConfig(level=logging.INFO)
    handler_class = Copy().icap_handler_class()
    server = handler_class.server(gevent.server.StreamServer)
    server.serve_forever()
if __name__ == '__main__':
main()
| {
"content_hash": "474efa0baaf59bc75636bdd0d3ee7815",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 82,
"avg_line_length": 22.785714285714285,
"alnum_prop": 0.6959247648902821,
"repo_name": "gilesbrown/python-icapservice",
"id": "c4d83f6497fb06f581105990aefedac2f621c48d",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/respmod_copy_gevent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44717"
},
{
"name": "Shell",
"bytes": "964"
}
],
"symlink_target": ""
} |
import os
import logging
import unittest
from simple.bulk import SimpleBulkTest
N = 100000
COUNT = 5
log = logging.getLogger('test.auto')
class CompactionTest(SimpleBulkTest):
    """Start a clean accumulo, bulk import a lot of map files, read while a
    multi-pass compaction is happening.

    Verifies that major compaction reduces the number of file entries
    recorded for the table in the !METADATA table while reads still succeed.
    """
    order = 26
    # Force aggressive major compaction on the test table.
    tableSettings = SimpleBulkTest.tableSettings.copy()
    tableSettings['test_ingest'] = {
        'table.compaction.major.ratio': 1.0
        }
    # Tserver settings: few open files, short delay, single concurrent
    # compaction -> forces a multi-pass compaction.
    settings = SimpleBulkTest.settings.copy()
    settings.update({
        'tserver.compaction.major.files.open.max':4,
        'tserver.compaction.major.delay': 1,
        'tserver.compaction.major.concurrent.max':1,
        })
    def createRFiles(self, host):
        # Generate the RFiles to bulk import (60 splits across 500k rows).
        # NOTE(review): the `host` parameter is ignored; the job always runs
        # on the master host.
        handle = self.runClassOn(
            self.masterHost(),
            'org.apache.accumulo.test.CreateRFiles',
            "--output testrf --numThreads 4 --start 0 --end 500000 --splits 59".split())
        out, err = handle.communicate()
        self.assert_(handle.returncode == 0)
    def runTest(self):
        # initialize the database
        self.createTable('test_ingest')
        # Clean any leftovers from previous runs (ignoring failures).
        self.execute(self.masterHost(), 'hadoop dfs -rmr testrf'.split())
        self.execute(self.masterHost(), 'hadoop dfs -rmr testrfFail'.split())
        # insert some data
        self.createRFiles(self.masterHost())
        self.bulkLoad(self.masterHost(), 'testrf')
        # Count file entries for the table before compaction.
        out, err, code = self.shell(self.masterHost(), "table !METADATA\nscan -b ! -c ~tab,file\n")
        self.assert_(code == 0)
        beforeCount = len(out.split('\n'))
        log.info("Verifying Ingestion")
        # Read the data concurrently (COUNT verifiers per round) while
        # compactions run in the background.
        for c in range(5):
            handles = []
            for i in range(COUNT):
                handles.append(self.verify(self.hosts[i%len(self.hosts)], N, i * N))
            for h in handles:
                out, err = h.communicate()
                self.assert_(h.returncode == 0)
        # After compaction the number of file entries must have dropped.
        out, err, code = self.shell(self.masterHost(), "table !METADATA\nscan -b ! -c ~tab,file\n")
        self.assert_(code == 0)
        afterCount = len(out.split('\n'))
        self.assert_(afterCount < beforeCount)
        self.shutdown_accumulo()
def suite():
    """Return a suite holding the single compaction test."""
    return unittest.TestSuite([CompactionTest()])
| {
"content_hash": "2850ccb3f9a4a15388e1895e4a205a8e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 109,
"avg_line_length": 30.904109589041095,
"alnum_prop": 0.6112588652482269,
"repo_name": "phrocker/accumulo",
"id": "bd39d62e1bfd5cd2fa14be65819045a65ae621e1",
"size": "3038",
"binary": false,
"copies": "1",
"ref": "refs/heads/ACCUMULO-3709",
"path": "test/system/auto/simple/compaction.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12217"
},
{
"name": "C++",
"bytes": "1297281"
},
{
"name": "CSS",
"bytes": "5889"
},
{
"name": "HTML",
"bytes": "6628"
},
{
"name": "Java",
"bytes": "14131538"
},
{
"name": "JavaScript",
"bytes": "249599"
},
{
"name": "Makefile",
"bytes": "3565"
},
{
"name": "Perl",
"bytes": "28190"
},
{
"name": "Python",
"bytes": "906539"
},
{
"name": "Ruby",
"bytes": "193322"
},
{
"name": "Shell",
"bytes": "194601"
},
{
"name": "Thrift",
"bytes": "46026"
}
],
"symlink_target": ""
} |
"""Module for the custom Django create_cards command."""
import os
from django.core import management
from django.conf import settings
from django.template.loader import render_to_string
from django.contrib.staticfiles import finders
from learning_area_cards.models import (
AchievementObjective,
ProgressOutcome,
)
from learning_area_cards.utils import get_card_set_metadata
from weasyprint import HTML, CSS
class Command(management.base.BaseCommand):
    """Required command class for the custom Django create_cards command.

    Renders one PDF per (card set, print type) combination into the
    directory configured by LEARNING_AREA_CARDS_GENERATION_LOCATION.
    """
    help = 'Create learning area card PDFs for each level.'
    def handle(self, *args, **options):
        """Automatically called when the create_cards command is given."""
        pdf_directory = settings.LEARNING_AREA_CARDS_GENERATION_LOCATION
        if not os.path.exists(pdf_directory):
            os.makedirs(pdf_directory)
        self.create_achievement_objective_cards(pdf_directory)
        self.create_progress_outcome_cards(pdf_directory)
    def create_achievement_objective_cards(self, pdf_directory):
        """Create achievement objective cards.

        One PDF is written per (curriculum level, print type) pair.

        Args:
            pdf_directory (str): Path to write files to.
        """
        # Distinct levels that actually have objectives, ascending.
        achievement_objectives_level_values = AchievementObjective.objects.all().values_list(
            'level', flat=True).order_by('level').distinct()
        card_type = 'ao'
        for level_num in achievement_objectives_level_values:
            for print_type in settings.LEARNING_AREA_CARDS_PRINT_TYPES:
                # `title` is unused here; only the filename is needed.
                (title, filename) = get_card_set_metadata(
                    card_type=card_type,
                    print_type=print_type,
                    level=level_num,
                    quote=False,
                )
                objectives = AchievementObjective.objects.filter(level=level_num).order_by('code')
                context = dict()
                context['print_type'] = print_type
                context['filename'] = filename
                context['card_type'] = card_type
                context['cards'] = self.prepare_card_data(objectives, print_type)
                self.write_card_pdf(pdf_directory, filename, context)
                print('Created {}'.format(filename))
    def create_progress_outcome_cards(self, pdf_directory):
        """Create progress outcome cards.

        One PDF is written per (learning area, print type) pair.

        Args:
            pdf_directory (str): Path to write files to.
        """
        learning_areas = ProgressOutcome.objects.all().values_list(
            'learning_area', flat=True).distinct()
        card_type = 'po'
        for learning_area in learning_areas:
            for print_type in settings.LEARNING_AREA_CARDS_PRINT_TYPES:
                # `title` is unused here; only the filename is needed.
                (title, filename) = get_card_set_metadata(
                    card_type=card_type,
                    print_type=print_type,
                    learning_area=learning_area,
                    quote=False,
                )
                outcomes = ProgressOutcome.objects.filter(learning_area=learning_area).order_by('code')
                context = dict()
                context['print_type'] = print_type
                context['filename'] = filename
                context['card_type'] = card_type
                context['cards'] = self.prepare_card_data(outcomes, print_type)
                self.write_card_pdf(pdf_directory, filename, context)
                print('Created {}'.format(filename))
    def prepare_card_data(self, items, print_type):
        """Prepare card data for rendering.

        For the single-card print type, each card's back is emitted
        immediately before its front.  Otherwise cards are arranged in
        pages of four: a page of backs followed by a page of fronts padded
        with blank cards, with the fronts' column order swapped
        (presumably so backs and fronts align when the sheet is printed
        double-sided -- TODO confirm).
        """
        cards = list()
        if print_type == settings.LEARNING_AREA_CARDS_SINGLE_PRINT:
            for item in items:
                cards.append(
                    {
                        'item': item,
                        'side': 'back',
                    }
                )
                cards.append(
                    {
                        'item': item,
                        'side': 'front',
                    }
                )
        else:
            items = list(items)
            cards_per_page = 4
            fronts = list()
            backs = list()
            # Placeholder used to pad pages holding fewer than four cards.
            blank_card = {
                'item': None,
                'side': 'back',
            }
            for item in items:
                backs.append(
                    {
                        'item': item,
                        'side': 'back',
                    }
                )
                fronts.append(
                    {
                        'item': item,
                        'side': 'front',
                    }
                )
                # Flush a page once four cards are gathered or on the final
                # item.  NOTE(review): `item == items[-1]` compares by
                # equality, so a duplicate of the last item would flush a
                # page early -- confirm items are unique.
                if len(fronts) == 4 or item == items[-1]:
                    blanks = list()
                    for i in range(cards_per_page - len(fronts)):
                        blanks.append(blank_card)
                    # Backs page first, padded to a full page.
                    cards.extend(backs)
                    cards.extend(blanks)
                    # Fronts page: each branch rearranges a partial page so
                    # its columns mirror the backs page.  `blanks` was
                    # already copied into the backs page by extend(), so
                    # pop() below only changes the remaining padding for
                    # the fronts page.
                    # NOTE(review): the 3-card branch keeps the first two
                    # fronts in original order, unlike the full-page branch
                    # which swaps them -- verify this asymmetry is intended.
                    if len(fronts) == 1:
                        cards.append(blanks.pop())
                        cards.extend(fronts)
                        cards.extend(blanks)
                    elif len(fronts) == 2:
                        cards.append(fronts.pop(1))
                        cards.append(fronts.pop(0))
                        cards.extend(blanks)
                    elif len(fronts) == 3:
                        cards.append(fronts.pop(0))
                        cards.append(fronts.pop(0))
                        cards.append(blanks.pop())
                        cards.extend(fronts)
                        cards.extend(blanks)
                    else:
                        # Full page: swap the two columns (2<->1, 4<->3).
                        cards.append(fronts.pop(1))
                        cards.append(fronts.pop(0))
                        cards.append(fronts.pop(1))
                        cards.append(fronts.pop(0))
                    backs = list()
                    fronts = list()
        return cards
    def write_card_pdf(self, pdf_directory, filename, context):
        """Write card PDF to directory.

        Args:
            pdf_directory (str): Path to write files to.
            filename (str): Filename of file to write.
            context (dict): Context used for rendering template.
        """
        pdf_html = render_to_string('learning_area_cards/cards-pdf.html', context)
        html = HTML(string=pdf_html, base_url=settings.BUILD_ROOT)
        # Render as PDF
        css_file = finders.find('css/learning-area-cards.css')
        css_string = open(css_file, encoding='UTF-8').read()
        base_css = CSS(string=css_string)
        pdf_file = html.write_pdf(stylesheets=[base_css])
        # Save file
        pdf_file_output = open(os.path.join(pdf_directory, filename), 'wb')
        pdf_file_output.write(pdf_file)
        pdf_file_output.close()
| {
"content_hash": "f00c567616089ce6696c73dee1c24366",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 103,
"avg_line_length": 39.133720930232556,
"alnum_prop": 0.5080968652503343,
"repo_name": "uccser/cs4teachers",
"id": "4fd88f4674d4bb5ebbb9facad597d0255b70189f",
"size": "6731",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "dthm4kaiako/learning_area_cards/management/commands/create_cards.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "956"
},
{
"name": "HTML",
"bytes": "18217"
},
{
"name": "JavaScript",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "82799"
},
{
"name": "Shell",
"bytes": "9836"
}
],
"symlink_target": ""
} |
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
class HeadExceptionOperations(object):
    """HEAD operations for the AutoRest head-exception test service.

    Each operation issues a HEAD request and raises
    ``msrestazure.azure_exceptions.CloudError`` when the response status
    code is not an expected success code.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def _head(self, url, success_statuses, custom_headers, raw, operation_config):
        """Send a HEAD request to *url* and validate the response status.

        :param str url: request path.
        :param list success_statuses: status codes accepted as success.
        :param dict custom_headers: extra headers merged into the request,
         or None.
        :param bool raw: when True, return the raw ClientRawResponse.
        :param dict operation_config: per-call transport configuration.
        :rtype: None or ClientRawResponse
        :raises: CloudError when the status code is not a success status.
        """
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.head(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in success_statuses:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            return ClientRawResponse(None, response)
        return None

    # NOTE: the `custom_headers` default was changed from a shared mutable
    # `{}` to None in the three public methods below.  Behaviour is
    # unchanged (both are falsy), but the mutable-default pitfall is gone.

    def head200(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 200 status code if successful

        :param dict custom_headers: headers that will be added to the request
        :param boolean raw: returns the direct response alongside the
         deserialized response
        :rtype: None or (None, requests.response) or concurrent.futures.Future
        """
        return self._head(
            '/http/success/200', [200], custom_headers, raw, operation_config)

    def head204(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 204 status code if successful

        :param dict custom_headers: headers that will be added to the request
        :param boolean raw: returns the direct response alongside the
         deserialized response
        :rtype: None or (None, requests.response) or concurrent.futures.Future
        """
        return self._head(
            '/http/success/204', [204], custom_headers, raw, operation_config)

    def head404(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 404 status code if successful

        :param dict custom_headers: headers that will be added to the request
        :param boolean raw: returns the direct response alongside the
         deserialized response
        :rtype: None or (None, requests.response) or concurrent.futures.Future
        """
        # As in the original generated code, only 204 counts as success
        # here, so the service's 404 answer raises CloudError -- which is
        # exactly what this 'head exception' acceptance test exercises.
        return self._head(
            '/http/success/404', [204], custom_headers, raw, operation_config)
| {
"content_hash": "ea5a8e07653701e1c07af7234db82d99",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 140,
"avg_line_length": 38.33587786259542,
"alnum_prop": 0.6326164874551972,
"repo_name": "vulcansteel/autorest",
"id": "66757ed881bc6c4e537a6a61afd8d50332d32dea",
"size": "5496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/HeadExceptions/auto_rest_head_exception_test_service/operations/head_exception_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "819"
},
{
"name": "C#",
"bytes": "8857811"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "3171512"
},
{
"name": "JavaScript",
"bytes": "4063363"
},
{
"name": "PowerShell",
"bytes": "8003"
},
{
"name": "Puppet",
"bytes": "145"
},
{
"name": "Python",
"bytes": "1831874"
},
{
"name": "Ruby",
"bytes": "218212"
},
{
"name": "TypeScript",
"bytes": "158339"
}
],
"symlink_target": ""
} |
def hyper_descriptive(data, tiers, metric, mode='median', ascending=False):
    '''Aggregate a hyperscan() result grouped by one or more tier columns.

    data = the datafile (DataFrame) with the results from hyperscan()
    tiers = a string or a list where first tier is first and so
    metric = the metric that will be measured against
    mode = 'mean', 'median', 'std', 'min', or 'max'
    ascending = default is 'False' (can also be True)

    Returns the grouped, aggregated frame sorted by *metric*.

    NOTE: the metric column of *data* is cast to float in place.
    '''
    # Cast in place so non-numeric dtypes don't break the aggregation.
    data[metric] = data[metric].astype(float)

    if isinstance(tiers, str):
        cols = [tiers, metric]
        tiers = [tiers]
    else:
        cols = tiers + [metric]
    grouped = data[cols].groupby(tiers)

    # The original compared `mode` with `is`, which only worked by accident
    # of CPython string interning; dispatch by value instead.  As a bonus
    # this supports any pandas GroupBy reduction name (e.g. 'sum').
    temp = getattr(grouped, mode)()
    return temp.sort_values(metric, ascending=ascending)
| {
"content_hash": "8ebbafb6e672d112818c70badfc34274",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 23.871794871794872,
"alnum_prop": 0.5767991407089151,
"repo_name": "botlabio/autonomio",
"id": "41f496760a456f27e22f8ce42b26dd45ba2d887d",
"size": "931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autonomio/hyperstats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20987"
},
{
"name": "Shell",
"bytes": "2372"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard.api import keystone
def set_tenant_name_to_objects(request, objects):
    """Attach ``tenant_name`` to every object in *objects*.

    Fetches all keystone tenants once, indexes them by id and sets
    ``obj.tenant_name`` from each object's ``project_id``.  Objects whose
    project cannot be resolved get ``tenant_name = None``.  On keystone
    failure the error is reported through horizon and all names are None.
    """
    try:
        # `has_more` belongs to keystone's pagination API and is
        # intentionally unused here.
        tenants, has_more = keystone.tenant_list(request)
    except Exception:
        tenants = []
        msg = _('Unable to retrieve share project information.')
        exceptions.handle(request, msg)
    # Index once so the loop below is O(1) per object
    # (dict comprehension instead of dict([...]) -- same result).
    tenant_dict = {t.id: t for t in tenants}
    for obj in objects:
        tenant_id = getattr(obj, "project_id", None)
        tenant = tenant_dict.get(tenant_id)
        obj.tenant_name = getattr(tenant, "name", None)
| {
"content_hash": "b5b51a508edee0e71d4d72117d73dcbd",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 64,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.6682389937106918,
"repo_name": "jcsp/manila-ui",
"id": "e73583e27a17d63b32dd6d4149c99fd3f48187f3",
"size": "1241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila_ui/dashboards/admin/shares/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "41908"
},
{
"name": "Python",
"bytes": "258347"
},
{
"name": "Shell",
"bytes": "16344"
}
],
"symlink_target": ""
} |
"""CodeContext - Extension to display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the numlines
variable in the CodeContext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import Tkinter
from Tkconstants import TOP, LEFT, X, W, SUNKEN
import re
from sys import maxint as INFINITY
from idlelib.configHandler import idleConf
BLOCKOPENERS = {"class", "def", "elif", "else", "except", "finally", "for",
"if", "try", "while", "with"}
UPDATEINTERVAL = 100 # millisec
FONTUPDATEINTERVAL = 1000 # millisec
getspacesfirstword =\
lambda s, c=re.compile(r"^(\s*)(\w*)"): c.match(s).groups()
class CodeContext:
    """Maintain the block-context hint pane above an IDLE edit window.

    Tracks the block structure (class/def/if/... openers) enclosing the
    first visible line of the editor and shows the last `context_depth`
    opener lines in a Label packed above the text widget.
    """
    menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]
    # User-configurable pane size and colors (config-extensions.def).
    context_depth = idleConf.GetOption("extensions", "CodeContext",
                                       "numlines", type="int", default=3)
    bgcolor = idleConf.GetOption("extensions", "CodeContext",
                                 "bgcolor", type="str", default="LightGray")
    fgcolor = idleConf.GetOption("extensions", "CodeContext",
                                 "fgcolor", type="str", default="Black")
    def __init__(self, editwin):
        """Attach to an editor window and start the periodic update timers."""
        self.editwin = editwin
        self.text = editwin.text
        self.textfont = self.text["font"]
        self.label = None  # the context pane widget; None while hidden
        # self.info is a list of (line number, indent level, line text, block
        # keyword) tuples providing the block structure associated with
        # self.topvisible (the linenumber of the line displayed at the top of
        # the edit window). self.info[0] is initialized as a 'dummy' line which
        # starts the toplevel 'block' of the module.
        self.info = [(0, -1, "", False)]
        self.topvisible = 1
        visible = idleConf.GetOption("extensions", "CodeContext",
                                     "visible", type="bool", default=False)
        if visible:
            self.toggle_code_context_event()
            self.editwin.setvar('<<toggle-code-context>>', True)
        # Start two update cycles, one for context lines, one for font changes.
        self.text.after(UPDATEINTERVAL, self.timer_event)
        self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
    def toggle_code_context_event(self, event=None):
        """Show or hide the context pane and persist the choice."""
        if not self.label:
            # Calculate the border width and horizontal padding required to
            # align the context with the text in the main Text widget.
            #
            # All values are passed through int(str(<value>)), since some
            # values may be pixel objects, which can't simply be added to ints.
            widgets = self.editwin.text, self.editwin.text_frame
            # Calculate the required horizontal padding
            padx = 0
            for widget in widgets:
                padx += int(str( widget.pack_info()['padx'] ))
                padx += int(str( widget.cget('padx') ))
            # Calculate the required border width
            border = 0
            for widget in widgets:
                border += int(str( widget.cget('border') ))
            self.label = Tkinter.Label(self.editwin.top,
                                       text="\n" * (self.context_depth - 1),
                                       anchor=W, justify=LEFT,
                                       font=self.textfont,
                                       bg=self.bgcolor, fg=self.fgcolor,
                                       width=1, #don't request more than we get
                                       padx=padx, border=border,
                                       relief=SUNKEN)
            # Pack the label widget before and above the text_frame widget,
            # thus ensuring that it will appear directly above text_frame
            self.label.pack(side=TOP, fill=X, expand=False,
                            before=self.editwin.text_frame)
        else:
            self.label.destroy()
            self.label = None
        # Remember the visibility choice across sessions.
        idleConf.SetOption("extensions", "CodeContext", "visible",
                           str(self.label is not None))
        idleConf.SaveUserCfgFiles()
    def get_line_info(self, linenum):
        """Get the line indent value, text, and any block start keyword

        If the line does not start a block, the keyword value is False.
        The indentation of empty lines (or comment lines) is INFINITY.
        """
        text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
        spaces, firstword = getspacesfirstword(text)
        # `opener` is the keyword string when the line opens a block,
        # otherwise False.
        opener = firstword in BLOCKOPENERS and firstword
        if len(text) == len(spaces) or text[len(spaces)] == '#':
            indent = INFINITY
        else:
            indent = len(spaces)
        return indent, text, opener
    def get_context(self, new_topvisible, stopline=1, stopindent=0):
        """Get context lines, starting at new_topvisible and working backwards.

        Stop when stopline or stopindent is reached. Return a tuple of context
        data and the indent level at the top of the region inspected.
        """
        assert stopline > 0
        lines = []
        # The indentation level we are currently in:
        lastindent = INFINITY
        # For a line to be interesting, it must begin with a block opening
        # keyword, and have less indentation than lastindent.
        # Scan backwards so each opener found encloses the previous one.
        for linenum in xrange(new_topvisible, stopline-1, -1):
            indent, text, opener = self.get_line_info(linenum)
            if indent < lastindent:
                lastindent = indent
                if opener in ("else", "elif"):
                    # We also show the if statement
                    lastindent += 1
                if opener and linenum < new_topvisible and indent >= stopindent:
                    lines.append((linenum, indent, text, opener))
                if lastindent <= stopindent:
                    break
        # Restore top-down order for display.
        lines.reverse()
        return lines, lastindent
    def update_code_context(self):
        """Update context information and lines visible in the context pane.

        Only the region between the old and new top line is re-scanned;
        self.info is patched incrementally rather than rebuilt.
        """
        new_topvisible = int(self.text.index("@0,0").split('.')[0])
        if self.topvisible == new_topvisible:      # haven't scrolled
            return
        if self.topvisible < new_topvisible:       # scroll down
            lines, lastindent = self.get_context(new_topvisible,
                                                 self.topvisible)
            # retain only context info applicable to the region
            # between topvisible and new_topvisible:
            while self.info[-1][1] >= lastindent:
                del self.info[-1]
        elif self.topvisible > new_topvisible:     # scroll up
            stopindent = self.info[-1][1] + 1
            # retain only context info associated
            # with lines above new_topvisible:
            while self.info[-1][0] >= new_topvisible:
                stopindent = self.info[-1][1]
                del self.info[-1]
            lines, lastindent = self.get_context(new_topvisible,
                                                 self.info[-1][0]+1,
                                                 stopindent)
        self.info.extend(lines)
        self.topvisible = new_topvisible
        # empty lines in context pane:
        context_strings = [""] * max(0, self.context_depth - len(self.info))
        # followed by the context hint lines:
        context_strings += [x[2] for x in self.info[-self.context_depth:]]
        self.label["text"] = '\n'.join(context_strings)
    def timer_event(self):
        # Periodic refresh of the pane; reschedules itself.
        if self.label:
            self.update_code_context()
        self.text.after(UPDATEINTERVAL, self.timer_event)
    def font_timer_event(self):
        # Keep the pane's font in sync with the editor's font.
        newtextfont = self.text["font"]
        if self.label and newtextfont != self.textfont:
            self.textfont = newtextfont
            self.label["font"] = self.textfont
        self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
| {
"content_hash": "91061a6f3a9074925147181cdb8ecc4d",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 80,
"avg_line_length": 48.39772727272727,
"alnum_prop": 0.5635125616341864,
"repo_name": "Jeff-Tian/mybnb",
"id": "add89b88a6ade6760cf39acef0400b7d42c9cc37",
"size": "8518",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python27/Lib/idlelib/CodeContext.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455330"
},
{
"name": "Batchfile",
"bytes": "6263"
},
{
"name": "C",
"bytes": "2304983"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "31815"
},
{
"name": "CSS",
"bytes": "30628"
},
{
"name": "Cucumber",
"bytes": "248616"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "31983"
},
{
"name": "HTML",
"bytes": "376863"
},
{
"name": "JavaScript",
"bytes": "20239"
},
{
"name": "M4",
"bytes": "67848"
},
{
"name": "Makefile",
"bytes": "142926"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "19913027"
},
{
"name": "REXX",
"bytes": "3862"
},
{
"name": "Ruby",
"bytes": "14954382"
},
{
"name": "Shell",
"bytes": "366205"
},
{
"name": "Tcl",
"bytes": "2150972"
},
{
"name": "TeX",
"bytes": "230259"
},
{
"name": "Visual Basic",
"bytes": "494"
},
{
"name": "XSLT",
"bytes": "3736"
},
{
"name": "Yacc",
"bytes": "14342"
}
],
"symlink_target": ""
} |
"""Tests for the create_future() function."""
import asyncio
from launch.utilities import create_future
def test_create_future():
    """Check that create_future() returns a Future for both supported inputs."""
    # Passing None and passing an explicit event loop must each yield a Future.
    for loop in (None, asyncio.get_event_loop()):
        result = create_future(loop)
        assert isinstance(result, asyncio.Future)
| {
"content_hash": "528a0863b1d177e3d5e197f2b5855f74",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 32.07692307692308,
"alnum_prop": 0.7338129496402878,
"repo_name": "ros2/launch",
"id": "57131b13f2f312a650b5919d5a1f30368ec90475",
"size": "1019",
"binary": false,
"copies": "1",
"ref": "refs/heads/rolling",
"path": "launch/test/launch/utilities/test_create_future.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "857"
},
{
"name": "C++",
"bytes": "1468"
},
{
"name": "CMake",
"bytes": "8807"
},
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "1063971"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.forms.util import flatatt
# json entered the standard library in Python 2.6; fall back to the
# simplejson module bundled with older Django versions.
try:
    import json
except ImportError:
    from django.utils import simplejson as json

from cked import default_settings

# Shared encoder callable used to serialise widget options for the template.
json_encode = json.JSONEncoder().encode
class CKEditorWidget(forms.Textarea):
    """
    Widget providing CKEditor for Rich Text Editing.
    Supports direct image uploads and embed.
    """
    class Media:
        # Extra JS asset Django injects wherever this widget is rendered.
        js = (settings.STATIC_URL + 'cked/ckeditor/ckeditor.js',)

    def __init__(self, config = None, *args, **kwargs):
        # config: optional dict of CKEditor options; note that when given it
        # REPLACES the defaults entirely rather than being merged with them.
        super(CKEditorWidget, self).__init__(*args, **kwargs)

        # Use default config
        self.options = default_settings.CKEDITOR_DEFAULT_OPTIONS.copy()
        if config:
            self.options = config

        # If CKEDITOR_OPTIONS presented in settings, use it!
        options = getattr(settings, 'CKEDITOR_OPTIONS', None)
        if options is not None:
            if isinstance(options, dict):
                # Override defaults with CKEDITOR_OPTIONS.
                self.options.update(options)
            else:
                raise ImproperlyConfigured('CKEDITOR_OPTIONS setting must be'
                    ' a dictionary type.')

    def render(self, name, value, attrs={}):
        # NOTE(review): mutable default `attrs={}` is a Python anti-pattern;
        # it is not mutated here (build_attrs returns a new dict), but
        # `attrs=None` would be safer.
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        # Point the editor's file browser at the bundled elFinder view.
        self.options['filebrowserBrowseUrl'] = reverse('cked_elfinder')
        return mark_safe(render_to_string('cked/ckeditor.html', {
            'final_attrs': flatatt(final_attrs),
            'value': conditional_escape(force_unicode(value)),
            'id': final_attrs['id'],
            'options': json_encode(self.options)})
        )
| {
"content_hash": "d4b6d46f970018ce42bf8e813cf4fb86",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 77,
"avg_line_length": 33.06349206349206,
"alnum_prop": 0.6524243879020644,
"repo_name": "20tab/twentytab-tcked",
"id": "10d5c108ac620c86a8290a237f736e01508f099a",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tcked/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5555"
}
],
"symlink_target": ""
} |
"""
Code PyQt4
"""
import sys, os
from PyQt4 import QtGui, QtCore
class Button(QtGui.QToolButton):
    """Calculator key: a tool button that expands horizontally and reports a
    padded, roughly square size hint."""

    def __init__(self, text, parent=None):
        super(Button, self).__init__(parent)
        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
                           QtGui.QSizePolicy.Preferred)
        self.setText(text)

    def sizeHint(self):
        hint = super(Button, self).sizeHint()
        hint.setHeight(hint.height() + 20)
        hint.setWidth(max(hint.width(), hint.height()))
        return hint
class Calculator(QtGui.QDialog):
    """A basic four-function calculator dialog (PyQt4 / Python 2).

    Additive (+/-) and multiplicative (multiply/divide) operators are tracked
    separately so multiplication and division bind tighter than addition and
    subtraction, mirroring the classic Qt calculator example.
    """

    NumDigitButtons = 10  # digits 0-9

    def __init__(self, parent=None):
        super(Calculator, self).__init__(parent)

        # Operator state: an empty string means "no operator pending".
        self.pendingAdditiveOperator = ''
        self.pendingMultiplicativeOperator = ''
        # True when the next digit should start a fresh operand.
        self.waitingForOperand = True

        self.display = QtGui.QLineEdit('0')
        self.display.setReadOnly(True)
        self.display.setAlignment(QtCore.Qt.AlignRight)
        self.display.setMaxLength(15)

        font = self.display.font()
        font.setPointSize(font.pointSize() + 8)
        self.display.setFont(font)

        self.digitButtons = []
        for i in range(Calculator.NumDigitButtons):
            self.digitButtons.append(self.createButton(str(i),
                    self.digitClicked))

        self.pointButton = self.createButton(".", self.pointClicked)
        # Latin-1 escapes: "\261" plus-minus, "\367" division, "\327" multiply.
        self.changeSignButton = self.createButton("\261",
                self.changeSignClicked)

        self.divisionButton = self.createButton("\367",
                self.multiplicativeOperatorClicked)
        self.multiplicatButton = self.createButton("\327",
                self.multiplicativeOperatorClicked)
        self.minusButton = self.createButton("-", self.additiveOperatorClicked)
        self.plusButton = self.createButton("+", self.additiveOperatorClicked)
        self.equalButton = self.createButton("=", self.equalClicked)

        mainLayout = QtGui.QGridLayout()
        mainLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
        mainLayout.addWidget(self.display, 0, 0, 1, 6)

        # Digits 1-9 form a 3x3 grid.  NOTE: Python 2 integer division is
        # relied on here; under Python 3 this would need `//`.
        for i in range(1, Calculator.NumDigitButtons):
            row = ((9 - i) / 3) + 2
            column = ((i - 1) % 3) + 1
            mainLayout.addWidget(self.digitButtons[i], row, column)

        mainLayout.addWidget(self.digitButtons[0], 5, 1)
        mainLayout.addWidget(self.pointButton, 5, 2)
        mainLayout.addWidget(self.changeSignButton, 5, 3)

        mainLayout.addWidget(self.divisionButton, 2, 4)
        mainLayout.addWidget(self.multiplicatButton, 3, 4)
        mainLayout.addWidget(self.minusButton, 4, 4)
        mainLayout.addWidget(self.plusButton, 5, 4)

        # Fix: the original added digitButtons[0] at (5, 1) a second time
        # here; the redundant addWidget call has been removed.
        mainLayout.addWidget(self.equalButton, 5, 5)
        self.setLayout(mainLayout)

        self.setWindowTitle("Calculator")

    def createButton(self, text, member):
        """Return a Button labelled `text` whose clicked signal calls `member`."""
        button = Button(text)
        button.clicked.connect(member)
        return button

    def digitClicked(self):
        """Append the clicked digit to the display, starting a new operand
        if one is expected."""
        clickedButton = self.sender()
        digitValue = int(clickedButton.text())

        # Ignore redundant leading zeros.
        if self.display.text() == '0' and digitValue == 0.0:
            return

        if self.waitingForOperand:
            self.display.clear()
            self.waitingForOperand = False

        self.display.setText(self.display.text() + str(digitValue))

    def multiplicativeOperatorClicked(self):
        """Handle multiply/divide: fold any pending multiplicative work first."""
        clickedButton = self.sender()
        clickedOperator = clickedButton.text()
        operand = float(self.display.text())

        if self.pendingMultiplicativeOperator:
            if not self.calculate(operand, self.pendingMultiplicativeOperator):
                self.abortOperation()
                return
            self.display.setText(str(self.factorSoFar))
        else:
            self.factorSoFar = operand

        self.pendingMultiplicativeOperator = clickedOperator
        self.waitingForOperand = True

    def additiveOperatorClicked(self):
        """Handle +/-: resolve pending multiplicative work, then additive."""
        clickedButton = self.sender()
        clickedOperator = clickedButton.text()
        operand = float(self.display.text())

        # Multiplication/division bind tighter, so fold them in first.
        if self.pendingMultiplicativeOperator:
            if not self.calculate(operand, self.pendingMultiplicativeOperator):
                self.abortOperation()
                return
            self.display.setText(str(self.factorSoFar))
            operand = self.factorSoFar
            self.factorSoFar = 0.0
            self.pendingMultiplicativeOperator = ''

        if self.pendingAdditiveOperator:
            if not self.calculate(operand, self.pendingAdditiveOperator):
                self.abortOperation()
                return
            self.display.setText(str(self.sumSoFar))
        else:
            self.sumSoFar = operand

        self.pendingAdditiveOperator = clickedOperator
        self.waitingForOperand = True

    def equalClicked(self):
        """Resolve all pending operators and show the final result."""
        operand = float(self.display.text())

        if self.pendingMultiplicativeOperator:
            if not self.calculate(operand, self.pendingMultiplicativeOperator):
                self.abortOperation()
                return
            operand = self.factorSoFar
            self.factorSoFar = 0.0
            self.pendingMultiplicativeOperator = ''

        if self.pendingAdditiveOperator:
            if not self.calculate(operand, self.pendingAdditiveOperator):
                self.abortOperation()
                return
            self.pendingAdditiveOperator = ''
        else:
            self.sumSoFar = operand

        self.display.setText(str(self.sumSoFar))
        self.sumSoFar = 0.0
        self.waitingForOperand = True

    def pointClicked(self):
        """Insert a decimal point, allowing at most one per operand."""
        if self.waitingForOperand:
            self.display.setText('0')

        if "." not in self.display.text():
            self.display.setText(self.display.text() + ".")

        self.waitingForOperand = False

    def changeSignClicked(self):
        """Toggle the sign of the displayed value."""
        text = self.display.text()
        value = float(text)

        if value > 0.0:
            text = "-" + text
        elif value < 0.0:
            text = text[1:]

        self.display.setText(text)

    def abortOperation(self):
        """Reset all calculator state after an error (e.g. division by zero).

        Fix: the operator handlers above call this method, but the original
        class never defined it, so any division by zero raised
        AttributeError instead of recovering.
        """
        self.sumSoFar = 0.0
        self.factorSoFar = 0.0
        self.pendingAdditiveOperator = ''
        self.pendingMultiplicativeOperator = ''
        self.waitingForOperand = True
        self.display.setText('####')

    def calculate(self, rightOperand, pendingOperator):
        """Fold rightOperand into the running sum/factor.

        Returns False on division by zero, True otherwise.
        """
        if pendingOperator == "+":
            self.sumSoFar += rightOperand
        elif pendingOperator == "-":
            self.sumSoFar -= rightOperand
        elif pendingOperator == "\327":
            self.factorSoFar *= rightOperand
        elif pendingOperator == "\367":
            if rightOperand == 0.0:
                return False
            self.factorSoFar /= rightOperand

        return True
def main():
    """Create the QApplication and run the calculator dialog modally."""
    application = QtGui.QApplication(sys.argv)  # must outlive the dialog
    dialog = Calculator()
    sys.exit(dialog.exec_())


if __name__ == '__main__':
    main()
| {
"content_hash": "68b5fbfe860e433c83d8a8afb06fc91f",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 30.497737556561084,
"alnum_prop": 0.6093471810089021,
"repo_name": "janusnic/21v-pyqt",
"id": "1819d019694443f2d4e300d6266606c6f8d05f5b",
"size": "6783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit_02/calculator/4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "525972"
},
{
"name": "SQLPL",
"bytes": "150"
}
],
"symlink_target": ""
} |
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"

# Re-export everything from the Log module at package level so callers can
# simply `from androlyze.log import ...`.
from androlyze.log.Log import *
"content_hash": "86b1eb0ca561068cae162780355f232a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 52,
"avg_line_length": 30,
"alnum_prop": 0.7166666666666667,
"repo_name": "nachtmaar/androlyze",
"id": "24be761f943fcf8bb76bde182486110666bc7046",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "androlyze/log/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "509940"
},
{
"name": "Shell",
"bytes": "11367"
}
],
"symlink_target": ""
} |
"""
WSGI config for arena project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arena.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "415247817b29cea554e96e8f956f4de6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.5,
"alnum_prop": 0.7714285714285715,
"repo_name": "abhishek97/arena",
"id": "4c205806922f9dd47b04ff126c7f6ee3bf1b2fac",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arena/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23857"
}
],
"symlink_target": ""
} |
import logging

import medianrunner
import wordcount

logging.basicConfig(level=logging.INFO)

# First pass: count words from 'wc_input' into the word-count result file.
t = wordcount.WordCounter('wc_input', 'wc_output/wc_result.txt')
t.run()
# Second pass: compute running medians over the same input directory.
t = medianrunner.MedianRunner('wc_input', 'wc_output/med_result.txt')
t.run()
| {
"content_hash": "c11b82da98b99e345fe58f12eda6916f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 69,
"avg_line_length": 20.583333333333332,
"alnum_prop": 0.757085020242915,
"repo_name": "vsharma1/InsightDataScience",
"id": "3d033d34b1a8cc2a4b5e1ea37a18a233733058e9",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9329"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
"""Basic Image Process."""
from absl import logging
import numpy as np
import tensorflow as tf
from data.augment import apply_augment
from data.augment import compose_augment_seq
class BasicImageProcess():
  """Builds tf.data input pipelines with augmentation for image datasets."""

  def __init__(self, data, input_shape=(256, 256, 3)):
    # data: aligned tensors/arrays (image, label, image_id) consumed by
    # tf.data.Dataset.from_tensor_slices in make_dataset().
    # input_shape: static (height, width, channels) each image is reshaped to.
    self.input_shape = input_shape
    self.data = data

  def image_normalize(self, image, do_mean=True, do_std=True):
    """Normalizes an image with fixed per-channel mean/std constants."""
    channel_means = [0.485, 0.456, 0.406]
    channel_stds = [0.229, 0.224, 0.225]
    if do_mean:
      means = tf.broadcast_to(channel_means, tf.shape(image))
      image = image - means
    if do_std:
      stds = tf.broadcast_to(channel_stds, tf.shape(image))
      image = image / stds
    return image

  def preprocess_image(self, image, dtype=tf.float32, aug_ops_list=None):
    """Scales an image to [0, 1], reshapes it, and applies augmentations."""
    image = tf.cast(image, dtype) / 255.0
    image = tf.reshape(
        image, shape=tf.stack(self.input_shape))  # may become problematic
    images = apply_augment(image, ops_list=aug_ops_list)
    return images

  def parse_record_fn(self, raw_record, is_training, dtype, aug_list=None):
    """Parses one (image, label, image_id) record into augmented tensors."""
    # Build one augmentation pipeline per requested augmentation sequence.
    aug_ops_list = [
        compose_augment_seq(aug_type, is_training=is_training)
        for aug_type in aug_list
    ]
    # do preprocessing
    image, label, image_id = raw_record
    images = self.preprocess_image(
        image, dtype=dtype, aug_ops_list=aug_ops_list)
    label = tf.cast(tf.reshape(label, shape=[1]), dtype=tf.float32)
    # `images` is a tuple (one tensor per augmentation); append label and id.
    return images + (label, image_id)

  def process_record_dataset(self,
                             dataset,
                             aug_list,
                             is_training,
                             batch_size,
                             shuffle_buffer,
                             num_batch_per_epoch=1,
                             dtype=tf.float32,
                             datasets_num_private_threads=None,
                             force_augment=False,
                             drop_remainder=False):
    """Shuffles, augments, batches, and prefetches a dataset.

    Returns:
      A list [dataset, num_batch_per_epoch].
    """
    # Defines a specific size thread pool for tf.data operations.
    if datasets_num_private_threads:
      options = tf.data.Options()
      options.experimental_threading.private_threadpool_size = (
          datasets_num_private_threads)
      dataset = dataset.with_options(options)
      logging.info('datasets_num_private_threads: %s',
                   datasets_num_private_threads)

    if is_training:
      # Repeat a too-small dataset so at least one full batch exists.
      num_data = len([1 for _ in dataset.enumerate()])
      # Fix: `np.int` was removed in NumPy 1.24; the builtin int is
      # equivalent here.
      multiplier = np.maximum(1, int(np.ceil(batch_size / num_data)))
      if multiplier > 1:
        dataset = dataset.repeat(multiplier)
      # Shuffles records before repeating to respect epoch boundaries.
      dataset = dataset.shuffle(
          buffer_size=shuffle_buffer, reshuffle_each_iteration=True)

    # Parses the raw records into images and labels.
    dataset = dataset.map(
        lambda *args: self.parse_record_fn(
            args,
            is_training=is_training or force_augment,
            dtype=dtype,
            aug_list=aug_list),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)

    if not is_training:
      num_batch_per_epoch = len([1 for _ in dataset.enumerate()])
    else:
      if num_batch_per_epoch <= 0:
        num_batch_per_epoch = len([1 for _ in dataset.enumerate()])
      dataset = dataset.repeat()

    # Prefetch.
    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return [dataset, num_batch_per_epoch]

  def input_fn(self,
               is_training,
               batch_size,
               aug_list=None,
               num_batch_per_epoch=1,
               dtype=tf.float32,
               datasets_num_private_threads=None,
               input_context=None,
               force_augment=False,
               training_dataset_cache=False):
    """Creates an input function from the dataset.

    Raises:
      TypeError: if aug_list is not a list (a list of augmentation tuples,
        or a list of such lists).
    """
    dataset = self.make_dataset(
        is_training=is_training, input_context=input_context)

    if is_training and training_dataset_cache:
      # Improve training performance when training data is in remote storage
      # and can fit into worker memory.
      dataset = dataset.cache()

    # aug_list must be a list of lists of tuples; wrap a flat list.
    if not isinstance(aug_list, list):
      raise TypeError('augmentation list should be a list')
    if isinstance(aug_list, list):
      if not isinstance(aug_list[0], list):
        aug_list = [aug_list]

    return self.process_record_dataset(
        dataset=dataset,
        aug_list=aug_list,
        is_training=is_training,
        batch_size=batch_size,
        shuffle_buffer=1000,
        num_batch_per_epoch=num_batch_per_epoch,
        dtype=dtype,
        datasets_num_private_threads=datasets_num_private_threads,
        force_augment=force_augment,
        drop_remainder=True if is_training else False)

  def make_dataset(self, is_training, input_context=None):
    """Makes a (possibly sharded, shuffled) dataset from in-memory tensors."""
    dataset = tf.data.Dataset.from_tensor_slices(self.data)
    if input_context:
      logging.info(
          'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
          input_context.input_pipeline_id, input_context.num_input_pipelines)
      dataset = dataset.shard(input_context.num_input_pipelines,
                              input_context.input_pipeline_id)
    if is_training:
      # Shuffle the input files
      dataset = dataset.shuffle(buffer_size=len(self.data[0]))
    return dataset
| {
"content_hash": "19c0ba7a96320575c48f6d1c9b3dc64b",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 80,
"avg_line_length": 36.8051948051948,
"alnum_prop": 0.6152081863091038,
"repo_name": "google-research/deep_representation_one_class",
"id": "2575382bff16d698f7832c971be2b553f13deb41",
"size": "6295",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "data/data_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""string processing utilities for extracting strings with various kinds of delimiters"""
import logging
import htmlentitydefs
def find_all(searchin, substr):
    """returns a list of locations where substr occurs in searchin
    locations are not allowed to overlap"""
    positions = []
    start = searchin.find(substr)
    while start != -1:
        positions.append(start)
        # Resume the search past this match so matches never overlap.
        start = searchin.find(substr, start + len(substr))
    return positions
def extract(source, startdelim, enddelim, escape=None, startinstring=False, allowreentry=True):
    """Extracts a doublequote-delimited string from a string, allowing for backslash-escaping
    returns tuple of (quoted string with quotes, still in string at end)"""
    # note that this returns the quote characters as well... even internally
    instring = startinstring
    enteredonce = False
    lenstart = len(startdelim)
    lenend = len(enddelim)
    # Candidate delimiter positions; identical delimiters share one scan.
    startdelim_places = find_all(source, startdelim)
    if startdelim == enddelim:
        enddelim_places = startdelim_places[:]
    else:
        enddelim_places = find_all(source, enddelim)
    if escape is not None:
        lenescape = len(escape)
        escape_places = find_all(source, escape)
        last_escape_pos = -1  # NOTE(review): never read again -- dead variable
        # filter escaped escapes
        # A run of escapes toggles: "\\\\" escapes itself, so only every
        # other escape in a run counts as a "true" (active) escape.
        true_escape = False
        true_escape_places = []
        for escape_pos in escape_places:
            if escape_pos - lenescape in escape_places:
                true_escape = not true_escape
            else:
                true_escape = True
            if true_escape:
                true_escape_places.append(escape_pos)
        # Delimiters immediately preceded by an active escape don't count.
        startdelim_places = [pos for pos in startdelim_places if pos - lenescape not in true_escape_places]
        enddelim_places = [pos + lenend for pos in enddelim_places if pos - lenescape not in true_escape_places]
    else:
        enddelim_places = [pos + lenend for pos in enddelim_places]
    # get a unique sorted list of the significant places in the string
    significant_places = [0] + startdelim_places + enddelim_places + [len(source)-1]
    significant_places.sort()
    extracted = ""
    lastpos = None
    for pos in significant_places:
        if instring and pos in enddelim_places:
            # make sure that if startdelim == enddelim we don't get confused and count the same string as start and end
            if lastpos == pos - lenstart and lastpos in startdelim_places:
                continue
            extracted += source[lastpos:pos]
            instring = False
            lastpos = pos
        if (not instring) and pos in startdelim_places and not (enteredonce and not allowreentry):
            instring = True
            enteredonce = True
            lastpos = pos
    if instring:
        # Unterminated string: keep everything to the end and report
        # instring=True so multi-line callers can continue.
        extracted += source[lastpos:]
    return (extracted, instring)
def extractfromlines(lines, startdelim, enddelim, escape):
    """Calls extract over multiple lines, remembering whether in the string or not"""
    collected = ""
    instring = 0
    for line in lines:
        extracted, instring = extract(line, startdelim, enddelim, escape, instring)
        collected += extracted
        # Once the delimited region closes, later lines are irrelevant.
        if not instring:
            break
    return collected
def extractstr(source):
    "Extracts a doublequote-delimited string from a string, allowing for backslash-escaping"
    extracted, _ = extract(source, '"', '"', '\\')
    return extracted
def extractcomment(lines):
    "Extracts <!-- > XML comments from lines"
    # Thin wrapper: no escape character applies inside XML comments.
    return extractfromlines(lines, "<!--", "-->", None)
def extractwithoutquotes(source, startdelim, enddelim, escape=None, startinstring=False, includeescapes=True, allowreentry=True):
    """Extracts a doublequote-delimited string from a string, allowing for backslash-escaping
    includeescapes can also be a function that takes the whole escaped string and returns the replaced version"""
    # Unlike extract(), the returned text EXCLUDES the delimiters, and escape
    # sequences may be stripped or transformed via includeescapes.
    instring = startinstring
    enteredonce = False
    lenstart = len(startdelim)
    lenend = len(enddelim)
    startdelim_places = find_all(source, startdelim)
    if startdelim == enddelim:
        enddelim_places = startdelim_places[:]
    else:
        enddelim_places = find_all(source, enddelim)
    #hell slow because it is called far too often
    if escape is not None:
        lenescape = len(escape)
        escape_places = find_all(source, escape)
        last_escape_pos = -1  # NOTE(review): never read again -- dead variable
        # filter escaped escapes
        # Only every other escape in a run is "true" ("\\\\" escapes itself).
        true_escape = False
        true_escape_places = []
        for escape_pos in escape_places:
            if escape_pos - lenescape in escape_places:
                true_escape = not true_escape
            else:
                true_escape = True
            if true_escape:
                true_escape_places.append(escape_pos)
        # Delimiters immediately preceded by an active escape don't count.
        startdelim_places = [pos for pos in startdelim_places if pos - lenescape not in true_escape_places]
        enddelim_places = [pos + lenend for pos in enddelim_places if pos - lenescape not in true_escape_places]
    else:
        enddelim_places = [pos + lenend for pos in enddelim_places]
    # get a unique sorted list of the significant places in the string
    significant_places = [0] + startdelim_places + enddelim_places + [len(source)-1]
    significant_places.sort()
    extracted = ""
    lastpos = 0
    # includeescapes may be a bool (keep/drop escapes) or a callable that
    # maps each whole escape sequence to its replacement text.
    callable_includeescapes = callable(includeescapes)
    checkescapes = callable_includeescapes or not includeescapes
    for pos in significant_places:
        if instring and pos in enddelim_places and lastpos != pos - lenstart:
            # Slice out the section between the delimiters (exclusive).
            section_start, section_end = lastpos + len(startdelim), pos - len(enddelim)
            section = source[section_start:section_end]
            if escape is not None and checkescapes:
                escape_list = [epos - section_start for epos in true_escape_places if section_start <= epos <= section_end]
                new_section = ""
                last_epos = 0
                for epos in escape_list:
                    new_section += section[last_epos:epos]
                    if callable_includeescapes:
                        replace_escape = includeescapes(section[epos:epos+lenescape+1])
                        # TODO: deprecate old method of returning boolean from includeescape, by removing this if block
                        if not isinstance(replace_escape, basestring):
                            if replace_escape:
                                replace_escape = section[epos:epos+lenescape+1]
                            else:
                                replace_escape = section[epos+lenescape:epos+lenescape+1]
                        new_section += replace_escape
                        last_epos = epos + lenescape + 1
                    else:
                        # includeescapes is falsy: drop the escape marker,
                        # keep the escaped character itself.
                        last_epos = epos + lenescape
                section = new_section + section[last_epos:]
            extracted += section
            instring = False
            lastpos = pos
        if (not instring) and pos in startdelim_places and not (enteredonce and not allowreentry):
            instring = True
            enteredonce = True
            lastpos = pos
    if instring:
        # Unterminated string: process the remainder the same way.
        section_start = lastpos + len(startdelim)
        section = source[section_start:]
        if escape is not None and not includeescapes:
            escape_list = [epos - section_start for epos in true_escape_places if section_start <= epos]
            new_section = ""
            last_epos = 0
            for epos in escape_list:
                new_section += section[last_epos:epos]
                if callable_includeescapes and includeescapes(section[epos:epos+lenescape+1]):
                    last_epos = epos
                else:
                    last_epos = epos + lenescape
            section = new_section + section[last_epos:]
        extracted += section
    return (extracted, instring)
def escapequotes(source, escapeescapes=0):
    "Returns the same string, with double quotes escaped with backslash"
    if not escapeescapes:
        return source.replace('"', '\\"')
    # Escape backslashes first so the added quote escapes aren't doubled.
    return source.replace('\\', '\\\\').replace('"', '\\"')
def escapesinglequotes(source):
    """Return *source* with every single quote doubled (SQL-style escaping)."""
    return source.replace("'", "''")
def htmlentityencode(source):
    """encodes source using HTML entities e.g. © -> ©"""
    parts = []
    for char in source:
        name = htmlentitydefs.codepoint2name.get(ord(char))
        if name is not None:
            parts.append("&%s;" % name)
        else:
            parts.append(str(char))
    return "".join(parts)
def htmlentitydecode(source):
    """decodes source using HTML entities e.g. © -> ©"""
    output = u""
    inentity = False  # True while scanning between '&' and its terminator
    for char in source:
        if char == "&":
            # Start (or restart) collecting a candidate entity name.
            inentity = True
            possibleentity = ""
            continue
        if inentity:
            if char == ";":
                if len(possibleentity) > 0 and possibleentity in htmlentitydefs.name2codepoint:
                    output += unichr(htmlentitydefs.name2codepoint[possibleentity])
                    inentity = False
                else:
                    # Unknown entity: emit the collected text verbatim.
                    output += "&" + possibleentity + ";"
                    inentity = False
            elif char == " ":
                # Spaces cannot occur inside an entity: abort, emit raw text.
                output += "&" + possibleentity + char
                inentity = False
            else:
                possibleentity += char
        else:
            output += char
    # NOTE(review): an unterminated entity at end of input (e.g. "&amp" with
    # no ';') is silently dropped because the loop ends while inentity=True.
    return output
def javapropertiesencode(source):
    """encodes source in the escaped-unicode encoding used by Java .properties files"""
    parts = []
    for char in source:
        if char in controlchars:
            parts.append(controlchars[char])
        elif 0 <= ord(char) < 128:
            # Plain ASCII passes through unchanged.
            parts.append(str(char))
        else:
            # Everything else becomes a \uXXXX escape.
            parts.append(u"\\u%04X" % ord(char))
    return u"".join(parts)
def mozillapropertiesencode(source):
    """encodes source in the escaped-unicode encoding used by Mozilla .properties files"""
    # Mozilla keeps non-ASCII characters as-is; only control characters
    # (and backslash) are escaped.
    parts = [controlchars.get(char, char) for char in source]
    return u"".join(parts)
# Lookup tables shared by the .properties encoders/decoders below.
propertyescapes = {
    # escapes that are self-escaping
    "\\": "\\", "'": "'", '"': '"',
    # control characters that we keep
    "f": "\f", "n": "\n", "r": "\r", "t": "\t",
    }

controlchars = {
    # the reverse of the above...
    "\\": "\\\\",
    "\f": "\\f", "\n": "\\n", "\r": "\\r", "\t": "\\t"
    }
def escapecontrols(source):
    """Escape control characters (and backslash) using the controlchars table."""
    for raw, escaped in controlchars.iteritems():
        source = source.replace(raw, escaped)
    return source
def propertiesdecode(source):
    """Decodes source from the escaped-unicode encoding used by .properties files.
    Java uses Latin1 by default, and Mozilla uses UTF-8 by default."""
    # since the .decode("unicode-escape") routine decodes everything, and we don't want to
    # we reimplemented the algorithm from Python Objects/unicode.c in Python here
    # and modified it to retain escaped control characters
    output = u""
    s = 0
    if isinstance(source, str):
        # NOTE(review): `encoding` is not defined anywhere in this function;
        # this branch raises NameError for byte-string input. It looks like
        # an `encoding` parameter was lost -- TODO confirm against upstream.
        source = source.decode(encoding)
    def unichr2(i):
        """Returns a Unicode string of one character with ordinal 32 <= i, otherwise an escaped control character"""
        if 32 <= i:
            return unichr(i)
        elif unichr(i) in controlchars:
            # we just return the character, unescaped
            # if people want to escape them they can use escapecontrols
            return unichr(i)
        else:
            return "\\u%04x" % i
    while s < len(source):
        c = source[s]
        if c != '\\':
            # Ordinary character: copy through.
            output += c
            s += 1
            continue
        s += 1
        if s >= len(source):
            # this is an escape at the end of the line, which implies a continuation...
            # return the escape to inform the parser
            output += c
            continue
        c = source[s]
        s += 1
        if c == '\n': pass  # escaped newline: line continuation, drop both
        # propertyescapes lookups
        elif c in propertyescapes: output += propertyescapes[c]
        # \uXXXX escapes
        # \UXXXX escapes
        elif c in "uU":
            # Read up to four hex digits; stops early at a non-hex character
            # or end of input.
            digits = 4
            x = 0
            for digit in range(digits):
                x <<= 4
                if s + digit >= len(source):
                    digits = digit
                    break
                c = source[s+digit].lower()
                if c.isdigit():
                    x += ord(c) - ord('0')
                elif c in "abcdef":
                    x += ord(c) - ord('a') + 10
                else:
                    break
            s += digits
            output += unichr2(x)
        elif c == "N":
            # \N{UNICODE CHARACTER NAME} escapes.
            if source[s] != "{":
                logging.warn("Invalid named unicode escape: no { after \\N")
                output += "\\" + c
                continue
            s += 1
            e = source.find("}", s)
            if e == -1:
                logging.warn("Invalid named unicode escape: no } after \\N{")
                output += "\\" + c
                continue
            import unicodedata
            name = source[s:e]
            output += unicodedata.lookup(name)
            s = e + 1
        else:
            output += c # Drop any \ that we don't specifically handle
    return output
def quotestr(source, escapeescapes=0):
    """Returns a doublequote-delimited quoted string, escaping double quotes with backslash.

    Accepts either a single string or a list of lines; a list is quoted
    line-by-line and the quoted lines are joined with newlines.
    """
    if isinstance(source, list):
        # Fix: the previous loop crashed with UnboundLocalError when the
        # list was empty; joining handles that case and yields "".
        return '\n'.join(['"' + escapequotes(line, escapeescapes) + '"'
                          for line in source])
    else:
        return '"' + escapequotes(source, escapeescapes) + '"'
def singlequotestr(source):
    """Return a singlequote-delimited quoted string, escaping single quotes
    by doubling them."""
    return "'%s'" % escapesinglequotes(source)
def eitherquotestr(source):
    "Returns a singlequote- or doublequote-delimited string, depending on what quotes it contains"
    # Prefer single quotes only when the text contains a double quote.
    return singlequotestr(source) if '"' in source else quotestr(source)
def findend(string, substring):
    """Return the index just past the first occurrence of substring,
    or -1 if it does not occur."""
    pos = string.find(substring)
    return pos if pos == -1 else pos + len(substring)
def rstripeol(string):
    """Strip trailing CR/LF characters (any mix, any count) from *string*."""
    return string.rstrip("\n\r")
def stripcomment(comment, startstring="<!--", endstring="-->"):
    """Return the stripped text between startstring and endstring.

    If startstring is absent the search begins at the start of the text;
    if endstring is absent the last character is dropped (find() == -1).
    """
    begin = comment.find(startstring)
    begin = 0 if begin == -1 else begin + len(startstring)
    end = comment.find(endstring, begin)
    return comment[begin:end].strip()
def unstripcomment(comment, startstring="<!-- ", endstring=" -->\n"):
    """Wrap the stripped comment text between startstring and endstring."""
    return "%s%s%s" % (startstring, comment.strip(), endstring)
def encodewithdict(unencoded, encodedict):
    """encodes certain characters in the string using an encode dictionary"""
    result = unencoded
    for raw, substitute in encodedict.iteritems():
        # Membership test avoids a no-op replace() pass.
        if raw in result:
            result = result.replace(raw, substitute)
    return result
def makeutf8(d):
    """convert numbers to utf8 codes in the values of a dictionary"""
    # Mutates d in place AND returns it. Python 2 only: unichr/encode('utf8')
    # turn an integer codepoint into a UTF-8 byte string.
    for key, value in d.items():
        if type(value) == int:
            d[key] = unichr(value).encode('utf8')
    return d
def testcase():
    # Ad-hoc smoke test of the extraction helpers (Python 2 print
    # statements; run the module directly rather than under a test runner).
    x = ' "this" " is " "a" " test!" '
    print extract(x, '"', '"', None)
    print extract(x, '"', '"', '!')
    print extractwithoutquotes(x, '"', '"', None)
    print extractwithoutquotes(x, '"', '"', '!')
    print extractwithoutquotes(x, '"', '"', '!', includeescapes=False)

if __name__ == '__main__':
    testcase()
| {
"content_hash": "59f8b955149582f6c02cbf04d850b912",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 129,
"avg_line_length": 38.24757281553398,
"alnum_prop": 0.5830689173753014,
"repo_name": "dbbhattacharya/kitsune",
"id": "0e917bf3a5e9bea3625d3c789302f2e94a6cbc45",
"size": "16588",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/misc/quote.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
} |
"""Unit tests for the "create_udm_events" function."""
import json
import unittest
from unittest import mock
from google.auth.transport import requests
from . import create_udm_events
# JSON-encoded list of two sample SCAN_HOST UDM events; the function under
# test takes the events payload as a JSON array string.
_test_event = json.dumps([{
    "metadata": {
        "eventTimestamp": "2021-07-01T19:39:08.304950563Z",
        "eventType": "SCAN_HOST",
        "vendorName": "Telemetry4u",
        "productName": "Inspectotron",
    },
    "target": {
        "hostname": "workbox10",
    },
    "securityResult": [{
        "category": ["DATA_AT_REST"],
        "summary": "Personal",
        "description": "Files Labeled: 21+"
    }, {
        "category": ["DATA_AT_REST"],
        "summary": "PCI",
        "description": "Files Labeled: 21+"
    }]
}, {
    "metadata": {
        "eventTimestamp": "2021-07-02T19:39:08.304950563Z",
        "eventType": "SCAN_HOST",
        "vendorName": "Telemetry4u",
        "productName": "Inspectotron",
    },
    "target": {
        "hostname": "workbox10",
    },
    "securityResult": [{
        "category": ["DATA_AT_REST"],
        "summary": "Personal",
        "description": "Files Labeled: 21+"
    }, {
        "category": ["DATA_AT_REST"],
        "summary": "PCI",
        "description": "Files Labeled: 21+"
    }]
}])
class CreateUdmEventTest(unittest.TestCase):
  """Tests create_udm_events() against mocked AuthorizedSession responses."""

  @mock.patch.object(requests, "AuthorizedSession", autospec=True)
  @mock.patch.object(requests.requests, "Response", autospec=True)
  def test_http_error(self, mock_response, mock_session):
    # A 400 response must surface as HTTPError via raise_for_status().
    mock_session.request.return_value = mock_response
    type(mock_response).status_code = mock.PropertyMock(return_value=400)
    mock_response.raise_for_status.side_effect = (
        requests.requests.exceptions.HTTPError())

    with self.assertRaises(requests.requests.exceptions.HTTPError):
      create_udm_events.create_udm_events(
          mock_session, "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", _test_event)

  @mock.patch.object(requests, "AuthorizedSession", autospec=True)
  @mock.patch.object(requests.requests, "Response", autospec=True)
  def test_happy_path(self, mock_response, mock_session):
    # A 200 response must complete without raising.
    mock_session.request.return_value = mock_response
    type(mock_response).status_code = mock.PropertyMock(return_value=200)

    create_udm_events.create_udm_events(
        mock_session, "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", _test_event)
# Allow running this test module directly: ``python create_udm_events_test.py``.
if __name__ == "__main__":
  unittest.main()
| {
"content_hash": "fe8bc78e87222b769f98838afae5d37e",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 76,
"avg_line_length": 30.705128205128204,
"alnum_prop": 0.631732776617954,
"repo_name": "chronicle/api-samples-python",
"id": "0b0ff55f4a6e58eef6191864e3642a9f208a1193",
"size": "2971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ingestion/create_udm_events_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "556471"
}
],
"symlink_target": ""
} |
"""Atom classes for describing dataset contents."""
# Imports
# =======
import re
import sys
import inspect
import cPickle
import numpy
from tables.utils import SizeType
from tables.misc.enum import Enum
import warnings
from tables.exceptions import FlavorWarning
# Public variables
# ================
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
# The three registries below start out empty and are populated as a side
# effect of every atom class definition (see the ``MetaAtom`` metaclass
# further down in this module).
all_types = set()  # filled as atom classes are created
"""Set of all PyTables types."""
atom_map = {}  # filled as atom classes are created
"""Maps atom kinds to item sizes and atom classes.
If there is a fixed set of possible item sizes for a given kind, the
kind maps to another mapping from item size in bytes to atom class.
Otherwise, the kind maps directly to the atom class.
"""
deftype_from_kind = {}  # filled as atom classes are created
"""Maps atom kinds to their default atom type (if any)."""
# Public functions
# ================
# Matches a PyTables type name: a lowercase kind (group 1) optionally
# followed by a precision in bits (group 2, possibly empty).
_type_re = re.compile(r'^([a-z]+)([0-9]*)$')
def split_type(type):
    """Split a PyTables type into a PyTables kind and an item size.

    Returns a tuple of (kind, itemsize). If no item size is present in the
    type (in the form of a precision), the returned item size is None::

    >>> split_type('int32')
    ('int', 4)
    >>> split_type('string')
    ('string', None)
    >>> split_type('int20')
    Traceback (most recent call last):
    ...
    ValueError: precision must be a multiple of 8: 20
    >>> split_type('foo bar')
    Traceback (most recent call last):
    ...
    ValueError: malformed type: 'foo bar'
    """
    # A type name is a lowercase kind optionally followed by a precision
    # expressed in *bits*.
    match = re.match(r'^([a-z]+)([0-9]*)$', type)
    if match is None:
        raise ValueError("malformed type: %r" % type)
    kind, precision = match.groups()
    if not precision:
        return (kind, None)
    nbits = int(precision)
    # Item sizes are byte counts, so the precision must be divisible by 8.
    itemsize, remainder = divmod(nbits, 8)
    if remainder:
        raise ValueError("precision must be a multiple of 8: %d" % nbits)
    return (kind, itemsize)
# Private functions
# =================
def _invalid_itemsize_error(kind, itemsize, itemsizes):
isizes = sorted(itemsizes)
return ValueError("invalid item size for kind ``%s``: %r; "
"it must be one of ``%r``"
% (kind, itemsize, isizes))
def _abstract_atom_init(deftype, defvalue):
    """Return an ``__init__`` for an abstract `Atom` class.

    The generated constructor looks up the concrete atom class for the
    requested item size in `atom_map` and *morphs* the instance into it,
    re-running that class' constructor.
    """
    _, default_itemsize = split_type(deftype)
    def __init__(self, itemsize=default_itemsize, shape=(), dflt=defvalue):
        assert self.kind in atom_map
        sizemap = atom_map[self.kind]
        if itemsize not in sizemap:
            raise _invalid_itemsize_error(self.kind, itemsize, sizemap)
        concrete_class = sizemap[itemsize]
        # Turn this instance into one of the concrete class and let that
        # class finish the initialization.
        self.__class__ = concrete_class
        concrete_class.__init__(self, shape, dflt)
    return __init__
def _normalize_shape(shape):
    """Check that `shape` is safe to be used and return it as a tuple.

    Accepts a positive integer N (shorthand for the 1-d shape ``(N,)``)
    or any sequence of dimensions; every dimension is converted to
    `SizeType` in the returned tuple.
    """
    if isinstance(shape, (int, numpy.integer, long)):
        if shape < 1:
            raise ValueError("shape value must be greater than 0: %d"
                             % shape)
        dims = (shape,)  # N is a shorthand for (N,)
    else:
        try:
            dims = tuple(shape)
        except TypeError:
            raise TypeError("shape must be an integer or sequence: %r"
                            % (shape,))
    ## XXX Get from HDF5 library if possible.
    # HDF5 does not support ranks greater than 32.
    if len(dims) > 32:
        raise ValueError(
            "shapes with rank > 32 are not supported: %r" % (dims,))
    return tuple(SizeType(d) for d in dims)
def _normalize_default(value, dtype):
"""Return `value` as a valid default of NumPy type `dtype`."""
# Create NumPy objects as defaults
# This is better in order to serialize them as attributes
if value is None:
value = 0
basedtype = dtype.base
try:
default = numpy.array(value, dtype=basedtype)
except ValueError:
array = numpy.array(value)
if array.shape != basedtype.shape:
raise
# Maybe nested dtype with "scalar" value.
default = numpy.array(value, dtype=basedtype.base)
# 0-dim arrays will be representented as NumPy scalars
# (PyTables attribute convention)
if default.shape == ():
default = default[()]
return default
def _cmp_dispatcher(other_method_name):
"""Dispatch comparisons to a method of the *other* object.
Returns a new *rich comparison* method which dispatches calls to
the method `other_method_name` of the *other* object. If there is
no such method in the object, ``False`` is returned.
This is part of the implementation of a double dispatch pattern.
"""
def dispatched_cmp(self, other):
try:
other_method = getattr(other, other_method_name)
except AttributeError:
return False
return other_method(self)
return dispatched_cmp
# Helper classes
# ==============
class MetaAtom(type):
    """Atom metaclass.

    This metaclass ensures that data about atom classes gets inserted
    into the suitable module-level registries (`all_types`, `atom_map`
    and `deftype_from_kind`) at class-creation time.
    """
    def __init__(class_, name, bases, dict_):
        super(MetaAtom, class_).__init__(name, bases, dict_)
        # Only attributes defined *directly* on the class being created
        # are inspected here (``dict_``), not inherited ones.
        kind = dict_.get('kind')
        itemsize = dict_.get('itemsize')
        type_ = dict_.get('type')
        deftype = dict_.get('_deftype')
        # Record the default type of this kind (e.g. 'int' -> 'int32').
        if kind and deftype:
            deftype_from_kind[kind] = deftype
        if type_:
            all_types.add(type_)
        if kind and itemsize and not hasattr(itemsize, '__int__'):
            # Atom classes with a non-fixed item size do have an
            # ``itemsize``, but it's not a number (e.g. property).
            # Such a kind maps directly to its (single) atom class.
            atom_map[kind] = class_
            return
        if kind:  # first definition of kind, make new entry
            atom_map[kind] = {}
        if itemsize and hasattr(itemsize, '__int__'):  # fixed
            kind = class_.kind  # maybe from superclasses
            atom_map[kind][int(itemsize)] = class_
# Atom classes
# ============
class Atom(object):
    """Defines the type of atomic cells stored in a dataset.

    The meaning of *atomic* is that individual elements of a cell can
    not be extracted directly by indexing (i.e. __getitem__()) the
    dataset; e.g. if a dataset has shape (2, 2) and its atoms have
    shape (3,), to get the third element of the cell at (1, 0) one
    should use dataset[1,0][2] instead of dataset[1,0,2].

    The Atom class is meant to declare the different properties of the
    *base element* (also known as *atom*) of CArray, EArray and
    VLArray datasets, although they are also used to describe the base
    elements of Array datasets. Atoms have the property that their
    length is always the same. However, you can grow datasets along
    the extensible dimension in the case of EArray or put a variable
    number of them on a VLArray row. Moreover, they are not restricted
    to scalar values, and they can be *fully multidimensional
    objects*.

    Parameters
    ----------
    itemsize : int
        For types with a non-fixed size, this sets the size in
        bytes of individual items in the atom.
    shape : tuple
        Sets the shape of the atom. An integer shape of
        N is equivalent to the tuple (N,).
    dflt
        Sets the default value for the atom.

    The following are the public methods and attributes of the Atom class.

    Notes
    -----
    A series of descendant classes are offered in order to make the
    use of these element descriptions easier. You should use a
    particular Atom descendant class whenever you know the exact type
    you will need when writing your code. Otherwise, you may use one
    of the Atom.from_*() factory Methods.

    .. rubric:: Atom attributes

    .. attribute:: dflt

        The default value of the atom.
        If the user does not supply a value for an element while
        filling a dataset, this default value will be written to disk.
        If the user supplies a scalar value for a multidimensional
        atom, this value is automatically *broadcast* to all the items
        in the atom cell. If dflt is not supplied, an appropriate zero
        value (or *null* string) will be chosen by default. Please
        note that default values are kept internally as NumPy objects.

    .. attribute:: dtype

        The NumPy dtype that most closely matches this atom.

    .. attribute:: itemsize

        Size in bytes of a single item in the atom.
        Specially useful for atoms of the string kind.

    .. attribute:: kind

        The PyTables kind of the atom (a string).

    .. attribute:: shape

        The shape of the atom (a tuple for scalar atoms).

    .. attribute:: type

        The PyTables type of the atom (a string).

    Atoms can be compared with atoms and other objects for
    strict (in)equality without having to compare individual
    attributes::

    >>> atom1 = StringAtom(itemsize=10)  # same as ``atom2``
    >>> atom2 = Atom.from_kind('string', 10)  # same as ``atom1``
    >>> atom3 = IntAtom()
    >>> atom1 == 'foo'
    False
    >>> atom1 == atom2
    True
    >>> atom2 != atom1
    False
    >>> atom1 == atom3
    False
    >>> atom3 != atom2
    True
    """
    # Register data for all subclasses.
    __metaclass__ = MetaAtom
    # Class methods
    # ~~~~~~~~~~~~~
    @classmethod
    def prefix(class_):
        """Return the atom class prefix."""
        # E.g. ``Int16Atom`` -> ``Int16``; assumes the class name ends
        # in 'Atom', which holds for every class defined in this module.
        cname = class_.__name__
        return cname[:cname.rfind('Atom')]
    @classmethod
    def from_sctype(class_, sctype, shape=(), dflt=None):
        """Create an Atom from a NumPy scalar type sctype.

        Optional shape and default value may be specified as the
        shape and dflt
        arguments, respectively. Information in the
        sctype not represented in an Atom is ignored::

        >>> import numpy
        >>> Atom.from_sctype(numpy.int16, shape=(2, 2))
        Int16Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_sctype('S5', dflt='hello')
        Traceback (most recent call last):
        ...
        ValueError: unknown NumPy scalar type: 'S5'
        >>> Atom.from_sctype('Float64')
        Float64Atom(shape=(), dflt=0.0)
        """
        # Accept either an actual NumPy scalar type or a name resolvable
        # through ``numpy.sctypeDict``.
        if (not isinstance(sctype, type)
                or not issubclass(sctype, numpy.generic)):
            if sctype not in numpy.sctypeDict:
                raise ValueError("unknown NumPy scalar type: %r" % (sctype,))
            sctype = numpy.sctypeDict[sctype]
        return class_.from_dtype(numpy.dtype((sctype, shape)), dflt)
    @classmethod
    def from_dtype(class_, dtype, dflt=None):
        """Create an Atom from a NumPy dtype.

        An optional default value may be specified as the dflt
        argument. Information in the dtype not represented in an Atom is
        ignored::

        >>> import numpy
        >>> Atom.from_dtype(numpy.dtype((numpy.int16, (2, 2))))
        Int16Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_dtype(numpy.dtype('Float64'))
        Float64Atom(shape=(), dflt=0.0)

        Note: for easier use in Python 3, where all strings lead to the
        Unicode dtype, this dtype will also generate a StringAtom. Since
        this is only viable for strings that are castable as ascii, a
        warning is issued.

        >>> Atom.from_dtype(numpy.dtype('U20'))  # doctest: +SKIP
        Atom.py:392: FlavorWarning: support for unicode type is very limited,
        and only works for strings that can be cast as ascii
        StringAtom(itemsize=20, shape=(), dflt=b'')
        """
        basedtype = dtype.base
        if basedtype.names:
            raise ValueError("compound data types are not supported: %r"
                             % dtype)
        if basedtype.shape != ():
            raise ValueError("nested data types are not supported: %r"
                             % dtype)
        if basedtype.kind == 'S':  # can not reuse something like 'string80'
            itemsize = basedtype.itemsize
            return class_.from_kind('string', itemsize, dtype.shape, dflt)
        elif basedtype.kind == 'U':
            # workaround for unicode type (standard string type in Python 3)
            warnings.warn("support for unicode type is very limited, "
                          "and only works for strings that can be cast as ascii", FlavorWarning)
            # NumPy stores unicode as UCS-4: 4 bytes per character.
            itemsize = basedtype.itemsize // 4
            assert str(itemsize) in basedtype.str, "something went wrong in handling unicode."
            return class_.from_kind('string', itemsize, dtype.shape, dflt)
        # Most NumPy types have direct correspondence with PyTables types.
        return class_.from_type(basedtype.name, dtype.shape, dflt)
    @classmethod
    def from_type(class_, type, shape=(), dflt=None):
        """Create an Atom from a PyTables type.

        Optional shape and default value may be specified as the
        shape and dflt arguments, respectively::

        >>> Atom.from_type('bool')
        BoolAtom(shape=(), dflt=False)
        >>> Atom.from_type('int16', shape=(2, 2))
        Int16Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_type('string40', dflt='hello')
        Traceback (most recent call last):
        ...
        ValueError: unknown type: 'string40'
        >>> Atom.from_type('Float64')
        Traceback (most recent call last):
        ...
        ValueError: unknown type: 'Float64'
        """
        if type not in all_types:
            raise ValueError("unknown type: %r" % (type,))
        kind, itemsize = split_type(type)
        return class_.from_kind(kind, itemsize, shape, dflt)
    @classmethod
    def from_kind(class_, kind, itemsize=None, shape=(), dflt=None):
        """Create an Atom from a PyTables kind.

        Optional item size, shape and default value may be
        specified as the itemsize, shape and dflt
        arguments, respectively. Bear in mind that not all atoms support
        a default item size::

        >>> Atom.from_kind('int', itemsize=2, shape=(2, 2))
        Int16Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_kind('int', shape=(2, 2))
        Int32Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_kind('int', shape=1)
        Int32Atom(shape=(1,), dflt=0)
        >>> Atom.from_kind('string', dflt=b'hello')
        Traceback (most recent call last):
        ...
        ValueError: no default item size for kind ``string``
        >>> Atom.from_kind('Float')
        Traceback (most recent call last):
        ...
        ValueError: unknown kind: 'Float'

        Moreover, some kinds with atypical constructor signatures
        are not supported; you need to use the proper
        constructor::

        >>> Atom.from_kind('enum')  #doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        ValueError: the ``enum`` kind is not supported...
        """
        kwargs = {'shape': shape}
        if kind not in atom_map:
            raise ValueError("unknown kind: %r" % (kind,))
        # This incompatibility detection may get out-of-date and is
        # too hard-wired, but I couldn't come up with something
        # smarter. -- Ivan (2007-02-08)
        if kind in ['enum']:
            raise ValueError("the ``%s`` kind is not supported; "
                             "please use the appropriate constructor"
                             % kind)
        # If no `itemsize` is given, try to get the default type of the
        # kind (which has a fixed item size).
        if itemsize is None:
            if kind not in deftype_from_kind:
                raise ValueError("no default item size for kind ``%s``"
                                 % kind)
            type_ = deftype_from_kind[kind]
            kind, itemsize = split_type(type_)
        kdata = atom_map[kind]
        # Look up the class and set a possible item size.
        # ``atom_map[kind]`` is either an atom class (non-fixed item
        # size, e.g. strings) or a dict mapping item size -> class.
        if hasattr(kdata, 'kind'):  # atom class: non-fixed item size
            atomclass = kdata
            kwargs['itemsize'] = itemsize
        else:  # dictionary: fixed item size
            if itemsize not in kdata:
                raise _invalid_itemsize_error(kind, itemsize, kdata)
            atomclass = kdata[itemsize]
        # Only set a `dflt` argument if given (`None` may not be understood).
        if dflt is not None:
            kwargs['dflt'] = dflt
        return atomclass(**kwargs)
    # Properties
    # ~~~~~~~~~~
    @property
    def size(self):
        """Total size in bytes of the atom."""
        return self.dtype.itemsize
    @property
    def recarrtype(self):
        """String type to be used in numpy.rec.array()."""
        # E.g. ``(2, 2)i2`` -- shape string plus base type code (without
        # the leading byte-order character).
        return str(self.dtype.shape) + self.dtype.base.str[1:]
    @property
    def ndim(self):
        """The number of dimensions of the atom.

        .. versionadded:: 2.4"""
        return len(self.shape)
    # Special methods
    # ~~~~~~~~~~~~~~~
    def __init__(self, nptype, shape, dflt):
        # Abstract classes (IntAtom, FloatAtom, ...) define no ``type``
        # attribute and must not be instantiated through this path.
        if not hasattr(self, 'type'):
            raise NotImplementedError("``%s`` is an abstract class; "
                                      "please use one of its subclasses"
                                      % self.__class__.__name__)
        self.shape = shape = _normalize_shape(shape)
        """The shape of the atom (a tuple for scalar atoms)."""
        # Curiously enough, NumPy isn't generally able to accept NumPy
        # integers in a shape. ;(
        npshape = tuple(int(s) for s in shape)
        self.dtype = dtype = numpy.dtype((nptype, npshape))
        """The NumPy dtype that most closely matches this atom."""
        self.dflt = _normalize_default(dflt, dtype)
        """The default value of the atom.

        If the user does not supply a value for an element while
        filling a dataset, this default value will be written to
        disk. If the user supplies a scalar value for a
        multidimensional atom, this value is automatically *broadcast*
        to all the items in the atom cell. If dflt is not supplied, an
        appropriate zero value (or *null* string) will be chosen by
        default. Please note that default values are kept internally
        as NumPy objects."""
    def __repr__(self):
        args = 'shape=%s, dflt=%r' % (self.shape, self.dflt)
        # Atoms with a non-fixed item size (e.g. strings) also show it.
        if not hasattr(self.__class__.itemsize, '__int__'):  # non-fixed
            args = 'itemsize=%s, %s' % (self.itemsize, args)
        return '%s(%s)' % (self.__class__.__name__, args)
    # Equality is double-dispatched to the *other* operand; plain atoms
    # answer through ``_is_equal_to_atom()`` below.
    __eq__ = _cmp_dispatcher('_is_equal_to_atom')
    def __ne__(self, other):
        return not self.__eq__(other)
    # XXX: API incompatible change for PyTables 3 line
    # Overriding __eq__ blocks inheritance of __hash__ in 3.x
    # def __hash__(self):
    #     return hash((self.__class__, self.type, self.shape, self.itemsize,
    #                  self.dflt))
    # Public methods
    # ~~~~~~~~~~~~~~
    def copy(self, **override):
        """Get a copy of the atom, possibly overriding some arguments.

        Constructor arguments to be overridden must be passed as
        keyword arguments::

        >>> atom1 = Int32Atom(shape=12)
        >>> atom2 = atom1.copy()
        >>> print(atom1)
        Int32Atom(shape=(12,), dflt=0)
        >>> print(atom2)
        Int32Atom(shape=(12,), dflt=0)
        >>> atom1 is atom2
        False
        >>> atom3 = atom1.copy(shape=(2, 2))
        >>> print(atom3)
        Int32Atom(shape=(2, 2), dflt=0)
        >>> atom1.copy(foobar=42)
        Traceback (most recent call last):
        ...
        TypeError: __init__() got an unexpected keyword argument 'foobar'
        """
        newargs = self._get_init_args()
        newargs.update(override)
        return self.__class__(**newargs)
    # Private methods
    # ~~~~~~~~~~~~~~~
    def _get_init_args(self):
        """Get a dictionary of instance constructor arguments.

        This implementation works on classes which use the same names
        for both constructor arguments and instance attributes.
        """
        # NOTE(review): ``inspect.getargspec`` was removed in Python 3.11;
        # this module targets legacy Python (see ``cPickle`` import above).
        return dict((arg, getattr(self, arg))
                    for arg in inspect.getargspec(self.__init__)[0]
                    if arg != 'self')
    def _is_equal_to_atom(self, atom):
        """Is this object equal to the given `atom`?"""
        return (self.type == atom.type and self.shape == atom.shape
                and self.itemsize == atom.itemsize
                and numpy.all(self.dflt == atom.dflt))
class StringAtom(Atom):
    """Defines an atom of type string.

    The item size is the *maximum* length in characters of strings.
    """
    kind = 'string'
    type = 'string'
    _defvalue = b''
    @property
    def itemsize(self):
        """Size in bytes of a single item in the atom."""
        return self.dtype.base.itemsize
    def __init__(self, itemsize, shape=(), dflt=_defvalue):
        # Accept anything coercible to a non-negative integer.
        if hasattr(itemsize, '__int__') and int(itemsize) >= 0:
            Atom.__init__(self, 'S%d' % itemsize, shape, dflt)
        else:
            raise ValueError("invalid item size for kind ``%s``: %r; "
                             "it must be a positive integer"
                             % ('string', itemsize))
class BoolAtom(Atom):
    """Defines an atom of type bool."""
    kind = 'bool'
    # Fixed item size: booleans are stored as a single byte.
    itemsize = 1
    type = 'bool'
    _deftype = 'bool8'
    _defvalue = False
    def __init__(self, shape=(), dflt=_defvalue):
        Atom.__init__(self, self.type, shape, dflt)
class IntAtom(Atom):
    """Defines an atom of a signed integral type (int kind)."""
    kind = 'int'
    signed = True
    _deftype = 'int32'
    _defvalue = 0
    # Abstract constructor: morphs the instance into the concrete
    # fixed-width class (Int8Atom ... Int64Atom) for the item size.
    __init__ = _abstract_atom_init(_deftype, _defvalue)
class UIntAtom(Atom):
    """Defines an atom of an unsigned integral type (uint kind)."""
    kind = 'uint'
    signed = False
    _deftype = 'uint32'
    _defvalue = 0
    # Abstract constructor: morphs the instance into the concrete
    # fixed-width class (UInt8Atom ... UInt64Atom) for the item size.
    __init__ = _abstract_atom_init(_deftype, _defvalue)
class FloatAtom(Atom):
    """Defines an atom of a floating point type (float kind)."""
    kind = 'float'
    _deftype = 'float64'
    _defvalue = 0.0
    # Abstract constructor: morphs the instance into the concrete
    # fixed-width class (e.g. Float32Atom, Float64Atom) for the item size.
    __init__ = _abstract_atom_init(_deftype, _defvalue)
def _create_numeric_class(baseclass, itemsize):
    """Create a numeric atom class with the given `baseclass` and an
    `itemsize` (in bytes).

    The generated class name and PyTables type encode the precision in
    bits, e.g. ``(IntAtom, 2)`` yields ``Int16Atom`` with type 'int16'.
    """
    prefix = '%s%d' % (baseclass.prefix(), itemsize * 8)
    type_ = prefix.lower()
    def __init__(self, shape=(), dflt=baseclass._defvalue):
        Atom.__init__(self, self.type, shape, dflt)
    classdict = {
        'itemsize': itemsize,
        'type': type_,
        '__doc__': "Defines an atom of type ``%s``." % type_,
        '__init__': __init__,
    }
    return type('%sAtom' % prefix, (baseclass,), classdict)
# Generate and register the concrete fixed-width numeric atom classes.
# The half/extended-precision float classes are created only when the
# underlying NumPy build provides the corresponding scalar type.
Int8Atom = _create_numeric_class(IntAtom, 1)
Int16Atom = _create_numeric_class(IntAtom, 2)
Int32Atom = _create_numeric_class(IntAtom, 4)
Int64Atom = _create_numeric_class(IntAtom, 8)
UInt8Atom = _create_numeric_class(UIntAtom, 1)
UInt16Atom = _create_numeric_class(UIntAtom, 2)
UInt32Atom = _create_numeric_class(UIntAtom, 4)
UInt64Atom = _create_numeric_class(UIntAtom, 8)
if hasattr(numpy, 'float16'):
    Float16Atom = _create_numeric_class(FloatAtom, 2)
Float32Atom = _create_numeric_class(FloatAtom, 4)
Float64Atom = _create_numeric_class(FloatAtom, 8)
if hasattr(numpy, 'float96'):
    Float96Atom = _create_numeric_class(FloatAtom, 12)
if hasattr(numpy, 'float128'):
    Float128Atom = _create_numeric_class(FloatAtom, 16)
class ComplexAtom(Atom):
    """Defines an atom of kind complex.

    Allowed item sizes are 8 (single precision) and 16 (double precision). This
    class must be used instead of more concrete ones to avoid confusions with
    numarray-like precision specifications used in PyTables 1.X.
    """
    # This definition is a little more complex (no pun intended)
    # because, although the complex kind is a normal numerical one,
    # the usage of bottom-level classes is artificially forbidden.
    # Everything will be back to normality when people has stopped
    # using the old bottom-level complex classes.
    kind = 'complex'
    _deftype = 'complex128'
    _defvalue = 0j
    # Allowed item sizes in bytes; possibly extended below at class
    # creation time, depending on the NumPy build.
    _isizes = [8, 16]
    @property
    def itemsize(self):
        """Size in bytes of a single item in the atom."""
        return self.dtype.base.itemsize
    # Only instances have a `type` attribute, so complex types must be
    # registered by hand.
    all_types.add('complex64')
    all_types.add('complex128')
    if hasattr(numpy, 'complex192'):
        all_types.add('complex192')
        _isizes.append(24)
    if hasattr(numpy, 'complex256'):
        all_types.add('complex256')
        _isizes.append(32)
    def __init__(self, itemsize, shape=(), dflt=_defvalue):
        if itemsize not in self._isizes:
            raise _invalid_itemsize_error('complex', itemsize, self._isizes)
        # The PyTables type encodes the precision in bits, e.g. 'complex128'.
        self.type = '%s%d' % (self.kind, itemsize * 8)
        Atom.__init__(self, self.type, shape, dflt)
class _ComplexErrorAtom(ComplexAtom):
    """Reminds the user to stop using the old complex atom names."""
    __metaclass__ = type  # do not register anything about this class
    def __init__(self, shape=(), dflt=ComplexAtom._defvalue):
        # Unconditionally fail: these names are kept only so that old
        # code gets a migration hint instead of a NameError.
        raise TypeError(
            "to avoid confusions with PyTables 1.X complex atom names, "
            "please use ``ComplexAtom(itemsize=N)``, "
            "where N=8 for single precision complex atoms, "
            "and N=16 for double precision complex atoms")
# Deprecated numarray-style complex atom names: constructing any of them
# raises a TypeError pointing users at ``ComplexAtom(itemsize=N)``.
Complex32Atom = Complex64Atom = Complex128Atom = _ComplexErrorAtom
if hasattr(numpy, 'complex192'):
    Complex192Atom = _ComplexErrorAtom
if hasattr(numpy, 'complex256'):
    Complex256Atom = _ComplexErrorAtom
class TimeAtom(Atom):
    """Defines an atom of time type (time kind).

    There are two distinct supported types of time: a 32 bit integer value and
    a 64 bit floating point value. Both of them reflect the number of seconds
    since the Unix epoch. This atom has the property of being stored using the
    HDF5 time datatypes.
    """
    kind = 'time'
    _deftype = 'time32'
    _defvalue = 0
    # Abstract constructor: morphs the instance into Time32Atom or
    # Time64Atom according to the requested item size.
    __init__ = _abstract_atom_init(_deftype, _defvalue)
class Time32Atom(TimeAtom):
    """Defines an atom of type time32."""
    itemsize = 4
    type = 'time32'
    _defvalue = 0
    def __init__(self, shape=(), dflt=_defvalue):
        # Stored in memory as a plain 32-bit integer.
        Atom.__init__(self, 'int32', shape, dflt)
class Time64Atom(TimeAtom):
    """Defines an atom of type time64."""
    itemsize = 8
    type = 'time64'
    _defvalue = 0.0
    def __init__(self, shape=(), dflt=_defvalue):
        # Stored in memory as a 64-bit floating point value.
        Atom.__init__(self, 'float64', shape, dflt)
class EnumAtom(Atom):
    """Description of an atom of an enumerated type.

    Instances of this class describe the atom type used to store enumerated
    values. Those values belong to an enumerated type, defined by the first
    argument (enum) in the constructor of the atom, which accepts the same
    kinds of arguments as the Enum class (see :ref:`EnumClassDescr`). The
    enumerated type is stored in the enum attribute of the atom.

    A default value must be specified as the second argument (dflt) in the
    constructor; it must be the *name* (a string) of one of the enumerated
    values in the enumerated type. When the atom is created, the corresponding
    concrete value is broadcast and stored in the dflt attribute (setting
    different default values for items in a multidimensional atom is not
    supported yet). If the name does not match any value in the enumerated
    type, a KeyError is raised.

    Another atom must be specified as the base argument in order to determine
    the base type used for storing the values of enumerated values in memory
    and disk. This *storage atom* is kept in the base attribute of the created
    atom. As a shorthand, you may specify a PyTables type instead of the
    storage atom, implying that this has a scalar shape.

    The storage atom should be able to represent each and every concrete value
    in the enumeration. If it is not, a TypeError is raised. The default value
    of the storage atom is ignored.

    The type attribute of enumerated atoms is always enum.

    Enumerated atoms also support comparisons with other objects::

    >>> enum = ['T0', 'T1', 'T2']
    >>> atom1 = EnumAtom(enum, 'T0', 'int8')  # same as ``atom2``
    >>> atom2 = EnumAtom(enum, 'T0', Int8Atom())  # same as ``atom1``
    >>> atom3 = EnumAtom(enum, 'T0', 'int16')
    >>> atom4 = Int8Atom()
    >>> atom1 == enum
    False
    >>> atom1 == atom2
    True
    >>> atom2 != atom1
    False
    >>> atom1 == atom3
    False
    >>> atom1 == atom4
    False
    >>> atom4 != atom1
    True

    Examples
    --------
    The next C enum construction::

        enum myEnum {
            T0,
            T1,
            T2
        };

    would correspond to the following PyTables
    declaration::

    >>> my_enum_atom = EnumAtom(['T0', 'T1', 'T2'], 'T0', 'int32')

    Please note the dflt argument with a value of 'T0'. Since the concrete
    value matching T0 is unknown right now (we have not used explicit concrete
    values), using the name is the only option left for defining a default
    value for the atom.

    The chosen representation of values for this enumerated atom uses unsigned
    32-bit integers, which surely wastes quite a lot of memory. Another size
    could be selected by using the base argument (this time with a full-blown
    storage atom)::

    >>> my_enum_atom = EnumAtom(['T0', 'T1', 'T2'], 'T0', UInt8Atom())

    You can also define multidimensional arrays for data elements::

    >>> my_enum_atom = EnumAtom(
    ...    ['T0', 'T1', 'T2'], 'T0', base='uint32', shape=(3,2))

    for 3x2 arrays of uint32.
    """
    # Registering this class in the class map may be a little wrong,
    # since the ``Atom.from_kind()`` method fails miserably with
    # enumerations, as they don't support an ``itemsize`` argument.
    # However, resetting ``__metaclass__`` to ``type`` doesn't seem to
    # work and I don't feel like creating a subclass of ``MetaAtom``.
    kind = 'enum'
    type = 'enum'
    # Properties
    # ~~~~~~~~~~
    @property
    def itemsize(self):
        """Size in bytes of a single item in the atom."""
        return self.dtype.base.itemsize
    # Private methods
    # ~~~~~~~~~~~~~~~
    def _checkbase(self, base):
        """Check the `base` storage atom.

        Raises TypeError when the storage atom cannot faithfully
        represent every concrete value in the enumeration, and
        NotImplementedError for currently unsupported value kinds.
        """
        if base.kind == 'enum':
            raise TypeError("can not use an enumerated atom "
                            "as a storage atom: %r" % base)
        # Check whether the storage atom can represent concrete values
        # in the enumeration...
        basedtype = base.dtype
        pyvalues = [value for (name, value) in self.enum]
        try:
            npgenvalues = numpy.array(pyvalues)
        except ValueError:
            raise TypeError("concrete values are not uniformly-shaped")
        try:
            npvalues = numpy.array(npgenvalues, dtype=basedtype.base)
        except ValueError:
            raise TypeError("storage atom type is incompatible with "
                            "concrete values in the enumeration")
        if npvalues.shape[1:] != basedtype.shape:
            raise TypeError("storage atom shape does not match that of "
                            "concrete values in the enumeration")
        # Round-tripping through the storage dtype must not lose precision.
        if npvalues.tolist() != npgenvalues.tolist():
            raise TypeError("storage atom type lacks precision for "
                            "concrete values in the enumeration")
        # ...with some implementation limitations.
        if not npvalues.dtype.kind in ['i', 'u']:
            raise NotImplementedError("only integer concrete values "
                                      "are supported for the moment, sorry")
        if len(npvalues.shape) > 1:
            raise NotImplementedError("only scalar concrete values "
                                      "are supported for the moment, sorry")
    def _get_init_args(self):
        """Get a dictionary of instance constructor arguments."""
        return dict(enum=self.enum, dflt=self._defname,
                    base=self.base, shape=self.shape)
    def _is_equal_to_atom(self, atom):
        """Is this object equal to the given `atom`?"""
        # An enumerated atom is never equal to a plain atom.
        return False
    def _is_equal_to_enumatom(self, enumatom):
        """Is this object equal to the given `enumatom`?"""
        return (self.enum == enumatom.enum and self.shape == enumatom.shape
                and numpy.all(self.dflt == enumatom.dflt)
                and self.base == enumatom.base)
    # Special methods
    # ~~~~~~~~~~~~~~~
    def __init__(self, enum, dflt, base, shape=()):
        if not isinstance(enum, Enum):
            enum = Enum(enum)
        self.enum = enum
        # A plain PyTables type name is shorthand for a scalar storage atom.
        if isinstance(base, str):
            base = Atom.from_type(base)
        self._checkbase(base)
        self.base = base
        default = enum[dflt]  # check default value
        self._defname = dflt  # kept for representation purposes
        # These are kept to ease dumping this particular
        # representation of the enumeration to storage.
        names, values = [], []
        for (name, value) in enum:
            names.append(name)
            values.append(value)
        basedtype = self.base.dtype
        self._names = names
        self._values = numpy.array(values, dtype=basedtype.base)
        Atom.__init__(self, basedtype, shape, default)
    def __repr__(self):
        return ('EnumAtom(enum=%r, dflt=%r, base=%r, shape=%r)'
                % (self.enum, self._defname, self.base, self.shape))
    # Equality is double-dispatched: only other enumerated atoms answer
    # through ``_is_equal_to_enumatom()``.
    __eq__ = _cmp_dispatcher('_is_equal_to_enumatom')
    # XXX: API incompatible change for PyTables 3 line
    # Overriding __eq__ blocks inheritance of __hash__ in 3.x
    # def __hash__(self):
    #     return hash((self.__class__, self.enum, self.shape, self.dflt,
    #                  self.base))
# Pseudo-atom classes
# ===================
#
# Now, there come three special classes, `ObjectAtom`, `VLStringAtom`
# and `VLUnicodeAtom`, that actually do not descend from `Atom`, but
# which goal is so similar that they should be described here.
# Pseudo-atoms can only be used with `VLArray` datasets, and they do
# not support multidimensional values, nor multiple values per row.
#
# They can be recognised because they also have ``kind``, ``type`` and
# ``shape`` attributes, but no ``size``, ``itemsize`` or ``dflt``
# ones. Instead, they have a ``base`` atom which defines the elements
# used for storage.
#
# See ``examples/vlarray1.py`` and ``examples/vlarray2.py`` for
# further examples on `VLArray` datasets, including object
# serialization and string management.
class PseudoAtom(object):
    """Base class for pseudo-atoms, usable only in ``VLArray`` nodes.

    A pseudo-atom exposes `kind`, `type` and `shape` attributes like a
    real atom, but has no `size`, `itemsize` nor `dflt` ones.  Storage
    is delegated to a `base` atom which describes the stored elements.
    """
    def toarray(self, object_):
        """Convert an `object_` into an array of base atoms."""
        raise NotImplementedError
    def fromarray(self, array):
        """Convert an `array` of base atoms into an object."""
        raise NotImplementedError
    def __repr__(self):
        return self.__class__.__name__ + '()'
class _BufferedAtom(PseudoAtom):
    """Pseudo-atom which stores data as a buffer (flat array of uints)."""
    shape = ()
    def toarray(self, object_):
        # Flatten the object into a raw buffer, then expose it as a 1-d
        # array of the base atom's dtype (one item per buffer element).
        raw = self._tobuffer(object_)
        return numpy.ndarray(buffer=raw, dtype=self.base.dtype,
                             shape=len(raw))
    def _tobuffer(self, object_):
        """Convert an `object_` into a buffer."""
        raise NotImplementedError
class VLStringAtom(_BufferedAtom):
    """Defines an atom of type ``vlstring``.

    This class describes a *row* of the VLArray class, rather than an atom. It
    differs from the StringAtom class in that you can only add *one instance of
    it to one specific row*, i.e. the :meth:`VLArray.append` method only
    accepts one object when the base atom is of this type.

    Like StringAtom, this class does not make assumptions on the encoding of
    the string, and raw bytes are stored as is. Unicode strings are supported
    as long as no character is out of the ASCII set; otherwise, you will need
    to *explicitly* convert them to strings before you can save them. For full
    Unicode support, using VLUnicodeAtom (see :ref:`VLUnicodeAtom`) is
    recommended.

    Variable-length string atoms do not accept parameters and they cause the
    reads of rows to always return Python strings. You can regard vlstring
    atoms as an easy way to save generic variable length strings.
    """
    kind = 'vlstring'
    type = 'vlstring'
    # Each character is stored as one unsigned byte.
    base = UInt8Atom()
    def _tobuffer(self, object_):
        if not isinstance(object_, basestring):
            raise TypeError("object is not a string: %r" % (object_,))
        return numpy.string_(object_)
    def fromarray(self, array):
        # Reassemble the flat uint8 buffer into a byte string.
        return array.tostring()
class VLUnicodeAtom(_BufferedAtom):
    """Defines an atom of type vlunicode.

    This class describes a *row* of the VLArray class, rather than an atom. It
    is very similar to VLStringAtom (see :ref:`VLStringAtom`), but it stores
    Unicode strings (using 32-bit characters a la UCS-4, so all strings of the
    same length also take up the same space).

    This class does not make assumptions on the encoding of plain input
    strings. Plain strings are supported as long as no character is out of the
    ASCII set; otherwise, you will need to *explicitly* convert them to Unicode
    before you can save them.

    Variable-length Unicode atoms do not accept parameters and they cause the
    reads of rows to always return Python Unicode strings. You can regard
    vlunicode atoms as an easy way to save variable length Unicode strings.
    """
    kind = 'vlunicode'
    type = 'vlunicode'
    # One UCS-4 code point per unsigned 32-bit base atom.
    base = UInt32Atom()

    # On Python 3 -- or on a "narrow" (UCS-2) Python 2 build -- the plain
    # ``_tobuffer`` path below cannot be used, so ``toarray`` itself is
    # overridden at class-creation time.
    if sys.version_info[0] > 2 or sys.maxunicode <= 0xffff:
        # numpy.unicode_ no more implements the buffer interface in Python 3
        #
        # When the Python build is UCS-2, we need to promote the
        # Unicode string to UCS-4. We *must* use a 0-d array since
        # NumPy scalars inherit the UCS-2 encoding from Python (see
        # NumPy ticket #525). Since ``_tobuffer()`` can't return an
        # array, we must override ``toarray()`` itself.
        def toarray(self, object_):
            if not isinstance(object_, basestring):
                raise TypeError("object is not a string: %r" % (object_,))
            ustr = unicode(object_)
            # A 0-d 'U' array forces a UCS-4 representation of the string.
            uarr = numpy.array(ustr, dtype='U')
            return numpy.ndarray(
                buffer=uarr, dtype=self.base.dtype, shape=len(ustr))

    def _tobuffer(self, object_):
        # This works (and is used) only with UCS-4 builds of Python,
        # where the width of the internal representation of a
        # character matches that of the base atoms.
        if not isinstance(object_, basestring):
            raise TypeError("object is not a string: %r" % (object_,))
        return numpy.unicode_(object_)

    def fromarray(self, array):
        length = len(array)
        if length == 0:
            return u''  # ``array.view('U0')`` raises a `TypeError`
        # Reinterpret the flat uint32 buffer as one UCS-4 string.
        return array.view('U%d' % length).item()
class ObjectAtom(_BufferedAtom):
    """Defines an atom of type object.

    This class is meant to fit *any* kind of Python object in a row of a
    VLArray dataset by using pickle behind the scenes. Due to the fact that
    you can not foresee how long will be the output of the pickle
    serialization (i.e. the atom already has a *variable* length), you can only
    fit *one object per row*. However, you can still group several objects in a
    single tuple or list and pass it to the :meth:`VLArray.append` method.

    Object atoms do not accept parameters and they cause the reads of rows to
    always return Python objects. You can regard object atoms as an easy way to
    save an arbitrary number of generic Python objects in a VLArray dataset.
    """
    kind = 'object'
    type = 'object'
    # The pickled payload is stored byte-by-byte in UInt8 base atoms.
    base = UInt8Atom()

    def _tobuffer(self, object_):
        # Use the highest pickle protocol available for compactness.
        return cPickle.dumps(object_, cPickle.HIGHEST_PROTOCOL)

    def fromarray(self, array):
        # We have to check for an empty array because of a possible
        # bug in HDF5 which makes it claim that a dataset has one
        # record when in fact it is empty.
        if array.size == 0:
            return None
        return cPickle.loads(array.tostring())
| {
"content_hash": "7a5e86ea87be9beb4b828fe2e04be2c3",
"timestamp": "",
"source": "github",
"line_count": 1170,
"max_line_length": 96,
"avg_line_length": 35.212820512820514,
"alnum_prop": 0.6071992038641715,
"repo_name": "joonro/PyTables",
"id": "d9acdd845684381d5bcdc48531b3c88f4ac7df3f",
"size": "41487",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tables/atom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896101"
},
{
"name": "C++",
"bytes": "97380"
},
{
"name": "CMake",
"bytes": "21598"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "4159"
},
{
"name": "Objective-C",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "3322852"
},
{
"name": "Shell",
"bytes": "16985"
}
],
"symlink_target": ""
} |
# TopSpin (Jython) processing script.  CURDATA / GETPAR* / INPUT_DIALOG /
# PUTPAR / RE are TopSpin built-ins injected at runtime -- presumably this
# is run from inside TopSpin; verify before reusing elsewhere.
import sys
import os
import os.path
import subprocess
import JTutils
import os  # NOTE(review): duplicate of the import above -- harmless
dataset = CURDATA()
# Number of echoes to process: loop counter "L 22" minus one.
N = str(int(GETPARSTAT("L 22")) - 1)
LB = GETPAR("LB")      # exponential broadening (see dialog text below)
GB = GETPAR("USERP1")  # gaussian broadening applied to each echo
s = GETPAR("USERP2")   # slope weighting the echoes
cycle = float(GETPARSTAT("P 60"))
if (cycle < 1):  # P60 is not likely to have stored the cycle time then uses historic calculation
    # historic qcpmg.jt cycle calculation
    D3 = float(GETPARSTAT("D 3"))*1e6
    D6 = float(GETPARSTAT("D 6"))*1e6
    P4 = float(GETPARSTAT("P 4"))
    cycle = 2*(D3+D6)+P4
cycle = str(cycle)
# Let the user confirm or override the processing parameters.
result = INPUT_DIALOG("processing parameters",
"""please provide the gaussian broadening (GB) applyied
to each echo, the exponential decay that weight the
different echoes and the number of echoes to sum.""",
["GB=","LB=", "N","slope","cycle"],[GB,LB,N,s,cycle])
(GB, LB, N, s, cycle) = (result[0], result[1], result[2], result[3], result[4])
# Store the (possibly edited) values back into the dataset parameters.
PUTPAR("LB", LB)
PUTPAR("USERP1", GB)
PUTPAR("USERP2", s)
fulldataPATH = JTutils.fullpath(dataset)
# Delegate the actual echo summation to the external CPython script.
opt_args=" -g %s -l %s -n %s -s %s -c %s" % (GB, LB, N, s, cycle)
JTutils.run_CpyBin_script('qcpmgadd2D_.py', opt_args.split()+[fulldataPATH])
RE(dataset)  # reload the dataset in TopSpin
| {
"content_hash": "d2e97ab908b222ffe5bb8273e4d5df17",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 96,
"avg_line_length": 30.18421052631579,
"alnum_prop": 0.6573670444638187,
"repo_name": "jtrebosc/JTutils",
"id": "e3eaa660a53c345087d2f8196078d89b35a38f7e",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TSpy/qcpmgadd2D.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "307974"
}
],
"symlink_target": ""
} |
"""
Pushover platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.pushover/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_TARGET, ATTR_DATA,
BaseNotificationService)
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
# External dependency declaration for this notify platform.
REQUIREMENTS = ['python-pushover==0.2']

_LOGGER = logging.getLogger(__name__)

CONF_USER_KEY = 'user_key'

# Configuration schema: both the Pushover user key and API key are required.
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
    vol.Required(CONF_USER_KEY): cv.string,
    vol.Required(CONF_API_KEY): cv.string,
})
# pylint: disable=unused-variable
def get_service(hass, config):
    """Get the Pushover notification service.

    :param hass: Home Assistant instance (unused here).
    :param config: validated platform config with user key and API key.
    :returns: a PushoverNotificationService, or None when the pushover
        library rejects the supplied API key at construction time.
    """
    from pushover import InitError

    try:
        return PushoverNotificationService(config[CONF_USER_KEY],
                                           config[CONF_API_KEY])
    except InitError:
        # Raised by the pushover library when the API token is invalid.
        _LOGGER.error(
            'Wrong API key supplied. Get it at https://pushover.net')
        return None
# pylint: disable=too-few-public-methods
class PushoverNotificationService(BaseNotificationService):
    """Implement the notification service for Pushover."""

    def __init__(self, user_key, api_token):
        """Initialize the service.

        :param user_key: Pushover user key identifying the recipient.
        :param api_token: Pushover application API token.
        """
        from pushover import Client
        self._user_key = user_key
        self._api_token = api_token
        self.pushover = Client(
            self._user_key, api_token=self._api_token)

    def send_message(self, message='', **kwargs):
        """Send a message to a user."""
        from pushover import RequestError

        # Make a copy and use empty dict if necessary
        data = dict(kwargs.get(ATTR_DATA) or {})

        data['title'] = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)

        # Optional per-message device target (Pushover "device" field).
        target = kwargs.get(ATTR_TARGET)
        if target is not None:
            data['device'] = target

        try:
            self.pushover.send_message(message, **data)
        except ValueError as val_err:
            # Log and swallow bad-value errors reported by the library.
            _LOGGER.error(str(val_err))
        except RequestError:
            _LOGGER.exception('Could not send pushover notification')
| {
"content_hash": "e11c7943398d7da1816a10343be14c24",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 74,
"avg_line_length": 30.47945205479452,
"alnum_prop": 0.6570786516853933,
"repo_name": "leoc/home-assistant",
"id": "c0a067fe918645018c2e253f937ee4c12cc7c94d",
"size": "2225",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/notify/pushover.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1366220"
},
{
"name": "Python",
"bytes": "3636900"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from src.markdown.images import MarkdownImages
class TestMarkdownImages(TestCase):
    """Conversion of DokuWiki ``{{...}}`` image embeds into HTML img tags."""

    def setUp(self):
        self.converter = MarkdownImages()

    def _assert_converts(self, wiki_text, html):
        # Helper: run the converter and compare against the expected HTML.
        self.assertEqual(html, self.converter.convert(wiki_text))

    def test_simple_image_embed(self):
        self._assert_converts("{{img.png}}",
                              "<img style='' src='/img/img.png'>")

    def test_image_in_subdir(self):
        # the double slash in the emitted path is accepted as-is
        self._assert_converts("{{:dir:subdir:img.png}}",
                              "<img style='' src='/img//dir/subdir/img.png'>")

    def test_image_left_aligned(self):
        self._assert_converts("{{ img.png}}",
                              "<img style='float: left;' src='/img/img.png'>")

    def test_image_right_aligned(self):
        self._assert_converts("{{img.png }}",
                              "<img style='float: right;' src='/img/img.png'>")

    def test_image_right_aligned_with_specific_dimensions(self):
        self._assert_converts(
            "{{ img.png?500x400}}",
            "<img style='float: left; width: 500px; height: 400px;' src='/img/img.png'>")

    def test_image_center_aligned(self):
        self._assert_converts(
            "{{ img.png }}",
            "<img style='margin-left: auto; margin-right: auto;' src='/img/img.png'>")

    def test_image_with_specific_dimensions(self):
        self._assert_converts(
            "{{img.png?500x400}}",
            "<img style='width: 500px; height: 400px;' src='/img/img.png'>")

    def test_image_with_specific_width(self):
        self._assert_converts("{{img.png?500}}",
                              "<img style='width: 500px;' src='/img/img.png'>")
| {
"content_hash": "cd5857e576b51c1dad9fa31be23942c9",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 112,
"avg_line_length": 40.833333333333336,
"alnum_prop": 0.6301020408163265,
"repo_name": "wgroeneveld/dokuwiki-to-hugo",
"id": "ee53c50b557e00eaa77dfd4140a424b9d73431c8",
"size": "1960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/markdown/test_images.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "324"
},
{
"name": "Python",
"bytes": "35879"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals, division, absolute_import
import unittest
import panphon
from panphon import distance
feature_model = 'segment'
dim = 24
class TestLevenshtein(unittest.TestCase):
    """Plain Levenshtein distance between IPA transcriptions."""

    def setUp(self):
        self.dist = distance.Distance(feature_model=feature_model)

    def test_trivial1(self):
        # single substitution (p -> pʰ)
        self.assertEqual(1, self.dist.levenshtein_distance('pop', 'pʰop'))

    def test_trivial2(self):
        # two substitutions (p -> pʰ and p -> m)
        self.assertEqual(2, self.dist.levenshtein_distance('pop', 'pʰom'))
class TestDolgoPrime(unittest.TestCase):
    """Dolgo-prime distance: these voicing/vowel variants must cost 0."""

    def setUp(self):
        self.dist = distance.Distance(feature_model=feature_model)

    def test_trivial1(self):
        self.assertEqual(0, self.dist.dolgo_prime_distance('pop', 'bob'))

    def test_trivial2(self):
        self.assertEqual(0, self.dist.dolgo_prime_distance('pop', 'bab'))
class TestUnweightedFeatureEditDist(unittest.TestCase):
    """Unweighted feature edit distance; exact costs are verified after
    rescaling by the feature-vector dimension `dim` (24)."""
    def setUp(self):
        self.dist = distance.Distance(feature_model=feature_model)
    def test_unweighted_substitution_cost(self):
        # one differing feature out of three
        self.assertEqual(self.dist.unweighted_substitution_cost([0, 1, -1], [0, 1, 1]) * 3, 1)
    def test_unweighted_deletion_cost(self):
        self.assertEqual(self.dist.unweighted_deletion_cost([1, -1, 1, 0]) * 4, 3.5)
    def test_trivial1(self):
        self.assertEqual(self.dist.feature_edit_distance('bim', 'pym') * dim, 3)
    def test_trivial2(self):
        self.assertEqual(self.dist.feature_edit_distance('ti', 'tʰi') * dim, 1)
    def test_xsampa(self):
        # X-SAMPA "t_h" spells the same segment as IPA "tʰ"
        self.assertEqual(self.dist.feature_edit_distance('t i', 't_h i', xsampa=True) * dim, 1)
    def test_xsampa2(self):
        self.assertEqual(self.dist.feature_edit_distance('p u n', 'p y n', xsampa=True) * dim, 1)
    def test_xsampa3(self):
        # IPA and X-SAMPA spellings of the same pair must agree exactly
        ipa = self.dist.jt_feature_edit_distance_div_maxlen('kʰin', 'pʰin')
        xs = self.dist.jt_feature_edit_distance_div_maxlen('k_h i n', 'p_h i n', xsampa=True)
        self.assertEqual(ipa, xs)
class TestWeightedFeatureEditDist(unittest.TestCase):
    """Relative ordering of weighted feature edit distances."""

    def setUp(self):
        self.dist = distance.Distance(feature_model=feature_model)

    def test_trivial1(self):
        # a vowel change on top of aspiration must cost more than aspiration alone
        far = self.dist.weighted_feature_edit_distance('ti', 'tʰu')
        near = self.dist.weighted_feature_edit_distance('ti', 'tʰi')
        self.assertGreater(far, near)

    def test_trivial2(self):
        far = self.dist.weighted_feature_edit_distance('ti', 'te')
        near = self.dist.weighted_feature_edit_distance('ti', 'tḭ')
        self.assertGreater(far, near)
class TestHammingFeatureEditDistanceDivMaxlen(unittest.TestCase):
    """Hamming-style feature edit distance normalized by the longer word."""
    def setUp(self):
        self.dist = distance.Distance(feature_model=feature_model)
    def test_hamming_substitution_cost(self):
        # one of three feature slots differs
        self.assertEqual(self.dist.hamming_substitution_cost(['+', '-', '0'], ['0', '-', '0']) * 3, 1)
    def test_trivial1(self):
        self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('pa', 'ba') * dim * 2, 1)
    def test_trivial2(self):
        # an insertion costs a full segment (1), normalized by maxlen 2
        self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('i', 'pi') * 2, 1)
    def test_trivial3(self):
        self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('sɛks', 'ɛɡz'), (1 + (1 / dim) + (1 / dim)) / 4)
    def test_trivial4(self):
        self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('k', 'ɡ'), 1 / dim)
class TestMany(unittest.TestCase):
    """Smoke tests: every public distance method on the minimal pair
    'p'/'b' (one feature apart, hence cost 1/dim where applicable)."""
    def setUp(self):
        self.dist = distance.Distance(feature_model=feature_model)
    def test_fast_levenshtein_distance(self):
        self.assertEqual(self.dist.fast_levenshtein_distance('p', 'b'), 1)
    def test_fast_levenshtein_distance_div_maxlen(self):
        self.assertEqual(self.dist.fast_levenshtein_distance_div_maxlen('p', 'b'), 1)
    def test_dolgo_prime_distance(self):
        self.assertEqual(self.dist.dolgo_prime_distance('p', 'b'), 0)
    def test_dolgo_prime_div_maxlen(self):
        self.assertEqual(self.dist.dolgo_prime_distance_div_maxlen('p', 'b'), 0)
    def test_feature_edit_distance(self):
        self.assertEqual(self.dist.feature_edit_distance('p', 'b'), 1 / dim)
    def test_jt_feature_edit_distance(self):
        self.assertEqual(self.dist.jt_feature_edit_distance('p', 'b'), 1 / dim)
    def test_feature_edit_distance_div_maxlen(self):
        self.assertEqual(self.dist.feature_edit_distance_div_maxlen('p', 'b'), 1 / dim)
    def test_jt_feature_edit_distance_div_maxlen(self):
        self.assertEqual(self.dist.jt_feature_edit_distance_div_maxlen('p', 'b'), 1 / dim)
    def test_hamming_feature_edit_distance(self):
        self.assertEqual(self.dist.hamming_feature_edit_distance('p', 'b'), 1 / dim)
    def test_jt_hamming_feature_edit_distance(self):
        self.assertEqual(self.dist.jt_hamming_feature_edit_distance('p', 'b'), 1 / dim)
    def test_hamming_feature_edit_distance_div_maxlen(self):
        self.assertEqual(self.dist.hamming_feature_edit_distance_div_maxlen('p', 'b'), 1 / dim)
    def test_jt_hamming_feature_edit_distance_div_maxlen(self):
        self.assertEqual(self.dist.jt_hamming_feature_edit_distance_div_maxlen('p', 'b'), 1 / dim)
class TestXSampa(unittest.TestCase):
    """X-SAMPA input handling of the distance functions."""

    def setUp(self):
        self.dist = distance.Distance(feature_model=feature_model)
        self.ft = panphon.FeatureTable()

    def test_feature_edit_distance(self):
        # aspiration ("_h") differs by exactly one feature
        got = self.dist.feature_edit_distance("p_h", "p", xsampa=True)
        self.assertEqual(1 / dim, got)
| {
"content_hash": "3dae33e168c2e4aff2265880dd52625d",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 124,
"avg_line_length": 38.77857142857143,
"alnum_prop": 0.6682630318659053,
"repo_name": "dmort27/panphon",
"id": "6a3009a265d4b8e068d0a3e86e7608ad6f7020ac",
"size": "5465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "panphon/test/test_distance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120798"
},
{
"name": "Shell",
"bytes": "73"
},
{
"name": "TeX",
"bytes": "937"
}
],
"symlink_target": ""
} |
"""
Generic discrete convolution
Description
-----------
This is a manual (not optimized!) implementation of discrete 1D
convolution intended for spectroscopy analysis. The difference with
commonly used methods is the possibility to adapt the convolution
kernel for each convolution point, e.g. change the FWHM of the
Gaussian kernel as a function of the energy scale.
Resources
---------
.. [WPconv] <http://en.wikipedia.org/wiki/Convolution#Discrete_convolution>
.. [Fisher] <http://homepages.inf.ed.ac.uk/rbf/HIPR2/convolve.htm>
.. [GP1202] <http://glowingpython.blogspot.fr/2012/02/convolution-with-numpy.html>
TODO
----
- [] get_ene_index: substitute with more elegant
'np.argmin(np.abs(ene-(x)))'
- [] atan_gamma_fdmnes: define atan_gamma as in FDMNES
"""
import os
import math
import subprocess
from optparse import OptionParser
from datetime import date
from string import Template
import numpy as np
MODNAME = "_math"  # Larch plugin group name (see registerLarchPlugin)
DEBUG = 0  # nonzero: conv() raises IndexError when a window index is missing

# No deps on Larch: used only if you want to access this as Larch plugin
HAS_LARCH = False
try:
    from larch import use_plugin_path
    use_plugin_path("math")
    HAS_LARCH = True
except ImportError:
    pass
# <LINESHAPES> #
def gaussian(x, cen=0, sigma=1, fwhm=False, peak=None):
    """1 dimensional Gaussian function (https://en.wikipedia.org/wiki/Gaussian_function)

    Parameters
    ----------
    x : array
        abscissa values
    cen : [0] center, x0
    sigma : [1] standard deviation, FWHM = 2*sqrt(2*ln(2)) * sigma =~ 2.35482 * sigma
    fwhm : [False] if True, the given sigma is assumed as fwhm and then converted accordingly
    peak : [None] if None, peak = 1 / (sigma * math.sqrt(2*math.pi)) so that
           the distribution integrates to 1
    """
    if fwhm is True:
        # FWHM -> sigma conversion: sigma = FWHM / (2*sqrt(2*ln 2)).
        # BUGFIX: the previous expression `sigma / 2 * math.sqrt(...)`
        # *multiplied* by sqrt(2*ln 2) because of operator precedence.
        sigma = sigma / (2 * math.sqrt(2 * math.log(2)))
    if peak is None:
        # BUGFIX: include the 1/sigma factor so the default-amplitude curve
        # integrates to 1 (matches the `lorentzian` default normalization).
        peak = 1.0 / (sigma * math.sqrt(2 * math.pi))
    return peak * np.exp(-((1.0 * x - cen) ** 2) / (2 * sigma ** 2))
def lorentzian(x, cen=0, gamma=1, peak=None):
    """1 dimensional Lorentzian

    Parameters
    ----------
    x : array
    cen : [0] center, x0
    gamma : [1] half width at half maximum
    peak : [None] if None, peak = 1 / (math.pi*gamma), the distribution integrate to 1
    """
    if peak is None:
        # default amplitude normalizes the distribution to unit area
        peak = 1.0 / (math.pi * gamma)
    reduced = (1.0 * x - cen) / gamma
    return peak / (1.0 + reduced ** 2)
# </LINESHAPES> #
def get_ene_index(ene, cen, hwhm):
    """Return the (imin, imax) indices of `ene` bracketing cen-hwhm and
    cen+hwhm (clamped to the array ends); very similar to index_of in larch.

    On failure a message is printed and (None, None) is returned.
    """
    lo = cen - hwhm
    hi = cen + hwhm
    try:
        if lo <= min(ene):
            imin = 0
        else:
            imin = max(np.where(ene < lo)[0])
        if hi >= max(ene):
            imax = len(ene) - 1
        else:
            imax = min(np.where(ene > hi)[0])
    except Exception:
        print("index not found for {0} +/- {1}".format(cen, hwhm))
        return None, None
    return imin, imax
def lin_gamma(ene, fwhm=1.0, linbroad=None):
    """Constant or piecewise-linear energy-dependent broadening.

    Parameters
    ----------
    ene : energy array in eV
    fwhm : first full width at half maximum in eV
    linbroad : optional 3-element sequence:
               [fwhm2, e1, e2] -- the width ramps linearly from `fwhm`
               at e1 up to `fwhm2` at e2, constant outside that range
    """
    widths = np.ones_like(ene)
    if linbroad is None:
        return widths * fwhm
    try:
        fwhm2 = linbroad[0]
        e1 = linbroad[1]
        e2 = linbroad[2]
    except Exception:
        raise ValueError("wrong format for linbroad")
    for idx, energy in enumerate(ene):
        if energy < e1:
            widths[idx] *= fwhm
        elif energy <= e2:
            # linear interpolation between (e1, fwhm) and (e2, fwhm2)
            widths[idx] *= fwhm + (energy - e1) * (fwhm2 - fwhm) / (e2 - e1)
        elif energy >= e2:
            widths[idx] *= fwhm2
    return widths
def atan_gamma(ene, gamma_hole, gamma_max=15.0, e0=0, eslope=1.0):
    """Arctangent-shaped energy-dependent broadening Gamma(E):

    Gamma(E) = gamma_hole + gamma_max * (arctan((E - e0)/eslope)/pi + 1/2)
    """
    if eslope == 0:
        # avoid division by zero below
        print("Warning: eslope cannot be zero, using default value of 1")
        eslope = 1.0
    step = (np.arctan((ene - e0) / eslope) / np.pi) + 0.5
    return gamma_hole + gamma_max * step
def conv(e, mu, kernel="gaussian", fwhm_e=None, efermi=None):
    """ linear broadening

    Parameters
    ----------
    e : x-axis (energy)
    mu : f(x) to convolve with g(x) kernel, mu(energy)
    kernel : convolution kernel, g(x)
             'gaussian'
             'lorentzian'
    fwhm_e: the full width half maximum in eV for the kernel
            broadening. It is an array of size 'e' with constants or
            an energy-dependent values determined by a function as
            'lin_gamma()' or 'atan_gamma()'
    efermi : [None] when given, mu is zeroed below this energy

    Returns
    -------
    array like `mu` with the convolved spectrum, or 0 on shape mismatch
    """
    f = np.copy(mu)
    z = np.zeros_like(f)
    if efermi is not None:
        # zero the signal below the Fermi energy (nearest index)
        # ief = index_nearest(e, efermi)
        ief = np.argmin(np.abs(e - efermi))
        f[0:ief] *= 0
    if e.shape != fwhm_e.shape:
        # NOTE(review): the default fwhm_e=None raises AttributeError here,
        # so an fwhm_e array is effectively mandatory.
        print("Error: 'fwhm_e' does not have the same shape of 'e'")
        return 0
    # linar fit upper part of the spectrum to avoid border effects
    # polyfit => pf
    lpf = int(len(e) / 2)
    cpf = np.polyfit(e[-lpf:], f[-lpf:], 1)
    fpf = np.poly1d(cpf)
    # extend upper energy border to 3*fhwm_e[-1]
    estep = e[-1] - e[-2]
    eup = np.append(e, np.arange(e[-1] + estep, e[-1] + 3 * fwhm_e[-1], estep))
    for n in range(len(f)):
        # from now on I change e with eup
        # convolution window: +/- 1.5 FWHM around the current point
        eimin, eimax = get_ene_index(eup, eup[n], 1.5 * fwhm_e[n])
        if (eimin is None) or (eimax is None):
            # NOTE(review): with DEBUG falsy this falls through with
            # eimin/eimax None and the slice below fails -- confirm.
            if DEBUG:
                raise IndexError("e[{0}]".format(n))
        # force an odd number of kernel points, centered at eup[n]
        if len(range(eimin, eimax)) % 2 == 0:
            kx = eup[eimin:eimax + 1]  # odd range centered at the convolution point
        else:
            kx = eup[eimin:eimax]
        # kernel ###
        hwhm = fwhm_e[n] / 2.0
        if "gauss" in kernel.lower():
            # NOTE(review): hwhm is passed as the Gaussian `sigma`, not
            # converted via fwhm -- the later normalization hides scaling
            ky = gaussian(kx, cen=eup[n], sigma=hwhm)
        elif "lor" in kernel.lower():
            ky = lorentzian(kx, cen=eup[n], gamma=hwhm)
        else:
            raise ValueError("convolution kernel '{0}' not implemented".format(kernel))
        ky = ky / ky.sum()  # normalize
        zn = 0
        lk = int(len(kx))
        # discrete convolution; points beyond the data range fall back to
        # the linear extrapolation fpf() instead of f[]
        for mf, mg in zip(range(-int(lk / 2), int(lk / 2) + 1), range(lk)):
            if ((n + mf) >= 0) and ((n + mf) < len(f)):
                zn += f[n + mf] * ky[mg]
            elif (n + mf) >= 0:
                zn += fpf(eup[n + mf]) * ky[mg]
        z[n] = zn
    return z
def glinbroad(e, mu, fwhm_e=None, efermi=None, _larch=None):
    """gaussian linear convolution in Larch """
    # Larch-facing wrapper around conv() with a Gaussian kernel; the
    # `_larch` interpreter handle must be supplied by the Larch runtime.
    if _larch is None:
        raise Warning("larch broken?")
    return conv(e, mu, kernel="gaussian", fwhm_e=fwhm_e, efermi=efermi)
# re-export conv()'s parameter documentation on the wrapper
glinbroad.__doc__ = conv.__doc__
# CONVOLUTION WITH FDMNES VIA SYSTEM CALL #
class FdmnesConv(object):
    """ Performs convolution with FDMNES within Python

    Writes ``fdmfile.txt`` and ``convfile.txt`` into the current working
    directory and then calls the external ``fdmnes`` executable (which
    must be on the PATH -- see :meth:`run`).
    """
    def __init__(self, opts=None, calcroot=None, fn_in=None, fn_out=None):
        """
        Parameters
        ----------
        opts : dict, optional
            complete options dictionary; a default one is built when None
        calcroot : str, optional
            root name of the calculation; input becomes ``calcroot.txt``
        fn_in, fn_out : str, optional
            explicit input / output file names (override calcroot-derived)
        """
        if opts is None:
            # Default options.  Each "*_sel" entry holds '' to enable the
            # corresponding FDMNES keyword or '!' to comment it out in
            # the generated input file.
            self.opts = dict(
                creator="FDMNES toolbox",
                today=date.today(),
                calcroot=calcroot,
                fn_in=fn_in,
                fn_out=fn_out,
                fn_ext="txt",
                estart_sel="",
                estart="-20.",
                efermi_sel="",
                efermi="-5.36",
                spin="",
                core_sel="!",
                core="!",
                hole_sel="",
                hole="0.5",
                conv_const="!",
                conv_sel="",
                ecent="25.0",
                elarg="20.0",
                gamma_max="10.0",
                gamma_type="Gamma_fix",
                gauss_sel="",
                gaussian="0.9",
            )
        else:
            self.opts = opts
        if calcroot is not None:
            self.opts["calcroot"] = calcroot
            self.opts["fn_in"] = "{}.{}".format(calcroot, self.opts["fn_ext"])
            self.opts["fn_out"] = "{}_conv{}.{}".format(
                calcroot, self.opts["spin"], self.opts["fn_ext"]
            )
        if fn_in is not None:
            # NOTE(review): fn_in[:-4] assumes a 3-character extension
            self.opts["calcroot"] = fn_in[:-4]
            self.opts["fn_in"] = fn_in
            self.opts["fn_out"] = "{}_conv{}.{}".format(
                fn_in[:-4], self.opts["spin"], self.opts["fn_ext"]
            )
        if fn_out is not None:
            self.opts["fn_out"] = fn_out
        # then check all options
        self.checkopts()
    def checkopts(self):
        """Validate the options and keep dependent entries consistent."""
        if (self.opts["calcroot"] is None) or (self.opts["fn_in"] is None):
            raise NameError("missing 'calcroot' or 'fn_in'")
        if self.opts["estart"] == "!":
            self.opts["estart_sel"] = "!"
        if self.opts["efermi"] == "!":
            self.opts["efermi_sel"] = "!"
        # the spin choice drives the 'Selec_core' keyword
        if self.opts["spin"] == "up":
            self.opts["core_sel"] = ""
            self.opts["core"] = "2 !spin up"
        elif self.opts["spin"] == "down":
            self.opts["core_sel"] = ""
            self.opts["core"] = "1 !spin down"
        elif self.opts["spin"] == "":
            self.opts["core_sel"] = "!"
        elif self.opts["spin"] == "both":
            raise NameError('spin="both" not implemented!')
        else:
            # unrecognized spin value: reset to the spinless default
            self.opts["spin"] = ""
            self.opts["core_sel"] = "!"
            self.opts["core"] = "!"
        if self.opts["hole"] == "!":
            self.opts["hole_sel"] = "!"
        if self.opts["conv_const"] == "!":
            self.opts["conv_sel"] = "!"
        else:
            self.opts["conv_sel"] = ""
        if self.opts["gamma_type"] == "Gamma_fix":
            pass
        elif self.opts["gamma_type"] == "Gamma_var":
            pass
        else:
            raise NameError('gamma_type="Gamma_fix"/"Gamma_var"')
        if self.opts["gaussian"] == "!":
            self.opts["gauss_sel"] = "!"
        else:
            self.opts["gauss_sel"] = ""
        # update the output file name
        self.opts["fn_out"] = "{}_conv{}.{}".format(
            self.opts["calcroot"], self.opts["spin"], self.opts["fn_ext"]
        )
    def setopt(self, opt, value):
        """Set a single option and re-validate the whole option set."""
        self.opts[opt] = value
        self.checkopts()
    def wfdmfile(self):
        """ write a simple fdmfile.txt to enable the convolution
        first makes a copy of previous fdmfile.txt if not already done """
        if os.path.exists("fdmfile.bak"):
            print("fdmfile.bak exists, good")
        else:
            subprocess.call("cp fdmfile.txt fdmfile.bak", shell=True)
            print("copied fdmfile.txt to fmdfile.bak")
        #
        s = Template(
            "!fdmfile.txt automatically created by ${creator} on ${today} (for convolution)\n\
!--------------------------------------------------------------------!\n\
! Number of calculations\n\
1\n\
! FOR CONVOLUTION STEP\n\
convfile.txt\n\
!--------------------------------------------------------------------!\n\
"
        )
        outstr = s.substitute(self.opts)
        f = open("fdmfile.txt", "w")
        f.write(outstr)
        f.close()
    def wconvfile(self):
        """Write convfile.txt containing the FDMNES convolution keywords."""
        s = Template(
            """
!FDMNES convolution file\n\
!created by ${creator} on ${today}\n\
!
Calculation\n\
${fn_in}\n\
Conv_out\n\
${fn_out}\n\
${estart_sel}Estart\n\
${estart_sel}${estart}\n\
${efermi_sel}Efermi\n\
${efermi_sel}${efermi}\n\
${core_sel}Selec_core\n\
${core_sel}${core}\n\
${hole_sel}Gamma_hole\n\
${hole_sel}${hole}\n\
${conv_sel}Convolution\n\
${conv_sel}${ecent} ${elarg} ${gamma_max} !Ecent Elarg Gamma_max\n\
${conv_sel}${gamma_type}\n\
${gauss_sel}Gaussian\n\
${gauss_sel}${gaussian} !Gaussian conv for experimental res\n\
"""
        )
        outstr = s.substitute(self.opts)
        f = open("convfile.txt", "w")
        f.write(outstr)
        f.close()
    def run(self):
        """ runs fdmnes """
        self.wfdmfile()  # write fdmfile.txt
        self.wconvfile()  # write convfile.txt
        try:
            subprocess.call("fdmnes", shell=True)
        except OSError:
            print("check 'fdmnes' executable exists!")
# LARCH PLUGIN #
def registerLarchPlugin():
    """Expose this module's public functions to Larch under MODNAME."""
    exported = {"glinbroad": glinbroad}
    return (MODNAME, exported)
if __name__ == "__main__":
# tests/examples in xraysloth/examples/convolution1D_tests.py
pass
| {
"content_hash": "d61e3852c4afb667f85a6d325919649a",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 100,
"avg_line_length": 31.880102040816325,
"alnum_prop": 0.5258061934864368,
"repo_name": "maurov/xraysloth",
"id": "cf90c05f04b8b8d2072324f7719cb20d4db3c242",
"size": "12543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sloth/math/convolution1D.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "IDL",
"bytes": "882"
},
{
"name": "Jupyter Notebook",
"bytes": "328173"
},
{
"name": "Python",
"bytes": "791348"
},
{
"name": "Shell",
"bytes": "3536"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter Task.task_type into a nullable FK to tasks.TaskType."""

    dependencies = [
        ('tasks', '0007_auto_20151023_2311'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='task_type',
            # null=True allows tasks without a type; the verbose name is
            # French for "task type".
            field=models.ForeignKey(null=True, to='tasks.TaskType', verbose_name='Type de tâche'),
        ),
    ]
| {
"content_hash": "ba5ecfaf5b30f0590cccd6d1ac07d204",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 98,
"avg_line_length": 23.61111111111111,
"alnum_prop": 0.6023529411764705,
"repo_name": "SamuelDauzon/Improllow-up",
"id": "2d8b3c1922a0ef2e3f0b66f71861222714753885",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/migrations/0008_auto_20151023_2312.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20839"
},
{
"name": "Python",
"bytes": "46769"
}
],
"symlink_target": ""
} |
import datetime
import decimal
import itertools
import re
import time
import uuid
import warnings
from operator import itemgetter
from bson import Binary, DBRef, ObjectId, SON
import gridfs
import pymongo
import six
try:
import dateutil
except ImportError:
dateutil = None
else:
import dateutil.parser
try:
from bson.int64 import Int64
except ImportError:
Int64 = long
from mongoengine.base import (BaseDocument, BaseField, ComplexBaseField,
GeoJsonBaseField, ObjectIdField, get_document)
from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.errors import DoesNotExist, InvalidQueryError, ValidationError
from mongoengine.python_support import StringIO
from mongoengine.queryset import DO_NOTHING, QuerySet
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
# Explicit public API of the fields module (what ``import *`` exposes).
__all__ = (
    'StringField', 'URLField', 'EmailField', 'IntField', 'LongField',
    'FloatField', 'DecimalField', 'BooleanField', 'DateTimeField',
    'ComplexDateTimeField', 'EmbeddedDocumentField', 'ObjectIdField',
    'GenericEmbeddedDocumentField', 'DynamicField', 'ListField',
    'SortedListField', 'EmbeddedDocumentListField', 'DictField',
    'MapField', 'ReferenceField', 'CachedReferenceField',
    'GenericReferenceField', 'BinaryField', 'GridFSError', 'GridFSProxy',
    'FileField', 'ImageGridFsProxy', 'ImproperlyConfigured', 'ImageField',
    'GeoPointField', 'PointField', 'LineStringField', 'PolygonField',
    'SequenceField', 'UUIDField', 'MultiPointField', 'MultiLineStringField',
    'MultiPolygonField', 'GeoJsonBaseField'
)

# Sentinel string used to declare self-referencing fields.
RECURSIVE_REFERENCE_CONSTANT = 'self'
class StringField(BaseField):
    """A unicode string field."""

    def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
        """
        :param regex: optional pattern values must match (compiled here)
        :param max_length: optional maximum length
        :param min_length: optional minimum length
        """
        self.regex = re.compile(regex) if regex else None
        self.max_length = max_length
        self.min_length = min_length
        super(StringField, self).__init__(**kwargs)

    def to_python(self, value):
        # Pass unicode through untouched; try to decode byte strings as
        # UTF-8, silently keeping the original value on failure.
        if isinstance(value, six.text_type):
            return value
        try:
            value = value.decode('utf-8')
        except Exception:
            pass
        return value

    def validate(self, value):
        if not isinstance(value, six.string_types):
            self.error('StringField only accepts string values')

        if self.max_length is not None and len(value) > self.max_length:
            self.error('String value is too long')

        if self.min_length is not None and len(value) < self.min_length:
            self.error('String value is too short')

        if self.regex is not None and self.regex.match(value) is None:
            self.error('String value did not match validation regex')

    def lookup_member(self, member_name):
        # Strings have no sub-fields to descend into.
        return None

    def prepare_query_value(self, op, value):
        # Translate (i)startswith / (i)endswith / (i)contains / (i)exact
        # operators into anchored regular-expression queries.
        if not isinstance(op, six.string_types):
            return value

        if op.lstrip('i') in ('startswith', 'endswith', 'contains', 'exact'):
            flags = 0
            if op.startswith('i'):
                # a leading 'i' marks the case-insensitive variant
                flags = re.IGNORECASE
                op = op.lstrip('i')

            regex = r'%s'
            if op == 'startswith':
                regex = r'^%s'
            elif op == 'endswith':
                regex = r'%s$'
            elif op == 'exact':
                regex = r'^%s$'

            # escape unsafe characters which could lead to a re.error
            value = re.escape(value)
            value = re.compile(regex % value, flags)
        return super(StringField, self).prepare_query_value(op, value)
class URLField(StringField):
    """A field that validates input as an URL.

    .. versionadded:: 0.3
    """

    _URL_REGEX = re.compile(
        r'^(?:[a-z0-9\.\-]*)://'  # scheme is validated separately
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    _URL_SCHEMES = ['http', 'https', 'ftp', 'ftps']

    def __init__(self, verify_exists=False, url_regex=None, schemes=None, **kwargs):
        """
        :param verify_exists: NOTE(review): stored but never checked in
            this class -- confirm whether it is honoured elsewhere.
        :param url_regex: overrides the default URL pattern
        :param schemes: allowed schemes (default http/https/ftp/ftps)
        """
        self.verify_exists = verify_exists
        self.url_regex = url_regex or self._URL_REGEX
        self.schemes = schemes or self._URL_SCHEMES
        super(URLField, self).__init__(**kwargs)

    def validate(self, value):
        # Check first if the scheme is valid
        scheme = value.split('://')[0].lower()
        if scheme not in self.schemes:
            self.error('Invalid scheme {} in URL: {}'.format(scheme, value))
            return

        # Then check full URL
        if not self.url_regex.match(value):
            self.error('Invalid URL: {}'.format(value))
            return
class EmailField(StringField):
    """A string field that validates its input as an email address.

    .. versionadded:: 0.4
    """

    EMAIL_REGEX = re.compile(
        # dot-atom
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
        # quoted-string
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"'
        # domain (max length of an ICAAN TLD is 22 characters)
        r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))$', re.IGNORECASE
    )

    def validate(self, value):
        """Flag values that do not match the email regex, then apply the
        usual StringField length/regex checks."""
        if EmailField.EMAIL_REGEX.match(value) is None:
            self.error('Invalid email address: %s' % value)
        super(EmailField, self).validate(value)
class IntField(BaseField):
    """32-bit integer field."""

    def __init__(self, min_value=None, max_value=None, **kwargs):
        """
        :param min_value: smallest value accepted by :meth:`validate`.
        :param max_value: largest value accepted by :meth:`validate`.
        """
        self.min_value, self.max_value = min_value, max_value
        super(IntField, self).__init__(**kwargs)

    def to_python(self, value):
        """Best-effort cast to int; non-castable values pass through untouched."""
        try:
            return int(value)
        except ValueError:
            return value

    def validate(self, value):
        """Check convertibility to int and the configured min/max bounds."""
        try:
            value = int(value)
        except Exception:
            self.error('%s could not be converted to int' % value)

        if self.min_value is not None and value < self.min_value:
            self.error('Integer value is too small')
        if self.max_value is not None and value > self.max_value:
            self.error('Integer value is too large')

    def prepare_query_value(self, op, value):
        # None is a legitimate query value (e.g. existence checks).
        if value is None:
            return value
        return super(IntField, self).prepare_query_value(op, int(value))
class LongField(BaseField):
    """64-bit integer field."""

    def __init__(self, min_value=None, max_value=None, **kwargs):
        # min_value/max_value bound the range accepted by validate().
        self.min_value, self.max_value = min_value, max_value
        super(LongField, self).__init__(**kwargs)

    def to_python(self, value):
        # Best-effort conversion; unconvertible values pass through so that
        # validate() can raise a proper error later.
        # NOTE(review): `long` is the Python 2 builtin and raises NameError on
        # Python 3 - confirm whether a py3 shim is defined elsewhere in the file.
        try:
            value = long(value)
        except ValueError:
            pass
        return value

    def to_mongo(self, value):
        # Store explicitly as a BSON 64-bit integer (bson.int64.Int64).
        return Int64(value)

    def validate(self, value):
        try:
            value = long(value)
        except Exception:
            self.error('%s could not be converted to long' % value)

        if self.min_value is not None and value < self.min_value:
            self.error('Long value is too small')
        if self.max_value is not None and value > self.max_value:
            self.error('Long value is too large')

    def prepare_query_value(self, op, value):
        # None is a legitimate query value (e.g. existence checks).
        if value is None:
            return value
        return super(LongField, self).prepare_query_value(op, long(value))
class FloatField(BaseField):
    """Floating point number field."""

    def __init__(self, min_value=None, max_value=None, **kwargs):
        """
        :param min_value: smallest value accepted by :meth:`validate`.
        :param max_value: largest value accepted by :meth:`validate`.
        """
        self.min_value, self.max_value = min_value, max_value
        super(FloatField, self).__init__(**kwargs)

    def to_python(self, value):
        """Best-effort cast to float; non-castable values pass through untouched."""
        try:
            return float(value)
        except ValueError:
            return value

    def validate(self, value):
        """Accept floats and ints within the configured bounds."""
        if isinstance(value, six.integer_types):
            # Ints are acceptable, but an arbitrarily large int may not fit
            # into a float.
            try:
                value = float(value)
            except OverflowError:
                self.error('The value is too large to be converted to float')

        if not isinstance(value, float):
            self.error('FloatField only accepts float and integer values')

        if self.min_value is not None and value < self.min_value:
            self.error('Float value is too small')
        if self.max_value is not None and value > self.max_value:
            self.error('Float value is too large')

    def prepare_query_value(self, op, value):
        # None is a legitimate query value (e.g. existence checks).
        if value is None:
            return value
        return super(FloatField, self).prepare_query_value(op, float(value))
class DecimalField(BaseField):
    """Fixed-point decimal number field.

    .. versionchanged:: 0.8
    .. versionadded:: 0.3
    """

    def __init__(self, min_value=None, max_value=None, force_string=False,
                 precision=2, rounding=decimal.ROUND_HALF_UP, **kwargs):
        """
        :param min_value: Validation rule for the minimum acceptable value.
        :param max_value: Validation rule for the maximum acceptable value.
        :param force_string: Store as a string.
        :param precision: Number of decimal places to store.
        :param rounding: The rounding rule from the python decimal library:

            - decimal.ROUND_CEILING (towards Infinity)
            - decimal.ROUND_DOWN (towards zero)
            - decimal.ROUND_FLOOR (towards -Infinity)
            - decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
            - decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
            - decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
            - decimal.ROUND_UP (away from zero)
            - decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5; otherwise towards zero)

            Defaults to: ``decimal.ROUND_HALF_UP``
        """
        self.min_value = min_value
        self.max_value = max_value
        self.force_string = force_string
        self.precision = precision
        self.rounding = rounding

        super(DecimalField, self).__init__(**kwargs)

    def to_python(self, value):
        """Convert to a Decimal quantized to the configured precision."""
        if value is None:
            return value

        # Convert to string for python 2.6 before casting to Decimal
        try:
            value = decimal.Decimal('%s' % value)
        except decimal.InvalidOperation:
            # Leave unconvertible values untouched; validate() reports them.
            return value
        # Quantize AFTER conversion so the configured rounding rule applies.
        return value.quantize(decimal.Decimal('.%s' % ('0' * self.precision)), rounding=self.rounding)

    def to_mongo(self, value):
        """Store either as a string (force_string) or as a float."""
        if value is None:
            return value
        if self.force_string:
            return six.text_type(self.to_python(value))
        return float(self.to_python(value))

    def validate(self, value):
        """Check convertibility to Decimal and the configured min/max bounds."""
        if not isinstance(value, decimal.Decimal):
            if not isinstance(value, six.string_types):
                # Stringify first so floats convert deterministically.
                value = six.text_type(value)
            try:
                value = decimal.Decimal(value)
            except Exception as exc:
                self.error('Could not convert value to decimal: %s' % exc)

        if self.min_value is not None and value < self.min_value:
            self.error('Decimal value is too small')
        if self.max_value is not None and value > self.max_value:
            self.error('Decimal value is too large')

    def prepare_query_value(self, op, value):
        # Query with the same representation (string or float) used in storage.
        return super(DecimalField, self).prepare_query_value(op, self.to_mongo(value))
class BooleanField(BaseField):
    """Boolean field type.

    .. versionadded:: 0.1.2
    """

    def to_python(self, value):
        """Cast to bool where possible; leave the value untouched otherwise."""
        try:
            return bool(value)
        except ValueError:
            return value

    def validate(self, value):
        """Only genuine booleans are accepted - no implicit truthiness."""
        if not isinstance(value, bool):
            self.error('BooleanField only accepts boolean values')
class DateTimeField(BaseField):
    """Datetime field.

    Uses the python-dateutil library if available alternatively use time.strptime
    to parse the dates.  Note: python-dateutil's parser is fully featured and when
    installed you can utilise it to convert varying types of date formats into valid
    python datetime objects.

    Note: Microseconds are rounded to the nearest millisecond.
        Pre UTC microsecond support is effectively broken.
        Use :class:`~mongoengine.fields.ComplexDateTimeField` if you
        need accurate microsecond support.
    """

    def validate(self, value):
        # Validation simply attempts the same coercion used for storage.
        new_value = self.to_mongo(value)
        if not isinstance(new_value, (datetime.datetime, datetime.date)):
            self.error(u'cannot parse date "%s"' % value)

    def to_mongo(self, value):
        # Coerce value into a datetime; returns None when a string cannot
        # be parsed by any supported format.
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            # Widen a plain date to midnight of that day.
            return datetime.datetime(value.year, value.month, value.day)
        if callable(value):
            # e.g. a `datetime.datetime.now` default - call it for a value.
            return value()

        if not isinstance(value, six.string_types):
            return None

        # Attempt to parse a datetime:
        if dateutil:
            try:
                return dateutil.parser.parse(value)
            except (TypeError, ValueError):
                return None

        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                return None
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        # Fall back through progressively coarser formats; order matters.
        try:  # Seconds are optional, so try converting seconds first.
            return datetime.datetime(*time.strptime(value,
                                                    '%Y-%m-%d %H:%M:%S')[:6], **kwargs)
        except ValueError:
            try:  # Try without seconds.
                return datetime.datetime(*time.strptime(value,
                                                        '%Y-%m-%d %H:%M')[:5], **kwargs)
            except ValueError:  # Try without hour/minutes/seconds.
                try:
                    return datetime.datetime(*time.strptime(value,
                                                            '%Y-%m-%d')[:3], **kwargs)
                except ValueError:
                    return None

    def prepare_query_value(self, op, value):
        # Queries compare against the stored (coerced) representation.
        return super(DateTimeField, self).prepare_query_value(op, self.to_mongo(value))
class ComplexDateTimeField(StringField):
    """
    ComplexDateTimeField handles microseconds exactly instead of rounding
    like DateTimeField does.

    Derives from a StringField so you can do `gte` and `lte` filtering by
    using lexicographical comparison when filtering / sorting strings.

    The stored string has the following format:

        YYYY,MM,DD,HH,MM,SS,NNNNNN

    Where NNNNNN is the number of microseconds of the represented `datetime`.
    The `,` as the separator can be easily modified by passing the `separator`
    keyword when initializing the field.

    .. versionadded:: 0.5
    """

    def __init__(self, separator=',', **kwargs):
        # Component names are kept only for reference; the strftime/strptime
        # format string below is what actually drives (de)serialization.
        self.names = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
        self.separator = separator
        # Zero-padded fields keep the string lexicographically sortable.
        self.format = separator.join(['%Y', '%m', '%d', '%H', '%M', '%S', '%f'])
        super(ComplexDateTimeField, self).__init__(**kwargs)

    def _convert_from_datetime(self, val):
        """
        Convert a `datetime` object to a string representation (which will be
        stored in MongoDB). This is the reverse function of
        `_convert_from_string`.

        >>> a = datetime(2011, 6, 8, 20, 26, 24, 92284)
        >>> ComplexDateTimeField()._convert_from_datetime(a)
        '2011,06,08,20,26,24,092284'
        """
        return val.strftime(self.format)

    def _convert_from_string(self, data):
        """
        Convert a string representation to a `datetime` object (the object you
        will manipulate). This is the reverse function of
        `_convert_from_datetime`.

        >>> a = '2011,06,08,20,26,24,092284'
        >>> ComplexDateTimeField()._convert_from_string(a)
        datetime.datetime(2011, 6, 8, 20, 26, 24, 92284)
        """
        # The 7 separator-delimited components map positionally onto the
        # datetime constructor (year .. microsecond).
        values = map(int, data.split(self.separator))
        return datetime.datetime(*values)

    def __get__(self, instance, owner):
        data = super(ComplexDateTimeField, self).__get__(instance, owner)

        if data is None:
            # Non-nullable fields default to "now" when unset.
            return None if self.null else datetime.datetime.now()
        if isinstance(data, datetime.datetime):
            return data
        return self._convert_from_string(data)

    def __set__(self, instance, value):
        # Serialize on assignment; falsy values (None) are stored as-is.
        value = self._convert_from_datetime(value) if value else value
        return super(ComplexDateTimeField, self).__set__(instance, value)

    def validate(self, value):
        value = self.to_python(value)
        if not isinstance(value, datetime.datetime):
            self.error('Only datetime objects may used in a '
                       'ComplexDateTimeField')

    def to_python(self, value):
        # Fall back to the raw value when it is not a parseable string.
        original_value = value
        try:
            return self._convert_from_string(value)
        except Exception:
            return original_value

    def to_mongo(self, value):
        value = self.to_python(value)
        return self._convert_from_datetime(value)

    def prepare_query_value(self, op, value):
        # Queries compare against the serialized string representation.
        return super(ComplexDateTimeField, self).prepare_query_value(op, self._convert_from_datetime(value))
class EmbeddedDocumentField(BaseField):
    """An embedded document field - with a declared document_type.

    Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.
    """

    def __init__(self, document_type, **kwargs):
        # document_type may be a class or a string (a class name, or
        # RECURSIVE_REFERENCE_CONSTANT for self-reference); strings are
        # resolved lazily by the `document_type` property.
        if (
            not isinstance(document_type, six.string_types) and
            not issubclass(document_type, EmbeddedDocument)
        ):
            self.error('Invalid embedded document class provided to an '
                       'EmbeddedDocumentField')

        self.document_type_obj = document_type
        super(EmbeddedDocumentField, self).__init__(**kwargs)

    @property
    def document_type(self):
        # Resolve (and cache) the EmbeddedDocument class on first access.
        if isinstance(self.document_type_obj, six.string_types):
            if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
                self.document_type_obj = self.owner_document
            else:
                self.document_type_obj = get_document(self.document_type_obj)
        return self.document_type_obj

    def to_python(self, value):
        # Rebuild the declared document type from raw SON data.
        if not isinstance(value, self.document_type):
            return self.document_type._from_son(value, _auto_dereference=self._auto_dereference)
        return value

    def to_mongo(self, value, use_db_field=True, fields=None):
        # Non-document values pass through unchanged (e.g. already-raw data).
        if not isinstance(value, self.document_type):
            return value
        return self.document_type.to_mongo(value, use_db_field, fields)

    def validate(self, value, clean=True):
        """Make sure that the document instance is an instance of the
        EmbeddedDocument subclass provided when the document was defined.
        """
        # Using isinstance also works for subclasses of self.document
        if not isinstance(value, self.document_type):
            self.error('Invalid embedded document instance provided to an '
                       'EmbeddedDocumentField')
        # Delegate field-level validation to the embedded document itself.
        self.document_type.validate(value, clean)

    def lookup_member(self, member_name):
        # Member lookups resolve against the embedded document's fields.
        return self.document_type._fields.get(member_name)

    def prepare_query_value(self, op, value):
        # Coerce raw dict query values into the declared document type so
        # they serialize consistently.
        if value is not None and not isinstance(value, self.document_type):
            try:
                value = self.document_type._from_son(value)
            except ValueError:
                raise InvalidQueryError("Querying the embedded document '%s' failed, due to an invalid query value" %
                                        (self.document_type._class_name,))
        super(EmbeddedDocumentField, self).prepare_query_value(op, value)
        return self.to_mongo(value)
class GenericEmbeddedDocumentField(BaseField):
    """A generic embedded document field - allows any
    :class:`~mongoengine.EmbeddedDocument` to be stored.

    Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.

    .. note ::
        You can use the choices param to limit the acceptable
        EmbeddedDocument types
    """

    def prepare_query_value(self, op, value):
        """Queries compare against the serialized (SON) representation."""
        mongo_value = self.to_mongo(value)
        return super(GenericEmbeddedDocumentField, self).prepare_query_value(op, mongo_value)

    def to_python(self, value):
        """Rebuild the concrete EmbeddedDocument subclass recorded in '_cls'."""
        if isinstance(value, dict):
            doc_cls = get_document(value['_cls'])
            value = doc_cls._from_son(value)
        return value

    def validate(self, value, clean=True):
        """Accept any EmbeddedDocument instance and delegate its own validation."""
        if not isinstance(value, EmbeddedDocument):
            self.error('Invalid embedded document instance provided to an '
                       'GenericEmbeddedDocumentField')
        value.validate(clean=clean)

    def to_mongo(self, document, use_db_field=True, fields=None):
        """Serialize, stamping '_cls' so to_python can restore the right type."""
        if document is None:
            return None
        data = document.to_mongo(use_db_field, fields)
        if '_cls' not in data:
            data['_cls'] = document._class_name
        return data
class DynamicField(BaseField):
    """A truly dynamic field type capable of handling different and varying
    types of data.

    Used by :class:`~mongoengine.DynamicDocument` to handle dynamic data"""

    def to_mongo(self, value, use_db_field=True, fields=None):
        """Convert a Python type to a MongoDB compatible type.

        :param value: arbitrary value: document, mapping, sequence or scalar.
        :param use_db_field: forwarded to nested ``to_mongo`` calls.
        :param fields: forwarded to nested ``to_mongo`` calls.
        """
        if isinstance(value, six.string_types):
            return value

        if hasattr(value, 'to_mongo'):
            cls = value.__class__
            val = value.to_mongo(use_db_field, fields)
            # If it's a (non-inherited) top-level Document, store a reference
            # plus the class name so it can be dereferenced on read.
            if isinstance(value, Document):
                val = {'_ref': value.to_dbref(), '_cls': cls.__name__}
            if isinstance(value, EmbeddedDocument):
                val['_cls'] = cls.__name__
            return val

        if not isinstance(value, (dict, list, tuple)):
            return value

        # Normalize sequences into an index-keyed dict so both containers
        # can be converted with the same recursive loop.
        is_list = False
        if not hasattr(value, 'items'):
            is_list = True
            value = {k: v for k, v in enumerate(value)}

        data = {}
        # BUGFIX: use .items() instead of the Python-2-only dict.iteritems(),
        # which raises AttributeError under Python 3.
        for k, v in value.items():
            data[k] = self.to_mongo(v, use_db_field, fields)

        value = data
        if is_list:  # Convert back to a list, preserving the original order.
            value = [v for k, v in sorted(data.items(), key=itemgetter(0))]
        return value

    def to_python(self, value):
        """Resurrect stored documents / references back into Python objects."""
        if isinstance(value, dict) and '_cls' in value:
            doc_cls = get_document(value['_cls'])
            if '_ref' in value:
                # Stored as a DBRef - fetch the referenced document first.
                value = doc_cls._get_db().dereference(value['_ref'])
            return doc_cls._from_son(value)

        return super(DynamicField, self).to_python(value)

    def lookup_member(self, member_name):
        # Dynamic data has no schema - any member name is considered valid.
        return member_name

    def prepare_query_value(self, op, value):
        # Strings get StringField's regex-aware operator handling.
        if isinstance(value, six.string_types):
            return StringField().prepare_query_value(op, value)
        return super(DynamicField, self).prepare_query_value(op, self.to_mongo(value))

    def validate(self, value, clean=True):
        # Only values that know how to validate themselves are validated.
        if hasattr(value, 'validate'):
            value.validate(clean=clean)
class ListField(ComplexBaseField):
    """A list field that wraps a standard field, allowing multiple instances
    of the field to be used as a list in the database.

    If using with ReferenceFields see: :ref:`one-to-many-with-listfields`

    .. note::
        Required means it cannot be empty - as the default for ListFields is []
    """

    def __init__(self, field=None, **kwargs):
        """
        :param field: optional field instance every list item must satisfy.
        """
        self.field = field
        # Each document instance gets its own fresh empty list by default.
        kwargs.setdefault('default', lambda: [])
        super(ListField, self).__init__(**kwargs)

    def validate(self, value):
        """Make sure that a list of valid fields is being used."""
        is_sequence = isinstance(value, (list, tuple, QuerySet))
        if not is_sequence or isinstance(value, six.string_types):
            self.error('Only lists and tuples may be used in a list field')
        super(ListField, self).validate(value)

    def prepare_query_value(self, op, value):
        if self.field:
            # If the value is iterable and it's not a string nor a
            # BaseDocument, call prepare_query_value for each of its items.
            is_iterable = hasattr(value, '__iter__')
            is_scalar_like = isinstance(value, (six.string_types, BaseDocument))
            if op in ('set', 'unset', None) and is_iterable and not is_scalar_like:
                return [self.field.prepare_query_value(op, item) for item in value]
            return self.field.prepare_query_value(op, value)
        return super(ListField, self).prepare_query_value(op, value)
class EmbeddedDocumentListField(ListField):
    """A :class:`~mongoengine.ListField` designed specially to hold a list of
    embedded documents to provide additional query helpers.

    .. note::
        The only valid list values are subclasses of
        :class:`~mongoengine.EmbeddedDocument`.

    .. versionadded:: 0.9
    """

    def __init__(self, document_type, **kwargs):
        """
        :param document_type: The type of
         :class:`~mongoengine.EmbeddedDocument` the list will hold.
        :param kwargs: Keyword arguments passed directly into the parent
         :class:`~mongoengine.ListField`.
        """
        # Wrap the declared type so every item is validated/serialized as
        # an embedded document of that type.
        item_field = EmbeddedDocumentField(document_type)
        super(EmbeddedDocumentListField, self).__init__(field=item_field, **kwargs)
class SortedListField(ListField):
    """A ListField that sorts the contents of its list before writing to
    the database in order to ensure that a sorted list is always
    retrieved.

    .. warning::
        There is a potential race condition when handling lists.  If you set /
        save the whole list then other processes trying to save the whole list
        as well could overwrite changes.  The safest way to append to a list is
        to perform a push operation.

    .. versionadded:: 0.4
    .. versionchanged:: 0.6 - added reverse keyword
    """

    _ordering = None
    _order_reverse = False

    def __init__(self, field, **kwargs):
        """
        :param ordering: optional item attribute/key to sort by.
        :param reverse: sort descending when True.
        """
        if 'ordering' in kwargs:
            self._ordering = kwargs.pop('ordering')
        if 'reverse' in kwargs:
            self._order_reverse = kwargs.pop('reverse')
        super(SortedListField, self).__init__(field, **kwargs)

    def to_mongo(self, value, use_db_field=True, fields=None):
        """Serialize via ListField, then sort the resulting list."""
        value = super(SortedListField, self).to_mongo(value, use_db_field, fields)
        if self._ordering is not None:
            # Sort by the configured item key rather than the items themselves.
            sort_key = itemgetter(self._ordering)
            return sorted(value, key=sort_key, reverse=self._order_reverse)
        return sorted(value, reverse=self._order_reverse)
def key_not_string(d):
    """Recursively determine whether any key in *d* (or a nested dict) is
    not a string.

    :param d: dictionary to inspect.
    :returns: True when a non-string key is found, False otherwise.
        (The original returned an implicit None in the all-strings case;
        False is equivalent in the boolean contexts this is used in.)
    """
    return any(
        not isinstance(k, six.string_types) or
        (isinstance(v, dict) and key_not_string(v))
        for k, v in d.items()
    )
def key_has_dot_or_dollar(d):
    """Recursively determine whether any key in *d* (or a nested dict)
    contains a dot or a dollar sign (both are reserved by MongoDB).

    :param d: dictionary to inspect.
    :returns: True when an offending key is found, False otherwise.
        (The original returned an implicit None in the all-clean case;
        False is equivalent in the boolean contexts this is used in.)
    """
    return any(
        '.' in k or '$' in k or
        (isinstance(v, dict) and key_has_dot_or_dollar(v))
        for k, v in d.items()
    )
class DictField(ComplexBaseField):
    """A dictionary field that wraps a standard Python dictionary. This is
    similar to an embedded document, but the structure is not defined.

    .. note::
        Required means it cannot be empty - as the default for DictFields is {}

    .. versionadded:: 0.3
    .. versionchanged:: 0.5 - Can now handle complex / varying types of data
    """

    def __init__(self, basecls=None, field=None, *args, **kwargs):
        """
        :param basecls: base field class used when resolving member lookups.
        :param field: optional field instance every dict value must satisfy.
        """
        self.field = field
        self._auto_dereference = False
        self.basecls = basecls or BaseField
        if not issubclass(self.basecls, BaseField):
            self.error('DictField only accepts dict values')
        # Each document instance gets its own fresh empty dict by default.
        kwargs.setdefault('default', lambda: {})
        super(DictField, self).__init__(*args, **kwargs)

    def validate(self, value):
        """Make sure that a list of valid fields is being used."""
        if not isinstance(value, dict):
            self.error('Only dictionaries may be used in a DictField')

        # MongoDB documents require string keys without '.' or '$'.
        if key_not_string(value):
            self.error('Invalid dictionary key - documents must '
                       'have only string keys')
        if key_has_dot_or_dollar(value):
            self.error('Invalid dictionary key name - keys may not contain "."'
                       ' or "$" characters')
        super(DictField, self).validate(value)

    def lookup_member(self, member_name):
        # Any member name is addressable; resolve it as a nested DictField.
        return DictField(basecls=self.basecls, db_field=member_name)

    def prepare_query_value(self, op, value):
        match_operators = ('contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith',
                           'exact', 'iexact')

        # String-matching operators on string values reuse StringField's
        # regex handling.
        if op in match_operators and isinstance(value, six.string_types):
            return StringField().prepare_query_value(op, value)

        if hasattr(self.field, 'field'):
            if op in ('set', 'unset') and isinstance(value, dict):
                return {
                    key: self.field.prepare_query_value(op, item)
                    for key, item in value.items()
                }
            return self.field.prepare_query_value(op, value)

        return super(DictField, self).prepare_query_value(op, value)
class MapField(DictField):
    """A field that maps a name to a specified field type. Similar to
    a DictField, except the 'value' of each item must match the specified
    field type.

    .. versionadded:: 0.5
    """

    def __init__(self, field=None, *args, **kwargs):
        """
        :param field: the field instance every mapped value must satisfy
            (mandatory, unlike DictField).
        """
        if not isinstance(field, BaseField):
            self.error('Argument to MapField constructor must be a valid '
                       'field')
        super(MapField, self).__init__(field=field, *args, **kwargs)
class ReferenceField(BaseField):
    """A reference to a document that will be automatically dereferenced on
    access (lazily).

    Use the `reverse_delete_rule` to handle what should happen if the document
    the field is referencing is deleted. EmbeddedDocuments, DictFields and
    MapFields does not support reverse_delete_rule and an `InvalidDocumentError`
    will be raised if trying to set on one of these Document / Field types.

    The options are:

      * DO_NOTHING (0)  - don't do anything (default).
      * NULLIFY    (1)  - Updates the reference to null.
      * CASCADE    (2)  - Deletes the documents associated with the reference.
      * DENY       (3)  - Prevent the deletion of the reference object.
      * PULL       (4)  - Pull the reference from a
        :class:`~mongoengine.fields.ListField` of references

    Alternative syntax for registering delete rules (useful when implementing
    bi-directional delete rules)

    .. code-block:: python

        class Bar(Document):
            content = StringField()
            foo = ReferenceField('Foo')

        Foo.register_delete_rule(Bar, 'foo', NULLIFY)

    .. note ::
        `reverse_delete_rule` does not trigger pre / post delete signals to be
        triggered.

    .. versionchanged:: 0.5 added `reverse_delete_rule`
    """

    def __init__(self, document_type, dbref=False,
                 reverse_delete_rule=DO_NOTHING, **kwargs):
        """Initialises the Reference Field.

        :param document_type: the referenced Document class or its name
            (strings are resolved lazily by the `document_type` property).
        :param dbref: Store the reference as :class:`~pymongo.dbref.DBRef`
            or as the :class:`~pymongo.objectid.ObjectId`.id .
        :param reverse_delete_rule: Determines what to do when the referring
            object is deleted

        .. note ::
            A reference to an abstract document type is always stored as a
            :class:`~pymongo.dbref.DBRef`, regardless of the value of `dbref`.
        """
        if (
            not isinstance(document_type, six.string_types) and
            not issubclass(document_type, Document)
        ):
            self.error('Argument to ReferenceField constructor must be a '
                       'document class or a string')

        self.dbref = dbref
        self.document_type_obj = document_type
        self.reverse_delete_rule = reverse_delete_rule
        super(ReferenceField, self).__init__(**kwargs)

    @property
    def document_type(self):
        # Resolve a lazily-declared (string) document type on first access.
        if isinstance(self.document_type_obj, six.string_types):
            if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
                self.document_type_obj = self.owner_document
            else:
                self.document_type_obj = get_document(self.document_type_obj)
        return self.document_type_obj

    def __get__(self, instance, owner):
        """Descriptor to allow lazy dereferencing."""
        if instance is None:
            # Document class being used rather than a document object
            return self

        # Get value from document instance if available
        value = instance._data.get(self.name)
        self._auto_dereference = instance._fields[self.name]._auto_dereference

        # Dereference DBRefs
        if self._auto_dereference and isinstance(value, DBRef):
            if hasattr(value, 'cls'):
                # Dereference using the class type specified in the reference
                cls = get_document(value.cls)
            else:
                cls = self.document_type
            dereferenced = cls._get_db().dereference(value)
            if dereferenced is None:
                raise DoesNotExist('Trying to dereference unknown document %s' % value)
            else:
                instance._data[self.name] = cls._from_son(dereferenced)

        return super(ReferenceField, self).__get__(instance, owner)

    def to_mongo(self, document):
        """Serialize the reference as a DBRef or bare id, per configuration."""
        if isinstance(document, DBRef):
            if not self.dbref:
                return document.id
            return document

        if isinstance(document, Document):
            # We need the id from the saved object to create the DBRef
            id_ = document.pk
            if id_ is None:
                self.error('You can only reference documents once they have'
                           ' been saved to the database')

            # Use the attributes from the document instance, so that they
            # override the attributes of this field's document type
            cls = document
        else:
            # A bare id was supplied.
            id_ = document
            cls = self.document_type

        id_field_name = cls._meta['id_field']
        id_field = cls._fields[id_field_name]

        id_ = id_field.to_mongo(id_)
        if self.document_type._meta.get('abstract'):
            # Abstract references must record the concrete class name so the
            # right type can be resolved on read.
            collection = cls._get_collection_name()
            return DBRef(collection, id_, cls=cls._class_name)
        elif self.dbref:
            collection = cls._get_collection_name()
            return DBRef(collection, id_)

        return id_

    def to_python(self, value):
        """Convert a MongoDB-compatible type to a Python type."""
        if (not self.dbref and
                not isinstance(value, (DBRef, Document, EmbeddedDocument))):
            # Bare ids are wrapped in a DBRef for lazy dereferencing.
            collection = self.document_type._get_collection_name()
            value = DBRef(collection, self.document_type.id.to_python(value))
        return value

    def prepare_query_value(self, op, value):
        if value is None:
            return None
        super(ReferenceField, self).prepare_query_value(op, value)
        return self.to_mongo(value)

    def validate(self, value):
        if not isinstance(value, (self.document_type, DBRef)):
            self.error('A ReferenceField only accepts DBRef or documents')

        if isinstance(value, Document) and value.id is None:
            self.error('You can only reference documents once they have been '
                       'saved to the database')

        if self.document_type._meta.get('abstract') and \
                not isinstance(value, self.document_type):
            # BUGFIX: the original interpolated a single argument into a
            # two-placeholder format string, raising TypeError instead of
            # the intended validation error.
            self.error(
                '%s is not an instance of abstract reference type %s' % (
                    value.__class__.__name__, self.document_type._class_name)
            )

    def lookup_member(self, member_name):
        # Member lookups resolve against the referenced document's fields.
        return self.document_type._fields.get(member_name)
class CachedReferenceField(BaseField):
    """
    A referencefield with cache fields to purpose pseudo-joins

    .. versionadded:: 0.9
    """

    def __init__(self, document_type, fields=None, auto_sync=True, **kwargs):
        """Initialises the Cached Reference Field.

        :param document_type: the referenced Document class or its name
            (strings are resolved lazily by the `document_type` property).
        :param fields: A list of fields to be cached in document
        :param auto_sync: if True documents are auto updated.
        """
        if fields is None:
            fields = []

        if (
            not isinstance(document_type, six.string_types) and
            not issubclass(document_type, Document)
        ):
            self.error('Argument to CachedReferenceField constructor must be a'
                       ' document class or a string')

        self.auto_sync = auto_sync
        self.document_type_obj = document_type
        self.fields = fields
        super(CachedReferenceField, self).__init__(**kwargs)

    def start_listener(self):
        # Keep the cached copy in sync whenever the referenced document
        # is saved.
        from mongoengine import signals

        # NOTE(review): despite the handler's "pre_save" name, it is connected
        # to the post_save signal - confirm the naming is merely historical.
        signals.post_save.connect(self.on_document_pre_save,
                                  sender=self.document_type)

    def on_document_pre_save(self, sender, document, created, **kwargs):
        # A newly created document cannot be referenced (and cached) yet.
        if created:
            return None

        # Build 'set__<field>__<key>' updates for only the cached fields
        # that actually changed in this save.
        update_kwargs = {
            'set__%s__%s' % (self.name, key): val
            for key, val in document._delta()[0].items()
            if key in self.fields
        }
        if update_kwargs:
            filter_kwargs = {}
            filter_kwargs[self.name] = document

            # Update every owning document that references this one.
            self.owner_document.objects(
                **filter_kwargs).update(**update_kwargs)

    def to_python(self, value):
        # Stored form is a dict holding at least '_id'; dereference it back
        # into a full document instance.
        if isinstance(value, dict):
            collection = self.document_type._get_collection_name()
            value = DBRef(
                collection, self.document_type.id.to_python(value['_id']))
            return self.document_type._from_son(self.document_type._get_db().dereference(value))

        return value

    @property
    def document_type(self):
        # Resolve a lazily-declared (string) document type on first access.
        if isinstance(self.document_type_obj, six.string_types):
            if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
                self.document_type_obj = self.owner_document
            else:
                self.document_type_obj = get_document(self.document_type_obj)
        return self.document_type_obj

    def __get__(self, instance, owner):
        if instance is None:
            # Document class being used rather than a document object
            return self

        # Get value from document instance if available
        value = instance._data.get(self.name)
        self._auto_dereference = instance._fields[self.name]._auto_dereference

        # Dereference DBRefs
        if self._auto_dereference and isinstance(value, DBRef):
            dereferenced = self.document_type._get_db().dereference(value)
            if dereferenced is None:
                raise DoesNotExist('Trying to dereference unknown document %s' % value)
            else:
                instance._data[self.name] = self.document_type._from_son(dereferenced)

        return super(CachedReferenceField, self).__get__(instance, owner)

    def to_mongo(self, document, use_db_field=True, fields=None):
        id_field_name = self.document_type._meta['id_field']
        id_field = self.document_type._fields[id_field_name]

        if isinstance(document, Document):
            # We need the id from the saved object to create the DBRef
            id_ = document.pk
            if id_ is None:
                self.error('You can only reference documents once they have'
                           ' been saved to the database')
        else:
            self.error('Only accept a document object')
            # TODO: should raise here or will fail next statement

        # The cached representation always carries the referenced _id.
        value = SON((
            ('_id', id_field.to_mongo(id_)),
        ))

        if fields:
            # Cache only the intersection of the requested and configured fields.
            new_fields = [f for f in self.fields if f in fields]
        else:
            new_fields = self.fields

        value.update(dict(document.to_mongo(use_db_field, fields=new_fields)))
        return value

    def prepare_query_value(self, op, value):
        if value is None:
            return None

        if isinstance(value, Document):
            if value.pk is None:
                self.error('You can only reference documents once they have'
                           ' been saved to the database')
            # Match on the cached sub-document's _id.
            return {'_id': value.pk}

        # Only whole-document lookups are supported for cached references.
        raise NotImplementedError

    def validate(self, value):
        if not isinstance(value, self.document_type):
            self.error('A CachedReferenceField only accepts documents')

        if isinstance(value, Document) and value.id is None:
            self.error('You can only reference documents once they have been '
                       'saved to the database')

    def lookup_member(self, member_name):
        # Member lookups resolve against the referenced document's fields.
        return self.document_type._fields.get(member_name)

    def sync_all(self):
        """
        Sync all cached fields on demand.
        Caution: this operation may be slower.
        """
        update_key = 'set__%s' % self.name

        # Re-write the cached copy in every owning document, one referenced
        # document at a time.
        for doc in self.document_type.objects:
            filter_kwargs = {}
            filter_kwargs[self.name] = doc

            update_kwargs = {}
            update_kwargs[update_key] = doc

            self.owner_document.objects(
                **filter_kwargs).update(**update_kwargs)
class GenericReferenceField(BaseField):
"""A reference to *any* :class:`~mongoengine.document.Document` subclass
that will be automatically dereferenced on access (lazily).
.. note ::
* Any documents used as a generic reference must be registered in the
document registry. Importing the model will automatically register
it.
* You can use the choices param to limit the acceptable Document types
.. versionadded:: 0.3
"""
def __init__(self, *args, **kwargs):
choices = kwargs.pop('choices', None)
super(GenericReferenceField, self).__init__(*args, **kwargs)
self.choices = []
# Keep the choices as a list of allowed Document class names
if choices:
for choice in choices:
if isinstance(choice, six.string_types):
self.choices.append(choice)
elif isinstance(choice, type) and issubclass(choice, Document):
self.choices.append(choice._class_name)
else:
self.error('Invalid choices provided: must be a list of'
'Document subclasses and/or six.string_typess')
def _validate_choices(self, value):
if isinstance(value, dict):
# If the field has not been dereferenced, it is still a dict
# of class and DBRef
value = value.get('_cls')
elif isinstance(value, Document):
value = value._class_name
super(GenericReferenceField, self)._validate_choices(value)
def __get__(self, instance, owner):
if instance is None:
return self
value = instance._data.get(self.name)
self._auto_dereference = instance._fields[self.name]._auto_dereference
if self._auto_dereference and isinstance(value, (dict, SON)):
dereferenced = self.dereference(value)
if dereferenced is None:
raise DoesNotExist('Trying to dereference unknown document %s' % value)
else:
instance._data[self.name] = dereferenced
return super(GenericReferenceField, self).__get__(instance, owner)
def validate(self, value):
if not isinstance(value, (Document, DBRef, dict, SON)):
self.error('GenericReferences can only contain documents')
if isinstance(value, (dict, SON)):
if '_ref' not in value or '_cls' not in value:
self.error('GenericReferences can only contain documents')
# We need the id from the saved object to create the DBRef
elif isinstance(value, Document) and value.id is None:
self.error('You can only reference documents once they have been'
' saved to the database')
def dereference(self, value):
doc_cls = get_document(value['_cls'])
reference = value['_ref']
doc = doc_cls._get_db().dereference(reference)
if doc is not None:
doc = doc_cls._from_son(doc)
return doc
    def to_mongo(self, document):
        """Serialize `document` to the stored {'_cls', '_ref'} SON form."""
        if document is None:
            return None
        # Already-serialized values and raw ids/refs pass straight through.
        if isinstance(document, (dict, SON, ObjectId, DBRef)):
            return document
        # NOTE(review): these lookups assume `document` exposes Document-style
        # class attributes even when the isinstance check below is False; a
        # plain id value would raise here first — verify intended inputs.
        id_field_name = document.__class__._meta['id_field']
        id_field = document.__class__._fields[id_field_name]
        if isinstance(document, Document):
            # We need the id from the saved object to create the DBRef
            id_ = document.id
            if id_ is None:
                self.error('You can only reference documents once they have'
                           ' been saved to the database')
        else:
            id_ = document
        id_ = id_field.to_mongo(id_)
        collection = document._get_collection_name()
        ref = DBRef(collection, id_)
        return SON((
            ('_cls', document._class_name),
            ('_ref', ref)
        ))
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
class BinaryField(BaseField):
    """A binary data field.

    :param max_bytes: optional upper bound on the stored payload size.
    """
    def __init__(self, max_bytes=None, **kwargs):
        self.max_bytes = max_bytes
        super(BinaryField, self).__init__(**kwargs)
    def __set__(self, instance, value):
        """Handle bytearrays in python 3.1"""
        # Coerce bytearray to bytes before storage on Python 3.
        if six.PY3 and isinstance(value, bytearray):
            value = six.binary_type(value)
        return super(BinaryField, self).__set__(instance, value)
    def to_mongo(self, value):
        return Binary(value)
    def validate(self, value):
        accepted = (six.binary_type, six.text_type, Binary)
        if not isinstance(value, accepted):
            self.error('BinaryField only accepts instances of '
                       '(%s, %s, Binary)' % (
                           six.binary_type.__name__, six.text_type.__name__))
        over_limit = self.max_bytes is not None and len(value) > self.max_bytes
        if over_limit:
            self.error('Binary value is too long')
class GridFSError(Exception):
    """Raised for invalid GridFS file operations (e.g. a double ``put``)."""
class GridFSProxy(object):
    """Proxy object to handle writing and reading of files to and from GridFS
    .. versionadded:: 0.4
    .. versionchanged:: 0.5 - added optional size param to read
    .. versionchanged:: 0.6 - added collection name param
    """
    _fs = None  # lazily-created gridfs.GridFS handle (see the `fs` property)
    def __init__(self, grid_id=None, key=None,
                 instance=None,
                 db_alias=DEFAULT_CONNECTION_NAME,
                 collection_name='fs'):
        self.grid_id = grid_id  # Store GridFS id for file
        self.key = key  # field name on the owning document
        self.instance = instance  # owning document, used for change tracking
        self.db_alias = db_alias
        self.collection_name = collection_name
        self.newfile = None  # Used for partial writes
        self.gridout = None  # cached GridOut used for reads
    def __getattr__(self, name):
        # Own attributes resolve normally; anything else is forwarded to the
        # underlying GridOut file object, if one can be fetched.
        attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
                 'collection_name', 'newfile', 'gridout')
        if name in attrs:
            return self.__getattribute__(name)
        obj = self.get()
        if hasattr(obj, name):
            return getattr(obj, name)
        raise AttributeError
    def __get__(self, instance, value):
        return self
    def __nonzero__(self):
        # Truthy only once a file id has been assigned (Python 2 protocol).
        return bool(self.grid_id)
    def __getstate__(self):
        # Drop the live GridFS handle so the proxy can be pickled/copied.
        self_dict = self.__dict__
        self_dict['_fs'] = None
        return self_dict
    def __copy__(self):
        copied = GridFSProxy()
        copied.__dict__.update(self.__getstate__())
        return copied
    def __deepcopy__(self, memo):
        # The proxy only holds ids/aliases, so a shallow copy is sufficient.
        return self.__copy__()
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
    def __str__(self):
        name = getattr(
            self.get(), 'filename', self.grid_id) if self.get() else '(no file)'
        return '<%s: %s>' % (self.__class__.__name__, name)
    def __eq__(self, other):
        if isinstance(other, GridFSProxy):
            return ((self.grid_id == other.grid_id) and
                    (self.collection_name == other.collection_name) and
                    (self.db_alias == other.db_alias))
        else:
            return False
    @property
    def fs(self):
        # Lazily open and cache the GridFS handle for this alias/collection.
        if not self._fs:
            self._fs = gridfs.GridFS(
                get_db(self.db_alias), self.collection_name)
        return self._fs
    def get(self, grid_id=None):
        """Return the GridOut for `grid_id` (or the stored id), else None."""
        if grid_id:
            self.grid_id = grid_id
        if self.grid_id is None:
            return None
        try:
            if self.gridout is None:
                self.gridout = self.fs.get(self.grid_id)
            return self.gridout
        except Exception:
            # File has been deleted
            return None
    def new_file(self, **kwargs):
        """Open a new GridIn for incremental write()/writelines() calls."""
        self.newfile = self.fs.new_file(**kwargs)
        self.grid_id = self.newfile._id
        self._mark_as_changed()
    def put(self, file_obj, **kwargs):
        """Store `file_obj` in one shot; fails if a file already exists."""
        if self.grid_id:
            raise GridFSError('This document already has a file. Either delete '
                              'it or call replace to overwrite it')
        self.grid_id = self.fs.put(file_obj, **kwargs)
        self._mark_as_changed()
    def write(self, string):
        """Append to the current partial file, opening one if necessary."""
        if self.grid_id:
            if not self.newfile:
                raise GridFSError('This document already has a file. Either '
                                  'delete it or call replace to overwrite it')
        else:
            self.new_file()
        self.newfile.write(string)
    def writelines(self, lines):
        if not self.newfile:
            self.new_file()
            self.grid_id = self.newfile._id
        self.newfile.writelines(lines)
    def read(self, size=-1):
        """Read up to `size` bytes; None when no file, '' on read errors."""
        gridout = self.get()
        if gridout is None:
            return None
        else:
            try:
                return gridout.read(size)
            except Exception:
                return ''
    def delete(self):
        # Delete file from GridFS, FileField still remains
        self.fs.delete(self.grid_id)
        self.grid_id = None
        self.gridout = None
        self._mark_as_changed()
    def replace(self, file_obj, **kwargs):
        """Delete the current file (if any), then store `file_obj`."""
        self.delete()
        self.put(file_obj, **kwargs)
    def close(self):
        if self.newfile:
            self.newfile.close()
    def _mark_as_changed(self):
        """Inform the instance that `self.key` has been changed"""
        if self.instance:
            self.instance._mark_as_changed(self.key)
class FileField(BaseField):
    """A GridFS storage field.
    .. versionadded:: 0.4
    .. versionchanged:: 0.5 added optional size param for read
    .. versionchanged:: 0.6 added db_alias for multidb support
    """
    proxy_class = GridFSProxy
    def __init__(self, db_alias=DEFAULT_CONNECTION_NAME, collection_name='fs',
                 **kwargs):
        """:param db_alias: connection alias to store files under.
        :param collection_name: GridFS root collection name.
        """
        super(FileField, self).__init__(**kwargs)
        self.collection_name = collection_name
        self.db_alias = db_alias
    def __get__(self, instance, owner):
        if instance is None:
            return self
        # Check if a file already exists for this model
        grid_file = instance._data.get(self.name)
        if not isinstance(grid_file, self.proxy_class):
            grid_file = self.get_proxy_obj(key=self.name, instance=instance)
            instance._data[self.name] = grid_file
        if not grid_file.key:
            grid_file.key = self.name
            grid_file.instance = instance
        return grid_file
    def __set__(self, instance, value):
        key = self.name
        if (
            (hasattr(value, 'read') and not isinstance(value, GridFSProxy)) or
            isinstance(value, (six.binary_type, six.string_types))
        ):
            # using "FileField() = file/string" notation
            grid_file = instance._data.get(self.name)
            # If a file already exists, delete it
            if grid_file:
                try:
                    grid_file.delete()
                except Exception:
                    pass
            # Create a new proxy object as we don't already have one
            instance._data[key] = self.get_proxy_obj(
                key=key, instance=instance)
            instance._data[key].put(value)
        else:
            instance._data[key] = value
        instance._mark_as_changed(key)
    def get_proxy_obj(self, key, instance, db_alias=None, collection_name=None):
        """Build a proxy bound to this field's alias/collection.

        Subclasses (e.g. ImageField) reuse this via `proxy_class`.
        """
        if db_alias is None:
            db_alias = self.db_alias
        if collection_name is None:
            collection_name = self.collection_name
        return self.proxy_class(key=key, instance=instance,
                                db_alias=db_alias,
                                collection_name=collection_name)
    def to_mongo(self, value):
        # Store the GridFS file id in MongoDB
        if isinstance(value, self.proxy_class) and value.grid_id is not None:
            return value.grid_id
        return None
    def to_python(self, value):
        if value is not None:
            return self.proxy_class(value,
                                    collection_name=self.collection_name,
                                    db_alias=self.db_alias)
    def validate(self, value):
        # FIX: check the proxy type FIRST. Previously `value.grid_id` was
        # read before the isinstance check, so a non-proxy value raised a
        # raw AttributeError instead of a proper validation error.
        if not isinstance(value, self.proxy_class):
            self.error('FileField only accepts GridFSProxy values')
        if value.grid_id is not None and not isinstance(value.grid_id, ObjectId):
            self.error('Invalid GridFSProxy value')
class ImageGridFsProxy(GridFSProxy):
    """
    Proxy for ImageField
    versionadded: 0.6
    """
    def put(self, file_obj, **kwargs):
        """
        Insert a image in database
        applying field properties (size, thumbnail_size)
        """
        field = self.instance._fields[self.key]
        # Handle nested fields
        if hasattr(field, 'field') and isinstance(field.field, FileField):
            field = field.field
        try:
            img = Image.open(file_obj)
            img_format = img.format
        except Exception as e:
            raise ValidationError('Invalid image: %s' % e)
        # Progressive JPEG
        # TODO: fixme, at least unused, at worst bad implementation
        # NOTE(review): the value read from img.info is unconditionally
        # overwritten by the if/else below — verify intent.
        progressive = img.info.get('progressive') or False
        if (kwargs.get('progressive') and
                isinstance(kwargs.get('progressive'), bool) and
                img_format == 'JPEG'):
            progressive = True
        else:
            progressive = False
        if (field.size and (img.size[0] > field.size['width'] or
                            img.size[1] > field.size['height'])):
            # Image exceeds the configured maximum: crop-to-fit when 'force'
            # is set, otherwise an aspect-preserving in-place thumbnail.
            size = field.size
            if size['force']:
                img = ImageOps.fit(img,
                                   (size['width'],
                                    size['height']),
                                   Image.ANTIALIAS)
            else:
                img.thumbnail((size['width'],
                               size['height']),
                              Image.ANTIALIAS)
        thumbnail = None
        if field.thumbnail_size:
            size = field.thumbnail_size
            if size['force']:
                thumbnail = ImageOps.fit(
                    img, (size['width'], size['height']), Image.ANTIALIAS)
            else:
                thumbnail = img.copy()
                thumbnail.thumbnail((size['width'],
                                     size['height']),
                                    Image.ANTIALIAS)
        if thumbnail:
            thumb_id = self._put_thumbnail(thumbnail, img_format, progressive)
        else:
            thumb_id = None
        w, h = img.size
        # Re-encode the (possibly resized) image into an in-memory buffer.
        io = StringIO()
        img.save(io, img_format, progressive=progressive)
        io.seek(0)
        return super(ImageGridFsProxy, self).put(io,
                                                 width=w,
                                                 height=h,
                                                 format=img_format,
                                                 thumbnail_id=thumb_id,
                                                 **kwargs)
    def delete(self, *args, **kwargs):
        # deletes thumbnail
        out = self.get()
        if out and out.thumbnail_id:
            self.fs.delete(out.thumbnail_id)
        return super(ImageGridFsProxy, self).delete()
    def _put_thumbnail(self, thumbnail, format, progressive, **kwargs):
        """Encode and store the thumbnail image; returns its GridFS id."""
        w, h = thumbnail.size
        io = StringIO()
        thumbnail.save(io, format, progressive=progressive)
        io.seek(0)
        return self.fs.put(io, width=w,
                           height=h,
                           format=format,
                           **kwargs)
    @property
    def size(self):
        """
        return a width, height of image
        """
        out = self.get()
        if out:
            return out.width, out.height
    @property
    def format(self):
        """
        return format of image
        ex: PNG, JPEG, GIF, etc
        """
        out = self.get()
        if out:
            return out.format
    @property
    def thumbnail(self):
        """
        return a gridfs.grid_file.GridOut
        representing a thumbnail of Image
        """
        out = self.get()
        if out and out.thumbnail_id:
            return self.fs.get(out.thumbnail_id)
    def write(self, *args, **kwargs):
        # Incremental writes are unsupported: image metadata is derived in put().
        raise RuntimeError('Please use "put" method instead')
    def writelines(self, *args, **kwargs):
        raise RuntimeError('Please use "put" method instead')
class ImproperlyConfigured(Exception):
    """Raised when a required optional dependency or setting is missing."""
class ImageField(FileField):
    """
    A Image File storage field.
    @size (width, height, force):
        max size to store images, if larger will be automatically resized
        ex: size=(800, 600, True)
    @thumbnail (width, height, force):
        size to generate a thumbnail
    .. versionadded:: 0.6
    """
    proxy_class = ImageGridFsProxy
    def __init__(self, size=None, thumbnail_size=None,
                 collection_name='images', **kwargs):
        if not Image:
            raise ImproperlyConfigured('PIL library was not found')
        params_size = ('width', 'height', 'force')
        extra_args = {
            'size': size,
            'thumbnail_size': thumbnail_size
        }
        # Normalize each (width, height, force) tuple/list into a dict keyed
        # by params_size; missing positions become None.
        for att_name, att in extra_args.items():
            value = None
            if isinstance(att, (tuple, list)):
                if six.PY3:
                    value = dict(itertools.zip_longest(params_size, att,
                                                       fillvalue=None))
                else:
                    # Python 2 idiom: map(None, a, b) zips with None padding.
                    value = dict(map(None, params_size, att))
            setattr(self, att_name, value)
        super(ImageField, self).__init__(
            collection_name=collection_name,
            **kwargs)
class SequenceField(BaseField):
    """Provides a sequential counter see:
    http://www.mongodb.org/display/DOCS/Object+IDs#ObjectIDs-SequenceNumbers
    .. note::
        Although traditional databases often use increasing sequence
        numbers for primary keys. In MongoDB, the preferred approach is to
        use Object IDs instead. The concept is that in a very large
        cluster of machines, it is easier to create an object ID than have
        global, uniformly increasing sequence numbers.
    :param collection_name: Name of the counter collection (default 'mongoengine.counters')
    :param sequence_name: Name of the sequence in the collection (default 'ClassName.counter')
    :param value_decorator: Any callable to use as a counter (default int)
    Use any callable as `value_decorator` to transform calculated counter into
    any value suitable for your needs, e.g. string or hexadecimal
    representation of the default integer counter value.
    .. note::
        In case the counter is defined in the abstract document, it will be
        common to all inherited documents and the default sequence name will
        be the class name of the abstract document.
    .. versionadded:: 0.5
    .. versionchanged:: 0.8 added `value_decorator`
    """
    _auto_gen = True  # this field generates its own value on save
    COLLECTION_NAME = 'mongoengine.counters'
    VALUE_DECORATOR = int
    def __init__(self, collection_name=None, db_alias=None, sequence_name=None,
                 value_decorator=None, *args, **kwargs):
        self.collection_name = collection_name or self.COLLECTION_NAME
        self.db_alias = db_alias or DEFAULT_CONNECTION_NAME
        self.sequence_name = sequence_name
        # Fall back to VALUE_DECORATOR unless a callable was supplied.
        self.value_decorator = (callable(value_decorator) and
                                value_decorator or self.VALUE_DECORATOR)
        super(SequenceField, self).__init__(*args, **kwargs)
    def generate(self):
        """
        Generate and Increment the counter
        """
        sequence_name = self.get_sequence_name()
        sequence_id = '%s.%s' % (sequence_name, self.name)
        collection = get_db(alias=self.db_alias)[self.collection_name]
        # NOTE(review): find_and_modify is deprecated in modern PyMongo in
        # favor of find_one_and_update — verify the supported driver version.
        counter = collection.find_and_modify(query={'_id': sequence_id},
                                             update={'$inc': {'next': 1}},
                                             new=True,
                                             upsert=True)
        return self.value_decorator(counter['next'])
    def set_next_value(self, value):
        """Helper method to set the next sequence value"""
        sequence_name = self.get_sequence_name()
        sequence_id = "%s.%s" % (sequence_name, self.name)
        collection = get_db(alias=self.db_alias)[self.collection_name]
        counter = collection.find_and_modify(query={"_id": sequence_id},
                                             update={"$set": {"next": value}},
                                             new=True,
                                             upsert=True)
        return self.value_decorator(counter['next'])
    def get_next_value(self):
        """Helper method to get the next value for previewing.
        .. warning:: There is no guarantee this will be the next value
        as it is only fixed on set.
        """
        sequence_name = self.get_sequence_name()
        sequence_id = '%s.%s' % (sequence_name, self.name)
        collection = get_db(alias=self.db_alias)[self.collection_name]
        data = collection.find_one({'_id': sequence_id})
        if data:
            return self.value_decorator(data['next'] + 1)
        return self.value_decorator(1)
    def get_sequence_name(self):
        # An explicit name wins; otherwise concrete Documents use their
        # collection name and abstract/embedded ones a snake_cased class name.
        if self.sequence_name:
            return self.sequence_name
        owner = self.owner_document
        if issubclass(owner, Document) and not owner._meta.get('abstract'):
            return owner._get_collection_name()
        else:
            return ''.join('_%s' % c if c.isupper() else c
                           for c in owner._class_name).strip('_').lower()
    def __get__(self, instance, owner):
        value = super(SequenceField, self).__get__(instance, owner)
        # Lazily allocate a counter value the first time an initialised
        # document reads the field without one.
        if value is None and instance._initialised:
            value = self.generate()
            instance._data[self.name] = value
            instance._mark_as_changed(self.name)
        return value
    def __set__(self, instance, value):
        if value is None and instance._initialised:
            value = self.generate()
        return super(SequenceField, self).__set__(instance, value)
    def prepare_query_value(self, op, value):
        """
        This method is overridden in order to convert the query value into to required
        type. We need to do this in order to be able to successfully compare query
        values passed as string, the base implementation returns the value as is.
        """
        return self.value_decorator(value)
    def to_python(self, value):
        if value is None:
            value = self.generate()
        return value
class UUIDField(BaseField):
    """A UUID field.
    .. versionadded:: 0.6
    """
    _binary = None
    def __init__(self, binary=True, **kwargs):
        """
        Store UUID data in the database
        :param binary: if False store as a string.
        .. versionchanged:: 0.8.0
        .. versionchanged:: 0.6.19
        """
        self._binary = binary
        super(UUIDField, self).__init__(**kwargs)
    def to_python(self, value):
        # Binary mode stores native UUIDs, so nothing to convert.
        if self._binary:
            return value
        original = value
        try:
            text = value if isinstance(value, six.string_types) \
                else six.text_type(value)
            return uuid.UUID(text)
        except Exception:
            # Leave unparseable values untouched for validate() to report.
            return original
    def to_mongo(self, value):
        if not self._binary:
            return six.text_type(value)
        if isinstance(value, six.string_types):
            return uuid.UUID(value)
        return value
    def prepare_query_value(self, op, value):
        return None if value is None else self.to_mongo(value)
    def validate(self, value):
        if isinstance(value, uuid.UUID):
            return
        if not isinstance(value, six.string_types):
            value = str(value)
        try:
            uuid.UUID(value)
        except Exception as exc:
            self.error('Could not convert to UUID: %s' % exc)
class GeoPointField(BaseField):
    """A list storing a longitude and latitude coordinate.
    .. note:: this represents a generic point in a 2D plane and a legacy way of
        representing a geo point. It admits 2d indexes but not "2dsphere" indexes
        in MongoDB > 2.4 which are more natural for modeling geospatial points.
        See :ref:`geospatial-indexes`
    .. versionadded:: 0.4
    """
    _geo_index = pymongo.GEO2D
    def validate(self, value):
        """Make sure that a geo-value is of type (x, y)"""
        if not isinstance(value, (list, tuple)):
            self.error('GeoPointField can only accept tuples or lists '
                       'of (x, y)')
        if len(value) != 2:
            self.error('Value (%s) must be a two-dimensional point' %
                       repr(value))
        elif not all(isinstance(coord, (float, int)) for coord in value):
            self.error(
                'Both values (%s) in point must be float or int' % repr(value))
class PointField(GeoJsonBaseField):
    """A GeoJSON field storing a longitude and latitude coordinate.
    The data is represented as:
    .. code-block:: js
        {'type' : 'Point' ,
         'coordinates' : [x, y]}
    You can either pass a dict with the full information or a list
    to set the value.
    Requires mongodb >= 2.4
    .. versionadded:: 0.8
    """
    # GeoJSON type tag consumed by GeoJsonBaseField validation/serialization.
    _type = 'Point'
class LineStringField(GeoJsonBaseField):
    """A GeoJSON field storing a line of longitude and latitude coordinates.
    The data is represented as:
    .. code-block:: js
        {'type' : 'LineString' ,
         'coordinates' : [[x1, y1], [x1, y1] ... [xn, yn]]}
    You can either pass a dict with the full information or a list of points.
    Requires mongodb >= 2.4
    .. versionadded:: 0.8
    """
    # GeoJSON type tag consumed by GeoJsonBaseField validation/serialization.
    _type = 'LineString'
class PolygonField(GeoJsonBaseField):
    """A GeoJSON field storing a polygon of longitude and latitude coordinates.
    The data is represented as:
    .. code-block:: js
        {'type' : 'Polygon' ,
         'coordinates' : [[[x1, y1], [x1, y1] ... [xn, yn]],
                          [[x1, y1], [x1, y1] ... [xn, yn]]}
    You can either pass a dict with the full information or a list
    of LineStrings. The first LineString being the outside and the rest being
    holes.
    Requires mongodb >= 2.4
    .. versionadded:: 0.8
    """
    # GeoJSON type tag consumed by GeoJsonBaseField validation/serialization.
    _type = 'Polygon'
class MultiPointField(GeoJsonBaseField):
    """A GeoJSON field storing a list of Points.
    The data is represented as:
    .. code-block:: js
        {'type' : 'MultiPoint' ,
         'coordinates' : [[x1, y1], [x2, y2]]}
    You can either pass a dict with the full information or a list
    to set the value.
    Requires mongodb >= 2.6
    .. versionadded:: 0.9
    """
    # GeoJSON type tag consumed by GeoJsonBaseField validation/serialization.
    _type = 'MultiPoint'
class MultiLineStringField(GeoJsonBaseField):
    """A GeoJSON field storing a list of LineStrings.
    The data is represented as:
    .. code-block:: js
        {'type' : 'MultiLineString' ,
         'coordinates' : [[[x1, y1], [x1, y1] ... [xn, yn]],
                          [[x1, y1], [x1, y1] ... [xn, yn]]]}
    You can either pass a dict with the full information or a list of points.
    Requires mongodb >= 2.6
    .. versionadded:: 0.9
    """
    # GeoJSON type tag consumed by GeoJsonBaseField validation/serialization.
    _type = 'MultiLineString'
class MultiPolygonField(GeoJsonBaseField):
    """A GeoJSON field storing list of Polygons.
    The data is represented as:
    .. code-block:: js
        {'type' : 'MultiPolygon' ,
         'coordinates' : [[
               [[x1, y1], [x1, y1] ... [xn, yn]],
               [[x1, y1], [x1, y1] ... [xn, yn]]
           ], [
               [[x1, y1], [x1, y1] ... [xn, yn]],
               [[x1, y1], [x1, y1] ... [xn, yn]]
           ]
        }
    You can either pass a dict with the full information or a list
    of Polygons.
    Requires mongodb >= 2.6
    .. versionadded:: 0.9
    """
    # GeoJSON type tag consumed by GeoJsonBaseField validation/serialization.
    _type = 'MultiPolygon'
| {
"content_hash": "7df04e64e757044aacc39714b97f8379",
"timestamp": "",
"source": "github",
"line_count": 2055,
"max_line_length": 138,
"avg_line_length": 34.372262773722625,
"alnum_prop": 0.5806045161747009,
"repo_name": "ConsumerPhysics/mongoengine",
"id": "3991ef5c04afd8a71a7ba41338c11a1f16340580",
"size": "70635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongoengine/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1030656"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from .. import models
from ..._serialization import Deserializer, Serializer
from ._configuration import TextAnalyticsClientConfiguration
from .operations import TextAnalyticsClientOperationsMixin
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class TextAnalyticsClient(TextAnalyticsClientOperationsMixin):  # pylint: disable=client-accepts-api-version-keyword
    """The language service API is a suite of natural language processing (NLP) skills built with
    best-in-class Microsoft machine learning algorithms. The API can be used to analyze
    unstructured text for tasks such as sentiment analysis, key phrase extraction, language
    detection and question answering. Further documentation can be found in :code:`<a
    href="https://docs.microsoft.com/azure/cognitive-services/language-service/overview">https://docs.microsoft.com/azure/cognitive-services/language-service/overview</a>`.
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param endpoint: Supported Cognitive Services endpoint (e.g.,
     https://:code:`<resource-name>`.api.cognitiveservices.azure.com). Required.
    :type endpoint: str
    :keyword api_version: Api Version. Default value is "2022-10-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        endpoint: str,
        **kwargs: Any
    ) -> None:
        # The {Endpoint} placeholder is filled per-request in _send_request.
        _endpoint = '{Endpoint}/language'
        self._config = TextAnalyticsClientConfiguration(credential=credential, endpoint=endpoint, **kwargs)
        self._client = AsyncPipelineClient(base_url=_endpoint, config=self._config, **kwargs)
        # Register every generated model class with the (de)serializers.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.
        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>
        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        request_copy = deepcopy(request)
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
        return self._client.send_request(request_copy, **kwargs)
    async def close(self) -> None:
        await self._client.close()
    async def __aenter__(self) -> "TextAnalyticsClient":
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| {
"content_hash": "4340d81822f55835d1bf40f7f5aa0bed",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 174,
"avg_line_length": 46.056179775280896,
"alnum_prop": 0.7021224688948524,
"repo_name": "Azure/azure-sdk-for-python",
"id": "da09436932599da50bbe9fe15b804d6fdea7ec5c",
"size": "4567",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_text_analytics_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import copy
import datetime
import json
import uuid
import pymongo
import db_driver
import stream as pstream
# Collections:
# ["events"] = event docs
#
# ["trigger_defs"] = { 'trigger_name',
# 'stream_id',
# 'identifying_traits': {trait: value, ...},
# 'last_update',
# 'commit_errors',
# 'last_error',
# 'state',
# }
#
# ["streams'] = {'stream_id', 'message_id'}
class Stream(pstream.Stream):
    """Stream variant whose events are lazily loaded via a MongoDBDriver."""
    def __init__(self, uuid, trigger_name, state, last_update,
                 identifying_traits, driver):
        super(Stream, self).__init__(uuid, trigger_name, state, last_update,
                                     identifying_traits)
        self.driver = driver
        self.events_loaded = False
    def load_events(self):
        # Idempotent: only hit the database the first time.
        if not self.events_loaded:
            self.driver._load_events(self)
            self.events_loaded = True
    def error(self, last_exception):
        # NOTE(review): MongoDBDriver.error takes (trigger_name, stream,
        # error); this two-argument call looks inconsistent — verify.
        self.driver.error(self, last_exception)
    def commit_error(self, last_exception):
        self.driver.commit_error(self, last_exception)
class MongoDBDriver(db_driver.DBDriver):
    """Trivial DBDriver that works in a distributed fashion.
    For testing only. Do not attempt to use in production.
    """
    # NOTE(review): this module is Python 2 only (print statements,
    # dict.iteritems) and uses PyMongo APIs (ensure_index, update,
    # find_and_modify-era semantics) that are removed in modern drivers.
    def __init__(self, trigger_defs):
        super(MongoDBDriver, self).__init__(trigger_defs)
        self.client = pymongo.MongoClient()
        self.db = self.client['stacktach']
        self.events = self.db['events']
        self.events.ensure_index("message_id")
        self.events.ensure_index("when")
        self.events.ensure_index("_context_request_id")
        self.tdef_collection = self.db['trigger_defs']
        self.tdef_collection.ensure_index("trigger_name")
        self.tdef_collection.ensure_index("stream_id")
        self.tdef_collection.ensure_index("state")
        self.tdef_collection.ensure_index("last_update")
        self.tdef_collection.ensure_index("identifying_traits")
        self.streams = self.db['streams']
        self.streams.ensure_index('stream_id')
        self.streams.ensure_index('when')
    def _scrub_event(self, event):
        # MongoDB key names may not contain '.'; rewrite them to '~'
        # recursively before insertion.
        if type(event) is list:
            for x in event:
                self._scrub_event(x)
        elif type(event) is dict:
            to_delete = []
            to_add = []
            for k, v in event.iteritems():
                if '.' in k:
                    new_k = k.replace('.', '~')
                    to_delete.append(k)
                    to_add.append((new_k, v))
                    # NOTE(review): recursion only happens for dotted keys;
                    # nested values under clean keys are never scrubbed —
                    # verify whether that is intended.
                    self._scrub_event(v)
            for k in to_delete:
                del event[k]
            for k, v in to_add:
                event[k] = v
    def save_event(self, message_id, event):
        # Work on a deep copy so the caller's event dict is not mutated.
        safe = copy.deepcopy(event)
        self._scrub_event(safe)
        safe['message_id'] = message_id  # Force to known location.
        self.events.insert(safe)
    def append_event(self, message_id, trigger_def, event, trait_dict):
        # Find the stream (or make one) and tack on the message_id.
        stream_id = None
        for doc in self.tdef_collection.find({'trigger_name': trigger_def.name,
                                              'state': pstream.COLLECTING,
                                              'identifying_traits': trait_dict}
                                             ).limit(1):
            stream_id = doc['stream_id']
            break
        now = datetime.datetime.utcnow()
        update_time = True
        if not stream_id:
            # Make a new Stream for this trait_dict ...
            stream_id = str(uuid.uuid4())
            stream = {'stream_id': stream_id,
                      'trigger_name': trigger_def.name,
                      'last_update': now,
                      'identifying_traits': trait_dict,
                      'state_version': 1,
                      'commit_errors': 0,
                      'last_error': "",
                      'state': pstream.COLLECTING,
                      }
            update_time = False
            self.tdef_collection.insert(stream)
        # Add this message_id to the stream collection ...
        entry = {'stream_id': stream_id,
                 'when': event['timestamp'],
                 'message_id': message_id}
        self.streams.insert(entry)
        if update_time:
            self.tdef_collection.update({'stream_id': stream_id},
                                        {'$set': {'last_update': now}})
        return not update_time  # a new stream if we didn't update the time.
    def do_trigger_check(self, state, chunk, now=None):
        # Scan a window of COLLECTING streams (oldest first) and check each
        # against its trigger; advances/resets state.offset for paging.
        # TODO(sandy) - we need to get the expiry time as part of the
        # stream document so the search is optimal.
        num = 0
        ready = 0
        query = self.tdef_collection.find({'state': pstream.COLLECTING}).sort(
            [('last_update', pymongo.ASCENDING)]
        ).skip(state.offset).limit(chunk)
        for doc in query:
            trigger_name = doc['trigger_name']
            trigger = self.trigger_defs_dict[trigger_name]
            num += 1
            stream = self._stream_from_mongo(doc, False)
            if self._check_for_trigger(trigger, stream, now=now):
                ready += 1
        size = query.retrieved
        print "%s - checked %d (%d ready) off/lim/sz=%d/%d/%d" % (
            now, num, ready,
            state.offset, chunk, size)
        if size < chunk:
            state.offset = 0
        else:
            state.offset += num
    def purge_processed_streams(self, state, chunk):
        now = datetime.datetime.utcnow()
        print "%s - purged %d" % (now,
            self.tdef_collection.remove({'state': pstream.PROCESSED})['n'])
    def _load_events(self, stream):
        # Fetch the stream's message ids in time order, then the matching
        # event documents for each.
        events = []
        hit = False  # NOTE(review): unused — candidate for removal.
        x = self.streams.find({'stream_id': stream.uuid}) \
                        .sort('when', pymongo.ASCENDING)
        #print "Stream: %s" % stream.uuid
        for mdoc in x:
            for e in self.events.find({'message_id': mdoc['message_id']}):
                events.append(e)
                #print e['event_type'], e['payload'].get(
                #    'audit_period_beginning',
                #    "nothinghere")[-8:] == "00:00:00", e['timestamp']
        stream.set_events(events)
    def process_ready_streams(self, state, chunk, now):
        num = 0
        locked = 0
        query = self.tdef_collection.find({'state': pstream.READY}
                                          ).limit(chunk).skip(state.offset)
        for ready in query:
            # Optimistic lock: the versioned update succeeds for exactly one
            # worker; losers skip the stream.
            result = self.tdef_collection.update(
                {'_id': ready['_id'],
                 'state_version': ready['state_version']},
                {'$set': {'state': pstream.TRIGGERED},
                 '$inc': {'state_version': 1}},
                safe=True)
            if result['n'] == 0:
                locked += 1
                continue  # Someone else got it first, move to next one.
            stream = self._stream_from_mongo(ready, True)
            num += 1
            trigger = self.trigger_defs_dict[ready['trigger_name']]
            self._do_pipeline_callbacks(stream, trigger)
        size = query.retrieved
        if size < chunk:
            state.offset = 0
        else:
            state.offset += num
        print "%s - processed %d/%d (%d locked, off/lim/sz: %d/%d/%d)" % (
            now, num,
            chunk, locked,
            state.offset, chunk, size)
    def trigger(self, trigger_name, stream):
        self.tdef_collection.update({'stream_id': stream.uuid},
                                    {'$set': {'state': pstream.TRIGGERED}})
    def ready(self, trigger_name, stream):
        self.tdef_collection.update({'stream_id': stream.uuid},
                                    {'$set': {'state': pstream.READY}})
    def processed(self, trigger_name, stream):
        self.tdef_collection.update({'stream_id': stream.uuid},
                                    {'$set': {'state': pstream.PROCESSED}})
    def error(self, trigger_name, stream, error):
        self.tdef_collection.update({'stream_id': stream.uuid},
                                    {'$set': {'state': pstream.ERROR,
                                              'last_error': error}})
    def commit_error(self, trigger_name, stream, error):
        self.tdef_collection.update({'stream_id': stream.uuid},
                                    {'$set': {'state': pstream.COMMIT_ERROR,
                                              'last_error': error},
                                     '$inc': {'commit_errors': 1},
                                     })
    def get_num_active_streams(self, trigger_name):
        return self.tdef_collection.find({'trigger_name': trigger_name}
                                         ).count()
    def _stream_from_mongo(self, record, details):
        # Rehydrate a Stream object; `details` also loads its events.
        s = Stream(record['stream_id'], record['trigger_name'], record['state'],
                   record['last_update'], record['identifying_traits'], self)
        if details:
            s.load_events()
        return s
    def find_streams(self, **kwargs):
        # NOTE(review): after the first filter `query` is a Cursor, and
        # Cursor has no .find() — combining multiple filters here looks
        # broken. Verify against the pymongo version in use.
        query = self.tdef_collection
        hits = 0
        details = kwargs.get('details')
        state = kwargs.get('state')
        if state:
            query = query.find({'state': state})
            hits += 1
        name = kwargs.get('trigger_name')
        if name:
            query = query.find({'trigger_name': name})
            hits += 1
        older = kwargs.get('older_than')
        if older:
            query = query.find({'last_update': {'$lt': older}})
            hits += 1
        younger = kwargs.get('younger_than')
        if younger:
            query = query.find({'last_update': {'$gt': younger}})
            hits += 1
        if not hits:
            query = query.find({})
        return [self._stream_from_mongo(r, details).to_dict() for r in query]
    def get_stream(self, stream_id, details):
        return [self._stream_from_mongo(r, details).to_dict()
                for r in self.tdef_collection.find({'stream_id': stream_id})]
    def flush_all(self):
        # Drop all state — test helper only.
        self.db.drop_collection('trigger_defs')
        self.db.drop_collection('streams')
        self.db.drop_collection('events')
| {
"content_hash": "f43cb0a07babf8d1161993f5e2309880",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 80,
"avg_line_length": 36.72318339100346,
"alnum_prop": 0.4976915104117592,
"repo_name": "StackTach/oahu",
"id": "9bc107192850f2a7fe5a5115c2f2716a9010aa4d",
"size": "11208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oahu/mongodb_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "50471"
}
],
"symlink_target": ""
} |
"""Data pipelines to parse benchmark YAML for charts.
Extracts benchmark YAML and combines with template chart YAML for Vega
charts. Run `python _data/charts.py` to build the charts.
"""
# pylint: disable=no-value-for-parameter
import glob
import os
import json
import re
from dateutil.parser import parse
import jinja2
# pylint: disable=redefined-builtin, no-name-in-module
from toolz.curried import map, pipe, get, curry, filter, compose, juxt
from toolz.curried import valmap, itemmap, groupby, memoize, keymap, update_in
import yaml
def fcompose(*args):
    """Compose functions left-to-right (the reverse of toolz.compose).

    >>> f = lambda x: x - 2
    >>> g = lambda x: 2 * x
    >>> f(g(3))
    4
    >>> fcompose(g, f)(3)
    4

    Args:
        *args: tuple of functions, applied first-to-last

    Returns:
        the composed function
    """
    return compose(*reversed(args))
def read_yaml(filepath):
    """Read a YAML file.

    Args:
        filepath: the path to the YAML file

    Returns:
        the parsed document as a dictionary
    """
    with open(filepath) as fobj:
        return yaml.safe_load(fobj)
@curry
def write_json(data, filepath):
    """Serialize `data` to `filepath` as sorted, pretty-printed JSON.

    Args:
        data: the dictionary to write
        filepath: the path to the JSON file

    Returns:
        the (filepath, data) tuple
    """
    with open(filepath, 'w') as fobj:
        json.dump(data, fobj, sort_keys=True, indent=2)
    return (filepath, data)
def get_path():
    """Return the directory containing this file.

    Returns:
        the directory path (os.path.dirname is split()[0])
    """
    return os.path.dirname(os.path.realpath(__file__))
@curry
def update_dict(dict_, **kwargs):
    """Return a copy of `dict_` with `kwargs` merged in (kwargs win).

    Args:
        dict_: the dictionary to copy
        kwargs: the key, value pairs to add

    Returns:
        a new dictionary; the input is not mutated
    """
    merged = dict(dict_)
    merged.update(kwargs)
    return merged
@curry
def filter_data(field, yaml_data):
    """Extract one named data series from each benchmark's YAML data.

    Args:
        field: the name of the field to extract (matched case-insensitively
            against each datum's 'name' entry)
        yaml_data: iterable of (benchmark_name, meta_dict) pairs

    Returns:
        a list of Vega data entries sorted by benchmark name, each tagged
        with its benchmark name and given an extra `x > 0.01` filter
        transform
    """
    return pipe(
        yaml_data,
        dict,
        # keep only each benchmark's 'data' list
        valmap(lambda val: val['data']),
        # select the single datum whose name matches `field`
        valmap(filter(lambda item: item['name'].lower() == field)),
        valmap(list),
        valmap(get(0)),
        # tag each datum with the benchmark it came from
        itemmap(lambda item: (item[0], update_dict(item[1], name=item[0]))),
        # flatten to a list ordered by benchmark name
        lambda dict_: sorted(list(dict_.values()),
                             key=lambda item: item['name']),
        # append a Vega transform dropping points with x <= 0.01
        map(update_in(keys=['transform'],
                      func=lambda x: x + [dict(expr="datum.x > 0.01",
                                               type="filter")]))
    )
@curry
def filter_memory_data(yaml_data):
    """Extract memory-versus-time-efficiency data from the meta.yaml's.

    Args:
        yaml_data: iterable of (benchmark_name, meta_dict) pairs

    Returns:
        a list of Vega data entries (one per simulation, sorted by name),
        each holding the sim_time / wall_time ratio and the peak memory
        usage in KB
    """
    def time_ratio(data):
        """Calculate the sim_time over wall_time ratio from the last
        run_time record.
        """
        return pipe(
            data[-1],
            # fall back to a single 'time' field when sim/wall are absent
            juxt(lambda x: x.get('sim_time', x.get('time')),
                 lambda x: x.get('wall_time', x.get('time'))),
            lambda x: float(x[0]) / float(x[1])
        )
    def memory_usage(data):
        """Calculate the memory usage in KB.
        """
        # conversion factors from each unit into KB
        unit_map = dict(GB=1048576.,
                        KB=1.,
                        MB=1024.,
                        B=1. / 1024.)
        # accept either a single record or a list (use the last record)
        if isinstance(data, dict):
            data_ = data
        else:
            data_ = data[-1]
        # the value key is named inconsistently; take any key containing
        # 'value'
        key = next(k for k in data_.keys() if 'value' in k)
        return float(data_[key]) * unit_map[data_.get('unit', 'KB')]
    def make_datum(data):
        """Build the Vega data item for one simulation.
        """
        return dict(
            name='efficiency',
            values=[dict(time_ratio=time_ratio(data['run_time']),
                         memory_usage=memory_usage(data['memory_usage']))],
        )
    return pipe(
        yaml_data,
        dict,
        valmap(lambda x: x['data']),
        # keep only the memory_usage and run_time series
        valmap(
            filter(
                lambda item: item['name'].lower() in ('memory_usage',
                                                      'run_time')
            )
        ),
        valmap(map(lambda x: (x['name'], x['values']))),
        valmap(dict),
        valmap(make_datum),
        # tag each datum with the simulation it came from
        itemmap(lambda item: (item[0], update_dict(item[1], name=item[0]))),
        lambda dict_: sorted(list(dict_.values()),
                             key=lambda item: item['name'])
    )
def get_yaml_data():
    """Read every simulation's meta.yaml (ungrouped).

    Returns:
        (name, data_dict) pairs, sorted by path, excluding the example
        and test entries
    """
    pattern = os.path.join(get_path(), 'simulations/*/meta.yaml')
    excluded = ('example', 'example_minimal', 'test_lander')
    pairs = []
    for path_ in sorted(glob.glob(pattern)):
        name = os.path.split(os.path.split(path_)[0])[1]
        if name not in excluded:
            pairs.append((name, read_yaml(path_)))
    return pairs
def vega2to3(data):
    """Transform a Vega data list from version 2 to 3.

    Renames the transform keys `test` -> `expr` and `field` -> `as` and
    guarantees every entry carries a 'transform' list.

    Args:
        data: vega data list

    Returns:
        updated vega data list (input entries are not mutated)
    """
    renames = dict(test='expr', field='as')

    def upgrade(transforms):
        # rewrite the keys of every transform step
        return [{renames.get(key, key): val for key, val in step.items()}
                for step in transforms]

    upgraded = []
    for datum in data:
        new_datum = dict(datum)
        new_datum['transform'] = upgrade(datum.get('transform', []))
        upgraded.append(new_datum)
    return upgraded
def get_data(filter_func):
    """Read the YAML data and group it by benchmark id.

    Args:
        filter_func: function to filter each group's simulation data

    Returns:
        a dictionary mapping "<benchmark id>.<version>" to the filtered,
        Vega-3 data for that benchmark
    """
    grouped = {}
    for name, meta in get_yaml_data():
        benchmark_id = "{0}.{1}".format(meta['benchmark']['id'],
                                        str(meta['benchmark']['version']))
        grouped.setdefault(benchmark_id, []).append((name, meta))
    return {key: vega2to3(filter_func(items))
            for key, items in grouped.items()}
def get_chart_file(j2_file_name):
    """Return the absolute path of a chart template.

    Args:
        j2_file_name: file name under the charts/ directory

    Returns:
        the chart template path
    """
    charts_dir = os.path.join(get_path(), 'charts')
    return os.path.join(charts_dir, j2_file_name)
@curry
def write_chart_json(j2_file_name, item):
    """Write a chart JSON file.

    Args:
        j2_file_name: the name of the Jinja template file
        item: a (benchmark_id, chart_dict) pair

    Returns:
        returns the (filepath, json_data) pair
    """
    # Build the output name from "<benchmark_id>_<template>": the regex
    # drops the dot after the id letter and swaps .yaml.j2 for .json,
    # e.g. ('1a.1', 'free_energy.yaml.j2') -> '1a1_free_energy.json'.
    file_name = fcompose(
        lambda x: r"{0}_{1}".format(x, j2_file_name),
        lambda x: re.sub(r"([0-9]+[abcd])\.(.+)\.yaml\.j2",
                         r"\1\2.json",
                         x)
    )
    return pipe(
        item[0],
        file_name,
        # charts live next to the site data in _data/charts
        lambda file_: os.path.join(get_path(), '../_data/charts', file_),
        write_json(item[1])
    )
@memoize
def get_marks():
    """Load marks.yaml once (memoized) for the free energy charts.

    Returns:
        the dictionary defined in marks.yaml
    """
    return read_yaml(os.path.join(get_path(), 'marks.yaml'))
def process_chart(id_, data, j2_file_name):
    """Render a chart template with data and parse the resulting YAML.

    Args:
        id_: the benchmark ID
        data: the data to render the template with
        j2_file_name: the name of the j2 file to process

    Returns:
        the rendered chart as a dictionary
    """
    return pipe(
        get_chart_file(j2_file_name),
        render_yaml(data=data, id_=id_, marks=get_marks()[id_]),
        # safe_load: consistent with read_yaml above, and avoids the
        # arbitrary-object construction of bare yaml.load
        yaml.safe_load
    )
def to_datetime(datetime_str, format_="%Y/%m/%d %H:%M:%S"):
    """Jinja filter: reformat a parseable date string as `format_`."""
    parsed = parse(datetime_str)
    return parsed.strftime(format_)
@curry
def render_yaml(tpl_path, **kwargs):
    """Render a yaml Jinja2 template.

    Args:
        tpl_path: path to the YAML jinja template
        **kwargs: data to render in the template

    Returns:
        the rendered template string
    """
    directory, filename = os.path.split(tpl_path)
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(directory or './'))
    # custom filters available inside the templates
    env.filters['to_yaml'] = yaml.dump
    env.filters['to_datetime'] = to_datetime
    return env.get_template(filename).render(**kwargs)
def main(filter_func, j2_file_name):
    """Generate the chart JSON files for every benchmark.

    Args:
        filter_func: function to filter simulation YAML data
        j2_file_name: the j2 template the data is rendered into

    Returns:
        dict mapping each output file path to the chart JSON written there
    """
    written = {}
    for benchmark_id, data in get_data(filter_func).items():
        chart = process_chart(benchmark_id, data, j2_file_name)
        path, contents = write_chart_json(j2_file_name, (benchmark_id, chart))
        written[path] = contents
    return written
def landing_page_j2():
    """Return the path of the landing page chart template."""
    charts_dir = os.path.join(get_path(), 'charts')
    return os.path.join(charts_dir, 'simulations.yaml.j2')
def landing_page_json():
    """Generate the landing page JSON vega spec.

    Lays the eight benchmark free-energy thumbnails out on a 4-column
    grid and renders them through the simulations.yaml.j2 template into
    _data/charts/simulations.json.

    Returns:
        the (filepath, chart_json) pair from j2_to_json
    """
    def extract_id(name):
        """Extract the benchmark ID from a png path.
        """
        return name.replace("../images/", "").replace('_free_energy.png', '')
    return pipe(
        ['1a.1_free_energy.png',
         '1b.1_free_energy.png',
         '1c.1_free_energy.png',
         '1d.1_free_energy.png',
         '2a.1_free_energy.png',
         '2b.1_free_energy.png',
         '2c.1_free_energy.png',
         '2d.1_free_energy.png'],
        map(lambda name: os.path.join("..", 'images', name)),
        enumerate,
        # place each image on a 4-wide grid with a link to its benchmark
        map(
            lambda tup: (
                lambda count, name: dict(path=name,
                                         col=(count % 4),
                                         row=count // 4,
                                         link=extract_id(name))
            )(*tup)
        ),
        list,
        lambda data: j2_to_json(landing_page_j2(),
                                os.path.join(get_path(),
                                             '../_data/charts',
                                             'simulations.json'),
                                data=data)
    )
def j2_to_json(path_in, path_out, **kwargs):
    """Render a yaml.j2 chart template to a JSON file.

    Args:
        path_in: the j2 template path
        path_out: the JSON path to write to
        kwargs: data to pass to the j2 template

    Returns:
        the (file path, JSON data) pair from write_json
    """
    return pipe(
        render_yaml(path_in, **kwargs),
        # safe_load: consistent with read_yaml above, and avoids the
        # arbitrary-object construction of bare yaml.load
        yaml.safe_load,
        write_json(filepath=path_out)
    )
if __name__ == "__main__":
    # Build every chart artifact: the free-energy and memory charts for
    # each benchmark, then the landing-page grid spec.
    main(filter_data('free_energy'), 'free_energy.yaml.j2')
    main(filter_memory_data, 'memory.yaml.j2')
    landing_page_json()
| {
"content_hash": "f87a7b56aea5e535464e97c5d5844ee8",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 79,
"avg_line_length": 25.159550561797754,
"alnum_prop": 0.5291175419792783,
"repo_name": "usnistgov/chimad-phase-field",
"id": "522556748bd074dbad68627d5ed135c042f362ff",
"size": "11196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_data/simulations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6889"
},
{
"name": "CoffeeScript",
"bytes": "47586"
},
{
"name": "HTML",
"bytes": "9422501"
},
{
"name": "JavaScript",
"bytes": "214584"
},
{
"name": "Jupyter Notebook",
"bytes": "9659080"
},
{
"name": "Makefile",
"bytes": "1231"
},
{
"name": "Python",
"bytes": "22628"
}
],
"symlink_target": ""
} |
"""
Support for Loop Energy sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.loop_energy/
"""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.util import convert
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# Platform identifier used in Home Assistant configuration.
DOMAIN = "loopenergy"
# Dependency installed on demand by Home Assistant.
REQUIREMENTS = ['pyloopenergy==0.0.10']
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Loop Energy sensors."""
    import pyloopenergy

    # Pull credentials and gas options out of the platform config.
    elec_serial = config.get('electricity_serial')
    elec_secret = config.get('electricity_secret')
    gas_serial = config.get('gas_serial')
    gas_secret = config.get('gas_secret')
    gas_type = config.get('gas_type', 'metric')
    gas_calorific = convert(config.get('gas_calorific'), float, 39.11)

    # Electricity credentials are mandatory.
    if not (elec_serial and elec_secret):
        _LOGGER.error(
            "Configuration Error, "
            "please make sure you have configured electricity "
            "serial and secret tokens")
        return None

    # Gas credentials are optional but must come as a pair
    # (exactly one of the two being set is a config error).
    if bool(gas_serial) != bool(gas_secret):
        _LOGGER.error(
            "Configuration Error, "
            "please make sure you have configured gas "
            "serial and secret tokens")
        return None

    if gas_type not in ('imperial', 'metric'):
        _LOGGER.error(
            "Configuration Error, 'gas_type' "
            "can only be 'imperial' or 'metric' ")
        return None

    # pylint: disable=too-many-function-args
    controller = pyloopenergy.LoopEnergy(
        elec_serial,
        elec_secret,
        gas_serial,
        gas_secret,
        gas_type,
        gas_calorific
    )

    def stop_loopenergy(event):
        """Shutdown loopenergy thread on exit."""
        _LOGGER.info("Shutting down loopenergy.")
        controller.terminate()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_loopenergy)

    devices = [LoopEnergyElec(controller)]
    if gas_serial:
        devices.append(LoopEnergyGas(controller))
    add_devices(devices)
# pylint: disable=too-many-instance-attributes
class LoopEnergyDevice(Entity):
    """Base implementation shared by the Loop Energy sensors."""

    # pylint: disable=too-many-arguments
    def __init__(self, controller):
        """Initialize the sensor."""
        self._controller = controller
        self._name = None
        self._state = None
        self._unit_of_measurement = 'kW'

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    def _callback(self):
        # Push the freshly received reading to Home Assistant.
        self.update_ha_state(True)
# pylint: disable=too-many-instance-attributes
class LoopEnergyElec(LoopEnergyDevice):
    """Loop Energy electricity usage sensor."""

    # pylint: disable=too-many-arguments
    def __init__(self, controller):
        """Initialize the sensor and subscribe for updates."""
        super(LoopEnergyElec, self).__init__(controller)
        self._name = 'Power Usage'
        # NOTE: 'subscribe_elecricity' (sic) is the pyloopenergy API name.
        self._controller.subscribe_elecricity(self._callback)

    def update(self):
        """Get the cached Loop energy reading."""
        reading = self._controller.electricity_useage
        self._state = round(reading, 2)
# pylint: disable=too-many-instance-attributes
class LoopEnergyGas(LoopEnergyDevice):
    """Loop Energy gas usage sensor."""

    # pylint: disable=too-many-arguments
    def __init__(self, controller):
        """Initialize the sensor and subscribe for updates."""
        super(LoopEnergyGas, self).__init__(controller)
        self._name = 'Gas Usage'
        self._controller.subscribe_gas(self._callback)

    def update(self):
        """Get the cached Loop energy reading."""
        reading = self._controller.gas_useage
        self._state = round(reading, 2)
| {
"content_hash": "faca66bef4bae501aa9159634330c352",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 74,
"avg_line_length": 29.29078014184397,
"alnum_prop": 0.6409200968523002,
"repo_name": "Zyell/home-assistant",
"id": "31b957192d9f65c8753a962490513eb09a2b20a7",
"size": "4130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/sensor/loopenergy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "798938"
},
{
"name": "Python",
"bytes": "771451"
},
{
"name": "Shell",
"bytes": "5097"
}
],
"symlink_target": ""
} |
import logging
__author__ = 'Apollo'
class BaseInitializer(object):
    """Common base for worker initializers.

    Stores the worker count, logger name, options dict and an optional
    result backend; subclasses implement `start`.
    """

    def __init__(self, count, logger, options, result_backend=None, *args, **kwargs):
        self.count = count
        self.result_backend = result_backend
        self.logger = logger
        self.options = options

    def start(self, no_runner=False):
        """
        start workers and return a runner instance
        :return: a runner instance to run
        :rtype: BaseRunner
        """
        raise NotImplementedError()

    def log_error(self, message, exception=None, include_traceback=True):
        """Log `message` (plus optional exception and traceback) to the
        configured logger."""
        import traceback
        trace = traceback.format_exc() if include_traceback else ""
        logging.getLogger(self.logger).error(
            "{} : {}\n{}".format(message, exception or "", trace)
        )
class BaseRunner(object):
    """Base class for runners; subclasses execute the supplied callable."""

    def run(self, function, args, kwargs):
        """Execute `function` with `args`/`kwargs`; no-op in the base class."""
        return None
| {
"content_hash": "b20919e82716881bff013c16cc341cee",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 85,
"avg_line_length": 26.37142857142857,
"alnum_prop": 0.5742145178764897,
"repo_name": "inb-co/easy-job",
"id": "5a4ef84b30ae1bf0d2e3cda979854fb50d950ef4",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easy_job/workers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60031"
}
],
"symlink_target": ""
} |
from catdtree import BaseDecisionTree
from scipy import stats
class C45(BaseDecisionTree):
    """C4.5 decision tree for classification.

    This class implements a decision tree using the C4.5 algorithm for
    building it.

    References:
        * Quinlan, J. R. C4.5: Programs for Machine Learning. Morgan Kaufmann
          Publishers, 1993.
    """

    def __init__(self, criterion='entropy'):
        """Construct the C4.5 tree.

        Args:
            * criterion: (default='entropy') string. The function to measure
              the quality of a split. (TODO: 'gini')
        """
        BaseDecisionTree.__init__(self)
        self.criterion = criterion

    def _choose_best_split(self, X_part, y_part):
        """Choose the best split according to the C4.5 algorithm.

        Args:
            * X_part: pandas.DataFrame. The data of the independent variables
              which reach the current tree node.
            * y_part: pandas.Series. The data of the dependent variable
              regarding `X_part`.

        Returns:
            A list of (condition_str, split_filter) tuples, one per branch,
            or None when no valid split exists. For more info see the docs
            of catdtree.TreeNode.__init__.
        """
        def compile_split_filter_and_text(split):
            # `split` is (feature, value, info_gain); `value` is None for
            # categorical features.
            # BUG FIX: was `if split[1]:`, which misclassified a numerical
            # split at value 0 (falsy) as categorical.
            if split[1] is not None:  # numerical feature
                def split_filter_1(X, y):
                    branch_data_mask = X[split[0]] <= split[1]
                    return (X[branch_data_mask], y[branch_data_mask])

                def split_filter_2(X, y):
                    branch_data_mask = X[split[0]] > split[1]
                    return (X[branch_data_mask], y[branch_data_mask])
                cond_1_str = split[0] + u' <= ' + unicode(split[1])
                cond_2_str = split[0] + u' > ' + unicode(split[1])
                return [(cond_1_str, split_filter_1),
                        (cond_2_str, split_filter_2)]
            else:  # categorical feature: one branch per observed value
                values = X_part[split[0]].unique()
                compiled_split = []
                for value in values:
                    # BUG FIX: bind the current `value` via a default
                    # argument. The original closure captured the loop
                    # variable late, so every branch filtered on the
                    # *last* value only.
                    def split_filter(X, y, value=value):
                        branch_data_mask = X[split[0]] == value
                        return (X[branch_data_mask], y[branch_data_mask])
                    cond_str = split[0] + u' is ' + unicode(value)
                    compiled_split.append((cond_str, split_filter))
                return compiled_split

        classes = y_part.unique()
        class_support = [sum(y_part == c) / float(len(y_part)) for c in classes]
        parent_support = len(y_part)
        if self.criterion == 'entropy':
            # compute the entropy of all the data reaching this node
            parent_entropy = stats.entropy(class_support, base=2)
        else:
            raise NotImplementedError('TODO: gini')
        # candidate splits as (feature, value-or-None, info_gain) tuples
        splits = []
        for feature, dtype in zip(X_part, X_part.dtypes):
            if dtype == object:  # categorical feature
                # Create a branch for each value of the categorical feature.
                values = X_part[feature].unique()
                if len(values) < 2:
                    # we don't want to split on a feature with just one value
                    continue
                branches_entropy = 0  # the accumulated entropy of all branches
                for value in values:
                    branch_data_mask = X_part[feature] == value
                    y_branch = y_part[branch_data_mask]
                    branch_support = len(y_branch)
                    if branch_support == 0:
                        continue
                    branch_class_support = [sum(y_branch == c) / float(branch_support) for c in classes]
                    if self.criterion == 'entropy':
                        branch_entropy = stats.entropy(branch_class_support, base=2)
                        # weight each branch's entropy by its share of rows
                        branches_entropy += branch_entropy * float(branch_support) / parent_support
                    else:
                        raise NotImplementedError('TODO: gini')
                info_gain = parent_entropy - branches_entropy
                splits.append((feature, None, info_gain))
            else:  # numerical feature
                # Try out all binary splits of the data over the numerical
                # feature. Choose the best value for the split using
                # information gain.
                split_values = X_part[feature].unique()
                value_splits = []
                for value in split_values:
                    branches_entropy = 0
                    branch_data_mask = X_part[feature] <= value
                    y_child_1 = y_part[branch_data_mask]
                    y_child_2 = y_part[~branch_data_mask]
                    ch_1_support = len(y_child_1)
                    ch_2_support = len(y_child_2)
                    if ch_1_support == 0 or ch_2_support == 0:
                        continue
                    ch_1_class_support = [sum(y_child_1 == c) / float(ch_1_support) for c in classes]
                    ch_2_class_support = [sum(y_child_2 == c) / float(ch_2_support) for c in classes]
                    if self.criterion == 'entropy':
                        child_1_entropy = stats.entropy(ch_1_class_support, base=2)
                        child_2_entropy = stats.entropy(ch_2_class_support, base=2)
                        branches_entropy += child_1_entropy * float(ch_1_support) / parent_support \
                            + child_2_entropy * float(ch_2_support) / parent_support
                    else:
                        raise NotImplementedError('TODO: gini')
                    info_gain = parent_entropy - branches_entropy
                    value_splits.append((feature, value, info_gain))
                if value_splits:
                    # if there are any valid splits on the numerical
                    # feature, keep the best value to split on
                    splits.append(max(value_splits, key=lambda x: x[2]))
        if splits:
            # if there is any valid split of the data on the features
            # return the best split
            best_split = max(splits, key=lambda x: x[2])
            return compile_split_filter_and_text(best_split)
| {
"content_hash": "d193f6753a8692364e4f407e27045efa",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 104,
"avg_line_length": 46.6015037593985,
"alnum_prop": 0.5175863181671507,
"repo_name": "idanivanov/catdtree",
"id": "69c4f017149daa7eca8b9571a542a48cbd5a009a",
"size": "6198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catdtree/classification/c45.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14926"
}
],
"symlink_target": ""
} |
"""
Healthcare API client.
"""
from __future__ import unicode_literals
import operator
from django.conf import settings
from .backends import comparisons
from .backends.base import get_backend
from .exceptions import PatientDoesNotExist, ProviderDoesNotExist
class CategoryWrapper(object):
    "Simple wrapper to translate a category (patient/provider) of backend calls."

    # field-lookup suffix -> backend comparison constant
    _lookup_mapping = {
        '': comparisons.EQUAL,
        'exact': comparisons.EQUAL,
        'like': comparisons.LIKE,
        'in': comparisons.IN,
        'lt': comparisons.LT,
        'lte': comparisons.LTE,
        'gt': comparisons.GT,
        'gte': comparisons.GTE,
    }

    def __init__(self, backend, category):
        self.backend = backend
        self.category = category

    def get(self, id, **kwargs):
        "Fetch a single record through the backend's get_<category> call."
        getter = getattr(self.backend, 'get_{category}'.format(category=self.category))
        return getter(id, **kwargs)

    def create(self, **kwargs):
        "Create a record through the backend's create_<category> call."
        creator = getattr(self.backend, 'create_{category}'.format(category=self.category))
        return creator(kwargs)

    def update(self, id, **kwargs):
        "Update a record; returns True on success."
        updater = getattr(self.backend, 'update_{category}'.format(category=self.category))
        return bool(updater(id, kwargs))

    def delete(self, id):
        "Delete a record; returns True on success."
        deleter = getattr(self.backend, 'delete_{category}'.format(category=self.category))
        return bool(deleter(id))

    def _translate_filter_expression(self, name, value):
        "Convert a field lookup into the appropriate backend call."
        pieces = name.split('__')
        field_name = pieces[0]
        lookup = pieces[-1] if len(pieces) > 1 else ''
        comparison = self._lookup_mapping.get(lookup)
        if comparison is None:
            raise TypeError("Invalid lookup type: {0}".format(lookup))
        return (field_name, comparison, value)

    def filter(self, **kwargs):
        "Run a filtered query through the backend's filter_<category>s call."
        expressions = [self._translate_filter_expression(name, value)
                       for name, value in kwargs.items()]
        finder = getattr(self.backend, 'filter_{category}s'.format(category=self.category))
        return finder(expressions)
class PatientWrapper(CategoryWrapper):
    "Wrapper around backend patient calls."

    def __init__(self, backend):
        super(PatientWrapper, self).__init__(backend, 'patient')

    def get(self, id, source=None):
        "Fetch a patient; raises PatientDoesNotExist when nothing matches."
        record = super(PatientWrapper, self).get(id, source=source)
        if record is not None:
            return record
        if source:
            message = "Patient ID {0} for {1} was not found".format(id, source)
        else:
            message = "Patient ID {0} was not found".format(id)
        raise PatientDoesNotExist(message)

    def link(self, id, source_id, source_name):
        "Associate a source-specific id with the patient; True on success."
        return bool(self.backend.link_patient(id, source_id, source_name))

    def unlink(self, id, source_id, source_name):
        "Remove a source-specific id association; True on success."
        return bool(self.backend.unlink_patient(id, source_id, source_name))
class ProviderWrapper(CategoryWrapper):
    "Wrapper around backend provider calls."

    def __init__(self, backend):
        super(ProviderWrapper, self).__init__(backend, 'provider')

    def get(self, id):
        "Fetch a provider; raises ProviderDoesNotExist when nothing matches."
        record = super(ProviderWrapper, self).get(id)
        if record is None:
            raise ProviderDoesNotExist("Provider ID {0} was not found".format(id))
        return record
class HealthcareAPI(object):
    "API Client for accessing healthcare data via the configured backend."

    def __init__(self, backend):
        storage = get_backend(backend)
        self.backend = storage
        self.patients = PatientWrapper(storage)
        self.providers = ProviderWrapper(storage)
# Dotted path of the storage backend; override with the
# HEALTHCARE_STORAGE_BACKEND Django setting.
STORAGE_BACKEND = getattr(settings, 'HEALTHCARE_STORAGE_BACKEND', 'healthcare.backends.djhealth.DjangoStorage')
# Module-level singleton client used by the rest of the app.
client = HealthcareAPI(STORAGE_BACKEND)
| {
"content_hash": "fef6fdfc5ecd21926e18bb0dbcc6751c",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 111,
"avg_line_length": 32.61344537815126,
"alnum_prop": 0.6413295542385983,
"repo_name": "caktus/rapidsms-healthcare",
"id": "853fff049072543d995fd8def72c484711cfd239",
"size": "3881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "healthcare/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "55191"
},
{
"name": "Shell",
"bytes": "5122"
}
],
"symlink_target": ""
} |
import unittest
from test import support
import collections, random, string
import collections.abc
import gc, weakref
import pickle
class DictTest(unittest.TestCase):
def test_invalid_keyword_arguments(self):
class Custom(dict):
pass
for invalid in {1 : 2}, Custom({1 : 2}):
with self.assertRaises(TypeError):
dict(**invalid)
with self.assertRaises(TypeError):
{}.update(**invalid)
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(dict(), {})
self.assertIsNot(dict(), {})
def test_literal_constructor(self):
# check literal constructor for different sized dicts
# (to exercise the BUILD_MAP oparg).
for n in (0, 1, 6, 256, 400):
items = [(''.join(random.sample(string.ascii_letters, 8)), i)
for i in range(n)]
random.shuffle(items)
formatted_items = ('{!r}: {:d}'.format(k, v) for k, v in items)
dictliteral = '{' + ', '.join(formatted_items) + '}'
self.assertEqual(eval(dictliteral), dict(items))
def test_bool(self):
self.assertIs(not {}, True)
self.assertTrue({1: 2})
self.assertIs(bool({}), False)
self.assertIs(bool({1: 2}), True)
def test_keys(self):
d = {}
self.assertEqual(set(d.keys()), set())
d = {'a': 1, 'b': 2}
k = d.keys()
self.assertEqual(set(k), {'a', 'b'})
self.assertIn('a', k)
self.assertIn('b', k)
self.assertIn('a', d)
self.assertIn('b', d)
self.assertRaises(TypeError, d.keys, None)
self.assertEqual(repr(dict(a=1).keys()), "dict_keys(['a'])")
def test_values(self):
d = {}
self.assertEqual(set(d.values()), set())
d = {1:2}
self.assertEqual(set(d.values()), {2})
self.assertRaises(TypeError, d.values, None)
self.assertEqual(repr(dict(a=1).values()), "dict_values([1])")
def test_items(self):
d = {}
self.assertEqual(set(d.items()), set())
d = {1:2}
self.assertEqual(set(d.items()), {(1, 2)})
self.assertRaises(TypeError, d.items, None)
self.assertEqual(repr(dict(a=1).items()), "dict_items([('a', 1)])")
def test_contains(self):
d = {}
self.assertNotIn('a', d)
self.assertFalse('a' in d)
self.assertTrue('a' not in d)
d = {'a': 1, 'b': 2}
self.assertIn('a', d)
self.assertIn('b', d)
self.assertNotIn('c', d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = {}
self.assertEqual(len(d), 0)
d = {'a': 1, 'b': 2}
self.assertEqual(len(d), 2)
def test_getitem(self):
d = {'a': 1, 'b': 2}
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = {}
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = {1:1, 2:2, 3:3}
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = {}
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
class Exc(Exception): pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def __next__(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, {}.update, badseq())
self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = {}
self.assertIsNot(d.fromkeys('abc'), d)
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assertIsInstance(dictlike.fromkeys('a'), dictlike)
self.assertIsInstance(dictlike().fromkeys('a'), dictlike)
class mydict(dict):
def __new__(cls):
return collections.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assertIsInstance(ud, collections.UserDict)
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
# test fast path for dictionary inputs
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
class baddict3(dict):
def __new__(cls):
return d
d = {i : i for i in range(10)}
res = d.copy()
res.update(a=None, b=None, c=None)
self.assertEqual(baddict3.fromkeys({"a", "b", "c"}), res)
def test_copy(self):
d = {1:1, 2:2, 3:3}
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
self.assertEqual({}.copy(), {})
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
d = {}
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
d = {'a': 1, 'b': 2}
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = {}
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key0', [])
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
def test_setdefault_atomic(self):
# Issue #13521: setdefault() calls __hash__ and __eq__ only once.
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
y = {hashed1: 5}
hashed2 = Hashed()
y.setdefault(hashed2, [])
self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
def test_setitem_atomic_at_resize(self):
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
# 5 items
y = {hashed1: 5, 0: 0, 1: 1, 2: 2, 3: 3}
hashed2 = Hashed()
# 6th item forces a resize
y[hashed2] = []
self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
    def test_popitem(self):
        # dict.popitem()
        # Builds dicts of sizes 1..2**11 whose values equal int(key) and
        # drains them, checking each popped pair is internally consistent.
        for copymode in -1, +1:
            # -1: b has same structure as a
            # +1: b is a.copy()
            for log2size in range(12):
                size = 2**log2size
                a = {}
                b = {}
                for i in range(size):
                    a[repr(i)] = i
                    if copymode < 0:
                        b[repr(i)] = i
                if copymode > 0:
                    b = a.copy()
                for i in range(size):
                    ka, va = ta = a.popitem()
                    self.assertEqual(va, int(ka))
                    kb, vb = tb = b.popitem()
                    self.assertEqual(vb, int(kb))
                    if support.check_impl_detail():
                        # CPython detail: dicts built identically pop their
                        # items in the same order.
                        self.assertFalse(copymode < 0 and ta != tb)
                self.assertFalse(a)
                self.assertFalse(b)
        # popitem() on an empty dict raises KeyError.
        d = {}
        self.assertRaises(KeyError, d.popitem)
    def test_pop(self):
        # Tests for pop with specified key
        # pop(k) removes and returns the value; pop of a missing key raises
        # KeyError unless a default is supplied.
        d = {}
        k, v = 'abc', 'def'
        d[k] = v
        self.assertRaises(KeyError, d.pop, 'ghi')
        self.assertEqual(d.pop(k), v)
        self.assertEqual(len(d), 0)
        self.assertRaises(KeyError, d.pop, k)
        self.assertEqual(d.pop(k, v), v)
        d[k] = v
        self.assertEqual(d.pop(k, 1), v)
        self.assertRaises(TypeError, d.pop)
        # A key whose __hash__ raises must propagate out of pop().
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.pop, x)
def test_mutating_iteration(self):
# changing dict size during iteration
d = {}
d[1] = 1
with self.assertRaises(RuntimeError):
for i in d:
d[i+1] = 1
    def test_mutating_lookup(self):
        # changing dict during a lookup (issue #14417)
        class NastyKey:
            mutate_dict = None
            def __init__(self, value):
                self.value = value
            def __hash__(self):
                # hash collision!
                return 1
            def __eq__(self, other):
                # On the first comparison after arming, delete `key` from
                # `mydict` mid-lookup — this used to crash the interpreter.
                if NastyKey.mutate_dict:
                    mydict, key = NastyKey.mutate_dict
                    NastyKey.mutate_dict = None
                    del mydict[key]
                return self.value == other.value
        key1 = NastyKey(1)
        key2 = NastyKey(2)
        d = {key1: 1}
        NastyKey.mutate_dict = (d, key1)
        # Inserting key2 collides with key1, whose comparison removes key1.
        d[key2] = 2
        self.assertEqual(d, {key2: 2})
    def test_repr(self):
        d = {}
        self.assertEqual(repr(d), '{}')
        d[1] = 2
        self.assertEqual(repr(d), '{1: 2}')
        d = {}
        d[1] = d
        # A self-referencing dict renders the cycle as '...' instead of
        # recursing forever.
        self.assertEqual(repr(d), '{1: {...}}')
        # An exception raised by a value's __repr__ propagates out of
        # repr(dict).
        class Exc(Exception): pass
        class BadRepr(object):
            def __repr__(self):
                raise Exc()
        d = {1: BadRepr()}
        self.assertRaises(Exc, repr, d)
    def test_eq(self):
        self.assertEqual({}, {})
        self.assertEqual({1: 2}, {1: 2})
        # An exception raised by a key's __eq__ must propagate out of dict
        # equality comparison.
        class Exc(Exception): pass
        class BadCmp(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 1
        d1 = {BadCmp(): 1}
        d2 = {1: 1}
        with self.assertRaises(Exc):
            d1 == d2
def test_keys_contained(self):
self.helper_keys_contained(lambda x: x.keys())
self.helper_keys_contained(lambda x: x.items())
    def helper_keys_contained(self, fn):
        # Test rich comparisons against dict key views, which should behave the
        # same as sets.
        # `fn` extracts a view (keys() or items()) from a dict.
        empty = fn(dict())
        empty2 = fn(dict())
        smaller = fn({1:1, 2:2})
        larger = fn({1:1, 2:2, 3:3})
        larger2 = fn({1:1, 2:2, 3:3})
        larger3 = fn({4:1, 2:2, 3:3})
        # Proper subset/superset relations.
        self.assertTrue(smaller < larger)
        self.assertTrue(smaller <= larger)
        self.assertTrue(larger > smaller)
        self.assertTrue(larger >= smaller)
        self.assertFalse(smaller >= larger)
        self.assertFalse(smaller > larger)
        self.assertFalse(larger <= smaller)
        self.assertFalse(larger < smaller)
        # Overlapping but incomparable views: all strict relations are False.
        self.assertFalse(smaller < larger3)
        self.assertFalse(smaller <= larger3)
        self.assertFalse(larger3 > smaller)
        self.assertFalse(larger3 >= smaller)
        # Inequality strictness
        self.assertTrue(larger2 >= larger)
        self.assertTrue(larger2 <= larger)
        self.assertFalse(larger2 > larger)
        self.assertFalse(larger2 < larger)
        self.assertTrue(larger == larger2)
        self.assertTrue(smaller != larger)
        # There is an optimization on the zero-element case.
        self.assertTrue(empty == empty2)
        self.assertFalse(empty != empty2)
        self.assertFalse(empty == smaller)
        self.assertTrue(empty != smaller)
        # With the same size, an elementwise compare happens
        self.assertTrue(larger != larger3)
        self.assertFalse(larger == larger3)
    def test_errors_in_view_containment_check(self):
        # Exceptions raised by a value's __eq__ must propagate out of every
        # view comparison operator, not be swallowed.
        class C:
            def __eq__(self, other):
                raise RuntimeError
        d1 = {1: C()}
        d2 = {1: C()}
        with self.assertRaises(RuntimeError):
            d1.items() == d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() != d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() <= d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() >= d2.items()
        # Different sizes: strict subset/superset still compare elements.
        d3 = {1: C(), 2: C()}
        with self.assertRaises(RuntimeError):
            d2.items() < d3.items()
        with self.assertRaises(RuntimeError):
            d3.items() > d2.items()
    def test_dictview_set_operations_on_keys(self):
        # keys() views support -, &, |, ^ with set semantics.
        k1 = {1:1, 2:2}.keys()
        k2 = {1:1, 2:2, 3:3}.keys()
        k3 = {4:4}.keys()
        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {1,2})
        self.assertEqual(k2 - k1, {3})
        self.assertEqual(k3 - k1, {4})
        self.assertEqual(k1 & k2, {1,2})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {1,2,3})
        self.assertEqual(k1 ^ k2, {3})
        self.assertEqual(k1 ^ k3, {1,2,4})
    def test_dictview_set_operations_on_items(self):
        # items() views support -, &, |, ^, operating on (key, value) pairs.
        k1 = {1:1, 2:2}.items()
        k2 = {1:1, 2:2, 3:3}.items()
        k3 = {4:4}.items()
        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {(1,1), (2,2)})
        self.assertEqual(k2 - k1, {(3,3)})
        self.assertEqual(k3 - k1, {(4,4)})
        self.assertEqual(k1 & k2, {(1,1), (2,2)})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {(1,1), (2,2), (3,3)})
        self.assertEqual(k1 ^ k2, {(3,3)})
        self.assertEqual(k1 ^ k3, {(1,1), (2,2), (4,4)})
    def test_dictview_mixed_set_operations(self):
        # Views interoperate with real sets on both sides of the operator.
        # Just a few for .keys()
        self.assertTrue({1:1}.keys() == {1})
        self.assertTrue({1} == {1:1}.keys())
        self.assertEqual({1:1}.keys() | {2}, {1, 2})
        self.assertEqual({2} | {1:1}.keys(), {1, 2})
        # And a few for .items()
        self.assertTrue({1:1}.items() == {(1,1)})
        self.assertTrue({(1,1)} == {1:1}.items())
        self.assertEqual({1:1}.items() | {2}, {(1,1), 2})
        self.assertEqual({2} | {1:1}.items(), {(1,1), 2})
    def test_missing(self):
        # Make sure dict doesn't have a __missing__ method
        self.assertFalse(hasattr(dict, "__missing__"))
        self.assertFalse(hasattr({}, "__missing__"))
        # Test several cases:
        # (D) subclass defines __missing__ method returning a value
        # (E) subclass defines __missing__ method raising RuntimeError
        # (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
        class D(dict):
            def __missing__(self, key):
                return 42
        d = D({1: 2, 3: 4})
        self.assertEqual(d[1], 2)
        self.assertEqual(d[3], 4)
        # __missing__ affects only d[key], not containment tests.
        self.assertNotIn(2, d)
        self.assertNotIn(2, d.keys())
        self.assertEqual(d[2], 42)
        class E(dict):
            def __missing__(self, key):
                raise RuntimeError(key)
        e = E()
        with self.assertRaises(RuntimeError) as c:
            e[42]
        self.assertEqual(c.exception.args, (42,))
        class F(dict):
            def __init__(self):
                # An instance variable __missing__ should have no effect
                self.__missing__ = lambda key: None
        f = F()
        with self.assertRaises(KeyError) as c:
            f[42]
        self.assertEqual(c.exception.args, (42,))
        class G(dict):
            pass
        g = G()
        with self.assertRaises(KeyError) as c:
            g[42]
        self.assertEqual(c.exception.args, (42,))
def test_tuple_keyerror(self):
# SF #1576657
d = {}
with self.assertRaises(KeyError) as c:
d[(1,)]
self.assertEqual(c.exception.args, ((1,),))
    def test_bad_key(self):
        # Dictionary lookups should fail if __eq__() raises an exception.
        class CustomException(Exception):
            pass
        class BadDictKey:
            def __hash__(self):
                return hash(self.__class__)
            def __eq__(self, other):
                # Raises only when compared against another BadDictKey,
                # i.e. during the probe for x2 colliding with x1.
                if isinstance(other, self.__class__):
                    raise CustomException
                return other
        d = {}
        x1 = BadDictKey()
        x2 = BadDictKey()
        d[x1] = 1
        # Every lookup-style operation must surface the comparison error.
        for stmt in ['d[x2] = 2',
                     'z = d[x2]',
                     'x2 in d',
                     'd.get(x2)',
                     'd.setdefault(x2, 42)',
                     'd.pop(x2)',
                     'd.update({x2: 2})']:
            with self.assertRaises(CustomException):
                exec(stmt, locals())
    def test_resize1(self):
        # Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
        # This version got an assert failure in debug build, infinite loop in
        # release build.  Unfortunately, provoking this kind of stuff requires
        # a mix of inserts and deletes hitting exactly the right hash codes in
        # exactly the right order, and I can't think of a randomized approach
        # that would be *likely* to hit a failing case in reasonable time.
        d = {}
        for i in range(5):
            d[i] = i
        for i in range(5):
            del d[i]
        for i in range(5, 9):  # i==8 was the problem
            d[i] = i
    def test_resize2(self):
        # Another dict resizing bug (SF bug #1456209).
        # This caused Segmentation faults or Illegal instructions.
        # X.__eq__ clears the dict mid-resize once `resizing` is set.
        class X(object):
            def __hash__(self):
                return 5
            def __eq__(self, other):
                if resizing:
                    d.clear()
                return False
        d = {}
        resizing = False
        d[X()] = 1
        d[X()] = 2
        d[X()] = 3
        d[X()] = 4
        d[X()] = 5
        # now trigger a resize
        resizing = True
        d[9] = 6
    def test_empty_presized_dict_in_freelist(self):
        # Bug #3537: if an empty but presized dict with a size larger
        # than 7 was in the freelist, it triggered an assertion failure
        # The failing literal leaves a large, empty dict on the freelist;
        # the following {} allocation must not crash.
        with self.assertRaises(ZeroDivisionError):
            d = {'a': 1 // 0, 'b': None, 'c': None, 'd': None, 'e': None,
                 'f': None, 'g': None, 'h': None}
        d = {}
    def test_container_iterator(self):
        # Bug #3680: tp_traverse was not implemented for dictiter and
        # dictview objects.
        # Build a reference cycle dict -> obj -> view/iter -> dict and
        # verify the GC can break it.
        class C(object):
            pass
        views = (dict.items, dict.values, dict.keys)
        for v in views:
            obj = C()
            ref = weakref.ref(obj)
            container = {obj: 1}
            obj.v = v(container)
            obj.x = iter(obj.v)
            del obj, container
            gc.collect()
            self.assertIs(ref(), None, "Cycle was not collected")
    def _not_tracked(self, t):
        # Asserts `t` is not (or no longer) tracked by the cyclic GC.
        # Nested containers can take several collections to untrack
        gc.collect()
        gc.collect()
        self.assertFalse(gc.is_tracked(t), t)
    def _tracked(self, t):
        # Asserts `t` is GC-tracked and stays tracked across collections.
        self.assertTrue(gc.is_tracked(t), t)
        gc.collect()
        gc.collect()
        self.assertTrue(gc.is_tracked(t), t)
    @support.cpython_only
    def test_track_literals(self):
        # Test GC-optimization of dict literals
        x, y, z, w = 1.5, "a", (1, None), []
        # Dicts holding only atomic (immutable, acyclic) values are untracked.
        self._not_tracked({})
        self._not_tracked({x:(), y:x, z:1})
        self._not_tracked({1: "a", "b": 2})
        self._not_tracked({1: 2, (None, True, False, ()): int})
        self._not_tracked({1: object()})
        # Dicts with mutable elements are always tracked, even if those
        # elements are not tracked right now.
        self._tracked({1: []})
        self._tracked({1: ([],)})
        self._tracked({1: {}})
        self._tracked({1: set()})
    @support.cpython_only
    def test_track_dynamic(self):
        # Test GC-optimization of dynamically-created dicts
        class MyObject(object):
            pass
        # w (a list) and o (a user object) are mutable; the others atomic.
        x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()
        d = dict()
        self._not_tracked(d)
        d[1] = "a"
        self._not_tracked(d)
        d[y] = 2
        self._not_tracked(d)
        d[z] = 3
        self._not_tracked(d)
        self._not_tracked(d.copy())
        # First mutable value switches the dict (and its copies) to tracked.
        d[4] = w
        self._tracked(d)
        self._tracked(d.copy())
        # Tracking is sticky: replacing the mutable value doesn't untrack d,
        # but a fresh copy of the now-atomic contents is untracked.
        d[4] = None
        self._not_tracked(d)
        self._not_tracked(d.copy())
        # dd isn't tracked right now, but it may mutate and therefore d
        # which contains it must be tracked.
        d = dict()
        dd = dict()
        d[1] = dd
        self._not_tracked(dd)
        self._tracked(d)
        dd[1] = d
        self._tracked(dd)
        # fromkeys() and update() follow the same tracking rules.
        d = dict.fromkeys([x, y, z])
        self._not_tracked(d)
        dd = dict()
        dd.update(d)
        self._not_tracked(dd)
        d = dict.fromkeys([x, y, z, o])
        self._tracked(d)
        dd = dict()
        dd.update(d)
        self._tracked(dd)
        # Keyword-argument construction and update().
        d = dict(x=x, y=y, z=z)
        self._not_tracked(d)
        d = dict(x=x, y=y, z=z, w=w)
        self._tracked(d)
        d = dict()
        d.update(x=x, y=y, z=z)
        self._not_tracked(d)
        d.update(w=w)
        self._tracked(d)
        # Construction and update() from key/value pair sequences.
        d = dict([(x, y), (z, 1)])
        self._not_tracked(d)
        d = dict([(x, y), (z, w)])
        self._tracked(d)
        d = dict()
        d.update([(x, y), (z, 1)])
        self._not_tracked(d)
        d.update([(x, y), (z, w)])
        self._tracked(d)
    @support.cpython_only
    def test_track_subtypes(self):
        # Dict subtypes are always tracked
        class MyDict(dict):
            pass
        self._tracked(MyDict())
    def test_iterator_pickling(self):
        # A dict key iterator survives a pickle round-trip, including a
        # partially-consumed one.
        data = {1:"a", 2:"b", 3:"c"}
        it = iter(data)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        self.assertEqual(sorted(it), sorted(data))
        it = pickle.loads(d)
        try:
            drop = next(it)
        except StopIteration:
            # Nothing to consume; the partial-iterator case is vacuous.
            return
        d = pickle.dumps(it)
        it = pickle.loads(d)
        # The unpickled iterator must yield exactly the remaining keys.
        del data[drop]
        self.assertEqual(sorted(it), sorted(data))
    def test_itemiterator_pickling(self):
        data = {1:"a", 2:"b", 3:"c"}
        # dictviews aren't picklable, only their iterators
        itorg = iter(data.items())
        d = pickle.dumps(itorg)
        it = pickle.loads(d)
        # note that the type of the unpickled iterator
        # is not necessarily the same as the original.  It is
        # merely an object supporting the iterator protocol, yielding
        # the same objects as the original one.
        # self.assertEqual(type(itorg), type(it))
        self.assertTrue(isinstance(it, collections.abc.Iterator))
        self.assertEqual(dict(it), data)
        # A partially-consumed item iterator round-trips too.
        it = pickle.loads(d)
        drop = next(it)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        del data[drop[0]]
        self.assertEqual(dict(it), data)
    def test_valuesiterator_pickling(self):
        data = {1:"a", 2:"b", 3:"c"}
        # data.values() isn't picklable, only its iterator
        it = iter(data.values())
        d = pickle.dumps(it)
        it = pickle.loads(d)
        self.assertEqual(sorted(list(it)), sorted(list(data.values())))
        # A partially-consumed values iterator round-trips; the dropped
        # value plus the remainder must equal the full value set.
        it = pickle.loads(d)
        drop = next(it)
        d = pickle.dumps(it)
        it = pickle.loads(d)
        values = list(it) + [drop]
        self.assertEqual(sorted(values), sorted(list(data.values())))
    def test_instance_dict_getattr_str_subclass(self):
        # Attribute lookup must work with str-subclass keys, both through
        # getattr() and through direct __dict__ indexing.
        class Foo:
            def __init__(self, msg):
                self.msg = msg
        f = Foo('123')
        class _str(str):
            pass
        self.assertEqual(f.msg, getattr(f, _str('msg')))
        self.assertEqual(f.msg, f.__dict__[_str('msg')])
    def test_object_set_item_single_instance_non_str_key(self):
        # An instance __dict__ accepts non-string keys alongside normal
        # attribute assignment.
        class Foo: pass
        f = Foo()
        f.__dict__[1] = 1
        f.a = 'a'
        self.assertEqual(f.__dict__, {1:1, 'a':'a'})
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Reuse the generic mapping-protocol test battery against builtin dict.
    type2test = dict
class Dict(dict):
    # Trivial dict subclass used to exercise the protocol on subclasses.
    pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    type2test = Dict
def test_main():
    # Aggregate entry point: runs the dict tests plus the generic
    # mapping-protocol suites defined above.
    support.run_unittest(
        DictTest,
        GeneralMappingTests,
        SubclassMappingTests,
    )
if __name__ == "__main__":
    test_main()
| {
"content_hash": "36563bec0e8bb96a10707a074f4fcc66",
"timestamp": "",
"source": "github",
"line_count": 929,
"max_line_length": 79,
"avg_line_length": 31.332615715823465,
"alnum_prop": 0.4971485502267418,
"repo_name": "timm/timmnix",
"id": "6e4cab6f158f5a4171c24ce510eda6b60fc4805a",
"size": "29108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypy3-v5.5.0-linux64/lib-python/3/test/test_dict.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1641"
},
{
"name": "Batchfile",
"bytes": "1234"
},
{
"name": "C",
"bytes": "436685"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Common Lisp",
"bytes": "4"
},
{
"name": "Emacs Lisp",
"bytes": "290698"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Makefile",
"bytes": "1681"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1540"
},
{
"name": "Prolog",
"bytes": "14301"
},
{
"name": "Python",
"bytes": "21267592"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "27687"
},
{
"name": "TeX",
"bytes": "3052861"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Utilities for using reinforcement learning algorithms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import re
import ruamel.yaml as yaml
import tensorflow as tf
from agents import tools
def define_simulation_graph(batch_env, algo_cls, config):
  """Define the algorithm and environment interaction.

  Args:
    batch_env: In-graph environments object.
    algo_cls: Constructor of a batch algorithm.
    config: Configuration object for the algorithm.

  Returns:
    Object providing graph elements via attributes.
  """
  # pylint: disable=unused-variable
  step = tf.Variable(0, False, dtype=tf.int32, name='global_step')
  # Placeholders let the training loop toggle behavior per session call.
  is_training = tf.placeholder(tf.bool, name='is_training')
  should_log = tf.placeholder(tf.bool, name='should_log')
  do_report = tf.placeholder(tf.bool, name='do_report')
  force_reset = tf.placeholder(tf.bool, name='force_reset')
  algo = algo_cls(batch_env, step, is_training, should_log, config)
  done, score, summary = tools.simulate(
      batch_env, algo, should_log, force_reset)
  message = 'Graph contains {} trainable variables.'
  tf.logging.info(message.format(tools.count_weights()))
  # pylint: enable=unused-variable
  # AttrDict(locals()) deliberately exposes every local defined above
  # (step, placeholders, algo, done, score, summary, ...) as attributes.
  return tools.AttrDict(locals())
def define_batch_env(constructor, num_agents, env_processes):
  """Create environments and apply all desired wrappers.

  Args:
    constructor: Constructor of an OpenAI gym environment.
    num_agents: Number of environments to combine in the batch.
    env_processes: Whether to step environment in external processes.

  Returns:
    In-graph environments object.
  """
  with tf.variable_scope('environments'):
    if env_processes:
      # Run each environment in its own process to avoid GIL contention.
      envs = [
          tools.wrappers.ExternalProcess(constructor)
          for _ in range(num_agents)]
    else:
      envs = [constructor() for _ in range(num_agents)]
    # blocking=True only makes sense for in-process envs; external
    # processes are stepped asynchronously.
    batch_env = tools.BatchEnv(envs, blocking=not env_processes)
    batch_env = tools.InGraphBatchEnv(batch_env)
  return batch_env
def define_saver(exclude=None):
  """Create a saver for the variables we want to checkpoint.

  Args:
    exclude: List of regexes to match variable names to exclude.

  Returns:
    Saver object.
  """
  patterns = [re.compile(regex) for regex in exclude or []]
  # Keep every global variable whose name matches none of the patterns.
  keep = [
      variable for variable in tf.global_variables()
      if not any(pattern.match(variable.name) for pattern in patterns)]
  return tf.train.Saver(keep, keep_checkpoint_every_n_hours=5)
def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
  """Initialize or restore variables from a checkpoint if available.

  Args:
    sess: Session to initialize variables in.
    saver: Saver to restore variables.
    logdir: Directory to search for checkpoints.
    checkpoint: Specify what checkpoint name to use; defaults to most recent.
    resume: Whether to expect recovering a checkpoint or starting a new run.

  Raises:
    ValueError: If resume expected but no log directory specified.
    RuntimeError: If no resume expected but a checkpoint was found.
  """
  # Always initialize first; a restore below then overwrites the values.
  sess.run(tf.group(
      tf.local_variables_initializer(),
      tf.global_variables_initializer()))
  if resume and not (logdir or checkpoint):
    raise ValueError('Need to specify logdir to resume a checkpoint.')
  if logdir:
    state = tf.train.get_checkpoint_state(logdir)
    if checkpoint:
      # Explicit checkpoint names are resolved relative to the logdir.
      checkpoint = os.path.join(logdir, checkpoint)
    if not checkpoint and state and state.model_checkpoint_path:
      checkpoint = state.model_checkpoint_path
    # resume=None means "don't care"; only resume=False forbids restoring.
    if checkpoint and resume is False:
      message = 'Found unexpected checkpoint when starting a new run.'
      raise RuntimeError(message)
    if checkpoint:
      saver.restore(sess, checkpoint)
def save_config(config, logdir=None):
  """Save a new configuration by name.

  If a logging directory is specified, is will be created and the configuration
  will be stored there. Otherwise, a log message will be printed.

  Args:
    config: Configuration object.
    logdir: Location for writing summaries and checkpoints if specified.

  Returns:
    Configuration object.
  """
  if logdir:
    # Record the logdir on the config so a later load_config() finds it.
    with config.unlocked:
      config.logdir = logdir
    message = 'Start a new run and write summaries and checkpoints to {}.'
    tf.logging.info(message.format(config.logdir))
    tf.gfile.MakeDirs(config.logdir)
    config_path = os.path.join(config.logdir, 'config.yaml')
    with tf.gfile.FastGFile(config_path, 'w') as file_:
      yaml.dump(config, file_, default_flow_style=False)
  else:
    message = (
        'Start a new run without storing summaries and checkpoints since no '
        'logging directory was specified.')
    tf.logging.info(message)
  return config
def load_config(logdir):
  # pylint: disable=missing-raises-doc
  """Load a configuration from the log directory.

  Args:
    logdir: The logging directory containing the configuration file.

  Raises:
    IOError: The logging directory does not contain a configuration file.

  Returns:
    Configuration object.
  """
  config_path = logdir and os.path.join(logdir, 'config.yaml')
  if not config_path or not tf.gfile.Exists(config_path):
    message = (
        'Cannot resume an existing run since the logging directory does not '
        'contain a configuration file.')
    raise IOError(message)
  with tf.gfile.FastGFile(config_path, 'r') as file_:
    # yaml.Loader (not SafeLoader) is required to reconstruct the custom
    # config object written by save_config(); only load trusted logdirs.
    config = yaml.load(file_, Loader=yaml.Loader)
  message = 'Resume run and write summaries and checkpoints to {}.'
  tf.logging.info(message.format(config.logdir))
  return config
def set_up_logging():
  """Configure the TensorFlow logger."""
  tf.logging.set_verbosity(tf.logging.INFO)
  # Stop TF records from being duplicated by the root logger's handlers.
  tf_logger = logging.getLogger('tensorflow')
  tf_logger.propagate = False
| {
"content_hash": "fad297c1820efaa46f3a492bfe24420a",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 79,
"avg_line_length": 32.90395480225989,
"alnum_prop": 0.7153159340659341,
"repo_name": "google-research/batch-ppo",
"id": "61aad0da5614bdc326b40009501282669182645e",
"size": "6420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agents/scripts/utility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "176905"
}
],
"symlink_target": ""
} |
def extractFreezingslowlyWordpressCom(item):
	'''
	Parser for 'freezingslowly.wordpress.com'
	'''
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	# Skip items that carry neither a volume nor a chapter number, and previews.
	if not (vol or chp) or 'preview' in item['title'].lower():
		return None
	# (feed tag, release name, translation type) triples recognised here.
	for tag, name, tl_type in (('PRC', 'PRC', 'translated'),
	                           ('Loiterous', 'Loiterous', 'oel')):
		if tag in item['tags']:
			return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
	return False
| {
"content_hash": "5e1dc1c660cfc84652440f756a4ad090",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27,
"alnum_prop": 0.6402116402116402,
"repo_name": "fake-name/ReadableWebProxy",
"id": "b77c77d4dee0210090ef3d2c2e4ccbf951186b6e",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractFreezingslowlyWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import numpy


def rcosFn(width=1, position=0, values=(0, 1)):
    """Return a lookup table (X, Y) for a "raised cosine" soft threshold.

    Suitable for use with interpolation (e.g. ``numpy.interp``)::

        Y = values[0] + (values[1] - values[0]) *
            cos^2( pi/2 * (X - position + width) / width )

    Args:
        width: Width of the region over which the transition occurs.
        position: Location of the center of the threshold.
        values: Pair of values to the left and right of the transition.

    Returns:
        Tuple ``(X, Y)`` of numpy arrays sampling the transition.  The end
        values are repeated so interpolation beyond the table clamps
        instead of extrapolating.
    """
    # Real default parameters replace the original *args emulation, so the
    # function now also accepts keyword arguments; positional calls are
    # unchanged.
    sz = 256  # arbitrary table resolution
    X = numpy.pi * numpy.arange(-sz - 1, 2) / (2 * sz)
    Y = values[0] + (values[1] - values[0]) * numpy.cos(X)**2
    # make sure end values are repeated, for extrapolation...
    Y[0] = Y[1]
    Y[sz + 2] = Y[sz + 1]
    X = position + (2 * width / numpy.pi) * (X + numpy.pi / 4)
    return (X, Y)
| {
"content_hash": "d303c22055a04d8568ad45d667eed934",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 26.19148936170213,
"alnum_prop": 0.5190901705930138,
"repo_name": "tochikuji/pyPyrTools",
"id": "21c52b510afa495718c22210b44545361d138cfb",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrtools/rcosFn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46149"
},
{
"name": "Jupyter Notebook",
"bytes": "2816293"
},
{
"name": "Makefile",
"bytes": "322"
},
{
"name": "Python",
"bytes": "511943"
}
],
"symlink_target": ""
} |
"""
Helpers for abstracting high-level network concepts. Different from 'models.py' which deals
with IO abstraction.
"""
import string
from typing import Dict, Union, Tuple
from collections import namedtuple, OrderedDict
# externals
# None. Should not have any external dependencies.
FILENAME_VALID_CHARS = "-~_.() {}{}".format(string.ascii_letters, string.digits)
"""NetworkResult(input: str, output_tensor: np.array, semantic_output: np.array, median_runtime: NetworkRuntime, models: [str])"""
NetworkResult = namedtuple(
"NetworkResult",
["input", "output_tensor", "semantic_output", "median_runtime", "models"],
)
"""BenchmarkingResult(median_runtime: NetworkRuntime, models: [str])"""
BenchmarkingResult = namedtuple(
"BenchmarkingResult",
["median_runtime", "models"],
)
"""CheckpointResult(network_results: List[NetworkResult], accuracy: float)"""
NetworkCheckpointResult = namedtuple(
"NetworkCheckpointResult", ["network_results", "accuracy"]
)
# Tracks TRT Precision Config
"""Precision(fp16: Bool)"""
Precision = namedtuple("Precision", ["fp16"])
"""NetworkMetadata(variant: str, precision: Precision, other: Union[namedtuple, None])"""
NetworkMetadata = namedtuple("NetworkMetadata", ["variant", "precision", "other"])
"""TimingProfile(iterations: int, number: int, warmup: int, duration: int, percentile: int or [int])"""
TimingProfile = namedtuple("TimingProfile", ["iterations", "number", "warmup", "duration", "percentile"])
"""NetworkModel(name: str, fpath: str)"""
NetworkModel = namedtuple("NetworkModel", ["name", "fpath"])
"""
String encodings to genereted network models.
NetworkModels(torch: Tuple[NetworkModel], onnx: Tuple[NetworkModel])
"""
NetworkModels = namedtuple("NetworkModels", ["torch", "onnx", "trt"])
"""
Args:
name: Name of the network / parts of the network timed.
runtime: Runtime of the time.
NetworkRuntime(name: str, runtime: float)
"""
NetworkRuntime = namedtuple("NetworkRuntime", ["name", "runtime"])
class Dims:
    """Helper class for interfacing dimension constructs with Polygraphy and PyTorch."""

    # Canonical names for the two dynamic axes used across the package.
    BATCH = "batch"
    SEQUENCE = "sequence"

    def __init__(self, encoding: OrderedDict):
        """
        Args:
            encoding: Ordered mapping of tensor name to a tuple of dimension
                lengths, each a concrete int or a dynamic-axis name string
                (Dims.BATCH, Dims.SEQUENCE, or a sequence variant).
        """
        self.encoding = encoding

    @staticmethod
    def create_new_sequence_dim(dim_type: str) -> str:
        """
        Returns a new sequence dimension.

        Return:
            str: Returns a sequence dimension which Dims.SEQUENCE appended by dim_type.
        """
        # Declared @staticmethod: the original definition took no `self`,
        # so calling it on an instance passed the instance as `dim_type`.
        return Dims.SEQUENCE + "_" + dim_type

    def get_dims(self):
        """
        Returns the encoding dimensions.

        Return:
            OrderedDict[str, Union[int, str]]: Returns dimensional encoding. Example: {'input_ids': (1, SEQUENCE_DIM)}
        """
        return self.encoding

    def get_names(self) -> Tuple[str]:
        """Returns the tensor names in declaration order."""
        return tuple(self.encoding.keys())

    def get_lengths(self) -> Tuple[Union[int, str]]:
        """Returns the per-tensor dimension tuples in declaration order."""
        return tuple(self.encoding.values())

    def get_torch_dynamic_axis_encoding(self) -> dict:
        """
        Returns a Pytorch "dynamic_axes" encoding for onnx.export.

        Returns:
            dict: Returns a 'dynamic' index with corresponding names according to:
            https://pytorch.org/docs/stable/onnx.html
        """
        dynamic_axes = {}
        for k, v in self.encoding.items():
            # Only named (dynamic) axes are reported; concrete ints are
            # static and omitted from the ONNX dynamic_axes spec.
            encodings = []
            for idx, e in enumerate(v):
                if isinstance(e, str) and (e == self.BATCH or self.SEQUENCE in e):
                    encodings.append((idx, e))
            dynamic_axes[k] = {idx: e for idx, e in encodings}
        return dynamic_axes
# Config Class
class NNConfig:
    """Contains info for a given network that we support."""

    # Exportable sub-graphs; override for networks exported in parts.
    NETWORK_SEGMENTS = ["full"]

    def __init__(self, network_name, variants=None):
        """
        Args:
            network_name: Filename-friendly name of the network.
            variants: List of NetworkMetadata describing supported variants.
        """
        assert self._is_valid_filename(
            network_name
        ), "Network name: {} is not filename friendly.".format(network_name)
        self.network_name = network_name
        self.variants = variants
        # Due to limitations of namedtuples and pickle function, namedtupled must be tracked as an instance
        # which refers to a global.
        if len(self.variants) > 0:
            self.MetadataClass = type(self.variants[0].other)
        else:
            self.MetadataClass = None

    def get_network_segments(self):
        """
        Returns exportable segments for the given network.
        Used in the case where a single network needs to
        be exported into multiple parts.
        """
        return self.NETWORK_SEGMENTS

    @staticmethod
    def get_output_dims(metadata) -> Dict:
        """
        Returns the output dimensions of the current network.
        Since some networks can have multiple parts, should be a dictionary encoding.

        Returns:
            (Dict): {"network_section": Dims}
        """
        raise NotImplementedError("Output dims not yet defined.")

    @staticmethod
    def get_input_dims(metadata) -> Dict:
        """
        Returns the input dimensions of the current network.
        Since some networks can have multiple parts, should be a dictionary encoding.

        Returns:
            (Dict): {"network_section": Dims} example:
            {"encoder": Dims(...), "decoder": Dims(...)}
        """
        raise NotImplementedError("Input dims not yet defined.")

    def _is_valid_filename(self, filename: str) -> bool:
        """
        Checks if a given filename is valid, helpful for cross platform dependencies.
        """
        return all(c in FILENAME_VALID_CHARS for c in filename)

    @staticmethod
    def get_python_requirements():
        """Returns extra pip requirements for this network (none by default)."""
        # Declared @staticmethod: the original definition had neither `self`
        # nor a decorator, so invoking it on an instance raised TypeError.
        return []

    def get_metadata_string(self, metadata: "NetworkMetadata") -> str:
        """
        Serializes a Metadata object into string.
        String will be checked if friendly to filenames across Windows and Linux operating systems.

        returns:
            string: <network>-<variant-name>-<precision>-<others>
        """
        # Only enabled precision flags contribute to the name.
        precision_str = "-".join(
            [k for k, v in metadata.precision._asdict().items() if v]
        )
        result = [self.network_name, metadata.variant]
        if precision_str:
            result.append(precision_str)
        other_result = [
            "{}~{}".format(k, str(v)) for k, v in metadata.other._asdict().items()
        ]
        # Remove all boolean values that are False and remove True if exists
        true_length = len("~True")
        other_result_filtered = [v[:-true_length] if v.endswith("~True") else v for v in other_result if "~False" not in v]
        if len(other_result_filtered) != 0:
            result.append("-".join(other_result_filtered))
        final_str = "-".join(result)
        assert self._is_valid_filename(
            final_str
        ), "Metadata for current network {} is not filename friendly: {}.".format(
            self.network_name, final_str
        )
        return final_str
| {
"content_hash": "7e737c33c44fae764688ec34e4c253f5",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 130,
"avg_line_length": 33.21634615384615,
"alnum_prop": 0.6290345925604284,
"repo_name": "NVIDIA/TensorRT",
"id": "33e8aba151de012a8c535eb9e5518a0b7b95d37a",
"size": "7598",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "demo/HuggingFace/NNDF/networks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "804"
},
{
"name": "C",
"bytes": "26267"
},
{
"name": "C++",
"bytes": "174835683"
},
{
"name": "CMake",
"bytes": "73882"
},
{
"name": "Cuda",
"bytes": "713094"
},
{
"name": "Dockerfile",
"bytes": "21378"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "Jupyter Notebook",
"bytes": "2284036"
},
{
"name": "Makefile",
"bytes": "9128"
},
{
"name": "PowerShell",
"bytes": "162"
},
{
"name": "PureBasic",
"bytes": "388"
},
{
"name": "Python",
"bytes": "2541976"
},
{
"name": "Shell",
"bytes": "20007"
}
],
"symlink_target": ""
} |
"""
Django settings for django_example project.
Generated by 'django-admin startproject' using Django 3.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-m=&li-7ggx4(x1+^e2ivl47r__gwnxh*ch_7u46a_kv8*oxhaf"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"asphalt.web.django.AsphaltMiddleware",
]
ROOT_URLCONF = "django_example.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "django_example.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| {
"content_hash": "ffac40752273296a33f85062db0e3020",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 91,
"avg_line_length": 26.54032258064516,
"alnum_prop": 0.6940139775144333,
"repo_name": "asphalt-framework/asphalt-web",
"id": "56c76fb3be0bbb2b87ed02d82853c533a4107a15",
"size": "3291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/django/django_example/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "50469"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.