Dataset schema (column, dtype, observed range; ⌀ marks nullable columns):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 - 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 - 209 |
| max_stars_repo_name | string | length 5 - 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 - 10 |
| max_stars_count | int64 ⌀ | 1 - 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 - 209 |
| max_issues_repo_name | string | length 5 - 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 - 10 |
| max_issues_count | int64 ⌀ | 1 - 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 - 209 |
| max_forks_repo_name | string | length 5 - 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 - 10 |
| max_forks_count | int64 ⌀ | 1 - 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 - 1.02M |
| avg_line_length | float64 | 1.07 - 66.1k |
| max_line_length | int64 | 4 - 266k |
| alphanum_fraction | float64 | 0.01 - 1 |
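The schema above is typical of a code-corpus export hosted on the Hugging Face Hub. A minimal sketch of how rows with this schema could be streamed and filtered is below; the dataset identifier `some-org/some-code-dataset` is a placeholder, not the actual source of this dump.

```python
# Hypothetical loader for a dataset with the schema above.
from datasets import load_dataset

ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)
for row in ds:
    stars = row["max_stars_count"] or 0          # nullable column (marked ⌀ in the schema)
    if row["ext"] == "py" and stars >= 10:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```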
hexsha: e61b0fd36cdaa8eee16647e4a0451cc1b0be25b9 | size: 1,965 | ext: py | lang: Python
path: libra/account_resource.py | repo: devos50/libra-client | head_hexsha: 7d1558848ff45ca8f42d756ef11e04846154e3cf | licenses: ["MIT"]
stars: null | issues: null | forks: null
from canoser import *
from libra.event import EventHandle
from libra.hasher import gen_hasher
from libra.account_config import AccountConfig
from io import StringIO
import libra.proof  # provides bail(), called in get_event_handle_by_query_path below
class AccountStateBlob:
def __init__(self, blob):
self.blob = blob
@classmethod
def from_proto(cls, proto):
return cls(proto.blob)
def hash(self):
shazer = gen_hasher(b"AccountStateBlob")
shazer.update(self.blob)
return shazer.digest()
class AccountState(Struct):
_fields = [
('ordered_map', {})
]
def __str__(self):
concat = StringIO()
concat.write(super().__str__())
resource = self.ordered_map[AccountConfig.ACCOUNT_RESOURCE_PATH]
if resource:
ar = AccountResource.deserialize(resource)
concat.write("\nDecoded:\n")
concat.write(ar.__str__())
return concat.getvalue()
class AccountResource(Struct):
_fields = [
('authentication_key', [Uint8]),
('balance', Uint64),
('delegated_key_rotation_capability', bool),
('delegated_withdrawal_capability', bool),
('received_events', EventHandle),
('sent_events', EventHandle),
('sequence_number', Uint64)
]
@classmethod
def get_account_resource_or_default(cls, blob):
if blob:
omap = AccountState.deserialize(blob.blob).ordered_map
resource = omap[AccountConfig.ACCOUNT_RESOURCE_PATH]
return cls.deserialize(resource)
else:
return cls()
def get_event_handle_by_query_path(self, query_path):
if AccountConfig.account_received_event_path() == query_path:
return self.received_events
elif AccountConfig.account_sent_event_path() == query_path:
return self.sent_events
else:
            libra.proof.bail("Unrecognized query path: {}", query_path)
avg_line_length: 30.703125 | max_line_length: 73 | alphanum_fraction: 0.62341
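A minimal usage sketch for the classes above, assuming the `libra` client package they import from is installed; the blob bytes are arbitrary placeholder data, not a real on-chain account state.

```python
# Hash an arbitrary (placeholder) account-state blob with the same
# domain-separated hasher the class uses internally.
blob = AccountStateBlob(b"\x00" * 32)
print(blob.hash().hex())
```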
hexsha: 9d6a7430d99c54e6b279c54d0024ba9466e43d00 | size: 199 | ext: py | lang: Python
path: xmonitor/contrib/plugins/artifacts_sample/__init__.py | repo: froyobin/xmonitor | head_hexsha: 092dcaa01f834353ffd8dd3c40edf9e97543bfe8 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
from xmonitor.contrib.plugins.artifacts_sample.v1 import artifact as art1
from xmonitor.contrib.plugins.artifacts_sample.v2 import artifact as art2
MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact]
avg_line_length: 33.166667 | max_line_length: 73 | alphanum_fraction: 0.839196
hexsha: 4e68707fdab645cd5efa3633c98b4aa3e81635ad | size: 521 | ext: py | lang: Python
path: riak/datatypes/errors.py | repo: albeus/riak-python-client | head_hexsha: 51bf875f1f5e394d45540a3850a8453db0951c40 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
from riak import RiakError
class ContextRequired(RiakError):
"""
This exception is raised when removals of map fields and set
entries are attempted and the datatype hasn't been initialized
with a context.
"""
_default_message = ("A context is required for remove operations, "
"fetch the datatype first")
def __init__(self, message=None):
super(ContextRequired, self).__init__(message
or self._default_message)
avg_line_length: 30.647059 | max_line_length: 71 | alphanum_fraction: 0.629559
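A short sketch of how this exception class behaves, assuming the `riak` package that provides `RiakError` is installed.

```python
# Raising with no argument falls back to the default message.
try:
    raise ContextRequired()
except ContextRequired as err:
    print(err)  # prints the default "A context is required ..." message
```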
hexsha: c8b2a633a028f5d13755b20e94aacd05dfa6371d | size: 2,953 | ext: py | lang: Python
path: messageApp_api/migrations/0004_auto_20210213_1829.py | repo: yoniv/messageApp | head_hexsha: 495762fb6eee328df68f97260494d08c5041d926 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Generated by Django 3.0 on 2021-02-13 16:29
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0011_update_proxy_permissions'),
('messageApp_api', '0003_message_is_read'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.DeleteModel(
name='Users',
),
]
avg_line_length: 62.829787 | max_line_length: 329 | alphanum_fraction: 0.657298
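Since this migration introduces a custom `User` model (and drops the old `Users` model), the project settings would normally point Django's auth machinery at it. A sketch, assuming the app label `messageApp_api` shown in the migration:

```python
# settings.py (sketch)
AUTH_USER_MODEL = 'messageApp_api.User'
```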
hexsha: 67a4dc5dd5440ed57b743f18f84e2d218d7c1ec4 | size: 5,216 | ext: py | lang: Python
path: site/flask/lib/python2.7/site-packages/speaklater.py | repo: theholyhades1/tartanHacks2015 | head_hexsha: a801b473f21cfbd136e2a5a74423e8c72d14f900 | licenses: ["MIT"]
stars: 32 (2015-01-19T12:13:26.000Z to 2021-11-11T00:11:22.000Z) | issues: 10 (2020-06-05T19:42:26.000Z to 2022-03-11T23:38:35.000Z) | forks: 9 (2015-07-18T01:03:56.000Z to 2019-05-24T09:36:40.000Z)
# -*- coding: utf-8 -*-
r"""
speaklater
~~~~~~~~~~
A module that provides lazy strings for translations. Basically you
get an object that appears to be a string but changes the value every
time the value is evaluated based on a callable you provide.
For example you can have a global `lazy_gettext` function that returns
a lazy string with the value of the current set language.
Example:
>>> from speaklater import make_lazy_string
>>> sval = u'Hello World'
>>> string = make_lazy_string(lambda: sval)
This lazy string will evaluate to the value of the `sval` variable.
>>> string
lu'Hello World'
>>> unicode(string)
u'Hello World'
>>> string.upper()
u'HELLO WORLD'
If you change the value, the lazy string will change as well:
>>> sval = u'Hallo Welt'
>>> string.upper()
u'HALLO WELT'
This is especially handy when combined with a thread local and gettext
translations or dicts of translatable strings:
>>> from speaklater import make_lazy_gettext
>>> from threading import local
>>> l = local()
>>> l.translations = {u'Yes': 'Ja'}
>>> lazy_gettext = make_lazy_gettext(lambda: l.translations.get)
>>> yes = lazy_gettext(u'Yes')
>>> print yes
Ja
>>> l.translations[u'Yes'] = u'Si'
>>> print yes
Si
Lazy strings are no real strings so if you pass this sort of string to
a function that performs an instance check, it will fail. In that case
you have to explicitly convert it with `unicode` and/or `string` depending
on what string type the lazy string encapsulates.
To check if a string is lazy, you can use the `is_lazy_string` function:
>>> from speaklater import is_lazy_string
>>> is_lazy_string(u'yes')
False
>>> is_lazy_string(yes)
True
New in version 1.2: It's now also possible to pass keyword arguments to
the callback used with `make_lazy_string`.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
def is_lazy_string(obj):
"""Checks if the given object is a lazy string."""
return isinstance(obj, _LazyString)
def make_lazy_string(__func, *args, **kwargs):
"""Creates a lazy string by invoking func with args."""
return _LazyString(__func, args, kwargs)
def make_lazy_gettext(lookup_func):
"""Creates a lazy gettext function dispatches to a gettext
function as returned by `lookup_func`.
Example:
>>> translations = {u'Yes': u'Ja'}
>>> lazy_gettext = make_lazy_gettext(lambda: translations.get)
>>> x = lazy_gettext(u'Yes')
>>> x
lu'Ja'
>>> translations[u'Yes'] = u'Si'
>>> x
lu'Si'
"""
def lazy_gettext(string):
if is_lazy_string(string):
return string
return make_lazy_string(lookup_func(), string)
return lazy_gettext
class _LazyString(object):
"""Class for strings created by a function call.
The proxy implementation attempts to be as complete as possible, so that
the lazy objects should mostly work as expected, for example for sorting.
"""
__slots__ = ('_func', '_args', '_kwargs')
def __init__(self, func, args, kwargs):
self._func = func
self._args = args
self._kwargs = kwargs
value = property(lambda x: x._func(*x._args, **x._kwargs))
def __contains__(self, key):
return key in self.value
def __nonzero__(self):
return bool(self.value)
def __dir__(self):
return dir(unicode)
def __iter__(self):
return iter(self.value)
def __len__(self):
return len(self.value)
def __str__(self):
return str(self.value)
def __unicode__(self):
return unicode(self.value)
def __add__(self, other):
return self.value + other
def __radd__(self, other):
return other + self.value
def __mod__(self, other):
return self.value % other
def __rmod__(self, other):
return other % self.value
def __mul__(self, other):
return self.value * other
def __rmul__(self, other):
return other * self.value
def __lt__(self, other):
return self.value < other
def __le__(self, other):
return self.value <= other
def __eq__(self, other):
return self.value == other
def __ne__(self, other):
return self.value != other
def __gt__(self, other):
return self.value > other
def __ge__(self, other):
return self.value >= other
def __getattr__(self, name):
if name == '__members__':
return self.__dir__()
return getattr(self.value, name)
def __getstate__(self):
return self._func, self._args, self._kwargs
def __setstate__(self, tup):
self._func, self._args, self._kwargs = tup
def __getitem__(self, key):
return self.value[key]
def __copy__(self):
return self
def __repr__(self):
try:
return 'l' + repr(self.value)
except Exception:
return '<%s broken>' % self.__class__.__name__
if __name__ == '__main__':
import doctest
doctest.testmod()
avg_line_length: 25.950249 | max_line_length: 78 | alphanum_fraction: 0.637078
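A small sketch of the keyword-argument support mentioned in the module docstring ("New in version 1.2"); this is Python 2 code, like the module above.

```python
# The keyword argument is forwarded to the callback on every evaluation.
greeting = make_lazy_string(lambda name: u'Hello %s' % name, name=u'World')
print unicode(greeting)          # Hello World
print is_lazy_string(greeting)   # True
```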
hexsha: 99e5e7be96e296671513894d23e22cf62f8d65bb | size: 6,391 | ext: py | lang: Python
path: Assignment_1/Q3/input_data.py | repo: hasagar97/DeepLearning | head_hexsha: 2b0e6ce2ef5e6216c8af63d1c14d98fe39286a0e | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
"""Functions for downloading and reading MNIST data."""
import gzip
import os
from six.moves import xrange  # pylint: disable=redefined-builtin  (used in the fake_data branch below)
from six.moves.urllib.request import urlretrieve
import numpy
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def extract_labels(filename, one_hot=False):
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels)
return labels
class DataSet(object):
def __init__(self, images, labels, fake_data=False):
if fake_data:
self._num_examples = 10000
else:
assert images.shape[0] == labels.shape[0], (
"images.shape: %s labels.shape: %s" % (images.shape,
labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1.0 for _ in xrange(784)]
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir, fake_data=False, one_hot=False):
class DataSets(object):
pass
data_sets = DataSets()
if fake_data:
data_sets.train = DataSet([], [], fake_data=True)
data_sets.validation = DataSet([], [], fake_data=True)
data_sets.test = DataSet([], [], fake_data=True)
return data_sets
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 5000
local_file = maybe_download(TRAIN_IMAGES, train_dir)
train_images = extract_images(local_file)
local_file = maybe_download(TRAIN_LABELS, train_dir)
train_labels = extract_labels(local_file, one_hot=one_hot)
local_file = maybe_download(TEST_IMAGES, train_dir)
test_images = extract_images(local_file)
local_file = maybe_download(TEST_LABELS, train_dir)
test_labels = extract_labels(local_file, one_hot=one_hot)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
data_sets.train = DataSet(train_images, train_labels)
data_sets.validation = DataSet(validation_images, validation_labels)
data_sets.test = DataSet(test_images, test_labels)
return data_sets
avg_line_length: 38.969512 | max_line_length: 79 | alphanum_fraction: 0.627288
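A usage sketch for the module above, assuming it is saved as `input_data.py` and that the MNIST archives can still be fetched from the hard-coded URL (a local mirror in the work directory also works, since `maybe_download` skips existing files).

```python
import input_data  # the module above

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
images, labels = mnist.train.next_batch(64)
print(images.shape, labels.shape)                              # (64, 784) (64, 10)
print(mnist.validation.num_examples, mnist.test.num_examples)  # 5000 10000
```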
hexsha: 10e3f32de395844ee8a9b7cabdb9ff79f4728d8f | size: 12,106 | ext: py | lang: Python
path: pandas/core/ops/array_ops.py | repo: garyteofanus/pandas | head_hexsha: cc51219fad8add8f442b847ccdabd3f9e9077cb6 | licenses: ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"]
stars: 1 (2020-03-23T08:51:08.000Z to 2020-03-23T08:51:08.000Z) | issues: null | forks: null
"""
Functions for arithmetic and comparison operations on NumPy arrays and
ExtensionArrays.
"""
from functools import partial
import operator
from typing import Any, Optional
import numpy as np
from pandas._libs import Timedelta, Timestamp, lib, ops as libops
from pandas._typing import ArrayLike
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
find_common_type,
maybe_upcast_putmask,
)
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDatetimeArray,
ABCExtensionArray,
ABCIndex,
ABCIndexClass,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.ops import missing
from pandas.core.ops.dispatch import dispatch_to_extension_op, should_extension_dispatch
from pandas.core.ops.invalid import invalid_comparison
from pandas.core.ops.roperator import rpow
def comp_method_OBJECT_ARRAY(op, x, y):
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
# Note: these checks can be for ABCIndex and not ABCIndexClass
# because that is the only object-dtype class.
if not is_object_dtype(y.dtype):
y = y.astype(np.object_)
if isinstance(y, (ABCSeries, ABCIndex)):
y = y.values
result = libops.vec_compare(x.ravel(), y, op)
else:
result = libops.scalar_compare(x.ravel(), y, op)
return result.reshape(x.shape)
def masked_arith_op(x, y, op):
"""
If the given arithmetic operation fails, attempt it again on
only the non-null elements of the input array(s).
Parameters
----------
x : np.ndarray
y : np.ndarray, Series, Index
op : binary operator
"""
# For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
# the logic valid for both Series and DataFrame ops.
xrav = x.ravel()
assert isinstance(x, np.ndarray), type(x)
if isinstance(y, np.ndarray):
dtype = find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
# NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex
# we would get int64 dtype, see GH#19956
yrav = y.ravel()
mask = notna(xrav) & notna(yrav)
if yrav.shape != mask.shape:
# FIXME: GH#5284, GH#5035, GH#19448
# Without specifically raising here we get mismatched
# errors in Py3 (TypeError) vs Py2 (ValueError)
# Note: Only = an issue in DataFrame case
raise ValueError("Cannot broadcast operands together.")
if mask.any():
with np.errstate(all="ignore"):
result[mask] = op(xrav[mask], yrav[mask])
else:
if not is_scalar(y):
raise TypeError(
f"Cannot broadcast np.ndarray with operand of type { type(y) }"
)
# mask is only meaningful for x
result = np.empty(x.size, dtype=x.dtype)
mask = notna(xrav)
# 1 ** np.nan is 1. So we have to unmask those.
if op is pow:
mask = np.where(x == 1, False, mask)
elif op is rpow:
mask = np.where(y == 1, False, mask)
if mask.any():
with np.errstate(all="ignore"):
result[mask] = op(xrav[mask], y)
result, _ = maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape) # 2D compat
return result
def define_na_arithmetic_op(op, str_rep: str):
def na_op(x, y):
return na_arithmetic_op(x, y, op, str_rep)
return na_op
def na_arithmetic_op(left, right, op, str_rep: str):
"""
Return the result of evaluating op on the passed in values.
    If native types are not compatible, try coercion to object dtype.
Parameters
----------
left : np.ndarray
right : np.ndarray or scalar
str_rep : str or None
Returns
-------
array-like
Raises
------
TypeError : invalid operation
"""
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, left, right)
except TypeError:
result = masked_arith_op(left, right, op)
return missing.dispatch_fill_zeros(op, left, right, result)
def arithmetic_op(left: ArrayLike, right: Any, op, str_rep: str):
"""
Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ...
Parameters
----------
left : np.ndarray or ExtensionArray
right : object
Cannot be a DataFrame or Index. Series is *not* excluded.
op : {operator.add, operator.sub, ...}
Or one of the reversed variants from roperator.
str_rep : str
Returns
-------
    ndarray or ExtensionArray
Or a 2-tuple of these in the case of divmod or rdivmod.
"""
from pandas.core.ops import maybe_upcast_for_op
# NB: We assume that extract_array has already been called
# on `left` and `right`.
lvalues = left
rvalues = right
rvalues = maybe_upcast_for_op(rvalues, lvalues.shape)
if should_extension_dispatch(left, rvalues) or isinstance(
rvalues, (ABCTimedeltaArray, ABCDatetimeArray, Timestamp, Timedelta)
):
# TimedeltaArray, DatetimeArray, and Timestamp are included here
# because they have `freq` attribute which is handled correctly
# by dispatch_to_extension_op.
# Timedelta is included because numexpr will fail on it, see GH#31457
res_values = dispatch_to_extension_op(op, lvalues, rvalues)
else:
with np.errstate(all="ignore"):
res_values = na_arithmetic_op(lvalues, rvalues, op, str_rep)
return res_values
def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:
"""
    Evaluate a comparison operation `==`, `!=`, `>=`, `>`, `<=`, or `<`.
Parameters
----------
left : np.ndarray or ExtensionArray
right : object
Cannot be a DataFrame, Series, or Index.
op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le}
Returns
-------
ndarray or ExtensionArray
"""
# NB: We assume extract_array has already been called on left and right
lvalues = left
rvalues = right
rvalues = lib.item_from_zerodim(rvalues)
if isinstance(rvalues, list):
# TODO: same for tuples?
rvalues = np.asarray(rvalues)
if isinstance(rvalues, (np.ndarray, ABCExtensionArray, ABCIndexClass)):
# TODO: make this treatment consistent across ops and classes.
# We are not catching all listlikes here (e.g. frozenset, tuple)
# The ambiguous case is object-dtype. See GH#27803
if len(lvalues) != len(rvalues):
raise ValueError("Lengths must match to compare")
if should_extension_dispatch(lvalues, rvalues):
res_values = dispatch_to_extension_op(op, lvalues, rvalues)
elif is_scalar(rvalues) and isna(rvalues):
# numpy does not like comparisons vs None
if op is operator.ne:
res_values = np.ones(lvalues.shape, dtype=bool)
else:
res_values = np.zeros(lvalues.shape, dtype=bool)
elif is_object_dtype(lvalues.dtype):
res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)
else:
op_name = f"__{op.__name__}__"
method = getattr(lvalues, op_name)
with np.errstate(all="ignore"):
res_values = method(rvalues)
if res_values is NotImplemented:
res_values = invalid_comparison(lvalues, rvalues, op)
if is_scalar(res_values):
typ = type(rvalues)
raise TypeError(f"Could not compare {typ} type with Series")
return res_values
def na_logical_op(x: np.ndarray, y, op):
try:
# For exposition, write:
# yarr = isinstance(y, np.ndarray)
# yint = is_integer(y) or (yarr and y.dtype.kind == "i")
# ybool = is_bool(y) or (yarr and y.dtype.kind == "b")
# xint = x.dtype.kind == "i"
# xbool = x.dtype.kind == "b"
# Then Cases where this goes through without raising include:
# (xint or xbool) and (yint or bool)
result = op(x, y)
except TypeError:
if isinstance(y, np.ndarray):
# bool-bool dtype operations should be OK, should not get here
assert not (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype))
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x.ravel(), y.ravel(), op)
else:
# let null fall thru
assert lib.is_scalar(y)
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except (
TypeError,
ValueError,
AttributeError,
OverflowError,
NotImplementedError,
):
typ = type(y).__name__
raise TypeError(
f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array "
f"and scalar of type [{typ}]"
)
return result.reshape(x.shape)
def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike:
"""
Evaluate a logical operation `|`, `&`, or `^`.
Parameters
----------
left : np.ndarray or ExtensionArray
right : object
Cannot be a DataFrame, Series, or Index.
op : {operator.and_, operator.or_, operator.xor}
Or one of the reversed variants from roperator.
Returns
-------
    ndarray or ExtensionArray
"""
fill_int = lambda x: x
def fill_bool(x, left=None):
# if `left` is specifically not-boolean, we do not cast to bool
if x.dtype.kind in ["c", "f", "O"]:
# dtypes that can hold NA
mask = isna(x)
if mask.any():
x = x.astype(object)
x[mask] = False
if left is None or is_bool_dtype(left.dtype):
x = x.astype(bool)
return x
is_self_int_dtype = is_integer_dtype(left.dtype)
right = lib.item_from_zerodim(right)
if is_list_like(right) and not hasattr(right, "dtype"):
# e.g. list, tuple
right = construct_1d_object_array_from_listlike(right)
# NB: We assume extract_array has already been called on left and right
lvalues = left
rvalues = right
if should_extension_dispatch(lvalues, rvalues):
res_values = dispatch_to_extension_op(op, lvalues, rvalues)
else:
if isinstance(rvalues, np.ndarray):
is_other_int_dtype = is_integer_dtype(rvalues.dtype)
rvalues = rvalues if is_other_int_dtype else fill_bool(rvalues, lvalues)
else:
# i.e. scalar
is_other_int_dtype = lib.is_integer(rvalues)
# For int vs int `^`, `|`, `&` are bitwise operators and return
# integer dtypes. Otherwise these are boolean ops
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
res_values = na_logical_op(lvalues, rvalues, op)
res_values = filler(res_values) # type: ignore
return res_values
def get_array_op(op, str_rep: Optional[str] = None):
"""
Return a binary array operation corresponding to the given operator op.
Parameters
----------
op : function
Binary operator from operator or roperator module.
str_rep : str or None, default None
str_rep to pass to arithmetic_op
Returns
-------
function
"""
op_name = op.__name__.strip("_")
if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}:
return partial(comparison_op, op=op)
elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}:
return partial(logical_op, op=op)
else:
return partial(arithmetic_op, op=op, str_rep=str_rep)
avg_line_length: 31.201031 | max_line_length: 88 | alphanum_fraction: 0.617215
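The scalar-NA branch of `comparison_op` above (all-False results, except for `!=`) is visible from the public pandas API; a quick sketch:

```python
import pandas as pd

s = pd.Series([1, 2, 3])
# Comparing against None deliberately, to exercise the scalar-NA code path.
print((s == None).tolist())   # [False, False, False]
print((s != None).tolist())   # [True, True, True]
```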
hexsha: cfb74aeec821c618c3fabb938014614a922e01f6 | size: 8,118 | ext: py | lang: Python
path: archive/solutions/lab1solutions.py | repo: samuelcheang0419/python-labs | head_hexsha: 6f32c141412a1af4d8a39b25a56cc211d0a9040f | licenses: ["BSD-2-Clause-FreeBSD"]
stars: 48 (2016-06-09T20:39:56.000Z to 2022-01-30T12:49:20.000Z) | issues: null | forks: 117 (2015-10-15T21:08:39.000Z to 2021-11-12T10:09:18.000Z)
#!/usr/bin/env python3 -tt
"""
File: lab1solutions.py
----------------------
Reference solutions to Lab 1 for CS41: Hap.py Code.
"""
import math
DICTIONARY_PATH = '/usr/share/dict/words' # Feel free to change this to your dictionary
def say_hello():
"""Prints "Hello, world!" """
print("Hello, World!")
def print_tictactoe():
"""Print out a tic tac toe board using print's `sep` keyword argument
Note: this is just one of many ways to solve this problem, chosen to
illustrate .join, list multiplication, .format, string multiplication,
and, of course, `sep`.
"""
row = '|'.join([' '] * 3) # row = ' | | '
div = '\n{}\n'.format('-' * 9) # div = '--------'
print(div.join([row] * 3))
def print_super_tictactoe():
"""Prints a super tic-tac-toe board using print's `sep` keyword.
    Note: As above, this is just one of many ways to write this program, and
it isn't very readable, or very fast! But, it does illustrate using the `sep`
keyword.
"""
row = 'H'.join([' | | '] * 3) # row = ' | | H | | H | | '
div = '\n'+ 'H'.join(['--+--+--'] * 3) + '\n' # div = '\n--+--+--H--+--+--H--+--+--\n'
superdiv = '\n' + '+'.join(['=' * 8] * 3) + '\n' # superdiv = '\n========+========+========\n'
block = div.join([row] * 3)
print(superdiv.join([block] * 3))
def fizzbuzz(n):
"""Returns the sum of all numbers < n divisible by 3 or 5.
This iterative approach will work really well, and if it gets the job done
reasonably quickly, that's all we should ask for.
If you want to write this in one line, the following will work:
return sum([i for i in range(n) if i % 3 == 0 or i % 5 == 0])
However, that line isn't particularly Pythonic, since we're basically just
compressing the syntax of an iterative for loop into one line - no big changes
except for the use of `sum`.
Another approach, as we'll learn about soon, is to use `filter`:
return sum(filter(lambda i: i % 3 == 0 and i % 5 == 0, range(n)))
However, in many ways, this isn't much different, since we're still specifying a
function (admittedly, a `lambda` or anonymous function - which we'll learn about Week 4)
over our range of numbers.
For a job this simple, the iterative approach will suffice.
"""
count = 0
for i in range(n):
if i % 3 == 0 or i % 5 == 0:
count += i
print(count)
return count
def collatz_len(n):
"""Computes the length of the Collatz sequence starting at `n`.
While this iterative approach might look "unpythonic" at first,
the Collatz sequence is a very iterative algorithm, and there aren't
very many easy functional ways to solve this problem.
One benefit of this approach is that we do not store the entire
sequence in memory - since we're only interested in the length, that
would be wasteful.
"""
length = 1
while n > 1:
if n % 2 == 0:
n //= 2 # We want to explicitly use integer division here, even though n is even.
else:
n = 3 * n + 1
length += 1 # Note: Python has no increment operator (like ++), so we use += 1
return length
def max_collatz_len(n):
"""Computes the longest Collatz sequence length for starting numbers < n
In Python, the `max` function returns the largest element of some collection.
Since "finding the max element" isn't naturally iterative (although it can be
solved iteratively), we can use this functional-looking code to compute the
maximal collatz length. Note, however, that this approach buffers the list of
lengths in memory (due to the list comprehension). In general, we can mitigate
this problem using a generator comprehension (look it up!) rather than a list
comprehension, but we'll cover that later in the course.
An even more Pythonic way to solve this problem is to use `map`, which applies a
function to all elements of a sequence.
return max(map(collatz_len, range(1, n)))
"""
return max([collatz_len(i) for i in range(1, n)])
def collatz_len_fast(n, cache):
"""Slightly more clever way to find the collatz length.
A dictionary is used as a cache of previous results, and since
the dictionary passed in is mutable, our changes will reflect
in the caller.
"""
if n == 1:
return 1
if n in cache:
return cache[n]
if n % 2 == 0:
cache[n] = collatz_len_fast(n // 2, cache) + 1
else:
cache[n] = collatz_len_fast(3 * n + 1, cache) + 1
return cache[n]
def max_collatz_len_fast(n):
"""Slightly faster way to compute the longest Collatz sequence for numbers < n
We use the exact same tactic as in `max_collatz_len` above, with the added
optimization that we only look over the second half of the range, since everything
in the first half has a x2 preimage.
"""
cache = {}
return max(collatz_len_fast(i, cache) for i in range(n // 2, n))
def convert_fahr_to_cels(deg_fahr):
    """Converts a temperature in degrees Fahrenheit to degrees Celsius."""
    return (deg_fahr - 32) * 5 / 9
def converter():
"""Converts user-specified temperatures from Fahrenheit to Celsius.
This problem exists to check that you're running Python 3, where
    `input` returns a string and division is true (float) division by default, in
    contrast to Python 2, where `input` quasi-evaluates its input and division
is integer (floored) division by default.
Note: There's some extra code here (the try/except/else stuff) so that the
solutions can continue to run after you break out of the converter. We'll
talk about advanced exception handling Week 5.
"""
print("Convert from Fahrenheit to Celsius with this lovely tool!")
print("For the purposes of the lab solutions, hit CTRL+C to quit.")
while True:
try:
fahr = float(input("Temperature F? "))
except KeyboardInterrupt:
print("\nExiting converter...")
break
except ValueError as exc:
print(exc)
else:
cels = (fahr - 32) * 5 / 9
# cels = round(cels, 2) # Round to two decimal places
print("It is {} degrees Celsius".format(cels))
def get_english_words(dictionary_path):
"""Returns a set of trimmed, capitalized English words from a path to a dictionary.
The dictionary is assumed to have one English word per line.
If dictionary_path can not be read or is formatted incorrectly, a default English word
set is returned containing some fruits.
Note that we keep the file open for as little time as possible, which is
generally a good practice. One downside of this implementation is that it
buffers all of the words in memory (first as a string, and later as a collection
of lines, but the word list is a known finite size (and that size isn't *too*
big), so this approach will work fine. Iterating through the lines in the file
with a for loop could mitigate this downside.
We then use a set comprehension to build an uppercased collection of all of
the words in the dictionary.
Note that we choose to represent the English words as a set, because we want fast
membership testing (using `in`) and also want to be able to iterate over all words.
"""
try:
with open(dictionary_path, 'r') as f:
content = f.read()
return {line.strip().upper() for line in content.split('\n') if line}
except OSError:
return {'APPLE', 'BANANA', 'PEAR', 'ORANGE'}
if __name__ == '__main__':
"""Runs each of the lab solution functions and prints the attached docstring and source."""
english = get_english_words(DICTIONARY_PATH)
fns = [
# Comment out any functions that you do not want to run
(say_hello, (), {}),
(print_tictactoe, (), {}),
(print_super_tictactoe, (), {}),
(fizzbuzz, (1001,), {}),
(max_collatz_len, (1000,), {}),
(max_collatz_len_fast, (1000000,), {}),
(converter, (), {}),
]
for fn, args, kwargs in fns:
name = fn.__name__
print("*" * len(name)) # header
print(name) # function name
print(fn.__doc__) # function docstring
res = fn(*args, **kwargs) # variadic argument unpacking - cool stuff!
'''if res:
print(res)
input("Press [ENTER] to continue...")'''
print("Done!")
avg_line_length: 34.991379 | max_line_length: 97 | alphanum_fraction: 0.667406
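A worked example for `collatz_len` above: starting from 6 the sequence is 6, 3, 10, 5, 16, 8, 4, 2, 1, which is nine terms. Assuming the file is importable as `lab1solutions` (the interactive `__main__` block only runs when executed as a script):

```python
from lab1solutions import collatz_len, max_collatz_len

assert collatz_len(6) == 9
assert max_collatz_len(10) == 20   # the longest sequence below 10 starts at 9
```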
hexsha: f7c497459a025a7b8cb01e6ed0c8e029607b88ab | size: 8,273 | ext: py | lang: Python
path: p2_continuous-control/ddpg/ddpg_agent.py | repo: happykbs/udacity-drl-bongsang | head_hexsha: 4a5f9c0698543cf80e83020d333cb8589a179243 | licenses: ["MIT"]
stars: 1 (2019-02-11T15:53:20.000Z to 2019-02-11T15:53:20.000Z) | issues: null | forks: null
import numpy as np
import random
import copy
from collections import namedtuple, deque
from .model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = 100000 #int(1e5) # replay buffer size
BATCH_SIZE = 256 #128 # minibatch size
GAMMA = 0.9 #0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 0.001 #1e-4 # learning rate of the actor
LR_CRITIC = 0.001 #1e-3 # learning rate of the critic
WEIGHT_DECAY = 1e-6 #0 # L2 weight decay
SIGMA = 0.1
# GPU = 0 # GPU ID
# device = torch.device(f"cuda:{GPU}" if torch.cuda.is_available() else "cpu")
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, random_seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.hard_copy(self.actor_target, self.actor_local) ## !
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.hard_copy(self.critic_target, self.critic_local) ## !
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
def hard_copy(self, target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def step(self, state, action, reward, next_state, done):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
self.memory.add(state, action, reward, next_state, done)
# Learn, if enough samples are available in memory
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Compute critic loss
Q_expected = self.critic_local(states, actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=SIGMA):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])
self.state = x + dx
return self.state
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size) # internal memory (deque)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
avg_line_length: 41.782828 | max_line_length: 127 | alphanum_fraction: 0.623474
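A standalone sketch of the soft-update rule used in `Agent.soft_update` above (θ_target ← τ·θ_local + (1 - τ)·θ_target), applied to two throwaway linear layers; only `torch` is required.

```python
import torch.nn as nn

local_net, target_net = nn.Linear(4, 2), nn.Linear(4, 2)
tau = 1e-3
for t_param, l_param in zip(target_net.parameters(), local_net.parameters()):
    # Nudge the target weights a small step toward the local weights.
    t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data)
```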
hexsha: ad50caed37544fa2d090221f51aa5069254d28e3 | size: 3,198 | ext: py | lang: Python
path: psdaq/psdaq/cas/collection_widget.py | repo: JBlaschke/lcls2 | head_hexsha: 30523ef069e823535475d68fa283c6387bcf817b | licenses: ["BSD-3-Clause-LBNL"]
stars: 16 (2017-11-09T17:10:56.000Z to 2022-03-09T23:03:10.000Z) | issues: 6 (2017-12-12T19:30:05.000Z to 2020-07-09T00:28:33.000Z) | forks: 25 (2017-09-18T20:02:43.000Z to 2022-03-27T22:27:42.000Z)
import sys
import zmq
from datetime import datetime, timezone
from PyQt5 import QtCore, QtGui, QtWidgets
PORT_BASE = 29980
POSIX_TIME_AT_EPICS_EPOCH = 631152000
def timestampStr():
current = datetime.now(timezone.utc)
nsec = 1000 * current.microsecond
sec = int(current.timestamp()) - POSIX_TIME_AT_EPICS_EPOCH
return '%010d-%09d' % (sec, nsec)
def create_msg(key, msg_id=None, sender_id=None, body={}):
if msg_id is None:
msg_id = timestampStr()
msg = {'header': {
'key': key,
'msg_id': msg_id,
'sender_id': sender_id},
'body': body}
return msg
def rep_port(platform):
return PORT_BASE + platform + 20
class CollectionWidget(QtWidgets.QWidget):
def __init__(self, partition, parent=None):
super().__init__(parent)
self.context = zmq.Context(1)
self.socket = self.context.socket(zmq.REQ)
self.socket.connect('tcp://drp-tst-acc06:%d' %rep_port(partition))
layout = QtWidgets.QGridLayout()
layout.addWidget(QtWidgets.QLabel('Collection') , 0, 0, 1, 3)
l = QtWidgets.QHBoxLayout()
button = QtWidgets.QPushButton('Auto connect')
button.clicked.connect(self.auto_connect)
l.addWidget(button)
button = QtWidgets.QPushButton('Reset')
button.clicked.connect(self.reset)
l.addWidget(button)
layout.addLayout(l, 1, 0, 1, 3)
self.label = QtWidgets.QLabel()
layout.addWidget(self.label, 2, 0, 1, 3)
self.listWidgets = {}
for i, group in enumerate(['drp', 'teb', 'meb']):
layout.addWidget(QtWidgets.QLabel(group), 3, i)
w = QtWidgets.QListWidget()
layout.addWidget(w, 4, i)
self.listWidgets[group] = w
self.setLayout(layout)
self.setMaximumWidth(300)
def auto_connect(self):
self.label.clear()
for w in self.listWidgets.values():
w.clear()
for cmd in ['rollcall', 'alloc', 'connect']:
self.socket.send_json(create_msg(cmd))
response = self.socket.recv_json()
print(response)
if 'err_info' in response['body']:
self.label.setText(response['body']['err_info'])
return
self.get_state()
def reset(self):
self.label.clear()
for w in self.listWidgets.values():
w.clear()
self.socket.send_json(create_msg('reset'))
print(self.socket.recv_json())
def get_state(self):
msg = create_msg('getstate')
self.socket.send_json(msg)
reply = self.socket.recv_json()
for group in reply['body']:
if group not in self.listWidgets:
print('unknown group:', group)
continue
w = self.listWidgets[group]
w.clear()
for k, v in reply['body'][group].items():
host = v['proc_info']['host']
QtWidgets.QListWidgetItem(host, w)
if __name__ == '__main__':
app = QtWidgets.QApplication([])
    widget = CollectionWidget(0)  # CollectionWidget requires a partition number; 0 is an arbitrary example value
widget.show()
sys.exit(app.exec_())
avg_line_length: 31.048544 | max_line_length: 74 | alphanum_fraction: 0.583802
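A sketch of the JSON message shape that `create_msg` above puts on the REQ socket; the `sender_id` value here is just an illustrative placeholder.

```python
msg = create_msg('getstate', sender_id='gui-0')
# -> {'header': {'key': 'getstate', 'msg_id': '<sec>-<nsec>', 'sender_id': 'gui-0'},
#     'body': {}}
```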
hexsha: 7fb5134dc4e47a05b2513bcb086572e21ba8c386 | size: 5,043 | ext: py | lang: Python
path: ffta/line.py | repo: lindat18/ffta | head_hexsha: f510a2068b7626e2984e54afc1a577450e560e97 | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""line.py: Contains line class."""
# pylint: disable=E1101,R0902,C0103
__author__ = "Durmus U. Karatay"
__copyright__ = "Copyright 2014, Ginger Lab"
__maintainer__ = "Durmus U. Karatay"
__email__ = "ukaratay@uw.edu"
__status__ = "Development"
import numpy as np
from ffta import pixel
class Line:
"""
Signal Processing to Extract Time-to-First-Peak.
This class is a container for pixels in a line. Since the AFM scans are in
    lines, the data taken is grouped in lines. This class takes the line data
and passes it along to pixels.
Parameters
----------
signal_array : (n_signals, n_points) array_like
2D real-valued signal array, corresponds to a line
params : dict
Includes parameters for processing. The list of parameters is:
trigger = float (in seconds)
total_time = float (in seconds)
sampling_rate = int (in Hz)
drive_freq = float (in Hz)
roi = float (in seconds)
window = string (see documentation of scipy.signal.get_window)
bandpass_filter = int (0: no filtering, 1: FIR filter, 2: IIR filter)
filter_bandwidth = float (in Hz)
n_taps = integer (default: 999)
wavelet_analysis = bool (0: Hilbert method, 1: Wavelet Method)
wavelet_parameter = int (default: 5)
recombination = bool (0: FF-trEFM, 1: Recombination)
n_pixels : int
Number of pixels in a line.
pycroscopy : bool, optional
Pycroscopy requires different orientation, so this corrects for this effect.
Attributes
----------
n_points : int
Number of points in a signal.
n_signals : int
Number of signals in a line.
inst_freq : (n_points, n_pixels) array_like
        Instantaneous frequencies of the line.
tfp : (n_pixels,) array_like
Time from trigger to first-peak, in seconds.
shift : (n_pixels,) array_like
Frequency shift from trigger to first-peak, in Hz.
See Also
--------
pixel: Pixel processing for FF-trEFM data.
simulate: Simulation for synthetic FF-trEFM data.
scipy.signal.get_window: Windows for signal processing.
Examples
--------
>>> from ffta import line, utils
>>>
>>> signal_file = '../data/SW_0000.ibw'
>>> params_file = '../data/parameters.cfg'
>>>
>>> signal_array = utils.load.signal(signal_file)
>>> n_pixels, params = utils.load.configuration(params_file)
>>>
>>> l = line.Line(signal_array, params, n_pixels)
>>> tfp, shift, inst_freq = l.analyze()
"""
def __init__(self, signal_array, params, n_pixels, pycroscopy=False):
# Pass inputs to the object.
self.signal_array = signal_array
if pycroscopy:
self.signal_array = signal_array.T
self.n_pixels = int(n_pixels)
self.params = params
# Initialize tFP and shift arrays.
self.tfp = np.empty(self.n_pixels)
self.shift = np.empty(self.n_pixels)
self.inst_freq = np.empty((self.signal_array.shape[0], self.n_pixels))
self.avgs_per_pixel = int(self.signal_array.shape[1]/self.n_pixels)
self.n_signals = self.signal_array.shape[0]
return
def analyze(self):
"""
Analyzes the line with the given method.
Returns
-------
tfp : (n_pixels,) array_like
Time from trigger to first-peak, in seconds.
shift : (n_pixels,) array_like
Frequency shift from trigger to first-peak, in Hz.
inst_freq : (n_points, n_pixels) array_like
Instantaneous frequencies of the line.
"""
# Split the signal array into pixels.
pixel_signals = np.split(self.signal_array, self.n_pixels, axis=1)
# Iterate over pixels and return tFP and shift arrays.
for i, pixel_signal in enumerate(pixel_signals):
p = pixel.Pixel(pixel_signal, self.params)
(self.tfp[i], self.shift[i], self.inst_freq[:, i]) = p.analyze()
return (self.tfp, self.shift, self.inst_freq)
def pixel_wise_avg(self):
"""
Averages the line per pixel and saves the result as signal_avg_array
This functionality is primarily used in Pycroscopy-loading functions
Returns
-------
signal_avg_array : (n_points, n_pixels) numpy array
Returns signal_averaged time-domain signal at each pixel
"""
self.signal_avg_array = np.empty((self.signal_array.shape[0], self.n_pixels))
for i in range(self.n_pixels):
avg = self.signal_array[:, i*self.avgs_per_pixel:(i+1)*self.avgs_per_pixel]
self.signal_avg_array[:, i] = avg.mean(axis=1)
return self.signal_avg_array
def clear_filter_flags(self):
"""Removes flags from parameters for setting filters"""
#self.params['window'] = 0
self.params['bandpass_filter'] = 0
return
avg_line_length: 33.177632 | max_line_length: 87 | alphanum_fraction: 0.624033
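A synthetic-data sketch of `Line.pixel_wise_avg` above: 8 columns of a random signal grouped into 4 pixels gives 2 averages per pixel. The empty `params` dict is enough here because only the averaging path is exercised; it assumes the `ffta` package above is importable.

```python
import numpy as np
from ffta.line import Line  # the class above

signal_array = np.random.randn(1000, 8)      # (n_points, n_pixels * avgs_per_pixel)
l = Line(signal_array, params={}, n_pixels=4)
print(l.pixel_wise_avg().shape)              # (1000, 4)
```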
hexsha: dcbf588284bc57a78498a74eb3617bec1a59dc93 | size: 1,907 | ext: py | lang: Python
path: helpers/clean.py | repo: abyoussef/ALTEGRAD_Challenge | head_hexsha: eb1a47d7e558143d18ed6f9ac82e94df70a941d2 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import re
import string
import nltk
from nltk import PerceptronTagger
from nltk.corpus import stopwords
def clean_text_simple(text, tagger, keep, stpwds, stemmer, remove_stopwords=True, pos_filtering=True, stemming=True):
# convert to lower case
text = text.lower()
# remove punctuation (preserving intra-word dashes)
tokens = tokenization(text)
if pos_filtering == True and len(tokens) > 0:
tokens = pos_tagging(tokens, tagger, keep)
if remove_stopwords:
# remove stopwords
tokens = [token for token in tokens if token not in stpwds]
if stemming:
# apply Porter's stemmer
        tokens = [stemmer.stem(t) for t in tokens]  # list comprehension so the result is reusable under Python 3
return tokens
def pos_tagging(tokens, tagger, keep):
# apply POS-tagging
tagged_tokens = tagger.tag(tokens)
# retain only nouns and adjectives
tokens = [item[0] for item in tagged_tokens if item[1] in keep]
return tokens
def tokenization(text):
punct = string.punctuation.replace('-', '')
cond = '[' + re.escape(punct) + ']+'
text = re.sub(cond, ' ', text)
text = re.sub('(\s+-|-\s+)', ' ', text)
# strip extra white space
text = re.sub('-{2,}', ' ', text)
text = re.sub('\s+', ' ', text)
# strip leading and trailing white space
text = text.strip()
# tokenize (split based on whitespace)
tokens = text.split(' ')
    tokens = [t for t in tokens if len(t) > 0]  # materialize as a list; clean_text_simple calls len() on the result
return tokens
def clean(X, col = 'body', cleaner = clean_text_simple, join = True):
X_cleaned = X.copy()
tagger = PerceptronTagger()
keep = set(['NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJS', 'JJR'])
stpwds = set(stopwords.words('english'))
stemmer = nltk.stem.PorterStemmer()
X_cleaned[col] = X_cleaned[col].apply(lambda x: cleaner(x, tagger, keep, stpwds, stemmer))
if join:
X_cleaned[col] = X_cleaned[col].apply(lambda x: ' '.join(x))
return X_cleaned
avg_line_length: 34.053571 | max_line_length: 117 | alphanum_fraction: 0.64237
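A usage sketch for `clean_text_simple` above, assuming the definitions in this file are in scope and the NLTK data it relies on has been fetched (e.g. `nltk.download('stopwords')` and `nltk.download('averaged_perceptron_tagger')`).

```python
import nltk
from nltk import PerceptronTagger
from nltk.corpus import stopwords

tagger = PerceptronTagger()
keep = {'NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJS', 'JJR'}
stpwds = set(stopwords.words('english'))
stemmer = nltk.stem.PorterStemmer()

tokens = clean_text_simple("Graph-based keyword extraction works well.",
                           tagger, keep, stpwds, stemmer)
print(list(tokens))   # stemmed nouns/adjectives with stopwords removed
```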
hexsha: f267150ccb6939073502dafc07ec82935c0a4d43 | size: 5,587 | ext: py | lang: Python
path: rnnfft.py | repo: srowen/rnn-fft | head_hexsha: 6be95ec714fb4b6a6dbd3b445a050678a9929c10 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: 1 (2020-04-13T10:13:14.000Z to 2020-04-13T10:13:14.000Z)
#!/usr/bin/env python3
'''
!pip3 install -U tensorflow-gpu keras numpy scipy
'''
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.linalg.blas import daxpy
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Embedding, GRU
from keras.models import Sequential
from keras.optimizers import RMSprop
# Load a short audio file
(breath_sr, breath_wav) = wavfile.read('11_Dialogue_Class_Rank.wav')
# Retain just one stereo track
data = breath_wav[:,0]
# GPUs to use (0-1)
gpu_count = 1
# Number of time-domain samples to convert to frequency domain in one window
window_size = 1024
# Number of time-domain samples between successive windows
slide = 256
# Frequency domain dimension
freq_dim = 2 * (1 + (window_size // 2)) # x2 because of real-imag parts
# Number of successive freq domain windows to predict the next window from
sequence_len = 25
# Dimension of GRU units
gru_dim = 1024
# Optimizer learning rate
learning_rate = 0.1
specgram = plt.specgram(data, NFFT=window_size, Fs=slide)
print("Spectrum of input audio")
plt.show()
# Hanning window weights to apply to time-domain sample windows
# Normalize weights to sum to 1, for later convenience
window_weight = slide * np.hanning(window_size) / np.sum(np.hanning(window_size))
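# Hedged aside (illustrative check, not part of the original pipeline): because the
# weights are normalized to sum to 1 and then scaled by `slide`, Hann windows that
# overlap every `slide` samples add up to roughly 1.0 away from the edges, which is
# what lets the plain overlap-add reconstruction later in this script recover the
# original signal amplitude.
_ola_check = np.zeros(4 * window_size)
for _i in range(0, len(_ola_check) - window_size + 1, slide):
    _ola_check[_i:_i + window_size] += window_weight
# Interior samples of _ola_check sit close to 1.0; samples near the two ends taper off as expected.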
n = len(data)
# Data, sliced into a series of windows, and weighted
weighted_slices = data[np.arange(window_size)[None, :] + slide * np.arange(1 + (n - window_size) // slide)[:, None]] * window_weight
del data
# Apply the FFT to convert to a sequence of frequency-domain windows
freq_slices = np.fft.rfft(weighted_slices)
del weighted_slices
# FFT outputs (real,imag) 64-bit pairs. Flatten them to two separate 32-bit values
freq_slices_flattened = np.apply_along_axis(lambda a: a.view('(2,)float').flatten(), 1, freq_slices).astype('float32')
del freq_slices
# Select devices for training based on GPU availability
if gpu_count > 0:
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
session = tf.Session(config=config)
K.set_session(session)
device1 = '/gpu:0'
if gpu_count > 1:
device2 = '/gpu:1'
else:
device2 = '/gpu:0'
else:
device1 = '/cpu:0'
device2 = '/cpu:0'
model = Sequential()
with tf.device(device1):
model.add(GRU(gru_dim,
input_shape=(sequence_len, freq_dim)))
with tf.device(device2):
model.add(Dense(freq_dim, activation=None))
model.compile(optimizer=RMSprop(lr=learning_rate),
loss='mean_absolute_error')
model.summary()
# Initialize predicted audio out with the first few input windows
predicted_freq_slices = freq_slices_flattened[:sequence_len]
# Build up batches of input windows, paired with next window as prediction target
input_freq_slices = []
next_freq_slices = []
for i in range(0, len(freq_slices_flattened) - sequence_len - 1):
input_freq_slices.append(freq_slices_flattened[i : i + sequence_len])
next_freq_slices.append(freq_slices_flattened[i + sequence_len])
del freq_slices_flattened
# Convert them to numpy arrays for future use
input_freq_slices = np.array(input_freq_slices)
next_freq_slices = np.array(next_freq_slices)
# Pick most (input,next) pairs as training; rest for validation
shuffled_indices = np.random.permutation(len(input_freq_slices))
training_size = int(0.95 * len(input_freq_slices))
train_indices = shuffled_indices[:training_size]
val_indices = shuffled_indices[training_size:]
input_freq_slices_train = input_freq_slices[train_indices]
input_freq_slices_val = input_freq_slices[val_indices]
next_freq_slices_train = next_freq_slices[train_indices]
next_freq_slices_val = next_freq_slices[val_indices]
early_stopping = EarlyStopping(patience=10, verbose=1)
model.fit(input_freq_slices_train,
next_freq_slices_train,
epochs=100,
batch_size=64,
shuffle=True,
validation_data=(input_freq_slices_val, next_freq_slices_val),
verbose=2,
callbacks=[early_stopping])
# Starting with initial part of input audio, predict many next windows
for i in range(0, 1000):
pred_next_slice = model.predict(predicted_freq_slices[None,-sequence_len:])
predicted_freq_slices = np.append(predicted_freq_slices, pred_next_slice, axis=0)
# Convert back to (real,imag) complex representation in freq domain
predicted_freq_slices_unflattened = \
np.reshape(predicted_freq_slices, (-1, freq_dim//2, 2)).view('complex64').reshape(-1, freq_dim//2).astype('complex128')
# Apply inverse FFT to get back time-domain windows
pred_time_slices = np.fft.irfft(predicted_freq_slices_unflattened)
# Reassemble full time domain signal by adding overlapping windows
reassembled = np.zeros(window_size + (len(pred_time_slices) - 1) * slide)
for i in range(0, len(pred_time_slices)):
daxpy(pred_time_slices[i], reassembled, offy=slide * i)
# Plot some of the first generated time-domain data as a check
plot_sample_base = sequence_len * slide
plt.plot(reassembled[plot_sample_base:plot_sample_base + window_size])
plt.show()
# Scale time-domain data to have max at 32767, for 16-bit wav output
reassembled_scale = np.max(np.abs(reassembled))
reassembled = reassembled * (32767 / reassembled_scale)
print("Spectrum of output audio")
specgram = plt.specgram(reassembled, NFFT=window_size, Fs=slide)
plt.show()
# Overwrite output to out.wav
out_file = 'out.wav'
if os.path.isfile(out_file):
os.remove(out_file)
wavfile.write(out_file, breath_sr, reassembled.astype(np.int16))
| 35.585987
| 132
| 0.758547
|
96acf19de44f42f201e08ba80d03a8b48c5d8962
| 31,245
|
py
|
Python
|
tests/test_gcloud/testbench/gcs_object.py
|
lgeiger/io
|
90be860451a705e2fbe8cfdec3c30030112b5c69
|
[
"Apache-2.0"
] | 558
|
2018-11-09T22:45:27.000Z
|
2022-03-24T04:59:36.000Z
|
tests/test_gcloud/testbench/gcs_object.py
|
lgeiger/io
|
90be860451a705e2fbe8cfdec3c30030112b5c69
|
[
"Apache-2.0"
] | 1,122
|
2018-12-09T03:30:40.000Z
|
2022-03-31T16:22:15.000Z
|
tests/test_gcloud/testbench/gcs_object.py
|
lgeiger/io
|
90be860451a705e2fbe8cfdec3c30030112b5c69
|
[
"Apache-2.0"
] | 319
|
2018-12-09T00:18:47.000Z
|
2022-03-30T21:49:46.000Z
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement a class to simulate GCS objects."""
import base64
import crc32c
import error_response
import hashlib
import json
import struct
import testbench_utils
import time
class GcsObjectVersion:
"""Represent a single revision of a GCS Object."""
def __init__(self, gcs_url, bucket_name, name, generation, request, media):
"""Initialize a new object revision.
:param gcs_url:str the base URL for the GCS service.
:param bucket_name:str the name of the bucket that contains the object.
:param name:str the name of the object.
:param generation:int the generation number for this object.
:param request:flask.Request the contents of the HTTP request.
:param media:str the contents of the object.
"""
self.gcs_url = gcs_url
self.bucket_name = bucket_name
self.name = name
self.generation = str(generation)
self.object_id = bucket_name + "/o/" + name + "/" + str(generation)
now = time.gmtime(time.time())
timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now)
self.media = media
instructions = request.headers.get("x-goog-testbench-instructions")
if instructions == "inject-upload-data-error":
self.media = testbench_utils.corrupt_media(media)
self.metadata = {
"timeCreated": timestamp,
"updated": timestamp,
"metageneration": "0",
"generation": str(generation),
"location": "US",
"storageClass": "STANDARD",
"size": str(len(self.media)),
"etag": "XYZ=",
"owner": {"entity": "project-owners-123456789", "entityId": ""},
"md5Hash": base64.b64encode(hashlib.md5(self.media).digest()).decode(
"utf-8"
),
"crc32c": base64.b64encode(
struct.pack(">I", crc32c.crc32(self.media))
).decode("utf-8"),
}
if request.headers.get("content-type") is not None:
self.metadata["contentType"] = request.headers.get("content-type")
def update_from_metadata(self, metadata):
"""Update from a metadata dictionary.
:param metadata:dict a dictionary with new metadata values.
:rtype:NoneType
"""
tmp = self.metadata.copy()
tmp.update(metadata)
tmp["bucket"] = tmp.get("bucket", self.name)
tmp["name"] = tmp.get("name", self.name)
now = time.gmtime(time.time())
timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now)
# Some values cannot be changed via updates, so we always reset them.
tmp.update(
{
"kind": "storage#object",
"bucket": self.bucket_name,
"name": self.name,
"id": self.object_id,
"selfLink": self.gcs_url + self.name,
"projectNumber": "123456789",
"updated": timestamp,
}
)
tmp["metageneration"] = str(int(tmp.get("metageneration", "0")) + 1)
self.metadata = tmp
self._validate_hashes()
def _validate_hashes(self):
"""Validate the md5Hash and crc32c fields against the stored media."""
self._validate_md5_hash()
self._validate_crc32c()
def _validate_md5_hash(self):
"""Validate the md5Hash field against the stored media."""
actual = self.metadata.get("md5Hash", "")
expected = base64.b64encode(hashlib.md5(self.media).digest()).decode("utf-8")
if actual != expected:
raise error_response.ErrorResponse(
"Mismatched MD5 hash expected={}, actual={}".format(expected, actual)
)
def _validate_crc32c(self):
"""Validate the crc32c field against the stored media."""
actual = self.metadata.get("crc32c", "")
expected = base64.b64encode(struct.pack(">I", crc32c.crc32(self.media))).decode(
"utf-8"
)
if actual != expected:
raise error_response.ErrorResponse(
"Mismatched CRC32C checksum expected={}, actual={}".format(
expected, actual
)
)
def validate_encryption_for_read(self, request, prefix="x-goog-encryption"):
"""Verify that the request includes the correct encryption keys.
:param request:flask.Request the http request.
:param prefix: str the prefix shared by the encryption headers,
typically 'x-goog-encryption', but for rewrite requests it can be
'x-goog-copy-source-encryption'.
:rtype:NoneType
"""
key_header = prefix + "-key"
hash_header = prefix + "-key-sha256"
algo_header = prefix + "-algorithm"
encryption = self.metadata.get("customerEncryption")
if encryption is None:
# The object is not encrypted, no key is needed.
if request.headers.get(key_header) is None:
return
else:
# The data is not encrypted, sending an encryption key is an
# error.
testbench_utils.raise_csek_error()
# The data is encrypted, the key must be present, match, and match its
# hash.
key_header_value = request.headers.get(key_header)
hash_header_value = request.headers.get(hash_header)
algo_header_value = request.headers.get(algo_header)
testbench_utils.validate_customer_encryption_headers(
key_header_value, hash_header_value, algo_header_value
)
if encryption.get("keySha256") != hash_header_value:
testbench_utils.raise_csek_error()
def _capture_customer_encryption(self, request):
"""Capture the customer-supplied encryption key, if any.
:param request:flask.Request the http request.
:rtype:NoneType
"""
if request.headers.get("x-goog-encryption-key") is None:
return
prefix = "x-goog-encryption"
key_header = prefix + "-key"
hash_header = prefix + "-key-sha256"
algo_header = prefix + "-algorithm"
key_header_value = request.headers.get(key_header)
hash_header_value = request.headers.get(hash_header)
algo_header_value = request.headers.get(algo_header)
testbench_utils.validate_customer_encryption_headers(
key_header_value, hash_header_value, algo_header_value
)
self.metadata["customerEncryption"] = {
"encryptionAlgorithm": algo_header_value,
"keySha256": hash_header_value,
}
def x_goog_hash_header(self):
"""Return the value for the x-goog-hash header."""
hashes = {
"md5": self.metadata.get("md5Hash", ""),
"crc32c": self.metadata.get("crc32c", ""),
}
hashes = ["{}={}".format(key, val) for key, val in hashes.items() if val]
return ",".join(hashes)
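    # Illustrative note (example shape only, not real checksum values): the returned
    # header value looks like "md5=<base64 md5>,crc32c=<base64 crc32c>", and either
    # entry is omitted when its metadata field is empty.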
class GcsObject:
"""Represent a GCS Object, including all its revisions."""
def __init__(self, bucket_name, name):
"""Initialize a fake GCS Blob.
:param bucket_name:str the bucket that will contain the new object.
:param name:str the name of the new object.
"""
self.bucket_name = bucket_name
self.name = name
# A counter to create new generation numbers for the object revisions.
# Note that 0 is an invalid generation number. The application can use
# ifGenerationMatch=0 as a pre-condition that means "object does not
# exist".
self.generation_generator = 0
self.current_generation = None
self.revisions = {}
self.rewrite_token_generator = 0
self.rewrite_operations = {}
def get_revision(self, request, version_field_name="generation"):
"""Get the information about a particular object revision or raise.
:param request:flask.Request the contents of the http request.
:param version_field_name:str the name of the generation
parameter, typically 'generation', but sometimes 'sourceGeneration'.
:return: the object revision.
:rtype: GcsObjectVersion
:raises:ErrorResponse if the request contains an invalid generation
number.
"""
generation = request.args.get(version_field_name)
if generation is None:
return self.get_latest()
version = self.revisions.get(generation)
if version is None:
raise error_response.ErrorResponse(
"Precondition Failed: generation %s not found" % generation
)
return version
def del_revision(self, request):
"""Delete a version of a fake GCS Blob.
:param request:flask.Request the contents of the HTTP request.
:return: True if the object entry in the Bucket should be deleted.
:rtype: bool
"""
generation = request.args.get("generation") or self.current_generation
if generation is None:
return True
self.revisions.pop(generation)
if len(self.revisions) == 0:
self.current_generation = None
return True
self.current_generation = sorted(self.revisions.keys())[-1]
return False
@classmethod
def _remove_non_writable_keys(cls, metadata):
"""Remove the keys from metadata (an update or patch) that are not
writable.
Both `Objects: patch` and `Objects: update` either ignore non-writable
keys or return 400 if the key does not match the current value. In
the testbench we simply always ignore them, to make life easier.
:param metadata:dict a dictionary representing a patch or
update to the metadata.
:return metadata but with only any non-writable keys removed.
:rtype: dict
"""
writeable_keys = {
"acl",
"cacheControl",
"contentDisposition",
"contentEncoding",
"contentLanguage",
"contentType",
"eventBasedHold",
"metadata",
"temporaryHold",
"storageClass",
"customTime",
}
# Cannot change `metadata` while we are iterating over it, so we make
# a copy
keys = [key for key in metadata.keys()]
for key in keys:
if key not in writeable_keys:
metadata.pop(key, None)
return metadata
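        # Illustrative example (hypothetical patch, not from a real request): a patch
        # such as {"contentType": "text/plain", "size": "123", "md5Hash": "..."} comes
        # out as {"contentType": "text/plain"}; "size" and "md5Hash" are dropped
        # because they are not in the writable set above.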
def get_revision_by_generation(self, generation):
"""Get object revision by generation or None if not found.
:param generation:int
:return: the object revision by generation or None.
:rtype:GcsObjectRevision
"""
return self.revisions.get(str(generation), None)
def get_latest(self):
return self.revisions.get(self.current_generation, None)
def check_preconditions_by_value(
self,
generation_match,
generation_not_match,
metageneration_match,
metageneration_not_match,
):
"""Verify that the given precondition values are met."""
current_generation = self.current_generation or "0"
if generation_match is not None and generation_match != current_generation:
raise error_response.ErrorResponse("Precondition Failed", status_code=412)
# This object does not exist (yet), testing in this case is special.
if (
generation_not_match is not None
and generation_not_match == current_generation
):
raise error_response.ErrorResponse("Precondition Failed", status_code=412)
if self.current_generation is None:
if metageneration_match is not None or metageneration_not_match is not None:
raise error_response.ErrorResponse(
"Precondition Failed", status_code=412
)
return
current = self.revisions.get(current_generation)
if current is None:
raise error_response.ErrorResponse("Object not found", status_code=404)
metageneration = current.metadata.get("metageneration")
if (
metageneration_not_match is not None
and metageneration_not_match == metageneration
):
raise error_response.ErrorResponse("Precondition Failed", status_code=412)
if metageneration_match is not None and metageneration_match != metageneration:
raise error_response.ErrorResponse("Precondition Failed", status_code=412)
def check_preconditions(
self,
request,
if_generation_match="ifGenerationMatch",
if_generation_not_match="ifGenerationNotMatch",
if_metageneration_match="ifMetagenerationMatch",
if_metageneration_not_match="ifMetagenerationNotMatch",
):
"""Verify that the preconditions in request are met.
:param request:flask.Request the http request.
:param if_generation_match:str the name of the generation match
parameter name, typically 'ifGenerationMatch', but sometimes
'ifSourceGenerationMatch'.
:param if_generation_not_match:str the name of the generation not-match
parameter name, typically 'ifGenerationNotMatch', but sometimes
'ifSourceGenerationNotMatch'.
:param if_metageneration_match:str the name of the metageneration match
parameter name, typically 'ifMetagenerationMatch', but sometimes
'ifSourceMetagenerationMatch'.
:param if_metageneration_not_match:str the name of the metageneration
not-match parameter name, typically 'ifMetagenerationNotMatch', but
sometimes 'ifSourceMetagenerationNotMatch'.
:rtype:NoneType
"""
generation_match = request.args.get(if_generation_match)
generation_not_match = request.args.get(if_generation_not_match)
metageneration_match = request.args.get(if_metageneration_match)
metageneration_not_match = request.args.get(if_metageneration_not_match)
self.check_preconditions_by_value(
generation_match,
generation_not_match,
metageneration_match,
metageneration_not_match,
)
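        # Illustrative note: the generation values are compared as strings taken
        # straight from the query parameters, so ?ifGenerationMatch=1 is matched
        # against the stored generation "1" without any integer conversion.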
def _insert_revision(self, revision):
"""Insert a new revision that has been initialized and checked.
:param revision: GcsObjectVersion the new revision to insert.
:rtype:NoneType
"""
update = {str(self.generation_generator): revision}
bucket = testbench_utils.lookup_bucket(self.bucket_name)
if not bucket.versioning_enabled():
self.revisions = update
else:
self.revisions.update(update)
self.current_generation = str(self.generation_generator)
    def insert(self, gcs_url, request):
        """Insert a new revision based on the given flask request.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
media = testbench_utils.extract_media(request)
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url,
self.bucket_name,
self.name,
self.generation_generator,
request,
media,
)
meta = revision.metadata.setdefault("metadata", {})
meta["x_testbench_upload"] = "simple"
self._insert_revision(revision)
return revision
    def insert_multipart(self, gcs_url, request, resource, media_headers, media_body):
        """Insert a new revision based on the given flask request.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param resource:dict JSON resource with object metadata.
:param media_headers:dict media headers in a multi-part upload.
:param media_body:str object data in a multi-part upload.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
# There are two ways to specify the content-type, the 'content-type'
# header and the resource['contentType'] field. They must be consistent,
# and the service generates an error when they are not.
if (
resource.get("contentType") is not None
and media_headers.get("content-type") is not None
and resource.get("contentType") != media_headers.get("content-type")
):
raise error_response.ErrorResponse(
(
"Content-Type specified in the upload (%s) does not match"
+ "contentType specified in the metadata (%s)."
)
% (media_headers.get("content-type"), resource.get("contentType")),
status_code=400,
)
# Set the contentType in the resource from the header. Note that if both
# are set they have the same value.
resource.setdefault("contentType", media_headers.get("content-type"))
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url,
self.bucket_name,
self.name,
self.generation_generator,
request,
media_body,
)
meta = revision.metadata.setdefault("metadata", {})
meta["x_testbench_upload"] = "multipart"
if "md5Hash" in resource:
# We should return `x_testbench_md5` only when the user enables
# `MD5Hash` computations.
meta["x_testbench_md5"] = resource.get("md5Hash")
meta["x_testbench_crc32c"] = resource.get("crc32c", "")
# Apply any overrides from the resource object part.
revision.update_from_metadata(resource)
self._insert_revision(revision)
return revision
def insert_resumable(self, gcs_url, request, media, resource):
"""Implement the final insert for a resumable upload.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param media:str the media for the object.
:param resource:dict the metadata for the object.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url,
self.bucket_name,
self.name,
self.generation_generator,
request,
media,
)
meta = revision.metadata.setdefault("metadata", {})
meta["x_testbench_upload"] = "resumable"
meta["x_testbench_md5"] = resource.get("md5Hash", "")
meta["x_testbench_crc32c"] = resource.get("crc32c", "")
# Apply any overrides from the resource object part.
revision.update_from_metadata(resource)
self._insert_revision(revision)
return revision
def insert_xml(self, gcs_url, request):
"""Implement the insert operation using the XML API.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
media = testbench_utils.extract_media(request)
self.generation_generator += 1
goog_hash = request.headers.get("x-goog-hash")
md5hash = None
crc32c = None
if goog_hash is not None:
for hash in goog_hash.split(","):
if hash.startswith("md5="):
md5hash = hash[4:]
if hash.startswith("crc32c="):
crc32c = hash[7:]
revision = GcsObjectVersion(
gcs_url,
self.bucket_name,
self.name,
self.generation_generator,
request,
media,
)
meta = revision.metadata.setdefault("metadata", {})
meta["x_testbench_upload"] = "xml"
if md5hash is not None:
meta["x_testbench_md5"] = md5hash
revision.update_from_metadata({"md5Hash": md5hash})
if crc32c is not None:
meta["x_testbench_crc32c"] = crc32c
revision.update_from_metadata({"crc32c": crc32c})
self._insert_revision(revision)
return revision
    def copy_from(self, gcs_url, request, source_revision):
        """Insert a new revision based on the given flask request.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param source_revision:GcsObjectVersion the source object version to
copy from.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
self.generation_generator += 1
source_revision.validate_encryption_for_read(request)
revision = GcsObjectVersion(
gcs_url,
self.bucket_name,
self.name,
self.generation_generator,
request,
source_revision.media,
)
metadata = json.loads(request.data)
revision.update_from_metadata(metadata)
self._insert_revision(revision)
return revision
    def compose_from(self, gcs_url, request, composed_media):
        """Compose a new revision based on the given flask request.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param composed_media:str contents of the composed object
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url,
self.bucket_name,
self.name,
self.generation_generator,
request,
composed_media,
)
payload = json.loads(request.data)
if payload.get("destination") is not None:
revision.update_from_metadata(payload.get("destination"))
# The server often discards the MD5 Hash when composing objects, we can
# easily maintain them in the testbench, but dropping them helps us
# detect bugs sooner.
revision.metadata.pop("md5Hash")
self._insert_revision(revision)
return revision
@classmethod
def rewrite_fixed_args(cls):
"""The arguments that should not change between requests for the same
rewrite operation."""
return [
"destinationKmsKeyName",
"destinationPredefinedAcl",
"ifGenerationMatch",
"ifGenerationNotMatch",
"ifMetagenerationMatch",
"ifMetagenerationNotMatch",
"ifSourceGenerationMatch",
"ifSourceGenerationNotMatch",
"ifSourceMetagenerationMatch",
"ifSourceMetagenerationNotMatch",
"maxBytesRewrittenPerCall",
"projection",
"sourceGeneration",
"userProject",
]
@classmethod
def capture_rewrite_operation_arguments(
cls, request, destination_bucket, destination_object
):
"""Captures the arguments used to validate related rewrite calls.
:rtype:dict
"""
original_arguments = {}
for arg in GcsObject.rewrite_fixed_args():
original_arguments[arg] = request.args.get(arg)
original_arguments.update(
{
"destination_bucket": destination_bucket,
"destination_object": destination_object,
}
)
return original_arguments
@classmethod
def make_rewrite_token(
cls, operation, destination_bucket, destination_object, generation
):
"""Create a new rewrite token for the given operation."""
return base64.b64encode(
bytearray(
"/".join(
[
str(operation.get("id")),
destination_bucket,
destination_object,
str(generation),
str(operation.get("bytes_rewritten")),
]
),
"utf-8",
)
).decode("utf-8")
def make_rewrite_operation(self, request, destination_bucket, destination_object):
"""Create a new rewrite token for `Objects: rewrite`."""
generation = request.args.get("sourceGeneration")
        if generation is None:
            generation = str(self.generation_generator)
self.rewrite_token_generator = self.rewrite_token_generator + 1
body = json.loads(request.data)
original_arguments = self.capture_rewrite_operation_arguments(
request, destination_object, destination_object
)
operation = {
"id": self.rewrite_token_generator,
"original_arguments": original_arguments,
"actual_generation": generation,
"bytes_rewritten": 0,
"body": body,
}
token = GcsObject.make_rewrite_token(
operation, destination_bucket, destination_object, generation
)
return token, operation
def rewrite_finish(self, gcs_url, request, body, source):
"""Complete a rewrite from `source` into this object.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param body:dict the HTTP payload, parsed via json.loads()
:param source:GcsObjectVersion the source object version.
:return: the newly created object version.
:rtype: GcsObjectVersion
"""
media = source.media
self.check_preconditions(request)
self.generation_generator += 1
revision = GcsObjectVersion(
gcs_url,
self.bucket_name,
self.name,
self.generation_generator,
request,
media,
)
revision.update_from_metadata(body)
self._insert_revision(revision)
return revision
    def rewrite_step(self, gcs_url, request, destination_bucket, destination_object):
        """Execute an iteration of `Objects: rewrite`.
Objects: rewrite may need to be called multiple times before it
succeeds. Only objects in the same location, with the same encryption,
are guaranteed to complete in a single request.
        The implementation simulates some, but not all, of the behaviors of the
server, in particular, only rewrites within the same bucket and smaller
than 1MiB complete immediately.
:param gcs_url:str the root URL for the fake GCS service.
:param request:flask.Request the contents of the HTTP request.
:param destination_bucket:str where will the object be placed after the
rewrite operation completes.
:param destination_object:str the name of the object when the rewrite
operation completes.
:return: a dictionary prepared for JSON encoding of a
`Objects: rewrite` response.
:rtype:dict
"""
body = json.loads(request.data)
rewrite_token = request.args.get("rewriteToken")
if rewrite_token is not None and rewrite_token != "":
# Note that we remove the rewrite operation, not just look it up.
# That way if the operation completes in this call, and/or fails,
# it is already removed. We need to insert it with a new token
# anyway, so this makes sense.
rewrite = self.rewrite_operations.pop(rewrite_token, None)
if rewrite is None:
raise error_response.ErrorResponse(
"Invalid or expired token in rewrite", status_code=410
)
else:
rewrite_token, rewrite = self.make_rewrite_operation(
request, destination_bucket, destination_bucket
)
# Compare the difference to the original arguments, on the first call
# this is a waste, but the code is easier to follow.
current_arguments = self.capture_rewrite_operation_arguments(
request, destination_bucket, destination_object
)
diff = set(current_arguments) ^ set(rewrite.get("original_arguments"))
if len(diff) != 0:
raise error_response.ErrorResponse(
"Mismatched arguments to rewrite", status_code=412
)
# This will raise if the version is deleted while the operation is in
# progress.
source = self.get_revision_by_generation(rewrite.get("actual_generation"))
source.validate_encryption_for_read(
request, prefix="x-goog-copy-source-encryption"
)
bytes_rewritten = rewrite.get("bytes_rewritten")
bytes_rewritten += 1024 * 1024
result = {"kind": "storage#rewriteResponse", "objectSize": len(source.media)}
if bytes_rewritten >= len(source.media):
bytes_rewritten = len(source.media)
rewrite["bytes_rewritten"] = bytes_rewritten
# Success, the operation completed. Return the new object:
object_path, destination = testbench_utils.get_object(
destination_bucket,
destination_object,
GcsObject(destination_bucket, destination_object),
)
revision = destination.rewrite_finish(gcs_url, request, body, source)
testbench_utils.insert_object(object_path, destination)
result["done"] = True
result["resource"] = revision.metadata
rewrite_token = ""
else:
rewrite["bytes_rewritten"] = bytes_rewritten
rewrite_token = GcsObject.make_rewrite_token(
rewrite, destination_bucket, destination_object, source.generation
)
self.rewrite_operations[rewrite_token] = rewrite
result["done"] = False
result.update(
{"totalBytesRewritten": bytes_rewritten, "rewriteToken": rewrite_token}
)
return result
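    # Hedged sketch of how a caller is expected to drive rewrite_step (the names and
    # the flask plumbing below are assumptions, not part of this module):
    #
    #     result = source_object.rewrite_step(gcs_url, request, dst_bucket, dst_object)
    #     while not result["done"]:
    #         # re-issue the rewrite request, echoing back result["rewriteToken"]
    #         # as the rewriteToken query parameter
    #         result = source_object.rewrite_step(gcs_url, next_request, dst_bucket, dst_object)
    #
    # Each call advances "bytes_rewritten" by 1 MiB, so objects of at most 1 MiB
    # complete on the first call.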
| 40.525292
| 88
| 0.621027
|
e9692ed4ff0261b736c565932abfc571b07cbd91
| 1,123
|
py
|
Python
|
setup.py
|
Naklecha/knip
|
7c416b8bf0eed88ddf29143311e24b8bfe616bde
|
[
"MIT"
] | 10
|
2019-04-22T19:03:33.000Z
|
2022-01-14T07:06:14.000Z
|
setup.py
|
Naklecha/knip
|
7c416b8bf0eed88ddf29143311e24b8bfe616bde
|
[
"MIT"
] | 3
|
2020-03-24T17:06:41.000Z
|
2021-02-02T22:06:09.000Z
|
setup.py
|
Naklecha/knip
|
7c416b8bf0eed88ddf29143311e24b8bfe616bde
|
[
"MIT"
] | null | null | null |
from setuptools import setup
with open("README.md","r") as fh:
long_description = fh.read()
setup(
name="knip",
version="0.0.3",
description="Lets make coding hassle free!",
py_modules=["knip"],
package_dir={"":"src"},
classifiers=["Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
"Operating System :: OS Independent"
],
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Naklecha/knip",
author="Nishant Aklecha",
author_email="nishant.aklecha@gmail.com",
)
| 36.225806
| 95
| 0.551202
|
a5c735d65394a4c278e34a3199473a2bcc42db04
| 9,571
|
py
|
Python
|
MAX31855/max2csv.py
|
elliotta/hephaestus
|
67efe7490e1ef5572537627c3278d0cf9256960d
|
[
"MIT"
] | null | null | null |
MAX31855/max2csv.py
|
elliotta/hephaestus
|
67efe7490e1ef5572537627c3278d0cf9256960d
|
[
"MIT"
] | null | null | null |
MAX31855/max2csv.py
|
elliotta/hephaestus
|
67efe7490e1ef5572537627c3278d0cf9256960d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Read data from an Adafruit MAX31855.
After a testing reading out at 10Hz, it was determined that the temperatures
returned by this board are stable enough that the current data point is
sufficient, and averaging multiple data points is unnecessary.
This code requires https://github.com/adafruit/Adafruit_Python_MAX31855
Other documentation:
https://learn.adafruit.com/max31855-thermocouple-python-library?view=all
https://learn.adafruit.com/calibrating-sensors/maxim-31855-linearization
https://learn.adafruit.com/thermocouple/f-dot-a-q
"""
import os
import sys
import datetime
from sys import stderr
from collections import OrderedDict
from math import exp
import time
import subprocess
import Adafruit_GPIO.SPI as SPI
import Adafruit_MAX31855.MAX31855 as MAX31855
# Raspberry Pi software SPI configuration.
# All sensors will share CLK and DO, with separate CS for each sensor
CLK = 24
DO = 25
CS = [23, 18, 22, 17]
SENSOR_NAMES = ['Firebox', 'Cat', 'Stove Top', 'Flue']
def c_to_f(c):
'''Convert Celcius temperature to Farenheit.
'''
return c * 9.0 / 5.0 + 32.0
def get_output_file(filename, data_dict, sep):
"""Create or open the output file.
If a new file is being created, add a header line.
"""
header_line = sep.join(data_dict.keys())
    base, extension = os.path.splitext(filename)
    x = 1
    while True:
        # Loop will break at a return statement
        if os.path.exists(filename) and not os.stat(filename).st_size == 0:
            # Check for a header line
            f_check = open(filename)
            first_line = f_check.readline().strip()
            f_check.close()
            if first_line == header_line:
                # Headers match, we're good to go
                return open(filename, 'a', 1) # line buffered
            else:
                stderr.write('File %s has unexpected header line\n' % filename)
                x += 1
                # Always build the new name from the original base so the fallback
                # file is foo-2.log, foo-3.log, ... and never foo-2-3-4.log
                filename = base + '-' + str(x) + extension
                # The next loop will try with this new filename
else:
# Is safe to overwrite an empty file
f = open(filename, 'w', 1) # line buffered
f.write(header_line + '\n')
return f
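# Illustrative behaviour (hypothetical file names): if '20240101-temps.tsv' already
# exists with a matching header it is reopened for appending; if its header differs,
# the data goes to '20240101-temps-2.tsv' instead; an empty or missing file is
# (re)created with a fresh header line.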
def main(web_output_file, interval, web_server_port, verbose,
log_interval, output_file_pattern, output_separator,
log_short_interval, short_interval, short_output_file_pattern):
    '''log_interval is in minutes; interval and short_interval are in seconds.
'''
# Hardware configuration is set at top of file
global CLK, DO, CS
# Setup
output_file = None
log_interval = datetime.timedelta(minutes=log_interval)
file_start_time = None # For logging hours since start of file
interval_start_time = None
web_server = subprocess.Popen(['python', '-m', 'SimpleHTTPServer', str(web_server_port)], cwd=os.path.dirname(web_output_file))
if log_short_interval:
short_output_file = None
short_interval = datetime.timedelta(seconds=short_interval)
short_file_start_time = None # For logging hours since start of file
short_interval_start_time = None
print('About to connect to %i sensors' % len(CS))
sensors = [MAX31855.MAX31855(CLK, this_CS, DO) for this_CS in CS]
print('Sensors connected')
while True:
try:
# Collect the data
temps = [sensor.readAll() for sensor in sensors]
for t in temps:
for k in t.keys():
if k != 'state':
t[k] = c_to_f(t[k])
now = datetime.datetime.now()
# Stdout output
if verbose:
print(now.isoformat() + ' ' + '; '.join(['%.2f,%.2f,%.2f' % (t['temp'], t['internal'], t['linearized']) for t in temps]))
# Html output
# Always overwrite current file
html = '<html><head><meta http-equiv="refresh" content="1"><title>Current Temps</title></head>'
lines = ['%s: %.1f F' % (name, t['linearized']) for name, t in zip(SENSOR_NAMES, temps)]
lines += ['<br>', '<br>']
lines += ['%s: %.1f internal; errors: %s' % (name, t['internal'], str([s for s, v in t['state'].items() if v])) for name, t in zip(SENSOR_NAMES, temps)]
html += '<body><h1>%s<br><<%s></h1></body></html>' % ('<br>'.join(lines), now.isoformat())
with open(web_output_file, 'w') as web_file:
web_file.write(html)
# Log file output
if not interval_start_time:
interval_start_time = now
if log_short_interval:
if not short_interval_start_time:
short_interval_start_time = now
if (not file_start_time) or (now - interval_start_time >= log_interval):
if not file_start_time:
file_start_time = now
# Assemble data dictionary
data_dict = OrderedDict([('timestamp', now.strftime('%H:%M:%S')), ('hours', format((now-file_start_time).total_seconds()/3600.0, '06.3f'))])
data_dict.update([('%s F' % name, format(t['linearized'], '.2f')) for name, t in zip(SENSOR_NAMES, temps)])
data_dict.update([('internal %s F' % name, format(t['internal'], '.2f')) for name, t in zip(SENSOR_NAMES, temps)])
# Write out the data
if not output_file or now.date() != current_date:
if output_file:
output_file.close()
print('Opening new output file')
current_date = datetime.datetime.now().date()
output_file = get_output_file(current_date.strftime(output_file_pattern), data_dict, output_separator)
output_file.write(output_separator.join([str(x) for x in data_dict.values()]) + '\n')
interval_start_time = now
if log_short_interval:
if (not short_file_start_time) or (now - short_interval_start_time >=short_interval):
if not short_file_start_time:
short_file_start_time = now
# Assemble data dictionary
data_dict = OrderedDict([('timestamp', now.strftime('%H:%M:%S')), ('hours', format((now-short_file_start_time).total_seconds()/3600.0, '07.4f'))])
data_dict.update([('%s F' % name, format(t['linearized'], '.2f')) for name, t in zip(SENSOR_NAMES, temps)])
# Write out the data
if not short_output_file or now.date() != current_date:
if short_output_file:
short_output_file.close()
print('Opening new short output file')
current_date = datetime.datetime.now().date()
                        short_output_file = get_output_file(current_date.strftime(short_output_file_pattern), data_dict, output_separator)
short_output_file.write(output_separator.join([str(x) for x in data_dict.values()]) + '\n')
short_interval_start_time = now
time.sleep(interval - time.time() % interval) # corrects for drift
except KeyboardInterrupt:
break
web_server.terminate() # Cleanup
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Read data from MAX31855 sensors', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-w', '--web_output_file', default='/dev/shm/current_temp.html', help='Html output file')
    parser.add_argument('-i', '--interval', type=int, default=1, help='Seconds at which to update the web_output_file')
parser.add_argument('-p', '--web_server_port', default=8080, type=int, help='Web server port for displaying web_output_file')
parser.add_argument('-v', '--verbose', action='store_true', help='Print each data point to stdout')
parser.add_argument('-o', '--output_file_pattern', default='%Y%m%d-temps.tsv', help='Output file name based on date')
parser.add_argument('-l', '--log_interval', type=int, default=1, help='Interval to log, in integer minutes')
parser.add_argument('-s', '--output_separator', default='\t', help='Separator for output file(s)')
group = parser.add_argument_group('short interval logging')
group.add_argument('--log_short_interval', action='store_true', help='Additional log at shorter interval')
    group.add_argument('--short_output_file_pattern', default='/dev/shm/%Y%m%d-short_interval.tsv', help='File to write shorter-interval data to')
group.add_argument('--short_interval', type=int, default=15, help='Interval to log, in integer seconds')
parser.add_argument('-d', '--daemon', action='store_true', help='Run as a daemon')
args = parser.parse_args()
if args.daemon:
# Pid file check
#pidfile = '/var/run/max2csv.pid'
pidfile = '/dev/shm/max2csv.pid'
mypid = os.getpid()
if os.path.exists(pidfile):
f = open(pidfile)
old_pid = int(f.readline().strip())
f.close()
if os.path.exists('/proc/%i' % old_pid):
print('Pid from %s already running' % pidfile)
sys.exit()
# Create pid file
f = open(pidfile, 'w')
f.write('%i\n' % mypid)
f.close()
kwargs = vars(args)
del kwargs['daemon']
main( **kwargs)
| 46.014423
| 166
| 0.611326
|
ea7933d0ca3765c928eb80efed0b87d828132cfe
| 5,254
|
py
|
Python
|
bfgn/architectures/residual_unet.py
|
dmarvs/bfg-nets
|
0c8e7f469b6a50c7b167ead98cb545d238dee214
|
[
"MIT"
] | 8
|
2019-08-01T17:52:58.000Z
|
2021-05-15T22:19:41.000Z
|
bfgn/architectures/residual_unet.py
|
pgbrodrick/rsCNN
|
0c8e7f469b6a50c7b167ead98cb545d238dee214
|
[
"MIT"
] | 38
|
2019-06-22T19:45:02.000Z
|
2019-07-20T19:39:09.000Z
|
bfgn/architectures/residual_unet.py
|
pgbrodrick/rsCNN
|
0c8e7f469b6a50c7b167ead98cb545d238dee214
|
[
"MIT"
] | 1
|
2020-01-13T20:28:28.000Z
|
2020-01-13T20:28:28.000Z
|
from typing import Tuple
import keras
from keras.layers import BatchNormalization, Concatenate, Conv2D, MaxPooling2D, UpSampling2D
from bfgn.architectures import config_sections
class ArchitectureConfigSection(
config_sections.AutoencoderMixin,
config_sections.BlockMixin,
config_sections.GrowthMixin,
config_sections.BaseArchitectureConfigSection,
):
pass
def create_model(
inshape: Tuple[int, int, int],
n_classes: int,
output_activation: str,
block_structure: Tuple[int, ...] = config_sections.DEFAULT_BLOCK_STRUCTURE,
filters: int = config_sections.DEFAULT_FILTERS,
internal_activation: str = config_sections.DEFAULT_INTERNAL_ACTIVATION,
kernel_size: Tuple[int, int] = config_sections.DEFAULT_KERNEL_SIZE,
padding: str = config_sections.DEFAULT_PADDING,
pool_size: Tuple[int, int] = config_sections.DEFAULT_POOL_SIZE,
use_batch_norm: bool = config_sections.DEFAULT_USE_BATCH_NORM,
use_growth: bool = config_sections.DEFAULT_USE_GROWTH,
use_initial_colorspace_transformation_layer: bool = config_sections.DEFAULT_USE_INITIAL_COLORSPACE_TRANSFORMATION_LAYER,
) -> keras.models.Model:
input_width = inshape[0]
minimum_width = input_width / 2 ** len(block_structure)
    assert minimum_width >= 2, (
        "The convolution width in the last encoding block ({}) is less than 2. "
        "Reduce the number of blocks in block_structure (currently {}).".format(
            minimum_width, len(block_structure)
        )
    )
# Need to track the following throughout the model creation
layers_pass_through = list()
# Encodings
inlayer = keras.layers.Input(shape=inshape)
encoder = inlayer
if use_initial_colorspace_transformation_layer:
intermediate_color_depth = int(inshape[-1] ** 2)
encoder = Conv2D(filters=intermediate_color_depth, kernel_size=(1, 1), padding="same")(inlayer)
encoder = Conv2D(filters=inshape[-1], kernel_size=(1, 1), padding="same")(encoder)
encoder = BatchNormalization()(encoder)
# Each encoder block has a number of subblocks
for num_subblocks in block_structure:
# Store the subblock input for the residual connection
input_subblock = encoder
for idx_sublayer in range(num_subblocks):
# Each subblock has a number of convolutions
encoder = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding)(encoder)
if use_batch_norm:
encoder = BatchNormalization()(encoder)
# Add the residual connection from the previous subblock output to the current subblock output
encoder = _add_residual_shortcut(input_subblock, encoder)
# Each encoder block passes its pre-pooled layers through to the decoder
layers_pass_through.append(encoder)
encoder = MaxPooling2D(pool_size=pool_size)(encoder)
if use_growth:
filters *= 2
# Decodings
decoder = encoder
# Each decoder block has a number of subblocks, but in reverse order of encoder
for num_subblocks, layer_passed_through in zip(reversed(block_structure), reversed(layers_pass_through)):
# Store the subblock input for the residual connection
input_subblock = decoder
for idx_sublayer in range(num_subblocks):
# Each subblock has a number of convolutions
decoder = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding)(decoder)
if use_batch_norm:
decoder = BatchNormalization()(decoder)
# Add the residual connection from the previous subblock output to the current subblock output
decoder = _add_residual_shortcut(input_subblock, decoder)
decoder = UpSampling2D(size=pool_size)(decoder)
decoder = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding)(decoder)
if use_batch_norm:
decoder = BatchNormalization()(decoder)
decoder = Concatenate()([layer_passed_through, decoder])
if use_growth:
filters = int(filters / 2)
# Last convolutions
output_layer = Conv2D(filters=filters, kernel_size=kernel_size, padding=padding)(decoder)
if use_batch_norm:
output_layer = BatchNormalization()(output_layer)
output_layer = Conv2D(filters=n_classes, kernel_size=(1, 1), padding="same", activation=output_activation)(
output_layer
)
return keras.models.Model(inputs=[inlayer], outputs=[output_layer])
def _add_residual_shortcut(input_tensor: keras.layers.Layer, residual_module: keras.layers.Layer):
"""
    Adds a shortcut connection by combining an input tensor and a residual module
"""
shortcut = input_tensor
# We need to apply a convolution if the input and block shapes do not match, every block transition
inshape = keras.backend.int_shape(input_tensor)[1:]
residual_shape = keras.backend.int_shape(residual_module)[1:]
if inshape != residual_shape:
strides = (int(round(inshape[0] / residual_shape[0])), int(round(inshape[1] / residual_shape[1])))
shortcut = keras.layers.Conv2D(
filters=residual_shape[-1], kernel_size=(1, 1), padding="valid", strides=strides
)(shortcut)
return keras.layers.add([shortcut, residual_module])
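# Hedged usage sketch (illustrative only; the input shape, class count and compile
# settings below are assumptions, not project defaults):
#
#     model = create_model(inshape=(128, 128, 4), n_classes=3, output_activation="softmax")
#     model.compile(optimizer="adam", loss="categorical_crossentropy")
#
# block_structure controls how many residual subblocks each encoder/decoder level
# gets, and every level halves (then restores) the spatial width, hence the
# minimum-width assert at the top of create_model.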
| 44.525424
| 124
| 0.719262
|
40032282478794bc7453291b36254ab842976d17
| 4,353
|
py
|
Python
|
account/views.py
|
SergePogorelov/bookmarks
|
2ee19469f6a91aea0b57b36e2dde2c4650f72a8e
|
[
"BSD-3-Clause"
] | null | null | null |
account/views.py
|
SergePogorelov/bookmarks
|
2ee19469f6a91aea0b57b36e2dde2c4650f72a8e
|
[
"BSD-3-Clause"
] | 2
|
2020-10-07T12:08:32.000Z
|
2021-06-10T23:28:57.000Z
|
account/views.py
|
SergePogorelov/bookmarks
|
2ee19469f6a91aea0b57b36e2dde2c4650f72a8e
|
[
"BSD-3-Clause"
] | null | null | null |
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.views.decorators.http import require_POST
from common.decorators import ajax_required
from .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm
from .models import Profile, Contact
from actions.utils import create_action
from actions.models import Action
def user_login(request):
if request.method == "POST":
form = LoginForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
user = authenticate(
request, username=cd["username"], password=cd["password"]
)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponse("Authenticated successfully")
else:
return HttpResponse("Disabled account")
else:
return HttpResponse("Invalid login")
else:
form = LoginForm()
return render(request, "account/login.html", {"form": form})
@login_required
def dashboard(request):
actions = Action.objects.exclude(user=request.user)
following_ids = request.user.following.values_list("id", flat=True)
if following_ids:
actions = actions.filter(user_id__in=following_ids)
actions = actions.select_related("user", "user__profile").prefetch_related(
"target"
)[:10]
return render(
        request, "account/dashboard.html", {"section": "dashboard", "actions": actions}
)
def register(request):
if request.method == "POST":
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
new_user = user_form.save(commit=False)
new_user.set_password(user_form.cleaned_data["password"])
new_user.save()
Profile.objects.create(user=new_user)
create_action(new_user, "has created an account")
return render(request, "account/register_done.html", {"new_user": new_user})
else:
user_form = UserRegistrationForm()
return render(request, "account/register.html", {"user_form": user_form})
@login_required
def edit(request):
if request.method == "POST":
user_form = UserEditForm(instance=request.user, data=request.POST)
profile_form = ProfileEditForm(
instance=request.user.profile, data=request.POST, files=request.FILES
)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, "Profile updated successfully")
else:
messages.error(request, "Error updating your profile")
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(instance=request.user.profile)
return render(
request,
"account/edit.html",
{"user_form": user_form, "profile_form": profile_form},
)
@login_required
def user_list(request):
users = User.objects.filter(is_active=True, is_staff=False)
return render(
request, "account/user/list.html", {"section": "people", "users": users}
)
@login_required
def user_detail(request, username):
user = get_object_or_404(User, username=username, is_active=True, is_staff=False)
return render(
request, "account/user/detail.html", {"section": "people", "user": user}
)
@ajax_required
@require_POST
@login_required
def user_follow(request):
user_id = request.POST.get("id")
action = request.POST.get("action")
if user_id and action:
try:
user = User.objects.get(id=user_id)
if action == "follow":
Contact.objects.get_or_create(user_from=request.user, user_to=user)
create_action(request.user, "is following", user)
else:
Contact.objects.filter(user_from=request.user, user_to=user).delete()
return JsonResponse({"status": "ok"})
        except User.DoesNotExist:
return JsonResponse({"status": "ok"})
return JsonResponse({"status": "ok"})
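# Illustrative request shape (values are made up): user_follow expects an AJAX POST
# such as {"id": "42", "action": "follow"} from a logged-in user; "unfollow" (or any
# other action value) removes the Contact row instead, and the view answers
# {"status": "ok"} in every branch.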
| 35.390244
| 88
| 0.662302
|
5ab2d15fca351f993720a53d237514111455efb1
| 17,658
|
py
|
Python
|
Omni_Receiver_4_Directions_2D_CNN_No_Mapping_Outdoor.py
|
debashriroy/DeePOE
|
ae6d6dcb1bb1410ea8e078277e54aa6f363484a4
|
[
"MIT"
] | null | null | null |
Omni_Receiver_4_Directions_2D_CNN_No_Mapping_Outdoor.py
|
debashriroy/DeePOE
|
ae6d6dcb1bb1410ea8e078277e54aa6f363484a4
|
[
"MIT"
] | null | null | null |
Omni_Receiver_4_Directions_2D_CNN_No_Mapping_Outdoor.py
|
debashriroy/DeePOE
|
ae6d6dcb1bb1410ea8e078277e54aa6f363484a4
|
[
"MIT"
] | null | null | null |
##############################################################
# Radio Fingerprinting in RFML Environment #
# Neural Network for Direction Finding Data 2020 #
# Author: Debashri Roy #
#############################################################
############ IMPORTING NECESSARY PACKAGES ################
import numpy as np # Package for numerical computation
np.set_printoptions(threshold=np.inf) # To print each elements
import time # Package is for computing execution time
import sys # Package to get command line arguments
import tensorflow as tf
from sklearn.model_selection import train_test_split
from array import array
# by setting env variables before Keras import you can set up which backend
import os,random
#os.environ["KERAS_BACKEND"] = "theano"
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["THEANO_FLAGS"] = "device=cuda0, dnn.enabled=False"
import theano
#theano.config.mode = ""
import theano as th
import theano.tensor as T
from keras.utils import np_utils
import keras.models as models
from keras.models import Sequential
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.layers import Embedding
from keras.layers.noise import GaussianNoise
from keras.layers.convolutional import Conv2D, Conv1D, Convolution2D, MaxPooling2D, ZeroPadding2D, Convolution1D
from keras.regularizers import *
from keras.optimizers import adam, Nadam, Adadelta
from keras.optimizers import Adam, RMSprop, Adagrad
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.optimizers import rmsprop
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
#from keras.regularizers import l2, activity_l2
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from keras.layers.advanced_activations import LeakyReLU, PReLU
# import BatchNormalization
from keras.layers.normalization import BatchNormalization
from keras.layers import GRU, RNN, SimpleRNN, LSTM, GRUCell, SimpleRNNCell, LSTMCell
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from keras.utils.np_utils import to_categorical
from keras.optimizers import SGD
import matplotlib
#matplotlib.use('TkAgg')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import seaborn as sns
import keras
import itertools
import scipy
from sklearn.linear_model import LinearRegression
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
########## FUNCTIONS TO CALCULATE F SCORE OF THE MODEL ###############
from keras import backend as K
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
######################################################################
data_file_loc1 = '/Users/debashri/Desktop/DirectionFinding_Data/Outdoor/W_E_02_06_2020_OUT_914MHz_source.dat' # TRANSMITTER IS WEST TO THE RECEIVER, TRANSMITTER ANTENNA TO EAST
data_file_loc2 ='/Users/debashri/Desktop/DirectionFinding_Data/Outdoor/W_W_02_06_2020_OUT_914MHz_source.dat' # TRANSMITTER IS WEST TO THE RECEIVER, TRANSMITTER ANTENNA TO WEST
data_file_loc3 = '/Users/debashri/Desktop/DirectionFinding_Data/Outdoor/W_N_02_06_2020_OUT_914MHz_source.dat'# TRANSMITTER IS WEST TO THE RECEIVER, TRANSMITTER ANTENNA TO NORTH
data_file_loc4 = '/Users/debashri/Desktop/DirectionFinding_Data/Outdoor/W_S_02_06_2020_OUT_914MHz_source.dat' # TRANSMITTER IS WEST TO THE RECEIVER, TRANSMITTER ANTENNA TO SOUTH
#dtype_all= np.dtype([('raw-iq0', 'c16')]) # gr_complex is '32fc' --> make any sense?
dtype_all= scipy.dtype([('raw-iq', scipy.complex64)]) # gr_complex is '32fc' --> make any sense?
# print("Total number of i/q samples for REEF BACK:")
# print(scipy.fromfile(open(data_file_loc1), dtype=dtype_all).shape[0])
#
# print("Total number of i/q samples for REEF FRONT LEFT:")
# print(scipy.fromfile(open(data_file_loc3), dtype=dtype_all).shape[0])
#
# print("Total number of i/q samples for REEF FRONT RIGHT:")
# print(scipy.fromfile(open(data_file_loc2), dtype=dtype_all).shape[0])
sample_size = 512 # CHANGE
no_of_samples = 8000 # CHANGE
no_of_features = 2 # CHANGE
iqdata_loc1 = scipy.fromfile(open(data_file_loc1), dtype=dtype_all, count = sample_size * no_of_samples)
iqdata_loc2 = scipy.fromfile(open(data_file_loc2), dtype=dtype_all, count = sample_size * no_of_samples)
iqdata_loc3 = scipy.fromfile(open(data_file_loc3), dtype=dtype_all, count = sample_size * no_of_samples)
iqdata_loc4 = scipy.fromfile(open(data_file_loc4), dtype=dtype_all, count = sample_size * no_of_samples)
# iqdata_loc1 = scipy.fromfile(open(data_file_loc1), dtype=dtype_all) # DATA COLLECTED at UCF
# iqdata_loc2 = scipy.fromfile(open(data_file_loc2), dtype=dtype_all) # DATA COLLECTED at UCF
# iqdata_loc3 = scipy.fromfile(open(data_file_loc3), dtype=dtype_all) # DATA COLLECTED at UCF
# iqdata_loc4 = scipy.fromfile(open(data_file_loc4), dtype=dtype_all) # DATA COLLECTED at UCF
start_time = time.time() # Taking start time to calculate overall execution time
no_of_loc1 = iqdata_loc1.shape[0]
no_of_loc2 = iqdata_loc2.shape[0]
no_of_loc3 = iqdata_loc3.shape[0]
no_of_loc4 = iqdata_loc4.shape[0]
# USING ONLY LAST N SAMPLES OF DATA
number_of_data_to_read = sample_size * no_of_samples
extra_rows_loc1 = no_of_loc1 - number_of_data_to_read
extra_rows_loc2 = no_of_loc2 - number_of_data_to_read
extra_rows_loc3 = no_of_loc3 - number_of_data_to_read
extra_rows_loc4 = no_of_loc4 - number_of_data_to_read
xdata_loc1 = np.delete(iqdata_loc1, np.s_[:extra_rows_loc1], 0)
xdata_loc2 = np.delete(iqdata_loc2, np.s_[:extra_rows_loc2], 0)
xdata_loc3= np.delete(iqdata_loc3, np.s_[:extra_rows_loc3], 0)
xdata_loc4= np.delete(iqdata_loc4, np.s_[:extra_rows_loc4], 0)
# PREPARING THE DATA WITHOUT TIME INFORMATION
no_of_data_loc1 = iqdata_loc1.shape[0]
no_of_data_loc2 = iqdata_loc2.shape[0]
no_of_data_loc3 = iqdata_loc3.shape[0]
no_of_data_loc4 = iqdata_loc4.shape[0]
##################### CHANNELING REAL AND IMAGINARY PART OF XDATA ###########################
# xdata1 = np.dstack((xydata['raw-iq0'].real.reshape(no_of_data, 1), xydata['raw-iq0'].imag.reshape(no_of_data, 1)))
# for k in range(1, 1024):
# st = "raw-iq" + str(k)
# xdata_temp = np.dstack((xydata[st].real.reshape(no_of_data, 1), xydata[st].imag.reshape(no_of_data, 1)))
# xdata1 = np.concatenate([xdata1, xdata_temp], axis=1)
# ydata1 = xydata['trans-id']
#
# xdata = xdata1.astype(np.float)
# ydata = ydata1.astype(np.int).flatten()
#
# print("UNTIL XDATA CHANNELING")
xdata_loc1= np.concatenate([iqdata_loc1['raw-iq'].real.reshape(number_of_data_to_read,1), iqdata_loc1['raw-iq'].imag.reshape(number_of_data_to_read,1)], axis=1)
xdata_loc2= np.concatenate([iqdata_loc2['raw-iq'].real.reshape(number_of_data_to_read,1), iqdata_loc2['raw-iq'].imag.reshape(number_of_data_to_read,1)], axis=1)
xdata_loc3= np.concatenate([iqdata_loc3['raw-iq'].real.reshape(number_of_data_to_read,1), iqdata_loc3['raw-iq'].imag.reshape(number_of_data_to_read,1)], axis=1)
xdata_loc4= np.concatenate([iqdata_loc4['raw-iq'].real.reshape(number_of_data_to_read,1), iqdata_loc4['raw-iq'].imag.reshape(number_of_data_to_read,1)], axis=1)
# DOES TRANSPOSE MAKE SENSE????
print("Before:::", xdata_loc1.shape)
xdata_loc1 = xdata_loc1.T.reshape(no_of_data_loc1//(sample_size), sample_size*no_of_features) # CHANGED FROM 2
print("After", xdata_loc1.shape)
xdata_loc2 = xdata_loc2.T.reshape(no_of_data_loc2//(sample_size), sample_size*no_of_features)
xdata_loc3 = xdata_loc3.T.reshape(no_of_data_loc3//(sample_size), sample_size*no_of_features)
xdata_loc4 = xdata_loc4.T.reshape(no_of_data_loc4//(sample_size), sample_size*no_of_features)
xdata = np.concatenate([xdata_loc1, xdata_loc2, xdata_loc3, xdata_loc4], axis=0)
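# Added sanity-check sketch (toy sizes only, not the captured I/Q data): it illustrates the
# shape arithmetic of the transpose-and-reshape above, where a (sample_size*no_of_samples, 2)
# real/imag array becomes (no_of_samples, sample_size*no_of_features). The underscore names
# below are illustrative and unused elsewhere.
_toy_ss, _toy_ns, _toy_nf = 4, 3, 2
_toy_iq = np.arange(_toy_ss * _toy_ns * _toy_nf, dtype=float).reshape(-1, _toy_nf)
assert _toy_iq.T.reshape(_toy_ns, _toy_ss * _toy_nf).shape == (_toy_ns, _toy_ss * _toy_nf)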
# CREATING LABEL FOR THE DATASETS
ydata_loc1 = np.full(xdata_loc1.shape[0], 0, dtype=int)
ydata_loc2 = np.full(xdata_loc2.shape[0], 1, dtype=int)
ydata_loc3 = np.full(xdata_loc3.shape[0], 2, dtype=int)
ydata_loc4 = np.full(xdata_loc4.shape[0], 3, dtype=int)
ydata = np.concatenate([ydata_loc1, ydata_loc2, ydata_loc3, ydata_loc4], axis=0)
# PREPROCESSING X AND Y DATA
xdata = xdata.astype(np.float)
ydata = ydata.astype(np.int).flatten()
# REMOVING THE NANS
xdata = np.nan_to_num(xdata)
# ############## RANDOMLY SHUFFLING THE DATA ###################
#
xydata = np.concatenate([xdata.reshape(xdata.shape[0], xdata.shape[1]), ydata.reshape(ydata.shape[0], 1)], axis=1)
np.random.shuffle(xydata)
print("Shape of XYDATA", xydata.shape)
#xdata, ydata = xydata[:,0:sample_size*2+2], xydata[:,((sample_size*2+2))] # ADDED 2 FOR LAT LONG
xdata, ydata = xydata[:,0:sample_size*no_of_features], xydata[:,((sample_size*no_of_features))] # split back into features and the label column
#################### NORMALIZE THE X DATA #######################
standard = preprocessing.StandardScaler().fit(xdata) # Normalize the data with zero mean and unit variance for each column
xdata = standard.transform(xdata)
############### SEPARATING TRAIN AND TEST DATA #######################
xtrain, xtest, ytrain, ytest = train_test_split(xdata, ydata, test_size=0.2, random_state=42) # 80/20 train/test split
print("XTRAIN AND XTEST SHAPE:", xtrain.shape, xtest.shape)
print("YTRAIN AND YTEST SHAPE:", ytrain.shape, ytest.shape)
# reshape to be [samples][width][height][channels]
xtrain = xtrain.reshape((xtrain.shape[0], no_of_features, sample_size, 1)).astype('float32')
xtest = xtest.reshape((xtest.shape[0], no_of_features, sample_size, 1)).astype('float32')
num_classes = 4 # TOTAL NUMBER OF CLASSES (directions)
# Convert labels to categorical one-hot encoding
ytrain_one_hot = to_categorical(ytrain, num_classes=num_classes) # DEFINE THE NUMBER OF TOTAL CLASSES IN LABEL
ytest_one_hot = to_categorical(ytest, num_classes=num_classes)
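# Added illustration (toy labels, not the real ydata): to_categorical turns integer class ids
# into one-hot rows, e.g. classes 0 and 2 out of num_classes=4 map to [1,0,0,0] and [0,0,1,0].
assert np.array_equal(to_categorical([0, 2], num_classes=4), np.array([[1., 0., 0., 0.], [0., 0., 1., 0.]]))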
print("XTRAIN AND XTEST SHAPE:", xtrain.shape, xtest.shape)
print("YTRAIN AND YTEST SHAPE:", ytrain_one_hot.shape, ytest_one_hot.shape)
############################################################
# #
######## Building a Convolutional Neural Network #################
# #
############################################################
dr = 0.6 # dropout rate (%)
batch_size = 64 # Mini batch size
nb_epoch = 100    # Number of epochs (use a higher number for better accuracy)
# classes = array("i", [0, 1]) # CHANGE: LABEL
# classes = ["T1","T2"]
classes = ["W_E", "W_W", "W_N", "W_S"] # CHANGE LABEL
in_shp = list(xtrain.shape[1:]) # Input Dimension
print(in_shp)
# model = models.Sequential()
timesteps=1
data_dim=xtrain.shape[1]
###############################
########## NEXT CHANGE: MINIMIZE THE KERNEL SIZE AND STRIDES
# THEN: CHANGE THE ACTIVATIONS OF THE LAYERS
##############################
############################################################
# #
######## Building a 2D Convolutional Neural Network #####
# #
############################################################
# xtrain = xtrain.reshape(xtrain.shape[0], 1, xtrain.shape[1])
# xtest = xtest.reshape(xtest.shape[0], 1, xtest.shape[1])
# print ("AFTER RESHAPE")
ytrain_one_hot = np.reshape(ytrain_one_hot, (ytrain_one_hot.shape[0], num_classes)) # Used in training
ytest_one_hot = np.reshape(ytest_one_hot, (ytest_one_hot.shape[0], num_classes)) # Used in training
# Modeling the CNN
model = Sequential()
model.add(Conv2D(64, (2, 2), input_shape=(no_of_features, sample_size, 1), activation='relu')) # CHANGE # Stride (1, 1)
#model.add(MaxPooling2D()) # Pool size: (2, 2) and stride (2, 2)
model.add(Dropout(0.2))
# model.add(Conv2D(64, (2, 2), activation='relu'))
# model.add(MaxPooling2D())
# model.add(Dropout(dr))
model.add(Flatten())
#model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
# For a multi-class classification problem
sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
# model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) # Multiclass classification with rmsprop
model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['acc', f1_m, precision_m, recall_m]) # Multiclass classification with the SGD optimizer # CHANGE
#model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', f1_m, precision_m, recall_m])
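# Added note: f1_m, precision_m and recall_m are custom metrics defined earlier in this script
# (outside this excerpt). The sketch below mirrors the Keras-backend pattern such metrics
# usually follow; it is an illustrative assumption rather than a copy of the original
# definitions, and the underscore-prefixed names deliberately avoid shadowing the real ones.
from keras import backend as _K
def _precision_sketch(y_true, y_pred):
    true_positives = _K.sum(_K.round(_K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = _K.sum(_K.round(_K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + _K.epsilon())
def _recall_sketch(y_true, y_pred):
    true_positives = _K.sum(_K.round(_K.clip(y_true * y_pred, 0, 1)))
    possible_positives = _K.sum(_K.round(_K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + _K.epsilon())
def _f1_sketch(y_true, y_pred):
    precision = _precision_sketch(y_true, y_pred)
    recall = _recall_sketch(y_true, y_pred)
    return 2 * (precision * recall) / (precision + recall + _K.epsilon())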
model.summary()
filepath = '/Users/debashri/Desktop/DirectionFinding_Plots/direction_data_4_direction_2D_CNN_Outdoor.wts.h5'
print("The dropout rate was: ")
print(dr)
# Fit the model
# history= model.fit(xtrain, ytrain_one_hot, epochs=nb_epoch, batch_size=batch_size, validation_data = (xtest, ytest_one_hot), callbacks = [keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='auto'), keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=2, mode='auto')])
history = model.fit(xtrain, ytrain_one_hot, epochs=nb_epoch, batch_size=batch_size, validation_split=0.1, callbacks=[
keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='auto'),
keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=2, mode='auto')])
# Evaluate the model
loss, accuracy, f1_score, precision, recall = model.evaluate(xtest, ytest_one_hot, batch_size=batch_size) # CHANGE
print("\nTest Loss: %s: %.2f%%" % (model.metrics_names[0], loss * 100)) # CHANGE
print("\nTest Accuracy: %s: %.2f%%" % (model.metrics_names[1], accuracy * 100)) # CHANGE
print("\nTest F1 Score: %s: %.2f%%" % (model.metrics_names[2], f1_score)) # CHANGE
print("\nTest Precision: %s: %.2f%%" % (model.metrics_names[3], precision * 100)) # CHANGE
print("\nTest Recall: %s: %.2f%%" % (model.metrics_names[4], recall * 100)) # CHANGE
# Calculating total execution time
end_time = time.time() # Taking end time to calculate overall execution time
print("\n Total Execution Time (Minutes): ")
print(((end_time - start_time) / 60))
#### SET PLOTTING PARAMETERS #########
params = {'legend.fontsize': 'xx-large',
'axes.labelsize': 'xx-large',
'axes.titlesize': 'xx-large',
'xtick.labelsize': 'xx-large',
'ytick.labelsize': 'xx-large'}
plt.rcParams.update(params)
# Show Accuracy Curves
fig = plt.figure()
# plt.title('Training Performance')
plt.plot(history.epoch, history.history['acc'], label='Training Accuracy', linewidth=2.0, c='b')
plt.plot(history.epoch, history.history['val_acc'], label='Validation Accuracy', linewidth=2.0, c='r')
plt.ylabel('Accuracy(%)')
plt.xlabel('Epoch')
plt.legend()
# fig = plt.subplots(nrows=1, ncols=1) # create figure & 1 axis
# ax.plot([0, 1, 2], [10, 20, 3])
plt.tight_layout()
fig.savefig('/Users/debashri/Desktop/DirectionFinding_Plots/direction_4_acc_2D_CNN_Outdoor.png') # save the figure to file
plt.close(fig)
# plt.show()
def plot_confusion_matrix(cm, title='Confusion Matrix', cmap=plt.cm.YlGnBu, labels=[], normalize=False):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
else:
cm = cm.astype('int')
# print('Confusion matrix, without normalization')
plt.rcParams.update(params) # ADDED
fig = plt.figure(figsize=(12,12))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
thresh = cm.max() / 2
fmt = '.2f' if normalize else 'd'
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i,"{:,}".format(cm[i, j]),
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center", fontsize="xx-large",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
# fig, ax = plt.subplots(nrows=1, ncols=1) # create figure & 1 axis
# ax.plot([0, 1, 2], [10, 20, 3])
plt.tight_layout()
fig.savefig('/Users/debashri/Desktop/DirectionFinding_Plots/direction_4_conf_mat_2D_CNN_Outdoor.png') # save the figure to file
plt.close(fig)
# plt.show()
# Plot confusion matrix
test_Y_hat = model.predict(xtest, batch_size=batch_size)
conf = np.zeros([len(classes), len(classes)])
confnorm = np.zeros([len(classes), len(classes)])
for i in range(0, xtest.shape[0]):
j = list(ytest_one_hot[i, :]).index(1)
k = int(np.argmax(test_Y_hat[i, :]))
conf[j, k] = conf[j, k] + 1
plot_confusion_matrix(conf, labels=classes, normalize=False)
for i in range(0, len(classes)):
confnorm[i, :] = conf[i, :] / np.sum(conf[i, :])
# plot_confusion_matrix(confnorm, labels=classes)
end_time = time.time() # Taking end time to calculate overall execution time
print("\n Total Execution Time (Minutes): ")
print(((end_time-start_time)/60))
| 41.942993
| 335
| 0.702854
|
fba0137778a489a79340f3d93f389b1c946a9834
| 25,932
|
py
|
Python
|
generate/generate_miri.py
|
rendinam/pysiaf
|
b891e97037b250bfccb3f94b1ca156ffdf836594
|
[
"BSD-3-Clause"
] | null | null | null |
generate/generate_miri.py
|
rendinam/pysiaf
|
b891e97037b250bfccb3f94b1ca156ffdf836594
|
[
"BSD-3-Clause"
] | null | null | null |
generate/generate_miri.py
|
rendinam/pysiaf
|
b891e97037b250bfccb3f94b1ca156ffdf836594
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Generate MIRI SIAF and flight-like SIAF reference files.
Authors
-------
Johannes Sahlmann
References
----------
This script was partially adapted from Colin Cox' miriifu.py.
For a detailed description of the MIRI SIAF, the underlying reference files, and the
transformations, see Law et al., (latest revision): MIRI SIAF Input (JWST-STScI-004741).
The term `worksheet` refers to the excel worksheet in the respective SIAF.xlsx, which contained
some of the SIAF generation logic previously.
"""
from collections import OrderedDict
import copy
import os
from astropy.io import fits
import numpy as np
import pylab as pl
import pysiaf
from pysiaf import iando
from pysiaf.constants import JWST_SOURCE_DATA_ROOT, JWST_TEMPORARY_DATA_ROOT, \
JWST_DELIVERY_DATA_ROOT
from pysiaf.tests import test_miri
from pysiaf.utils import compare
from pysiaf.utils import polynomial
import generate_reference_files
#############################
instrument = 'MIRI'
# regenerate SIAF reference files if needed
regenerate_basic_reference_files = False
if regenerate_basic_reference_files:
generate_reference_files.generate_siaf_detector_layout()
generate_reference_files.generate_siaf_detector_reference_file(instrument)
generate_reference_files.generate_siaf_ddc_mapping_reference_file(instrument)
# DDC name mapping
ddc_apername_mapping = iando.read.read_siaf_ddc_mapping_reference_file(instrument)
# MIRI detector parameters, e.g. XDetSize
siaf_detector_parameters = iando.read.read_siaf_detector_reference_file(instrument)
# definition of the master apertures
detector_layout = iando.read.read_siaf_detector_layout()
master_aperture_names = detector_layout['AperName'].data
# directory containing reference files delivered by instrument team(s)
source_data_dir = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'delivery')
print('Loading source data files from directory: {}'.format(source_data_dir))
miri_distortion_file = 'MIRI_FM_MIRIMAGE_DISTORTION_07.04.01.fits'
# Fundamental aperture definitions: names, types, reference positions, dependencies
# for MIRI this file is part of the delivered source files and contains more columns
siaf_aperture_definitions = iando.read.read_siaf_aperture_definitions(instrument,
directory=source_data_dir)
def untangle(square):
"""Turn a square n x n array into a linear array.
Parameters
----------
square : n x n array
Input array
Returns
-------
linear : array
Linearized array
"""
n = square.shape[0]
t = n * (n + 1) // 2
linear = np.zeros(t)
for i in range(n):
for j in range(n - i):
k = (i + j) * (i + j + 1) // 2 + i
linear[k] = square[i, j]
return linear
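# Added illustrative check (hypothetical values, not delivery data): untangle flattens an
# n x n coefficient matrix into the degree-ordered triangular layout of length n*(n+1)/2,
# e.g. for n = 3 the order is [S[0,0], S[0,1], S[1,0], S[0,2], S[1,1], S[2,0]].
assert np.allclose(untangle(np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])),
                   [1., 2., 4., 3., 5., 7.])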
def invcheck(A, B, C, D, order, low, high):
"""Round trip calculation to test inversion.
Parameters
----------
A
B
C
D
order
low
high
"""
x = np.random.random(10)
x = low + (high - low) * x
y = np.random.random(10)
y = low + (high - low) * y
u = polynomial.poly(A, x, y, order)
v = polynomial.poly(B, x, y, order)
x2 = polynomial.poly(C, u, v, order)
y2 = polynomial.poly(D, u, v, order)
print('\n INVERSE CHECK')
for i in range(10):
print('%10.4f%10.4f%10.4f%10.4f%10.4f%10.4f%10.2e%10.2e' %
(x[i], y[i], u[i], v[i], x2[i], y2[i], x2[i] - x[i], y2[i] - y[i]))
print('Round trip errors %10.2e %10.2e' % ((x - x2).std(), (y - y2).std()))
print('Round trip errors %10.3f %10.3f' % ((x - x2).std(), (y - y2).std()))
def get_mirim_coefficients(distortion_file, verbose=False):
"""Read delivered FITS file for MIRI imager and return data to be ingested in SIAF.
Parameters
----------
distortion_file : str
Name of distortion file.
verbose : bool
verbosity
Returns
-------
csv_data : dict
Dictionary containing the data
"""
miri = fits.open(os.path.join(source_data_dir, distortion_file))
T = miri['T matrix'].data
TI = miri['TI matrix'].data
# CDP7 T matrices transform from/to v2,v3 in arcsec
# set VtoAN and ANtoV to unit matrix
VtoAN = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
ANtoV = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
TV = np.dot(T, VtoAN)
VT = np.dot(ANtoV, TI)
prod = np.dot(VT, TV)
TT = np.dot(T, TI)
if verbose:
print('T\n', T)
print('TI\n', TI)
print('VtoAN\n', VtoAN)
print('\n TV V2V3 to XY Entrance')
print(TV)
print(1.0 / TV[1, 1], 'arcsec/mm')
print('\nANtoV\n', ANtoV)
print('\n VTXY entrance to V2V3')
print('VT\n', VT)
print()
print('VT comparison\n', prod)
print('T comparison\n', TT)
# Get linear coefficient layout
A = miri['AI matrix'].data
B = miri['BI matrix'].data
C = miri['A matrix'].data
D = miri['B matrix'].data
AL = untangle(A)
BL = untangle(B)
CL = untangle(C)
DL = untangle(D)
if verbose:
print('Initial AL\n', AL)
print('Initial BL\n', BL)
print('CL\n', CL)
print('DL\n', DL)
# scale factor corresponding to 25 mum pixel size, i.e. 40 pixels/mm
order = 4
k = 0
for i in range(order + 1):
factor = 0.025 ** i
for j in range(i + 1):
AL[k] = AL[k] * factor
BL[k] = BL[k] * factor
k += 1
AF = VT[0, 0] * AL + VT[0, 1] * BL
AF[0] = AF[0] + VT[0, 2]
BF = VT[1, 0] * AL + VT[1, 1] * BL
BF[0] = BF[0] + VT[1, 2]
if verbose:
polynomial.print_triangle(AF)
polynomial.print_triangle(BF)
print('AL scaled\n', AL)
print('\n A FINAL')
print('\n B FINAL')
## print('INVERSE TRANSFORMATIONS')
# Combine TV with polynomial using polynomial.two_step
# combination of several polynomial coefficients
a = np.array([TV[0, 2], TV[0, 0], TV[0, 1]])
b = np.array([TV[1, 2], TV[1, 0], TV[1, 1]])
(C2, D2) = polynomial.two_step(CL, DL, a, b)
CF = 40 * C2
DF = 40 * D2
if verbose:
polynomial.print_triangle(CF)
polynomial.print_triangle(DF)
print('a', a)
print('b', b)
print('\nC Final')
print('\nD Final')
# if verbose:
# Test two_step
v2 = -280
v3 = -430
xin = TV[0, 0] * v2 + TV[0, 1] * v3 + TV[0, 2]
yin = TV[1, 0] * v2 + TV[1, 1] * v3 + TV[1, 2]
xmm = polynomial.poly(CL, xin, yin, 4)
ymm = polynomial.poly(DL, xin, yin, 4)
xmm2 = polynomial.poly(C2, v2, v3, 4)
ymm2 = polynomial.poly(D2, v2, v3, 4)
# Backwards check
xp = 0
yp = 0
v2 = polynomial.poly(AF, xp, yp, 4)
v3 = polynomial.poly(BF, xp, yp, 4)
xpix = polynomial.poly(CF, v2, v3, 4)
ypix = polynomial.poly(DF, v2, v3, 4)
print('IN', xin, yin)
print('MM', xmm, ymm)
print('MM2', xmm2, ymm2)
print('V', v2, v3)
print('Original ', xp, yp)
print('Recovered', xpix, ypix)
print('Change ', xpix - xp, ypix - yp)
invcheck(AF, BF, CF, DF, 4, -512.0, 512.0)
CS = polynomial.shift_coefficients(CF, AF[0], BF[0])
DS = polynomial.shift_coefficients(DF, AF[0], BF[0])
CS[0] = 0.0
DS[0] = 0.0
# extract V2,V3 reference position
V2cen = AF[0]
V3cen = BF[0]
# reset zero order coefficients to zero
AF[0] = 0.0
BF[0] = 0.0
if verbose:
polynomial.print_triangle(CS)
polynomial.print_triangle(DS)
invcheck(AF, BF, CS, DS, 4, -512.0, 512.0)
print('\nCS')
print('\nDS')
print('\nDetector Center')
# if verbose:
xscalec = np.hypot(AF[1], BF[1])
yscalec = np.hypot(AF[2], BF[2])
# compute angles
xanglec = np.rad2deg(np.arctan2(AF[1], BF[1]))
yanglec = np.rad2deg(np.arctan2(AF[2], BF[2]))
if verbose:
print('Position', V2cen, V3cen)
print('Scales %10.6f %10.6f' % (xscalec, yscalec))
print('Angles %10.6f %10.6f' % (xanglec, yanglec))
# if verbose:
xcen = 1033 / 2
ycen = 1025 / 2
xref = 693.5 - xcen
yref = 512.5 - ycen
V2Ref = polynomial.poly(AF, xref, yref, 4) + V2cen
V3Ref = polynomial.poly(BF, xref, yref, 4) + V3cen
dV2dx = polynomial.dpdx(AF, xref, yref)
dV3dx = polynomial.dpdx(BF, xref, yref)
dV2dy = polynomial.dpdy(AF, xref, yref)
dV3dy = polynomial.dpdy(BF, xref, yref)
xangler = np.arctan2(dV2dx, dV3dx)
yangler = np.arctan2(dV2dy, dV3dy)
# if verbose:
print('Axis angles', np.rad2deg(xangler), np.rad2deg(yangler))
# if verbose:
# Illum reference position
xscaler = np.hypot(dV2dx, dV3dx)
yscaler = np.hypot(dV2dy, dV3dy)
xangler = np.rad2deg(np.arctan2(dV2dx, dV3dx))
yangler = np.rad2deg(np.arctan2(dV2dy, dV3dy))
# if verbose:
print('\nIllum reference position')
print('xref=', xref)
print('Position', V2Ref, V3Ref)
print('Scales %10.6f %10.6f' % (xscaler, yscaler))
print('Angles %10.6f %10.6f %10.6f' % (xangler, yangler, yangler - xangler))
# if verbose:
# Slit position
xslit = (326.13)
yslit = (300.70)
dxslit = xslit - xcen
dyslit = yslit - ycen
V2slit = polynomial.poly(AF, dxslit, dyslit, 4) + V2cen
V3slit = polynomial.poly(BF, dxslit, dyslit, 4) + V3cen
dV2dx = polynomial.dpdx(AF, dxslit, dyslit)
dV3dx = polynomial.dpdx(BF, dxslit, dyslit)
dV2dy = polynomial.dpdy(AF, dxslit, dyslit)
dV3dy = polynomial.dpdy(BF, dxslit, dyslit)
xangles = np.arctan2(dV2dx, dV3dx)
yangles = np.arctan2(dV2dy, dV3dy)
# if verbose:
print('\nSlit')
print('Position', dxslit, dyslit)
print('V2,V3', V2slit, V3slit)
print('Slit angles', np.rad2deg(xangles), np.rad2deg(yangles))
# if verbose:
# Corners
xc = np.array([-516.0, 516.0, 516.0, -516.0, -516.0])
yc = np.array([-512.0, -512.0, 512.0, 512.0, -512.0])
V2c = polynomial.poly(AF, xc, yc, 4)
V3c = polynomial.poly(BF, xc, yc, 4)
V2c = V2c + V2cen
V3c = V3c + V3cen
# if verbose:
print('\nCorners')
print('V2 %10.4f %10.4f %10.4f %10.4f' % (V2c[0], V2c[1], V2c[2], V2c[3]))
print('V3 %10.4f %10.4f %10.4f %10.4f' % (V3c[0], V3c[1], V3c[2], V3c[3]))
# make figure
pl.figure(1)
pl.clf()
pl.title('MIRI Detector')
pl.plot(V2cen, V3cen, 'r+')
pl.plot(V2c, V3c, ':')
pl.grid(True)
pl.axis('equal')
pl.plot(V2Ref, V3Ref, 'b+')
pl.plot(V2slit, V3slit, 'c+')
pl.gca().invert_xaxis()
pl.show()
## Rotated versions
print('Angle', yanglec)
print('Rotated')
# incorporate rotation in coefficients
a = np.deg2rad(yanglec)
AR = AF * np.cos(a) - BF * np.sin(a)
BR = AF * np.sin(a) + BF * np.cos(a)
CR = polynomial.prepend_rotation_to_polynomial(CS, yanglec)
DR = polynomial.prepend_rotation_to_polynomial(DS, yanglec)
if verbose:
print('AR')
polynomial.print_triangle(AR)
print('BR')
polynomial.print_triangle(BR)
print('\n', AR[2], ' near zero')
# if verbose:
invcheck(AR, BR, CR, DR, 4, -512.0, 512.0)
# Check positions using rotated (Ideal) coefficients
# if verbose:
xi = polynomial.poly(AR, xc, yc, 4)
yi = polynomial.poly(BR, xc, yc, 4)
v2r = xi * np.cos(a) + yi * np.sin(a) + V2cen
v3r = -xi * np.sin(a) + yi * np.cos(a) + V3cen
# if verbose:
print('V2', v2r)
print('V3', v3r)
pl.plot(v2r, v3r, '--')
CRFl = polynomial.flip_x(CR)
DRFl = polynomial.flip_x(DR)
# see TR: "polynomial origin being at the detector center with
# pixel position (516.5, 512.5). "
detector_center_pixel_x = 516.5
detector_center_pixel_y = 512.5
# dictionary holding data written to csv
csv_data = {}
csv_data['DET_OSS'] = {}
csv_data['DET_OSS']['A'] = AR
csv_data['DET_OSS']['B'] = BR
csv_data['DET_OSS']['C'] = CR
csv_data['DET_OSS']['D'] = DR
csv_data['DET_OSS']['Xref'] = detector_center_pixel_x
csv_data['DET_OSS']['Yref'] = detector_center_pixel_y
csv_data['DET_OSS']['Xref_inv'] = V2cen
csv_data['DET_OSS']['Yref_inv'] = V3cen
csv_data['DET_OSS']['xAngle'] = xanglec
csv_data['DET_OSS']['yAngle'] = yanglec
csv_data['DET_DMF'] = {}
csv_data['DET_DMF']['A'] = -AR
csv_data['DET_DMF']['B'] = BR
csv_data['DET_DMF']['C'] = CRFl
csv_data['DET_DMF']['D'] = DRFl
csv_data['DET_DMF']['Xref'] = detector_center_pixel_x
csv_data['DET_DMF']['Yref'] = detector_center_pixel_y
csv_data['DET_DMF']['Xref_inv'] = V2cen
csv_data['DET_DMF']['Yref_inv'] = V3cen
csv_data['DET_DMF']['xAngle'] = xanglec
csv_data['DET_DMF']['yAngle'] = yanglec
return csv_data
def extract_ifu_data(aperture_table):
"""Extract relevant information from IFU slice reference files.
Return one single table with columns that directly map to SIAF aperture entries.
Parameters
----------
aperture_table : astropy.table.Table
Table with aperture information
Returns
-------
table : astropy.table.Table instance
Table containing data
"""
column_name_mapping = {}
column_name_mapping['X1'] = 'v2ll'
column_name_mapping['Y1'] = 'v3ll'
column_name_mapping['X2'] = 'v2lr'
column_name_mapping['Y2'] = 'v3lr'
column_name_mapping['X3'] = 'v2ur'
column_name_mapping['Y3'] = 'v3ur'
column_name_mapping['X4'] = 'v2ul'
column_name_mapping['Y4'] = 'v3ul'
ifu_index = np.array([i for i, name in enumerate(aperture_table['AperName']) if 'MIRIFU_' in name])
table = copy.deepcopy(aperture_table[ifu_index])
table['V2Ref'] = table['v2ref']
table['V3Ref'] = table['v3ref']
# see IFU worksheet
for axis in ['X', 'Y']:
for index in [1, 2, 3, 4]:
if axis == 'X':
table['{}IdlVert{}'.format(axis, index)] = table['V2Ref'] \
- table[column_name_mapping['{}{}'.format(axis, index)]]
elif axis == 'Y':
table['{}IdlVert{}'.format(axis, index)] = table[ column_name_mapping['{}{}'.format(axis, index)]] \
- table['V3Ref']
return table
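# Added note: for these slice apertures the ideal-frame vertices computed above are plain
# offsets from the reference point, XIdlVert = V2Ref - v2_corner and YIdlVert = v3_corner - V3Ref,
# i.e. no rotation is applied at this stage; this is consistent with the VIdlParity = -1
# assigned to the IFU apertures further down in this script.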
csv_data = get_mirim_coefficients(miri_distortion_file, verbose=False)
number_of_coefficients = len(csv_data['DET_OSS']['A'])
polynomial_degree = polynomial.polynomial_degree(number_of_coefficients)
# convert to column names in Calc worksheet
for AperName in csv_data.keys():
csv_data[AperName]['dx'] = csv_data[AperName]['Xref']
csv_data[AperName]['dy'] = csv_data[AperName]['Yref']
csv_data[AperName]['dxIdl'] = csv_data[AperName]['Xref_inv']
csv_data[AperName]['dyIdl'] = csv_data[AperName]['Yref_inv']
k = 0
for i in range(polynomial_degree + 1):
for j in np.arange(i + 1):
csv_data[AperName]['Sci2IdlX{:d}{:d}'.format(i, j)] = csv_data[AperName]['A'][k]
csv_data[AperName]['Sci2IdlY{:d}{:d}'.format(i, j)] = csv_data[AperName]['B'][k]
csv_data[AperName]['Idl2SciX{:d}{:d}'.format(i, j)] = csv_data[AperName]['C'][k]
csv_data[AperName]['Idl2SciY{:d}{:d}'.format(i, j)] = csv_data[AperName]['D'][k]
k += 1
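# Added note: after this loop every csv_data entry also carries flattened coefficient keys of
# the form Sci2IdlX{i}{j}, Sci2IdlY{i}{j}, Idl2SciX{i}{j}, Idl2SciY{i}{j} (e.g. Sci2IdlX00,
# Sci2IdlX10, Sci2IdlX11, ... up to degree 4), mirroring the coefficient names used when the
# apertures are populated below.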
# get IFU aperture definitions
slice_table = extract_ifu_data(siaf_aperture_definitions)
idlvert_attributes = ['XIdlVert{}'.format(i) for i in [1, 2, 3, 4]] + [
'YIdlVert{}'.format(i) for i in [1, 2, 3, 4]]
aperture_dict = OrderedDict()
aperture_name_list = siaf_aperture_definitions['AperName'].tolist()
for aperture_index, AperName in enumerate(aperture_name_list):
# new aperture to be constructed
aperture = pysiaf.JwstAperture()
aperture.AperName = AperName
aperture.InstrName = siaf_detector_parameters['InstrName'][0].upper()
# index in the aperture definition table
aperture_definitions_index = siaf_aperture_definitions['AperName'].tolist().index(AperName)
aperture.AperShape = siaf_detector_parameters['AperShape'][0]
# Retrieve basic aperture parameters from definition files
for attribute in 'XDetRef YDetRef AperType XSciSize YSciSize XSciRef YSciRef'.split():
value = siaf_aperture_definitions[attribute][aperture_definitions_index]
if np.ma.is_masked(value):
value = None
setattr(aperture, attribute, value)
if aperture.AperType not in ['COMPOUND', 'SLIT']:
for attribute in 'XDetSize YDetSize'.split():
setattr(aperture, attribute, siaf_detector_parameters[attribute][0])
aperture.DDCName = 'not set'
aperture.Comment = None
aperture.UseAfterDate = '2014-01-01'
master_aperture_name = 'MIRIM_FULL'
# process master apertures
if aperture.AperType not in ['COMPOUND', 'SLIT']:
if aperture.AperType == 'OSS':
aperture.VIdlParity = 1
aperture.DetSciYAngle = 0
aperture.DetSciParity = 1
csv_aperture_name = 'DET_OSS'
else:
detector_layout_index = detector_layout['AperName'].tolist().index(master_aperture_name)
for attribute in 'DetSciYAngle DetSciParity VIdlParity'.split():
setattr(aperture, attribute, detector_layout[attribute][detector_layout_index])
# this is the name given to the pseudo-aperture in the Calc worksheet
csv_aperture_name = 'DET_DMF'
aperture.Sci2IdlDeg = polynomial_degree
dx = aperture.XDetRef - csv_data[csv_aperture_name]['dx']
dy = aperture.YDetRef - csv_data[csv_aperture_name]['dy']
csv_data[csv_aperture_name]['A_shifted'] = polynomial.shift_coefficients(
csv_data[csv_aperture_name]['A'], dx, dy, verbose=False)
csv_data[csv_aperture_name]['B_shifted'] = polynomial.shift_coefficients(
csv_data[csv_aperture_name]['B'], dx, dy, verbose=False)
# apply polynomial to get reference location in ideal plane
dxIdl = polynomial.poly(csv_data[csv_aperture_name]['A'], dx, dy, order=polynomial_degree)
dyIdl = polynomial.poly(csv_data[csv_aperture_name]['B'], dx, dy, order=polynomial_degree)
csv_data[csv_aperture_name]['C_shifted'] = polynomial.shift_coefficients(
csv_data[csv_aperture_name]['C'], dxIdl, dyIdl, verbose=False)
csv_data[csv_aperture_name]['D_shifted'] = polynomial.shift_coefficients(
csv_data[csv_aperture_name]['D'], dxIdl, dyIdl, verbose=False)
# set 00 coefficients to zero
for coefficient_name in ['{}_shifted'.format(c) for c in 'A B C D'.split()]:
csv_data[csv_aperture_name][coefficient_name][0] = 0.
k = 0
for i in range(polynomial_degree + 1):
for j in np.arange(i + 1):
setattr(aperture, 'Sci2IdlX{:d}{:d}'.format(i, j), csv_data[csv_aperture_name]['A_shifted'][k])
setattr(aperture, 'Sci2IdlY{:d}{:d}'.format(i, j), csv_data[csv_aperture_name]['B_shifted'][k])
setattr(aperture, 'Idl2SciX{:d}{:d}'.format(i, j), csv_data[csv_aperture_name]['C_shifted'][k])
setattr(aperture, 'Idl2SciY{:d}{:d}'.format(i, j), csv_data[csv_aperture_name]['D_shifted'][k])
k += 1
aperture.V3SciYAngle = csv_data[csv_aperture_name]['yAngle']
aperture.V3SciXAngle = csv_data[csv_aperture_name]['xAngle']
aperture.V3IdlYAngle = aperture.V3SciYAngle
aperture.V2Ref = csv_data[csv_aperture_name]['Xref_inv'] + aperture.VIdlParity * dxIdl * np.cos(np.deg2rad(aperture.V3IdlYAngle)) + dyIdl * np.sin(np.deg2rad(aperture.V3IdlYAngle))
aperture.V3Ref = csv_data[csv_aperture_name]['Yref_inv'] - aperture.VIdlParity * dxIdl * np.sin(np.deg2rad(aperture.V3IdlYAngle)) + dyIdl * np.cos(np.deg2rad(aperture.V3IdlYAngle))
# overwrite V3IdlYAngle if set in definition files
for attribute in 'V3IdlYAngle'.split():
value = siaf_aperture_definitions[attribute][aperture_definitions_index]
if np.ma.is_masked(value) is False:
setattr(aperture, attribute, value)
aperture.complement()
elif AperName in slice_table['AperName']:
slice_index = slice_table['AperName'].tolist().index(AperName)
for attribute in 'V2Ref V3Ref V3IdlYAngle'.split() + idlvert_attributes: #
setattr(aperture, attribute, slice_table[attribute][slice_index])
aperture.AperShape = siaf_detector_parameters['AperShape'][0]
aperture.VIdlParity = -1
elif AperName == 'MIRIM_SLIT':
# get MIRIM_SLIT definitions from source_file
mirim_slit_definitions = copy.deepcopy(siaf_aperture_definitions[aperture_index])
aperture.V2Ref = mirim_slit_definitions['v2ref']
aperture.V3Ref = mirim_slit_definitions['v3ref']
for attribute_name in 'VIdlParity V3IdlYAngle'.split():
setattr(aperture, attribute_name, mirim_slit_definitions[attribute_name])
# the mapping is different from above because now we are treating this as 'true' v2v3 and transform to idl
column_name_mapping = {}
column_name_mapping['X1'] = 'v2ll'
column_name_mapping['Y1'] = 'v3ll'
column_name_mapping['X4'] = 'v2lr'
column_name_mapping['Y4'] = 'v3lr'
column_name_mapping['X3'] = 'v2ur'
column_name_mapping['Y3'] = 'v3ur'
column_name_mapping['X2'] = 'v2ul'
column_name_mapping['Y2'] = 'v3ul'
for index in [1, 2, 3, 4]:
x_idl, y_idl = aperture.tel_to_idl(mirim_slit_definitions[column_name_mapping['{}{}'.format('X', index)]],
mirim_slit_definitions[column_name_mapping['{}{}'.format('Y', index)]])
setattr(aperture, '{}IdlVert{}'.format('X', index), x_idl)
setattr(aperture, '{}IdlVert{}'.format('Y', index), y_idl)
aperture_dict[AperName] = aperture
aperture_dict = OrderedDict(sorted(aperture_dict.items(), key=lambda t: aperture_name_list.index(t[0])))
# third pass to set DDCNames apertures, which depend on other apertures
ddc_siaf_aperture_names = np.array([key for key in ddc_apername_mapping.keys()])
ddc_v2 = np.array(
[aperture_dict[aperture_name].V2Ref for aperture_name in ddc_siaf_aperture_names])
ddc_v3 = np.array(
[aperture_dict[aperture_name].V3Ref for aperture_name in ddc_siaf_aperture_names])
for AperName in aperture_name_list:
separation_tel_from_ddc_aperture = np.sqrt(
(aperture_dict[AperName].V2Ref - ddc_v2) ** 2 + (
aperture_dict[AperName].V3Ref - ddc_v3) ** 2)
aperture_dict[AperName].DDCName = ddc_apername_mapping[
ddc_siaf_aperture_names[np.argmin(separation_tel_from_ddc_aperture)]]
######################################
# SIAF content generation finished
######################################
aperture_collection = pysiaf.ApertureCollection(aperture_dict)
emulate_delivery = True
if emulate_delivery:
pre_delivery_dir = os.path.join(JWST_DELIVERY_DATA_ROOT, instrument)
if not os.path.isdir(pre_delivery_dir):
os.makedirs(pre_delivery_dir)
# write the SIAF files to disk
filenames = pysiaf.iando.write.write_jwst_siaf(aperture_collection, basepath=pre_delivery_dir, file_format=['xml', 'xlsx'])
pre_delivery_siaf = pysiaf.Siaf(instrument, basepath=pre_delivery_dir)
compare_against_prd = True
compare_against_cdp7b = True
print('\nRunning regression test of pre_delivery_siaf against test_data:')
test_miri.test_against_test_data(siaf=pre_delivery_siaf, verbose=True)
for compare_to in [pysiaf.JWST_PRD_VERSION]:
if compare_to == 'cdp7b':
ref_siaf = pysiaf.Siaf(instrument,
filename=os.path.join(pre_delivery_dir, 'MIRI_SIAF_cdp7b.xml'))
else:
# compare new SIAF with PRD version
ref_siaf = pysiaf.Siaf(instrument)
tags = {'reference': compare_to, 'comparison': 'pre_delivery'}
compare.compare_siaf(pre_delivery_siaf, reference_siaf_input=ref_siaf,
fractional_tolerance=1e-6, report_dir=pre_delivery_dir, tags=tags)
compare.compare_transformation_roundtrip(pre_delivery_siaf,
reference_siaf_input=ref_siaf, tags=tags,
report_dir=pre_delivery_dir)
compare.compare_inspection_figures(pre_delivery_siaf, reference_siaf_input=ref_siaf,
report_dir=pre_delivery_dir, tags=tags,
xlimits=(-360, -520), ylimits=(-440, -300))
# run some tests on the new SIAF
from pysiaf.tests import test_aperture
print('\nRunning aperture_transforms test for pre_delivery_siaf')
test_aperture.test_jwst_aperture_transforms([pre_delivery_siaf], verbose=False, threshold=0.04)
print('\nRunning aperture_vertices test for pre_delivery_siaf')
test_aperture.test_jwst_aperture_vertices([pre_delivery_siaf])
else:
test_dir = os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'generate_test')
if not os.path.isdir(test_dir):
os.makedirs(test_dir)
# write the SIAFXML to disk
[filename] = pysiaf.iando.write.write_jwst_siaf(aperture_collection, basepath=test_dir,
file_format=['xml'])
print('SIAFXML written in {}'.format(filename))
| 36.472574
| 188
| 0.620006
|
583eb5cc625a3c1ee146ecec88ec983f9c074816
| 5,029
|
py
|
Python
|
gammapy/image/tests/test_profile.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/image/tests/test_profile.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/image/tests/test_profile.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord
from ...utils.testing import assert_quantity_allclose
from ...utils.testing import requires_dependency, mpl_plot_check
from ...maps import WcsNDMap, WcsGeom
from ..profile import compute_binning, ImageProfile, ImageProfileEstimator
@requires_dependency("pandas")
def test_compute_binning():
data = [1, 3, 2, 2, 4]
bin_edges = compute_binning(data, n_bins=3, method="equal width")
assert_allclose(bin_edges, [1, 2, 3, 4])
bin_edges = compute_binning(data, n_bins=3, method="equal entries")
# TODO: create test-cases that have been verified by hand here!
assert_allclose(bin_edges, [1, 2, 2.66666667, 4])
@pytest.fixture(scope="session")
def checkerboard_image():
nxpix, nypix = 10, 6
# set up data as a checkerboard of 0.5 and 1.5, so that the mean and sum
# are not completely trivial to compute
data = 1.5 * np.ones((nypix, nxpix))
data[slice(0, nypix + 1, 2), slice(0, nxpix + 1, 2)] = 0.5
data[slice(1, nypix + 1, 2), slice(1, nxpix + 1, 2)] = 0.5
geom = WcsGeom.create(npix=(nxpix, nypix), coordsys="GAL", binsz=0.02)
return WcsNDMap(geom=geom, data=data, unit="cm-2 s-1")
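# Added note: every checkerboard row mixes five 0.5 and five 1.5 pixels, so each of the 6 rows
# sums to 10 and averages to 1; every column mixes three of each value, so each of the 10
# columns sums to 6 and averages to 1. The profile tests below rely on exactly these numbers.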
@pytest.fixture(scope="session")
def cosine_profile():
table = Table()
table["x_ref"] = np.linspace(-90, 90, 11) * u.deg
table["profile"] = np.cos(table["x_ref"].to("rad")) * u.Unit("cm-2 s-1")
table["profile_err"] = 0.1 * table["profile"]
return ImageProfile(table)
class TestImageProfileEstimator:
def test_lat_profile_sum(self, checkerboard_image):
p = ImageProfileEstimator(axis="lat", method="sum")
profile = p.run(checkerboard_image)
desired = 10 * np.ones(6) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
def test_lon_profile_sum(self, checkerboard_image):
p = ImageProfileEstimator(axis="lon", method="sum")
profile = p.run(checkerboard_image)
desired = 6 * np.ones(10) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
def test_radial_profile_sum(self, checkerboard_image):
center = SkyCoord(0, 0, unit="deg", frame="galactic")
p = ImageProfileEstimator(axis="radial", method="sum", center=center)
profile = p.run(checkerboard_image)
desired = [4.0, 8.0, 20.0, 12.0, 12.0] * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
def test_lat_profile_mean(self, checkerboard_image):
p = ImageProfileEstimator(axis="lat", method="mean")
profile = p.run(checkerboard_image)
desired = np.ones(6) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
def test_lon_profile_mean(self, checkerboard_image):
p = ImageProfileEstimator(axis="lon", method="mean")
profile = p.run(checkerboard_image)
desired = np.ones(10) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
def test_x_edges_lat(self, checkerboard_image):
x_edges = Angle(np.linspace(-0.06, 0.06, 4), "deg")
p = ImageProfileEstimator(x_edges=x_edges, axis="lat", method="sum")
profile = p.run(checkerboard_image)
desired = 20 * np.ones(3) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
def test_x_edges_lon(self, checkerboard_image):
x_edges = Angle(np.linspace(-0.1, 0.1, 6), "deg")
p = ImageProfileEstimator(x_edges=x_edges, axis="lon", method="sum")
profile = p.run(checkerboard_image)
desired = 12 * np.ones(5) * u.Unit("cm-2 s-1")
assert_quantity_allclose(profile.profile, desired)
class TestImageProfile:
def test_normalize(self, cosine_profile):
normalized = cosine_profile.normalize(mode="integral")
profile = normalized.profile
assert_quantity_allclose(profile.sum(), 1 * u.Unit("cm-2 s-1"))
normalized = cosine_profile.normalize(mode="peak")
profile = normalized.profile
assert_quantity_allclose(profile.max(), 1 * u.Unit("cm-2 s-1"))
def test_profile_x_edges(self, cosine_profile):
assert_quantity_allclose(cosine_profile.x_ref.sum(), 0 * u.deg)
@pytest.mark.parametrize("kernel", ["gauss", "box"])
def test_smooth(self, cosine_profile, kernel):
# smoothing should preserve the mean
desired_mean = cosine_profile.profile.mean()
smoothed = cosine_profile.smooth(kernel, radius=3)
assert_quantity_allclose(smoothed.profile.mean(), desired_mean)
# smoothing should decrease errors
assert smoothed.profile_err.mean() < cosine_profile.profile_err.mean()
@requires_dependency("matplotlib")
def test_peek(self, cosine_profile):
with mpl_plot_check():
cosine_profile.peek()
| 38.098485
| 78
| 0.678664
|
f57524c187398072f4289e3a2754ca8af21b0657
| 26,246
|
py
|
Python
|
dataloader.py
|
hardik0/CTracker
|
a09d579c3621383280111f4771c0340fdcd72020
|
[
"Apache-2.0"
] | null | null | null |
dataloader.py
|
hardik0/CTracker
|
a09d579c3621383280111f4771c0340fdcd72020
|
[
"Apache-2.0"
] | null | null | null |
dataloader.py
|
hardik0/CTracker
|
a09d579c3621383280111f4771c0340fdcd72020
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import cv2
import csv
from six import raise_from
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image, ImageEnhance
RGB_MEAN = [0.485, 0.456, 0.406]
RGB_STD = [0.229, 0.224, 0.225]
class CSVDataset(Dataset):
"""CSV dataset."""
def __init__(self, root_path, train_file, class_list, transform=None):
"""
Args:
train_file (string): CSV file with training annotations
annotations (string): CSV file with class list
test_file (string, optional): CSV file with testing annotations
"""
self.train_file = train_file
self.class_list = class_list
self.transform = transform
self.root_path = root_path
# parse the provided class file
try:
with self._open_for_csv(self.class_list) as file:
self.classes = self.load_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(self.class_list, e)), None)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, obj_id, x1, y1, x2, y2, class_name
try:
with self._open_for_csv(self.train_file) as file:
self.image_data = self._read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(self.train_file, e)), None)
self.image_names = list(self.image_data.keys())
self.name2video_frames = dict()
self.image_name_prefix = list()
for image_name in self.image_names:
self.image_name_prefix.append(image_name[0:-len(image_name.split('/')[-1].split('_')[-1])])
self.image_name_prefix = set(self.image_name_prefix)
print('Total video count: {}'.format(len(self.image_name_prefix)))
for image_name in self.image_names:
cur_prefix = image_name[0:-len(image_name.split('/')[-1].split('_')[-1])]
if cur_prefix not in self.name2video_frames:
self.name2video_frames[cur_prefix] = 1
else:
self.name2video_frames[cur_prefix] = self.name2video_frames[cur_prefix] + 1
def _extract_frame_index(self, image_name):
suffix_name = image_name.split('/')[-1].split('_')[-1]
return int(float(suffix_name.split('.')[0]))
def _get_random_surround_name(self, image_name, max_diff=3, ignore_equal=True, pos_only=True):
suffix_name = image_name.split('/')[-1].split('_')[-1]
prefix = image_name[0:-len(suffix_name)]
cur_index = int(float(suffix_name.split('.')[0]))
total_number = self.name2video_frames[prefix]
if total_number < 2: return image_name
next_index = cur_index
while True:
range_low = max(1, cur_index - max_diff)
range_high = min(cur_index + max_diff, total_number)
if pos_only:
range_low = cur_index
if ignore_equal:
range_low = range_low + 1
if cur_index == total_number:
return image_name
next_index = random.randint(range_low, range_high)
if ignore_equal:
if next_index == cur_index:
continue
break
return prefix + '{0:06}.'.format(next_index) + suffix_name.split('.')[-1]
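# Added illustration of the frame-naming convention assumed by the two helpers above
# (hypothetical path): for an image named 'MOT17-04/img1_000012.jpg' the frame index is 12,
# and a randomly drawn neighbour within max_diff frames could be 'MOT17-04/img1_000014.jpg'
# (same prefix, zero-padded 6-digit suffix).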
def _extract_name_prefix(self, image_name):
return image_name[0:-len(image_name.split('/')[-1].split('_')[-1])]
def _parse(self, value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _open_for_csv(self, path):
"""
Open a file with flags suitable for csv.reader.
For Python 2 this means opening with mode 'rb';
for Python 3 it means mode 'r' with universal newlines (newline='').
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_classes(self, csv_reader):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = self._parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def __len__(self):
return len(self.image_names)
def __getitem__(self, idx):
while True:
try:
img = self.load_image(idx)
next_name = self._get_random_surround_name(self.image_names[idx])
img_next = self.load_image_by_name(next_name)
annot = self.load_annotations(idx)
annot_next = self.load_annotations_by_name(next_name)
if (annot.shape[0] < 1) or (annot_next.shape[0] < 1):
idx = random.randrange(0, len(self.image_names))
continue
except FileNotFoundError:
print ('FileNotFoundError in process image.')
idx = random.randrange(0, len(self.image_names))
continue
break
if np.random.rand() < 0.5:
sample = {'img': img, 'annot': annot, 'img_next': img_next, 'annot_next': annot_next}
else:
sample = {'img': img_next, 'annot': annot_next, 'img_next': img, 'annot_next': annot}
if self.transform:
sample = self.transform(sample)
return sample
def load_image_by_name(self, image_name):
img = skimage.io.imread(image_name)
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img
def load_image(self, image_index):
img = skimage.io.imread(self.image_names[image_index])
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img
def load_annotations_by_name(self, image_name):
# get ground truth annotations
annotation_list = self.image_data[image_name]
annotations = np.zeros((0, 6))
# some images appear to miss annotations (like image with id 257034)
if len(annotation_list) == 0:
return annotations
# parse annotations
for idx, a in enumerate(annotation_list):
# some annotations have basically no width / height, skip them
x1 = a['x1']
x2 = a['x2']
y1 = a['y1']
y2 = a['y2']
obj_id = a['obj_id']
if (x2-x1) < 1 or (y2-y1) < 1:
continue
annotation = np.zeros((1, 6))
annotation[0, 0] = x1
annotation[0, 1] = y1
annotation[0, 2] = x2
annotation[0, 3] = y2
annotation[0, 4] = self.name_to_label(a['class'])
annotation[0, 5] = obj_id
annotations = np.append(annotations, annotation, axis=0)
return annotations
def load_annotations(self, image_index):
# get ground truth annotations
annotation_list = self.image_data[self.image_names[image_index]]
annotations = np.zeros((0, 6))
# some images appear to miss annotations (like image with id 257034)
if len(annotation_list) == 0:
return annotations
# parse annotations
for idx, a in enumerate(annotation_list):
# some annotations have basically no width / height, skip them
x1 = a['x1']
x2 = a['x2']
y1 = a['y1']
y2 = a['y2']
obj_id = a['obj_id']
if (x2-x1) < 1 or (y2-y1) < 1:
continue
annotation = np.zeros((1, 6))
annotation[0, 0] = x1
annotation[0, 1] = y1
annotation[0, 2] = x2
annotation[0, 3] = y2
annotation[0, 4] = self.name_to_label(a['class'])
annotation[0, 5] = obj_id
annotations = np.append(annotations, annotation, axis=0)
return annotations
def _read_annotations(self, csv_reader, classes):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
img_file, obj_id, x1, y1, x2, y2, class_name = row[:7]
except ValueError:
raise_from(ValueError('line {}: format should be \'img_file,obj_id,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line)), None)
img_file = os.path.join(self.root_path, img_file.strip())
if img_file not in result:
result[img_file] = []
class_name = class_name.strip()
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = self._parse(float(x1), int, 'line {}: malformed x1: {{}}'.format(line))
y1 = self._parse(float(y1), int, 'line {}: malformed y1: {{}}'.format(line))
x2 = self._parse(float(x2), int, 'line {}: malformed x2: {{}}'.format(line))
y2 = self._parse(float(y2), int, 'line {}: malformed y2: {{}}'.format(line))
# Check that the bounding box is valid.
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if class_name not in classes:
raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name, 'obj_id': obj_id})
return result
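# Added illustration of the annotation CSV format parsed above (paths and values are
# hypothetical): each row is 'img_file,obj_id,x1,y1,x2,y2,class_name', e.g.
#   MOT17-04/img1_000001.jpg,7,864,153,921,319,person
# and the class-list CSV maps 'class_name,class_id', e.g. 'person,0'.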
def name_to_label(self, name):
return self.classes[name]
def label_to_name(self, label):
return self.labels[label]
def num_classes(self):
return max(self.classes.values()) + 1
def image_aspect_ratio(self, image_index):
image = Image.open(self.image_names[image_index])
return float(image.width) / float(image.height)
def collater(data):
imgs = [s['img'] for s in data]
annots = [s['annot'] for s in data]
imgs_next = [s['img_next'] for s in data]
annots_next = [s['annot_next'] for s in data]
widths = [int(s.shape[1]) for s in imgs]
heights = [int(s.shape[0]) for s in imgs]
batch_size = len(imgs)
max_width = np.array(widths).max()
max_height = np.array(heights).max()
padded_imgs = torch.zeros(batch_size, max_height, max_width, 3)
padded_imgs_next = torch.zeros(batch_size, max_height, max_width, 3)
for i in range(batch_size):
img = imgs[i]
padded_imgs[i, :int(img.shape[0]), :int(img.shape[1]), :] = img
img_next = imgs_next[i]
padded_imgs_next[i, :int(img_next.shape[0]), :int(img_next.shape[1]), :] = img_next
max_num_annots = max(annot.shape[0] for annot in annots)
max_num_annots_next = max(annot.shape[0] for annot in annots_next)
max_num_annots = max(max_num_annots, max_num_annots_next)
if max_num_annots > 0:
annot_padded = torch.ones((len(annots), max_num_annots, 6)) * -1
if max_num_annots > 0:
for idx, annot in enumerate(annots):
if annot.shape[0] > 0:
annot_padded[idx, :annot.shape[0], :] = annot
else:
annot_padded = torch.ones((len(annots), 1, 6)) * -1
if max_num_annots > 0:
annot_padded_next = torch.ones((len(annots_next), max_num_annots, 6)) * -1
if max_num_annots > 0:
for idx, annot in enumerate(annots_next):
if annot.shape[0] > 0:
annot_padded_next[idx, :annot.shape[0], :] = annot
else:
annot_padded_next = torch.ones((len(annots_next), 1, 6)) * -1
padded_imgs = padded_imgs.permute(0, 3, 1, 2)
padded_imgs_next = padded_imgs_next.permute(0, 3, 1, 2)
return {'img': padded_imgs, 'annot': annot_padded, 'img_next': padded_imgs_next, 'annot_next': annot_padded_next}
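# Added note (toy shapes): given a batch with images of shape (480, 640, 3) and (540, 960, 3),
# collater zero-pads both to (540, 960, 3), pads every annotation array with rows of -1 up to
# the largest per-image annotation count in the batch, and returns the images as NCHW tensors.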
def intersect(box_a, box_b):
max_xy = np.minimum(box_a[:, 2:], box_b[2:])
min_xy = np.maximum(box_a[:, :2], box_b[:2])
inter = np.clip((max_xy - min_xy + 1), a_min=0, a_max=np.inf)
return inter[:, 0] * inter[:, 1]
def jaccard_numpy(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: Multiple bounding boxes, Shape: [num_boxes,4]
box_b: Single bounding box, Shape: [4]
Return:
jaccard overlap: Shape: [box_a.shape[0]]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0] + 1) *
(box_a[:, 3]-box_a[:, 1] + 1)) # [A,B]
area_b = ((box_b[2]-box_b[0] + 1) *
(box_b[3]-box_b[1] + 1)) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
def overlap_numpy(box_a, box_b):
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0] + 1) *
(box_a[:, 3]-box_a[:, 1] + 1)) # [A,B]
return inter / area_a # [A,B]
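# Added toy worked example (illustrative coordinates): a 10x10 box at the origin and a 10x10
# box shifted by (5, 5) overlap in a 5x5 patch, so IoU = 25/175 and the intersection-over-area_a
# overlap = 25/100.
_box_a_demo = np.array([[0., 0., 9., 9.]])
_box_b_demo = np.array([5., 5., 14., 14.])
assert np.isclose(jaccard_numpy(_box_a_demo, _box_b_demo)[0], 25.0 / 175.0)
assert np.isclose(overlap_numpy(_box_a_demo, _box_b_demo)[0], 25.0 / 100.0)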
class Resizer(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample, min_side=608, max_side=1024):
return sample  # NOTE: early return; the resizing logic below is currently disabled
image, annots, image_next, annots_next = sample['img'], sample['annot'], sample['img_next'], sample['annot_next']
rows, cols, cns = image.shape
smallest_side = min(rows, cols)
# rescale the image so the smallest side is min_side
scale = min_side / smallest_side
# check if the largest side is now greater than max_side, which can happen
# when images have a large aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
# resize the image with the computed scale
image = skimage.transform.resize(image, (int(round(rows*scale)), int(round((cols*scale)))))
image_next = skimage.transform.resize(image_next, (int(round(rows*scale)), int(round((cols*scale)))))
rows, cols, cns = image.shape
pad_w = 32 - rows % 32
pad_h = 32 - cols % 32
new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
new_image[:rows, :cols, :] = image.astype(np.float32)
new_image_next = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
new_image_next[:rows, :cols, :] = image_next.astype(np.float32)
annots[:, :4] *= scale
annots_next[:, :4] *= scale
return {'img': torch.from_numpy(new_image), 'annot': torch.from_numpy(annots), 'img_next': torch.from_numpy(new_image_next), 'annot_next': torch.from_numpy(annots_next), 'scale': scale}
class Augmenter(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample, flip_x=0.5):
if np.random.rand() < flip_x:
image, annots = sample['img'], sample['annot']
image_next, annots_next = sample['img_next'], sample['annot_next']
image = image[:, ::-1, :]
image_next = image_next[:, ::-1, :]
rows, cols, _ = image.shape
rows_next, cols_next, _ = image_next.shape
assert (rows == rows_next) and (cols == cols_next), 'size must be equal between adjacent images pair.'
x1 = annots[:, 0].copy()
x2 = annots[:, 2].copy()
x_tmp = x1.copy()
annots[:, 0] = cols - x2
annots[:, 2] = cols - x_tmp
# for next
x1 = annots_next[:, 0].copy()
x2 = annots_next[:, 2].copy()
x_tmp = x1.copy()
annots_next[:, 0] = cols - x2
annots_next[:, 2] = cols - x_tmp
sample = {'img': image, 'annot': annots, 'img_next': image_next, 'annot_next': annots_next}
return sample
class Normalizer(object):
def __init__(self):
self.mean = np.array([[RGB_MEAN]])
self.std = np.array([[RGB_STD]])
def __call__(self, sample):
image, annots = sample['img'], sample['annot']
image_next, annots_next = sample['img_next'], sample['annot_next']
return {'img':torch.from_numpy((image.astype(np.float32) / 255.0 - self.mean) / self.std), 'annot': torch.from_numpy(annots), 'img_next':torch.from_numpy((image_next.astype(np.float32) / 255.0-self.mean)/self.std), 'annot_next': torch.from_numpy(annots_next)}
class UnNormalizer(object):
def __init__(self, mean=None, std=None):
if mean is None:
self.mean = RGB_MEAN
else:
self.mean = mean
if std is None:
self.std = RGB_STD
else:
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
def random_brightness(img, img_next):
prob = np.random.uniform(0, 1)
if prob < 0.5:
delta = np.random.uniform(-0.125, 0.125) + 1
img = ImageEnhance.Brightness(img).enhance(delta)
img_next = ImageEnhance.Brightness(img_next).enhance(delta)
return img, img_next
def random_contrast(img, img_next):
prob = np.random.uniform(0, 1)
if prob < 0.5:
delta = np.random.uniform(-0.5, 0.5) + 1
img = ImageEnhance.Contrast(img).enhance(delta)
img_next = ImageEnhance.Contrast(img_next).enhance(delta)
return img, img_next
def random_saturation(img, img_next):
prob = np.random.uniform(0, 1)
if prob < 0.5:
delta = np.random.uniform(-0.5, 0.5) + 1
img = ImageEnhance.Color(img).enhance(delta)
img_next = ImageEnhance.Color(img_next).enhance(delta)
return img, img_next
def random_hue(img, img_next):
prob = np.random.uniform(0, 1)
if prob < 0.5:
delta = np.random.uniform(-18, 18)
img_hsv = np.array(img.convert('HSV'))
img_hsv[:, :, 0] = img_hsv[:, :, 0] + delta
img = Image.fromarray(img_hsv, mode='HSV').convert('RGB')
img_next_hsv = np.array(img_next.convert('HSV'))
img_next_hsv[:, :, 0] = img_next_hsv[:, :, 0] + delta
img_next = Image.fromarray(img_next_hsv, mode='HSV').convert('RGB')
return img, img_next
class PhotometricDistort(object):
def __init__(self):
pass
def __call__(self, sample):
image, annots, image_next, annots_next = sample['img'], sample['annot'], sample['img_next'], sample['annot_next']
prob = np.random.uniform(0, 1)
# Apply different distort order
img = Image.fromarray(image)
img_next = Image.fromarray(image_next)
if prob > 0.5:
img, img_next = random_brightness(img, img_next)
img, img_next = random_contrast(img, img_next)
img, img_next = random_saturation(img, img_next)
img, img_next = random_hue(img, img_next)
else:
img, img_next = random_brightness(img, img_next)
img, img_next = random_saturation(img, img_next)
img, img_next = random_hue(img, img_next)
img, img_next = random_contrast(img, img_next)
image = np.array(img)
image_next = np.array(img_next)
return {'img': image, 'annot': annots, 'img_next': image_next, 'annot_next': annots_next}
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, image_next):
for t in self.transforms:
img, image_next = t(img, image_next)
return img, image_next
class RandomSampleCrop(object):
def __init__(self):
pass
def __call__(self, sample):
image, annots, image_next, annots_next = sample['img'], sample['annot'], sample['img_next'], sample['annot_next']
#print('crop1',image.dtype)
height, width, _ = image.shape
shorter_side = min(height, width)
crop_size = np.random.uniform(0.3 * shorter_side, 0.8 * shorter_side)
target_size = 512
if shorter_side < 384:
target_size = 256
min_iou = 0.2
crop_success = False
# max trials (20)
for _ in range(20):
left = np.random.uniform(0, width - crop_size)
top = np.random.uniform(0, height - crop_size)
# convert to integer rect x1,y1,x2,y2
rect = np.array([int(left), int(top), int(left + crop_size), int(top + crop_size)])
# calculate IoU (jaccard overlap) b/t the cropped and gt boxes
overlap = overlap_numpy(annots[:, :4], rect)
overlap_next = overlap_numpy(annots_next[:, :4], rect)
if overlap.max() < min_iou or overlap_next.max() < min_iou:
continue
crop_success = True
image = image[rect[1]:rect[3], rect[0]:rect[2], :]
image_next = image_next[rect[1]:rect[3], rect[0]:rect[2], :]
annots = annots[overlap > min_iou, :].copy()
annots_next = annots_next[overlap_next > min_iou, :].copy()
annots[:, :2] -= rect[:2]
annots[:, 2:4] -= rect[:2]
annots_next[:, :2] -= rect[:2]
annots_next[:, 2:4] -= rect[:2]
#print('crop1',image.max())
expand_ratio = 1.0
if np.random.uniform(0, 1) > 0.75:
height, width, depth = image.shape
expand_ratio = random.uniform(1, 3)
left = random.uniform(0, width * expand_ratio - width)
top = random.uniform(0, height * expand_ratio - height)
expand_image = np.zeros((int(height*expand_ratio), int(width*expand_ratio), depth), dtype=image.dtype)
expand_image[:, :, :] = np.array([[RGB_MEAN]]) * 255.0
expand_image[int(top):int(top + height),
int(left):int(left + width)] = image
image = expand_image
annots[:, :2] += (int(left), int(top))
annots[:, 2:4] += (int(left), int(top))
expand_next_image = np.zeros(
(int(height*expand_ratio), int(width*expand_ratio), depth),
dtype=image_next.dtype)
expand_next_image[:, :, :] = np.array([[RGB_MEAN]]) * 255.0
expand_next_image[int(top):int(top + height),
int(left):int(left + width)] = image_next
image_next = expand_next_image
annots_next[:, :2] += (int(left), int(top))
annots_next[:, 2:4] += (int(left), int(top))
# resize the image with the computed scale
image = (255.0 * skimage.transform.resize(image, (target_size, target_size))).astype(np.uint8)
image_next = (255.0 * skimage.transform.resize(image_next, (target_size, target_size))).astype(np.uint8)
annots[:, :4] *= (target_size / (crop_size * expand_ratio))
annots_next[:, :4] *= (target_size / (crop_size * expand_ratio))
#print('crop2',image.max())
return {'img': image, 'annot': annots, 'img_next': image_next, 'annot_next': annots_next}
if not crop_success:
image = (255.0 * skimage.transform.resize(image, (height // 2, width // 2))).astype(np.uint8)
image_next = (255.0 * skimage.transform.resize(image_next, (height // 2, width // 2))).astype(np.uint8)
annots[:, :4] *= 0.5
annots_next[:, :4] *= 0.5
return {'img': image, 'annot': annots, 'img_next': image_next, 'annot_next': annots_next}
class AspectRatioBasedSampler(Sampler):
def __init__(self, data_source, batch_size, drop_last):
self.data_source = data_source
self.batch_size = batch_size
self.drop_last = drop_last
self.groups = self.group_images()
def __iter__(self):
random.shuffle(self.groups)
for group in self.groups:
yield group
def __len__(self):
if self.drop_last:
return len(self.data_source) // self.batch_size
else:
return (len(self.data_source) + self.batch_size - 1) // self.batch_size
def group_images(self):
# determine the order of the images
order = list(range(len(self.data_source)))
order.sort(key=lambda x: self.data_source.image_aspect_ratio(x))
# divide into groups, one group = one batch
return [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in range(0, len(order), self.batch_size)]
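# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original loader): group_images
# above sorts the image indices by aspect ratio and then chunks them into
# batches, wrapping around with a modulo so the final batch is always full.
# The standalone helper below reproduces that logic on a plain list of ratios.
def _group_by_aspect_ratio(aspect_ratios, batch_size):
    """Return batches of indices sorted by aspect ratio, with wrap-around."""
    order = sorted(range(len(aspect_ratios)), key=lambda i: aspect_ratios[i])
    return [[order[x % len(order)] for x in range(i, i + batch_size)]
            for i in range(0, len(order), batch_size)]
# Example: _group_by_aspect_ratio([1.5, 0.7, 1.0, 2.0, 0.9], 2)
# -> [[1, 4], [2, 0], [3, 1]]  (index 1 wraps around to fill the last batch)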
| 37.494286
| 267
| 0.578031
|
dcfa1f104275238d4f223acb55c16023cd01b96d
| 3,100
|
py
|
Python
|
edward2_autoreparam/models_test.py
|
kinoute/google-research
|
4a59cab927579ea9722e43252c695de5da4eb5e2
|
[
"Apache-2.0"
] | 11
|
2020-01-29T07:25:04.000Z
|
2022-03-05T16:01:21.000Z
|
edward2_autoreparam/models_test.py
|
zhangyuezjx/google-research
|
4a59cab927579ea9722e43252c695de5da4eb5e2
|
[
"Apache-2.0"
] | 13
|
2020-01-28T22:19:53.000Z
|
2022-02-10T00:39:26.000Z
|
edward2_autoreparam/models_test.py
|
zhangyuezjx/google-research
|
4a59cab927579ea9722e43252c695de5da4eb5e2
|
[
"Apache-2.0"
] | 2
|
2019-12-07T19:01:03.000Z
|
2020-03-19T16:53:04.000Z
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for edward2_autoreparam.models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_probability import edward2 as ed
from edward2_autoreparam import models
class ModelsTest(tf.test.TestCase):
def _sanity_check_conversion(self, model, model_args, observed, to_cp, to_ncp,
make_to_cp):
with ed.tape() as model_tape:
model(*model_args)
model_tape_ = self.evaluate(model_tape)
example_params = list(model_tape_.values())[:-1]
# Test that `make_to_cp`, when given the centered parameterization as the
# source, generates the identity fn.
param_names = [p for v in model_tape_.keys() for p in (v + '_a', v + '_b')]
centered_parameterization = {p: 1. for p in param_names}
identity_cp = make_to_cp(**centered_parameterization)
example_params_copy = identity_cp(example_params)
c1_ = self.evaluate(example_params_copy)
c2_ = self.evaluate(example_params_copy)
self.assertAllClose(c1_, c2_)
self.assertAllClose(c1_, example_params)
# Test that `to_ncp` and `to_cp` are deterministic and consistent
ncp_params = to_ncp(example_params)
cp_params = to_cp(ncp_params)
ncp_params_, cp_params_ = self.evaluate((ncp_params, cp_params))
ncp_params2_, cp_params2_ = self.evaluate((ncp_params, cp_params))
# Test determinism
self.assertAllClose(ncp_params_, ncp_params2_)
self.assertAllClose(cp_params_, cp_params2_)
# Test round-trip consistency:
self.assertAllClose(cp_params_, example_params)
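# Editor's note (generic illustration, not edward2-specific): the round-trip
# property asserted above is the usual to_cp(to_ncp(x)) == x contract. For a
# scalar location-scale reparameterization it would read:
#     ncp = (x - loc) / scale      # to_ncp
#     cp  = ncp * scale + loc      # to_cp, which recovers x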
def test_german_credit_lognormal(self):
(model, model_args, observed,
to_cp, to_ncp, make_to_cp) = models.get_german_credit_lognormalcentered()
self._sanity_check_conversion(model, model_args, observed, to_cp, to_ncp,
make_to_cp)
def test_radon_stddvs(self):
(model, model_args, observed,
to_cp, to_ncp, make_to_cp) = models.get_radon_model_stddvs()
self._sanity_check_conversion(model, model_args, observed, to_cp, to_ncp,
make_to_cp)
def test_eight_schools(self):
(model, model_args, observed,
to_cp, to_ncp, make_to_cp) = models.get_eight_schools()
self._sanity_check_conversion(model, model_args, observed, to_cp, to_ncp,
make_to_cp)
if __name__ == '__main__':
tf.test.main()
| 36.046512
| 80
| 0.721935
|
0b02477973b984231866fa9147ad9bec4c2a9325
| 3,112
|
py
|
Python
|
contrib/zmq/zmq_sub.py
|
minblock/generalcoin
|
b3b51979823f77e54eb548df39e1b8037e480f50
|
[
"MIT"
] | null | null | null |
contrib/zmq/zmq_sub.py
|
minblock/generalcoin
|
b3b51979823f77e54eb548df39e1b8037e480f50
|
[
"MIT"
] | null | null | null |
contrib/zmq/zmq_sub.py
|
minblock/generalcoin
|
b3b51979823f77e54eb548df39e1b8037e480f50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Generalcoind should be started with the command line arguments:
generalcoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` reschedules itself as a
future at the end of the function, so the event loop never runs out of
pending futures and the subscriber keeps receiving messages indefinitely.
An alternative is to wrap the contents of `handle` inside `while True`
(a sketch of that variant is appended at the end of this file).
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if (sys.version_info.major, sys.version_info.minor) < (3, 5):
print("This example only works with Python 3.5 and greater")
sys.exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
async def handle(self) :
msg = await self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
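# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): the module docstring notes
# that, instead of having handle() reschedule itself as a future, the receive
# logic could be wrapped in `while True`. A minimal version of that alternative
# is the coroutine below (it assumes the same ZMQHandler attribute layout and
# is defined here purely for illustration, since daemon.start() blocks above).
async def handle_loop(handler):
    """Receive and print messages forever without rescheduling a future."""
    while True:
        msg = await handler.zmqSubSocket.recv_multipart()
        topic, body = msg[0], msg[1]
        print(topic, binascii.hexlify(body))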
| 36.186047
| 107
| 0.645244
|
c7c3373fb583f09ad9b98bbad30cc9806da172a5
| 1,295
|
py
|
Python
|
sdk/core/azure-core/tests/async_tests/test_rest_headers_async.py
|
navali-msft/azure-sdk-for-python
|
716a7d1f4a115a74243fd399fa2e6d08c81a9aeb
|
[
"MIT"
] | null | null | null |
sdk/core/azure-core/tests/async_tests/test_rest_headers_async.py
|
navali-msft/azure-sdk-for-python
|
716a7d1f4a115a74243fd399fa2e6d08c81a9aeb
|
[
"MIT"
] | null | null | null |
sdk/core/azure-core/tests/async_tests/test_rest_headers_async.py
|
navali-msft/azure-sdk-for-python
|
716a7d1f4a115a74243fd399fa2e6d08c81a9aeb
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import pytest
from azure.core.rest import HttpRequest
@pytest.mark.asyncio
async def test_response_headers_case_insensitive(client):
request = HttpRequest("GET", "/basic/headers")
response = await client.send_request(request)
response.raise_for_status()
assert (
response.headers["lowercase-header"] ==
response.headers["LOWERCASE-HEADER"] ==
response.headers["Lowercase-Header"] ==
response.headers["lOwErCasE-HeADer"] ==
"lowercase"
)
assert (
response.headers["allcaps-header"] ==
response.headers["ALLCAPS-HEADER"] ==
response.headers["Allcaps-Header"] ==
response.headers["AlLCapS-HeADer"] ==
"ALLCAPS"
)
assert (
response.headers["camelcase-header"] ==
response.headers["CAMELCASE-HEADER"] ==
response.headers["CamelCase-Header"] ==
response.headers["cAMeLCaSE-hEadER"] ==
"camelCase"
)
return response
| 35
| 75
| 0.580695
|
ef89793fd73dc996f442913f60e0920545d83d5b
| 3,231
|
py
|
Python
|
tensorflow/python/keras/applications/__init__.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 1
|
2019-11-27T16:08:48.000Z
|
2019-11-27T16:08:48.000Z
|
tensorflow/python/keras/applications/__init__.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/applications/__init__.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Applications are canned architectures with pre-trained weights."""
# pylint: disable=g-import-not-at-top
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras_applications
from tensorflow.python.keras import backend
from tensorflow.python.keras import engine
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras.utils import all_utils
from tensorflow.python.util import tf_inspect
def keras_modules_injection(base_fun):
"""Decorator injecting tf.keras replacements for Keras modules.
Arguments:
base_fun: Application function to decorate (e.g. `MobileNet`).
Returns:
Decorated function that injects keyword argument for the tf.keras
modules required by the Applications.
"""
def wrapper(*args, **kwargs):
kwargs['backend'] = backend
if 'layers' not in kwargs:
kwargs['layers'] = layers
kwargs['models'] = models
kwargs['utils'] = all_utils
return base_fun(*args, **kwargs)
return wrapper
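# Editor's sketch (illustrative, not part of the TensorFlow source): the
# decorator above wraps a `keras_applications` factory so that the tf.keras
# backend/layers/models/utils are passed in automatically. The factory below
# is a hypothetical stand-in used only to show which keyword arguments the
# wrapped call receives.
def _keras_modules_injection_example():
  @keras_modules_injection
  def _dummy_factory(*args, **kwargs):
    # A real factory (e.g. MobileNet) would build a model here; we simply
    # report which injected modules arrived.
    return sorted(k for k in kwargs if k in ('backend', 'layers', 'models', 'utils'))
  return _dummy_factory()  # -> ['backend', 'layers', 'models', 'utils']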
from tensorflow.python.keras.applications.densenet import DenseNet121
from tensorflow.python.keras.applications.densenet import DenseNet169
from tensorflow.python.keras.applications.densenet import DenseNet201
from tensorflow.python.keras.applications.imagenet_utils import decode_predictions
from tensorflow.python.keras.applications.imagenet_utils import preprocess_input
from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.python.keras.applications.nasnet import NASNetLarge
from tensorflow.python.keras.applications.nasnet import NASNetMobile
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.applications.resnet import ResNet101
from tensorflow.python.keras.applications.resnet import ResNet152
from tensorflow.python.keras.applications.resnet_v2 import ResNet50V2
from tensorflow.python.keras.applications.resnet_v2 import ResNet101V2
from tensorflow.python.keras.applications.resnet_v2 import ResNet152V2
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.applications.vgg19 import VGG19
from tensorflow.python.keras.applications.xception import Xception
| 44.260274
| 86
| 0.798514
|
9fc1fa2954c3927d04dfb85add55596ac5f227c3
| 805
|
py
|
Python
|
Concurrency/codeSample/Part10_The_Multiprocessing_Module/countdownp2.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | 1
|
2018-05-30T02:36:46.000Z
|
2018-05-30T02:36:46.000Z
|
Concurrency/codeSample/Part10_The_Multiprocessing_Module/countdownp2.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | null | null | null |
Concurrency/codeSample/Part10_The_Multiprocessing_Module/countdownp2.py
|
Chyi341152/pyConPaper
|
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# countdownp2.py
# A simple performance test of CPU-bound processes. Compare to the thread example module
import time
import multiprocessing
# Function executed in the worker processes (compare to the threading example;
# a spawn-safe variant is sketched at the end of this file)
def countdown(count):
while count > 0:
count -= 1
return
start = time.time()
countdown(10000000)
countdown(10000000)
end = time.time()
print("Sequential ", end-start)
start = time.time()
p1 = multiprocessing.Process(target=countdown, args=(10000000,)) # Create the process object
p2 = multiprocessing.Process(target=countdown, args=(10000000,)) # Create the process object
p1.start() # Launch the process
p2.start()
p1.join()
p2.join()
end = time.time()
print("Multiproceessing", end-start)
print("Macintosh 1984")
| 22.361111
| 95
| 0.667081
|
11bc35b36ece1902b2e369be22d69f47c7f8ed44
| 11,609
|
py
|
Python
|
src/parity_game.py
|
olijzenga/bdd-parity-game-solver
|
22195bfb303986c872fce7ee710d0fe6284ab9a3
|
[
"Apache-2.0"
] | 1
|
2021-06-21T07:22:48.000Z
|
2021-06-21T07:22:48.000Z
|
src/parity_game.py
|
olijzenga/bdd-parity-game-solver
|
22195bfb303986c872fce7ee710d0fe6284ab9a3
|
[
"Apache-2.0"
] | null | null | null |
src/parity_game.py
|
olijzenga/bdd-parity-game-solver
|
22195bfb303986c872fce7ee710d0fe6284ab9a3
|
[
"Apache-2.0"
] | null | null | null |
from dd.cudd import BDD
from bdd_provider import make_bdd
from graphviz import Source
# Parity game class, a slightly altered version of the one used by Sanchez et al.:
# Sanchez, L., Wesselink, J.W., & Willemse, T.A.C. (2018). BDD-based parity game solving: a comparison of
# Zielonka's recursive algorithm, priority promotion and fixpoint iteration. (Computer science reports; Vol. 1801).
# Eindhoven: Technische Universiteit Eindhoven.
# https://pure.tue.nl/ws/files/92755535/CSR_18_01.pdf
class parity_game:
def __init__ (self, bdd: BDD, variables, variables_, v: BDD, e: BDD, even: BDD, odd: BDD, p):
"""Initializes a parity game instance
:param bdd: the BDD used as the data structure for this parity game
:type bdd: dd.autoref.BDD
:param variables: variables used for representing vertices in BDDs
:type variables: list<str>
:param variables_: variables used to represent successor vertices for edges and strategies
:type variables_: list<str>
:param v: BDD which can determine whether a vector represents a vertex in the parity game
:type v: dd.autoref.BDD
:param e: BDD which represents the edges of the parity game
:type e: dd.autoref.BDD
:param even: BDD which determines whether a vertex is owned by player even
:type even: dd.autoref.BDD
:param odd: BDD which determines whether a vertex is owned by player odd
:type odd: dd.autoref.BDD
:param p: maps a priority to a BDD which determines if a vertex has that priority
:type p: dict<dd.autoref.BDD>
"""
self.bdd = bdd
self.variables = variables
self.variables_ = variables_
self.substitution_list = { variables[i] : variables_[i] for i in range(len(variables)) }
self.reverse_substitution_list = { variables_[i] : variables[i] for i in range(len(variables)) }
self.v = v
self.e = e
self.even = even
self.odd = odd
self.p = p
self.d = max(p)
self.prio_even = bdd.false
self.prio_odd = bdd.false
# Build BDDs for deciding whether a vertex has an even or an odd priority
for prio in p:
if prio % 2:
self.prio_odd = self.prio_odd | p[prio]
else:
self.prio_even = self.prio_even | p[prio]
# Add empty priority BDDs for priorities which have no vertices
for prio in range(0, self.d):
if prio not in p:
p[prio] = bdd.false
# Return the sum of the SAT counts of all BDDs of this parity game
def get_sat_count(self):
count = 0
count += len(list(self.bdd.pick_iter(self.v, care_vars=self.variables)))
count += len(list(self.bdd.pick_iter(self.e, care_vars=(self.variables + self.variables_))))
count += len(list(self.bdd.pick_iter(self.even, care_vars=self.variables)))
count += len(list(self.bdd.pick_iter(self.odd, care_vars=self.variables)))
count += sum([ len(list(self.bdd.pick_iter(self.p[prio], care_vars=self.variables))) for prio in self.p.keys() ])
count += len(list(self.bdd.pick_iter(self.prio_even, care_vars=self.variables)))
count += len(list(self.bdd.pick_iter(self.prio_odd, care_vars=self.variables)))
return count
def get_avg_out_deg(self):
return len(list(self.bdd.pick_iter(self.e, care_vars=(self.variables + self.variables_)))) / len(list(self.bdd.pick_iter(self.v, care_vars=self.variables)))
# Convert a SAT value to a hex string for a more compact representation
def sat_to_hex(self, sat, edge=False):
res = ""
bytea = []
pos = 0
cur = 0
for var in self.variables:
if(sat[var]):
cur += pow(2,pos)
if pos == 7:
bytea.append(cur)
cur = 0
pos = 0
else:
pos += 1
if pos != 0:
bytea.append(cur)
bytea.reverse()
res += ''.join('{:02x}'.format(x) for x in bytea)
if edge:
bytea = []
pos = 0
cur = 0
for var in self.variables_:
if(sat[var]):
cur += pow(2,pos)
if pos == 7:
bytea.append(cur)
cur = 0
pos = 0
else:
pos += 1
if pos != 0:
bytea.append(cur)
bytea.reverse()
res = ''.join('{:02x}'.format(x) for x in bytea)
return res
# Display the parity game using graphviz
def show(self):
self.make_dot("output/pg.dot")
with open("output/pg.dot", "r") as text_file:
s = Source(text_file.read(), filename="output/dot.png", format="png")
s.view()
# Return a comma-separated hex representation of every SAT of _bdd_
def bdd_sat(self, bdd: BDD):
return ', '.join([self.sat_to_hex(sat) for sat in self.bdd.pick_iter(bdd, self.variables)])
# Return a comma-separated hex representation of every SAT of the edges represented by _bdd_
def bdd_sat_edges(self, bdd: BDD):
return ', '.join([self.sat_to_hex(sat) + " <==> " + self.sat_to_hex(sat, edge=True) for sat in self.bdd.pick_iter(bdd, care_vars=(self.variables_ + self.variables))])
# Gather data used for exporting this parity game to a dot file, or printing it
def get_repr_data(self):
data = {}
for v_0 in self.bdd.pick_iter(self.even, care_vars=self.variables):
data[self.sat_to_hex(v_0)] = ('Even', None, [])
for v_1 in self.bdd.pick_iter(self.odd, care_vars=self.variables):
data[self.sat_to_hex(v_1)] = ('Odd ', None, [])
for prio in self.p:
for v in self.bdd.pick_iter(self.p[prio], care_vars=self.variables):
d = data[self.sat_to_hex(v)]
data[self.sat_to_hex(v)] = (d[0], prio, [])
for e in self.bdd.pick_iter(self.e, care_vars=(self.variables + self.variables_)):
d = data[self.sat_to_hex(e)]
d[2].append(self.sat_to_hex(e, edge=True))
data[self.sat_to_hex(e)] = (d[0], d[1], d[2])
return data
def __repr__(self):
data = self.get_repr_data()
res = ""
for h in sorted(data):
d = data[h]
res += h + ' ' + d[0] + ' prio: ' + str(d[1]) + ' edges: ' + (', '.join(d[2])) + '\n'
return res
# Build a dot file for this parity game
def make_dot(self, filename):
data = self.get_repr_data()
res = "digraph parity_game {\n"
for h in sorted(data):
d = data[h]
res += "\t\"" + h + "\" [label=\"" + str(d[1]) + " (" + h + ")\", shape=" + ('diamond' if d[0] == 'Even' else 'box') + "];\n"
for h in sorted(data):
d = data[h]
#for e in d[2]:
# res += "\t\"" + h + "\" -> \"" + e + "\"\n"
res += "\t\"" + h + "\" -> {" + (', '.join([ "\"" + x + "\"" for x in d[2] ])) + "};\n"
res += "\n}"
with open(filename, "w") as text_file:
print(res, file=text_file)
def copy(self, deep=False):
if deep:
bdd = make_bdd()
bdd.declare(*self.variables)
bdd.declare(*self.variables_)
v = self.bdd.copy(self.v, bdd)
e = self.bdd.copy(self.e, bdd)
even = self.bdd.copy(self.even, bdd)
odd = self.bdd.copy(self.odd, bdd)
p = { prio : self.bdd.copy(self.p[prio], bdd) for prio in self.p.keys() }
c = parity_game(bdd, self.variables, self.variables_, v, e, even, odd, p)
else:
c = parity_game(self.bdd, self.variables, self.variables_, self.v, self.e, self.even, self.odd, self.p)
return c
def has_successor (self, U):
#check whether U = { u in V | u in U /\ exists v in V: u --> v }
V_= self.bdd.let(self.substitution_list, self.v)
U_next = self.bdd.let(self.substitution_list, U)
Z = self.bdd.quantify(U_next & self.e, self.variables_, forall = False) & U
return (Z == U)
def successor (self, U):
U_next = self.bdd.quantify(self.e & U, self.variables, forall = False)
U_next = U | self.bdd.let(self.reverse_substitution_list, U_next)
return U_next
def reachable (self, U):
U_next = self.successor(U)
i = 0
while U != U_next:
U = U_next
U_next = self.successor(U)
i = i +1
return U_next
def predecessor (self, player, U):
# U is a BDD representing a set of vertices
# player is either string 'even' or string 'odd'
(V_player,V_opponent) = (self.even, self.odd) if (player == 'even') else (self.odd, self.even)
V_ = self.bdd.let(self.substitution_list, self.v)
U_next = self.bdd.let(self.substitution_list, U)
U_player = V_player & self.bdd.quantify(U_next & self.e, self.variables_, forall = False)
# V_opponent /\ {v in V | forall u in V: v --> u ==> u in U } =
# V_opponent /\ {v in V | ~ (exists u in V: v --> u /\ u in V\U) }
U_opponent = V_opponent & ~(self.bdd.quantify(self.e & V_ & ~U_next, self.variables_, forall = False) )
# return union of the two sets
return U_player | U_opponent
def predecessor_gen (self, player, X, U):
# X,U are BDDs representing a set of vertices
# X is used to restrict the set of edges to stay within X
# player is either string 'even' or string 'odd'
(V_player,V_opponent) = (self.even, self.odd) if (player == 'even') else (self.odd, self.even)
V_ = self.bdd.let(self.substitution_list, self.v)
X_ = self.bdd.let(self.substitution_list, X)
E = self.e & X & X_
U_next = self.bdd.let(self.substitution_list, U)
U_player = V_player & self.bdd.quantify(U_next & E, self.variables_, forall = False)
# V_opponent /\ {v in V | forall u in V: v --> u ==> u in U } =
# V_opponent /\ {v in V | ~ (exists u in V: v --> u /\ u in V\U) }
U_opponent = V_opponent & ~(self.bdd.quantify(E & V_ & ~U_next, self.variables_, forall = False) )
# return union of the two sets
return U_player | U_opponent
def attractor (self, player, A):
# U is a BDD representing a set of vertices
# player is either string 'even' or string 'odd'
# attractor computation is a least fixpoint computation
tmp = self.bdd.false
tmp_= A
while tmp != tmp_:
tmp = tmp_
tmp_ = tmp_ | self.predecessor ( player, tmp_)
return tmp
def attractor_gen(self, player, V, A):
tmp = self.bdd.false
tmp_ = A & V
while tmp != tmp_:
tmp = tmp_
tmp_ = V & (tmp_ | self.predecessor_gen( player, V, tmp_)) # the use of predecessor_gen and intersection with V are both required!
return tmp
def remove (self, A):
# removing a set of vertices represented by BDD A
(self.v, self.even, self.odd) = (self.v & ~A, self.even & ~A, self.odd & ~A)
A_ = self.bdd.let(self.substitution_list, A)
self.e = self.e & ~A & ~A_
p_ = { i : self.p[i] & ~A for i in self.p if self.p[i] & ~A != self.bdd.false}
self.p = p_
#self.bdd.collect_garbage()
# Convert a variable assignment to a boolean expression
def sat_to_expr(sat: dict):
return '&'.join([ var if sat[var] else ('~' + var) for var in sat.keys() ])
| 41.460714
| 174
| 0.565596
|
140d67fedf06fbae600d7d7a8aa0ee9177633960
| 58,529
|
py
|
Python
|
SLpackage/private/thirdparty/pythonpkgs/numpy/numpy_1.13.1/lib/python2.7/site-packages/numpy/f2py/rules.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 5
|
2022-02-20T07:10:02.000Z
|
2022-03-18T17:47:53.000Z
|
SLpackage/private/thirdparty/pythonpkgs/numpy/numpy_1.13.1/lib/python2.7/site-packages/numpy/f2py/rules.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
SLpackage/private/thirdparty/pythonpkgs/numpy/numpy_1.13.1/lib/python2.7/site-packages/numpy/f2py/rules.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
#! python
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (successful) {
put_a_to_python
if (successful) {
put_b_to_python
if (successful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import os
import time
import copy
from .auxfuncs import (
applyrules, debugcapi, dictappend, errmess, gentitle, getargs2,
hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote,
isarray, isarrayofstrings, iscomplex, iscomplexarray,
iscomplexfunction, iscomplexfunction_warn, isdummyroutine, isexternal,
isfunction, isfunction_wrap, isint1array, isintent_aux, isintent_c,
isintent_callback, isintent_copy, isintent_hide, isintent_inout,
isintent_nothide, isintent_out, isintent_overwrite, islogical,
islong_complex, islong_double, islong_doublefunction, islong_long,
islong_longfunction, ismoduleroutine, isoptional, isrequired, isscalar,
issigned_long_longarray, isstring, isstringarray, isstringfunction,
issubroutine, issubroutine_wrap, isthreadsafe, isunsigned,
isunsigned_char, isunsigned_chararray, isunsigned_long_long,
isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray,
l_and, l_not, l_or, outmess, replace, stripcomma,
)
from . import capi_maps
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
options = {}
sepdict = {}
#for k in ['need_cfuncs']: sepdict[k]=','
for k in ['decl',
'frompyobj',
'cleanupfrompyobj',
'topyarr', 'method',
'pyobjfrom', 'closepyobjfrom',
'freemem',
'userincludes',
'includes0', 'includes', 'typedefs', 'typedefs_generated',
'cppmacros', 'cfuncs', 'callbacks',
'latexdoc',
'restdoc',
'routine_defs', 'externroutines',
'initf2pywraphooks',
'commonhooks', 'initcommonhooks',
'f90modhooks', 'initf90modhooks']:
sepdict[k] = '\n'
#################### Rules for C/API module #################
module_rules = {
'modulebody': """\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <pearu@cens.ioc.ee>.
* See http://cens.ioc.ee/projects/f2py2e/
* Generation date: """ + time.asctime(time.localtime(time.time())) + """
* $R""" + """evision:$
* $D""" + """ate:$
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
""" + gentitle("See f2py2e/cfuncs.py: includes") + """
#includes#
#includes0#
""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """
#typedefs#
""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """
#typedefs_generated#
""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """
#cppmacros#
""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """
#cfuncs#
""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """
#userincludes#
""" + gentitle("See f2py2e/capi_rules.py: usercode") + """
#usercode#
/* See f2py2e/rules.py */
#externroutines#
""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """
#usercode1#
""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """
#callbacks#
""" + gentitle("See f2py2e/rules.py: buildapi") + """
#body#
""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """
#f90modhooks#
""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """
""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """
#commonhooks#
""" + gentitle("See f2py2e/rules.py") + """
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
\tNULL,
\t-1,
\tf2py_module_methods,
\tNULL,
\tNULL,
\tNULL,
\tNULL
};
#endif
#if PY_VERSION_HEX >= 0x03000000
#define RETVAL m
PyMODINIT_FUNC PyInit_#modulename#(void) {
#else
#define RETVAL
PyMODINIT_FUNC init#modulename#(void) {
#endif
\tint i;
\tPyObject *m,*d, *s;
#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
#else
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
#endif
\tPy_TYPE(&PyFortran_Type) = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R""" + """evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
#else
\ts = PyString_FromString(
#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
\treturn RETVAL;
}
#ifdef __cplusplus
}
#endif
""",
'separatorsfor': {'latexdoc': '\n\n',
'restdoc': '\n\n'},
'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
'restdoc': ['Module #modulename#\n' + '=' * 80,
'\n#restdoc#']
}
defmod_rules = [
{'body': '/*eof body*/',
'method': '/*eof method*/',
'externroutines': '/*eof externroutines*/',
'routine_defs': '/*eof routine_defs*/',
'initf90modhooks': '/*eof initf90modhooks*/',
'initf2pywraphooks': '/*eof initf2pywraphooks*/',
'initcommonhooks': '/*eof initcommonhooks*/',
'latexdoc': '',
'restdoc': '',
'modnote': {hasnote: '#note#', l_not(hasnote): ''},
}
]
routine_rules = {
'separatorsfor': sepdict,
'body': """
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
'routine_defs': '#routine_def#',
'initf2pywraphooks': '#initf2pywraphook#',
'externroutines': '#declfortranroutine#',
'doc': '#docreturn##name#(#docsignature#)',
'docshort': '#docreturn##name#(#docsignatureshort#)',
'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n',
'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'],
'cppmacros': {debugcapi: '#define DEBUGCFUNCS'},
'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80,
]
}
################## Rules for C/API function ##############
rout_rules = [
{ # Init
'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n',
'routdebugleave': '\n', 'routdebugfailure': '\n',
'setjmpbuf': ' || ',
'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n',
'docstrcbs': '\n', 'docstrsigns': '\\n"\n"',
'latexdocstrsigns': '\n',
'latexdocstrreq': '\n', 'latexdocstropt': '\n',
'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
},
'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '',
'docsign': '', 'docsignopt': '', 'decl': '/*decl*/',
'freemem': '/*freemem*/',
'docsignshort': '', 'docsignoptshort': '',
'docstrsigns': '', 'latexdocstrsigns': '',
'docstrreq': '\\nParameters\\n----------',
'docstropt': '\\nOther Parameters\\n----------------',
'docstrout': '\\nReturns\\n-------',
'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
'latexdocstrout': '\\noindent Return objects:',
'latexdocstrcbs': '\\noindent Call-back functions:',
'args_capi': '', 'keys_capi': '', 'functype': '',
'frompyobj': '/*frompyobj*/',
# this list will be reversed
'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'],
'pyobjfrom': '/*pyobjfrom*/',
# this list will be reversed
'closepyobjfrom': ['/*end of closepyobjfrom*/'],
'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/',
'routdebugenter': '/*routdebugenter*/',
'routdebugfailure': '/*routdebugfailure*/',
'callfortranroutine': '/*callfortranroutine*/',
'argformat': '', 'keyformat': '', 'need_cfuncs': '',
'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '',
'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '',
'initf2pywraphook': '',
'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
}, {
'apiname': 'f2py_rout_#modulename#_#name#',
'pyname': '#modulename#.#name#',
'decl': '',
'_check': l_not(ismoduleroutine)
}, {
'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname': '#modulename#.#f90modulename#.#name#',
'decl': '',
'_check': ismoduleroutine
}, { # Subroutine
'functype': 'void',
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine: '',
isdummyroutine: ''
},
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement: '''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals: """\t\t}"""}
],
'_check': l_and(issubroutine, l_not(issubroutine_wrap)),
}, { # Wrapped function
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t(*f2py_func)(#callfortran#);'},
{hascallstatement:
'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'}
],
'_check': isfunction_wrap,
}, { # Wrapped subroutine
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t(*f2py_func)(#callfortran#);'},
{hascallstatement:
'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'}
],
'_check': issubroutine_wrap,
}, { # Function
'functype': '#ctype#',
'docreturn': {l_not(isintent_hide): '#rname#,'},
'docstrout': '#pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote: '--- #resultnote#'}],
'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi, l_not(isstringfunction)): """\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check': l_and(isfunction, l_not(isfunction_wrap))
}, { # Scalar function
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine: ''
},
'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:
'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine': [
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement: '''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'},
{l_and(debugcapi, iscomplexfunction)
: '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need': [{l_not(isdummyroutine): 'F_FUNC'},
{iscomplexfunction: 'pyobj_from_#ctype#1'},
{islong_longfunction: 'long_long'},
{islong_doublefunction: 'long_double'}],
'returnformat': {l_not(isintent_hide): '#rformat#'},
'return': {iscomplexfunction: ',#name#_return_value_capi',
l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'},
'_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap))
}, { # String function # in use for --no-wrap
'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)):
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c):
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl': ['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals: """\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe: '\t\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t\t}'},
{debugcapi:
'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat': '#rformat#',
'return': ',#name#_return_value',
'freemem': '\tSTRINGFREE(#name#_return_value);',
'need': ['F_FUNC', '#ctype#', 'STRINGFREE'],
'_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check': debugcapi
}
]
################ Rules for arguments ##################
typedef_need_dict = {islong_long: 'long_long',
islong_double: 'long_double',
islong_complex: 'complex_long_double',
isunsigned_char: 'unsigned_char',
isunsigned_short: 'unsigned_short',
isunsigned: 'unsigned',
isunsigned_long_long: 'unsigned_long_long',
isunsigned_chararray: 'unsigned_char',
isunsigned_shortarray: 'unsigned_short',
isunsigned_long_longarray: 'unsigned_long_long',
issigned_long_longarray: 'long_long',
}
aux_rules = [
{
'separatorsfor': sepdict
},
{ # Common
'frompyobj': ['\t/* Processing auxiliary variable #varname# */',
{debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
'need': typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl': '\t#ctype# #varname# = 0;',
'need': {hasinitvalue: 'math.h'},
'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
'_check': l_and(isscalar, l_not(iscomplex)),
},
{
'return': ',#varname#',
'docstrout': '#pydocsignout#',
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': l_and(isscalar, l_not(iscomplex), isintent_out),
},
# Complex scalars
{ # Common
'decl': '\t#ctype# #varname#;',
'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': iscomplex
},
# String
{ # Common
'decl': ['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl': ['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray
},
# Scalararray
{ # Common
'_check': l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need': '#ctype#',
'_check': isint1array,
'_depend': ''
},
# Integer*-1 array
{'need': '#ctype#',
'_check': isunsigned_chararray,
'_depend': ''
},
# Integer*-2 array
{'need': '#ctype#',
'_check': isunsigned_shortarray,
'_depend': ''
},
# Integer*-8 array
{'need': '#ctype#',
'_check': isunsigned_long_longarray,
'_depend': ''
},
# Complexarray
{'need': '#ctype#',
'_check': iscomplexarray,
'_depend': ''
},
# Stringarray
{
'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
'need': 'string',
'_check': isstringarray
}
]
arg_rules = [
{
'separatorsfor': sepdict
},
{ # Common
'frompyobj': ['\t/* Processing variable #varname# */',
{debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
'_depend': '',
'need': typedef_need_dict,
},
# Doc signatures
{
'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'},
'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'},
'docstrout': {isintent_out: '#pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote, isintent_hide): '--- #note#',
l_and(hasnote, isintent_nothide): '--- See above.'}]},
'depend': ''
},
# Required/Optional arguments
{
'kwlist': '"#varname#",',
'docsign': '#varname#,',
'_check': l_and(isintent_nothide, l_not(isoptional))
},
{
'kwlistopt': '"#varname#",',
'docsignopt': '#varname#=#showinit#,',
'docsignoptshort': '#varname#,',
'_check': l_and(isintent_nothide, isoptional)
},
# Docstring/BuildValue
{
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'},
'docsignxashort': {isintent_nothide: '#varname#_extra_args,'},
'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs': '#cbdocstr#',
'latexdocstrcbs': '\\item[] #cblatexdocstr#',
'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl': ['\tPyObject *#varname#_capi = Py_None;',
'\tPyTupleObject *#varname#_xa_capi = NULL;',
'\tPyTupleObject *#varname#_args_capi = NULL;',
'\tint #varname#_nofargs_capi = 0;',
{l_not(isintent_callback):
'\t#cbname#_typedef #varname#_cptr;'}
],
'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'},
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'xaformat': {isintent_nothide: 'O!'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf': '(setjmp(#cbname#_jmpbuf))',
'callfortran': {l_not(isintent_callback): '#varname#_cptr,'},
'need': ['#cbname#', 'setjmp.h'],
'_check':isexternal
},
{
'frompyobj': [{l_not(isintent_callback): """\
if(F2PyCapsule_Check(#varname#_capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi);
} else {
#varname#_cptr = #cbname#;
}
"""}, {isintent_callback: """\
if (#varname#_capi==Py_None) {
#varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp)
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
else
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
"""\
\t#varname#_nofargs_capi = #cbname#_nofargs;
\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
\t\tjmp_buf #varname#_jmpbuf;""",
{debugcapi: ["""\
\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback): """\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
],
'cleanupfrompyobj':
"""\
\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
\t\t#cbname#_capi = #varname#_capi;
\t\tPy_DECREF(#cbname#_args_capi);
\t\t#cbname#_args_capi = #varname#_args_capi;
\t\t#cbname#_nofargs = #varname#_nofargs_capi;
\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
\t}""",
'need': ['SWAP', 'create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl': '\t#ctype# #varname# = 0;',
'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'return': {isintent_out: ',#varname#'},
'_check': l_and(isscalar, l_not(iscomplex))
}, {
'need': {hasinitvalue: 'math.h'},
'_check': l_and(isscalar, l_not(iscomplex)),
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: """\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide)
}, {
'frompyobj': [
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
{hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend': ''},
{l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)',
'_depend': ''},
{l_not(islogical): '''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical: '''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/',
'need': {l_not(islogical): '#ctype#_from_pyobj'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide),
'_depend': ''
}, { # Hidden
'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
'need': typedef_need_dict,
'_check': l_and(isscalar, l_not(iscomplex), isintent_hide),
'_depend': ''
}, { # Common
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check': l_and(isscalar, l_not(iscomplex)),
'_depend': ''
},
# Complex scalars
{ # Common
'decl': '\t#ctype# #varname#;',
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return': {isintent_out: ',#varname#_capi'},
'_check': iscomplex
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'pyobjfrom': {isintent_inout: """\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check': l_and(iscomplex, isintent_nothide)
}, {
'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional, l_not(hasinitvalue))
: '\tif (#varname#_capi != Py_None)'},
'\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n\tif (f2py_success) {'],
'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/',
'need': ['#ctype#_from_pyobj'],
'_check': l_and(iscomplex, isintent_nothide),
'_depend': ''
}, { # Hidden
'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'},
'_check': l_and(iscomplex, isintent_hide)
}, {
'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': l_and(iscomplex, isintent_hide),
'_depend': ''
}, { # Common
'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need': ['pyobj_from_#ctype#1'],
'_check': iscomplex
}, {
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check': iscomplex,
'_depend': ''
},
# String
{ # Common
'decl': ['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':{debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'return': {isintent_out: ',#varname#'},
'need': ['len..'], # 'STRINGFREE'],
'_check':isstring
}, { # Common
'frompyobj': """\
\tslen(#varname#) = #length#;
\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj': """\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE'],
'_check':isstring,
'_depend':''
}, { # Not hidden
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: '''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isstring, isintent_nothide)
}, { # Hidden
'_check': l_and(isstring, isintent_hide)
}, {
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check': isstring,
'_depend': ''
},
# Array
{ # Common
'decl': ['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out: ',capi_#varname#_tmp'},
'need': 'len..',
'_check': isarray
}, { # intent(overwrite) array
'decl': '\tint capi_overwrite_#varname# = 1;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=1,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1',
'_check': l_and(isarray, isintent_overwrite),
}, {
'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_overwrite),
'_depend': '',
},
{ # intent(copy) array
'decl': '\tint capi_overwrite_#varname# = 0;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=0,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0',
'_check': l_and(isarray, isintent_copy),
}, {
'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_copy),
'_depend': '',
}, {
'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray,
'_depend': ''
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'_check': l_and(isarray, isintent_nothide)
}, {
'frompyobj': ['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:
'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:
'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tif (!PyErr_Occurred())
\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t} else {
\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
""",
{hasinitvalue: [
{isintent_nothide:
'\tif (#varname#_capi == Py_None) {'},
{isintent_hide: '\t{'},
{iscomplexarray: '\t\t#ctype# capi_c;'},
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tif (!PyErr_Occurred())
\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj': [ # note that this list will be reversed
'\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out, isintent_hide)): """\
\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide, l_not(isintent_out))
: """\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'},
],
'_check': isarray,
'_depend': ''
},
# Scalararray
{ # Common
'_check': l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need': '#ctype#',
'_check': isint1array,
'_depend': ''
},
# Integer*-1 array
{'need': '#ctype#',
'_check': isunsigned_chararray,
'_depend': ''
},
# Integer*-2 array
{'need': '#ctype#',
'_check': isunsigned_shortarray,
'_depend': ''
},
# Integer*-8 array
{'need': '#ctype#',
'_check': isunsigned_long_longarray,
'_depend': ''
},
# Complexarray
{'need': '#ctype#',
'_check': iscomplexarray,
'_depend': ''
},
# Stringarray
{
'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
'need': 'string',
'_check': isstringarray
}
]
################# Rules for checking ###############
check_rules = [
{
'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need': 'len..'
}, {
'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/',
'need': 'CHECKSCALAR',
'_check': l_and(isscalar, l_not(iscomplex)),
'_break': ''
}, {
'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/',
'need': 'CHECKSTRING',
'_check': isstring,
'_break': ''
}, {
'need': 'CHECKARRAY',
'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/',
'_check': isarray,
'_break': ''
}, {
'need': 'CHECKGENERIC',
'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/',
}
]
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m, um):
"""
Return
"""
global f2py_version, options
outmess('\tBuilding module "%s"...\n' % (m['name']))
ret = {}
mod_rules = defmod_rules[:]
vrd = capi_maps.modsign2map(m)
rd = dictappend({'f2py_version': f2py_version}, vrd)
funcwrappers = []
funcwrappers2 = [] # F90 codes
for n in m['interfaced']:
nb = None
for bi in m['body']:
if not bi['block'] == 'interface':
errmess('buildmodule: Expected interface block. Skipping.\n')
continue
for b in bi['body']:
if b['name'] == n:
nb = b
break
if not nb:
errmess(
                'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n))
continue
nb_list = [nb]
if 'entry' in nb:
for k, a in nb['entry'].items():
nb1 = copy.deepcopy(nb)
del nb1['entry']
nb1['name'] = k
nb1['args'] = a
nb_list.append(nb1)
for nb in nb_list:
api, wrap = buildapi(nb)
if wrap:
if ismoduleroutine(nb):
funcwrappers2.append(wrap)
else:
funcwrappers.append(wrap)
ar = applyrules(api, vrd)
rd = dictappend(rd, ar)
# Construct COMMON block support
cr, wrap = common_rules.buildhooks(m)
if wrap:
funcwrappers.append(wrap)
ar = applyrules(cr, vrd)
rd = dictappend(rd, ar)
# Construct F90 module support
mr, wrap = f90mod_rules.buildhooks(m)
if wrap:
funcwrappers2.append(wrap)
ar = applyrules(mr, vrd)
rd = dictappend(rd, ar)
for u in um:
ar = use_rules.buildusevars(u, m['use'][u['name']])
rd = dictappend(rd, ar)
needs = cfuncs.get_needs()
code = {}
for n in needs.keys():
code[n] = []
for k in needs[n]:
c = ''
if k in cfuncs.includes0:
c = cfuncs.includes0[k]
elif k in cfuncs.includes:
c = cfuncs.includes[k]
elif k in cfuncs.userincludes:
c = cfuncs.userincludes[k]
elif k in cfuncs.typedefs:
c = cfuncs.typedefs[k]
elif k in cfuncs.typedefs_generated:
c = cfuncs.typedefs_generated[k]
elif k in cfuncs.cppmacros:
c = cfuncs.cppmacros[k]
elif k in cfuncs.cfuncs:
c = cfuncs.cfuncs[k]
elif k in cfuncs.callbacks:
c = cfuncs.callbacks[k]
elif k in cfuncs.f90modhooks:
c = cfuncs.f90modhooks[k]
elif k in cfuncs.commonhooks:
c = cfuncs.commonhooks[k]
else:
errmess('buildmodule: unknown need %s.\n' % (repr(k)))
continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
if ('_check' in r and r['_check'](m)) or ('_check' not in r):
ar = applyrules(r, vrd, m)
rd = dictappend(rd, ar)
ar = applyrules(module_rules, rd)
fn = os.path.join(options['buildpath'], vrd['coutput'])
ret['csrc'] = fn
f = open(fn, 'w')
f.write(ar['modulebody'].replace('\t', 2 * ' '))
f.close()
outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.rest')
f = open(fn, 'w')
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
f.close()
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' %
(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.tex')
ret['ltx'] = fn
f = open(fn, 'w')
f.write(
'%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
if 'shortlatex' not in options:
f.write(
'\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
f.close()
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' %
(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
f = open(wn, 'w')
f.write('C -*- fortran -*-\n')
f.write(
'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
f.write(
'C It contains Fortran 77 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
if l and l[0] == ' ':
while len(l) >= 66:
lines.append(l[:66] + '\n &')
l = l[66:]
lines.append(l + '\n')
else:
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
f.close()
outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn))
if funcwrappers2:
wn = os.path.join(
options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
ret['fsrc'] = wn
f = open(wn, 'w')
f.write('! -*- f90 -*-\n')
f.write(
'! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
f.write(
'! It contains Fortran 90 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
if len(l) > 72 and l[0] == ' ':
lines.append(l[:72] + '&\n &')
l = l[72:]
while len(l) > 66:
lines.append(l[:66] + '&\n &')
l = l[66:]
lines.append(l + '\n')
else:
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
f.close()
outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn))
return ret
################## Build C/API function #############
stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th',
6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'}
def buildapi(rout):
rout, wrap = func2subr.assubr(rout)
args, depargs = getargs2(rout)
capi_maps.depargs = depargs
var = rout['vars']
if ismoduleroutine(rout):
outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' %
(rout['modulename'], rout['name']))
else:
outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name']))
# Routine
vrd = capi_maps.routsign2map(rout)
rd = dictappend({}, vrd)
for r in rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar = applyrules(r, vrd, rout)
rd = dictappend(rd, ar)
# Args
nth, nthk = 0, 0
savevrd = {}
for a in args:
vrd = capi_maps.sign2map(a, var[a])
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth = nth + 1
vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument'
else:
nthk = nthk + 1
vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword'
else:
vrd['nth'] = 'hidden'
savevrd[a] = vrd
for r in _rules:
if '_depend' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
for a in depargs:
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
vrd = savevrd[a]
for r in _rules:
if '_depend' not in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
if 'check' in var[a]:
for c in var[a]['check']:
vrd['check'] = c
ar = applyrules(check_rules, vrd, var[a])
rd = dictappend(rd, ar)
if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign': rd['docsign'],
'docsignopt': rd['docsignopt'],
'docsignxa': rd['docsignxa']}))
optargs = stripcomma(replace('#docsignopt##docsignxa#',
{'docsignxa': rd['docsignxashort'],
'docsignopt': rd['docsignoptshort']}
))
if optargs == '':
rd['docsignatureshort'] = stripcomma(
replace('#docsign#', {'docsign': rd['docsign']}))
else:
rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]',
{'docsign': rd['docsign'],
'docsignopt': optargs,
})
rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_')
rd['latexdocsignatureshort'] = rd[
'latexdocsignatureshort'].replace(',', ', ')
cfs = stripcomma(replace('#callfortran##callfortranappend#', {
'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
if len(rd['callfortranappend']) > 1:
rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', {
'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
else:
rd['callcompaqfortran'] = cfs
rd['callfortran'] = cfs
if isinstance(rd['docreturn'], list):
rd['docreturn'] = stripcomma(
replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = '
rd['docstrsigns'] = []
rd['latexdocstrsigns'] = []
for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
k = 'latex' + k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
# Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720
if rd['keyformat'] or rd['xaformat']:
argformat = rd['argformat']
if isinstance(argformat, list):
argformat.append('|')
else:
assert isinstance(argformat, str), repr(
(argformat, type(argformat)))
rd['argformat'] += '|'
ar = applyrules(routine_rules, rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n' % (ar['docshort']))
else:
outmess('\t\t %s\n' % (ar['docshort']))
return ar, wrap
#################### EOF rules.py #######################
| 39.653794
| 212
| 0.548651
|
24c0b02d167fa64a185141bede0ecdd803cc193d
| 4,371
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
Qbase-Foundation/Testnet
|
fefcbd91eb2f0ac35c4c3c5b41243f2afccf194e
|
[
"MIT"
] | 6
|
2018-12-16T00:48:48.000Z
|
2021-10-16T19:17:30.000Z
|
contrib/seeds/generate-seeds.py
|
Qbase-Foundation/Testnet
|
fefcbd91eb2f0ac35c4c3c5b41243f2afccf194e
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
Qbase-Foundation/Testnet
|
fefcbd91eb2f0ac35c4c3c5b41243f2afccf194e
|
[
"MIT"
] | 2
|
2018-10-15T05:40:05.000Z
|
2021-05-20T18:14:57.000Z
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
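# Hedged sketch (not part of the original script and never called): what parse_spec()
# is expected to return for a few made-up entries in the formats listed in the module
# docstring, assuming the main-net default port 17817 used below.
def _parse_spec_examples():
    ipv4_host, ipv4_port = parse_spec('1.2.3.4', 17817)
    assert ipv4_host == pchIPv4 + bytearray([1, 2, 3, 4]) and ipv4_port == 17817
    ipv6_host, ipv6_port = parse_spec('[2001:db8::1]:8333', 17817)
    assert len(ipv6_host) == 16 and ipv6_port == 8333  # explicit port overrides the default
    le_host, _ = parse_spec('0x0100007f', 17817)  # old little-endian IPv4 form -> 127.0.0.1
    assert le_host == pchIPv4 + bytearray([127, 0, 0, 1])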
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef REDEN_CHAINPARAMSSEEDS_H\n')
g.write('#define REDEN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the qbase network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 17817)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 17717)
g.write('#endif // REDEN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.446043
| 98
| 0.581789
|
6602a91e2cb8c54312ef65b181b6033b114bf71d
| 834
|
py
|
Python
|
pymongoshell/mping.py
|
jdrumgoole/mongodbutils
|
9e74ce94e56f5b1a0eaa558c282b6c6659ebf1da
|
[
"Apache-2.0"
] | 2
|
2019-01-17T21:32:08.000Z
|
2019-01-17T22:14:41.000Z
|
pymongoshell/mping.py
|
jdrumgoole/mongodbutils
|
9e74ce94e56f5b1a0eaa558c282b6c6659ebf1da
|
[
"Apache-2.0"
] | 5
|
2021-04-20T20:55:02.000Z
|
2021-04-20T20:55:04.000Z
|
pymongoshell/mping.py
|
jdrumgoole/mongodbutils
|
9e74ce94e56f5b1a0eaa558c282b6c6659ebf1da
|
[
"Apache-2.0"
] | 1
|
2017-04-29T19:32:24.000Z
|
2017-04-29T19:32:24.000Z
|
"""
Author : joe@joedrumgoole.com
MPing : Ping a MongoDB server with an is_master command.
"""
import pymongo
from pymongo.errors import ConnectionFailure
from datetime import datetime
import pprint
import sys
if __name__ == "__main__":
arg = None
if len(sys.argv) > 1:
arg = sys.argv[1]
client = pymongo.MongoClient(host=arg)
try:
# The ismaster command is cheap and does not require auth.
start = datetime.utcnow()
doc = client.admin.command('ismaster')
end = datetime.utcnow()
duration = end - start
print(f"ismaster took : {duration}")
pprint.pprint(doc)
except ConnectionFailure:
end = datetime.utcnow()
print("Server not available")
duration = end - start
print(f"connection failure took : {duration}")
| 22.540541
| 66
| 0.640288
|
ade9b236d36171955404991fe28d9a3288402866
| 3,318
|
py
|
Python
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/_configuration.py
|
moovy2/azure-sdk-for-python
|
6b0495dc9917d47a7264f26cbd3221d43461a537
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/_configuration.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/_configuration.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class ApplicationInsightsManagementClientConfiguration(Configuration):
"""Configuration for ApplicationInsightsManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ApplicationInsightsManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2015-05-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-applicationinsights/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 46.083333
| 129
| 0.690175
|
98b0fe78010d93a4b7a5af536237d2516e55b877
| 5,591
|
py
|
Python
|
aoc/event2021/day24/solve.py
|
rjbatista/AoC
|
5c6ca4bcb376c24ec730eb12fd7044f5326ee473
|
[
"MIT"
] | null | null | null |
aoc/event2021/day24/solve.py
|
rjbatista/AoC
|
5c6ca4bcb376c24ec730eb12fd7044f5326ee473
|
[
"MIT"
] | null | null | null |
aoc/event2021/day24/solve.py
|
rjbatista/AoC
|
5c6ca4bcb376c24ec730eb12fd7044f5326ee473
|
[
"MIT"
] | null | null | null |
import re
########
# PART 1
class ALU:
_registers : list[int]
def __init__(self) -> None:
self._input = []
self._registers = [0] * 4
self.loaded_code = None
self._pattern = re.compile(r"^(inp|add|mul|div|mod|eql) ([w-z])(?: (?:(-?\d+)|([w-z])))?$")
def reset_registers(self):
self._registers = [0] * 4
return self
def __setitem__(self, reg: str, value: int) -> int:
self._registers[ord(reg) - ord('w')] = value
def __getitem__(self, reg: str) -> int:
return self._registers[ord(reg) - ord('w')]
def load(self, filename):
with open("event2021/day24/" + filename, "r") as file:
self.loaded_code = list(line.strip() for line in file.readlines())
def set_status(self, code, regs):
self.loaded_code = code[:]
self._registers = regs[:]
def load_and_run(self, filename, input = [], reset = True, debug = False):
self.load(filename)
self.run_code(input = input, reset = reset, debug = debug)
return self
def run_code(self, input = [], reset = True, debug = False):
if reset:
self.reset_registers()
self._input = input
for ln, instruction in enumerate(self.loaded_code):
match = self._pattern.match(instruction)
if match:
opcode = getattr(ALU, "_opcode_" + match[1])
arg1 = match[2]
arg2 = int(match[3]) if match[3] else match[4]
if arg2 is not None:
opcode(self, arg1, arg2)
else:
opcode(self, arg1)
if debug:
print(f"{match[1]}\t{arg1}\t{arg2 if arg2 is not None else ''}\twxyz={self}")
else:
raise RuntimeError("Error on line " + str(ln) + ": " + instruction)
return self
def __str__(self) -> str:
return str(self._registers) + str([self._registers[3] // (26*n) for n in range(1, 14)]) + str(self._registers[3] % 26)
def _opcode_inp(self, a):
"""
inp a - Read an input value and write it to variable a.
"""
self[a] = self._input.pop(0)
def _opcode_add(self, a, b):
"""
add a b - Add the value of a to the value of b, then store the result in variable a.
"""
self[a] += self[b] if isinstance(b, str) else b
def _opcode_mul(self, a, b):
"""
mul a b - Multiply the value of a by the value of b, then store the result in variable a.
"""
self[a] *= self[b] if isinstance(b, str) else b
def _opcode_div(self, a, b):
"""
div a b - Divide the value of a by the value of b, truncate the result to an integer,
then store the result in variable a.
(Here, "truncate" means to round the value toward zero.)
"""
self[a] //= self[b] if isinstance(b, str) else b
def _opcode_mod(self, a, b):
"""
mod a b - Divide the value of a by the value of b, then store the remainder in variable a.
(This is also called the modulo operation.)
"""
self[a] %= self[b] if isinstance(b, str) else b
def _opcode_eql(self, a, b):
"""
eql a b - If the value of a and b are equal, then store the value 1 in variable a.
Otherwise, store the value 0 in variable a.
"""
self[a] = 1 if self[a] == (self[b] if isinstance(b, str) else b) else 0
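# Hedged usage sketch (hypothetical three-instruction program, not the puzzle input;
# defined for illustration only and never called by the solver below).
def _alu_demo():
    alu = ALU()
    alu.loaded_code = ["inp w", "add x 7", "eql x w"]
    alu.run_code(input=[7])
    return alu['x']  # 1: x is set to 7 and then compared equal to the input digit in w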
def digits(s):
return [int(x) for x in s]
#print(digits("99999999999998"))
#print(alu.run_code(input=[9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8], debug=True))
def find_pairs():
alu = ALU()
alu.load("input.txt")
total_code = alu.loaded_code[:]
# break down each digit
inp_indexes = []
for i, instruction in enumerate(alu.loaded_code):
if instruction.startswith('inp'):
inp_indexes += [i]
x_value_pattern = re.compile(r"add x (-?\d+)")
y_value_pattern = re.compile(r"add y (-?\d+)")
stack = []
in_push = None
pairs = []
for idx, (start, end) in enumerate(zip(inp_indexes, inp_indexes[1:] + [len(alu.loaded_code)])):
for instr in total_code[start:end]:
if instr == "div z 1":
in_push = True
elif instr == "div z 26":
in_push = False
elif instr == "add y 25" or instr == "add y 1":
continue
elif in_push and (m := y_value_pattern.match(instr)):
stack.append((idx, int(m[1])))
elif not in_push and (m := x_value_pattern.match(instr)):
pairs.append((stack.pop(), (idx, int(m[1]))))
return pairs
def maximize(pairs):
digits = [9] * 14
for (pa, va), (pb, vb) in pairs:
while True:
v = digits[pa] + vb + va
if 1 <= v <= 9:
break
digits[pa] -= 1
digits[pb] = v
return "".join([str(x) for x in digits])
pairs = find_pairs()
answer = maximize(pairs)
print("Part 1 =", answer)
assert answer == "99298993199873" # check with accepted answer
########
# PART 2
def minimize(pairs):
digits = [1] * 14
for (pa, va), (pb, vb) in pairs:
while True:
v = digits[pa] + vb + va
if 1 <= v <= 9:
break
digits[pa] += 1
digits[pb] = v
return "".join([str(x) for x in digits])
answer = minimize(pairs)
print("Part 2 =", answer)
assert answer == "73181221197111" # check with accepted answer
| 27.406863
| 126
| 0.53443
|
0802c0eabb3e4e31fb532043496bfff4184d5fa8
| 1,205
|
py
|
Python
|
api/index.py
|
bramaudi/gold-price
|
a67328a917ed3eadeda5b968c2ad47566f6ba305
|
[
"MIT"
] | null | null | null |
api/index.py
|
bramaudi/gold-price
|
a67328a917ed3eadeda5b968c2ad47566f6ba305
|
[
"MIT"
] | null | null | null |
api/index.py
|
bramaudi/gold-price
|
a67328a917ed3eadeda5b968c2ad47566f6ba305
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from sanic import Sanic
from sanic.response import json
http = requests.get('https://harga-emas.org/')
soup = BeautifulSoup(http.content, 'html.parser')
def sort_type(kurs):
return {
"oz": kurs[0],
"gr": kurs[1],
"kg": kurs[2],
}
list_usd = []
list_kurs_dollar = []
list_idr = []
table = soup.find_all('table')[1]
for tr in table.find_all('tr'):
satuan = tr.select('td[align=left]')
if len(satuan):
# print(satuan[0].parent.find_all('td')[0]) # satuan
usd = satuan[0].parent.find_all('td')[1]
list_usd.append(usd.get_text())
kurs_dollar = satuan[0].parent.find_all('td')[2]
list_kurs_dollar.append(kurs_dollar.get_text())
idr = satuan[0].parent.find_all('td')[3]
list_idr.append(idr.get_text())
response = {
'usd': sort_type(list_usd),
'kurs_dollar': sort_type(list_kurs_dollar),
'idr': sort_type(list_idr),
}
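# Shape of the JSON this service returns (values are illustrative, not real quotes):
#   {"usd": {"oz": "...", "gr": "...", "kg": "..."},
#    "kurs_dollar": {"oz": "...", "gr": "...", "kg": "..."},
#    "idr": {"oz": "...", "gr": "...", "kg": "..."}}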
app = Sanic()
@app.route('/')
@app.route('/<path:path>')
async def index(request, path=""):
return json(
response,
headers={"Access-Control-Allow-Origin": "*"}
)
# app.run(host='0.0.0.0', port=8080)
| 23.173077
| 56
| 0.66639
|
52a2aa0210df9b0e3442289e4febb8c415560b5a
| 6,870
|
py
|
Python
|
Maverick/Cache.py
|
Lumin0ux/Maverick
|
4ca5c0d212f473f1687ae62bb520b48fcb817740
|
[
"MIT"
] | null | null | null |
Maverick/Cache.py
|
Lumin0ux/Maverick
|
4ca5c0d212f473f1687ae62bb520b48fcb817740
|
[
"MIT"
] | null | null | null |
Maverick/Cache.py
|
Lumin0ux/Maverick
|
4ca5c0d212f473f1687ae62bb520b48fcb817740
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Handle cache
"""
import os
import json
import shutil
from PIL import Image
from PIL import ImageFile
import urllib
from .Config import g_conf
from .Router import Router
from urllib.parse import urlparse
import urllib.request as request
from .Utils import unify_joinpath, print_color, Color, safe_read, safe_write, gen_hash
g_used_imgs = None
g_sizeinfo_cache = None
def quickQueryImgSize(src):
"""get size info by downloading small part of it
"""
try:
with request.urlopen(src) as file:
p = ImageFile.Parser()
while True:
data = file.read(1024)
if not data:
break
p.feed(data)
if p.image:
return list(p.image.size)
except BaseException as e:
print_color('fetch error: ' + src, Color.RED.value)
print_color(e, Color.RED.value)
return None
def cache_img(src, base_path):
"""Gen image cache
1. find image in cache dir
2. if src is remote URL, download it or call quickQueryImgSize
3. treat src as absolute path and find it
3. treat src as relative path and find it
"""
global g_used_imgs
global g_sizeinfo_cache
json_path = './tmp/used_imgs.json'
sizeinfo_json_path = './cached_imgs/sizeinfo.json'
cache_dir = './cached_imgs'
if g_used_imgs is None:
g_used_imgs = set(json.loads(safe_read(json_path) or '[]'))
if g_sizeinfo_cache is None:
g_sizeinfo_cache = json.loads(
safe_read(sizeinfo_json_path) or '{}')
def log_and_return(filename):
global g_used_imgs
global g_sizeinfo_cache
g_used_imgs = g_used_imgs | set([filename])
info = {
"src": '',
"width": -1,
"height": -1
}
# if enable jsDelivr CDN, add prefix
# if not, fallback (site_prefix) will be used
router = Router(g_conf)
static_prefix = router.gen_static_file_prefix()
info['src'] = "%sarchives/assets/%s" % (static_prefix, filename)
if filename in g_sizeinfo_cache: # if size info in cache
info['width'] = g_sizeinfo_cache[filename][0]
info['height'] = g_sizeinfo_cache[filename][1]
print_color("Sizeinfo hit cache: %s (%s, %s)" %
(src, info['width'], info['height']), Color.GREEN.value)
else:
try:
img = Image.open(unify_joinpath(cache_dir, filename))
info['width'] = img.size[0]
info['height'] = img.size[1]
print_color("Parsed sizeinfo from local: %s (%s, %s)" %
(src, info['width'], info['height']), Color.GREEN.value)
g_sizeinfo_cache[filename] = [img.size[0], img.size[1]]
except IOError:
print_color("Pars sizeinfo from local failed", Color.RED.value)
return info
src_md5 = gen_hash(src)
# find image in cache dir
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cached_imgs = [name for name in os.listdir(cache_dir)]
for name in cached_imgs:
if name.split('.')[0].lower() == src_md5.lower(): # in cache dir
# print_color("Image hit cache: " + src, Color.GREEN.value)
return log_and_return(name)
# if it is remote image
if src.startswith('http'):
if g_conf.fetch_remote_imgs:
# download and treat it as local image
try:
suffix = urlparse(src).path.split('.')[-1]
filename = '.'.join([src_md5, suffix])
proxy = request.ProxyHandler({})
opener = request.build_opener(proxy)
opener.addheaders = [('User-Agent',
r'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
r'AppleWebKit/537.36(KHTML, like Gecko) '
r'Chrome/78.0.3904.108 Safari/537.36')]
request.install_opener(opener)
print_color("Trying to download: " + src, Color.BLUE.value)
request.urlretrieve(
src, unify_joinpath(cache_dir, filename))
return log_and_return(filename)
except BaseException as e:
print_color('Fetch error: ' + src, Color.RED.value)
print_color(e, Color.RED.value)
return {
"src": src,
"width": -1,
"height": -1
}
else:
# download small part of it
info = {
"src": src,
"width": -1,
"height": -1
}
if src in g_sizeinfo_cache:
info['width'] = g_sizeinfo_cache[src][0]
info['height'] = g_sizeinfo_cache[src][1]
print_color("Sizeinfo hit cache: %s (%s, %s)" %
(src, info['width'], info['height']), Color.GREEN.value)
else:
print_color("Trying to fetch size of "+src, Color.BLUE.value)
size = quickQueryImgSize(src)
if not size is None:
info['width'] = size[0]
info['height'] = size[1]
g_sizeinfo_cache[src] = size
print_color("Size fetched: (%s, %s)" % (
info['width'], info['height']), Color.BLUE.value)
return info
# treat src as absolute path
if os.path.exists(src):
print_color("Image found at local: " + src, Color.GREEN.value)
filename = src_md5 + '.' + src.split('.')[-1]
shutil.copyfile(src, unify_joinpath(cache_dir, filename))
return log_and_return(filename)
# treat src as relative path to Markdown file
if os.path.exists(unify_joinpath(base_path, src)):
print_color("Image found at local: " + src, Color.GREEN.value)
filename = src_md5 + '.' + src.split('.')[-1]
shutil.copyfile(unify_joinpath(base_path, src),
unify_joinpath(cache_dir, filename))
return log_and_return(filename)
return {
"src": src,
"width": -1,
"height": -1
}
def dump_log():
global g_used_imgs
global g_sizeinfo_cache
json_path = './tmp/used_imgs.json'
sizeinfo_json_path = './cached_imgs/sizeinfo.json'
safe_write(json_path, json.dumps(list(g_used_imgs or []), indent=1))
safe_write(sizeinfo_json_path, json.dumps(
g_sizeinfo_cache or {}, indent=1))
| 35.230769
| 87
| 0.534643
|
7c2a282222a6a50bcc7ab6311634942e0f0dc963
| 1,328
|
py
|
Python
|
setup.py
|
galamit86/nimbletl
|
7bee9894b45143f315b1a98d70d62c356fb6d70f
|
[
"MIT"
] | 3
|
2020-05-13T18:19:40.000Z
|
2020-07-02T18:53:52.000Z
|
setup.py
|
galamit86/nimbletl
|
7bee9894b45143f315b1a98d70d62c356fb6d70f
|
[
"MIT"
] | 7
|
2020-05-16T09:55:14.000Z
|
2020-08-25T10:54:59.000Z
|
setup.py
|
dkapitan/nimbletl
|
7bee9894b45143f315b1a98d70d62c356fb6d70f
|
[
"MIT"
] | 1
|
2020-11-09T10:54:22.000Z
|
2020-11-09T10:54:22.000Z
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["prefect>=0.11.0"]
setup_requirements = [
"pytest-runner",
]
test_requirements = [
"pytest>=3",
]
setup(
author="Daniel Kapitan",
author_email="daniel@kapitan.net",
python_requires=">=3.7",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Lightweight Python ETL toolkit using Prefect.",
install_requires=requirements,
license="MIT license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="nimbletl",
name="nimbletl",
packages=find_packages(include=["nimbletl", "nimbletl.*"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/dkapitan/nimbletl",
version="0.1.0",
zip_safe=False,
)
| 26.039216
| 64
| 0.652861
|
39d2dccbd32910ec74fc30e55e404aaccca51df2
| 1,044
|
py
|
Python
|
isi_sdk_8_1_1/test/test_node_drives_purposelist_node_purpose.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_1_1/test/test_node_drives_purposelist_node_purpose.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_1_1/test/test_node_drives_purposelist_node_purpose.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.node_drives_purposelist_node_purpose import NodeDrivesPurposelistNodePurpose # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestNodeDrivesPurposelistNodePurpose(unittest.TestCase):
"""NodeDrivesPurposelistNodePurpose unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNodeDrivesPurposelistNodePurpose(self):
"""Test NodeDrivesPurposelistNodePurpose"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.node_drives_purposelist_node_purpose.NodeDrivesPurposelistNodePurpose() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.463415
| 124
| 0.745211
|
5b4f764194628fb28c637cc1d12d42f57938c162
| 299
|
py
|
Python
|
app/api/errors/repository.py
|
karriva/myna_test
|
1def78a0c11b91a5d28c216333eec6ffe39e6fbb
|
[
"MIT"
] | null | null | null |
app/api/errors/repository.py
|
karriva/myna_test
|
1def78a0c11b91a5d28c216333eec6ffe39e6fbb
|
[
"MIT"
] | 1
|
2022-01-17T08:18:39.000Z
|
2022-01-17T08:18:39.000Z
|
app/api/errors/repository.py
|
karriva/myna_test
|
1def78a0c11b91a5d28c216333eec6ffe39e6fbb
|
[
"MIT"
] | null | null | null |
from .common import SchemaEnhancedHTTPException
class RepositoryHTTPException(SchemaEnhancedHTTPException):
"""
    Raised when the repository raises an error.
"""
default_status_code = 400
default_error_kind = 'repository'
default_error_code = 1200
message = 'Repository error.'
| 24.916667
| 59
| 0.745819
|
fd1610416cc70916e4af17427d53a1319dec6442
| 1,531
|
py
|
Python
|
test/functional/uacomment.py
|
arcana-coin/arcana-core
|
b5d6d71968d1f19c42dc3f351aff17800da5af36
|
[
"MIT"
] | null | null | null |
test/functional/uacomment.py
|
arcana-coin/arcana-core
|
b5d6d71968d1f19c42dc3f351aff17800da5af36
|
[
"MIT"
] | null | null | null |
test/functional/uacomment.py
|
arcana-coin/arcana-core
|
b5d6d71968d1f19c42dc3f351aff17800da5af36
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bytcoyn Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class UacommentTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test multiple -uacomment")
test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
assert_equal(test_uacomment, "(testnode0)")
self.restart_node(0, ["-uacomment=foo"])
foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
assert_equal(foo_uacomment, "(testnode0; foo)")
self.log.info("test -uacomment max length")
self.stop_node(0)
expected = "Total length of network version string (286) exceeds maximum length (256). Reduce the number or size of uacomments."
self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected)
self.log.info("test -uacomment unsafe characters")
for unsafe_char in ['/', ':', '(', ')']:
expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters"
self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected)
if __name__ == '__main__':
UacommentTest().main()
| 42.527778
| 136
| 0.683867
|
a60967a746ba9aad03766a088b790b26dc4b1921
| 4,611
|
py
|
Python
|
gymnasiums/tests/tests_gymnasium_create_view.py
|
hbuyse/dj-gymnasiums
|
39f590dc703eec01c753ea54d7f4afd06f81a582
|
[
"MIT"
] | null | null | null |
gymnasiums/tests/tests_gymnasium_create_view.py
|
hbuyse/dj-gymnasiums
|
39f590dc703eec01c753ea54d7f4afd06f81a582
|
[
"MIT"
] | null | null | null |
gymnasiums/tests/tests_gymnasium_create_view.py
|
hbuyse/dj-gymnasiums
|
39f590dc703eec01c753ea54d7f4afd06f81a582
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
"""Tests the views."""
# Django
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
# Current django project
from gymnasiums.models import Gymnasium
class TestGymnasiumCreateViewAsAnonymous(TestCase):
"""Tests."""
def test_get(self):
"""Tests."""
r = self.client.get(reverse('gymnasiums:create'))
self.assertEqual(r.status_code, 403)
def test_post(self):
"""Tests."""
d = {
'name': 'Watteau',
'address': '37 rue Lequesne',
'city': 'Nogent-Sur-Marne',
'zip_code': '94130',
'phone': '0100000000',
'surface': '123',
'capacity': '456',
}
r = self.client.post(reverse('gymnasiums:create'), d)
self.assertEqual(r.status_code, 403)
class TestGymnasiumCreateViewAsLogged(TestCase):
"""Tests."""
def setUp(self):
"""Setup for al the following tests."""
self.dict = {
'username': "hbuyse",
'password': "usermodel",
'first_name': "Henri",
'last_name': "Buyse"
}
get_user_model().objects.create_user(**self.dict)
def test_get(self):
"""Tests."""
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.get(reverse('gymnasiums:create'))
self.assertEqual(r.status_code, 403)
def test_post(self):
"""Tests."""
d = {
'name': 'Watteau',
'address': '37 rue Lequesne',
'city': 'Nogent-Sur-Marne',
'zip_code': '94130',
'phone': '0100000000',
'surface': '123',
'capacity': '456',
}
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.post(reverse('gymnasiums:create'), d)
self.assertEqual(r.status_code, 403)
class TestGymnasiumCreateViewAsStaff(TestCase):
"""Tests."""
def setUp(self):
"""Setup for al the following tests."""
self.dict = {
'username': "hbuyse",
'password': "usermodel",
'first_name': "Henri",
'last_name': "Buyse",
'is_staff': True
}
get_user_model().objects.create_user(**self.dict)
def test_get(self):
"""Tests."""
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.get(reverse('gymnasiums:create'))
self.assertEqual(r.status_code, 200)
def test_post(self):
"""Tests."""
d = {
'name': 'Watteau',
'address': '37 rue Lequesne',
'city': 'Nogent-Sur-Marne',
'zip_code': '94130',
'phone': '0100000000',
'surface': '123',
'capacity': '456',
}
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.post(reverse('gymnasiums:create'), d)
self.assertEqual(r.status_code, 302)
g = Gymnasium.objects.last()
self.assertEqual("/{}".format(g.id), r.url)
class TestGymnasiumCreateViewAsSuperuser(TestCase):
"""Tests."""
def setUp(self):
"""Setup for al the following tests."""
self.dict = {
'username': "hbuyse",
'password': "usermodel",
'first_name': "Henri",
'last_name': "Buyse",
'email': 'henri.buyse@gmail.com'
}
get_user_model().objects.create_superuser(**self.dict)
def test_get(self):
"""Tests."""
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.get(reverse('gymnasiums:create'))
self.assertEqual(r.status_code, 200)
def test_post(self):
"""Tests."""
d = {
'name': 'Watteau',
'address': '37 rue Lequesne',
'city': 'Nogent-Sur-Marne',
'zip_code': '94130',
'phone': '0100000000',
'surface': '123',
'capacity': '456',
}
self.assertTrue(self.client.login(username=self.dict['username'], password=self.dict['password']))
r = self.client.post(reverse('gymnasiums:create'), d)
self.assertEqual(r.status_code, 302)
g = Gymnasium.objects.last()
self.assertEqual("/{}".format(g.id), r.url)
| 29.369427
| 106
| 0.551074
|
e79c278ba8194bc0d1cb08d8038a358dca650ea4
| 8,340
|
py
|
Python
|
angrmanagement/ui/widgets/qfeature_map.py
|
Kyle-Kyle/angr-management
|
6ea65f19d813be510a38f06510b2b2148a6b5000
|
[
"BSD-2-Clause"
] | 2
|
2022-01-23T21:43:54.000Z
|
2022-02-02T08:20:20.000Z
|
angrmanagement/ui/widgets/qfeature_map.py
|
Kyle-Kyle/angr-management
|
6ea65f19d813be510a38f06510b2b2148a6b5000
|
[
"BSD-2-Clause"
] | 1
|
2021-12-04T01:11:46.000Z
|
2021-12-04T01:11:46.000Z
|
angrmanagement/ui/widgets/qfeature_map.py
|
Kyle-Kyle/angr-management
|
6ea65f19d813be510a38f06510b2b2148a6b5000
|
[
"BSD-2-Clause"
] | 1
|
2021-05-17T05:46:19.000Z
|
2021-05-17T05:46:19.000Z
|
from sortedcontainers import SortedDict
from PySide2.QtWidgets import QWidget, QHBoxLayout, QGraphicsScene, QSizePolicy, QGraphicsSceneMouseEvent
from PySide2.QtGui import QPaintEvent, QPainter, QBrush, QPen, QPolygonF
from PySide2.QtCore import Qt, QRectF, QSize, QPointF
import cle
from angr.block import Block
from angr.analyses.cfg.cfb import Unknown
from ...config import Conf
from ...data.object_container import ObjectContainer
from .qgraph import QZoomableDraggableGraphicsView
import logging
l = logging.getLogger(name=__name__)
class Orientation:
Vertical = 0
Horizontal = 1
class QClickableGraphicsScene(QGraphicsScene):
def __init__(self, feature_map):
"""
:param QFeatureMap feature_map:
"""
super().__init__()
self._feature_map = feature_map
def mousePressEvent(self, mouseEvent):
"""
:param QGraphicsSceneMouseEvent mouseEvent:
:return:
"""
if mouseEvent.button() == Qt.LeftButton:
pos = mouseEvent.scenePos()
offset = pos.x()
self._feature_map.select_offset(offset)
class QFeatureMapView(QZoomableDraggableGraphicsView):
def __init__(self, parent=None):
super().__init__(parent)
self._scene = QClickableGraphicsScene(parent)
self.setScene(self._scene)
class QFeatureMap(QWidget):
"""
Byte-level map of the memory space.
"""
def __init__(self, disasm_view, parent=None):
super().__init__(parent)
self.disasm_view = disasm_view
self.workspace = disasm_view.workspace
self.instance = self.workspace.instance
self.orientation = Orientation.Vertical
# widgets
self.view = None # type: QFeatureMapView
# items
self._insn_indicators = [ ]
# data instance
self.addr = ObjectContainer(None, name='The current address of the Feature Map.')
# cached values
self._addr_to_region = SortedDict()
self._regionaddr_to_offset = SortedDict()
self._offset_to_regionaddr = SortedDict()
self._total_size = None
self._regions_painted = False
self._init_widgets()
self._register_events()
def sizeHint(self):
return QSize(25, 25)
#
# Public methods
#
def refresh(self):
if self.view is None:
return
if not self._regions_painted:
self._regions_painted = self._paint_regions()
def select_offset(self, offset):
addr = self._get_addr_from_pos(offset)
if addr is None:
return
self.addr.am_obj = addr
self.addr.am_event()
#
# Private methods
#
def _init_widgets(self):
self.view = QFeatureMapView(self)
layout = QHBoxLayout()
layout.addWidget(self.view)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
def _register_events(self):
self.disasm_view.infodock.selected_insns.am_subscribe(self._paint_insn_indicators)
def _paint_regions(self):
cfb = self.instance.cfb_container.am_obj
if cfb is None:
return False
# colors
func_color = Conf.feature_map_color_regular_function
data_color = Conf.feature_map_color_data
unknown_color = Conf.feature_map_color_unknown
delimiter_color = Conf.feature_map_color_delimiter
if self._total_size is None:
# calculate the total number of bytes
b = 0
self._addr_to_region.clear()
self._regionaddr_to_offset.clear()
for mr in cfb.regions:
self._addr_to_region[mr.addr] = mr
self._regionaddr_to_offset[mr.addr] = b
self._offset_to_regionaddr[b] = mr.addr
b += self._adjust_region_size(mr)
self._total_size = b
# iterate through all items and draw the image
offset = 0
total_width = self.width()
current_region = None
height = self.height()
l.debug("total width %d", total_width)
for addr, obj in cfb.ceiling_items():
# are we in a new region?
new_region = False
if current_region is None or not (current_region.addr <= addr < current_region.addr + current_region.size):
try:
current_region_addr = next(self._addr_to_region.irange(maximum=addr, reverse=True))
except StopIteration:
# FIXME: it's not within any of the known regions
# we should fix this in the future. for now, let's make sure it does not crash
continue
current_region = self._addr_to_region[current_region_addr]
new_region = True
# adjust size
adjusted_region_size = self._adjust_region_size(current_region)
adjusted_size = min(obj.size, current_region.addr + adjusted_region_size - addr)
if adjusted_size <= 0:
continue
pos = offset * total_width // self._total_size
length = adjusted_size * total_width // self._total_size
offset += adjusted_size
# draw a rectangle
if isinstance(obj, Unknown):
pen = QPen(data_color)
brush = QBrush(data_color)
elif isinstance(obj, Block):
# TODO: Check if it belongs to a function or not
pen = QPen(func_color)
brush = QBrush(func_color)
else:
pen = QPen(unknown_color)
brush = QBrush(unknown_color)
rect = QRectF(pos, 0, length, height)
self.view._scene.addRect(rect, pen, brush)
# if at the beginning of a new region, draw a line
if new_region:
pen = QPen(delimiter_color)
self.view._scene.addLine(pos, 0, pos, height, pen)
return True
def _adjust_region_size(self, memory_region):
if isinstance(memory_region.object, (cle.ExternObject, cle.TLSObject, cle.KernelObject)):
# Draw unnecessary objects smaller
return 80
else:
l.debug("memory_region.size: %x memory_region.object: %s", memory_region.size, memory_region.object)
return memory_region.size
def _get_pos_from_addr(self, addr):
# find the region it belongs to
try:
mr_base = next(self._addr_to_region.irange(maximum=addr, reverse=True))
except StopIteration:
return None
# get the base offset of that region
base_offset = self._regionaddr_to_offset[mr_base]
offset = base_offset + addr - mr_base
return offset * self.width() // self._total_size
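    # Worked example (hypothetical numbers): with self._total_size == 0x1000 bytes and a
    # widget width of 800 px, an address 0x400 bytes into the mapped space lands at
    # 0x400 * 800 // 0x1000 == 200 px; _get_addr_from_pos(200) maps that pixel back to
    # the owning region's base address plus 0x400.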
def _get_addr_from_pos(self, pos):
offset = int(pos * self._total_size // self.width())
try:
base_offset = next(self._offset_to_regionaddr.irange(maximum=offset, reverse=True))
except StopIteration:
return None
region_addr = self._offset_to_regionaddr[base_offset]
return region_addr + offset - base_offset
def _paint_insn_indicators(self, **kwargs):
scene = self.view.scene() # type: QGraphicsScene
for item in self._insn_indicators:
scene.removeItem(item)
self._insn_indicators.clear()
for selected_insn_addr in self.disasm_view.infodock.selected_insns:
pos = self._get_pos_from_addr(selected_insn_addr)
if pos is None:
continue
pos -= 1 # this is the top-left x coordinate of our arrow body (the rectangle)
pen = QPen(Qt.yellow)
brush = QBrush(Qt.yellow)
rect = QRectF(pos, 0, 2, 5)
# rectangle
item = scene.addRect(rect, pen, brush)
self._insn_indicators.append(item)
# triangle
triangle = QPolygonF()
triangle.append(QPointF(pos - 1, 5))
triangle.append(QPointF(pos + 3, 5))
triangle.append(QPointF(pos + 1, 7))
triangle.append(QPointF(pos - 1, 5))
item = scene.addPolygon(triangle, pen, brush)
self._insn_indicators.append(item)
| 31.353383
| 119
| 0.614269
|
b88f79af369a2590db24e82d3ac36b2d0e6a75fb
| 378
|
py
|
Python
|
examples/simple_requests.py
|
Coffee-Meets-Bagel/python-leanplum
|
41a7a295488ac23dd44da65d511ce8313326b26f
|
[
"MIT"
] | 3
|
2018-07-17T17:05:05.000Z
|
2019-12-16T20:11:52.000Z
|
examples/simple_requests.py
|
Coffee-Meets-Bagel/python-leanplum
|
41a7a295488ac23dd44da65d511ce8313326b26f
|
[
"MIT"
] | 1
|
2021-08-23T16:02:57.000Z
|
2021-10-04T15:40:41.000Z
|
examples/simple_requests.py
|
Coffee-Meets-Bagel/python-leanplum
|
41a7a295488ac23dd44da65d511ce8313326b26f
|
[
"MIT"
] | null | null | null |
from leanplum.client import Client
APP_ID = "your-app-id"
CLIENT_KEY = "your-client-key"
client = Client(APP_ID, CLIENT_KEY)
client.users.track(133700, event="Joined Faction", params={"Faction Name": "Rebels"})
client.users.advance(133700, state="Member", params={"Membership": "Rebel Scum"})
client.users.set_user_attributes(133700, {"email": "wookie@milleniumfalcon.com"})
| 34.363636
| 85
| 0.751323
|
faa67371860dcd471fac2a2528918c7e01792d0f
| 2,192
|
py
|
Python
|
fund/fund_annouce.py
|
cogitate3/stock
|
bba986cbbb17de9f424c3b5417a17d1bb1204403
|
[
"BSD-3-Clause"
] | 3,401
|
2016-05-20T02:49:18.000Z
|
2022-03-31T07:02:05.000Z
|
fund/fund_annouce.py
|
sulongniao/stock
|
bba986cbbb17de9f424c3b5417a17d1bb1204403
|
[
"BSD-3-Clause"
] | 23
|
2017-06-09T14:18:57.000Z
|
2022-01-07T10:38:57.000Z
|
fund/fund_annouce.py
|
sulongniao/stock
|
bba986cbbb17de9f424c3b5417a17d1bb1204403
|
[
"BSD-3-Clause"
] | 947
|
2016-08-23T14:51:46.000Z
|
2022-03-31T07:02:10.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2021/3/19 0:04
# @File : fund_annouce.py
# @Author : Rocky C@www.30daydo.com
# Fund announcement data
# http://fund.szse.cn/api/disc/info/find/tannInfo?random=0.5044519418668192&type=2&pageSize=30&pageNum=3
import datetime
import math
import sys
sys.path.append('..')
from common.BaseService import BaseService
class FundAnnouce(BaseService):
def __init__(self):
super(FundAnnouce, self).__init__('../log/fund_annouce.log')
self.PAGE_SIZE=30
self.base_url = 'http://fund.szse.cn/api/disc/info/find/tannInfo?type=2&pageSize={}&pageNum={}'
# def get(self, url, _json=False, binary=False, retry=5):
@property
def headers(self):
_header = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Type": "application/json",
"Host": "fund.szse.cn",
"Pragma": "no-cache",
"Referer": "http://fund.szse.cn/disclosurelist/index.html",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36",
"X-Request-Type": "ajax",
"X-Requested-With": "XMLHttpRequest",
}
return _header
def get_page(self):
content = self.get(self.base_url.format(self.PAGE_SIZE,1), _json=True)
announceCount=content['announceCount']
total_page = math.ceil(announceCount/self.PAGE_SIZE)
return total_page
def run(self):
total_page=self.get_page()
if total_page<1:
self.logger.info('empty content')
return
        for page in range(1, total_page + 1):
            content = self.get(self.base_url.format(self.PAGE_SIZE, page), _json=True)
self.parse(content)
def parse(self, content):
for item in content.get('data'):
item['crawltime']=datetime.datetime.now()
def main():
app = FundAnnouce()
app.run()
if __name__ == '__main__':
main()
| 31.768116
| 142
| 0.604015
|
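FundAnnouce derives its page count with `math.ceil(announceCount / PAGE_SIZE)` and then requests each page in turn. A stripped-down sketch of that pagination, using `requests` directly instead of the project's `BaseService.get` wrapper (an assumption for illustration; the endpoint may also require the headers shown above), is:

# Pagination sketch for the endpoint above, assuming plain `requests` instead of
# the project's BaseService wrapper; the field names follow the code above.
import math
import requests

PAGE_SIZE = 30
BASE_URL = 'http://fund.szse.cn/api/disc/info/find/tannInfo?type=2&pageSize={}&pageNum={}'

def iter_announcements():
    # First request only to learn the total count, then walk every page.
    first = requests.get(BASE_URL.format(PAGE_SIZE, 1)).json()
    total_pages = math.ceil(first['announceCount'] / PAGE_SIZE)
    for page in range(1, total_pages + 1):
        payload = requests.get(BASE_URL.format(PAGE_SIZE, page)).json()
        for item in payload.get('data') or []:
            yield item

# Example usage (may be slow for many pages):
# announcements = list(iter_announcements())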
2619bbbbf7135369dc0cd5b0d887051197e14377
| 13,988
|
py
|
Python
|
data/KVR/read_data_for_tree.py
|
ElliottYan/KB-Chat
|
ed39d0c95756008668a8a3901a9ed0db827c32d1
|
[
"MIT"
] | 1
|
2019-03-10T15:29:18.000Z
|
2019-03-10T15:29:18.000Z
|
data/KVR/read_data_for_tree.py
|
ElliottYan/KB-Chat
|
ed39d0c95756008668a8a3901a9ed0db827c32d1
|
[
"MIT"
] | null | null | null |
data/KVR/read_data_for_tree.py
|
ElliottYan/KB-Chat
|
ed39d0c95756008668a8a3901a9ed0db827c32d1
|
[
"MIT"
] | null | null | null |
import json
from nltk import wordpunct_tokenize as tokenizer
import argparse
import pickle
import os
import pdb
from utils.structure import Node
def entity_replace(temp, bot, user, names={}):
# change poi, location, event first
global_rp = {
"pf changs": "p_.f_._changs",
"p f changs": "p_.f_._changs",
"'": "",
"restaurants": "restaurant",
"activities": "activity",
"appointments": "appointment",
"doctors": "doctor",
"doctor s": "doctor",
"optometrist s": "optometrist",
"conferences": "conference",
"meetings": "meeting",
"labs": "lab",
"stores": "store",
"stops": "stop",
"centers": "center",
"garages": "garage",
"stations": "station",
"hospitals": "hospital"
}
for grp in global_rp.keys():
bot = bot.replace(grp, global_rp[grp])
user = user.replace(grp, global_rp[grp])
    # note: this substring-based replacement may not be an exact match
for name in names.keys():
if name in bot:
bot = bot.replace(name, names[name])
if name in user:
user = user.replace(name, names[name])
for e in temp:
for wo in e.keys():
inde = bot.find(wo)
if (inde != -1):
bot = bot.replace(wo, e[wo])
inde = user.find(wo)
if (inde != -1):
user = user.replace(wo, e[wo])
return bot, user
def cleaner(token_array):
new_token_array = []
for idx, token in enumerate(token_array):
temp = token
if token == ".." or token == "." or token == "...": continue
        # merge a digit token followed by "am"/"pm" into a single token (e.g. "5 pm" -> "5pm")
if (token == "am" or token == "pm") and token_array[idx - 1].isdigit():
new_token_array.pop()
new_token_array.append(token_array[idx - 1] + token)
continue
if token == ":),": continue
if token == "avenue": temp = "ave"
if token == "heavey" and "traffic" in token_array[idx + 1]: temp = "heavy"
if token == "heave" and "traffic" in token_array[idx + 1]: temp = "heavy"
if token == "'": continue
        # append "f" to temperature ranges like "20 - 30" so both ends read "20f - 30f"
if token == "-" and "0" in token_array[idx - 1]:
new_token_array.pop()
new_token_array.append(token_array[idx - 1] + "f")
if "f" not in token_array[idx + 1]:
token_array[idx + 1] = token_array[idx + 1] + "f"
new_token_array.append(temp)
return new_token_array
def main(root_path):
parser = argparse.ArgumentParser(description='')
parser.add_argument('--json', dest='json',
default='kvret_train_public.json',
help='process json file')
args = parser.parse_args()
task = args.json.split('_')[1]
with open(os.path.join(root_path, args.json)) as f:
dialogues = json.load(f)
with open(os.path.join(root_path, 'kvret_entities.json')) as f:
entities_dict = json.load(f)
# drop poi and poi_type here.
global_kb_type = ['distance', 'traffic_info', 'location', 'weather_attribute', 'temperature', "weekly_time",
'event', 'time', 'date', 'party', 'room', 'agenda']
global_temp = []
di = {}
# connect infos with '_' and map from original str to str with '_'
for e in global_kb_type:
for p in map(lambda x: str(x).lower(), entities_dict[e]):
if "_" in p and p.replace("_", " ") != p:
di[p.replace("_", " ")] = p
else:
if p != p.replace(" ", "_"):
di[p] = p.replace(" ", "_")
global_temp.append(di)
example_kbs = []
for d in dialogues:
roots = []
if (d['scenario']['task']['intent'] == "navigate"): # "schedule" "navigate"
print("#navigate#")
temp = []
names = {}
# iterate through all kb infos.
for el in d['scenario']['kb']['items']:
poi = " ".join(tokenizer(el['poi'].replace("'", " "))).replace(" ", "_").lower()
slots = ['poi', 'distance', 'traffic_info', 'poi_type', 'address']
                # remove "'" and convert to lower case
for slot in slots:
el[slot] = " ".join(tokenizer(el[slot].replace("'", " "))).lower()
names[el['poi']] = poi
di = {
el['distance']: el['distance'].replace(" ", "_"),
el['traffic_info']: el['traffic_info'].replace(" ", "_"),
el['poi_type']: el['poi_type'].replace(" ", "_"),
el['address']: el['address'].replace(" ", "_"),
}
print(
"0 " + di[el['distance']] + " " + di[el['traffic_info']] + " " + di[el['poi_type']] + " poi " + poi)
print("0 " + poi + " distance " + di[el['distance']])
print("0 " + poi + " traffic_info " + di[el['traffic_info']])
print("0 " + poi + " poi_type " + di[el['poi_type']])
print("0 " + poi + " address " + di[el['address']])
temp.append(di)
# construct tree root for each kb item
root = Node(poi, 'poi', layer=0)
                # skip the poi slot itself (it is already the root)
for slot in slots[1:]:
root.children.append(Node(di[el[slot]], slot, layer=1))
roots.append(root)
            # used later for entity matching
temp += global_temp
# drop last one.
if (len(d['dialogue']) % 2 != 0):
d['dialogue'].pop()
j = 1
for i in range(0, len(d['dialogue']), 2):
user = " ".join(cleaner(tokenizer(str(d['dialogue'][i]['data']['utterance']).lower())))
bot = " ".join(cleaner(tokenizer(str(d['dialogue'][i + 1]['data']['utterance']).lower())))
# replace entity names with names joined by "_"
bot, user = entity_replace(temp, bot, user, names)
navigation = global_kb_type # ['distance','traffic_info']
nav_poi = ['address', 'poi', 'type']
gold_entity = []
for key in bot.split(' '):
for e in navigation:
for p in map(lambda x: str(x).lower(), entities_dict[e]):
if (key == p):
gold_entity.append(key)
elif (key == str(p).replace(" ", "_")):
gold_entity.append(key)
for e in entities_dict['poi']:
for p in nav_poi:
if (key == str(e[p]).lower()):
gold_entity.append(key)
elif (key == str(e[p]).lower().replace(" ", "_")):
gold_entity.append(key)
# gold entity for each turn of dialogue.
gold_entity = list(set(gold_entity))
if bot != "" and user != "":
print(str(j) + " " + user + '\t' + bot + '\t' + str(gold_entity))
j += 1
print("")
elif (d['scenario']['task']['intent'] == "weather"): # "weather"
print("#weather#")
temp = []
j = 1
print("0 today " + d['scenario']['kb']['items'][0]["today"])
today = d['scenario']['kb']['items'][0]["today"]
for el in d['scenario']['kb']['items']:
for el_key in el.keys():
el[el_key] = " ".join(tokenizer(el[el_key])).lower()
loc = el['location'].replace(" ", "_")
di = {el['location']: loc}
temp.append(di)
days = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]
for day in days:
print("0 " + loc + " " + day + " " + el[day].split(',')[0].rstrip().replace(" ", "_"))
print("0 " + loc + " " + day + " " + el[day].split(',')[1].split(" ")[1] + " " +
el[day].split(',')[1].split(" ")[3])
print("0 " + loc + " " + day + " " + el[day].split(',')[2].split(" ")[1] + " " +
el[day].split(',')[2].split(" ")[3])
# construct tree root for each kb item
# root = Node(loc, 'location', layer=0)
slots = ['weather', 'high', 'low']
for day in days:
root = Node(loc, 'location', layer=0)
'''
tmp = Node(el[day], day, layer=1)
val = el[day]
splits = [item.strip() for item in val.split(',')]
tmp.children.append(Node(splits[0], 'weather', layer=2))
tmp.children.append(Node(splits[1], splits[1].split()[0], layer=2))
tmp.children.append(Node(splits[2], splits[2].split()[0], layer=2))
root.children.append(tmp)
'''
# change weather to 1-layer tree.
val = el[day]
splits = [item.strip() for item in val.split(',')]
root.children.append(Node(day, 'date', layer=1))
                    # finer-grained children for the high/low temperature values
root.children.append(Node(splits[1], splits[1].split()[0], layer=1))
root.children.append(Node(splits[2], splits[2].split()[0], layer=1))
                    # the "today" flag is missing in the original dataset, so add it here
if today == day:
root.children.append(Node('yes', 'today', layer=1))
else:
root.children.append(Node('no', 'today', layer=1))
roots.append(root)
temp += global_temp
if (len(d['dialogue']) % 2 != 0):
d['dialogue'].pop()
for i in range(0, len(d['dialogue']), 2):
user = " ".join(cleaner(tokenizer(str(d['dialogue'][i]['data']['utterance']).lower())))
bot = " ".join(cleaner(tokenizer(str(d['dialogue'][i + 1]['data']['utterance']).lower())))
bot, user = entity_replace(temp, bot, user)
weather = global_kb_type # ['location', 'weather_attribute','temperature',"weekly_time"]
gold_entity = []
for key in bot.split(' '):
for e in weather:
for p in map(lambda x: str(x).lower(), entities_dict[e]):
if (key == p):
gold_entity.append(key)
elif (key == str(p).replace(" ", "_")):
gold_entity.append(key)
gold_entity = list(set(gold_entity))
if bot != "" and user != "":
print(str(j) + " " + user + '\t' + bot + '\t' + str(gold_entity))
j += 1
print("")
if (d['scenario']['task']['intent'] == "schedule"): # "schedule"
print("#schedule#")
temp = []
names = {}
j = 1
            # for each kb item
if (d['scenario']['kb']['items'] != None):
for el in d['scenario']['kb']['items']:
for el_key in el.keys():
el[el_key] = " ".join(tokenizer(el[el_key])).lower()
ev = el['event'].replace(" ", "_")
names[el['event']] = ev
slots = ['time', 'date', 'party', 'room', 'agenda']
di = {}
for slot in slots:
if el[slot] == "-":
continue
if slot == "time":
print("0 " + ev + " " + slot + " " + el[slot].replace(" ", ""))
di[el[slot]] = el[slot].replace(" ", "")
else:
print("0 " + ev + " " + slot + " " + el[slot].replace(" ", "_"))
di[el[slot]] = el[slot].replace(" ", "_")
temp.append(di)
root = Node(ev, 'event', layer=0)
for slot in slots:
tmp = Node(el[slot], slot, layer=1)
root.children.append(tmp)
roots.append(root)
temp += global_temp
if (len(d['dialogue']) % 2 != 0):
d['dialogue'].pop()
for i in range(0, len(d['dialogue']), 2):
user = " ".join(cleaner(tokenizer(str(d['dialogue'][i]['data']['utterance']).lower())))
bot = " ".join(cleaner(tokenizer(str(d['dialogue'][i + 1]['data']['utterance']).lower())))
bot, user = entity_replace(temp, bot, user, names)
calendar = global_kb_type # ['event','time', 'date', 'party', 'room', 'agenda']
gold_entity = []
for key in bot.split(' '):
for e in calendar:
for p in map(lambda x: str(x).lower(), entities_dict[e]):
if (key == p):
gold_entity.append(key)
elif (key == str(p).replace(" ", "_")):
gold_entity.append(key)
gold_entity = list(set(gold_entity))
if bot != "" and user != "":
print(str(j) + " " + user + '\t' + bot + '\t' + str(gold_entity))
j += 1
print("")
# add to example kbs.
example_kbs.append(roots)
# next step : save to file.
with open(os.path.join(root_path, '{}_example_kbs.dat'.format(task)), 'wb') as f:
pickle.dump(example_kbs, f)
| 43.576324
| 120
| 0.44145
|
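Each KB record above is turned into a shallow tree: a root `Node` for the key entity (poi, location, or event) with one child per slot. The `Node` class comes from `utils.structure` and is not shown here; the sketch below is a hypothetical stand-in whose fields (`name`, `type`, `layer`, `children`) are inferred only from the calls in the script.

# Hypothetical stand-in for utils.structure.Node, inferred from its usage above.
class Node:
    def __init__(self, name, node_type, layer=0):
        self.name = name          # entity value, e.g. "home_depot"
        self.type = node_type     # slot name, e.g. "poi" or "distance"
        self.layer = layer        # depth in the KB tree
        self.children = []

    def __repr__(self):
        return 'Node({!r}, {!r}, layer={})'.format(self.name, self.type, self.layer)

# One navigate-domain KB item becomes a root (the poi) with one child per slot.
item = {'poi': 'home_depot', 'distance': '2_miles', 'traffic_info': 'no_traffic',
        'poi_type': 'hardware_store', 'address': '561_union_st'}

root = Node(item['poi'], 'poi', layer=0)
for slot in ('distance', 'traffic_info', 'poi_type', 'address'):
    root.children.append(Node(item[slot], slot, layer=1))

print(root, [child.type for child in root.children])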
e0345f1d617ab5278b7e3a63063d9400f9a8d94f
| 9,496
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_peer_express_route_circuit_connections_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_peer_express_route_circuit_connections_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_peer_express_route_circuit_connections_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PeerExpressRouteCircuitConnectionsOperations(object):
"""PeerExpressRouteCircuitConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PeerExpressRouteCircuitConnection"
"""Gets the specified Peer Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the peer express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeerExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.PeerExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PeerExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PeerExpressRouteCircuitConnectionListResult"]
"""Gets all global reach peer connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PeerExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.PeerExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PeerExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections'} # type: ignore
| 48.697436
| 231
| 0.66944
|
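The generated operations class above is normally reached through `NetworkManagementClient` rather than instantiated directly, as its docstring notes. A hedged usage sketch follows; the resource names and subscription ID are placeholders, and it assumes an `azure-mgmt-network` version that exposes this operations group together with `azure-identity` for credentials.

# Usage sketch; names and subscription ID are placeholders, not real resources.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")

# List all global-reach peer connections for a private peering (returns an ItemPaged).
for conn in client.peer_express_route_circuit_connections.list(
        resource_group_name="my-rg",
        circuit_name="my-circuit",
        peering_name="AzurePrivatePeering"):
    print(conn.name)

# Fetch a single peer connection by name.
conn = client.peer_express_route_circuit_connections.get(
    resource_group_name="my-rg",
    circuit_name="my-circuit",
    peering_name="AzurePrivatePeering",
    connection_name="my-peer-connection")
print(conn.name)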
39c7f8ad0294fa4c72edb9952280d7b5b39f2c99
| 1,476
|
py
|
Python
|
tests/beem/test_notify.py
|
MWFIAE/beem
|
8447150a15b6908dc89e075cbe79a6be1dabd95b
|
[
"MIT"
] | null | null | null |
tests/beem/test_notify.py
|
MWFIAE/beem
|
8447150a15b6908dc89e075cbe79a6be1dabd95b
|
[
"MIT"
] | null | null | null |
tests/beem/test_notify.py
|
MWFIAE/beem
|
8447150a15b6908dc89e075cbe79a6be1dabd95b
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
from builtins import super
import mock
import string
import unittest
import random
import itertools
from pprint import pprint
from beem import Steem
from beemapi.websocket import SteemWebsocket
from beem.notify import Notify
from beem.instance import set_shared_steem_instance
from beem.nodelist import NodeList
# Py3 compatibility
import sys
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
core_unit = "STM"
class TestBot:
def init(self):
self.notify = None
self.blocks = 0
def new_block(self, block):
chunk = 5
self.blocks = self.blocks + 1
if self.blocks >= chunk:
self.notify.close()
class Testcases(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
nodelist = NodeList()
nodelist.update_nodes(steem_instance=Steem(node=nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
self.bts = Steem(
node=nodelist.get_nodes(),
nobroadcast=True,
num_retries=10
)
def test_connect(self):
tb = TestBot()
tb.init()
notify = Notify(on_block=tb.new_block, steem_instance=self.bts)
tb.notify = notify
notify.listen()
self.assertEqual(tb.blocks, 5)
| 26.357143
| 119
| 0.697832
|
26eebe9b495a22ab9c0c449e611b2e161804a290
| 1,153
|
py
|
Python
|
generateEntities.py
|
greablezzq/CRL_Environment
|
9be54be37f253fe7a08b956a12bc6319f95e8de4
|
[
"MIT"
] | null | null | null |
generateEntities.py
|
greablezzq/CRL_Environment
|
9be54be37f253fe7a08b956a12bc6319f95e8de4
|
[
"MIT"
] | null | null | null |
generateEntities.py
|
greablezzq/CRL_Environment
|
9be54be37f253fe7a08b956a12bc6319f95e8de4
|
[
"MIT"
] | null | null | null |
from typing import Dict
import random
from past.utils import old_div
def generateEntity(x,y,z,type):
return '<DrawEntity x="{0[0]}" y="{0[1]}" z="{0[2]}" type="{0[3]}" />'.format([x,y,z,type])
def generateEntities(xRange,y,zRange, typesDictionary:Dict):
entitiesString = []
space = [[i,j] for i in xRange for j in zRange]
k = sum(typesDictionary.values())
positions = random.choices(space, k=k)
for entity in typesDictionary.keys():
for _ in range(typesDictionary[entity]):
position = positions.pop()
entitiesString.append(generateEntity(position[0],y,position[1],entity))
return entitiesString
# copy from malmo source code begin
def getCorner(index,top,left,expand=0,y=0, ARENA_WIDTH = 20, ARENA_BREADTH = 20):
''' Return part of the XML string that defines the requested corner'''
x = str(-(expand+old_div(ARENA_WIDTH,2))) if left else str(expand+old_div(ARENA_WIDTH,2))
z = str(-(expand+old_div(ARENA_BREADTH,2))) if top else str(expand+old_div(ARENA_BREADTH,2))
return 'x'+index+'="'+x+'" y'+index+'="' +str(y)+'" z'+index+'="'+z+'"'
# copy from malmo source code end
| 41.178571
| 96
| 0.66869
|
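Both helpers emit fragments of Malmo mission XML. A short usage sketch, assuming the file above is importable as `generateEntities` (the arena bounds, y levels, and entity counts are made-up values):

# Example use of the two helpers above; all values are illustrative only.
from generateEntities import generateEntities, getCorner

# Scatter 3 pigs and 2 sheep at y=4 somewhere in a 10x10 patch.
for line in generateEntities(range(-5, 5), 4, range(-5, 5), {"Pig": 3, "Sheep": 2}):
    print(line)   # e.g. <DrawEntity x="-2" y="4" z="3" type="Pig" />

# Attribute strings for two opposite corners of a DrawCuboid covering the arena.
print(getCorner("1", top=True, left=True, expand=1, y=206))
print(getCorner("2", top=False, left=False, expand=1, y=246))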
bdcd4089529c8b0d6c301507c16846281f3eecf8
| 6,840
|
py
|
Python
|
lib/rucio/daemons/judge/cleaner.py
|
brianv0/rucio
|
127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/daemons/judge/cleaner.py
|
brianv0/rucio
|
127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/daemons/judge/cleaner.py
|
brianv0/rucio
|
127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Martin Barisits, <martin.barisits@cern.ch>, 2013-2016
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2013, 2015
"""
Judge-Cleaner is a daemon to clean expired replication rules.
"""
import logging
import ntplib
import os
import socket
import sys
import threading
import time
import traceback
from copy import deepcopy
from datetime import datetime, timedelta
from re import match
from random import randint
from sqlalchemy.exc import DatabaseError
from rucio.common.config import config_get
from rucio.common.exception import DatabaseException, UnsupportedOperation, RuleNotFound
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rule import delete_rule, get_expired_rules
from rucio.core.monitor import record_counter
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging, config_get('common', 'loglevel').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rule_cleaner(once=False):
"""
Main loop to check for expired replication rules
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_rules = {} # {rule_id: datetime}
# Make an initial heartbeat so that all judge-cleaners have the correct worker number on the next try
live(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)
start = time.time()
# Refresh paused rules
iter_paused_rules = deepcopy(paused_rules)
for key in iter_paused_rules:
if datetime.utcnow() > paused_rules[key]:
del paused_rules[key]
rules = get_expired_rules(total_workers=heartbeat['nr_threads'] - 1,
worker_number=heartbeat['assign_thread'],
limit=200,
blacklisted_rules=[key for key in paused_rules])
logging.debug('rule_cleaner[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, time.time() - start, len(rules)))
if not rules and not once:
logging.debug('rule_cleaner[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, str(len(paused_rules))))
graceful_stop.wait(60)
else:
for rule in rules:
rule_id = rule[0]
rule_expression = rule[1]
logging.info('rule_cleaner[%s/%s]: Deleting rule %s with expression %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id, rule_expression))
if graceful_stop.is_set():
break
try:
start = time.time()
delete_rule(rule_id=rule_id, nowait=True)
logging.debug('rule_cleaner[%s/%s]: deletion of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id, time.time() - start))
                    except (DatabaseException, DatabaseError, UnsupportedOperation) as e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
record_counter('rule.judge.exceptions.LocksDetected')
logging.warning('rule_cleaner[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
elif match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.error(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
                    except RuleNotFound:
pass
        except (DatabaseException, DatabaseError) as e:
if match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
        except Exception as e:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable='rucio-judge-cleaner', hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
Starts up the Judge-Clean threads.
"""
try:
ntpc = ntplib.NTPClient()
response = ntpc.request('137.138.16.69', version=3) # 137.138.16.69 CERN IP-TIME-1 NTP Server (Stratum 2)
if response.offset > 60 * 60 + 10: # 1hour 10seconds
logging.critical('Offset between NTP server and system time too big. Stopping Cleaner')
return
except:
return
hostname = socket.gethostname()
sanity_check(executable='rucio-judge-cleaner', hostname=hostname)
if once:
rule_cleaner(once)
else:
logging.info('Cleaner starting %s threads' % str(threads))
        threads = [threading.Thread(target=rule_cleaner, kwargs={'once': once}) for i in range(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
| 42.484472
| 178
| 0.605848
|
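The cleaner's `paused_rules` dict implements a simple per-item backoff: a rule whose row is locked (ORA-00054) is blacklisted until a randomized timestamp and silently skipped until then. The pattern in isolation, stripped of the Rucio specifics (`fetch_work` and `process` are placeholders, not Rucio APIs), looks like:

# Generic sketch of the pause-and-retry-later pattern used by rule_cleaner.
# fetch_work() and process() are placeholders, not Rucio APIs.
import random
from datetime import datetime, timedelta

paused = {}  # item_id -> datetime before which the item must not be retried

def run_once(fetch_work, process):
    # Drop pauses that have expired.
    for item_id in [k for k, until in paused.items() if datetime.utcnow() > until]:
        del paused[item_id]

    # Ask for work, excluding anything still paused.
    for item_id in fetch_work(blacklist=list(paused)):
        try:
            process(item_id)
        except RuntimeError:  # stands in for "row is locked" in the code above
            # Back off for 10-40 minutes, like the ORA-00054 branch.
            paused[item_id] = datetime.utcnow() + timedelta(seconds=random.randint(600, 2400))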
77c338d5cbfcb434722852755bfc109d2698c229
| 332
|
py
|
Python
|
Code/grace/__init__.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | 1
|
2016-05-17T22:52:19.000Z
|
2016-05-17T22:52:19.000Z
|
Code/grace/__init__.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | null | null | null |
Code/grace/__init__.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | null | null | null |
# Configuration file for the module; allows `import grace`. Grace will then
# contain the dataset as (grace.grids, grace.dates, grace.positions)
# Automatically build data files if they don't exist
import build
build.autobuild()
# Load data files
from load import grids, dates, positions
__all__ = ["grids", "dates", "positions"]
| 23.714286
| 75
| 0.756024
|
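Per the comments in `__init__.py`, the package is used simply by importing it; a hedged usage sketch follows (the shapes and dtypes of `grids`, `dates`, and `positions` are not documented here, so nothing beyond attribute access is assumed, and the module's Python 2 style implicit relative imports must resolve in your environment):

# Usage sketch; importing grace triggers build.autobuild() and loads the dataset.
import grace

print(grace.__all__)   # ['grids', 'dates', 'positions']
print(type(grace.grids), type(grace.dates), type(grace.positions))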
7a96fedc89b32c7081a6b45fd82fb13380f736b4
| 74,125
|
py
|
Python
|
tensorflow/python/training/tracking/util_test.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 8
|
2020-07-29T18:50:45.000Z
|
2021-07-25T07:06:43.000Z
|
tensorflow/python/training/tracking/util_test.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/training/tracking/util_test.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 11
|
2020-05-31T13:14:56.000Z
|
2021-12-14T04:39:25.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import weakref
from absl.testing import parameterized
import six
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tracking.AutoTrackable):
def __init__(self):
super(NonLayerTrackable, self).__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class InterfaceTests(test.TestCase):
def testLayerDeduplication(self):
model = training.Model()
layer_one = core.Dense(1)
layer_two = core.Dense(1)
model.other_path = [layer_one, layer_two]
model.l2 = layer_two
model.l1 = layer_one
self.assertEqual([layer_one, layer_two], model.layers)
def testSaveWithOnlyKerasSession(self):
with ops.Graph().as_default():
inp = input_layer.Input([1])
dense = core.Dense(1)(inp)
model = training.Model(inp, dense)
model.compile(optimizer="sgd", loss="mse")
model.fit([1.], [2.])
checkpoint = trackable_utils.Checkpoint(model=model)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAddVariable(self):
obj = NonLayerTrackable()
with self.assertRaisesRegexp(ValueError, "do not specify shape"):
trackable_utils.add_variable(
obj, name="shape_specified_twice", shape=[], initializer=1)
constant_initializer = trackable_utils.add_variable(
obj, name="constant_initializer", initializer=1)
with variable_scope.variable_scope("some_variable_scope"):
ones_initializer = trackable_utils.add_variable(
obj,
name="ones_initializer",
shape=[2],
initializer=init_ops.ones_initializer(dtype=dtypes.float32))
bare_initializer = trackable_utils.add_variable(
obj,
name="bare_initializer",
shape=[2, 2],
dtype=dtypes.float64,
initializer=init_ops.zeros_initializer)
# Even in graph mode, there are no naming conflicts between objects, only
# naming conflicts within an object.
other_duplicate = resource_variable_ops.ResourceVariable(
name="duplicate", initial_value=1.)
duplicate = trackable_utils.add_variable(
obj, name="duplicate", shape=[])
with self.assertRaisesRegexp(ValueError, "'duplicate'.*already declared"):
trackable_utils.add_variable(obj, name="duplicate", shape=[])
self.evaluate(trackable_utils.gather_initializers(obj))
self.assertEqual("constant_initializer:0", constant_initializer.name)
self.assertEqual(1, self.evaluate(constant_initializer))
self.assertEqual("some_variable_scope/ones_initializer:0",
ones_initializer.name)
self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
self.assertAllEqual([[0., 0.],
[0., 0.]], self.evaluate(bare_initializer))
self.assertEqual("a_variable:0", obj.a_variable.name)
self.assertEqual("duplicate:0", other_duplicate.name)
if context.executing_eagerly():
# When executing eagerly, there's no uniquification of variable names. The
# checkpoint name will be the same.
self.assertEqual("duplicate:0", duplicate.name)
else:
# The .name attribute may be globally influenced, but the checkpoint name
# won't be (tested below).
self.assertEqual("duplicate_1:0", duplicate.name)
named_variables, _, _ = (
graph_view.ObjectGraphView(obj).serialize_object_graph())
expected_checkpoint_names = (
"a_variable/.ATTRIBUTES/VARIABLE_VALUE",
"bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"duplicate/.ATTRIBUTES/VARIABLE_VALUE",
"ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
)
six.assertCountEqual(
self, expected_checkpoint_names, [v.name for v in named_variables])
def testInitNotCalled(self):
class NoInit(tracking.AutoTrackable):
def __init__(self):
pass
# __init__ for Trackable will be called implicitly.
trackable_utils.add_variable(NoInit(), "var", shape=[])
def testShapeDtype(self):
root = tracking.AutoTrackable()
v1 = trackable_utils.add_variable(
root, name="v1", initializer=3., dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v1.dtype)
v2 = trackable_utils.add_variable(
root,
name="v2",
shape=[3],
initializer=init_ops.ones_initializer,
dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v2.dtype)
self.assertAllEqual([1., 1., 1.], self.evaluate(v2))
def testObjectMetadata(self):
with context.eager_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dense = core.Dense(1)
checkpoint = trackable_utils.Checkpoint(dense=dense)
dense(constant_op.constant([[1.]]))
save_path = checkpoint.save(checkpoint_prefix)
objects = trackable_utils.object_metadata(save_path)
all_variable_names = []
for obj in objects.nodes:
for attribute in obj.attributes:
all_variable_names.append(attribute.full_name)
self.assertIn("dense/kernel", all_variable_names)
def testNotTrackable(self):
class CallsFunctionalStuff(
tracking.NotTrackable, tracking.AutoTrackable):
pass
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
checkpoint = trackable_utils.Checkpoint(x=CallsFunctionalStuff())
with self.assertRaises(NotImplementedError):
checkpoint.save(prefix)
class CallsFunctionalStuffOtherMRO(
tracking.AutoTrackable, tracking.NotTrackable):
pass
checkpoint_reversed = trackable_utils.Checkpoint(
x=CallsFunctionalStuffOtherMRO())
with self.assertRaises(NotImplementedError):
checkpoint_reversed.save(prefix)
class _MirroringSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
tensor = self._primary_variable.read_value()
spec = saver_lib.BaseSaverBuilder.SaveSpec(
tensor=tensor,
slice_spec="",
name=name)
super(_MirroringSaveable, self).__init__(
tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(base.Trackable):
"""A Trackable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class CheckpointingTests(parameterized.TestCase, test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.Adam(0.001)
step = training_util.get_or_create_global_step()
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, step=step)
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = control_flow_ops.group(
optimizer.apply_gradients(zip(gradients, variables)),
step.assign_add(1))
with backprop.GradientTape() as tape:
loss = other_model(input_value)
variables = other_model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = graph_view.ObjectGraphView(
root_trackable).serialize_object_graph()
expected_slot_keys = (
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
"optimizer/learning_rate",
"optimizer/beta_1",
"optimizer/beta_2",
"optimizer/iter",
"optimizer/decay",
) + expected_slot_keys
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual("Adam/beta_1",
named_variables["optimizer/beta_1" + suffix].full_name)
self.assertEqual("Adam/beta_2",
named_variables["optimizer/beta_2" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
children = [node.local_name for node in optimizer_node.children]
six.assertCountEqual(
self,
# hyper variable dependencies
["beta_1", "beta_2", "iter", "decay", "learning_rate"],
children)
serialized_slot_keys = []
for slot in optimizer_node.slot_variables:
for attribute in (
serialized_graph.nodes[slot.slot_variable_node_id].attributes):
serialized_slot_keys.append(attribute.checkpoint_key)
six.assertCountEqual(
self,
[key + suffix for key in expected_slot_keys],
serialized_slot_keys)
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
checkpoint = trackable_utils.Checkpoint(v=v)
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
self.evaluate(v.non_dep_variable.assign(44.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(45.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(44., self.evaluate(v.non_dep_variable))
self.assertEqual(44., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturnedWithGlobalName(self):
# The same object can also be saved using the name-based saver.
v = _OwnsMirroredVariables()
saver = saver_lib.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testAssertConsumedNoCheckpoint(self):
prefix = os.path.join(self.get_temp_dir(), "ckpt")
v = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(v.initializer)
ckpt = trackable_utils.Checkpoint(v=v)
self.evaluate(trackable_utils.gather_initializers(ckpt))
save_path = ckpt.save(file_prefix=prefix)
status = ckpt.restore(save_path=save_path)
del ckpt
status.assert_consumed()
@test_util.run_in_graph_and_eager_modes
def testPassingCheckpointOptions(self):
localhost = "/job:localhost/device:CPU:0"
options = checkpoint_options.CheckpointOptions(
experimental_io_device=localhost)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
v = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(v.initializer)
ckpt = trackable_utils.Checkpoint(v=v)
self.evaluate(trackable_utils.gather_initializers(ckpt))
save_path = ckpt.save(file_prefix=prefix, options=options)
status = ckpt.restore(save_path=save_path, options=options)
del ckpt
status.assert_consumed()
# In graph mode, verify that the save and restore ops were set to run on
# localhost.
if not context.executing_eagerly():
for op in ops.get_default_graph().get_operations():
if op.type in ("SaveV2", "RestoreV2"):
self.assertEqual(localhost, op.device)
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.Adam(0.001)
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
self.assertFalse(root_trackable.save_counter.trainable)
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_trackable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_trackable.save_counter, 3))
optimizer_variables = self.evaluate(
sorted(optimizer.variables(), key=lambda v: v.name))
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_trackable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_trackable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.Adam(0.001)
on_create_root = trackable_utils.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
status.assert_nontrivial_match()
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
status.assert_existing_objects_matched()
if not context.executing_eagerly():
with self.assertRaises(AssertionError):
status.assert_consumed()
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value,
var_list=[dummy_var])
status.assert_existing_objects_matched()
status.assert_consumed()
self.assertAllEqual(
optimizer_variables,
# Creation order is different, so .variables() needs to be re-sorted.
self.evaluate(sorted(optimizer.variables(), key=lambda v: v.name)))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer.iterations.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.CheckpointV1(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
else:
status.assert_consumed()
status.assert_existing_objects_matched()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.optimizer.iterations))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
def _train_fn(model, input_value):
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
manager = checkpoint_management.CheckpointManager(
root, checkpoint_directory, max_to_keep=1)
status = root.restore(save_path=manager.latest_checkpoint)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(_train_fn, model, input_value)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
manager.save()
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.optimizer.iterations))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testFreezing(self):
with test_util.use_gpu():
# Save an object-based checkpoint using a frozen saver
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(v=v)
self.evaluate(v.assign(3))
# Create the save counter so assert_consumed doesn't complain about it not
# existing in the checkpoint on restore.
self.evaluate(checkpoint.save_counter.assign(12))
saver = trackable_utils.frozen_saver(checkpoint)
with ops.device("cpu:0"):
prefix_tensor = constant_op.constant(prefix)
self.evaluate(saver.save(prefix_tensor))
self.evaluate(v.assign(10))
# Use the frozen saver to restore the same object graph
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore using another frozen saver on an identical object graph
del v, checkpoint, saver
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(v=v)
saver = trackable_utils.frozen_saver(checkpoint)
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore as an object-based checkpoint
del v, checkpoint, saver
checkpoint = trackable_utils.Checkpoint()
status = checkpoint.restore(prefix)
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
if context.executing_eagerly():
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
self.assertEqual(0, self.evaluate(v))
checkpoint.v = v
status.assert_consumed().run_restore_ops()
self.assertEqual(3, self.evaluate(v))
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
@test_util.run_in_graph_and_eager_modes
def testCustomNumbering(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
step = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(step=step)
self.evaluate(step.initializer)
for i in range(5):
path = checkpoint.write("%s-%d" % (prefix, self.evaluate(step)))
expected_suffix = "-%d" % (2 * i,)
if not path.endswith(expected_suffix):
self.fail("%s should have suffix %s" % (path, expected_suffix))
self.evaluate(step.assign_add(2))
def testPartialRestoreWarningObject(self):
with context.eager_mode():
optimizer = adam.Adam(0.0)
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.),
optimizer=optimizer)
# Create a slot variable to save
optimizer.minimize(original_root.v1.read_value, [original_root.v1])
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
weak_v1 = weakref.ref(partial_root.v1)
partial_root.restore(save_path)
self.assertEqual(2., partial_root.v1.numpy())
with test.mock.patch.object(logging, "warning") as mock_log:
del partial_root
self.assertIsNone(weak_partial_root())
self.assertIsNone(weak_v1())
messages = str(mock_log.call_args_list)
self.assertIn("(root).v2'", messages)
self.assertIn("(root).optimizer's state 'm' for (root).v1", messages)
self.assertNotIn("(root).v1'", messages)
self.assertIn("expect_partial()", messages)
def testPartialRestoreWarningAttribute(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
v2=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
with test.mock.patch.object(logging, "warning") as mock_log:
# Note: Unlike in testPartialRestoreWarningObject, the warning actually
# prints immediately here, since all of the objects have been created
# and there's no deferred restoration sitting around.
partial_root.restore(save_path)
self.assertEqual(3., partial_root.v2.numpy())
del partial_root
self.assertIsNone(weak_partial_root())
messages = str(mock_log.call_args_list)
self.assertIn("(root).v1", messages)
self.assertNotIn("(root).v2", messages)
self.assertIn("expect_partial()", messages)
def testAttributeException(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
v2=variables_lib.Variable(0.))
status = partial_root.restore(save_path)
with self.assertRaisesRegexp(
AssertionError,
r"Unused attributes(.|\n)*\(root\).v1"):
status.assert_consumed()
def testSilencePartialWarning(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
weak_v1 = weakref.ref(partial_root.v1)
partial_root.restore(save_path).expect_partial()
self.assertEqual(2., partial_root.v1.numpy())
with test.mock.patch.object(logging, "warning") as mock_log:
del partial_root
self.assertIsNone(weak_partial_root())
self.assertIsNone(weak_v1())
self.assertEmpty(mock_log.call_args_list)
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.Adam(0.)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@def_function.function
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables))
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(optimizer.iterations))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def _get_checkpoint_name(self, name):
root = tracking.AutoTrackable()
trackable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
with ops.name_scope("root/" + named_variable.name):
pass # Make sure we can use this as an op name if we prefix it.
return named_variable.name
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableNameEscaping(self):
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c"))
self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b"))
self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/"))
self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S"))
self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix,
self._get_checkpoint_name(r"d/.ATTRIBUTES/f"))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNumberedPath(self):
root = tracking.AutoTrackable()
leaf = tracking.AutoTrackable()
root.leaf = leaf
trackable_utils.add_variable(leaf, name="v", shape=[])
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", named_variable.name)
@test_util.run_in_graph_and_eager_modes
def testLocalNameValidation(self):
root = tracking.AutoTrackable()
leaf = tracking.AutoTrackable()
# Dots are escaped, which avoids conflicts with reserved names.
root._track_trackable(leaf, name=".ATTRIBUTES")
trackable_utils.add_variable(trackable=leaf, name="a", shape=[])
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
self.assertEqual("..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE",
named_variable.name)
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.Adam(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = trackable_utils.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def testLateDependencyTracking(self):
class Dependency(tracking.AutoTrackable):
def build(self):
self.var = trackable_utils.add_variable(
self, "var", initializer=0.)
class LateDependencies(trackable_utils.Checkpoint):
def add_dep(self):
self.dep = Dependency()
self.dep.build()
original = LateDependencies()
original.add_dep()
self.evaluate(state_ops.assign(original.dep.var, 123.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = original.save(checkpoint_prefix)
load_into = LateDependencies()
status = load_into.restore(save_path)
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
load_into.add_dep()
status.assert_consumed()
status.assert_existing_objects_matched().run_restore_ops()
self.assertEqual(123., self.evaluate(load_into.dep.var))
@test_util.run_in_graph_and_eager_modes
def testDepAfterVar(self):
class Dependency(tracking.AutoTrackable):
def build(self):
self.var = trackable_utils.add_variable(
self, "var", initializer=0.)
class DepAfterVar(trackable_utils.Checkpoint):
def add_dep(self):
dep = Dependency()
dep.build()
self.dep = dep
dep_after_var = DepAfterVar()
dep_after_var.add_dep()
self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = dep_after_var.save(checkpoint_prefix)
loaded_dep_after_var = DepAfterVar()
status = loaded_dep_after_var.restore(save_path)
loaded_dep_after_var.add_dep()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var))
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = trackable_utils.Checkpoint()
root.var = trackable_utils.add_variable(
root, name="var", initializer=0.)
optimizer = adam.Adam(0.1)
variables = [root.var]
gradients = [1.]
train_op = optimizer.apply_gradients(zip(gradients, variables))
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(trackable_utils.gather_initializers(
trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(
optimizer.get_slot(slot_name="m", var=root.var),
14.))
slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
new_root = trackable_utils.Checkpoint()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = new_root.restore(slots_path)
no_slot_status = new_root.restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = trackable_utils.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.Adam(0.1)
slot_status.assert_existing_objects_matched()
if not context.executing_eagerly():
with self.assertRaisesRegexp(AssertionError, "Unresolved object"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
else:
# Slot variables are not created eagerly when graph building.
with self.assertRaises(KeyError):
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)
variables = [new_root.var]
gradients = [1.]
train_op = new_root.optimizer.apply_gradients(zip(gradients, variables))
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
if not context.executing_eagerly():
# The train op hasn't run when graph building, so the slot variable has
# its restored value. It has run in eager, so the value will be different.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
@test_util.run_in_graph_and_eager_modes
def testOverlappingRestores(self):
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep = tracking.AutoTrackable()
save_root.dep.var = trackable_utils.add_variable(
save_root.dep, name="var", initializer=0.)
self.evaluate(state_ops.assign(save_root.dep.var, 12.))
first_path = save_root.save(os.path.join(checkpoint_directory, "first"))
self.evaluate(state_ops.assign(save_root.dep.var, 13.))
second_path = save_root.save(os.path.join(checkpoint_directory, "second"))
first_root = trackable_utils.Checkpoint()
second_root = trackable_utils.Checkpoint()
first_status = first_root.restore(first_path)
second_status = second_root.restore(second_path)
load_dep = tracking.AutoTrackable()
load_dep.var = trackable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(13., self.evaluate(load_dep.var))
# Try again with the order of the restore() reversed. The last restore
# determines the final value.
first_root = trackable_utils.Checkpoint()
second_root = trackable_utils.Checkpoint()
second_status = second_root.restore(second_path)
first_status = first_root.restore(first_path)
load_dep = tracking.AutoTrackable()
load_dep.var = trackable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
@test_util.run_in_graph_and_eager_modes
def testAmbiguousLoad(self):
# Not OK to split one checkpoint object into two
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep_one = tracking.AutoTrackable()
save_root.dep_two = tracking.AutoTrackable()
dep_three = tracking.AutoTrackable()
save_root.dep_one.dep_three = dep_three
save_root.dep_two.dep_three = dep_three
trackable_utils.add_variable(dep_three, name="var", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(save_root))
save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
load_root = trackable_utils.Checkpoint()
status = load_root.restore(save_path)
load_root.dep_one = tracking.AutoTrackable()
load_root.dep_two = tracking.AutoTrackable()
load_root.dep_one.dep_three = tracking.AutoTrackable()
load_root.dep_two.dep_three = tracking.AutoTrackable()
trackable_utils.add_variable(
load_root.dep_one.dep_three, name="var", initializer=0.)
trackable_utils.add_variable(
load_root.dep_two.dep_three, name="var", initializer=0.)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
@test_util.run_in_graph_and_eager_modes
def testObjectsCombined(self):
# Currently fine to load two checkpoint objects into one Python object
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep_one = tracking.AutoTrackable()
save_root.dep_two = tracking.AutoTrackable()
trackable_utils.add_variable(
save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64)
trackable_utils.add_variable(
save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64)
self.evaluate(trackable_utils.gather_initializers(save_root))
save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
load_root = trackable_utils.Checkpoint()
load_root.dep_one = tracking.AutoTrackable()
load_root.dep_two = load_root.dep_one
v1 = trackable_utils.add_variable(
load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64)
v2 = trackable_utils.add_variable(
load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64)
status = load_root.restore(
save_path).assert_consumed().assert_existing_objects_matched()
status.run_restore_ops()
self.assertEqual(32., self.evaluate(v1))
self.assertEqual(64., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes
def testEmptyContainersIgnored(self):
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint(a=[])
path = save_root.save(checkpoint_directory)
load_root = trackable_utils.Checkpoint(b=[])
load_root.dep = []
load_root.dep.append([])
status = load_root.restore(path)
status.assert_consumed()
status.assert_existing_objects_matched()
status.assert_nontrivial_match()
@test_util.run_in_graph_and_eager_modes
def testDependencyLoop(self):
# Note: this test creates garbage during eager execution because it
# purposefully creates a reference cycle.
first = trackable_utils.Checkpoint()
second = trackable_utils.Checkpoint()
first.second = second
second.first = first
first.v = trackable_utils.add_variable(
first, "v1", initializer=[3., 1., 4.])
second.v = trackable_utils.add_variable(
second, "v2", initializer=[1., 1., 2., 3.])
self.evaluate(trackable_utils.gather_initializers(first))
checkpoint_directory = self.get_temp_dir()
save_path = first.save(os.path.join(checkpoint_directory, "ckpt"))
# Test deferred loading
first_load = trackable_utils.Checkpoint()
status = first_load.restore(save_path)
second_load = tracking.AutoTrackable()
first_load.second = second_load
second_load.first = first_load
with self.assertRaises(AssertionError):
status.assert_consumed()
first_load.v = trackable_utils.add_variable(
first_load, "v1", shape=[3])
second_load.v = trackable_utils.add_variable(
second_load, "v2", shape=[4])
status.assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
# Test loading when variables have already been created
self.evaluate(first_load.v.assign([2., 7., 1.]))
self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v))
self.evaluate(second_load.v.assign([2., 7., 1., 8.]))
self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v))
status = first_load.restore(save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
@test_util.run_in_graph_and_eager_modes
def testRestoreOnAssign(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
first = trackable_utils.Checkpoint()
first.var1 = variables_lib.Variable(0., name="outside_var")
first.var2 = variables_lib.Variable(0., name="blah")
self.evaluate(first.var1.assign(4.))
self.evaluate(first.var2.assign(8.))
save_path = first.save(checkpoint_prefix)
second = trackable_utils.Checkpoint()
second.var2 = variables_lib.Variable(0., name="blah")
status = second.restore(save_path)
recreated_var1 = variables_lib.Variable(0., name="outside_var")
status.run_restore_ops()
self.assertEqual(8., self.evaluate(second.var2))
self.evaluate(recreated_var1.assign(-2.))
self.assertEqual(-2., self.evaluate(recreated_var1))
second.var1 = recreated_var1
status.run_restore_ops()
self.assertEqual(4., self.evaluate(recreated_var1))
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = trackable_utils.Checkpoint()
obj.var = variables_lib.Variable(0., name="v")
obj.opt = adam.Adam(0.1)
variables = [obj.var]
gradients = [1.]
obj.opt.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(obj))
obj.save(checkpoint_prefix)
graph.finalize()
obj.save(checkpoint_prefix)
@test_util.run_in_graph_and_eager_modes
def testCheckpointState(self):
# No checkpoints are deleted by default
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoTrackable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(obj))
saver = trackable_utils.Checkpoint(obj=obj)
for _ in range(10):
saver.save(checkpoint_prefix)
expected_filenames = ["checkpoint"]
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
self.assertEmpty(
set(expected_filenames)
- set(os.listdir(checkpoint_directory)))
@test_util.run_in_graph_and_eager_modes
def testCheckpointStateChangingVarList(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoTrackable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(obj))
checkpoint = trackable_utils.Checkpoint(obj=obj)
looped_variables = []
for iteration in range(10):
new_variable = resource_variable_ops.ResourceVariable(iteration)
self.evaluate(new_variable.initializer)
setattr(checkpoint, "var_%d" % iteration, new_variable)
checkpoint.save(checkpoint_prefix)
looped_variables.append(new_variable)
expected_filenames = ["checkpoint"]
# We've copied the saver each time, but checkpoint management should still
# be consistent. Nothing gets deleted.
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
self.assertEmpty(
set(expected_filenames)
- set(os.listdir(checkpoint_directory)))
self.assertEqual(
checkpoint_prefix + "-10",
checkpoint_management.latest_checkpoint(checkpoint_directory))
# The checkpoint list only contains the most recent checkpoint, but they're
# all on disk. This means we won't eventually run into proto size limits.
self.assertEqual(
[checkpoint_prefix + "-10"],
(checkpoint_management.get_checkpoint_state(checkpoint_directory)
.all_model_checkpoint_paths))
for v in looped_variables:
self.evaluate(v.assign(314))
checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops()
self.assertEqual(314, self.evaluate(checkpoint.var_9))
self.assertEqual(314, self.evaluate(checkpoint.var_8))
self.assertEqual(314, self.evaluate(checkpoint.var_6))
self.assertEqual(5, self.evaluate(checkpoint.var_5))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops()
self.assertEqual(9, self.evaluate(checkpoint.var_9))
self.assertEqual(8, self.evaluate(checkpoint.var_8))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = trackable_utils.Checkpoint()
obj.var = variables_lib.Variable(0., name="v")
obj.opt = adam.Adam(0.1)
variables = [obj.var]
gradients = [1.]
obj.opt.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(obj))
save_path = obj.save(checkpoint_prefix)
obj.restore(save_path)
graph.finalize()
obj.restore(save_path)
@test_util.run_in_graph_and_eager_modes
def test_sequential(self):
model = sequential.Sequential()
checkpoint = trackable_utils.Checkpoint(model=model)
model.add(core.Dense(4))
second_dense = core.Dense(5)
model.add(second_dense)
model(constant_op.constant([[1.]]))
checkpoint.restore(None).initialize_or_restore()
self.evaluate(second_dense.bias.assign(
constant_op.constant([1., 2., 3., 4., 5.])))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(second_dense.bias.assign(
constant_op.constant([5., 6., 7., 8., 9.])))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertAllEqual([1., 2., 3., 4., 5.], self.evaluate(second_dense.bias))
deferred_sequential = sequential.Sequential()
deferred_sequential_checkpoint = trackable_utils.Checkpoint(
model=deferred_sequential)
status = deferred_sequential_checkpoint.restore(save_path)
deferred_sequential.add(core.Dense(4))
deferred_second_dense = core.Dense(5)
deferred_sequential.add(deferred_second_dense)
deferred_sequential(constant_op.constant([[1.]]))
status.run_restore_ops()
self.assertAllEqual([1., 2., 3., 4., 5.],
self.evaluate(deferred_second_dense.bias))
@test_util.run_in_graph_and_eager_modes
def test_initialize_if_not_restoring(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
model=model) # Do not save the optimizer with the checkpoint.
optimizer_checkpoint = trackable_utils.Checkpoint(
optimizer=optimizer)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
def train_fn():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
# TODO(tanzheny): Add hyper variables to .variables(), and set them with
# set_weights etc.
variables_not_in_the_variables_property = [
obj for obj in optimizer._hyper.values()
if isinstance(obj, variables_lib.Variable)]
self.evaluate([v.initializer for v
in optimizer.variables()
+ variables_not_in_the_variables_property])
train_fn()
model_save_path = root.save(file_prefix=checkpoint_prefix)
self.evaluate(optimizer.beta_1.assign(42.))
optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)
del train_fn
# Restore into a graph with the optimizer
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
status = root.restore(save_path=model_save_path)
input_value = constant_op.constant([[3.]])
def train_fn1():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn1 = functools.partial(self.evaluate, train_fn1())
status.initialize_or_restore()
train_fn1()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
del train_fn1
# Make sure initialization doesn't clobber later restores
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001, beta_1=1.0)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
opt_root = trackable_utils.Checkpoint(
optimizer=optimizer)
status = root.restore(save_path=model_save_path)
init_only_optimizer_status = opt_root.restore(save_path=None)
optimizer_status = opt_root.restore(save_path=optimizer_save_path)
input_value = constant_op.constant([[3.]])
def train_fn2():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn2 = functools.partial(self.evaluate, train_fn2())
optimizer_status.run_restore_ops()
status.initialize_or_restore()
init_only_optimizer_status.initialize_or_restore()
train_fn2()
self.assertEqual(42., self.evaluate(optimizer.beta_1))
@test_util.run_in_graph_and_eager_modes
def test_restore_after_adding_empty_trackable_data_structure(self):
model = NonLayerTrackable()
checkpoint = trackable_utils.Checkpoint(model=model)
checkpoint.restore(None).initialize_or_restore()
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
del model, checkpoint
model = NonLayerTrackable()
model.dict = {"a": 1}
model.list = {"b": 1}
checkpoint = trackable_utils.Checkpoint(model=model)
load_status = checkpoint.restore(save_path)
load_status.assert_existing_objects_matched().run_restore_ops()
@test_util.run_in_graph_and_eager_modes
def test_write_checkpoint_from_function(self):
checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(1.))
@def_function.function
def _write_checkpoint():
save_path = save_checkpoint.write(checkpoint_prefix)
return save_path
self.evaluate([save_checkpoint.v.initializer])
self.evaluate(_write_checkpoint())
load_checkpoint = trackable_utils.Checkpoint(v=variables_lib.Variable(0.))
# Use read() instead of restore() which allows us to check that all
# existing objects were loaded.
status = load_checkpoint.read(checkpoint_prefix)
status.assert_existing_objects_matched()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(1., self.evaluate(load_checkpoint.v))
self.evaluate(save_checkpoint.v.assign(3.))
self.evaluate(_write_checkpoint())
self.evaluate(save_checkpoint.v.assign(0.))
status = load_checkpoint.read(checkpoint_prefix)
status.assert_existing_objects_matched()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(3., self.evaluate(load_checkpoint.v))
  def test_initialize_with_data_structures(self):
checkpoint = trackable_utils.Checkpoint(
a=[variables_lib.Variable(0.), variables_lib.Variable(1.)],
b={"a": variables_lib.Variable(2.), "b": variables_lib.Variable(3.)})
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
load_checkpoint = trackable_utils.Checkpoint(
a=[variables_lib.Variable(4.), variables_lib.Variable(5.)],
b={"a": variables_lib.Variable(6.), "b": variables_lib.Variable(7.)})
load_checkpoint.restore(save_path)
self.assertAllClose(self.evaluate(load_checkpoint.a), [0, 1])
self.assertAllClose(self.evaluate(load_checkpoint.b), {"a": 2, "b": 3})
class _ManualScope(tracking.AutoTrackable):
def __call__(self):
with variable_scope.variable_scope("ManualScope") as vs:
self.variable_scope = vs
with trackable_utils.capture_dependencies(template=self):
return self._build()
def _build(self):
return variable_scope.get_variable(name="in_manual_scope", shape=[])
class TemplateTests(parameterized.TestCase, test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_trackable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
manual = _ManualScope()
return v, v + 1., v2, manual, manual()
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save, manual_scope, manual_scope_v = save_template()
six.assertCountEqual(
self,
[id(v1_save), id(v2_save), id(manual_scope),
id(manual_scope_v), id(save_template)],
map(id, trackable_utils.list_objects(save_template)))
manual_dep, = manual_scope._checkpoint_dependencies
self.assertEqual("in_manual_scope", manual_dep.name)
self.assertIs(manual_scope_v, manual_dep.ref)
optimizer = adam.Adam(0.0)
save_root = trackable_utils.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value,
var_list=[v1_save])
self.evaluate([v.initializer for v in save_template.variables])
optimizer_variables = optimizer.variables() + list(
optimizer._hyper.values())
self.evaluate([v.initializer for v in optimizer_variables])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.Adam(0.0)
load_root = trackable_utils.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2, _, _ = load_template()
load_optimizer.minimize(var.read_value, var_list=[var])
self.assertLen(load_template._checkpoint_dependencies, 3)
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
self.assertEqual("ManualScope",
load_template._checkpoint_dependencies[2].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
@test_util.run_in_graph_and_eager_modes
def test_trackable_save_restore_nested(self):
def _inner_template():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer())
return v
def _outer_template():
first_inner = template.make_template("i1", _inner_template)
second_inner = template.make_template("i2", _inner_template)
v1 = first_inner()
v2 = second_inner()
v3 = second_inner()
return (first_inner, second_inner), (v1, v2, v3)
with variable_scope.variable_scope("ignored"):
save_template = template.make_template("s1", _outer_template)
save_root = trackable_utils.Checkpoint(my_template=save_template)
(inner_template_one, inner_template_two), _ = save_template()
self.evaluate(inner_template_one.variables[0].assign([20.]))
self.evaluate(inner_template_two.variables[0].assign([25.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _outer_template)
load_root = trackable_utils.Checkpoint(my_template=load_template)
status = load_root.restore(save_path)
(inner_template_one, inner_template_two), (v1, v2, v3) = load_template()
outer_template_dependencies = load_root.my_template._checkpoint_dependencies
self.assertLen(outer_template_dependencies, 2)
self.assertEqual("i1", outer_template_dependencies[0].name)
self.assertIs(inner_template_one, outer_template_dependencies[0].ref)
self.assertEqual("i2", outer_template_dependencies[1].name)
self.assertIs(inner_template_two, outer_template_dependencies[1].ref)
self.assertLen(inner_template_one._checkpoint_dependencies, 1)
self.assertEqual("v", inner_template_one._checkpoint_dependencies[0].name)
self.assertLen(inner_template_two._checkpoint_dependencies, 1)
self.assertEqual("v", inner_template_two._checkpoint_dependencies[0].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([20.], self.evaluate(v1))
self.assertAllEqual([25.], self.evaluate(v2))
self.assertAllEqual([25.], self.evaluate(v3))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.Adam(0.001)
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, slot_name="m").assign([2.]))
self.evaluate(optimizer.beta_1.assign(3.))
return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, slot_name="m")
.assign([102.]))
self.evaluate(root_trackable.optimizer.beta_1.assign(103.))
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.], self.evaluate(root_trackable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, slot_name="m")))
self.assertAllEqual(3.,
self.evaluate(root_trackable.optimizer.beta_1))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = saver_lib.Saver()
return name_saver.save(
sess=session,
save_path=checkpoint_prefix,
global_step=root.optimizer.iterations)
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = trackable_utils.TrackableSaver(
graph_view.ObjectGraphView(root))
self._set_sentinels(root)
status = object_saver.restore(save_path)
if context.executing_eagerly():
self._check_sentinels(root)
if context.executing_eagerly():
status.assert_consumed()
status.assert_existing_objects_matched()
status.assert_nontrivial_match()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_consumed()
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_existing_objects_matched()
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_nontrivial_match()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
status.assert_nontrivial_match()
self._check_sentinels(root)
# Check that there is no error when keys are missing from the name-based
# checkpoint.
root.not_in_name_checkpoint = resource_variable_ops.ResourceVariable([1.])
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
def testIgnoreSaveCounter(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with self.cached_session() as session:
# Create and save a model using Saver() before using a Checkpoint. This
# generates a snapshot without the Checkpoint's `save_counter`.
model = sequential.Sequential()
model.add(core.Flatten(input_shape=(1,)))
model.add(core.Dense(1))
name_saver = saver_lib.Saver(model.trainable_variables)
save_path = name_saver.save(
sess=session, save_path=checkpoint_prefix, global_step=1)
# Checkpoint.restore must successfully load that checkpoint.
ckpt = trackable_utils.Checkpoint(model=model)
status = ckpt.restore(save_path)
status.assert_existing_objects_matched()
# It should, however, refuse to load a checkpoint where an unrelated
# `save_counter` variable is missing.
model.layers[1].var = variables_lib.Variable(0., name="save_counter")
status = ckpt.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
avg_line_length: 43.322618 | max_line_length: 80 | alphanum_fraction: 0.709046

hexsha: 84032250d1dc0d8b43527391368b2ca3ad6df970 | size: 6,987 | ext: py | lang: Python
max_stars_repo_path: lark/parsers/lalr_parser.py | max_stars_repo_name: plannigan/lark | max_stars_repo_head_hexsha: 5662083377866ac07b40b7598ce85d9861ccf68c | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2,653 | max_stars_repo_stars_event_min_datetime: 2018-04-24T07:29:58.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T03:17:56.000Z
max_issues_repo_path: lark/parsers/lalr_parser.py | max_issues_repo_name: plannigan/lark | max_issues_repo_head_hexsha: 5662083377866ac07b40b7598ce85d9861ccf68c | max_issues_repo_licenses: ["MIT"] | max_issues_count: 900 | max_issues_repo_issues_event_min_datetime: 2018-04-24T00:08:42.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T20:30:55.000Z
max_forks_repo_path: lark/parsers/lalr_parser.py | max_forks_repo_name: plannigan/lark | max_forks_repo_head_hexsha: 5662083377866ac07b40b7598ce85d9861ccf68c | max_forks_repo_licenses: ["MIT"] | max_forks_count: 356 | max_forks_repo_forks_event_min_datetime: 2018-04-25T22:46:05.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-31T08:39:37.000Z
"""This module implements a LALR(1) Parser
"""
# Author: Erez Shinan (2017)
# Email : erezshin@gmail.com
from copy import deepcopy, copy
from ..exceptions import UnexpectedCharacters, UnexpectedInput, UnexpectedToken
from ..lexer import Token
from ..utils import Serialize
from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
from .lalr_interactive_parser import InteractiveParser
###{standalone
class LALR_Parser(Serialize):
def __init__(self, parser_conf, debug=False):
analysis = LALR_Analyzer(parser_conf, debug=debug)
analysis.compute_lalr()
callbacks = parser_conf.callbacks
self._parse_table = analysis.parse_table
self.parser_conf = parser_conf
self.parser = _Parser(analysis.parse_table, callbacks, debug)
@classmethod
def deserialize(cls, data, memo, callbacks, debug=False):
inst = cls.__new__(cls)
inst._parse_table = IntParseTable.deserialize(data, memo)
inst.parser = _Parser(inst._parse_table, callbacks, debug)
return inst
def serialize(self, memo):
return self._parse_table.serialize(memo)
def parse_interactive(self, lexer, start):
return self.parser.parse(lexer, start, start_interactive=True)
def parse(self, lexer, start, on_error=None):
try:
return self.parser.parse(lexer, start)
except UnexpectedInput as e:
if on_error is None:
raise
while True:
if isinstance(e, UnexpectedCharacters):
s = e.interactive_parser.lexer_state.state
p = s.line_ctr.char_pos
if not on_error(e):
raise e
if isinstance(e, UnexpectedCharacters):
                    # If the handler didn't advance the character position, feed the
                    # offending character forward so parsing can't stall on it.
if p == s.line_ctr.char_pos:
s.line_ctr.feed(s.text[p:p+1])
try:
return e.interactive_parser.resume_parse()
except UnexpectedToken as e2:
if (isinstance(e, UnexpectedToken)
and e.token.type == e2.token.type == '$END'
and e.interactive_parser == e2.interactive_parser):
# Prevent infinite loop
raise e2
e = e2
except UnexpectedCharacters as e2:
e = e2
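    # Recovery sketch (the handler below is hypothetical, for illustration only):
    # `on_error` receives the UnexpectedInput instance; a truthy return value asks
    # the parser to resume from `e.interactive_parser`, while a falsy return value
    # re-raises the original error.
    #
    #     def skip_errors(e):
    #         return True   # drop the bad token/character and keep going
    #
    #     tree = lalr_parser.parse(lexer, start, on_error=skip_errors)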
class ParseConf:
__slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states'
def __init__(self, parse_table, callbacks, start):
self.parse_table = parse_table
self.start_state = self.parse_table.start_states[start]
self.end_state = self.parse_table.end_states[start]
self.states = self.parse_table.states
self.callbacks = callbacks
self.start = start
class ParserState:
__slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack'
def __init__(self, parse_conf, lexer, state_stack=None, value_stack=None):
self.parse_conf = parse_conf
self.lexer = lexer
self.state_stack = state_stack or [self.parse_conf.start_state]
self.value_stack = value_stack or []
@property
def position(self):
return self.state_stack[-1]
# Necessary for match_examples() to work
def __eq__(self, other):
if not isinstance(other, ParserState):
return NotImplemented
return len(self.state_stack) == len(other.state_stack) and self.position == other.position
def __copy__(self):
return type(self)(
self.parse_conf,
self.lexer, # XXX copy
copy(self.state_stack),
deepcopy(self.value_stack),
)
def copy(self):
return copy(self)
def feed_token(self, token, is_end=False):
state_stack = self.state_stack
value_stack = self.value_stack
states = self.parse_conf.states
end_state = self.parse_conf.end_state
callbacks = self.parse_conf.callbacks
while True:
state = state_stack[-1]
try:
action, arg = states[state][token.type]
except KeyError:
expected = {s for s in states[state].keys() if s.isupper()}
raise UnexpectedToken(token, expected, state=self, interactive_parser=None)
assert arg != end_state
if action is Shift:
# shift once and return
assert not is_end
state_stack.append(arg)
value_stack.append(token if token.type not in callbacks else callbacks[token.type](token))
return
else:
# reduce+shift as many times as necessary
rule = arg
size = len(rule.expansion)
if size:
s = value_stack[-size:]
del state_stack[-size:]
del value_stack[-size:]
else:
s = []
value = callbacks[rule](s)
_action, new_state = states[state_stack[-1]][rule.origin.name]
assert _action is Shift
state_stack.append(new_state)
value_stack.append(value)
if is_end and state_stack[-1] == end_state:
return value_stack[-1]
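    # Worked sketch of feed_token() on a hypothetical rule `expr: NUMBER PLUS NUMBER`:
    # feeding NUMBER, PLUS, NUMBER produces three Shift actions, pushing states and
    # token values; feeding the $END token with is_end=True triggers a Reduce of
    # `expr`, which pops len(rule.expansion) == 3 values, builds the result with
    # callbacks[rule], "goto"-shifts on the rule origin, and returns once the top
    # state equals end_state.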
class _Parser:
def __init__(self, parse_table, callbacks, debug=False):
self.parse_table = parse_table
self.callbacks = callbacks
self.debug = debug
def parse(self, lexer, start, value_stack=None, state_stack=None, start_interactive=False):
parse_conf = ParseConf(self.parse_table, self.callbacks, start)
parser_state = ParserState(parse_conf, lexer, state_stack, value_stack)
if start_interactive:
return InteractiveParser(self, parser_state, parser_state.lexer)
return self.parse_from_state(parser_state)
def parse_from_state(self, state):
# Main LALR-parser loop
try:
token = None
for token in state.lexer.lex(state):
state.feed_token(token)
end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
return state.feed_token(end_token, True)
except UnexpectedInput as e:
try:
e.interactive_parser = InteractiveParser(self, state, state.lexer)
except NameError:
pass
raise e
except Exception as e:
if self.debug:
print("")
print("STATE STACK DUMP")
print("----------------")
for i, s in enumerate(state.state_stack):
                    print('%d)' % i, s)
print("")
raise
###}
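# Usage sketch via the public API (names outside this module come from the `lark`
# package; the grammar is illustrative):
#
#     from lark import Lark
#     parser = Lark('start: "a" "b"', parser='lalr')
#     tree = parser.parse("ab")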
avg_line_length: 34.761194 | max_line_length: 106 | alphanum_fraction: 0.584657

hexsha: 7c989d394e31ee02ee8126830cce17880ecb2194 | size: 10,275 | ext: py | lang: Python
max_stars_repo_path: librosa/core/pitch.py | max_stars_repo_name: SleezusJ/librosa | max_stars_repo_head_hexsha: 0ac24ab2894f925cfa3155789304850543e58949 | max_stars_repo_licenses: ["ISC"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: librosa/core/pitch.py | max_issues_repo_name: SleezusJ/librosa | max_issues_repo_head_hexsha: 0ac24ab2894f925cfa3155789304850543e58949 | max_issues_repo_licenses: ["ISC"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: librosa/core/pitch.py | max_forks_repo_name: SleezusJ/librosa | max_forks_repo_head_hexsha: 0ac24ab2894f925cfa3155789304850543e58949 | max_forks_repo_licenses: ["ISC"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Pitch-tracking and tuning estimation"""
import warnings
import numpy as np
from .spectrum import _spectrogram
from . import time_frequency
from .._cache import cache
from .. import util
__all__ = ["estimate_tuning", "pitch_tuning", "piptrack"]
def estimate_tuning(
y=None, sr=22050, S=None, n_fft=2048, resolution=0.01, bins_per_octave=12, **kwargs
):
"""Estimate the tuning of an audio time series or spectrogram input.
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to measurements in cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
kwargs : additional keyword arguments
Additional arguments passed to `piptrack`
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
piptrack
Pitch tracking by parabolic interpolation
Examples
--------
>>> # With time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr)
0.089999999999999969
>>> # In tenths of a cent
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
0.093999999999999972
>>> # Using spectrogram input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.estimate_tuning(S=S, sr=sr)
0.089999999999999969
>>> # Using pass-through arguments to `librosa.piptrack`
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, n_fft=8192,
... fmax=librosa.note_to_hz('G#9'))
0.070000000000000062
"""
pitch, mag = piptrack(y=y, sr=sr, S=S, n_fft=n_fft, **kwargs)
# Only count magnitude where frequency is > 0
pitch_mask = pitch > 0
if pitch_mask.any():
threshold = np.median(mag[pitch_mask])
else:
threshold = 0.0
return pitch_tuning(
pitch[(mag >= threshold) & pitch_mask],
resolution=resolution,
bins_per_octave=bins_per_octave,
)
def pitch_tuning(frequencies, resolution=0.01, bins_per_octave=12):
"""Given a collection of pitches, estimate its tuning offset
(in fractions of a bin) relative to A440=440.0Hz.
Parameters
----------
frequencies : array-like, float
A collection of frequencies detected in the signal.
See `piptrack`
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
estimate_tuning
Estimating tuning from time-series or spectrogram input
Examples
--------
>>> # Generate notes at +25 cents
>>> freqs = librosa.cqt_frequencies(24, 55, tuning=0.25)
>>> librosa.pitch_tuning(freqs)
0.25
>>> # Track frequencies from a real spectrogram
>>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
>>> # Select out pitches with high energy
>>> pitches = pitches[magnitudes > np.median(magnitudes)]
>>> librosa.pitch_tuning(pitches)
0.089999999999999969
"""
frequencies = np.atleast_1d(frequencies)
# Trim out any DC components
frequencies = frequencies[frequencies > 0]
if not np.any(frequencies):
warnings.warn("Trying to estimate tuning from empty frequency set.")
return 0.0
# Compute the residual relative to the number of bins
residual = np.mod(bins_per_octave * time_frequency.hz_to_octs(frequencies), 1.0)
# Are we on the wrong side of the semitone?
# A residual of 0.95 is more likely to be a deviation of -0.05
# from the next tone up.
residual[residual >= 0.5] -= 1.0
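    # Worked example (illustrative numbers): with the default A440 reference,
    # time_frequency.hz_to_octs(f) reduces to log2(f / 27.5), so a pitch 25 cents
    # sharp of A440 gives 12 * hz_to_octs(440 * 2**(0.25 / 12)) = 48.25 and a
    # residual of +0.25 bins, while a raw residual of 0.95 is folded to -0.05,
    # i.e. 5 cents flat of the next semitone up.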
bins = np.linspace(-0.5, 0.5, int(np.ceil(1.0 / resolution)) + 1)
counts, tuning = np.histogram(residual, bins)
# return the histogram peak
return tuning[np.argmax(counts)]
@cache(level=30)
def piptrack(
y=None,
sr=22050,
S=None,
n_fft=2048,
hop_length=None,
fmin=150.0,
fmax=4000.0,
threshold=0.1,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
ref=None,
):
"""Pitch tracking on thresholded parabolically-interpolated STFT.
This implementation uses the parabolic interpolation method described by [1]_.
.. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
hop_length : int > 0 [scalar] or None
number of samples to hop
threshold : float in `(0, 1)`
A bin in spectrum `S` is considered a pitch when it is greater than
`threshold*ref(S)`.
By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
each column).
fmin : float > 0 [scalar]
lower frequency cutoff.
fmax : float > 0 [scalar]
upper frequency cutoff.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
ref : scalar or callable [default=np.max]
If scalar, the reference value against which `S` is compared for determining
pitches.
If callable, the reference value is computed as `ref(S, axis=0)`.
.. note::
One of `S` or `y` must be provided.
If `S` is not given, it is computed from `y` using
the default parameters of `librosa.core.stft`.
Returns
-------
pitches : np.ndarray [shape=(d, t)]
magnitudes : np.ndarray [shape=(d,t)]
Where `d` is the subset of FFT bins within `fmin` and `fmax`.
`pitches[f, t]` contains instantaneous frequency at bin
`f`, time `t`
`magnitudes[f, t]` contains the corresponding magnitudes.
Both `pitches` and `magnitudes` take value 0 at bins
of non-maximal magnitude.
Notes
-----
This function caches at level 30.
Examples
--------
Computing pitches from a waveform input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
Or from a spectrogram input
>>> S = np.abs(librosa.stft(y))
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr)
Or with an alternate reference value for pitch detection, where
values above the mean spectral energy in each frame are counted as pitches
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr, threshold=1,
... ref=np.mean)
"""
# Check that we received an audio time series or STFT
S, n_fft = _spectrogram(
y=y,
S=S,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
)
# Make sure we're dealing with magnitudes
S = np.abs(S)
# Truncate to feasible region
fmin = np.maximum(fmin, 0)
fmax = np.minimum(fmax, float(sr) / 2)
fft_freqs = time_frequency.fft_frequencies(sr=sr, n_fft=n_fft)
# Do the parabolic interpolation everywhere,
# then figure out where the peaks are
# then restrict to the feasible range (fmin:fmax)
avg = 0.5 * (S[2:] - S[:-2])
shift = 2 * S[1:-1] - S[2:] - S[:-2]
# Suppress divide-by-zeros.
# Points where shift == 0 will never be selected by localmax anyway
shift = avg / (shift + (np.abs(shift) < util.tiny(shift)))
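    # Derivation note: fitting a parabola through adjacent bins
    # (a, b, c) = (S[k-1, t], S[k, t], S[k+1, t]) places its vertex at an offset of
    #     p = 0.5 * (c - a) / (2 * b - a - c)
    # bins from bin k, which is exactly `avg / shift` above; the interpolated peak
    # height is b + 0.5 * avg * p, the `dskew` correction added to the magnitudes
    # below.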
# Pad back up to the same shape as S
avg = np.pad(avg, ([1, 1], [0, 0]), mode="constant")
shift = np.pad(shift, ([1, 1], [0, 0]), mode="constant")
dskew = 0.5 * avg * shift
# Pre-allocate output
pitches = np.zeros_like(S)
mags = np.zeros_like(S)
# Clip to the viable frequency range
freq_mask = ((fmin <= fft_freqs) & (fft_freqs < fmax)).reshape((-1, 1))
# Compute the column-wise local max of S after thresholding
# Find the argmax coordinates
if ref is None:
ref = np.max
if callable(ref):
ref_value = threshold * ref(S, axis=0)
else:
ref_value = np.abs(ref)
idx = np.argwhere(freq_mask & util.localmax(S * (S > ref_value)))
# Store pitch and magnitude
pitches[idx[:, 0], idx[:, 1]] = (
(idx[:, 0] + shift[idx[:, 0], idx[:, 1]]) * float(sr) / n_fft
)
mags[idx[:, 0], idx[:, 1]] = S[idx[:, 0], idx[:, 1]] + dskew[idx[:, 0], idx[:, 1]]
return pitches, mags
avg_line_length: 28.701117 | max_line_length: 87 | alphanum_fraction: 0.615961

hexsha: 06299ca6092ae34fa30eae51cacfad16287d3278 | size: 113 | ext: py | lang: Python
max_stars_repo_path: events/admin.py | max_stars_repo_name: anurag0singh/Jagrati | max_stars_repo_head_hexsha: d4487e08368da38cf53a77dc1303ea8841c71ba9 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: events/admin.py | max_issues_repo_name: anurag0singh/Jagrati | max_issues_repo_head_hexsha: d4487e08368da38cf53a77dc1303ea8841c71ba9 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: events/admin.py | max_forks_repo_name: anurag0singh/Jagrati | max_forks_repo_head_hexsha: d4487e08368da38cf53a77dc1303ea8841c71ba9 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(events)
avg_line_length: 18.833333 | max_line_length: 32 | alphanum_fraction: 0.787611

hexsha: a23957a7c721819d3df0d1e6ee1e1afcbc307248 | size: 169 | ext: py | lang: Python
max_stars_repo_path: mollie_oscar/urls.py | max_stars_repo_name: fourdigits/django-oscar-mollie | max_stars_repo_head_hexsha: 1d182bf1bcfc6378511b4315c5b2dcb8f42e94a8 | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: mollie_oscar/urls.py | max_issues_repo_name: fourdigits/django-oscar-mollie | max_issues_repo_head_hexsha: 1d182bf1bcfc6378511b4315c5b2dcb8f42e94a8 | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: mollie_oscar/urls.py | max_forks_repo_name: fourdigits/django-oscar-mollie | max_forks_repo_head_hexsha: 1d182bf1bcfc6378511b4315c5b2dcb8f42e94a8 | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from django.conf.urls import url
from . import views
app_name = "mollie_oscar"
urlpatterns = [
url(r'^webhook/', views.WebhookView.as_view(), name='webhook'),
]
avg_line_length: 15.363636 | max_line_length: 67 | alphanum_fraction: 0.698225

hexsha: aa578285ef931b74bced5aa21e5561a330755710 | size: 69 | ext: py | lang: Python
max_stars_repo_path: modules/python-codes/modules/variables/04-complex.py | max_stars_repo_name: drigols/Studies | max_stars_repo_head_hexsha: 9c293156935b491ded24be6b511daac67fd43538 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-09-06T22:17:19.000Z | max_stars_repo_stars_event_max_datetime: 2020-09-06T22:17:19.000Z
max_issues_repo_path: modules/python-codes/modules/variables/04-complex.py | max_issues_repo_name: drigols/Studies | max_issues_repo_head_hexsha: 9c293156935b491ded24be6b511daac67fd43538 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: modules/python-codes/modules/variables/04-complex.py | max_forks_repo_name: drigols/Studies | max_forks_repo_head_hexsha: 9c293156935b491ded24be6b511daac67fd43538 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Complex
cmplex = 2 + 3j
print(cmplex)
print(type(cmplex))
print()
avg_line_length: 9.857143 | max_line_length: 19 | alphanum_fraction: 0.681159

hexsha: cbb026923ec0dff98d77eba22b728fd35917ca25 | size: 15,437 | ext: py | lang: Python
max_stars_repo_path: sockeye/decoder.py | max_stars_repo_name: annacurrey/sockeye | max_stars_repo_head_hexsha: 429f8478b29d7d28d5bb22dcebd080531f7e6f4e | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-08-12T18:02:59.000Z | max_stars_repo_stars_event_max_datetime: 2020-08-12T18:02:59.000Z
max_issues_repo_path: sockeye/decoder.py | max_issues_repo_name: barbaradarques/sockeye | max_issues_repo_head_hexsha: 92a020a25cbe75935c700ce2f29b286b31a87189 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: sockeye/decoder.py | max_forks_repo_name: barbaradarques/sockeye | max_forks_repo_head_hexsha: 92a020a25cbe75935c700ce2f29b286b31a87189 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Decoders for sequence-to-sequence models.
"""
import logging
from abc import abstractmethod
from itertools import islice
from typing import Dict, List, Optional, Tuple, Union, Type
import mxnet as mx
from . import constants as C
from . import layers
from . import transformer
logger = logging.getLogger(__name__)
DecoderConfig = Union[transformer.TransformerConfig]
def get_decoder(config: DecoderConfig, inference_only: bool = False, prefix: str = '', dtype: str = C.DTYPE_FP32) -> 'Decoder':
return Decoder.get_decoder(config, inference_only, prefix, dtype)
class Decoder(mx.gluon.Block):
"""
Generic decoder interface.
    A decoder needs to implement code to decode a target sequence known in advance (decode_seq),
    and code to decode a single word given its decoder states (forward).
The latter is typically used for inference graphs in beam search.
For the inference module to be able to keep track of decoder's states
a decoder provides methods to return initial states (init_states), state variables and their shapes.
"""
__registry = {} # type: Dict[Type[DecoderConfig], Tuple[Type['Decoder'], str]]
@classmethod
def register(cls, config_type: Type[DecoderConfig], suffix: str):
"""
Registers decoder type for configuration. Suffix is appended to decoder prefix.
:param config_type: Configuration type for decoder.
:param suffix: String to append to decoder prefix.
:return: Class decorator.
"""
def wrapper(target_cls):
cls.__registry[config_type] = (target_cls, suffix)
return target_cls
return wrapper
@classmethod
def get_decoder(cls, config: DecoderConfig, inference_only: bool, prefix: str, dtype: str) -> 'Decoder':
"""
Creates decoder based on config type.
:param config: Decoder config.
:param inference_only: Create a decoder that is only used for inference.
:param prefix: Prefix to prepend for decoder.
:param dtype: Data type for weights.
:return: Decoder instance.
"""
config_type = type(config)
if config_type not in cls.__registry:
raise ValueError('Unsupported decoder configuration %s' % config_type.__name__)
decoder_cls, suffix = cls.__registry[config_type]
# TODO: move final suffix/prefix construction logic into config builder
return decoder_cls(config=config, inference_only=inference_only, prefix=prefix + suffix, dtype=dtype)
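    # Illustrative use of the factory above (sketch only; `cfg` is an assumed
    # TransformerConfig instance):
    #   decoder = Decoder.get_decoder(cfg, inference_only=True,
    #                                 prefix='decoder_', dtype=C.DTYPE_FP32)
    # The registry maps type(cfg) -> (decoder class, suffix), so the call
    # dispatches to the registered class and appends its suffix to the prefix.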
@abstractmethod
def __init__(self):
super().__init__()
@abstractmethod
def state_structure(self) -> str:
raise NotImplementedError()
@abstractmethod
def init_state_from_encoder(self,
encoder_outputs: mx.nd.NDArray,
encoder_valid_length: Optional[mx.nd.NDArray] = None) -> List[mx.nd.NDArray]:
raise NotImplementedError()
@abstractmethod
def decode_seq(self, inputs: mx.nd.NDArray, states: List[mx.nd.NDArray]):
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param inputs: Encoded source: (batch_size, source_encoded_max_length, encoder_depth).
:param states: List of initial states, as given by init_state_from_encoder().
:return: Decoder output. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
raise NotImplementedError()
@abstractmethod
def get_num_hidden(self):
raise NotImplementedError()
@Decoder.register(transformer.TransformerConfig, C.TRANSFORMER_DECODER_PREFIX)
class TransformerDecoder(Decoder, mx.gluon.HybridBlock):
"""
Transformer decoder as in Vaswani et al, 2017: Attention is all you need.
    In training, the scores for each position of the known target sequence are computed in parallel,
    which yields most of the speedup.
At inference time, the decoder block is evaluated again and again over a maximum length input sequence that is
initially filled with zeros and grows during beam search with predicted tokens. Appropriate masking at every
time-step ensures correct self-attention scores and is updated with every step.
:param config: Transformer configuration.
:param prefix: Name prefix for symbols of this decoder.
:param inference_only: Only use the model for inference enabling some optimizations, such as disabling the auto-regressive mask.
"""
def __init__(self,
config: transformer.TransformerConfig,
prefix: str = C.TRANSFORMER_DECODER_PREFIX,
inference_only: bool = False,
dtype: str = C.DTYPE_FP32) -> None:
Decoder.__init__(self)
mx.gluon.HybridBlock.__init__(self, prefix=prefix)
self.config = config
self.inference_only = inference_only
with self.name_scope():
self.pos_embedding = layers.PositionalEmbeddings(weight_type=self.config.positional_embedding_type,
num_embed=self.config.model_size,
max_seq_len=self.config.max_seq_len_target,
prefix=C.TARGET_POSITIONAL_EMBEDDING_PREFIX,
scale_up_input=True,
scale_down_positions=False)
self.autoregressive_bias = transformer.AutoRegressiveBias(prefix="autoregressive_bias_")
self.valid_length_mask = transformer.TransformerValidLengthMask(num_heads=self.config.attention_heads,
fold_heads=False,
name="bias")
self.layers = mx.gluon.nn.HybridSequential()
for i in range(config.num_layers):
self.layers.add(transformer.TransformerDecoderBlock(config, prefix="%d_" % i, dtype=dtype,
inference_only=self.inference_only))
self.final_process = transformer.TransformerProcessBlock(sequence=config.preprocess_sequence,
dropout=config.dropout_prepost,
prefix="final_process_",
num_hidden=self.config.model_size)
def state_structure(self) -> str:
"""
Returns the structure of states used for manipulation of the states.
Each state is either labeled 's' for step, 'b' for source_mask, 'd' for decoder, or 'e' for encoder.
"""
structure = ''
if self.inference_only:
structure += C.STEP_STATE + C.BIAS_STATE + C.ENCODER_STATE * self.config.num_layers * 2
else:
structure += C.STEP_STATE + C.ENCODER_STATE + C.BIAS_STATE
total_num_states = sum(layer.num_state_tensors for layer in self.layers)
structure += C.DECODER_STATE * total_num_states
return structure
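    # Example of the resulting string (sketch, based on the state labels documented
    # above): with 6 layers and one autoregressive state tensor per layer,
    # inference_only=True would give 's' + 'b' + 'e' * 12 + 'd' * 6 -- one
    # character per state tensor, in the order the states are passed around.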
def init_state_from_encoder(self,
encoder_outputs: mx.nd.NDArray,
encoder_valid_length: Optional[mx.nd.NDArray] = None) -> List[mx.nd.NDArray]:
"""
Returns the initial states given encoder output. States for teacher-forced training are encoder outputs
and a valid length mask for encoder outputs.
At inference, this method returns the following state tuple:
valid length bias, step state,
[projected encoder attention keys, projected encoder attention values] * num_layers,
[autoregressive state dummies] * num_layers.
:param encoder_outputs: Encoder outputs. Shape: (batch, source_length, encoder_dim).
:param encoder_valid_length: Valid lengths of encoder outputs. Shape: (batch,).
:return: Initial states.
"""
source_mask = self.valid_length_mask(encoder_outputs, encoder_valid_length)
# (batch_size, 1)
step = mx.nd.expand_dims(mx.nd.zeros_like(encoder_valid_length), axis=1)
if self.inference_only:
# Encoder projection caching, therefore we don't pass the encoder_outputs
states = [step, source_mask]
for layer in self.layers:
encoder_attention_keys, encoder_attention_values = \
layer.enc_attention.project_and_isolate_heads(mx.nd, encoder_outputs)
states.append(encoder_attention_keys)
states.append(encoder_attention_values)
else:
# NO encoder projection caching
states = [step, encoder_outputs, source_mask]
batch_size = encoder_outputs.shape[0]
dummy_autoregr_states = [mx.nd.zeros(layer.get_states_shape(batch_size),
ctx=encoder_outputs.context,
dtype=encoder_outputs.dtype)
for layer in self.layers
for _ in range(layer.num_state_tensors)]
states += dummy_autoregr_states
return states
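    # State layout sketch at inference time (mirrors the docstring above):
    # [step, source_mask, per-layer projected encoder keys and values,
    #  per-layer zero-filled autoregressive dummies]; the exact tensor shapes
    # are delegated to the attention and decoder-block implementations.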
def decode_seq(self, inputs: mx.nd.NDArray, states: List[mx.nd.NDArray]):
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param inputs: Encoded source: (batch_size, source_encoded_max_length, encoder_depth).
:param states: List of initial states, as given by init_state_from_encoder().
:return: Decoder output. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
outputs, _ = self.forward(inputs, states)
return outputs
def forward(self, step_input, states):
"""
Run forward pass of the decoder.
step_input is either:
(batch, num_hidden): single decoder step at inference time
(batch, seq_len, num_hidden): full sequence decode during training.
states is either:
if self.inference_only == False: (Training and Checkpoint decoder during training)
steps, encoder_outputs, source_bias, layer_caches...
else: (during translation outside of training)
steps, source_bias, layer_caches..., projected encoder outputs...
"""
input_shape = step_input.shape
is_inference = len(input_shape) == 2
if is_inference:
# Just add the length dimension:
# (batch, num_hidden) -> (batch, 1, num_hidden)
step_input = mx.nd.expand_dims(step_input, axis=1)
else:
assert not self.inference_only, "Decoder created with inference_only=True but used during training."
# Replace the single step by multiple steps for training
step, *states = states
# Create steps (1, trg_seq_len,)
steps = mx.nd.expand_dims(mx.nd.arange(step_input.shape[1], ctx=step_input.context), axis=0)
states = [steps] + states
# run decoder op
target, autoregr_states = super().forward(step_input, states)
if is_inference:
# During inference, length dimension of decoder output has size 1, squeeze it
# (batch, num_hidden)
target = mx.nd.reshape(target, shape=(-1, self.get_num_hidden()))
# We also increment time step state (1st state in the list) and add new caches
step = states[0] + 1
if self.inference_only:
# pass in cached encoder states
encoder_attention_keys_values = states[2:2 + self.config.num_layers * 2]
new_states = [step, states[1]] + encoder_attention_keys_values + autoregr_states
else:
encoder_outputs = states[1]
source_mask = states[2]
new_states = [step, encoder_outputs, source_mask] + autoregr_states
assert len(new_states) == len(states)
else:
new_states = None # we don't care about states in training
return target, new_states
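    # Usage sketch (derived from the docstring above): during training,
    # step_input is the full embedded target sequence (batch, seq_len,
    # num_hidden) and new_states is None; during beam search, step_input is a
    # single step (batch, num_hidden) and the returned new_states are fed back
    # into the next forward() call.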
def hybrid_forward(self, F, step_input, states):
mask = None
if self.inference_only:
steps, source_mask, *other = states
source_encoded = None # use constant pre-computed key value projections from the states
enc_att_kv = other[:self.config.num_layers * 2]
enc_att_kv = [enc_att_kv[i:i + 2] for i in range(0, len(enc_att_kv), 2)]
autoregr_states = other[self.config.num_layers * 2:]
else:
if any(layer.needs_mask for layer in self.layers):
mask = self.autoregressive_bias(step_input) # mask: (1, length, length)
steps, source_encoded, source_mask, *autoregr_states = states
enc_att_kv = [(None, None) for _ in range(self.config.num_layers)]
if any(layer.num_state_tensors > 1 for layer in self.layers):
# separates autoregressive states by layer
states_iter = iter(autoregr_states)
autoregr_states = [list(islice(states_iter, 0, layer.num_state_tensors)) for layer in self.layers]
# Fold the heads of source_mask (batch_size, num_heads, seq_len) -> (batch_size * num_heads, 1, seq_len)
source_mask = F.expand_dims(F.reshape(source_mask, shape=(-3, -2)), axis=1)
# target: (batch_size, length, model_size)
target = self.pos_embedding(step_input, steps)
if self.config.dropout_prepost > 0.0:
target = F.Dropout(data=target, p=self.config.dropout_prepost)
new_autoregr_states = []
for layer, layer_autoregr_state, (enc_att_k, enc_att_v) in zip(self.layers, autoregr_states, enc_att_kv):
target, new_layer_autoregr_state = layer(target,
mask,
source_encoded,
source_mask,
layer_autoregr_state,
enc_att_k, enc_att_v)
new_autoregr_states += [*new_layer_autoregr_state]
# NOTE: the list expansion is needed in order to handle both a tuple (of Symbols) and a Symbol as a new state
target = self.final_process(target, None)
return target, new_autoregr_states
def get_num_hidden(self):
return self.config.model_size
| 46.496988
| 132
| 0.624862
|
96e2f15f08759487beb7568cc5c2f9717e4d7201
| 141,747
|
py
|
Python
|
datalad/support/gitrepo.py
|
emdupre/datalad
|
9dce81cb4d3c004150571bb7d9fbddcd8338eba2
|
[
"MIT"
] | null | null | null |
datalad/support/gitrepo.py
|
emdupre/datalad
|
9dce81cb4d3c004150571bb7d9fbddcd8338eba2
|
[
"MIT"
] | null | null | null |
datalad/support/gitrepo.py
|
emdupre/datalad
|
9dce81cb4d3c004150571bb7d9fbddcd8338eba2
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Interface to Git via GitPython
For further information on GitPython see http://gitpython.readthedocs.org/
"""
from itertools import chain
import logging
from collections import OrderedDict
import re
import shlex
import time
import os
import os.path as op
import warnings
from os import linesep
from os.path import join as opj
from os.path import exists
from os.path import normpath
from os.path import isabs
from os.path import commonprefix
from os.path import relpath
from os.path import realpath
from os.path import dirname
from os.path import basename
from os.path import curdir
from os.path import pardir
from os.path import sep
import posixpath
from functools import wraps
from weakref import WeakValueDictionary
from six import string_types
from six import text_type
from six import add_metaclass
from six import iteritems
from six import PY2
import git as gitpy
from git import RemoteProgress
from gitdb.exc import BadName
from git.exc import GitCommandError
from git.exc import NoSuchPathError
from git.exc import InvalidGitRepositoryError
from git.objects.blob import Blob
from datalad.support.due import due, Doi
from datalad import ssh_manager
from datalad.cmd import GitRunner
from datalad.cmd import BatchedCommand
from datalad.consts import GIT_SSH_COMMAND
from datalad.dochelpers import exc_str
from datalad.config import ConfigManager
import datalad.utils as ut
from datalad.utils import Path
from datalad.utils import assure_bytes
from datalad.utils import assure_list
from datalad.utils import optional_args
from datalad.utils import on_windows
from datalad.utils import getpwd
from datalad.utils import posix_relpath
from datalad.utils import assure_dir
from datalad.utils import generate_file_chunks
from ..utils import assure_unicode
# imports from same module:
from .external_versions import external_versions
from .exceptions import CommandError
from .exceptions import DeprecatedError
from .exceptions import FileNotInRepositoryError
from .exceptions import GitIgnoreError
from .exceptions import InvalidGitReferenceError
from .exceptions import MissingBranchError
from .exceptions import OutdatedExternalDependencyWarning
from .exceptions import PathKnownToRepositoryError
from .network import RI, PathRI
from .network import is_ssh
from .repo import Flyweight
from .repo import RepoInterface
# shortcuts
_curdirsep = curdir + sep
_pardirsep = pardir + sep
lgr = logging.getLogger('datalad.gitrepo')
_lgr_level = lgr.getEffectiveLevel()
if _lgr_level <= 2:
from ..log import LoggerHelper
# Let's also enable gitpy etc debugging
gitpy_lgr = LoggerHelper(logtarget="git").get_initialized_logger()
gitpy_lgr.setLevel(_lgr_level)
gitpy_lgr.propagate = True
# Override default GitPython's DB backend to talk directly to git so it doesn't
# interfere with possible operations performed by gc/repack
default_git_odbt = gitpy.GitCmdObjectDB
# TODO: Figure out how GIT_PYTHON_TRACE ('full') is supposed to be used.
# Didn't work as expected on a first try. Probably there is a neatier way to
# log Exceptions from git commands.
# TODO: ignore leading and/or trailing underscore to allow for
# python-reserved words
@optional_args
def kwargs_to_options(func, split_single_char_options=True,
target_kw='options'):
"""Decorator to provide convenient way to pass options to command calls.
Parameters
----------
func: Callable
function to decorate
split_single_char_options: bool
whether or not to split key and value of single char keyword arguments
into two subsequent entries of the list
target_kw: str
keyword argument to pass the generated list of cmdline arguments to
Returns
-------
Callable
"""
# TODO: don't overwrite options, but join
@wraps(func)
def newfunc(self, *args, **kwargs):
t_kwargs = dict()
t_kwargs[target_kw] = \
gitpy.Git().transform_kwargs(
split_single_char_options=split_single_char_options,
**kwargs)
return func(self, *args, **t_kwargs)
return newfunc
def to_options(**kwargs):
"""Transform keyword arguments into a list of cmdline options
Parameters
----------
split_single_char_options: bool
kwargs:
Returns
-------
list
"""
# TODO: borrow_docs!
return gitpy.Git().transform_kwargs(**kwargs)
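# Illustration of the transformation (sketch; exact ordering is up to GitPython's
# transform_kwargs): to_options(update=True, C='/my/path') is expected to yield
# something like ['--update', '-C', '/my/path'] -- single-character keywords get
# a single dash, longer ones a double dash, underscores become dashes, and a
# boolean True turns the keyword into a bare flag.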
def _normalize_path(base_dir, path):
"""Helper to check paths passed to methods of this class.
Checks whether `path` is beneath `base_dir` and normalizes it.
Additionally paths are converted into relative paths with respect to
`base_dir`, considering PWD in case of relative paths. This
is intended to be used in repository classes, which means that
`base_dir` usually will be the repository's base directory.
Parameters
----------
base_dir: str
directory to serve as base to normalized, relative paths
path: str
path to be normalized
Returns
-------
str:
path, that is a relative path with respect to `base_dir`
"""
if not path:
return path
base_dir = realpath(base_dir) # realpath OK
# path = normpath(path)
# Note: disabled normpath, because it may break paths containing symlinks;
# But we don't want to realpath relative paths, in case cwd isn't the
# correct base.
if isabs(path):
# path might already be a symlink pointing to annex etc,
# so realpath only its directory, to get "inline" with
# realpath(base_dir) above
path = opj(realpath(dirname(path)), basename(path)) # realpath OK
# Executive decision was made to not do this kind of magic!
#
# elif commonprefix([realpath(getpwd()), base_dir]) == base_dir:
# # If we are inside repository, rebuilt relative paths.
# path = opj(realpath(getpwd()), path)
#
# BUT with relative curdir/pardir start it would assume relative to curdir
#
elif path.startswith(_curdirsep) or path.startswith(_pardirsep):
path = normpath(opj(realpath(getpwd()), path)) # realpath OK
else:
# We were called from outside the repo. Therefore relative paths
# are interpreted as being relative to self.path already.
return path
if commonprefix([path, base_dir]) != base_dir:
raise FileNotInRepositoryError(msg="Path outside repository: %s"
% path, filename=path)
return relpath(path, start=base_dir)
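# Worked examples (hypothetical paths), following the rules above:
#   _normalize_path('/data/repo', '/data/repo/sub/f.txt') -> 'sub/f.txt'
#   _normalize_path('/data/repo', 'sub/f.txt')            -> 'sub/f.txt' (taken as repo-relative)
#   _normalize_path('/data/repo', './f.txt')              -> resolved against PWD first
#   _normalize_path('/data/repo', '/elsewhere/f.txt')     -> FileNotInRepositoryError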
@optional_args
def normalize_path(func):
"""Decorator to provide unified path conversion for a single file
Unlike normalize_paths, intended to be used for functions dealing with a
single filename at a time
Note
----
This is intended to be used within the repository classes and therefore
returns a class method!
The decorated function is expected to take a path at
first positional argument (after 'self'). Additionally the class `func`
is a member of, is expected to have an attribute 'path'.
"""
@wraps(func)
def newfunc(self, file_, *args, **kwargs):
file_new = _normalize_path(self.path, file_)
return func(self, file_new, *args, **kwargs)
return newfunc
@optional_args
def normalize_paths(func, match_return_type=True, map_filenames_back=False,
serialize=False):
"""Decorator to provide unified path conversions.
Note
----
This is intended to be used within the repository classes and therefore
returns a class method!
The decorated function is expected to take a path or a list of paths at
first positional argument (after 'self'). Additionally the class `func`
is a member of, is expected to have an attribute 'path'.
Accepts either a list of paths or a single path in a str. Passes a list
    to the decorated function either way, but the return value depends on
    match_return_type and on whether a single path or a list was passed in.
    If a call to the wrapped function passes normalize_paths=False, no
    normalization happens for that call (used for calls to wrapped functions
    from within other wrapped functions, while the CWD may be inside a
    repository)
Parameters
----------
match_return_type : bool, optional
If True, and a single string was passed in, it would return the first
element of the output (after verifying that it is a list of length 1).
      This makes it easier to work with single-file input.
map_filenames_back : bool, optional
      If True and the returned value is a dictionary, it is assumed to carry one
      entry per file, and the filenames are mapped back from the normalized
      (repo-root relative) paths to the paths as originally provided
serialize : bool, optional
      Loop through the files, passing them to the function one at a time.
      This simplifies the implementation of, and the interface to, annex commands
      which do not take multiple args in the same call (e.g. checkpresentkey)
"""
@wraps(func)
def newfunc(self, files, *args, **kwargs):
normalize = _normalize_path if kwargs.pop('normalize_paths', True) \
else lambda rpath, filepath: filepath
if files:
if isinstance(files, string_types) or not files:
files_new = [normalize(self.path, files)]
single_file = True
elif isinstance(files, list):
files_new = [normalize(self.path, path) for path in files]
single_file = False
else:
raise ValueError("_files_decorator: Don't know how to handle "
"instance of %s." % type(files))
else:
single_file = None
files_new = []
if map_filenames_back:
def remap_filenames(out):
"""Helper to map files back to non-normalized paths"""
if isinstance(out, dict):
assert(len(out) == len(files_new))
files_ = [files] if single_file else files
mapped = out.__class__()
for fin, fout in zip(files_, files_new):
mapped[fin] = out[fout]
return mapped
else:
return out
else:
remap_filenames = lambda x: x
if serialize: # and not single_file:
result = [
func(self, f, *args, **kwargs)
for f in files_new
]
else:
result = func(self, files_new, *args, **kwargs)
if single_file is None:
# no files were provided, nothing we can do really
return result
elif (result is None) or not match_return_type or not single_file:
# If function doesn't return anything or no denormalization
# was requested or it was not a single file
return remap_filenames(result)
elif single_file:
if len(result) != 1:
# Magic doesn't apply
return remap_filenames(result)
elif isinstance(result, (list, tuple)):
return result[0]
elif isinstance(result, dict) and tuple(result)[0] == files_new[0]:
# assume that returned dictionary has files as keys.
return tuple(result.values())[0]
else:
# no magic can apply
return remap_filenames(result)
else:
            raise RuntimeError("should not have got here... check logic")
return newfunc
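# Typical application within this module (see e.g. GitRepo.add and GitRepo.remove
# below): a method decorated with @normalize_paths can be called either as
# repo.remove('sub/f.txt') or as repo.remove(['a.txt', 'b.txt']); the wrapper
# normalizes the paths relative to repo.path and, with match_return_type=True,
# unwraps the single-element result for the single-path call.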
def check_git_configured():
"""Do a check if git is configured (user.name and user.email are set)
Raises
------
RuntimeError if any of those two variables are not set
Returns
-------
dict with user.name and user.email entries
"""
check_runner = GitRunner()
vals = {}
exc_ = ""
for c in 'user.name', 'user.email':
try:
v, err = check_runner.run(['git', 'config', c])
vals[c] = v.rstrip('\n')
except CommandError as exc:
exc_ += exc_str(exc)
if exc_:
lgr.warning(
"It is highly recommended to configure git first (set both "
"user.name and user.email) before using DataLad. Failed to "
"verify that git is configured: %s. Some operations might fail or "
"not perform correctly." % exc_
)
return vals
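# Example return value (hypothetical identity): {'user.name': 'A U Thor',
# 'user.email': 'author@example.com'}; unset keys merely trigger the warning above.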
def _remove_empty_items(list_):
"""Remove empty entries from list
This is needed, since some functions of GitPython may convert
an empty entry to '.', when used with a list of paths.
    Parameters
    ----------
list_: list of str
Returns
-------
list of str
"""
if not isinstance(list_, list):
lgr.warning(
"_remove_empty_items() called with non-list type: %s" % type(list_))
return list_
return [file_ for file_ in list_ if file_]
def Repo(*args, **kwargs):
"""Factory method around gitpy.Repo to consistently initiate with different
backend
"""
# TODO: This probably doesn't work as intended (or at least not as
# consistently as intended). gitpy.Repo could be instantiated by
# classmethods Repo.init or Repo.clone_from. In these cases 'odbt'
# would be needed as a parameter to these methods instead of the
# constructor.
if 'odbt' not in kwargs:
kwargs['odbt'] = default_git_odbt
return gitpy.Repo(*args, **kwargs)
def split_remote_branch(branch):
"""Splits a remote branch's name into the name of the remote and the name
of the branch.
Parameters
----------
branch: str
the remote branch's name to split
Returns
-------
list of str
"""
assert '/' in branch, \
"remote branch %s must have had a /" % branch
assert not branch.endswith('/'), \
"branch name with trailing / is invalid. (%s)" % branch
return branch.split('/', 1)
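# e.g. split_remote_branch('origin/feature/x') -> ['origin', 'feature/x'];
# only the first '/' separates the remote name from the branch name.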
def guard_BadName(func):
"""A helper to guard against BadName exception
Workaround for
https://github.com/gitpython-developers/GitPython/issues/768
also see https://github.com/datalad/datalad/issues/2550
Let's try to precommit (to flush anything flushable) and do
it again
"""
@wraps(func)
def wrapped(repo, *args, **kwargs):
try:
return func(repo, *args, **kwargs)
except BadName:
repo.precommit()
return func(repo, *args, **kwargs)
return wrapped
class GitPythonProgressBar(RemoteProgress):
"""A handler for Git commands interfaced by GitPython which report progress
"""
# GitPython operates with op_codes which are a mask for actions.
_known_ops = {
RemoteProgress.COUNTING: "counting objects",
RemoteProgress.COMPRESSING: "compressing objects",
RemoteProgress.WRITING: "writing objects",
RemoteProgress.RECEIVING: "receiving objects",
RemoteProgress.RESOLVING: "resolving stuff",
RemoteProgress.FINDING_SOURCES: "finding sources",
RemoteProgress.CHECKING_OUT: "checking things out"
}
# To overcome the bug when GitPython (<=2.1.11), with tentative fix
# in https://github.com/gitpython-developers/GitPython/pull/798
# we will collect error_lines from the last progress bar used by GitPython
# To do that reliably this class should be used as a ContextManager,
# or .close() should be called explicitly before analysis of this
# attribute is done.
# TODO: remove the workaround whenever new GitPython version provides
# it natively and we boost versioned dependency on it
_last_error_lines = None
def __init__(self, action):
super(GitPythonProgressBar, self).__init__()
self._action = action
from datalad.ui import ui
self._ui = ui
self._pbar = None
self._op_code = None
GitPythonProgressBar._last_error_lines = None
def __del__(self):
self.close()
def close(self):
GitPythonProgressBar._last_error_lines = self.error_lines
self._close_pbar()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _close_pbar(self):
if self._pbar:
self._pbar.finish()
self._pbar = None
def _get_human_msg(self, op_code):
"""Return human readable action message
"""
op_id = op_code & self.OP_MASK
op = self._known_ops.get(op_id, "doing other evil")
return "%s (%s)" % (self._action, op)
def update(self, op_code, cur_count, max_count=None, message=''):
# ATM we ignore message which typically includes bandwidth info etc
try:
if not max_count:
# spotted used by GitPython tests, so may be at times it is not
# known and assumed to be a 100%...? TODO
max_count = 100.0
if op_code:
# Apparently those are composite and we care only about the ones
# we know, so to avoid switching the progress bar for no good
# reason - first & with the mask
op_code = op_code & self.OP_MASK
if self._op_code is None or self._op_code != op_code:
# new type of operation
self._close_pbar()
self._pbar = self._ui.get_progressbar(
self._get_human_msg(op_code),
total=max_count,
unit=' objects'
)
self._op_code = op_code
if not self._pbar:
lgr.error("Ended up without progress bar... how?")
return
self._pbar.update(cur_count, increment=False)
except Exception as exc:
lgr.debug("GitPythonProgressBar errored with %s", exc_str(exc))
return
#import time; time.sleep(0.001) # to see that things are actually "moving"
        # without it we would get only a blink on initial 0 value, instead of
# a blink at some higher value. Anyways git provides those
# without flooding so should be safe to force here.
self._pbar.refresh()
@add_metaclass(Flyweight)
class GitRepo(RepoInterface):
"""Representation of a git repository
"""
# We use our sshrun helper
GIT_SSH_ENV = {'GIT_SSH_COMMAND': GIT_SSH_COMMAND,
'GIT_SSH_VARIANT': 'ssh'}
# We must check git config to have name and email set, but
# should do it once
_config_checked = False
# Begin Flyweight:
_unique_instances = WeakValueDictionary()
@classmethod
def _flyweight_id_from_args(cls, *args, **kwargs):
if args:
# to a certain degree we need to simulate an actual call to __init__
# and make sure, passed arguments are fitting:
# TODO: Figure out, whether there is a cleaner way to do this in a
# generic fashion
assert('path' not in kwargs)
path = args[0]
args = args[1:]
elif 'path' in kwargs:
path = kwargs.pop('path')
else:
raise TypeError("__init__() requires argument `path`")
if path is None:
raise AttributeError
# mirror what is happening in __init__
if isinstance(path, ut.PurePath):
path = text_type(path)
# Sanity check for argument `path`:
# raise if we cannot deal with `path` at all or
# if it is not a local thing:
path = RI(path).localpath
# resolve symlinks to make sure we have exactly one instance per
# physical repository at a time
path = realpath(path)
kwargs['path'] = path
return path, args, kwargs
@classmethod
def _flyweight_invalid(cls, id_):
return not cls.is_valid_repo(id_)
@classmethod
def _flyweight_reject(cls, id_, *args, **kwargs):
# TODO:
# This is a temporary approach. See PR # ...
# create = kwargs.pop('create', None)
# kwargs.pop('path', None)
# if create and kwargs:
# # we have `create` plus options other than `path`
# return "Call to {0}() with args {1} and kwargs {2} conflicts " \
# "with existing instance {3}." \
# "This is likely to be caused by inconsistent logic in " \
# "your code." \
# "".format(cls, args, kwargs, cls._unique_instances[id_])
pass
# End Flyweight
def __hash__(self):
# the flyweight key is already determining unique instances
# add the class name to distinguish from strings of a path
return hash((self.__class__.__name__, self.__weakref__.key))
# This is the least common denominator to claim that a user
# used DataLad.
# For now citing Zenodo's all (i.e., latest) version
@due.dcite(Doi("10.5281/zenodo.808846"),
# override path since there is no need ATM for such details
path="datalad",
description="DataLad - Data management and distribution platform")
def __init__(self, path, url=None, runner=None, create=True,
git_opts=None, repo=None, fake_dates=False,
create_sanity_checks=True,
**kwargs):
"""Creates representation of git repository at `path`.
Can also be used to create a git repository at `path`.
Parameters
----------
path: str
path to the git repository; In case it's not an absolute path,
it's relative to PWD
url: str, optional
DEPRECATED -- use .clone() class method
url to the to-be-cloned repository. Requires a valid git url
according to:
http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS .
create: bool, optional
if true, creates a git repository at `path` if there is none. Also
creates `path`, if it doesn't exist.
If set to false, an exception is raised in case `path` doesn't exist
or doesn't contain a git repository.
repo: git.Repo, optional
GitPython's Repo instance to (re)use if provided
create_sanity_checks: bool, optional
Whether to perform sanity checks during initialization (when
`create=True` and target path is not a valid repo already), such as
that new repository is not created in the directory where git already
tracks some files.
kwargs:
keyword arguments serving as additional options to the git-init
command. Therefore, it makes sense only if called with `create`.
Generally, this way of passing options to the git executable is
(or will be) used a lot in this class. It's a transformation of
python-style keyword arguments (or a `dict`) to command line arguments,
provided by GitPython.
A single character keyword will be prefixed by '-', multiple characters
by '--'. An underscore in the keyword becomes a dash. The value of the
keyword argument is used as the value for the corresponding command
line argument. Assigning a boolean creates a flag.
Examples:
no_commit=True => --no-commit
C='/my/path' => -C /my/path
"""
if url is not None:
raise DeprecatedError(
new=".clone() class method",
version="0.5.0",
msg="RF: url passed to init()"
)
# So that we "share" control paths with git/git-annex
if ssh_manager:
ssh_manager.assure_initialized()
if not GitRepo._config_checked:
check_git_configured()
GitRepo._config_checked = True
self.realpath = realpath(path)
# note: we may also want to distinguish between a path to the worktree
# and the actual repository
# Could be used to e.g. disable automatic garbage and autopacking
# ['-c', 'receive.autogc=0', '-c', 'gc.auto=0']
self._GIT_COMMON_OPTIONS = []
# actually no need with default GitPython db backend not in memory
# default_git_odbt but still allows for faster testing etc.
# May be eventually we would make it switchable _GIT_COMMON_OPTIONS = []
if git_opts is None:
git_opts = {}
if kwargs:
git_opts.update(kwargs)
self.path = path
self.cmd_call_wrapper = runner or GitRunner(cwd=self.path)
self._repo = repo
self._cfg = None
_valid_repo = GitRepo.is_valid_repo(path)
if create and not _valid_repo:
if repo is not None:
# `repo` passed with `create`, which doesn't make sense
raise TypeError("argument 'repo' must not be used with 'create'")
self._repo = self._create_empty_repo(path, create_sanity_checks, **git_opts)
else:
# Note: We used to call gitpy.Repo(path) here, which potentially
# raised NoSuchPathError or InvalidGitRepositoryError. This is
# used by callers of GitRepo.__init__() to detect whether we have a
# valid repo at `path`. Now, with switching to lazy loading property
# `repo`, we detect those cases without instantiating a
# gitpy.Repo().
if not exists(path):
raise NoSuchPathError(path)
if not _valid_repo:
raise InvalidGitRepositoryError(path)
# inject git options into GitPython's git call wrapper:
# Note: `None` currently can happen, when Runner's protocol prevents
# calls above from being actually executed (DryRunProtocol)
if self._repo is not None:
self._repo.git._persistent_git_options = self._GIT_COMMON_OPTIONS
# with DryRunProtocol path might still not exist
if exists(self.realpath):
self.inode = os.stat(self.realpath).st_ino
else:
self.inode = None
if fake_dates:
self.configure_fake_dates()
# Set by fake_dates_enabled to cache config value across this instance.
self._fake_dates_enabled = None
self.pathobj = ut.Path(self.path)
def _create_empty_repo(self, path, sanity_checks=True, **kwargs):
if not op.lexists(path):
os.makedirs(path)
elif sanity_checks and external_versions['cmd:git'] < '2.14.0':
warnings.warn(
"Your git version (%s) is too old, we will not safe-guard "
"against creating a new repository under already known to git "
"subdirectory" % external_versions['cmd:git'],
OutdatedExternalDependencyWarning
)
elif sanity_checks:
# Verify that we are not trying to initialize a new git repository
# under a directory some files of which are already tracked by git
# use case: https://github.com/datalad/datalad/issues/3068
try:
stdout, _ = self._git_custom_command(
None, ['git', 'ls-files'], cwd=path, expect_fail=True
)
if stdout:
raise PathKnownToRepositoryError(
"Failing to initialize new repository under %s where "
"following files are known to a repository above: %s"
% (path, stdout)
)
except CommandError:
# assume that all is good -- we are not under any repo
pass
cmd = ['git', 'init']
cmd.extend(kwargs.pop('_from_cmdline_', []))
cmd.extend(to_options(**kwargs))
lgr.debug(
"Initialize empty Git repository at '%s'%s",
path,
' %s' % cmd[2:] if cmd[2:] else '')
try:
stdout, stderr = self._git_custom_command(
None,
cmd,
cwd=path,
log_stderr=True,
log_stdout=True,
log_online=False,
expect_stderr=False,
shell=False,
# we don't want it to scream on stdout
expect_fail=True)
except CommandError as exc:
lgr.error(exc_str(exc))
raise
# we want to return None and have lazy eval take care of
# the rest
return
@property
def repo(self):
        # with DryRunProtocol the path might not exist
if exists(self.realpath):
inode = os.stat(self.realpath).st_ino
else:
inode = None
if self.inode != inode:
# reset background processes invoked by GitPython:
self._repo.git.clear_cache()
self.inode = inode
if self._repo is None:
# Note, that this may raise GitCommandError, NoSuchPathError,
# InvalidGitRepositoryError:
self._repo = self.cmd_call_wrapper(
Repo,
# Encode path on Python 2 because, as of v2.1.11, GitPython's
# Repo will pass the path to str() otherwise.
assure_bytes(self.path) if PY2 else self.path)
lgr.log(8, "Using existing Git repository at %s", self.path)
# inject git options into GitPython's git call wrapper:
# Note: `None` currently can happen, when Runner's protocol prevents
# call of Repo(path) above from being actually executed (DryRunProtocol)
if self._repo is not None:
self._repo.git._persistent_git_options = self._GIT_COMMON_OPTIONS
return self._repo
@classmethod
def clone(cls, url, path, *args, **kwargs):
"""Clone url into path
Provides workarounds for known issues (e.g.
https://github.com/datalad/datalad/issues/785)
Parameters
----------
url : str
path : str
expect_fail : bool
          Whether to expect that the command might fail, in which case errors
          are logged at DEBUG level instead of ERROR
"""
if 'repo' in kwargs:
raise TypeError("argument 'repo' conflicts with cloning")
# TODO: what about 'create'?
expect_fail = kwargs.pop('expect_fail', False)
# fail early on non-empty target:
from os import listdir
if exists(path) and listdir(path):
# simulate actual GitCommandError:
lgr.warning("destination path '%s' already exists and is not an "
"empty directory." % path)
raise GitCommandError(
['git', 'clone', '-v', url, path],
128,
"fatal: destination path '%s' already exists and is not an "
"empty directory." % path)
else:
# protect against cloning into existing and obviously dangling
# instance for that location
try:
del cls._unique_instances[path]
except KeyError:
# didn't exist - all fine
pass
# Massage URL
url_ri = RI(url) if not isinstance(url, RI) else url
# try to get a local path from `url`:
try:
url = url_ri.localpath
url_ri = RI(url)
except ValueError:
pass
if is_ssh(url_ri):
ssh_manager.get_connection(url).open()
# TODO: with git <= 2.3 keep old mechanism:
# with rm.repo.git.custom_environment(GIT_SSH="wrapper_script"):
env = GitRepo.GIT_SSH_ENV
else:
if isinstance(url_ri, PathRI):
new_url = os.path.expanduser(url)
if url != new_url:
# TODO: remove whenever GitPython is fixed:
# https://github.com/gitpython-developers/GitPython/issues/731
lgr.info("Expanded source path to %s from %s", new_url, url)
url = new_url
env = None
ntries = 5 # 3 is not enough for robust workaround
for trial in range(ntries):
try:
lgr.debug("Git clone from {0} to {1}".format(url, path))
with GitPythonProgressBar("Cloning") as git_progress:
repo = gitpy.Repo.clone_from(
url, path,
env=env,
odbt=default_git_odbt,
progress=git_progress
)
# Note/TODO: signature for clone from:
# (url, to_path, progress=None, env=None, **kwargs)
lgr.debug("Git clone completed")
break
except GitCommandError as e:
# log here but let caller decide what to do
e_str = exc_str(e)
# see https://github.com/datalad/datalad/issues/785
if re.search("Request for .*aborted.*Unable to find", str(e),
re.DOTALL) \
and trial < ntries - 1:
lgr.info(
"Hit a known issue with Git (see GH#785). Trial #%d, "
"retrying",
trial)
continue
(lgr.debug if expect_fail else lgr.error)(e_str)
raise
except ValueError as e:
if gitpy.__version__ == '1.0.2' \
and "I/O operation on closed file" in str(e):
# bug https://github.com/gitpython-developers/GitPython
# /issues/383
raise GitCommandError(
"clone has failed, telling ya",
999, # good number
stdout="%s already exists" if exists(path) else "")
raise # reraise original
gr = cls(path, *args, repo=repo, **kwargs)
return gr
def __del__(self):
# unbind possibly bound ConfigManager, to prevent all kinds of weird
# stalls etc
self._cfg = None
# Make sure to flush pending changes, especially close batch processes
# (internal `git cat-file --batch` by GitPython)
try:
if getattr(self, '_repo', None) is not None and exists(self.path):
# gc might be late, so the (temporary)
# repo doesn't exist on FS anymore
self._repo.git.clear_cache()
# We used to write out the index to flush GitPython's
# state... but such unconditional write is really a workaround
# and does not play nice with read-only operations - permission
# denied etc. So disabled
#if exists(opj(self.path, '.git')): # don't try to write otherwise
# self.repo.index.write()
except InvalidGitRepositoryError:
# might have being removed and no longer valid
pass
def __repr__(self):
return "<GitRepo path=%s (%s)>" % (self.path, type(self))
def __eq__(self, obj):
"""Decides whether or not two instances of this class are equal.
This is done by comparing the base repository path.
"""
return self.realpath == obj.realpath
@classmethod
def is_valid_repo(cls, path):
"""Returns if a given path points to a git repository"""
path = Path(path) / '.git'
# the aim here is to have this test as cheap as possible, because
# it is performed a lot
# recognize two things as good-enough indicators of a present
# repo: 1) a non-empty .git directory (#3473) and 2) a pointer
# file or symlink
return path.exists() and (
not path.is_dir() or \
any(path.iterdir()))
@staticmethod
def get_git_dir(repo):
"""figure out a repo's gitdir
'.git' might be a directory, a symlink or a file
        Parameters
        ----------
repo: path or Repo instance
currently expected to be the repos base dir
Returns
-------
str
relative path to the repo's git dir; So, default would be ".git"
"""
if hasattr(repo, 'path'):
# repo instance like given
repo = repo.path
dot_git = op.join(repo, ".git")
if not op.exists(dot_git):
raise RuntimeError("Missing .git in %s." % repo)
elif op.islink(dot_git):
git_dir = os.readlink(dot_git)
elif op.isdir(dot_git):
git_dir = ".git"
elif op.isfile(dot_git):
with open(dot_git) as f:
git_dir = f.readline()
if git_dir.startswith("gitdir:"):
git_dir = git_dir[7:]
git_dir = git_dir.strip()
return git_dir
@property
def config(self):
"""Get an instance of the parser for the persistent repository
configuration.
        Note: This also allows reading/writing .datalad/config,
        not just .git/config
Returns
-------
ConfigManager
"""
if self._cfg is None:
# associate with this dataset and read the entire config hierarchy
self._cfg = ConfigManager(dataset=self, dataset_only=False)
return self._cfg
def is_with_annex(self, only_remote=False):
"""Return True if GitRepo (assumed) at the path has remotes with git-annex branch
Parameters
----------
only_remote: bool, optional
Check only remote (no local branches) for having git-annex branch
"""
return any((b.endswith('/git-annex') or
'annex/direct' in b
for b in self.get_remote_branches())) or \
((not only_remote) and
any((b == 'git-annex' or 'annex/direct' in b
for b in self.get_branches())))
@classmethod
def get_toppath(cls, path, follow_up=True, git_options=None):
"""Return top-level of a repository given the path.
Parameters
-----------
follow_up : bool
If path has symlinks -- they get resolved by git. If follow_up is
True, we will follow original path up until we hit the same resolved
path. If no such path found, resolved one would be returned.
git_options: list of str
options to be passed to the git rev-parse call
Return None if no parent directory contains a git repository.
"""
cmd = ['git']
if git_options:
cmd.extend(git_options)
cmd += ["rev-parse", "--show-toplevel"]
try:
toppath, err = GitRunner().run(
cmd,
cwd=path,
log_stdout=True, log_stderr=True,
expect_fail=True, expect_stderr=True)
toppath = toppath.rstrip('\n\r')
except CommandError:
return None
except OSError:
toppath = GitRepo.get_toppath(dirname(path), follow_up=follow_up,
git_options=git_options)
if follow_up:
path_ = path
path_prev = ""
while path_ and path_ != path_prev: # on top /.. = /
if realpath(path_) == toppath:
toppath = path_
break
path_prev = path_
path_ = dirname(path_)
return toppath
# classmethod so behavior could be tuned in derived classes
@classmethod
def _get_added_files_commit_msg(cls, files):
if not files:
return "No files were added"
msg = "Added %d file" % len(files)
if len(files) > 1:
msg += "s"
return msg + '\n\nFiles:\n' + '\n'.join(files)
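    # e.g. _get_added_files_commit_msg(['a.txt', 'b.txt']) produces
    # "Added 2 files\n\nFiles:\na.txt\nb.txt"; an empty list gives
    # "No files were added".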
@normalize_paths
def add(self, files, git=True, git_options=None, update=False):
"""Adds file(s) to the repository.
Parameters
----------
files: list
list of paths to add
git: bool
somewhat ugly construction to be compatible with AnnexRepo.add();
has to be always true.
update: bool
--update option for git-add. From git's manpage:
Update the index just where it already has an entry matching
<pathspec>. This removes as well as modifies index entries to match
the working tree, but adds no new files.
If no <pathspec> is given when --update option is used, all tracked
files in the entire working tree are updated (old versions of Git
used to limit the update to the current directory and its
subdirectories).
Returns
-------
list
Of status dicts.
"""
# under all circumstances call this class' add_ (otherwise
# AnnexRepo.add would go into a loop
return list(GitRepo.add_(self, files, git=git, git_options=git_options,
update=update))
def add_(self, files, git=True, git_options=None, update=False):
"""Like `add`, but returns a generator"""
# TODO: git_options is used as options for the git-add here,
# instead of options to the git executable => rename for consistency
if not git:
lgr.warning(
'GitRepo.add() called with git=%s, this should not happen',
git)
git = True
# there is no other way then to collect all files into a list
# at this point, because we need to pass them at once to a single
# `git add` call
files = [_normalize_path(self.path, f) for f in assure_list(files) if f]
if not (files or git_options or update):
# wondering why just a warning? in cmdline this is also not an error
lgr.warning("add was called with empty file list and no options.")
return
try:
# without --verbose git 2.9.3 add does not return anything
add_out = self._git_custom_command(
files,
['git', 'add'] + assure_list(git_options) +
to_options(update=update) + ['--verbose']
)
# get all the entries
for o in self._process_git_get_output(*add_out):
yield o
# Note: as opposed to git cmdline, force is True by default in
# gitpython, which would lead to add things, that are
# ignored or excluded otherwise
# 2. Note: There is an issue with globbing (like adding '.'),
# which apparently doesn't care for 'force' and therefore
# adds '.git/...'. May be it's expanded at the wrong
# point in time or sth. like that.
# For now, use direct call to git add.
#self.cmd_call_wrapper(self.repo.index.add, files, write=True,
# force=False)
# TODO: May be make use of 'fprogress'-option to indicate
# progress
# But then, we don't have it for git-annex add, anyway.
#
# TODO: Is write=True a reasonable way to do it?
# May be should not write until success of operation is
# confirmed?
# What's best in case of a list of files?
except OSError as e:
lgr.error("add: %s" % e)
raise
# Make sure return value from GitRepo is consistent with AnnexRepo
# currently simulating similar return value, assuming success
# for all files:
# TODO: Make return values consistent across both *Repo classes!
return
@staticmethod
def _process_git_get_output(stdout, stderr=None):
"""Given both outputs (stderr is ignored atm) of git add - process it
Primarily to centralize handling in both indirect annex and direct
modes when ran through proxy
"""
return [{u'file': f, u'success': True}
for f in re.findall("'(.*)'[\n$]", assure_unicode(stdout))]
@normalize_paths(match_return_type=False)
def remove(self, files, recursive=False, **kwargs):
"""Remove files.
Calls git-rm.
Parameters
----------
files: str
list of paths to remove
recursive: False
whether to allow recursive removal from subdirectories
kwargs:
see `__init__`
Returns
-------
[str]
list of successfully removed files.
"""
files = _remove_empty_items(files)
if recursive:
kwargs['r'] = True
stdout, stderr = self._git_custom_command(
files, ['git', 'rm'] + to_options(**kwargs))
# output per removed file is expected to be "rm 'PATH'":
return [line.strip()[4:-1] for line in stdout.splitlines()]
#return self.repo.git.rm(files, cached=False, **kwargs)
def precommit(self):
"""Perform pre-commit maintenance tasks
"""
# All GitPython commands should take care about flushing index
# whenever they modify it, so we would not care to do anything
# if self.repo is not None and exists(opj(self.path, '.git')): # don't try to write otherwise:
# # flush possibly cached in GitPython changes to index:
# # if self.repo.git:
# # sys.stderr.write("CLEARING\n")
# # self.repo.git.clear_cache()
# self.repo.index.write()
# Close batched by GitPython git processes etc
# Ref: https://github.com/gitpython-developers/GitPython/issues/718
self.repo.__del__()
pass
@staticmethod
def _get_prefixed_commit_msg(msg):
DATALAD_PREFIX = "[DATALAD]"
return DATALAD_PREFIX if not msg else "%s %s" % (DATALAD_PREFIX, msg)
def configure_fake_dates(self):
"""Configure repository to use fake dates.
"""
lgr.debug("Enabling fake dates")
self.config.set("datalad.fake-dates", "true")
@property
def fake_dates_enabled(self):
"""Is the repository configured to use fake dates?
"""
if self._fake_dates_enabled is None:
self._fake_dates_enabled = \
self.config.getbool('datalad', 'fake-dates', default=False)
return self._fake_dates_enabled
def add_fake_dates(self, env):
"""Add fake dates to `env`.
Parameters
----------
env : dict or None
Environment variables.
Returns
-------
A dict (copied from env), with date-related environment
variables for git and git-annex set.
"""
env = (env if env is not None else os.environ).copy()
# Note: Use _git_custom_command here rather than repo.git.for_each_ref
# so that we use annex-proxy in direct mode.
last_date = self._git_custom_command(
None,
["git", "for-each-ref", "--count=1",
"--sort=-committerdate", "--format=%(committerdate:raw)",
"refs/heads"])[0].strip()
if last_date:
# Drop the "contextual" timezone, leaving the unix timestamp. We
# avoid :unix above because it wasn't introduced until Git v2.9.4.
last_date = last_date.split()[0]
seconds = int(last_date)
else:
seconds = self.config.obtain("datalad.fake-dates-start")
seconds_new = seconds + 1
date = "@{} +0000".format(seconds_new)
lgr.debug("Setting date to %s",
time.strftime("%a %d %b %Y %H:%M:%S +0000",
time.gmtime(seconds_new)))
env["GIT_AUTHOR_DATE"] = date
env["GIT_COMMITTER_DATE"] = date
env["GIT_ANNEX_VECTOR_CLOCK"] = str(seconds_new)
return env
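    # Sketch of the effect (timestamps are made up): if the newest ref has
    # committer date "1400000000 +0000", the returned env carries
    #   GIT_AUTHOR_DATE = GIT_COMMITTER_DATE = "@1400000001 +0000"
    #   GIT_ANNEX_VECTOR_CLOCK = "1400000001"
    # so every successive commit advances the fake clock by one second.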
def commit(self, msg=None, options=None, _datalad_msg=False, careless=True,
files=None, date=None, index_file=None):
"""Commit changes to git.
Parameters
----------
msg: str, optional
commit-message
options: list of str, optional
cmdline options for git-commit
_datalad_msg: bool, optional
To signal that commit is automated commit by datalad, so
it would carry the [DATALAD] prefix
careless: bool, optional
if False, raise when there's nothing actually committed;
if True, don't care
files: list of str, optional
path(s) to commit
date: str, optional
Date in one of the formats git understands
index_file: str, optional
An alternative index to use
"""
self.precommit()
if _datalad_msg:
msg = self._get_prefixed_commit_msg(msg)
options = options or []
if not msg:
if options:
if "--allow-empty-message" not in options:
options.append("--allow-empty-message")
else:
options = ["--allow-empty-message"]
if date:
options += ["--date", date]
# Note: We used to use a direct call to git only if there were options,
# since we can't pass all possible options to gitpython's implementation
# of commit.
# But there's an additional issue. GitPython implements commit in a way,
# that it might create a new commit, when a direct call wouldn't. This
# was discovered with a modified (but unstaged) submodule, leading to a
# commit, that apparently did nothing - git status still showed the very
# same thing afterwards. But a commit was created nevertheless:
# diff --git a/sub b/sub
# --- a/sub
# +++ b/sub
# @@ -1 +1 @@
# -Subproject commit d3935338a3b3735792de1078bbfb5e9913ef998f
# +Subproject commit d3935338a3b3735792de1078bbfb5e9913ef998f-dirty
#
# Therefore, for now always use direct call.
# TODO: Figure out, what exactly is going on with gitpython here
cmd = ['git', 'commit'] + (["-m", msg if msg else ""])
if options:
cmd.extend(options)
lgr.debug("Committing via direct call of git: %s" % cmd)
try:
self._git_custom_command(files, cmd,
expect_stderr=True, expect_fail=True,
check_fake_dates=True,
index_file=index_file)
except CommandError as e:
if 'nothing to commit' in e.stdout:
if careless:
lgr.debug(u"nothing to commit in {}. "
"Ignored.".format(self))
else:
raise
elif 'no changes added to commit' in e.stdout or \
'nothing added to commit' in e.stdout:
if careless:
lgr.debug(u"no changes added to commit in {}. "
"Ignored.".format(self))
else:
raise
elif "did not match any file(s) known to git" in e.stderr:
# TODO: Improve FileNotInXXXXError classes to better deal with
# multiple files; Also consider PathOutsideRepositoryError
raise FileNotInRepositoryError(cmd=e.cmd,
msg="File(s) unknown to git",
code=e.code,
filename=linesep.join(
[l for l in e.stderr.splitlines()
if l.startswith("pathspec")]))
else:
raise
def get_indexed_files(self):
"""Get a list of files in git's index
Returns
-------
list
list of paths rooting in git's base dir
"""
return [x[0] for x in self.cmd_call_wrapper(
self.repo.index.entries.keys)]
def format_commit(self, fmt, commitish=None):
"""Return `git show` output for `commitish`.
Parameters
----------
fmt : str
A format string accepted by `git show`.
commitish: str, optional
Any commit identifier (defaults to "HEAD").
Returns
-------
        str or, if there are no commits yet, None.
"""
cmd = ['git', 'show', '-z', '--no-patch', '--format=' + fmt]
if commitish is not None:
cmd.append(commitish + "^{commit}")
# make sure Git takes our argument as a revision
cmd.append('--')
try:
stdout, stderr = self._git_custom_command(
'', cmd, expect_stderr=True, expect_fail=True)
except CommandError as e:
if 'bad revision' in e.stderr:
raise ValueError("Unknown commit identifier: %s" % commitish)
elif 'does not have any commits yet' in e.stderr:
return None
else:
raise e
# This trailing null is coming from the -z above, which avoids the
# newline that Git would append to the output. We could drop -z and
# strip the newline directly, but then we'd have to worry about
# compatibility across platforms.
return stdout.rsplit("\0", 1)[0]
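    # e.g. (hypothetical) repo.format_commit('%H %an') returns something like
    # 'deadbeef... A U Thor' for HEAD; an unknown commitish raises ValueError and
    # a repository without commits yields None.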
def get_hexsha(self, commitish=None, short=False):
"""Return a hexsha for a given commitish.
Parameters
----------
commitish : str, optional
Any identifier that refers to a commit (defaults to "HEAD").
short : bool, optional
Return the abbreviated form of the hexsha.
Returns
-------
        str or, if there are no commits yet, None.
"""
stdout = self.format_commit("%{}".format('h' if short else 'H'),
commitish)
if stdout is not None:
stdout = stdout.splitlines()
assert(len(stdout) == 1)
return stdout[0]
@normalize_paths(match_return_type=False)
def get_last_commit_hash(self, files):
"""Return the hash of the last commit the modified any of the given
paths"""
try:
stdout, stderr = self._git_custom_command(
files,
['git', 'log', '-n', '1', '--pretty=format:%H'],
expect_fail=True)
commit = stdout.strip()
return commit
except CommandError as e:
if 'does not have any commits' in e.stderr:
return None
raise
def commit_exists(self, commitish):
"""Does `commitish` exist in the repo?
Parameters
----------
commitish : str
A commit or an object that can be dereferenced to one.
Returns
-------
bool
"""
try:
# Note: The peeling operator "^{commit}" is required so that
# rev-parse doesn't succeed if passed a full hexsha that is valid
# but doesn't exist.
self._git_custom_command(
"", ["git", "rev-parse", "--verify", commitish + "^{commit}"],
expect_fail=True)
except CommandError:
return False
return True
def get_merge_base(self, commitishes):
"""Get a merge base hexsha
Parameters
----------
commitishes: str or list of str
List of commitishes (branches, hexshas, etc) to determine the merge
          base of. If a single value is provided, the merge base with the
          current branch is returned.
Returns
-------
str or None
          If there is no merge base for the given commits, or a specified
          treeish doesn't exist, None is returned
"""
if isinstance(commitishes, string_types):
commitishes = [commitishes]
if not commitishes:
raise ValueError("Provide at least a single value")
elif len(commitishes) == 1:
commitishes = commitishes + [self.get_active_branch()]
try:
bases = self.repo.merge_base(*commitishes)
except GitCommandError as exc:
if "fatal: Not a valid object name" in str(exc):
return None
raise
if not bases:
return None
assert(len(bases) == 1) # we do not do 'all' yet
return bases[0].hexsha
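    # Usage sketch (comment only; branch names are hypothetical): get_merge_base()
    # returns a single hexsha, or None if the given commitishes share no history:
    #
    #   base = repo.get_merge_base(['master', 'feature'])
    #   base_vs_active = repo.get_merge_base('feature')   # against the active branch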
def is_ancestor(self, reva, revb):
"""Is `reva` an ancestor of `revb`?
Parameters
----------
reva, revb : str
Revisions.
Returns
-------
bool
"""
try:
self._git_custom_command(
"", ["git", "merge-base", "--is-ancestor", reva, revb],
expect_fail=True)
except CommandError:
return False
return True
def get_commit_date(self, branch=None, date='authored'):
"""Get the date stamp of the last commit (in a branch or head otherwise)
Parameters
----------
date: {'authored', 'committed'}
Which date to return. "authored" will be the date shown by "git show"
and the one possibly specified via --date to `git commit`
Returns
-------
int or None
None if no commit
"""
try:
if branch:
commit = next(self.get_branch_commits(branch))
else:
commit = self.repo.head.commit
except Exception as exc:
lgr.debug("Got exception while trying to get last commit: %s",
exc_str(exc))
return None
return getattr(commit, "%s_date" % date)
def get_active_branch(self):
try:
branch = self.repo.active_branch.name
except TypeError as e:
if "HEAD is a detached symbolic reference" in str(e):
lgr.debug("detached HEAD in {0}".format(self))
return None
else:
raise
return branch
def get_branches(self):
"""Get all branches of the repo.
Returns
-------
[str]
Names of all branches of this repository.
"""
return [branch.name for branch in self.repo.branches]
def get_remote_branches(self):
"""Get all branches of all remotes of the repo.
Returns
        -------
[str]
Names of all remote branches.
"""
# TODO: Reconsider melting with get_branches()
# TODO: treat entries like this: origin/HEAD -> origin/master'
# currently this is done in collection
# For some reason, this is three times faster than the version below:
remote_branches = list()
for remote in self.repo.remotes:
try:
for ref in remote.refs:
remote_branches.append(ref.name)
except AssertionError as e:
if str(e).endswith("did not have any references"):
# this will happen with git annex special remotes
pass
else:
raise e
return remote_branches
# return [branch.strip() for branch in
# self.repo.git.branch(r=True).splitlines()]
def get_remotes(self, with_urls_only=False):
"""Get known remotes of the repository
Parameters
----------
with_urls_only : bool, optional
return only remotes which have urls
Returns
-------
remotes : list of str
List of names of the remotes
"""
# Note: read directly from config and spare instantiation of gitpy.Repo
# since we need this in AnnexRepo constructor. Furthermore gitpy does it
# pretty much the same way and the use of a Repo instance seems to have
# no reason other than a nice object oriented look.
from datalad.utils import unique
self.config.reload()
remotes = unique([x[7:] for x in self.config.sections()
if x.startswith("remote.")])
if with_urls_only:
remotes = [
r for r in remotes
if self.config.get('remote.%s.url' % r)
]
return remotes
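    # Usage sketch (comment only): list the remotes known to a hypothetical
    # GitRepo instance `repo`, optionally restricted to those with a URL:
    #
    #   all_remotes = repo.get_remotes()
    #   usable = repo.get_remotes(with_urls_only=True)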
def get_files(self, branch=None):
"""Get a list of files in git.
Lists the files in the (remote) branch.
Parameters
----------
branch: str
Name of the branch to query. Default: active branch.
Returns
-------
[str]
list of files.
"""
# TODO: RF codes base and melt get_indexed_files() in
if branch is None:
# active branch can be queried way faster:
return self.get_indexed_files()
else:
return [item.path for item in self.repo.tree(branch).traverse()
if isinstance(item, Blob)]
def get_file_content(self, file_, branch='HEAD'):
"""
Returns
-------
[str]
content of file_ as a list of lines.
"""
content_str = self.repo.commit(branch).tree[file_].data_stream.read()
# in python3 a byte string is returned. Need to convert it:
from six import PY3
if PY3:
conv_str = u''
for b in bytes(content_str):
conv_str += chr(b)
return conv_str.splitlines()
else:
return content_str.splitlines()
# TODO: keep splitlines?
def _get_files_history(self, files, branch='HEAD'):
"""
Parameters
----------
files: list
list of files, only commits with queried files are considered
branch: str
Name of the branch to query. Default: HEAD.
Returns
-------
        generator
            yielding Commit objects from the branch history that touch the given files
"""
return gitpy.objects.commit.Commit.iter_items(self.repo, branch, paths=files)
def _get_remotes_having_commit(self, commit_hexsha, with_urls_only=True):
"""Traverse all branches of the remote and check if commit in any of their ancestry
It is a generator yielding names of the remotes
"""
out, err = self._git_custom_command(
'', 'git branch -r --contains ' + commit_hexsha
)
# sanitize a bit (all the spaces and new lines)
remote_branches = [
b # could be origin/HEAD -> origin/master, we just skip ->
for b in filter(bool, out.split())
if b != '->'
]
return [
remote
for remote in self.get_remotes(with_urls_only=with_urls_only)
if any(rb.startswith(remote + '/') for rb in remote_branches)
]
@normalize_paths(match_return_type=False)
def _git_custom_command(self, files, cmd_str,
log_stdout=True, log_stderr=True, log_online=False,
expect_stderr=True, cwd=None, env=None,
shell=None, expect_fail=False,
check_fake_dates=False,
index_file=None,
updates_tree=False):
"""Allows for calling arbitrary commands.
Helper for developing purposes, i.e. to quickly implement git commands
        for proof of concept without the need to figure out how this is done
via GitPython.
Parameters
----------
files: list of files
cmd_str: str or list
arbitrary command str. `files` is appended to that string.
updates_tree: bool
whether or not command updates the working tree. If True, triggers
necessary reevaluations like self.config.reload()
Returns
-------
stdout, stderr
"""
# ensure cmd_str becomes a well-formed list:
if isinstance(cmd_str, string_types):
cmd = shlex.split(cmd_str, posix=not on_windows)
else:
cmd = cmd_str[:] # we will modify in-place
assert(cmd[0] == 'git')
cmd = cmd[:1] + self._GIT_COMMON_OPTIONS + cmd[1:]
if check_fake_dates and self.fake_dates_enabled:
env = self.add_fake_dates(env)
if index_file:
env = (env if env is not None else os.environ).copy()
env['GIT_INDEX_FILE'] = index_file
        # TODO?: wouldn't splitting interfere with the above GIT_INDEX_FILE
        # handling?
try:
out, err = self._run_command_files_split(
self.cmd_call_wrapper.run,
cmd,
files,
log_stderr=log_stderr,
log_stdout=log_stdout,
log_online=log_online,
expect_stderr=expect_stderr,
cwd=cwd,
env=env,
shell=shell,
expect_fail=expect_fail)
except CommandError as e:
ignored = re.search(GitIgnoreError.pattern, e.stderr)
if ignored:
raise GitIgnoreError(cmd=e.cmd, msg=e.stderr,
code=e.code, stdout=e.stdout,
stderr=e.stderr,
paths=ignored.groups()[0].splitlines())
raise
if updates_tree:
lgr.debug("Reloading config due to supposed working tree update")
self.config.reload()
return out, err
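    # Usage sketch (comment only): _git_custom_command() runs an arbitrary git
    # command (the command list must start with 'git') and returns (stdout, stderr);
    # `repo` is a hypothetical GitRepo instance:
    #
    #   out, err = repo._git_custom_command([], ['git', 'rev-parse', '--show-toplevel'])
    #   toplevel = out.strip()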
# TODO: could be static or class method even
def _run_command_files_split(
self,
func,
cmd,
files,
*args, **kwargs
):
"""
Run `func(cmd + files, ...)` possibly multiple times if `files` is too long
"""
assert isinstance(cmd, list)
if not files:
file_chunks = [[]]
else:
file_chunks = generate_file_chunks(files, cmd)
out, err = "", ""
for file_chunk in file_chunks:
out_, err_ = func(
cmd + (['--'] if file_chunk else []) + file_chunk,
*args, **kwargs)
# out_, err_ could be None, and probably no need to append empty strings
if out_:
out += out_
if err_:
err += err_
return out, err
# TODO: --------------------------------------------------------------------
def add_remote(self, name, url, options=None):
"""Register remote pointing to a url
"""
cmd = ['git', 'remote', 'add']
if options:
cmd += options
cmd += [name, url]
result = self._git_custom_command('', cmd)
self.config.reload()
return result
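    # Usage sketch (comment only; remote name and URL are hypothetical): register
    # a remote and confirm it is known afterwards:
    #
    #   repo.add_remote('myremote', 'https://example.com/some/repo.git')
    #   assert 'myremote' in repo.get_remotes()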
def remove_remote(self, name):
"""Remove existing remote
"""
# TODO: testing and error handling!
from .exceptions import RemoteNotAvailableError
try:
out, err = self._git_custom_command(
'', ['git', 'remote', 'remove', name])
except CommandError as e:
if 'fatal: No such remote' in e.stderr:
raise RemoteNotAvailableError(name,
cmd="git remote remove",
msg="No such remote",
stdout=e.stdout,
stderr=e.stderr)
else:
raise e
# TODO: config.reload necessary?
self.config.reload()
return
def update_remote(self, name=None, verbose=False):
"""
"""
options = ["-v"] if verbose else []
name = [name] if name else []
self._git_custom_command(
'', ['git', 'remote'] + name + ['update'] + options,
expect_stderr=True
)
# TODO: centralize all the c&p code in fetch, pull, push
# TODO: document **kwargs passed to gitpython
@guard_BadName
def fetch(self, remote=None, refspec=None, all_=False, **kwargs):
"""Fetches changes from a remote (or all_ remotes).
Parameters
----------
remote: str
(optional) name of the remote to fetch from. If no remote is given and
`all_` is not set, the tracking branch is fetched.
refspec: str
(optional) refspec to fetch.
all_: bool
fetch all_ remotes (and all_ of their branches).
Fails if `remote` was given.
kwargs:
passed to gitpython. TODO: Figure it out, make consistent use of it
and document it.
Returns
-------
list
FetchInfo objects of the items fetched from remote
"""
# TODO: options=> **kwargs):
# Note: Apparently there is no explicit (fetch --all) in gitpython,
# but fetch is always bound to a certain remote instead.
# Therefore implement it on our own:
if remote is None:
if refspec is not None:
# conflicts with using tracking branch or fetch all remotes
# For now: Just fail.
# TODO: May be check whether it fits to tracking branch
raise ValueError("refspec specified without a remote. (%s)" %
refspec)
if all_:
remotes_to_fetch = [
self.repo.remote(r)
for r in self.get_remotes(with_urls_only=True)
]
else:
# No explicit remote to fetch.
# => get tracking branch:
tb_remote, refspec = self.get_tracking_branch()
if tb_remote is not None:
remotes_to_fetch = [self.repo.remote(tb_remote)]
else:
# No remote, no tracking branch
# => fail
raise ValueError("Neither a remote is specified to fetch "
"from nor a tracking branch is set up.")
else:
remotes_to_fetch = [self.repo.remote(remote)]
fi_list = []
for rm in remotes_to_fetch:
fetch_url = \
self.config.get('remote.%s.fetchurl' % rm.name,
self.config.get('remote.%s.url' % rm.name,
None))
if fetch_url is None:
lgr.debug("Remote %s has no URL", rm)
return []
fi_list += self._call_gitpy_with_progress(
"Fetching %s" % rm.name,
rm.fetch,
rm.repo,
refspec,
fetch_url,
**kwargs
)
# TODO: fetch returns a list of FetchInfo instances. Make use of it.
return fi_list
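    # Usage sketch (comment only; the remote name is hypothetical): fetch from a
    # single remote, from all remotes with URLs, or fall back to the tracking
    # branch when nothing is specified:
    #
    #   repo.fetch(remote='origin')
    #   repo.fetch(all_=True)
    #   repo.fetch()   # uses the tracking branch, or raises if none is set up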
def _call_gitpy_with_progress(self, msg, callable, git_repo,
refspec, url, **kwargs):
"""A helper to reduce code duplication
Wraps call to a GitPython method with all needed decoration for
workarounds of having aged git, or not providing full stderr
when monitoring progress of the operation
"""
with GitPythonProgressBar(msg) as git_progress:
git_kwargs = dict(
refspec=refspec,
progress=git_progress,
**kwargs
)
if is_ssh(url):
ssh_manager.get_connection(url).open()
# TODO: with git <= 2.3 keep old mechanism:
# with rm.repo.git.custom_environment(
# GIT_SSH="wrapper_script"):
with git_repo.git.custom_environment(**GitRepo.GIT_SSH_ENV):
ret = callable(**git_kwargs)
# TODO: +kwargs
else:
ret = callable(**git_kwargs)
# TODO: +kwargs
return ret
def pull(self, remote=None, refspec=None, **kwargs):
"""See fetch
"""
if remote is None:
if refspec is not None:
# conflicts with using tracking branch or fetch all remotes
# For now: Just fail.
# TODO: May be check whether it fits to tracking branch
raise ValueError("refspec specified without a remote. (%s)" %
refspec)
# No explicit remote to pull from.
# => get tracking branch:
tb_remote, refspec = self.get_tracking_branch()
if tb_remote is not None:
remote = self.repo.remote(tb_remote)
else:
# No remote, no tracking branch
# => fail
raise ValueError("No remote specified to pull from nor a "
"tracking branch is set up.")
else:
remote = self.repo.remote(remote)
fetch_url = \
remote.config_reader.get(
'fetchurl' if remote.config_reader.has_option('fetchurl')
else 'url')
return self._call_gitpy_with_progress(
"Pulling",
remote.pull,
remote.repo,
refspec,
fetch_url,
**kwargs
)
def push(self, remote=None, refspec=None, all_remotes=False,
**kwargs):
"""Push to remote repository
Parameters
----------
remote: str
name of the remote to push to
refspec: str
specify what to push
all_remotes: bool
if set to True push to all remotes. Conflicts with `remote` not being
None.
kwargs: dict
options to pass to `git push`
Returns
-------
list
PushInfo objects of the items pushed to remote
"""
if remote is None:
if refspec is not None:
# conflicts with using tracking branch or fetch all remotes
# For now: Just fail.
# TODO: May be check whether it fits to tracking branch
raise ValueError("refspec specified without a remote. (%s)" %
refspec)
if all_remotes:
remotes_to_push = self.repo.remotes
else:
# Nothing explicitly specified. Just call `git push` and let git
# decide what to do would be an option. But:
# - without knowing the remote and its URL we cannot provide
# shared SSH connection
# - we lose ability to use GitPython's progress info and return
# values
# (the latter would be solvable:
# Provide a Repo.push() method for GitPython, copying
# Remote.push() for similar return value and progress
# (also: fetch, pull)
# Do what git would do:
# 1. branch.*.remote for current branch or 'origin' as default
# if config is missing
# 2. remote.*.push or push.default
# TODO: check out "same procedure" for fetch/pull
tb_remote, refspec = self.get_tracking_branch()
if tb_remote is None:
tb_remote = 'origin'
remotes_to_push = [self.repo.remote(tb_remote)]
# use no refspec; let git find remote.*.push or push.default on
# its own
else:
if all_remotes:
lgr.warning("Option 'all_remotes' conflicts with specified "
"remote '%s'. Option ignored.")
remotes_to_push = [self.repo.remote(remote)]
pi_list = []
for rm in remotes_to_push:
push_url = \
rm.config_reader.get('pushurl'
if rm.config_reader.has_option('pushurl')
else 'url')
pi_list += self._call_gitpy_with_progress(
"Pushing %s" % rm.name,
rm.push,
rm.repo,
refspec,
push_url,
**kwargs
)
return pi_list
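    # Usage sketch (comment only; remote and refspec are hypothetical): push a
    # branch to a named remote, or let the tracking-branch logic decide:
    #
    #   repo.push(remote='origin', refspec='master')
    #   repo.push()   # tracking-branch remote, or 'origin' as a fallback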
def get_remote_url(self, name, push=False):
"""Get the url of a remote.
Reads the configuration of remote `name` and returns its url or None,
if there is no url configured.
Parameters
----------
name: str
name of the remote
push: bool
if True, get the pushurl instead of the fetch url.
"""
var = 'remote.{0}.{1}'.format(name, 'pushurl' if push else 'url')
return self.config.get(var, None)
def set_remote_url(self, name, url, push=False):
"""Set the URL a remote is pointing to
Sets the URL of the remote `name`. Requires the remote to already exist.
Parameters
----------
name: str
name of the remote
        url: str
          URL to assign to the remote
push: bool
if True, set the push URL, otherwise the fetch URL
"""
var = 'remote.{0}.{1}'.format(name, 'pushurl' if push else 'url')
self.config.set(var, url, where='local', reload=True)
def get_branch_commits(self, branch=None, limit=None, stop=None, value=None):
"""Return GitPython's commits for the branch
Pretty much similar to what 'git log <branch>' does.
It is a generator which returns top commits first
Parameters
----------
branch: str, optional
If not provided, assumes current branch
limit: None | 'left-only', optional
Limit which commits to report. If None -- all commits (merged or not),
if 'left-only' -- only the commits from the left side of the tree upon
merges
stop: str, optional
hexsha of the commit at which stop reporting (matched one is not
reported either)
value: None | 'hexsha', optional
What to yield. If None - entire commit object is yielded, if 'hexsha'
only its hexsha
"""
if not branch:
branch = self.get_active_branch()
try:
_branch = self.repo.branches[branch]
except IndexError:
raise MissingBranchError(self, branch,
[b.name for b in self.repo.branches])
fvalue = {None: lambda x: x, 'hexsha': lambda x: x.hexsha}[value]
if not limit:
def gen():
# traverse doesn't yield original commit
co = _branch.commit
yield co
for co_ in co.traverse():
yield co_
elif limit == 'left-only':
# we need a custom implementation since couldn't figure out how to
# do with .traversal
def gen():
co = _branch.commit
while co:
yield co
co = co.parents[0] if co.parents else None
else:
raise ValueError(limit)
for c in gen():
if stop and c.hexsha == stop:
return
yield fvalue(c)
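    # Usage sketch (comment only; the branch name is hypothetical): iterate the
    # history of a branch, optionally restricted to first-parent ('left-only')
    # commits and reduced to hexshas:
    #
    #   hexshas = list(repo.get_branch_commits('master', limit='left-only',
    #                                          value='hexsha'))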
def checkout(self, name, options=None):
"""
"""
# TODO: May be check for the need of -b options herein?
cmd = ['git', 'checkout']
if options:
cmd += options
cmd += [str(name)]
self._git_custom_command('', cmd, expect_stderr=True, updates_tree=True)
# TODO: Before implementing annex merge, find usages and check for a needed
# change to call super().merge
def merge(self, name, options=None, msg=None, allow_unrelated=False, **kwargs):
if options is None:
options = []
if msg:
options = options + ["-m", msg]
if allow_unrelated and external_versions['cmd:git'] >= '2.9':
options += ['--allow-unrelated-histories']
self._git_custom_command(
'', ['git', 'merge'] + options + [name],
check_fake_dates=True,
**kwargs
)
def remove_branch(self, branch):
self._git_custom_command(
'', ['git', 'branch', '-D', branch]
)
def cherry_pick(self, commit):
"""Cherry pick `commit` to the current branch.
Parameters
----------
commit : str
A single commit.
"""
self._git_custom_command("", ["git", "cherry-pick", commit],
check_fake_dates=True)
@property
def dirty(self):
"""Is the repository dirty?
Note: This provides a quick answer when you simply want to know if
there are any untracked changes or modifications in this repository or
its submodules. For finer-grained control and more detailed reporting,
use status() instead.
"""
stdout, _ = self._git_custom_command(
[],
["git", "status", "--porcelain",
# Ensure the result isn't influenced by status.showUntrackedFiles.
"--untracked-files=normal",
# Ensure the result isn't influenced by diff.ignoreSubmodules.
"--ignore-submodules=none"])
return bool(stdout.strip())
@property
def untracked_files(self):
"""Legacy interface, do not use! Use the status() method instead.
Despite its name, it also reports on untracked datasets, and
yields their names with trailing path separators.
"""
return [
'{}{}'.format(
text_type(p.relative_to(self.pathobj)),
os.sep if props['type'] != 'file' else ''
)
for p, props in iteritems(self.status(
untracked='all', eval_submodule_state='no'))
if props.get('state', None) == 'untracked'
]
def gc(self, allow_background=False, auto=False):
"""Perform house keeping (garbage collection, repacking)"""
cmd_options = ['git']
if not allow_background:
cmd_options += ['-c', 'gc.autodetach=0']
cmd_options += ['gc', '--aggressive']
if auto:
cmd_options += ['--auto']
self._git_custom_command('', cmd_options)
def get_submodules(self, sorted_=True):
"""Return a list of git.Submodule instances for all submodules"""
# check whether we have anything in the repo. if not go home early
if not self.repo.head.is_valid():
return []
submodules = self.repo.submodules
if sorted_:
submodules = sorted(submodules, key=lambda x: x.path)
return submodules
def is_submodule_modified(self, name, options=[]):
"""Whether a submodule has new commits
Note: This is an adhoc method. It parses output of
'git submodule summary' and currently is not able to distinguish whether
or not this change is staged in `self` and whether this would be
reported 'added' or 'modified' by 'git status'.
Parsing isn't heavily tested yet.
Parameters
----------
name: str
the submodule's name
options: list
options to pass to 'git submodule summary'
Returns
-------
bool
True if there are commits in the submodule, differing from
what is registered in `self`
"""
out, err = self._git_custom_command('',
['git', 'submodule', 'summary'] + \
options + ['--', name])
return any([line.split()[1] == name
for line in out.splitlines()
if line and len(line.split()) > 1])
def add_submodule(self, path, name=None, url=None, branch=None):
"""Add a new submodule to the repository.
This will alter the index as well as the .gitmodules file, but will not
create a new commit. If the submodule already exists, no matter if the
configuration differs from the one provided, the existing submodule
is considered as already added and no further action is performed.
Parameters
----------
path : str
repository-relative path at which the submodule should be located, and
which will be created as required during the repository initialization.
name : str or None
name/identifier for the submodule. If `None`, the `path` will be used
as name.
url : str or None
git-clone compatible URL. If `None`, the repository is assumed to
exist, and the url of the first remote is taken instead. This is
useful if you want to make an existing repository a submodule of
another one.
branch : str or None
name of branch to be checked out in the submodule. The given branch
must exist in the remote repository, and will be checked out locally
as a tracking branch. If `None`, remote HEAD will be checked out.
"""
if name is None:
name = Path(path).as_posix()
# XXX the following should do it, but GitPython will refuse to add a submodule
# unless you specify a URL that is configured as one of its remotes, or you
# specify no URL, but the repo has at least one remote.
# this is stupid, as for us it is valid to not have any remote, because we can
# still obtain the submodule from a future publication location, based on the
# parent
# gitpy.Submodule.add(self.repo, name, path, url=url, branch=branch)
# going git native instead
cmd = ['git', 'submodule', 'add', '--name', name]
if branch is not None:
cmd += ['-b', branch]
if url is None:
# repo must already exist locally
subm = GitRepo(op.join(self.path, path), create=False, init=False)
# check that it has a commit, and refuse
# to operate on it otherwise, or we would get a bastard
# submodule that cripples git operations
if not subm.get_hexsha():
raise InvalidGitRepositoryError(
'cannot add subdataset {} with no commits'.format(subm))
# make an attempt to configure a submodule source URL based on the
# discovered remote configuration
remote, branch = subm.get_tracking_branch()
url = subm.get_remote_url(remote) if remote else None
if url is None:
# had no luck with a remote URL
if not isabs(path):
# need to recode into a relative path "URL" in POSIX
# style, even on windows
url = posixpath.join(curdir, posix_relpath(path))
else:
url = path
cmd += [url, path]
self._git_custom_command('', cmd)
        # record dataset ID if possible for comprehensive metadata on
# dataset components within the dataset itself
subm_id = GitRepo(op.join(self.path, path)).config.get(
'datalad.dataset.id', None)
if subm_id:
self._git_custom_command(
'',
['git', 'config', '--file', '.gitmodules', '--replace-all',
'submodule.{}.datalad-id'.format(name), subm_id])
# ensure supported setup
_fixup_submodule_dotgit_setup(self, path)
# TODO: return value
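    # Usage sketch (comment only; path and URL are hypothetical): register a
    # subdataset/submodule either from a clone URL, or from an already existing
    # local repository at `path` (in which case the URL is derived from its
    # remote configuration, if any):
    #
    #   repo.add_submodule('subds', url='https://example.com/subds.git')
    #   repo.add_submodule('local_subds')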
def deinit_submodule(self, path, **kwargs):
"""Deinit a submodule
Parameters
----------
path: str
path to the submodule; relative to `self.path`
kwargs:
see `__init__`
"""
self._git_custom_command(path,
['git', 'submodule', 'deinit'] +
to_options(**kwargs))
# TODO: return value
def update_submodule(self, path, mode='checkout', init=False):
"""Update a registered submodule.
This will make the submodule match what the superproject expects by
cloning missing submodules and updating the working tree of the
submodules. The "updating" can be done in several ways depending
on the value of submodule.<name>.update configuration variable, or
the `mode` argument.
Parameters
----------
path : str
          Identifies which submodule to operate on by its repository-relative
          path.
mode : {checkout, rebase, merge}
Update procedure to perform. 'checkout': the commit recorded in the
superproject will be checked out in the submodule on a detached HEAD;
'rebase': the current branch of the submodule will be rebased onto
the commit recorded in the superproject; 'merge': the commit recorded
in the superproject will be merged into the current branch in the
submodule.
init : bool
If True, initialize all submodules for which "git submodule init" has
not been called so far before updating.
          Primarily provided for internal purposes and should not be used directly
          since it would result in not-so-annex-friendly .git symlinks/references
          instead of full-featured .git/ directories in the submodules.
"""
cmd = ['git', 'submodule', 'update', '--%s' % mode]
if init:
cmd.append('--init')
subgitpath = opj(self.path, path, '.git')
if not exists(subgitpath):
# TODO: wouldn't with --init we get all those symlink'ed .git/?
# At least let's warn
lgr.warning(
"Do not use update_submodule with init=True to avoid git creating "
"symlinked .git/ directories in submodules"
)
# yoh: I thought I saw one recently but thought it was some kind of
# an artifact from running submodule update --init manually at
# some point, but looking at this code now I worry that it was not
self._git_custom_command(path, cmd)
# TODO: return value
def update_ref(self, ref, value, symbolic=False):
"""Update the object name stored in a ref "safely".
Just a shim for `git update-ref` call if not symbolic, and
`git symbolic-ref` if symbolic
Parameters
----------
ref : str
Reference, such as `ref/heads/BRANCHNAME` or HEAD.
value : str
Value to update to, e.g. hexsha of a commit when updating for a
branch ref, or branch ref if updating HEAD
        symbolic : bool, optional
          Whether `ref` is symbolic, e.g. should be set to True in case of
          ref=HEAD
"""
self._git_custom_command(
'',
['git', 'symbolic-ref' if symbolic else 'update-ref', ref, value]
)
def tag(self, tag, message=None):
"""Assign a tag to current commit
Parameters
----------
tag : str
Custom tag label.
message : str, optional
If provided, would create an annotated tag with that message
"""
# TODO later to be extended with tagging particular commits and signing
# TODO: call in save.py complains about extensive logging. When does it
# happen in what way? Figure out, whether to just silence it or raise or
# whatever else.
options = []
if message:
options += ['-m', message]
self._git_custom_command(
'', ['git', 'tag'] + options + [str(tag)],
check_fake_dates=True
)
def get_tags(self, output=None):
"""Get list of tags
Parameters
----------
output : str, optional
If given, limit the return value to a list of values matching that
particular key of the tag properties.
Returns
-------
list
Each item is a dictionary with information on a tag. At present
this includes 'hexsha', and 'name', where the latter is the string
          label of the tag, and the former the hexsha of the object the tag
is attached to. The list is sorted by commit date, with the most
recent commit being the last element.
"""
tag_objs = sorted(
self.repo.tags,
key=lambda t: t.commit.committed_date
)
tags = [
{
'name': t.name,
'hexsha': t.commit.hexsha
}
for t in tag_objs
]
if output:
return [t[output] for t in tags]
else:
return tags
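    # Usage sketch (comment only; the tag label is hypothetical): create an
    # annotated tag on the current commit and list known tags, optionally
    # reduced to a single property:
    #
    #   repo.tag('v0.1', message='first milestone')
    #   names = repo.get_tags(output='name')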
def describe(self, commitish=None, **kwargs):
""" Quick and dirty implementation to call git-describe
        Parameters
        ----------
kwargs:
transformed to cmdline options for git-describe;
see __init__ for description of the transformation
"""
# TODO: be more precise what failure to expect when and raise actual
# errors
cmd = ['git', 'describe'] + to_options(**kwargs)
if commitish is not None:
cmd.append(commitish)
try:
describe, outerr = self._git_custom_command(
[],
cmd,
expect_fail=True)
return describe.strip()
# TODO: WTF "catch everything"?
        except Exception:
return None
def get_tracking_branch(self, branch=None):
"""Get the tracking branch for `branch` if there is any.
Parameters
----------
branch: str
local branch to look up. If none is given, active branch is used.
Returns
-------
tuple
(remote or None, refspec or None) of the tracking branch
"""
if branch is None:
branch = self.get_active_branch()
if branch is None:
return None, None
track_remote = self.config.get('branch.{0}.remote'.format(branch), None)
track_branch = self.config.get('branch.{0}.merge'.format(branch), None)
return track_remote, track_branch
@property
def count_objects(self):
"""return dictionary with count, size(in KiB) information of git objects
"""
count_cmd = ['git', 'count-objects', '-v']
count_str, err = self._git_custom_command('', count_cmd)
count = {key: int(value)
for key, value in [item.split(': ')
for item in count_str.split('\n')
if len(item.split(': ')) == 2]}
return count
def get_changed_files(self, staged=False, diff_filter='', index_file=None,
files=None):
"""Return files that have changed between the index and working tree.
Parameters
----------
staged: bool, optional
Consider changes between HEAD and the index instead of changes
between the index and the working tree.
diff_filter: str, optional
Any value accepted by the `--diff-filter` option of `git diff`.
Common ones include "A", "D", "M" for add, deleted, and modified
files, respectively.
index_file: str, optional
Alternative index file for git to use
"""
opts = ['--name-only', '-z']
kwargs = {}
if staged:
opts.append('--staged')
if diff_filter:
opts.append('--diff-filter=%s' % diff_filter)
if index_file:
kwargs['env'] = {'GIT_INDEX_FILE': index_file}
if files is not None:
opts.append('--')
# might be too many, need to chunk up
optss = (
opts + file_chunk
for file_chunk in generate_file_chunks(files, ['git', 'diff'] + opts)
)
else:
optss = [opts]
return [
normpath(f) # Call normpath to convert separators on Windows.
for f in chain(
*(self.repo.git.diff(*opts, **kwargs).split('\0')
for opts in optss)
)
if f
]
def get_missing_files(self):
"""Return a list of paths with missing files (and no staged deletion)"""
return self.get_changed_files(diff_filter='D')
def get_deleted_files(self):
"""Return a list of paths with deleted files (staged deletion)"""
return self.get_changed_files(staged=True, diff_filter='D')
def get_git_attributes(self):
"""Query gitattributes which apply to top level directory
It is a thin compatibility/shortcut wrapper around more versatile
get_gitattributes which operates on a list of paths and returns
a dictionary per each path
Returns
-------
dict:
a dictionary with attribute name and value items relevant for the
top ('.') directory of the repository, and thus most likely the
default ones (if not overwritten with more rules) for all files within
repo.
"""
return self.get_gitattributes('.')['.']
def get_gitattributes(self, path, index_only=False):
"""Query gitattributes for one or more paths
Parameters
----------
path: path or list
Path(s) to query. Paths may be relative or absolute.
index_only: bool
Flag whether to consider only gitattribute setting that are reflected
in the repository index, not just in the work tree content.
Returns
-------
dict:
          Each key is a queried path (always relative to the repository root),
each value is a dictionary with attribute
name and value items. Attribute values are either True or False,
for set and unset attributes, or are the literal attribute value.
"""
path = assure_list(path)
cmd = ["git", "check-attr", "-z", "--all"]
if index_only:
cmd.append('--cached')
stdout, stderr = self._git_custom_command(path, cmd)
# make sure we have one entry for each query path to
# simplify work with the result
attributes = {_normalize_path(self.path, p): {} for p in path}
attr = []
for item in stdout.split('\0'):
attr.append(item)
if len(attr) < 3:
continue
# we have a full record
p, name, value = attr
attrs = attributes[p]
attrs[name] = \
True if value == 'set' else False if value == 'unset' else value
# done, reset item
attr = []
return attributes
def set_gitattributes(self, attrs, attrfile='.gitattributes', mode='a'):
"""Set gitattributes
By default appends additional lines to `attrfile`. Note, that later
lines in `attrfile` overrule earlier ones, which may or may not be
what you want. Set `mode` to 'w' to replace the entire file by
what you provided in `attrs`.
Parameters
----------
attrs : list
Each item is a 2-tuple, where the first element is a path pattern,
and the second element is a dictionary with attribute key/value
pairs. The attribute dictionary must use the same semantics as those
returned by `get_gitattributes()`. Path patterns can use absolute paths,
in which case they will be normalized relative to the directory
that contains the target .gitattributes file (see `attrfile`).
attrfile: path
Path relative to the repository root of the .gitattributes file the
attributes shall be set in.
mode: str
'a' to append .gitattributes, 'w' to replace it
"""
git_attributes_file = op.join(self.path, attrfile)
attrdir = op.dirname(git_attributes_file)
if not op.exists(attrdir):
os.makedirs(attrdir)
with open(git_attributes_file, mode) as f:
for pattern, attr in sorted(attrs, key=lambda x: x[0]):
# normalize the pattern relative to the target .gitattributes file
npath = _normalize_path(
op.join(self.path, op.dirname(attrfile)), pattern)
attrline = u''
if npath.count(' '):
# quote patterns with spaces
attrline += u'"{}"'.format(npath.replace('"', '\\"'))
else:
attrline += npath
for a in sorted(attr):
val = attr[a]
if val is True:
attrline += ' {}'.format(a)
elif val is False:
attrline += ' -{}'.format(a)
else:
attrline += ' {}={}'.format(a, val)
f.write('\n{}'.format(attrline))
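    # Usage sketch (comment only; the pattern/attribute pair is an arbitrary
    # illustration): append a rule to .gitattributes and read the effective
    # attributes back for the repository root:
    #
    #   repo.set_gitattributes([('*.png', {'binary': True})])
    #   attrs = repo.get_git_attributes()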
def get_content_info(self, paths=None, ref=None, untracked='all',
eval_file_type=True):
"""Get identifier and type information from repository content.
This is simplified front-end for `git ls-files/tree`.
Both commands differ in their behavior when queried about subdataset
paths. ls-files will not report anything, ls-tree will report on the
subdataset record. This function uniformly follows the behavior of
ls-tree (report on the respective subdataset mount).
Parameters
----------
paths : list(pathlib.PurePath)
Specific paths, relative to the resolved repository root, to query
info for. Paths must be normed to match the reporting done by Git,
i.e. no parent dir components (ala "some/../this").
If none are given, info is reported for all content.
ref : gitref or None
If given, content information is retrieved for this Git reference
(via ls-tree), otherwise content information is produced for the
present work tree (via ls-files). With a given reference, the
reported content properties also contain a 'bytesize' record,
stating the size of a file in bytes.
untracked : {'no', 'normal', 'all'}
If and how untracked content is reported when no `ref` was given:
'no': no untracked files are reported; 'normal': untracked files
and entire untracked directories are reported as such; 'all': report
individual files even in fully untracked directories.
eval_file_type : bool
If True, inspect file type of untracked files, and report annex
symlink pointers as type 'file'. This convenience comes with a
cost; disable to get faster performance if this information
is not needed.
Returns
-------
dict
Each content item has an entry under a pathlib `Path` object instance
pointing to its absolute path inside the repository (this path is
guaranteed to be underneath `Repo.path`).
Each value is a dictionary with properties:
`type`
Can be 'file', 'symlink', 'dataset', 'directory'
Note that the reported type will not always match the type of
content committed to Git, rather it will reflect the nature
of the content minus platform/mode-specifics. For example,
            a symlink to a locked annexed file on Unix will have the type
            'file' reported, while a symlink to a file in Git or to a directory
            will be of type 'symlink'.
`gitshasum`
SHASUM of the item as tracked by Git, or None, if not
tracked. This could be different from the SHASUM of the file
in the worktree, if it was modified.
Raises
------
ValueError
In case of an invalid Git reference (e.g. 'HEAD' in an empty
repository)
"""
lgr.debug('%s.get_content_info(...)', self)
# TODO limit by file type to replace code in subdatasets command
info = OrderedDict()
if paths:
# path matching will happen against what Git reports
# and Git always reports POSIX paths
# any incoming path has to be relative already, so we can simply
# convert unconditionally
paths = [ut.PurePosixPath(p) for p in paths]
# this will not work in direct mode, but everything else should be
# just fine
if not ref:
# make sure no operations are pending before we figure things
# out in the worktree
self.precommit()
            # --exclude-standard will make sure to honor any standard way
# git can be instructed to ignore content, and will prevent
# crap from contaminating untracked file reports
cmd = ['git', 'ls-files',
'--stage', '-z', '-d', '-m', '--exclude-standard']
# untracked report mode, using labels from `git diff` option style
if untracked == 'all':
cmd.append('-o')
elif untracked == 'normal':
cmd += ['-o', '--directory', '--no-empty-directory']
elif untracked == 'no':
pass
else:
                raise ValueError(
                    'unknown value for `untracked`: %s' % untracked)
props_re = re.compile(
r'(?P<type>[0-9]+) (?P<sha>.*) (.*)\t(?P<fname>.*)$')
else:
cmd = ['git', 'ls-tree', ref, '-z', '-r', '--full-tree', '-l']
props_re = re.compile(
r'(?P<type>[0-9]+) ([a-z]*) (?P<sha>[^ ]*) [\s]*(?P<size>[0-9-]+)\t(?P<fname>.*)$')
lgr.debug('Query repo: %s', cmd)
try:
stdout, stderr = self._git_custom_command(
# specifically always ask for a full report and
# filter out matching path later on to
# homogenize wrt subdataset content paths across
# ls-files and ls-tree
None,
cmd,
log_stderr=True,
log_stdout=True,
# not sure why exactly, but log_online has to be false!
log_online=False,
expect_stderr=False,
shell=False,
# we don't want it to scream on stdout
expect_fail=True)
except CommandError as exc:
if "fatal: Not a valid object name" in text_type(exc):
raise InvalidGitReferenceError(ref)
raise
lgr.debug('Done query repo: %s', cmd)
if not eval_file_type:
_get_link_target = None
elif ref:
def _read_symlink_target_from_catfile(lines):
# it is always the second line, all checks done upfront
header = lines.readline()
if header.rstrip().endswith('missing'):
# something we do not know about, should not happen
# in real use, but guard against to avoid stalling
return ''
return lines.readline().rstrip()
_get_link_target = BatchedCommand(
['git', 'cat-file', '--batch'],
path=self.path,
output_proc=_read_symlink_target_from_catfile,
)
else:
def try_readlink(path):
try:
return os.readlink(path)
except OSError:
# readlink will fail if the symlink reported by ls-files is
# not in the working tree (it could be removed or
# unlocked). Fall back to a slower method.
return op.realpath(path)
_get_link_target = try_readlink
try:
self._get_content_info_line_helper(
paths,
ref,
info,
stdout.split('\0'),
props_re,
_get_link_target)
finally:
if ref and _get_link_target:
# cancel batch process
_get_link_target.close()
lgr.debug('Done %s.get_content_info(...)', self)
return info
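    # Usage sketch (comment only): query content as recorded in a reference vs.
    # the work tree; keys of the returned dict are absolute pathlib Paths under
    # the repository root:
    #
    #   tracked = repo.get_content_info(ref='HEAD')
    #   worktree = repo.get_content_info(untracked='normal')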
def _get_content_info_line_helper(self, paths, ref, info, lines,
props_re, get_link_target):
"""Internal helper of get_content_info() to parse Git output"""
mode_type_map = {
'100644': 'file',
'100755': 'file',
'120000': 'symlink',
'160000': 'dataset',
}
for line in lines:
if not line:
continue
inf = {}
props = props_re.match(line)
if not props:
# not known to Git, but Git always reports POSIX
path = ut.PurePosixPath(line)
inf['gitshasum'] = None
else:
# again Git reports always in POSIX
path = ut.PurePosixPath(props.group('fname'))
            # reject paths as early as possible; the function assumes that any
            # `path` is a relative pathlib instance. If there were path
            # constraints given, we need to reject non-matching paths now:
# reject anything that is:
# - not a direct match with a constraint
# - has no constraint as a parent
# (relevant to find matches of regular files in a repository)
# - is not a parent of a constraint
# (relevant for finding the matching subds entry for
# subds-content paths)
if paths \
and not any(
path == c or path in c.parents or c in path.parents
for c in paths):
continue
# revisit the file props after this path has not been rejected
if props:
inf['gitshasum'] = props.group('sha')
inf['type'] = mode_type_map.get(
props.group('type'), props.group('type'))
if get_link_target and inf['type'] == 'symlink' and \
((ref is None and '.git/annex/objects' in \
ut.Path(
get_link_target(text_type(self.pathobj / path))
).as_posix()) or \
(ref and \
'.git/annex/objects' in get_link_target(
u'{}:{}'.format(
ref, text_type(path))))
):
# report annex symlink pointers as file, their
# symlink-nature is a technicality that is dependent
# on the particular mode annex is in
inf['type'] = 'file'
if ref and inf['type'] == 'file':
inf['bytesize'] = int(props.group('size'))
# join item path with repo path to get a universally useful
# path representation with auto-conversion and tons of other
# stuff
path = self.pathobj.joinpath(path)
if 'type' not in inf:
# be nice and assign types for untracked content
inf['type'] = 'symlink' if path.is_symlink() \
else 'directory' if path.is_dir() else 'file'
info[path] = inf
def status(self, paths=None, untracked='all', eval_submodule_state='full'):
"""Simplified `git status` equivalent.
Parameters
----------
paths : list or None
If given, limits the query to the specified paths. To query all
paths specify `None`, not an empty list. If a query path points
into a subdataset, a report is made on the subdataset record
within the queried dataset only (no recursion).
untracked : {'no', 'normal', 'all'}
If and how untracked content is reported:
'no': no untracked files are reported; 'normal': untracked files
and entire untracked directories are reported as such; 'all': report
individual files even in fully untracked directories.
eval_submodule_state : {'full', 'commit', 'no'}
If 'full' (the default), the state of a submodule is evaluated by
considering all modifications, with the treatment of untracked files
determined by `untracked`. If 'commit', the modification check is
restricted to comparing the submodule's HEAD commit to the one
recorded in the superdataset. If 'no', the state of the subdataset is
not evaluated.
Returns
-------
dict
Each content item has an entry under a pathlib `Path` object instance
pointing to its absolute path inside the repository (this path is
guaranteed to be underneath `Repo.path`).
Each value is a dictionary with properties:
`type`
Can be 'file', 'symlink', 'dataset', 'directory'
`state`
Can be 'added', 'untracked', 'clean', 'deleted', 'modified'.
"""
lgr.debug('Query status of %r for %s paths',
self, len(paths) if paths else 'all')
return self.diffstatus(
fr='HEAD' if self.get_hexsha() else None,
to=None,
paths=paths,
untracked=untracked,
eval_submodule_state=eval_submodule_state)
def diff(self, fr, to, paths=None, untracked='all',
eval_submodule_state='full'):
"""Like status(), but reports changes between to arbitrary revisions
Parameters
----------
fr : str or None
Revision specification (anything that Git understands). Passing
`None` considers anything in the target state as new.
to : str or None
Revision specification (anything that Git understands), or None
to compare to the state of the work tree.
paths : list or None
If given, limits the query to the specified paths. To query all
paths specify `None`, not an empty list.
untracked : {'no', 'normal', 'all'}
If and how untracked content is reported when `to` is None:
'no': no untracked files are reported; 'normal': untracked files
and entire untracked directories are reported as such; 'all': report
individual files even in fully untracked directories.
eval_submodule_state : {'full', 'commit', 'no'}
If 'full' (the default), the state of a submodule is evaluated by
considering all modifications, with the treatment of untracked files
determined by `untracked`. If 'commit', the modification check is
restricted to comparing the submodule's HEAD commit to the one
recorded in the superdataset. If 'no', the state of the subdataset is
not evaluated.
Returns
-------
dict
Each content item has an entry under a pathlib `Path` object instance
pointing to its absolute path inside the repository (this path is
guaranteed to be underneath `Repo.path`).
Each value is a dictionary with properties:
`type`
Can be 'file', 'symlink', 'dataset', 'directory'
`state`
Can be 'added', 'untracked', 'clean', 'deleted', 'modified'.
"""
return {k: v for k, v in iteritems(self.diffstatus(
fr=fr, to=to, paths=paths,
untracked=untracked,
eval_submodule_state=eval_submodule_state))
if v.get('state', None) != 'clean'}
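    # Usage sketch (comment only): status() inspects the work tree against HEAD,
    # while diff() compares two arbitrary states; the revisions below are
    # hypothetical:
    #
    #   st = repo.status(untracked='normal')
    #   changed = repo.diff(fr='HEAD~1', to='HEAD')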
def diffstatus(self, fr, to, paths=None, untracked='all',
eval_submodule_state='full', eval_file_type=True,
_cache=None):
"""Like diff(), but reports the status of 'clean' content too"""
return self._diffstatus(
fr, to, paths, untracked, eval_submodule_state, eval_file_type,
_cache)
def _diffstatus(self, fr, to, paths, untracked, eval_state,
eval_file_type, _cache):
"""Just like diffstatus(), but supports an additional evaluation
state 'global'. If given, it will return a single 'modified'
(vs. 'clean') state label for the entire repository, as soon as
it can."""
def _get_cache_key(label, paths, ref, untracked=None):
return self.path, label, tuple(paths) if paths else None, \
ref, untracked
if _cache is None:
_cache = {}
if paths:
            # at this point we must normalize paths to the form in which
            # Git would report them, to ease matching later on
paths = [ut.Path(p) for p in paths]
paths = [
p.relative_to(self.pathobj) if p.is_absolute() else p
for p in paths
]
# TODO report more info from get_content_info() calls in return
# value, those are cheap and possibly useful to a consumer
# we need (at most) three calls to git
if to is None:
# everything we know about the worktree, including os.stat
# for each file
key = _get_cache_key('ci', paths, None, untracked)
if key in _cache:
to_state = _cache[key]
else:
to_state = self.get_content_info(
paths=paths, ref=None, untracked=untracked,
eval_file_type=eval_file_type)
_cache[key] = to_state
# we want Git to tell us what it considers modified and avoid
# reimplementing logic ourselves
key = _get_cache_key('mod', paths, None)
if key in _cache:
modified = _cache[key]
else:
modified = set(
self.pathobj.joinpath(ut.PurePosixPath(p))
for p in self._git_custom_command(
# low-level code cannot handle pathobjs
[text_type(p) for p in paths] if paths else None,
['git', 'ls-files', '-z', '-m'])[0].split('\0')
if p)
_cache[key] = modified
else:
key = _get_cache_key('ci', paths, to)
if key in _cache:
to_state = _cache[key]
else:
to_state = self.get_content_info(
paths=paths, ref=to, eval_file_type=eval_file_type)
_cache[key] = to_state
# we do not need worktree modification detection in this case
modified = None
# origin state
key = _get_cache_key('ci', paths, fr)
if key in _cache:
from_state = _cache[key]
else:
if fr:
from_state = self.get_content_info(
paths=paths, ref=fr, eval_file_type=eval_file_type)
else:
# no ref means from nothing
from_state = {}
_cache[key] = from_state
status = OrderedDict()
for f, to_state_r in iteritems(to_state):
props = None
if f not in from_state:
# this is new, or rather not known to the previous state
props = dict(
state='added' if to_state_r['gitshasum'] else 'untracked',
)
if 'type' in to_state_r:
props['type'] = to_state_r['type']
elif to_state_r['gitshasum'] == from_state[f]['gitshasum'] and \
(modified is None or f not in modified):
if to_state_r['type'] != 'dataset':
# no change in git record, and no change on disk
props = dict(
state='clean' if f.exists() or \
f.is_symlink() else 'deleted',
type=to_state_r['type'],
)
else:
# a dataset
props = dict(type=to_state_r['type'])
if to is not None:
# we can only be confident without looking
# at the worktree, if we compare to a recorded
# state
props['state'] = 'clean'
else:
# report the shasum that we know, for further
# wrangling of subdatasets below
props['gitshasum'] = to_state_r['gitshasum']
props['prev_gitshasum'] = from_state[f]['gitshasum']
else:
# change in git record, or on disk
props = dict(
# TODO we could have a new file that is already staged
# but had subsequent modifications done to it that are
# unstaged. Such file would presently show up as 'added'
# ATM I think this is OK, but worth stating...
state='modified' if f.exists() or \
f.is_symlink() else 'deleted',
# TODO record before and after state for diff-like use
# cases
type=to_state_r['type'],
)
state = props.get('state', None)
if eval_state == 'global' and \
state not in ('clean', None):
# any modification means globally 'modified'
return 'modified'
if state in ('clean', 'added', 'modified'):
props['gitshasum'] = to_state_r['gitshasum']
if 'bytesize' in to_state_r:
# if we got this cheap, report it
props['bytesize'] = to_state_r['bytesize']
elif props['state'] == 'clean' and 'bytesize' in from_state[f]:
# no change, we can take this old size info
props['bytesize'] = from_state[f]['bytesize']
if state in ('clean', 'modified', 'deleted'):
props['prev_gitshasum'] = from_state[f]['gitshasum']
status[f] = props
for f, from_state_r in iteritems(from_state):
if f not in to_state:
                # we knew this, but now it is gone and Git is not complaining
                # about it being missing -> properly deleted and deletion
                # staged
status[f] = dict(
state='deleted',
type=from_state_r['type'],
# report the shasum to distinguish from a plainly vanished
# file
gitshasum=from_state_r['gitshasum'],
)
if eval_state == 'global':
return 'modified'
if to is not None or eval_state == 'no':
# if we have `to` we are specifically comparing against
# a recorded state, and this function only attempts
# to label the state of a subdataset, not investigate
# specifically what the changes in subdatasets are
# this is done by a high-level command like rev-diff
# so the comparison within this repo and the present
# `state` label are all we need, and they are done already
if eval_state == 'global':
return 'clean'
else:
return status
# loop over all subdatasets and look for additional modifications
for f, st in iteritems(status):
f = text_type(f)
if 'state' in st or not st['type'] == 'dataset':
# no business here
continue
if not GitRepo.is_valid_repo(f):
# submodule is not present, no chance for a conflict
st['state'] = 'clean'
continue
# we have to recurse into the dataset and get its status
subrepo = GitRepo(f)
subrepo_commit = subrepo.get_hexsha()
st['gitshasum'] = subrepo_commit
# subdataset records must be labeled clean up to this point
# test if current commit in subdataset deviates from what is
# recorded in the dataset
st['state'] = 'modified' \
if st['prev_gitshasum'] != subrepo_commit \
else 'clean'
if eval_state == 'global' and st['state'] == 'modified':
return 'modified'
if eval_state == 'commit':
continue
# the recorded commit did not change, so we need to make
# a more expensive traversal
st['state'] = subrepo._diffstatus(
# we can use 'HEAD' because we know that the commit
# did not change. using 'HEAD' will facilitate
# caching the result
fr='HEAD',
to=None,
paths=None,
untracked=untracked,
eval_state='global',
eval_file_type=False,
_cache=_cache) if st['state'] == 'clean' else 'modified'
if eval_state == 'global' and st['state'] == 'modified':
return 'modified'
if eval_state == 'global':
return 'clean'
else:
return status
def _save_pre(self, paths, _status, **kwargs):
# helper to get an actionable status report
if paths is not None and not paths and not _status:
return
if _status is None:
if 'untracked' not in kwargs:
kwargs['untracked'] = 'normal'
status = self.status(
paths=paths,
**{k: kwargs[k] for k in kwargs
if k in ('untracked', 'eval_submodule_state')})
else:
# we want to be able to add items down the line
# make sure to detach from prev. owner
status = _status.copy()
status = OrderedDict(
(k, v) for k, v in iteritems(status)
if v.get('state', None) != 'clean'
)
return status
def get_staged_paths(self):
"""Returns a list of any stage repository path(s)
This is a rather fast call, as it will not depend on what is going on
in the worktree.
"""
try:
stdout, stderr = self._git_custom_command(
None,
['git', 'diff', '--name-only', '--staged'],
cwd=self.path,
log_stderr=True,
log_stdout=True,
log_online=False,
expect_stderr=False,
expect_fail=True)
except CommandError as e:
lgr.debug(exc_str(e))
stdout = ''
return [f for f in stdout.split('\n') if f]
def _save_post(self, message, status, partial_commit):
# helper to commit changes reported in status
_datalad_msg = False
if not message:
message = 'Recorded changes'
_datalad_msg = True
# TODO remove pathobj stringification when commit() can
# handle it
to_commit = [text_type(f.relative_to(self.pathobj))
for f, props in iteritems(status)] \
if partial_commit else None
if not partial_commit or to_commit:
            # we directly call GitRepo.commit() to avoid a whole slew
            # of direct-mode safeguards and workarounds in the AnnexRepo
            # implementation (which also runs an additional dry-run commit)
GitRepo.commit(
self,
files=to_commit,
msg=message,
_datalad_msg=_datalad_msg,
options=None,
# do not raise on empty commit
# it could be that the `add` in this save-cycle has already
# brought back a 'modified' file into a clean state
careless=True,
)
def save(self, message=None, paths=None, _status=None, **kwargs):
"""Save dataset content.
Parameters
----------
message : str or None
A message to accompany the changeset in the log. If None,
a default message is used.
paths : list or None
Any content with path matching any of the paths given in this
list will be saved. Matching will be performed against the
dataset status (GitRepo.status()), or a custom status provided
via `_status`. If no paths are provided, ALL non-clean paths
present in the repo status or `_status` will be saved.
_status : dict or None
If None, Repo.status() will be queried for the given `ds`. If
a dict is given, its content will be used as a constraint.
For example, to save only modified content, but no untracked
content, set `paths` to None and provide a `_status` that has
no entries for untracked content.
**kwargs :
Additional arguments that are passed to underlying Repo methods.
Supported:
          - git : bool (passed to Repo.add())
- eval_submodule_state : {'full', 'commit', 'no'}
passed to Repo.status()
          - untracked : {'no', 'normal', 'all'} - passed to Repo.status()
"""
return list(
self.save_(
message=message,
paths=paths,
_status=_status,
**kwargs
)
)
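    # Usage sketch (comment only; message and path are hypothetical): record all
    # modifications in a new commit, or only selected paths:
    #
    #   repo.save(message='update inputs')
    #   repo.save(message='add one file', paths=['data.txt'])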
def save_(self, message=None, paths=None, _status=None, **kwargs):
"""Like `save()` but working as a generator."""
from datalad.interface.results import get_status_dict
status = self._save_pre(paths, _status, **kwargs)
if not status:
            # all clean, nothing to do
lgr.debug('Nothing to save in %r, exiting early', self)
return
# three things are to be done:
# - remove (deleted if not already staged)
# - add (modified/untracked)
# - commit (with all paths that have been touched, to bypass
# potential pre-staged bits)
need_partial_commit = True if self.get_staged_paths() else False
# remove first, because removal of a subds would cause a
# modification of .gitmodules to be added to the todo list
to_remove = [
# TODO remove pathobj stringification when delete() can
# handle it
text_type(f.relative_to(self.pathobj))
for f, props in iteritems(status)
if props.get('state', None) == 'deleted' and
# staged deletions have a gitshasum reported for them
# those should not be processed as git rm will error
# due to them being properly gone already
not props.get('gitshasum', None)]
vanished_subds = any(
props.get('type', None) == 'dataset' and
props.get('state', None) == 'deleted'
for f, props in iteritems(status))
if to_remove:
for r in self.remove(
to_remove,
# we would always see individual files
recursive=False):
# TODO normalize result
yield get_status_dict(
action='delete',
refds=self.pathobj,
# TODO make remove() report the type
# for now it claims to report on files only
type='file',
path=(self.pathobj / ut.PurePosixPath(r)),
# make remove() report on failures too
status='ok',
logger=lgr)
        # TODO this additional query should not be needed; base it on status as given
        # if anyhow possible. However, when paths are given, status may
# not contain all required information. In case of path=None AND
# _status=None, we should be able to avoid this, because
# status should have the full info already
# looks for contained repositories
added_submodule = False
untracked_dirs = [f.relative_to(self.pathobj)
for f, props in iteritems(status)
if props.get('state', None) == 'untracked' and
props.get('type', None) == 'directory']
if untracked_dirs:
to_add_submodules = [sm for sm, sm_props in iteritems(
self.get_content_info(
untracked_dirs,
ref=None,
# request exhaustive list, so that everything that is
# still reported as a directory must be its own repository
untracked='all'))
if sm_props.get('type', None) == 'directory']
for cand_sm in to_add_submodules:
try:
self.add_submodule(
text_type(cand_sm.relative_to(self.pathobj)),
url=None, name=None)
except (CommandError, InvalidGitRepositoryError) as e:
yield get_status_dict(
action='add_submodule',
ds=self,
path=self.pathobj / ut.PurePosixPath(cand_sm),
status='error',
message=e.stderr if hasattr(e, 'stderr')
else ('not a Git repository: %s', exc_str(e)),
logger=lgr)
continue
# This mirrors the result structure yielded for
# to_stage_submodules below.
yield get_status_dict(
action='add',
refds=self.pathobj,
type='file',
key=None,
path=self.pathobj / ut.PurePosixPath(cand_sm),
status='ok',
logger=lgr)
added_submodule = True
if not need_partial_commit:
# without a partial commit an AnnexRepo would ignore any submodule
# path in its add helper, hence `git add` them explicitly
to_stage_submodules = {
text_type(f.relative_to(self.pathobj)): props
for f, props in iteritems(status)
if props.get('state', None) in ('modified', 'untracked')
and props.get('type', None) == 'dataset'}
if to_stage_submodules:
lgr.debug(
'%i submodule path(s) to stage in %r %s',
len(to_stage_submodules), self,
to_stage_submodules
if len(to_stage_submodules) < 10 else '')
for r in GitRepo._save_add(
self,
to_stage_submodules,
git_opts=None):
# TODO the helper can yield proper dicts right away
yield get_status_dict(
action=r.get('command', 'add'),
refds=self.pathobj,
type='file',
path=(self.pathobj / ut.PurePosixPath(r['file']))
if 'file' in r else None,
status='ok' if r.get('success', None) else 'error',
key=r.get('key', None),
logger=lgr)
if added_submodule or vanished_subds:
# need to include .gitmodules in what needs saving
status[self.pathobj.joinpath('.gitmodules')] = dict(
type='file', state='modified')
if hasattr(self, 'annexstatus') and not kwargs.get('git', False):
# we cannot simply hook into the coming add-call
            # as this would go to annex, so make a dedicated git-add
# call to ensure .gitmodules is not annexed
# in any normal DataLad dataset .gitattributes will
# prevent this, but in a plain repo it won't
# https://github.com/datalad/datalad/issues/3306
for r in GitRepo._save_add(
self,
{op.join(self.path, '.gitmodules'): None}):
yield get_status_dict(
action='add',
refds=self.pathobj,
type='file',
path=(self.pathobj / ut.PurePosixPath(r['file'])),
status='ok' if r.get('success', None) else 'error',
logger=lgr)
to_add = {
# TODO remove pathobj stringification when add() can
# handle it
text_type(f.relative_to(self.pathobj)): props
for f, props in iteritems(status)
if props.get('state', None) in ('modified', 'untracked')}
if to_add:
lgr.debug(
'%i path(s) to add to %s %s',
len(to_add), self, to_add if len(to_add) < 10 else '')
for r in self._save_add(
to_add,
git_opts=None,
**{k: kwargs[k] for k in kwargs
if k in (('git',) if hasattr(self, 'annexstatus')
else tuple())}):
# TODO the helper can yield proper dicts right away
yield get_status_dict(
action=r.get('command', 'add'),
refds=self.pathobj,
type='file',
path=(self.pathobj / ut.PurePosixPath(r['file']))
if 'file' in r else None,
status='ok' if r.get('success', None) else 'error',
key=r.get('key', None),
logger=lgr)
self._save_post(message, status, need_partial_commit)
# TODO yield result for commit, prev helper checked hexsha pre
# and post...
def _save_add(self, files, git_opts=None):
"""Simple helper to add files in save()"""
try:
# without --verbose git 2.9.3 add does not return anything
add_out = self._git_custom_command(
list(files.keys()),
['git', 'add'] + assure_list(git_opts) + ['--verbose']
)
# get all the entries
for o in self._process_git_get_output(*add_out):
yield o
except OSError as e:
lgr.error("add: %s" % e)
raise
# TODO
# remove submodule: nope, this is just deinit_submodule + remove
# status?
def _fixup_submodule_dotgit_setup(ds, relativepath):
"""Implementation of our current of .git in a subdataset
Each subdataset/module has its own .git directory where a standalone
repository would have it. No gitdir files, no symlinks.
"""
# move .git to superrepo's .git/modules, remove .git, create
# .git-file
path = opj(ds.path, relativepath)
subds_dotgit = opj(path, ".git")
src_dotgit = GitRepo.get_git_dir(path)
if src_dotgit == '.git':
# this is what we want
return
# first we want to remove any conflicting worktree setup
# done by git to find the checkout at the mountpoint of the
# submodule, if we keep that, any git command will fail
# after we move .git
GitRepo(path, init=False).config.unset(
'core.worktree', where='local')
# what we have here is some kind of reference, remove and
# replace by the target
os.remove(subds_dotgit)
# make absolute
src_dotgit = opj(path, src_dotgit)
# move .git
from os import rename, listdir, rmdir
assure_dir(subds_dotgit)
for dot_git_entry in listdir(src_dotgit):
rename(opj(src_dotgit, dot_git_entry),
opj(subds_dotgit, dot_git_entry))
assert not listdir(src_dotgit)
rmdir(src_dotgit)
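# Illustrative sketch, not part of the original module: one way a caller might
# consume the result records yielded by save_() above, assuming each record is a
# plain dict with 'status', 'action' and 'path' keys as produced by
# get_status_dict(). The helper name is hypothetical.
def _summarize_save_results(results):
    """Split save_() result records into successes and failures and log the latter."""
    failed = [r for r in results if r.get('status') not in ('ok', 'notneeded')]
    for r in failed:
        lgr.warning('%s failed for %s', r.get('action'), r.get('path'))
    return len(results) - len(failed), len(failed)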
| 38.341087
| 103
| 0.556357
|
8882a9417ba4e01fb2269a4a038d16c1476e6e82
| 2,166
|
py
|
Python
|
src/main.py
|
shubhamgoel27/satellite-image-downloader
|
05c21b45f19c43ad9c9f20d589dc4f01178b3ce0
|
[
"Apache-2.0"
] | 2
|
2021-02-14T10:27:59.000Z
|
2021-10-29T18:51:34.000Z
|
src/main.py
|
shubhamgoel27/satellite-image-downloader
|
05c21b45f19c43ad9c9f20d589dc4f01178b3ce0
|
[
"Apache-2.0"
] | 3
|
2021-06-08T21:41:01.000Z
|
2022-01-13T02:48:30.000Z
|
src/main.py
|
shubhamgoel27/satellite-image-downloader
|
05c21b45f19c43ad9c9f20d589dc4f01178b3ce0
|
[
"Apache-2.0"
] | 2
|
2020-06-05T14:54:48.000Z
|
2021-12-22T16:35:56.000Z
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
import argparse
from SatMaps.Maps import Maps
from utils.utils import setup_logger
from distutils.dir_util import mkpath
def parse_args():
"""Evaluation options for SatMaps download tool"""
parser = argparse.ArgumentParser(description='Satellite Map Generator')
# input_folder
parser.add_argument('project', action='store', help='Directory to store this project in')
parser.add_argument('--southwest', action='store', required=True, help='Southwest latitude and longitude. e.g. --southwest=39.1,-83.2')
parser.add_argument('--northeast', action='store', required=True, help='Northeast latitude and longitude, e.g. --northeast=40.3,-82.4')
parser.add_argument('--scale', action='store', type=int, default=1, help='Scale of image (1, 2)')
parser.add_argument('--size', action='store', type=int, default=2048, help='Size of image')
parser.add_argument('--format', action='store', default='png', help='File type')
parser.add_argument('--maptype', action='store', default='satellite', help='Map type')
parser.add_argument('--clientID', action='store',default=os.environ['GOOGLE_CLIENT_ID'], help='Google API client ID')
parser.add_argument('--secret_key', action='store', default=os.environ['GOOGLE_SECRET_KEY'], help='Google API secret key')
    parser.add_argument('--skip', action='store_true', help='Skip tiles that have already been downloaded')
parser.add_argument('--raster_path', action='store', default='output.tif', help='Output Raster path')
parser.add_argument('--data_dir', type=str, default='data/',
                        help='data directory (default: data/)')
args, unknown = parser.parse_known_args()
return args, unknown
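# Hedged sketch, not part of the original script: --southwest/--northeast above take
# "lat,lon" strings such as "39.1,-83.2". A helper like the one below could turn them
# into float pairs; the name parse_latlon is hypothetical and the Maps class may
# already do this parsing internally.
def parse_latlon(value):
    """Parse a 'lat,lon' command-line string into a (lat, lon) tuple of floats."""
    lat, lon = (float(part) for part in value.split(','))
    return lat, lon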
if __name__ == "__main__":
args, unknown = parse_args()
wrdr = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
mkpath(args.data_dir)
logger = setup_logger('gis_logger',
os.path.join(args.data_dir,"Maps.log"))
logger.info('Logger initialized')
maps = Maps(args, logger, unknown)
tiles = maps.generate()
maps.download(tiles)
maps.stitch(tiles)
| 52.829268
| 139
| 0.695291
|
a48ac74e887a13f618641f24a44110811697daf6
| 1,874
|
py
|
Python
|
examples/example_mcmc_fit_script.py
|
jtschindler/sculptor
|
67b7ebfb05ee8ec9d00399c6d80c238d2104eebc
|
[
"BSD-3-Clause"
] | 7
|
2021-01-25T16:45:57.000Z
|
2022-03-31T11:53:23.000Z
|
examples/example_mcmc_fit_script.py
|
jtschindler/sculptor
|
67b7ebfb05ee8ec9d00399c6d80c238d2104eebc
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T15:41:33.000Z
|
2022-03-31T15:41:33.000Z
|
examples/example_mcmc_fit_script.py
|
jtschindler/sculptor
|
67b7ebfb05ee8ec9d00399c6d80c238d2104eebc
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import corner
import matplotlib.pyplot as plt
from astropy.cosmology import FlatLambdaCDM
from lmfit import Model, Parameters, fit_report
from sculptor import specfit as scfit
def example_fit_mcmc():
"""Fitting the Sculptor example spectrum CIV line using MCMC
:return:
"""
# Import the saved example spectrum
fit = scfit.SpecFit()
fit.load('example_spectrum_fit')
# Setting the fit method to MCMC via emcee
fit.fitting_method = 'Maximum likelihood via Monte-Carlo Markov Chain'
# Set the MCMC keywords
# They can be accessed via fit.emcee_kws
fit.emcee_kws['steps'] = 5000
fit.emcee_kws['burn'] = 500
    # We are fitting 6 parameters, so nwalkers=50 is fine
fit.emcee_kws['nwalkers'] = 50
# No multiprocessing for now
fit.emcee_kws['workers'] = 1
fit.emcee_kws['thin'] = 2
fit.emcee_kws['progress'] = True
# Take uncertainties into account
fit.emcee_kws['is_weighted'] = True
# Select the CIV emission line SpecModel
civ_model = fit.specmodels[2]
# Fit the SpecModel using the MCMC method and emcee_kws modified above
civ_model.fit()
# Print the fit result
print(civ_model.fit_result.fit_report())
# Retrieve the MCMC flat chain of the CIV model fit
data = civ_model.fit_result.flatchain.to_numpy()
# Visualize the flat chain fit results using the typical corner plot
corner_plot = corner.corner(data,
labels=civ_model.fit_result.var_names,
quantiles=[0.16, 0.5, 0.84],
show_titles=True,
title_kwargs={"fontsize": 12}
)
plt.show()
# Save the MCMC flatchain to a file for analysis
civ_model.save_mcmc_chain('example_spectrum_fit')
example_fit_mcmc()
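# Hedged sketch, not part of the original example: assuming the flat chain retrieved
# above via fit_result.flatchain.to_numpy() is a 2-D array of shape
# (n_samples, n_parameters), the 16th/50th/84th percentiles quoted in the corner plot
# can be computed directly. The function name summarize_flatchain is hypothetical.
def summarize_flatchain(data, var_names):
    """Print median and 1-sigma credible intervals for each sampled parameter."""
    import numpy as np
    lo, med, hi = np.percentile(data, [16, 50, 84], axis=0)
    for name, l, m, h in zip(var_names, lo, med, hi):
        print('{}: {:.4g} (+{:.2g}/-{:.2g})'.format(name, m, h - m, m - l))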
| 30.225806
| 74
| 0.653682
|
6d61594a470e6b89d12e49d206d5225e8ebb07e0
| 6,702
|
py
|
Python
|
closed/Alibaba/code/resnet/tfcpp/quantize/converter.py
|
EldritchJS/inference_results_v0.5
|
5552490e184d9fc342d871fcc410ac423ea49053
|
[
"Apache-2.0"
] | null | null | null |
closed/Alibaba/code/resnet/tfcpp/quantize/converter.py
|
EldritchJS/inference_results_v0.5
|
5552490e184d9fc342d871fcc410ac423ea49053
|
[
"Apache-2.0"
] | null | null | null |
closed/Alibaba/code/resnet/tfcpp/quantize/converter.py
|
EldritchJS/inference_results_v0.5
|
5552490e184d9fc342d871fcc410ac423ea49053
|
[
"Apache-2.0"
] | 1
|
2019-12-05T18:53:17.000Z
|
2019-12-05T18:53:17.000Z
|
# Copyright 2019 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
import argparse
from google.protobuf import text_format
import hgai
from hgai.frontend.tensorflow import converter
import tensorflow as tf
import json
import shutil
import pudb
# initialize HanGuangAI
hgai.tf_init()
def make_path(fpth):
"""make sure directory exists before read and write
Arguments:
fpth {str} -- path to desired file
"""
folder = os.path.dirname(fpth)
if folder is None or folder == '':
return
if not os.path.isdir(folder):
os.makedirs(folder)
# read graphdef from tensorflow pb file
def read_proto(pb_fname: str, as_text: bool = False):
graph_def = tf.GraphDef()
with open(pb_fname, "rb") as f:
if not as_text:
graph_def.ParseFromString(f.read())
else:
text_format.Merge(f.read(), graph_def)
return graph_def
# write graphdef to tensorflow pb file
def write_proto(graph_def, pb_fname, as_text: bool = False):
make_path(pb_fname)
if as_text:
with open(pb_fname, 'w') as f:
f.write(str(graph_def))
else:
with open(pb_fname, 'wb') as f:
f.write(graph_def.SerializeToString())
# initialize and update config settings for HanGuangAI quantization process
def update_config(output_type):
config = {}
config['input_shapes'] = [[1, 224, 224, 3]]
config['image_shape'] = [1, 224, 224, 3]
config['output_dir'] = 'output'
config['model_dir'] = os.getenv('MODEL_DIR')
config['data_dir'] = os.getenv('DATA_DIR')
config['cal_list_dir'] = os.getenv('LG_PATH') + '/../calibration/ImageNet/cal_image_list_option_1.txt'
config['quant_cfg'] = {}
for item in ['avgpool_input',
'avgpool_output',
'matmul_activations',
'matmul_weights',
'matmul_output',
'mean_input',
'mean_output',
'maxpool_input',
'maxpool_output',
'conv_activations',
'conv_weights',
'conv_output',
'reshape_input',
'reshape_output']:
config['quant_cfg'][item] = {}
config['quant_cfg'][item]['is_per_channel'] = False
config['quant_cfg'][item]['num_bits'] = 8
config['quant_cfg'][item]['data_format'] = 'NHWC'
for item in ['conv_weights',
'conv_output',
'matmul_weights',
'matmul_output']:
config['quant_cfg'][item]['is_per_channel'] = True
config['quant_cfg']['conv_weights']['data_format'] = 'HWIO'
config['quant_cfg']['matmul_activations']['data_format'] = 'NC'
config['quant_cfg']['matmul_weights']['data_format'] = 'IO'
config['quant_cfg']['matmul_output']['data_format'] = 'NC'
config['quant_cfg']['reshape_output']['data_format'] = 'NC'
config['input_nodes'] = ['input_tensor']
config['output_nodes'] = ['ArgMax']
config['fp32_model_path'] = config['model_dir'] + '/fp32.pb'
config['qfp32_quant_path'] = config['model_dir'] + '/quant.pb'
config['qfp32_cal_path'] = config['model_dir'] + '/cal.pb'
config['npu_model_path'] = config['model_dir'] + '/npu.pb'
config['output_dir'] = 'output'
config['output_type'] = output_type
config['cal_cmd'] = '../classification_and_detection/cpp/classification --dataset={} --calibration={}'.format(config['data_dir'], config['cal_list_dir']) + ' --model={}'
config['test_cmd'] = '../classification_and_detection/cpp/classification --dataset={} --scenario=SingleStream --queries-single=1 --skip-warmup --count=1 --accuracy'.format(config['data_dir']) + ' --model={}'
assert output_type in ['qfp32', 'npu'], "output_type should in [qfp32, npu]"
config['use_prequant'] = True
config['enable_EMA'] = False
config['remove_input_quant'] = True
return config
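# Illustrative sketch, not part of the original tool: the config dict built above is
# plain JSON-serializable data and json is already imported, so it can be written out
# for inspection before the calibration/quantization steps run. The helper name
# dump_config and the default path are hypothetical.
def dump_config(config, path='quant_config.json'):
    """Write the quantization config to a JSON file for inspection."""
    make_path(path)
    with open(path, 'w') as f:
        json.dump(config, f, indent=2)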
# HanGuangAI quantization process
def quantize(config):
# create directory for tmp files generated during process
cal_data_path = config['output_dir']
if os.path.exists(cal_data_path):
shutil.rmtree(cal_data_path)
os.makedirs(cal_data_path, exist_ok=True)
# read original graphdef from fp32 model
graph_def = read_proto(config['fp32_model_path'])
# create HanGuangAI converter object
c = converter(graph_def, config)
# calibration
cal_model = c.to_cal()
write_proto(cal_model, config['qfp32_cal_path'])
cal_cmd = config['cal_cmd'].format(config['qfp32_cal_path'])
print('cal: ' + cal_cmd)
os.system(cal_cmd)
# quantization
quant_model = c.to_quant()
write_proto(quant_model, config['qfp32_quant_path'])
test_cmd = config['test_cmd'].format(config['qfp32_quant_path'])
print('qfp32: ' + test_cmd)
input_quant_path = '{}/input_quant_nodes.txt'.format(config['model_dir'])
if os.path.exists('input_quant_nodes.txt'):
os.remove('input_quant_nodes.txt')
if os.path.exists(input_quant_path):
os.remove(input_quant_path)
os.system(test_cmd)
# compilation and finalization
npu_model = c.to_npu()
write_proto(npu_model, config['npu_model_path'])
if os.path.exists('input_quant_nodes.txt'):
os.rename('input_quant_nodes.txt', input_quant_path)
# final test to make sure the generated npu.pb working
test_cmd = config['test_cmd'].format(config['npu_model_path'])
print('npu: ' + test_cmd)
os.system(test_cmd)
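# Illustrative sketch, not part of the original pipeline: a minimal sanity check that a
# generated .pb (for example config['npu_model_path']) still parses and imports into a
# TensorFlow graph, reusing read_proto() defined above. The function name is
# hypothetical and the call uses the TF1-style API already used in this script.
def check_pb_loads(pb_path):
    """Return the node count of a frozen graph if it imports cleanly."""
    graph_def = read_proto(pb_path)
    with tf.Graph().as_default():
        tf.import_graph_def(graph_def, name='')
    return len(graph_def.node)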
if __name__ == "__main__":
# command line option parsing
parser = argparse.ArgumentParser()
parser.add_argument('--output_type', default='qfp32', help='quantize output type, [qfp32, npu]')
parser.add_argument('--debug', action='store_true', help='enable debug')
args = parser.parse_args()
# enable debugging if needed
if args.debug:
        pudb.set_trace()
# initialize and update config settings
c = update_config(args.output_type)
# HanGuangAI quantization process
quantize(c)
| 36.032258
| 211
| 0.643987
|
5837ed185ccacaffd048c92b68766206b4d3165a
| 32,965
|
py
|
Python
|
now_lms/__init__.py
|
bmosoluciones/now-lms
|
102c00f32ac63f80e428906a8f492e5ff6d4769e
|
[
"Apache-2.0"
] | null | null | null |
now_lms/__init__.py
|
bmosoluciones/now-lms
|
102c00f32ac63f80e428906a8f492e5ff6d4769e
|
[
"Apache-2.0"
] | 9
|
2021-10-30T15:32:00.000Z
|
2022-03-27T23:26:28.000Z
|
now_lms/__init__.py
|
bmosoluciones/now-lms
|
102c00f32ac63f80e428906a8f492e5ff6d4769e
|
[
"Apache-2.0"
] | 1
|
2021-10-17T22:33:03.000Z
|
2021-10-17T22:33:03.000Z
|
# Copyright 2021 BMO Soluciones, S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributors:
# - William José Moreno Reyes
"""NOW Learning Management System."""
# Libreria standar:
import sys
from functools import wraps
from os import environ, name, path, cpu_count
from pathlib import Path
from typing import Dict, Union
# Librerias de terceros:
from flask import Flask, abort, flash, redirect, request, render_template, url_for, current_app
from flask.cli import FlaskGroup
from flask_alembic import Alembic
from flask_login import LoginManager, UserMixin, current_user, login_required, login_user, logout_user
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from flask_uploads import IMAGES, UploadSet, configure_uploads
from loguru import logger as log
from pg8000.dbapi import ProgrammingError as PGProgrammingError
from pg8000.exceptions import DatabaseError
from sqlalchemy.exc import ArgumentError, OperationalError, ProgrammingError
from wtforms import BooleanField, DecimalField, DateField, IntegerField, PasswordField, SelectField, StringField, SubmitField
from wtforms.validators import DataRequired
# Recursos locales:
from now_lms.version import PRERELEASE, VERSION
# < --------------------------------------------------------------------------------------------- >
# Metadatos
__version__: str = VERSION
DESARROLLO: bool = (
(PRERELEASE is not None) or ("FLASK_DEBUG" in environ) or (environ.get("FLASK_ENV") == "development") or ("CI" in environ)
)
APPNAME: str = "NOW LMS"
if DESARROLLO:
log.warning("Opciones de desarrollo detectadas, favor revise su configuración.")
# < --------------------------------------------------------------------------------------------- >
# Datos predefinidos
TIPOS_DE_USUARIO: list = ["admin", "user", "instructor", "moderator"]
# < --------------------------------------------------------------------------------------------- >
# Directorios de la aplicacion
DIRECTORIO_APP: str = path.abspath(path.dirname(__file__))
DIRECTORIO_PRINCICIPAL: Path = Path(DIRECTORIO_APP).parent.absolute()
DIRECTORIO_PLANTILLAS: str = path.join(DIRECTORIO_APP, "templates")
DIRECTORIO_ARCHIVOS: str = path.join(DIRECTORIO_APP, "static")
DIRECTORIO_BASE_ARCHIVOS_DE_USUARIO: str = path.join(DIRECTORIO_APP, "static", "files")
DIRECTORIO_ARCHIVOS_PUBLICOS: str = path.join(DIRECTORIO_BASE_ARCHIVOS_DE_USUARIO, "public")
DIRECTORIO_ARCHIVOS_PRIVADOS: str = path.join(DIRECTORIO_BASE_ARCHIVOS_DE_USUARIO, "private")
# < --------------------------------------------------------------------------------------------- >
# Directorios utilizados para la carga de archivos.
DIRECTORIO_IMAGENES: str = path.join(DIRECTORIO_ARCHIVOS_PUBLICOS, "img")
CARGA_IMAGENES = UploadSet("photos", IMAGES)
# < --------------------------------------------------------------------------------------------- >
# Ubicación predeterminada de base de datos SQLITE
if name == "nt":
SQLITE: str = "sqlite:///" + str(DIRECTORIO_PRINCICIPAL) + "\\now_lms.db"
else:
SQLITE = "sqlite:///" + str(DIRECTORIO_PRINCICIPAL) + "/now_lms.db"
# < --------------------------------------------------------------------------------------------- >
# Configuración de la aplicación, siguiendo "Twelve Factors App" las opciones se leen del entorno
# o se utilizan valores predeterminados.
CONFIGURACION: Dict = {
"ADMIN_USER": environ.get("LMS_USER") or "lms-admin",
"ADMIN_PSWD": environ.get("LMS_PSWD") or "lms-admin",
"SECRET_KEY": environ.get("LMS_KEY") or "dev",
"SQLALCHEMY_DATABASE_URI": environ.get("LMS_DB") or SQLITE,
"SQLALCHEMY_TRACK_MODIFICATIONS": "False",
# Carga de archivos
"UPLOADED_PHOTOS_DEST": DIRECTORIO_IMAGENES,
}
# < --------------------------------------------------------------------------------------------- >
# Inicialización de extensiones de terceros
alembic: Alembic = Alembic()
administrador_sesion: LoginManager = LoginManager()
database: SQLAlchemy = SQLAlchemy()
# < --------------------------------------------------------------------------------------------- >
# Base de datos relacional
MAXIMO_RESULTADOS_EN_CONSULTA_PAGINADA: int = 3
# Para hacer feliz a Sonar Cloud
# https://sonarcloud.io/project/overview?id=bmosoluciones_now-lms
LLAVE_FORONEA_CURSO: str = "curso.codigo"
LLAVE_FORONEA_USUARIO: str = "usuario.usuario"
# pylint: disable=too-few-public-methods
# pylint: disable=no-member
class BaseTabla:
"""Columnas estandar para todas las tablas de la base de datos."""
# Pistas de auditoria comunes a todas las tablas.
id = database.Column(database.Integer(), primary_key=True, nullable=True)
status = database.Column(database.String(50), nullable=True)
creado = database.Column(database.DateTime, default=database.func.now(), nullable=False)
creado_por = database.Column(database.String(15), nullable=True)
modificado = database.Column(database.DateTime, default=database.func.now(), onupdate=database.func.now(), nullable=True)
modificado_por = database.Column(database.String(15), nullable=True)
class Usuario(UserMixin, database.Model, BaseTabla): # type: ignore[name-defined]
"""Una entidad con acceso al sistema."""
# Información Básica
__table_args__ = (database.UniqueConstraint("usuario", name="usuario_unico"),)
usuario = database.Column(database.String(150), nullable=False)
acceso = database.Column(database.LargeBinary(), nullable=False)
nombre = database.Column(database.String(100))
apellido = database.Column(database.String(100))
correo_electronico = database.Column(database.String(100))
# Tipo puede ser: admin, user, instructor, moderator
tipo = database.Column(database.String(20))
activo = database.Column(database.Boolean())
genero = database.Column(database.String(1))
nacimiento = database.Column(database.Date())
class Curso(database.Model, BaseTabla): # type: ignore[name-defined]
"""Un curso es la base del aprendizaje en NOW LMS."""
__table_args__ = (database.UniqueConstraint("codigo", name="curso_codigo_unico"),)
nombre = database.Column(database.String(150), nullable=False)
codigo = database.Column(database.String(20), unique=True)
descripcion = database.Column(database.String(500), nullable=False)
# draft, public, active, closed
estado = database.Column(database.String(10), nullable=False)
# mooc
publico = database.Column(database.Boolean())
certificado = database.Column(database.Boolean())
auditable = database.Column(database.Boolean())
precio = database.Column(database.Numeric())
capacidad = database.Column(database.Integer())
fecha_inicio = database.Column(database.Date())
fecha_fin = database.Column(database.Date())
duracion = database.Column(database.Integer())
portada = database.Column(database.String(250), nullable=True, default=None)
nivel = database.Column(database.Integer())
class CursoSeccion(database.Model, BaseTabla): # type: ignore[name-defined]
"""Los cursos tienen secciones para dividir el contenido en secciones logicas."""
curso = database.Column(database.String(10), database.ForeignKey(LLAVE_FORONEA_CURSO), nullable=False)
nombre = database.Column(database.String(150), nullable=False)
indice = database.Column(database.Integer())
class CursoRecurso(database.Model, BaseTabla): # type: ignore[name-defined]
"""Un curso consta de una serie de recursos."""
curso = database.Column(database.String(10), database.ForeignKey(LLAVE_FORONEA_CURSO), nullable=False)
seccion = database.Column(database.Integer(), database.ForeignKey("curso_seccion.id"), nullable=False)
nombre = database.Column(database.String(150), nullable=False)
indice = database.Column(database.Integer())
class Files(database.Model, BaseTabla): # type: ignore[name-defined]
"""Listado de archivos que se han cargado a la aplicacion."""
archivo = database.Column(database.String(100), nullable=False)
tipo = database.Column(database.String(15), nullable=False)
hash = database.Column(database.String(50), nullable=False)
url = database.Column(database.String(100), nullable=False)
class DocenteCurso(database.Model, BaseTabla): # type: ignore[name-defined]
"""Uno o mas usuario de tipo intructor pueden estar a cargo de un curso."""
curso = database.Column(database.String(10), database.ForeignKey(LLAVE_FORONEA_CURSO), nullable=False)
usuario = database.Column(database.String(10), database.ForeignKey(LLAVE_FORONEA_USUARIO), nullable=False)
vigente = database.Column(database.Boolean())
class ModeradorCurso(database.Model, BaseTabla): # type: ignore[name-defined]
"""Uno o mas usuario de tipo moderator pueden estar a cargo de un curso."""
curso = database.Column(database.String(10), database.ForeignKey(LLAVE_FORONEA_CURSO), nullable=False)
usuario = database.Column(database.String(10), database.ForeignKey(LLAVE_FORONEA_USUARIO), nullable=False)
vigente = database.Column(database.Boolean())
class EstudianteCurso(database.Model, BaseTabla): # type: ignore[name-defined]
"""Uno o mas usuario de tipo user pueden estar a cargo de un curso."""
curso = database.Column(database.String(10), database.ForeignKey(LLAVE_FORONEA_CURSO), nullable=False)
usuario = database.Column(database.String(10), database.ForeignKey(LLAVE_FORONEA_USUARIO), nullable=False)
vigente = database.Column(database.Boolean())
class Configuracion(database.Model, BaseTabla): # type: ignore[name-defined]
"""
Repositorio Central para la configuración de la aplicacion.
Realmente esta tabla solo va a contener un registro con una columna para cada opción, en las plantillas
va a estar disponible como la variable global config.
"""
titulo = database.Column(database.String(150), nullable=False)
descripcion = database.Column(database.String(500), nullable=False)
# Uno de mooc, school, training
modo = database.Column(database.String(500), nullable=False, default="mooc")
# Pagos en linea
paypal_key = database.Column(database.String(150), nullable=True)
stripe_key = database.Column(database.String(150), nullable=True)
# Micelaneos
dev_docs = database.Column(database.Boolean(), default=False)
# Permitir al usuario cargar archivos
file_uploads = database.Column(database.Boolean(), default=False)
# < --------------------------------------------------------------------------------------------- >
# Funciones auxiliares relacionadas a consultas de la base de datos.
def verifica_docente_asignado_a_curso(id_curso: Union[None, str] = None):
"""Si el usuario no esta asignado como docente al curso devuelve None."""
if current_user.is_authenticated:
return DocenteCurso.query.filter(DocenteCurso.usuario == current_user.usuario, DocenteCurso.curso == id_curso)
else:
return False
def verifica_moderador_asignado_a_curso(id_curso: Union[None, str] = None):
"""Si el usuario no esta asignado como moderador al curso devuelve None."""
if current_user.is_authenticated:
return ModeradorCurso.query.filter(ModeradorCurso.usuario == current_user.usuario, ModeradorCurso.curso == id_curso)
else:
return False
def verifica_estudiante_asignado_a_curso(id_curso: Union[None, str] = None):
"""Si el usuario no esta asignado como estudiante al curso devuelve None."""
if current_user.is_authenticated:
return EstudianteCurso.query.filter(EstudianteCurso.usuario == current_user.usuario, EstudianteCurso.curso == id_curso)
else:
return False
# < --------------------------------------------------------------------------------------------- >
# Control de acceso a la aplicación
@administrador_sesion.user_loader
def cargar_sesion(identidad):
"""Devuelve la entrada correspondiente al usuario que inicio sesión."""
if identidad is not None:
return Usuario.query.get(identidad)
return None
@administrador_sesion.unauthorized_handler
def no_autorizado():
"""Redirecciona al inicio de sesión usuarios no autorizados."""
flash("Favor iniciar sesión para acceder al sistema.")
return INICIO_SESION
def proteger_passwd(clave):
"""Devuelve una contraseña salteada con bcrytp."""
from bcrypt import hashpw, gensalt
return hashpw(clave.encode(), gensalt())
def validar_acceso(usuario_id, acceso):
"""Verifica el inicio de sesión del usuario."""
from bcrypt import checkpw
registro = Usuario.query.filter_by(usuario=usuario_id).first()
if registro is not None:
clave_validada = checkpw(acceso.encode(), registro.acceso)
else:
clave_validada = False
return clave_validada
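# Illustrative sketch, not part of the original module: proteger_passwd() above returns
# a bcrypt hash, so a stored hash can be verified directly with bcrypt.checkpw, which is
# what validar_acceso() does against the database. The function name is hypothetical.
def _demo_password_roundtrip(clave="s3cr3t"):
    """Hash a password and verify that it matches, returning True on success."""
    from bcrypt import checkpw
    hashed = proteger_passwd(clave)
    return checkpw(clave.encode(), hashed)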
# < --------------------------------------------------------------------------------------------- >
# Definición de formularios
class LoginForm(FlaskForm):
"""Formulario de inicio de sesión."""
usuario = StringField(validators=[DataRequired()])
acceso = PasswordField(validators=[DataRequired()])
inicio_sesion = SubmitField()
class LogonForm(FlaskForm):
"""Formulario para crear un nuevo usuario."""
usuario = StringField(validators=[DataRequired()])
acceso = PasswordField(validators=[DataRequired()])
nombre = StringField(validators=[DataRequired()])
apellido = StringField(validators=[DataRequired()])
correo_electronico = StringField(validators=[DataRequired()])
class CurseForm(FlaskForm):
"""Formulario para crear un nuevo curso."""
nombre = StringField(validators=[DataRequired()])
codigo = StringField(validators=[DataRequired()])
descripcion = StringField(validators=[DataRequired()])
publico = BooleanField(validators=[])
auditable = BooleanField(validators=[])
certificado = BooleanField(validators=[])
precio = DecimalField(validators=[])
capacidad = IntegerField(validators=[])
fecha_inicio = DateField(validators=[])
fecha_fin = DateField(validators=[])
duracion = IntegerField(validators=[])
    nivel = SelectField("Nivel", choices=[(0, "Introductorio"), (1, "Principiante"), (2, "Intermedio"), (3, "Avanzado")])
# < --------------------------------------------------------------------------------------------- >
# Definición de la aplicación
lms_app = Flask(
"now_lms",
template_folder=DIRECTORIO_PLANTILLAS,
static_folder=DIRECTORIO_ARCHIVOS,
)
lms_app.config.from_mapping(CONFIGURACION)
with lms_app.app_context():
alembic.init_app(lms_app)
administrador_sesion.init_app(lms_app)
database.init_app(lms_app)
configure_uploads(app=lms_app, upload_sets=[CARGA_IMAGENES])
try:
CONFIG = Configuracion.query.first()
except OperationalError:
CONFIG = None
except ProgrammingError:
CONFIG = None
except PGProgrammingError:
CONFIG = None
except DatabaseError:
CONFIG = None
if CONFIG:
log.info("Configuración detectada.")
else:
log.warning("No se pudo cargar la configuración.")
lms_app.jinja_env.globals["current_user"] = current_user
lms_app.jinja_env.globals["config"] = CONFIG
lms_app.jinja_env.globals["docente_asignado"] = verifica_docente_asignado_a_curso
lms_app.jinja_env.globals["moderador_asignado"] = verifica_moderador_asignado_a_curso
lms_app.jinja_env.globals["estudiante_asignado"] = verifica_estudiante_asignado_a_curso
def init_app():
"""Funcion auxiliar para iniciar la aplicacion."""
with current_app.app_context():
if DESARROLLO:
log.warning("Modo desarrollo detectado.")
log.warning("Iniciando una base de datos nueva.")
database.drop_all()
if not database.engine.has_table("usuario"):
log.info("Iniciando Configuracion de la aplicacion.")
log.info("Creando esquema de base de datos.")
database.create_all()
log.info("Creando usuario administrador.")
administrador = Usuario(
usuario=CONFIGURACION.get("ADMIN_USER"),
acceso=proteger_passwd(CONFIGURACION.get("ADMIN_PSWD")),
tipo="admin",
activo=True,
)
config = Configuracion(
titulo="NOW LMS",
descripcion="Sistema de aprendizaje en linea.",
)
database.session.add(administrador)
database.session.add(config)
database.session.commit()
else:
log.warning("NOW LMS ya se encuentra configurado.")
log.warning("Intente ejecutar 'python -m now_lms'")
@lms_app.cli.command()
def setup(): # pragma: no cover
"""Inicia al aplicacion."""
with current_app.app_context():
init_app()
@lms_app.cli.command()
def serve(): # pragma: no cover
"""Servidor WSGI predeterminado."""
from waitress import serve as server
if not CONFIG:
init_app()
if environ.get("LMS_PORT"):
PORT: int = int(environ.get("LMS_PORT"))
elif environ.get("PORT"):
PORT = int(environ.get("PORT"))
else:
PORT = 8080
if DESARROLLO:
THREADS: int = 4
else:
if environ.get("LMS_THREADS"):
THREADS = int(environ.get("LMS_THREADS"))
else:
THREADS = (cpu_count() * 2) + 1
log.info("Iniciando servidor WSGI en puerto {puerto} con {threads} hilos.", puerto=PORT, threads=THREADS)
server(app=lms_app, port=int(PORT), threads=THREADS)
@lms_app.errorhandler(404)
def error_404(error):
"""Pagina personalizada para recursos no encontrados."""
assert error is not None
return render_template("404.html"), 404
@lms_app.errorhandler(403)
def error_403(error):
"""Pagina personalizada para recursos no autorizados."""
assert error is not None
return render_template("403.html"), 403
# < --------------------------------------------------------------------------------------------- >
# Interfaz de linea de comandos
COMMAND_LINE_INTERFACE = FlaskGroup(
help="""\
Interfaz de linea de comandos para la administración de NOW LMS.
"""
)
def command(as_module=False) -> None:
"""Linea de comandos para administración de la aplicacion."""
COMMAND_LINE_INTERFACE.main(args=sys.argv[1:], prog_name="python -m flask" if as_module else None)
# < --------------------------------------------------------------------------------------------- >
# Funciones auxiliares
def asignar_curso_a_instructor(curso_codigo: Union[None, str] = None, usuario_id: Union[None, str] = None):
"""Asigna un usuario como instructor de un curso."""
ASIGNACION = DocenteCurso(curso=curso_codigo, usuario=usuario_id, vigente=True, creado_por=current_user.usuario)
database.session.add(ASIGNACION)
database.session.commit()
def asignar_curso_a_moderador(curso_codigo: Union[None, str] = None, usuario_id: Union[None, str] = None):
"""Asigna un usuario como moderador de un curso."""
ASIGNACION = ModeradorCurso(usuario=usuario_id, curso=curso_codigo, vigente=True, creado_por=current_user.usuario)
database.session.add(ASIGNACION)
database.session.commit()
def asignar_curso_a_estudiante(curso_codigo: Union[None, str] = None, usuario_id: Union[None, str] = None):
"""Asigna un usuario como moderador de un curso."""
ASIGNACION = EstudianteCurso(
creado_por=current_user.usuario,
curso=curso_codigo,
usuario=usuario_id,
vigente=True,
)
database.session.add(ASIGNACION)
database.session.commit()
def cambia_tipo_de_usuario_por_id(id_usuario: Union[None, str] = None, nuevo_tipo: Union[None, str] = None):
"""
Cambia el estatus de un usuario del sistema.
Los valores reconocidos por el sistema son: admin, user, instructor, moderator.
"""
USUARIO = Usuario.query.filter_by(usuario=id_usuario).first()
USUARIO.tipo = nuevo_tipo
database.session.commit()
def cambia_estado_curso_por_id(id_curso: Union[None, str, int] = None, nuevo_estado: Union[None, str] = None):
"""
Cambia el estatus de un curso.
Los valores reconocidos por el sistema son: draft, public, open, closed.
"""
CURSO = Curso.query.filter_by(codigo=id_curso).first()
CURSO.estado = nuevo_estado
database.session.commit()
def cambia_curso_publico(id_curso: Union[None, str, int] = None):
"""Cambia el estatus publico de un curso."""
CURSO = Curso.query.filter_by(codigo=id_curso).first()
if CURSO.publico:
CURSO.publico = False
else:
CURSO.publico = True
database.session.commit()
# < --------------------------------------------------------------------------------------------- >
# Definición de rutas/vistas
def perfil_requerido(perfil_id):
"""Comprueba si un usuario tiene acceso a un recurso determinado en base a su tipo."""
def decorator_verifica_acceso(func):
@wraps(func)
def wrapper(*args, **kwargs):
if (current_user.is_authenticated and current_user.tipo == perfil_id) or current_user.tipo == "admin":
return func(*args, **kwargs)
else:
flash("No se encuentra autorizado a acceder al recurso solicitado.")
return abort(403)
return wrapper
return decorator_verifica_acceso
# < --------------------------------------------------------------------------------------------- >
# Definición de rutas/vistas
# pylint: disable=singleton-comparison
# <-------- Autenticación de usuarios -------->
INICIO_SESION = redirect("/login")
@lms_app.route("/login", methods=["GET", "POST"])
def inicio_sesion():
"""Inicio de sesión del usuario."""
form = LoginForm()
if form.validate_on_submit():
if validar_acceso(form.usuario.data, form.acceso.data):
identidad = Usuario.query.filter_by(usuario=form.usuario.data).first()
if identidad.activo:
login_user(identidad)
return redirect(url_for("panel"))
else:
flash("Su cuenta esta inactiva.")
return INICIO_SESION
else:
flash("Inicio de Sesion Incorrecto.")
return INICIO_SESION
return render_template("auth/login.html", form=form, titulo="Inicio de Sesion - NOW LMS")
@lms_app.route("/logon", methods=["GET", "POST"])
def crear_cuenta():
"""Crear cuenta de usuario."""
form = LogonForm()
if form.validate_on_submit() or request.method == "POST":
usuario_ = Usuario(
usuario=form.usuario.data,
acceso=proteger_passwd(form.acceso.data),
nombre=form.nombre.data,
apellido=form.apellido.data,
correo_electronico=form.correo_electronico.data,
tipo="user",
activo=False,
)
try:
database.session.add(usuario_)
database.session.commit()
flash("Cuenta creada exitosamente.")
return INICIO_SESION
except OperationalError:
flash("Error al crear la cuenta.")
return redirect("/logon")
else:
return render_template("auth/logon.html", form=form, titulo="Crear cuenta - NOW LMS")
@lms_app.route("/new_user", methods=["GET", "POST"])
def crear_usuario():
"""Crear manualmente una cuenta de usuario."""
form = LogonForm()
if form.validate_on_submit() or request.method == "POST":
usuario_ = Usuario(
usuario=form.usuario.data,
acceso=proteger_passwd(form.acceso.data),
nombre=form.nombre.data,
apellido=form.apellido.data,
correo_electronico=form.correo_electronico.data,
tipo="user",
activo=False,
)
try:
database.session.add(usuario_)
database.session.commit()
flash("Usuario creado exitosamente.")
return redirect(url_for("usuario", id_usuario=form.usuario.data))
except OperationalError:
flash("Error al crear la cuenta.")
return redirect("/new_user")
else:
return render_template(
"learning/nuevo_usuario.html",
form=form,
)
@lms_app.route("/exit")
@lms_app.route("/logout")
@lms_app.route("/salir")
def cerrar_sesion():
"""Finaliza la sesion actual."""
logout_user()
return redirect("/home")
# <-------- Estructura general de al aplicación -------->
@lms_app.route("/")
@lms_app.route("/home")
@lms_app.route("/index")
def home():
"""Página principal de la aplicación."""
CURSOS = Curso.query.filter(Curso.publico == True, Curso.estado == "public").paginate( # noqa: E712
request.args.get("page", default=1, type=int), 6, False
)
return render_template("inicio/mooc.html", cursos=CURSOS)
@lms_app.route("/dashboard")
@lms_app.route("/panel")
@login_required
def panel():
"""Panel principal de la aplicacion."""
return render_template("inicio/panel.html")
@lms_app.route("/student")
@login_required
def pagina_estudiante():
"""Perfil de usuario."""
return render_template("perfiles/estudiante.html")
@lms_app.route("/moderator")
@login_required
def pagina_moderador():
"""Perfil de usuario moderador."""
return render_template("perfiles/moderador.html")
@lms_app.route("/instructor")
@login_required
def pagina_instructor():
"""Perfil de usuario instructor."""
return render_template("perfiles/instructor.html")
@lms_app.route("/admin")
@login_required
def pagina_admin():
"""Perfil de usuario administrador."""
return render_template("perfiles/admin.html", inactivos=Usuario.query.filter_by(activo=False).count() or 0)
# <-------- Aprendizaje -------->
@lms_app.route("/program")
@lms_app.route("/programa")
def programa():
"""
Página principal del programa.
Un programa puede constar de uno o mas cursos individuales
"""
@lms_app.route("/new_curse", methods=["GET", "POST"])
@login_required
@perfil_requerido("instructor")
def nuevo_curso():
"""Formulario para crear un nuevo usuario."""
form = CurseForm()
if form.validate_on_submit() or request.method == "POST":
nuevo_curso_ = Curso(
nombre=form.nombre.data,
codigo=form.codigo.data,
descripcion=form.descripcion.data,
estado="draft",
publico=form.publico.data,
auditable=form.auditable.data,
certificado=form.certificado.data,
precio=form.precio.data,
capacidad=form.capacidad.data,
fecha_inicio=form.fecha_inicio.data,
fecha_fin=form.fecha_fin.data,
duracion=form.duracion.data,
creado_por=current_user.usuario,
nivel=form.nivel.data,
)
try:
database.session.add(nuevo_curso_)
database.session.commit()
asignar_curso_a_instructor(curso_codigo=form.codigo.data, usuario_id=current_user.usuario)
flash("Curso creado exitosamente.")
return redirect(url_for("curso", course_code=form.codigo.data))
except OperationalError:
flash("Hubo en error al crear su curso.")
return redirect("/instructor")
else:
return render_template("learning/nuevo_curso.html", form=form)
@lms_app.route("/courses")
@lms_app.route("/cursos")
@login_required
def cursos():
"""Pagina principal del curso."""
if current_user.tipo == "admin":
lista_cursos = Curso.query.paginate(
request.args.get("page", default=1, type=int), MAXIMO_RESULTADOS_EN_CONSULTA_PAGINADA, False
)
else:
try:
lista_cursos = (
Curso.query.join(Curso.creado_por)
.filter(Usuario.id == current_user.id)
.paginate(request.args.get("page", default=1, type=int), MAXIMO_RESULTADOS_EN_CONSULTA_PAGINADA, False)
)
except ArgumentError:
lista_cursos = None
return render_template("learning/curso_lista.html", consulta=lista_cursos)
@lms_app.route("/course/<course_code>")
@lms_app.route("/curso")
def curso(course_code=None):
"""Pagina principal del curso."""
return render_template(
"learning/curso.html",
curso=Curso.query.filter_by(codigo=course_code).first(),
secciones=CursoSeccion.query.filter_by(curso=course_code).all(),
recursos=CursoRecurso.query.filter_by(curso=course_code).all(),
)
# <-------- Administración -------->
@lms_app.route("/users")
@login_required
@perfil_requerido("admin")
def usuarios():
"""Lista de usuarios con acceso a al aplicación."""
CONSULTA = Usuario.query.paginate(
request.args.get("page", default=1, type=int), MAXIMO_RESULTADOS_EN_CONSULTA_PAGINADA, False
)
return render_template(
"admin/users.html",
consulta=CONSULTA,
)
@lms_app.route("/inactive_users")
@login_required
@perfil_requerido("admin")
def usuarios_inactivos():
"""Lista de usuarios con acceso a al aplicación."""
CONSULTA = Usuario.query.filter_by(activo=False).paginate(
request.args.get("page", default=1, type=int), MAXIMO_RESULTADOS_EN_CONSULTA_PAGINADA, False
)
return render_template(
"admin/inactive_users.html",
consulta=CONSULTA,
)
@lms_app.route("/user/<id_usuario>")
@login_required
@perfil_requerido("admin")
def usuario(id_usuario):
"""Acceso administrativo al perfil de un usuario."""
perfil_usuario = Usuario.query.filter_by(usuario=id_usuario).first()
# La misma plantilla del perfil de usuario con permisos elevados como
# activar desactivar el perfil o cambiar el perfil del usuario.
return render_template("inicio/perfil.html", perfil=perfil_usuario)
# <-------- Espacio del usuario -------->
@lms_app.route("/perfil")
@login_required
def perfil():
"""Perfil del usuario."""
perfil_usuario = Usuario.query.filter_by(usuario=current_user.usuario).first()
return render_template("inicio/perfil.html", perfil=perfil_usuario)
# <-------- Funciones Auxiliares -------->
@lms_app.route("/set_user_as_active/<user_id>")
@login_required
@perfil_requerido("admin")
def activar_usuario(user_id):
"""Estable el usuario como activo y redirecciona a la vista dada."""
perfil_usuario = Usuario.query.filter_by(usuario=user_id).first()
perfil_usuario.activo = True
database.session.add(perfil_usuario)
database.session.commit()
return redirect(url_for("usuario", id_usuario=user_id))
@lms_app.route("/set_user_as_inactive/<user_id>")
@login_required
@perfil_requerido("admin")
def inactivar_usuario(user_id):
"""Estable el usuario como activo y redirecciona a la vista dada."""
perfil_usuario = Usuario.query.filter_by(usuario=user_id).first()
perfil_usuario.activo = False
database.session.add(perfil_usuario)
database.session.commit()
return redirect(url_for("usuario", id_usuario=user_id))
@lms_app.route("/delete_user/<user_id>")
@login_required
@perfil_requerido("admin")
def eliminar_usuario(user_id):
"""Elimina un usuario por su id y redirecciona a la vista dada."""
perfil_usuario = Usuario.query.filter(Usuario.usuario == user_id)
perfil_usuario.delete()
database.session.commit()
return redirect(url_for(request.args.get("ruta", default="home", type=str)))
@lms_app.route("/change_user_tipo")
@login_required
@perfil_requerido("admin")
def cambiar_tipo_usario():
"""Actualiza el tipo de usuario."""
cambia_tipo_de_usuario_por_id(
id_usuario=request.args.get("user"),
nuevo_tipo=request.args.get("type"),
)
return redirect(url_for("usuario", id_usuario=request.args.get("user")))
@lms_app.route("/change_curse_status")
@login_required
@perfil_requerido("admin")
def cambiar_estatus_curso():
"""Actualiza el estatus de un curso."""
cambia_estado_curso_por_id(
id_curso=request.args.get("curse"),
nuevo_estado=request.args.get("status"),
)
return redirect(url_for("curso", course_code=request.args.get("curse")))
@lms_app.route("/change_curse_public")
@login_required
@perfil_requerido("admin")
def cambiar_curso_publico():
"""Actualiza el estado publico de un curso."""
cambia_curso_publico(
id_curso=request.args.get("curse"),
)
return redirect(url_for("curso", course_code=request.args.get("curse")))
# Los servidores WSGI buscan por defecto una app
app = lms_app
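# Illustrative sketch, not part of the original module: any WSGI server can pick up the
# `app` object exported above. For example, with waitress (already used by the `serve`
# CLI command) the application could be served programmatically. The function name and
# default port are hypothetical.
def _run_with_waitress(port=8080):
    """Serve the exported WSGI app with waitress on the given port."""
    from waitress import serve as _serve
    _serve(app=app, port=port)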
| 36.791295
| 127
| 0.666859
|
e1cb7acab81d706eb3e3c33a350c94b56c0ff899
| 8,879
|
py
|
Python
|
scipy/constants/constants.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | 2
|
2015-10-30T10:04:46.000Z
|
2017-03-11T00:58:21.000Z
|
scipy/constants/constants.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/constants/constants.py
|
seberg/scipy
|
d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Collection of physical constants and conversion factors.
Most constants are in SI units, so you can do
print '10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots'
The list is not meant to be comprehensive, but just a convenient list for everyday use.
"""
"""
BasSw 2006
physical constants: imported from CODATA
unit conversion: see e.g. NIST special publication 811
Use at own risk: double-check values before calculating your Mars orbit-insertion burn.
Some constants exist in a few variants, which are marked with suffixes.
The ones without any suffix should be the most common one.
"""
import math as _math
from codata import value as _cd
import numpy as _np
#mathematical constants
pi = _math.pi
golden = golden_ratio = (1 + _math.sqrt(5)) / 2
#SI prefixes
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
#binary prefixes
kibi = 2**10
mebi = 2**20
gibi = 2**30
tebi = 2**40
pebi = 2**50
exbi = 2**60
zebi = 2**70
yobi = 2**80
#physical constants
c = speed_of_light = _cd('speed of light in vacuum')
mu_0 = 4e-7*pi
epsilon_0 = 1 / (mu_0*c*c)
h = Planck = _cd('Planck constant')
hbar = h / (2 * pi)
G = gravitational_constant = _cd('Newtonian constant of gravitation')
g = _cd('standard acceleration of gravity')
e = elementary_charge = _cd('elementary charge')
R = gas_constant = _cd('molar gas constant')
alpha = fine_structure = _cd('fine-structure constant')
N_A = Avogadro = _cd('Avogadro constant')
k = Boltzmann = _cd('Boltzmann constant')
sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant')
Wien = _cd('Wien wavelength displacement law constant')
Rydberg = _cd('Rydberg constant')
#weight in kg
gram = 1e-3
metric_ton = 1e3
grain = 64.79891e-6
lb = pound = 7000 * grain #avoirdupois
oz = ounce = pound / 16
stone = 14 * pound
long_ton = 2240 * pound
short_ton = 2000 * pound
troy_ounce = 480 * grain #only for metals / gems
troy_pound = 12 * troy_ounce
carat = 200e-6
m_e = electron_mass = _cd('electron mass')
m_p = proton_mass = _cd('proton mass')
m_n = neutron_mass = _cd('neutron mass')
m_u = u = atomic_mass = _cd('atomic mass constant')
#angle in rad
degree = pi / 180
arcmin = arcminute = degree / 60
arcsec = arcsecond = arcmin / 60
#time in second
minute = 60.0
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day
Julian_year = 365.25 * day
#length in meter
inch = 0.0254
foot = 12 * inch
yard = 3 * foot
mile = 1760 * yard
mil = inch / 1000
pt = point = inch / 72 #typography
survey_foot = 1200.0 / 3937
survey_mile = 5280 * survey_foot
nautical_mile = 1852.0
fermi = 1e-15
angstrom = 1e-10
micron = 1e-6
au = astronomical_unit = 149597870691.0
light_year = Julian_year * c
parsec = au / arcsec
#pressure in pascal
atm = atmosphere = _cd('standard atmosphere')
bar = 1e5
torr = mmHg = atm / 760
psi = pound * g / (inch * inch)
#area in meter**2
hectare = 1e4
acre = 43560 * foot**2
#volume in meter**3
litre = liter = 1e-3
gallon = gallon_US = 231 * inch**3 #US
#pint = gallon_US / 8
fluid_ounce = fluid_ounce_US = gallon_US / 128
bbl = barrel = 42 * gallon_US #for oil
gallon_imp = 4.54609e-3 #uk
fluid_ounce_imp = gallon_imp / 160
#speed in meter per second
kmh = 1e3 / hour
mph = mile / hour
mach = speed_of_sound = 340.5 #approx value at 15 degrees in 1 atm. is this a common value?
knot = nautical_mile / hour
#temperature in kelvin
zero_Celsius = 273.15
degree_Fahrenheit = 1/1.8 #only for differences
#energy in joule
eV = electron_volt = elementary_charge # * 1 Volt
calorie = calorie_th = 4.184
calorie_IT = 4.1868
erg = 1e-7
Btu_th = pound * degree_Fahrenheit * calorie_th / gram
Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
ton_TNT = 1e9 * calorie_th
#Wh = watt_hour
#power in watt
hp = horsepower = 550 * foot * pound * g
#force in newton
dyn = dyne = 1e-5
lbf = pound_force = pound * g
kgf = kilogram_force = g # * 1 kg
#functions for conversions that are not linear
def C2K(C):
"""
Convert Celsius to Kelvin
Parameters
----------
C : array_like
Celsius temperature(s) to be converted.
Returns
-------
K : float or array of floats
Equivalent Kelvin temperature(s).
Notes
-----
Computes ``K = C + zero_Celsius`` where `zero_Celsius` = 273.15, i.e.,
(the absolute value of) temperature "absolute zero" as measured in Celsius.
Examples
--------
>>> from scipy.constants.constants import C2K
>>> C2K(_np.array([-40, 40.0]))
array([ 233.15, 313.15])
"""
return _np.asanyarray(C) + zero_Celsius
def K2C(K):
"""
Convert Kelvin to Celsius
Parameters
----------
K : array_like
Kelvin temperature(s) to be converted.
Returns
-------
C : float or array of floats
Equivalent Celsius temperature(s).
Notes
-----
Computes ``C = K - zero_Celsius`` where `zero_Celsius` = 273.15, i.e.,
(the absolute value of) temperature "absolute zero" as measured in Celsius.
Examples
--------
>>> from scipy.constants.constants import K2C
>>> K2C(_np.array([233.15, 313.15]))
array([-40., 40.])
"""
return _np.asanyarray(K) - zero_Celsius
def F2C(F):
"""
Convert Fahrenheit to Celsius
Parameters
----------
F : array_like
Fahrenheit temperature(s) to be converted.
Returns
-------
C : float or array of floats
Equivalent Celsius temperature(s).
Notes
-----
Computes ``C = (F - 32) / 1.8``.
Examples
--------
>>> from scipy.constants.constants import F2C
>>> F2C(_np.array([-40, 40.0]))
array([-40. , 4.44444444])
"""
return (_np.asanyarray(F) - 32) / 1.8
def C2F(C):
"""
Convert Celsius to Fahrenheit
Parameters
----------
C : array_like
Celsius temperature(s) to be converted.
Returns
-------
F : float or array of floats
Equivalent Fahrenheit temperature(s).
Notes
-----
Computes ``F = 1.8 * C + 32``.
Examples
--------
>>> from scipy.constants.constants import C2F
>>> C2F(_np.array([-40, 40.0]))
array([ -40., 104.])
"""
return 1.8 * _np.asanyarray(C) + 32
def F2K(F):
"""
Convert Fahrenheit to Kelvin
Parameters
----------
F : array_like
Fahrenheit temperature(s) to be converted.
Returns
-------
K : float or array of floats
Equivalent Kelvin temperature(s).
Notes
-----
Computes ``K = (F - 32)/1.8 + zero_Celsius`` where `zero_Celsius` =
273.15, i.e., (the absolute value of) temperature "absolute zero" as
measured in Celsius.
Examples
--------
>>> from scipy.constants.constants import F2K
>>> F2K(_np.array([-40, 104]))
array([ 233.15, 313.15])
"""
return C2K(F2C(_np.asanyarray(F)))
def K2F(K):
"""
Convert Kelvin to Fahrenheit
Parameters
----------
K : array_like
Kelvin temperature(s) to be converted.
Returns
-------
F : float or array of floats
Equivalent Fahrenheit temperature(s).
Notes
-----
Computes ``F = 1.8 * (K - zero_Celsius) + 32`` where `zero_Celsius` =
273.15, i.e., (the absolute value of) temperature "absolute zero" as
measured in Celsius.
Examples
--------
>>> from scipy.constants.constants import K2F
>>> K2F(_np.array([233.15, 313.15]))
array([ -40., 104.])
"""
return C2F(K2C(_np.asanyarray(K)))
#optics
def lambda2nu(lambda_):
"""
Convert wavelength to optical frequency
Parameters
----------
lambda : array_like
Wavelength(s) to be converted.
Returns
-------
nu : float or array of floats
Equivalent optical frequency.
Notes
-----
Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the
(vacuum) speed of light in meters/second.
Examples
--------
>>> from scipy.constants.constants import lambda2nu
>>> lambda2nu(_np.array((1, speed_of_light)))
array([ 2.99792458e+08, 1.00000000e+00])
"""
return _np.asanyarray(c) / lambda_
def nu2lambda(nu):
"""
Convert optical frequency to wavelength.
Parameters
----------
nu : array_like
Optical frequency to be converted.
Returns
-------
lambda : float or array of floats
Equivalent wavelength(s).
Notes
-----
Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the
(vacuum) speed of light in meters/second.
Examples
--------
>>> from scipy.constants.constants import nu2lambda
>>> nu2lambda(_np.array((1, speed_of_light)))
array([ 2.99792458e+08, 1.00000000e+00])
"""
return c / _np.asanyarray(nu)
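# Illustrative sketch, not part of the original module: the constants above are plain
# floats in SI units, so unit conversion is ordinary arithmetic, as in the example from
# the module docstring. The function name is hypothetical.
def _example_speed_conversion(n_miles_per_minute=10):
    """Return the (metres per second, knots) equivalent of n miles per minute."""
    mps = n_miles_per_minute * mile / minute
    return mps, mps / knot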
| 22.253133
| 91
| 0.628562
|
984973de3680969d21de7daf893405ea2914817e
| 1,971
|
py
|
Python
|
Python3/745.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 854
|
2018-11-09T08:06:16.000Z
|
2022-03-31T06:05:53.000Z
|
Python3/745.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 29
|
2019-06-02T05:02:25.000Z
|
2021-11-15T04:09:37.000Z
|
Python3/745.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 347
|
2018-12-23T01:57:37.000Z
|
2022-03-12T14:51:21.000Z
|
__________________________________________________________________________________________________
sample 716 ms submission
class WordFilter:
def __init__(self, words):
from collections import defaultdict
self.prefixes = defaultdict(set)
self.suffixes = defaultdict(set)
self.weights = {}
for index, word in enumerate(words):
prefix, suffix = '', ''
for char in [''] + list(word):
prefix += char
self.prefixes[prefix].add(word)
for char in [''] + list(word[::-1]):
suffix += char
self.suffixes[suffix[::-1]].add(word)
self.weights[word] = index
def f(self, prefix, suffix):
weight = -1
for word in self.prefixes[prefix] & self.suffixes[suffix]:
if self.weights[word] > weight:
weight = self.weights[word]
return weight
__________________________________________________________________________________________________
sample 748 ms submission
from typing import List
class WordFilter:
def __init__(self, words: List[str]):
self.filters = {}
for idx, word in enumerate(words):
n = len(word)
prefixes = ['']*(n+1)
suffixes = ['']*(n+1)
for i in range(n):
prefixes[i+1] = prefixes[i] + word[i]
suffixes[i+1] = word[n-i-1] + suffixes[i]
for pre in prefixes:
for suf in suffixes:
self.filters[pre + '_' + suf] = idx
def f(self, prefix: str, suffix: str) -> int:
key = prefix + '_' + suffix
if key in self.filters: return self.filters[key]
return -1
# Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(words)
# param_1 = obj.f(prefix,suffix)
__________________________________________________________________________________________________
| 36.5
| 98
| 0.591578
|
2b4c9f9014c2ed80a8f51f2e884d312bec02ba67
| 10,737
|
py
|
Python
|
contrib/devtools/update-translations.py
|
SparkBaseHub/SparkToken
|
d5c560c8750fee02e6551e6b8254b6b3ebe3c6c2
|
[
"MIT"
] | 2
|
2018-11-01T20:51:47.000Z
|
2018-11-03T03:32:11.000Z
|
contrib/devtools/update-translations.py
|
SparkBaseHub/SparkToken
|
d5c560c8750fee02e6551e6b8254b6b3ebe3c6c2
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
SparkBaseHub/SparkToken
|
d5c560c8750fee02e6551e6b8254b6b3ebe3c6c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
- update git for added translations
- update build system
'''
import argparse
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'spark_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
# Minimum completion percentage required to download from transifex
MINIMUM_PERC = 80
# Path to git
GIT = os.getenv("GIT", "git")
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
sys.exit(1)
def remove_current_translations():
'''
Remove current translations, as well as temporary files that might be left behind
We only want the active translations that are currently on transifex.
This leaves spark_en.ts untouched.
'''
for (_,name) in all_ts_files():
os.remove(name)
for (_,name) in all_ts_files('.orig'):
os.remove(name + '.orig')
def fetch_all_translations(fAll = False):
call_list = [TX, 'pull', '-f', '-a']
if not fAll:
call_list.append('--minimum-perc=%s' % MINIMUM_PERC)
if subprocess.call(call_list):
print('Error while fetching translations', file=sys.stderr)
sys.exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
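# Illustrative sketch (not part of the original script): how the two helpers above
# classify a mixed Qt-style message such as '(percentage: %1%)'.
def _demo_split_format_specifiers():
    specifiers = find_format_specifiers('(percentage: %1%)')  # ['1', ')']
    return split_format_specifiers(specifiers)                # -> ({'1'}, [])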
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix='', include_source=False):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or (not include_source and filename == SOURCE_LANG+suffix):
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
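# Illustrative sketch (not part of the original script): FIX_RE drops the low ASCII
# control bytes (0x00-0x1f) except newline (0x0a) and carriage return (0x0d).
def _demo_remove_invalid_characters():
    return remove_invalid_characters(b'ok\x00\x01\nnext\r\n')  # -> b'ok\nnext\r\n'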
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
def update_git():
'''
Add new files to git repository.
(Removing files isn't necessary here, as `git commit -a` will take care of removing files that are gone)
'''
file_paths = [filepath for (filename, filepath) in all_ts_files()]
subprocess.check_call([GIT, 'add'] + file_paths)
def update_build_systems():
'''
Update build system and Qt resource descriptors.
'''
filename_lang = [re.match(r'((spark_(.*)).ts)$', filename).groups() for (filename, filepath) in all_ts_files(include_source=True)]
filename_lang.sort(key=lambda x: x[0])
# update qrc locales
with open('src/qt/spark_locale.qrc', 'w', encoding="utf8") as f:
f.write('<!DOCTYPE RCC><RCC version="1.0">\n')
f.write(' <qresource prefix="/translations">\n')
for (filename, basename, lang) in filename_lang:
f.write(f' <file alias="{lang}">locale/{basename}.qm</file>\n')
f.write(' </qresource>\n')
f.write('</RCC>\n')
# update Makefile include
with open('src/Makefile.qt_locale.include', 'w', encoding="utf8") as f:
f.write('QT_TS = \\\n')
f.write(' \\\n'.join(f' qt/locale/{filename}' for (filename, basename, lang) in filename_lang))
f.write('\n') # make sure last line doesn't end with a backslash
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [update-translations.py options] [flags]',
description=__doc__,
epilog='',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--ignore_completion', '-i', action='store_true', help='fetch all translations, even those not reaching the completion threshold')
args, unknown_args = parser.parse_known_args()
check_at_repository_root()
remove_current_translations()
fetch_all_translations(args.ignore_completion)
postprocess_translations()
update_git()
update_build_systems()
| 39.474265
| 154
| 0.632486
|
07d41e6dc8abb01474724b30ca742e51c0f85d15
| 1,073
|
py
|
Python
|
Assignment 01 - Python Basics/problem3.py
|
lmottasin/Simulation_Lab
|
e3b365f7bb7f6ac4b542f2c6bd721e4a77c056ea
|
[
"MIT"
] | 2
|
2021-07-13T04:44:47.000Z
|
2022-01-17T20:21:11.000Z
|
Assignment 01 - Python Basics/problem3.py
|
lmottasin/Simulation_Lab
|
e3b365f7bb7f6ac4b542f2c6bd721e4a77c056ea
|
[
"MIT"
] | null | null | null |
Assignment 01 - Python Basics/problem3.py
|
lmottasin/Simulation_Lab
|
e3b365f7bb7f6ac4b542f2c6bd721e4a77c056ea
|
[
"MIT"
] | 3
|
2021-06-30T11:16:27.000Z
|
2022-03-11T18:32:46.000Z
|
#!/usr/bin/env python
# coding: utf-8
# In[70]:
import pandas as pd
import matplotlib.pyplot as plt
#Load the CSV into a DataFrame
data = pd.read_csv("SalesData.csv")
# scatter plot
#plt.subplot(2, 1, 1)
#fig size
plt.figure(figsize=(8,5))
#title
plt.title("Scatter Plot")
#plotting the points
plt.scatter(data["Day"],data["Product A"])
plt.scatter(data["Day"],data["Product B"])
plt.scatter(data["Day"],data["Product C"])
# giving names to points by given order in points
labels = ['Product A', 'Product B', 'Product C']
#add the labels
plt.legend(labels)
# x,y label
plt.xlabel("Days")
plt.ylabel("Sales")
#showing the plotting
plt.show()
#plt.subplot(2, 1, 2)
#line plot
#plot size
plt.figure(figsize=(8,5))
#title
plt.title("Line Plot")
#plotting the points
plt.plot(data["Day"],data["Product A"])
plt.plot(data["Day"],data["Product B"])
plt.plot(data["Day"],data["Product C"])
labels = ['Product A', 'Product B', 'Product C']
#add the labels
plt.legend(labels)
# x,y label
plt.xlabel("Days")
plt.ylabel("Sales")
#plot show
plt.show()
# In[ ]:
| 15.779412
| 49
| 0.675676
|
7612ec494254e959c8965f7fb21f5da8915934f6
| 41,334
|
py
|
Python
|
readthedocs/rtd_tests/tests/test_sync_versions.py
|
chirathr/readthedocs.org
|
4f1a5dc07fd9d55d4284fdb22deae735932b2ec9
|
[
"MIT"
] | 1
|
2021-11-20T11:38:26.000Z
|
2021-11-20T11:38:26.000Z
|
readthedocs/rtd_tests/tests/test_sync_versions.py
|
chirathr/readthedocs.org
|
4f1a5dc07fd9d55d4284fdb22deae735932b2ec9
|
[
"MIT"
] | 4
|
2021-02-08T21:06:18.000Z
|
2021-06-10T23:24:55.000Z
|
readthedocs/rtd_tests/tests/test_sync_versions.py
|
chirathr/readthedocs.org
|
4f1a5dc07fd9d55d4284fdb22deae735932b2ec9
|
[
"MIT"
] | 1
|
2018-10-12T22:15:39.000Z
|
2018-10-12T22:15:39.000Z
|
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import json
from django.test import TestCase
from django.core.urlresolvers import reverse
import pytest
from readthedocs.builds.constants import BRANCH, STABLE, TAG
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
class TestSyncVersions(TestCase):
fixtures = ['eric', 'test_data']
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
Version.objects.create(
project=self.pip,
identifier='origin/master',
verbose_name='master',
active=True,
machine=True,
type=BRANCH,
)
Version.objects.create(
project=self.pip,
identifier='to_delete',
verbose_name='to_delete',
active=False,
type=TAG,
)
def test_proper_url_no_slash(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
}
r = self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
json_data = json.loads(r.content)
self.assertEqual(json_data['deleted_versions'], ['to_delete'])
self.assertEqual(json_data['added_versions'], ['to_add'])
def test_new_tag_update_active(self):
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
active=True,
)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_9 = Version.objects.get(slug='0.9')
self.assertTrue(version_9.active)
# Version 0.9 becomes the stable version
self.assertEqual(
version_9.identifier,
self.pip.get_stable_version().identifier,
)
def test_new_tag_update_inactive(self):
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
active=False,
)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
# Version 0.9 becomes the stable version and active
version_9 = Version.objects.get(slug='0.9')
self.assertEqual(
version_9.identifier,
self.pip.get_stable_version().identifier,
)
self.assertTrue(version_9.active)
# Version 0.8.3 is still inactive
version_8 = Version.objects.get(slug='0.8.3')
self.assertFalse(version_8.active)
def test_delete_version(self):
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
active=False,
)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
}
self.assertTrue(
Version.objects.filter(slug='0.8.3').exists()
)
self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
# There isn't a v0.8.3
self.assertFalse(
Version.objects.filter(slug='0.8.3').exists()
)
def test_machine_attr_when_user_define_stable_tag_and_delete_it(self):
"""
The user creates a tag named ``stable`` on an existing repo,
when syncing the versions, the RTD's ``stable`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the tag is deleted on the user repository, the RTD's ``stable``
is back (set to machine=True).
"""
version8 = Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
type=TAG,
active=False,
machine=False,
)
self.pip.update_stable_version()
current_stable = self.pip.get_stable_version()
# 0.8.3 is the current stable
self.assertEqual(
version8.identifier,
current_stable.identifier
)
self.assertTrue(current_stable.machine)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
# User new stable
{
'identifier': '1abc2def3',
'verbose_name': 'stable',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
current_stable = self.pip.get_stable_version()
self.assertEqual(
'1abc2def3',
current_stable.identifier
)
# Deleting the tag should return the RTD's stable
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# The version 8 should be the new stable.
# The stable isn't stuck with the previous commit
current_stable = self.pip.get_stable_version()
self.assertEqual(
'0.8.3',
current_stable.identifier
)
self.assertTrue(current_stable.machine)
def test_machine_attr_when_user_define_stable_tag_and_delete_it_new_project(self):
"""
The user imports a new project with a tag named ``stable``,
when syncing the versions, the RTD's ``stable`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the tag is deleted on the user repository, the RTD's ``stable``
is back (set to machine=True).
"""
# There isn't a stable version yet
self.pip.versions.exclude(slug='master').delete()
current_stable = self.pip.get_stable_version()
self.assertIsNone(current_stable)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
# User stable
{
'identifier': '1abc2def3',
'verbose_name': 'stable',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
current_stable = self.pip.get_stable_version()
self.assertEqual(
'1abc2def3',
current_stable.identifier
)
# User activates the stable version
current_stable.active = True
current_stable.save()
# Deleting the tag should return the RTD's stable
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# The version 8 should be the new stable.
# The stable isn't stuck with the previous commit
current_stable = self.pip.get_stable_version()
self.assertEqual(
'0.8.3',
current_stable.identifier
)
self.assertTrue(current_stable.machine)
def test_machine_attr_when_user_define_stable_branch_and_delete_it(self):
"""
The user creates a branch named ``stable`` on an existing repo,
when syncing the versions, the RTD's ``stable`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the branch is deleted on the user repository, the RTD's ``stable``
is back (set to machine=True).
"""
# Project with just branches
self.pip.versions.filter(type=TAG).delete()
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
type=BRANCH,
active=False,
machine=False,
)
self.pip.update_stable_version()
current_stable = self.pip.get_stable_version()
# 0.8.3 is the current stable
self.assertEqual(
'0.8.3',
current_stable.identifier
)
self.assertTrue(current_stable.machine)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
# User new stable
{
'identifier': 'origin/stable',
'verbose_name': 'stable',
},
{
'identifier': 'origin/0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
current_stable = self.pip.get_stable_version()
self.assertEqual(
'origin/stable',
current_stable.identifier
)
# Deleting the branch should return the RTD's stable
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# The version 8 should be the new stable.
# The stable isn't stuck with the previous branch
current_stable = self.pip.get_stable_version()
self.assertEqual(
'origin/0.8.3',
current_stable.identifier
)
self.assertTrue(current_stable.machine)
def test_machine_attr_when_user_define_stable_branch_and_delete_it_new_project(self):
"""
The user imports a new project with a branch named ``stable``,
when syncing the versions, the RTD's ``stable`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the branch is deleted on the user repository, the RTD's ``stable``
is back (set to machine=True).
"""
# There isn't a stable version yet
self.pip.versions.exclude(slug='master').delete()
current_stable = self.pip.get_stable_version()
self.assertIsNone(current_stable)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
# User stable
{
'identifier': 'origin/stable',
'verbose_name': 'stable',
},
{
'identifier': 'origin/0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
current_stable = self.pip.get_stable_version()
self.assertEqual(
'origin/stable',
current_stable.identifier
)
# User activates the stable version
current_stable.active = True
current_stable.save()
# Deleting the branch should return the RTD's stable
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# The version 8 should be the new stable.
# The stable isn't stuck with the previous commit
current_stable = self.pip.get_stable_version()
self.assertEqual(
'origin/0.8.3',
current_stable.identifier
)
self.assertTrue(current_stable.machine)
def test_machine_attr_when_user_define_latest_tag_and_delete_it(self):
"""
The user creates a tag named ``latest`` on an existing repo,
when syncing the versions, the RTD's ``latest`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the tag is deleted on the user repository, the RTD's ``latest``
is back (set to machine=True).
"""
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
# User new stable
{
'identifier': '1abc2def3',
'verbose_name': 'latest',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# The tag is the new latest
version_latest = self.pip.versions.get(slug='latest')
self.assertEqual(
'1abc2def3',
version_latest.identifier
)
# Deleting the tag should return the RTD's latest
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': []
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# The latest isn't stuck with the previous commit
version_latest = self.pip.versions.get(slug='latest')
self.assertEqual(
'master',
version_latest.identifier
)
self.assertTrue(version_latest.machine)
def test_machine_attr_when_user_define_latest_branch_and_delete_it(self):
"""
The user creates a branch named ``latest`` on an existing repo,
when syncing the versions, the RTD's ``latest`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the branch is deleted on the user repository, the RTD's ``latest``
is back (set to machine=True).
"""
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
# User new latest
{
'identifier': 'origin/latest',
'verbose_name': 'latest',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# The branch is the new latest
version_latest = self.pip.versions.get(slug='latest')
self.assertEqual(
'origin/latest',
version_latest.identifier
)
# Deleting the branch should return the RTD's latest
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# The latest isn't stuck with the previous branch
version_latest = self.pip.versions.get(slug='latest')
self.assertEqual(
'master',
version_latest.identifier,
)
self.assertTrue(version_latest.machine)
class TestStableVersion(TestCase):
fixtures = ['eric', 'test_data']
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
def test_stable_versions(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8',
'verbose_name': '0.8',
},
],
}
self.assertRaises(
Version.DoesNotExist,
Version.objects.get,
slug=STABLE,
)
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_pre_release_are_not_stable(self):
version_post_data = {
'branches': [],
'tags': [
{'identifier': '1.0a1', 'verbose_name': '1.0a1'},
{'identifier': '0.9', 'verbose_name': '0.9'},
{'identifier': '0.9b1', 'verbose_name': '0.9b1'},
{'identifier': '0.8', 'verbose_name': '0.8'},
{'identifier': '0.8rc2', 'verbose_name': '0.8rc2'},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_post_releases_are_stable(self):
version_post_data = {
'branches': [],
'tags': [
{'identifier': '1.0', 'verbose_name': '1.0'},
{'identifier': '1.0.post1', 'verbose_name': '1.0.post1'},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.post1')
def test_invalid_version_numbers_are_not_stable(self):
self.pip.versions.all().delete()
version_post_data = {
'branches': [],
'tags': [
{
'identifier': 'this.is.invalid',
'verbose_name': 'this.is.invalid'
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertFalse(Version.objects.filter(slug=STABLE).exists())
version_post_data = {
'branches': [],
'tags': [
{
'identifier': '1.0',
'verbose_name': '1.0',
},
{
'identifier': 'this.is.invalid',
'verbose_name': 'this.is.invalid'
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0')
def test_update_stable_version(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8',
'verbose_name': '0.8',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
version_post_data = {
'tags': [
{
'identifier': '1.0.0',
'verbose_name': '1.0.0',
},
]
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
version_post_data = {
'tags': [
{
'identifier': '0.7',
'verbose_name': '0.7',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
def test_update_inactive_stable_version(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertEqual(version_stable.identifier, '0.9')
version_stable.active = False
version_stable.save()
version_post_data['tags'].append({
'identifier': '1.0.0',
'verbose_name': '1.0.0',
})
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertFalse(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_stable_version_tags_over_branches(self):
version_post_data = {
'branches': [
# 2.0 development
{'identifier': 'origin/2.0', 'verbose_name': '2.0'},
{'identifier': 'origin/0.9.1rc1', 'verbose_name': '0.9.1rc1'},
],
'tags': [
{'identifier': '1.0rc1', 'verbose_name': '1.0rc1'},
{'identifier': '0.9', 'verbose_name': '0.9'},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
# If there is a branch with a higher version, tags takes preferences
# over the branches to select the stable version
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
version_post_data['tags'].append({
'identifier': '1.0',
'verbose_name': '1.0',
})
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0')
def test_stable_version_same_id_tag_branch(self):
version_post_data = {
'branches': [
# old 1.0 development branch
{'identifier': 'origin/1.0', 'verbose_name': '1.0'},
],
'tags': [
# tagged 1.0 final version
{'identifier': '1.0', 'verbose_name': '1.0'},
{'identifier': '0.9', 'verbose_name': '0.9'},
],
}
self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0')
self.assertEqual(version_stable.type, 'tag')
def test_unicode(self):
version_post_data = {
'branches': [],
'tags': [
{'identifier': 'foo-£', 'verbose_name': 'foo-£'},
],
}
resp = self.client.post(
'/api/v2/project/{}/sync_versions/'.format(self.pip.pk),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
def test_user_defined_stable_version_tag_with_tags(self):
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
active=True,
)
# A pre-existing active stable tag that was machine created
Version.objects.create(
project=self.pip,
identifier='foo',
type=TAG,
verbose_name='stable',
active=True,
machine=True,
)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
# A new user-defined stable tag
{
'identifier': '1abc2def3',
'verbose_name': 'stable',
},
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# Didn't update to newest tag
version_9 = self.pip.versions.get(slug='0.9')
self.assertFalse(version_9.active)
# Did update to user-defined stable version
version_stable = self.pip.versions.get(slug='stable')
self.assertFalse(version_stable.machine)
self.assertTrue(version_stable.active)
self.assertEqual(
'1abc2def3',
self.pip.get_stable_version().identifier
)
        # There aren't other stable slugs like stable_a
other_stable = self.pip.versions.filter(
slug__startswith='stable_'
)
self.assertFalse(other_stable.exists())
# Check that posting again doesn't change anything from current state.
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
version_stable = self.pip.versions.get(slug='stable')
self.assertFalse(version_stable.machine)
self.assertTrue(version_stable.active)
self.assertEqual(
'1abc2def3',
self.pip.get_stable_version().identifier
)
other_stable = self.pip.versions.filter(
slug__startswith='stable_'
)
self.assertFalse(other_stable.exists())
def test_user_defined_stable_version_branch_with_tags(self):
Version.objects.create(
project=self.pip,
identifier='0.8.3',
verbose_name='0.8.3',
active=True,
)
# A pre-existing active stable branch that was machine created
Version.objects.create(
project=self.pip,
identifier='foo',
type=BRANCH,
verbose_name='stable',
active=True,
machine=True,
)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
# A new user-defined stable branch
{
'identifier': 'origin/stable',
'verbose_name': 'stable',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# Didn't update to newest tag
version_9 = self.pip.versions.get(slug='0.9')
self.assertFalse(version_9.active)
# Did update to user-defined stable version
version_stable = self.pip.versions.get(slug='stable')
self.assertFalse(version_stable.machine)
self.assertTrue(version_stable.active)
self.assertEqual(
'origin/stable',
self.pip.get_stable_version().identifier
)
        # There aren't other stable slugs like stable_a
other_stable = self.pip.versions.filter(
slug__startswith='stable_'
)
self.assertFalse(other_stable.exists())
# Check that posting again doesn't change anything from current state.
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
version_stable = self.pip.versions.get(slug='stable')
self.assertFalse(version_stable.machine)
self.assertTrue(version_stable.active)
self.assertEqual(
'origin/stable',
self.pip.get_stable_version().identifier
)
other_stable = self.pip.versions.filter(
slug__startswith='stable_'
)
self.assertFalse(other_stable.exists())
class TestLatestVersion(TestCase):
fixtures = ['eric', 'test_data']
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
Version.objects.create(
project=self.pip,
identifier='origin/master',
verbose_name='master',
active=True,
machine=True,
type=BRANCH,
)
# When the project is saved, the RTD's ``latest`` version
# is created.
self.pip.save()
def test_user_defined_latest_version_tag(self):
# TODO: the ``latest`` versions are created
# as a BRANCH, then here we will have a
# ``latest_a`` version.
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
# A new user-defined latest tag
{
'identifier': '1abc2def3',
'verbose_name': 'latest',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# Did update to user-defined latest version
version_latest = self.pip.versions.get(slug='latest')
self.assertFalse(version_latest.machine)
self.assertTrue(version_latest.active)
self.assertEqual(
'1abc2def3',
version_latest.identifier
)
        # There aren't other latest slugs like latest_a
other_latest = self.pip.versions.filter(
slug__startswith='latest_'
)
self.assertFalse(other_latest.exists())
# Check that posting again doesn't change anything from current state.
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
version_latest = self.pip.versions.get(slug='latest')
self.assertFalse(version_latest.machine)
self.assertTrue(version_latest.active)
self.assertEqual(
'1abc2def3',
version_latest.identifier
)
other_latest = self.pip.versions.filter(
slug__startswith='latest_'
)
self.assertFalse(other_latest.exists())
def test_user_defined_latest_version_branch(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
# A new user-defined latest branch
{
'identifier': 'origin/latest',
'verbose_name': 'latest',
},
],
}
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
# Did update to user-defined latest version
version_latest = self.pip.versions.get(slug='latest')
self.assertFalse(version_latest.machine)
self.assertTrue(version_latest.active)
self.assertEqual(
'origin/latest',
version_latest.identifier
)
        # There aren't other latest slugs like latest_a
other_latest = self.pip.versions.filter(
slug__startswith='latest_'
)
self.assertFalse(other_latest.exists())
# Check that posting again doesn't change anything from current state.
resp = self.client.post(
reverse('project-sync-versions', args=[self.pip.pk]),
data=json.dumps(version_post_data),
content_type='application/json',
)
self.assertEqual(resp.status_code, 200)
version_latest = self.pip.versions.get(slug='latest')
self.assertFalse(version_latest.machine)
self.assertTrue(version_latest.active)
self.assertEqual(
'origin/latest',
version_latest.identifier
)
other_latest = self.pip.versions.filter(
slug__startswith='latest_'
)
self.assertFalse(other_latest.exists())
| 32.21668
| 89
| 0.508105
|
4f6dbc83b16890ce3d7c9e0b420c13bcd536e4de
| 3,625
|
py
|
Python
|
PaperExperiments/XHExp207/parameters.py
|
stefan-c-kremer/TE_World2
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
[
"MIT"
] | null | null | null |
PaperExperiments/XHExp207/parameters.py
|
stefan-c-kremer/TE_World2
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
[
"MIT"
] | null | null | null |
PaperExperiments/XHExp207/parameters.py
|
stefan-c-kremer/TE_World2
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
[
"MIT"
] | null | null | null |
# parameters.py
"""
Exp 207 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.00, 0, 0.55, 1, 0.30, 2, 0.15, 3', 'TE_Insertion_Distribution': 'Flat()', 'Carrying_capacity': '300', 'TE_excision_rate': '0.5', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Flat()', 'mutation_effect': '0.01', 'TE_death_rate': '0.0005'}
"""
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
"SPLAT": False,
"SPLAT FITNESS": False,
"INITIALIZATION": False,
"GENERATION": True,
"HOST EXTINCTION": True,
"TE EXTINCTION": True,
"TRIAL NO": True,
"GENE INIT": False,
"TE INIT": False,
};
TE_Insertion_Distribution = Flat();
Gene_Insertion_Distribution = Flat();
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.0005;
TE_excision_rate = 0.5; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.00, 0, 0.55, 1, 0.30, 2, 0.15, 3 );
Initial_genes = 5000;
Append_gene = True; # True: when the initialization routine tries to place
# a gene inside another gene, it instead appends it
# at the end of the original gene (use this with small
# amounts of Junk_BP).
# False: when the initialization routine tries to place
# a gene inside another gene, try to place it somewhere
# else again (don't use this option with small amounts
# of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 1.4 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.30;
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
0.30, lambda fit: fit - random.random()*0.01,
0.15, lambda fit: fit,
0.15, lambda fit: fit + random.random()*0.01
);
# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
0.20, lambda fit: fit - random.random()*0.01,
0.30, lambda fit: fit,
0.20, lambda fit: fit + random.random()*0.01
);
Carrying_capacity = 300;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
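# Worked example (illustrative, not from the original file): with Carrying_capacity
# = 300, an individual holding 0.1% of the population's total fitness survives with
# probability min(300 * 0.001, 0.95) = 0.3; any share above roughly 0.317% of the
# total fitness is capped at the 0.95 ceiling.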
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
# set "randomly"
save_frequency = 50; # Frequency with which to save the state of the experiment
saved = None; # if saved = None then we start a new simulation from scratch
# if saved = string, then we open that file and resume a simulation
| 37.760417
| 324
| 0.647448
|
33297b55761feecf298920a8ed3463af51b869b3
| 1,151
|
py
|
Python
|
incasem/gunpowder/merge_masks.py
|
kirchhausenlab/incasem
|
ee9e007c5c04571e547e2fb5af5e800bd2d2b435
|
[
"BSD-3-Clause"
] | null | null | null |
incasem/gunpowder/merge_masks.py
|
kirchhausenlab/incasem
|
ee9e007c5c04571e547e2fb5af5e800bd2d2b435
|
[
"BSD-3-Clause"
] | null | null | null |
incasem/gunpowder/merge_masks.py
|
kirchhausenlab/incasem
|
ee9e007c5c04571e547e2fb5af5e800bd2d2b435
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import List
import numpy as np
import gunpowder as gp
class MergeMasks(gp.BatchFilter):
def __init__(
self,
arrays: List[gp.ArrayKey],
output_array: gp.ArrayKey):
"""Merge multiple binary masks with a logical and
Args:
arrays: list of binary masks for different structures
output_array:
"""
self.arrays = arrays
self.output_array = output_array
def setup(self):
self.enable_autoskip()
spec = self.spec[self.arrays[0]].copy()
self.provides(self.output_array, spec)
def prepare(self, request):
deps = gp.BatchRequest()
for array in self.arrays:
deps[array] = request[self.output_array]
return deps
def process(self, batch, request):
output = gp.Batch()
spec = batch[self.arrays[0]].spec.copy()
mask = np.logical_and.reduce(
[batch[key].data.astype(np.bool) for key in self.arrays]
)
mask = mask.astype(np.uint8)
output[self.output_array] = gp.Array(data=mask, spec=spec)
return output
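# Illustrative sketch (not part of the original file): the core merge that `process`
# performs, shown on plain numpy arrays outside of any gunpowder pipeline.
def _demo_merge_masks():
    mask_a = np.array([1, 1, 0, 1], dtype=np.uint8)
    mask_b = np.array([1, 0, 0, 1], dtype=np.uint8)
    merged = np.logical_and.reduce([mask_a.astype(bool), mask_b.astype(bool)])
    return merged.astype(np.uint8)  # -> array([1, 0, 0, 1], dtype=uint8)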
| 25.021739
| 68
| 0.594266
|
1508cad526d8e3a05a55f990970c3eabfb0a77ab
| 1,398
|
py
|
Python
|
tests/utils/test_run_async.py
|
Alirezaja1384/tortoise-orm
|
e7ecbc81d43860a3b0b6d5d9da27497ed6234049
|
[
"Apache-2.0"
] | 33
|
2018-04-07T09:50:22.000Z
|
2018-08-24T10:25:29.000Z
|
tests/utils/test_run_async.py
|
Alirezaja1384/tortoise-orm
|
e7ecbc81d43860a3b0b6d5d9da27497ed6234049
|
[
"Apache-2.0"
] | 41
|
2018-03-29T17:09:18.000Z
|
2018-08-24T16:37:38.000Z
|
tests/utils/test_run_async.py
|
Alirezaja1384/tortoise-orm
|
e7ecbc81d43860a3b0b6d5d9da27497ed6234049
|
[
"Apache-2.0"
] | 4
|
2018-06-27T08:45:11.000Z
|
2018-07-30T18:16:55.000Z
|
import os
from unittest import skipIf
from tortoise import Tortoise, connections, run_async
from tortoise.contrib.test import TestCase
@skipIf(os.name == "nt", "stuck with Windows")
class TestRunAsync(TestCase):
async def asyncSetUp(self) -> None:
pass
async def asyncTearDown(self) -> None:
pass
def setUp(self):
self.somevalue = 1
async def init(self):
await Tortoise.init(db_url="sqlite://:memory:", modules={"models": []})
self.somevalue = 2
self.assertNotEqual(connections._get_storage(), {})
async def init_raise(self):
await Tortoise.init(db_url="sqlite://:memory:", modules={"models": []})
self.somevalue = 3
self.assertNotEqual(connections._get_storage(), {})
raise Exception("Some exception")
def test_run_async(self):
self.assertEqual(connections._get_storage(), {})
self.assertEqual(self.somevalue, 1)
run_async(self.init())
self.assertEqual(connections._get_storage(), {})
self.assertEqual(self.somevalue, 2)
def test_run_async_raised(self):
self.assertEqual(connections._get_storage(), {})
self.assertEqual(self.somevalue, 1)
with self.assertRaises(Exception):
run_async(self.init_raise())
self.assertEqual(connections._get_storage(), {})
self.assertEqual(self.somevalue, 3)
| 31.772727
| 79
| 0.655222
|
334e737bdb1ff24dce4e4f458bcfa03610bafb83
| 6,349
|
py
|
Python
|
moto/glacier/responses.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
moto/glacier/responses.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | 1
|
2022-03-07T07:39:03.000Z
|
2022-03-07T07:39:03.000Z
|
moto/glacier/responses.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
import json
from moto.core.responses import BaseResponse
from .models import glacier_backends
from .utils import vault_from_glacier_url
class GlacierResponse(BaseResponse):
@property
def glacier_backend(self):
return glacier_backends[self.region]
def all_vault_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
return self._all_vault_response(headers)
def _all_vault_response(self, headers):
vaults = self.glacier_backend.list_vaults()
response = json.dumps(
{"Marker": None, "VaultList": [vault.to_dict() for vault in vaults]}
)
headers["content-type"] = "application/json"
return 200, headers, response
def vault_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
return self._vault_response(request, full_url, headers)
def _vault_response(self, request, full_url, headers):
method = request.method
vault_name = vault_from_glacier_url(full_url)
if method == "GET":
return self._vault_response_get(vault_name, headers)
elif method == "PUT":
return self._vault_response_put(vault_name, headers)
elif method == "DELETE":
return self._vault_response_delete(vault_name, headers)
def _vault_response_get(self, vault_name, headers):
vault = self.glacier_backend.get_vault(vault_name)
headers["content-type"] = "application/json"
return 200, headers, json.dumps(vault.to_dict())
def _vault_response_put(self, vault_name, headers):
self.glacier_backend.create_vault(vault_name)
return 201, headers, ""
def _vault_response_delete(self, vault_name, headers):
self.glacier_backend.delete_vault(vault_name)
return 204, headers, ""
def vault_archive_response(self, request, full_url, headers):
return self._vault_archive_response(request, full_url, headers)
def _vault_archive_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
description = ""
if "x-amz-archive-description" in request.headers:
description = request.headers["x-amz-archive-description"]
vault_name = full_url.split("/")[-2]
if method == "POST":
return self._vault_archive_response_post(
vault_name, body, description, headers
)
else:
return 400, headers, "400 Bad Request"
def _vault_archive_response_post(self, vault_name, body, description, headers):
vault = self.glacier_backend.upload_archive(vault_name, body, description)
headers["x-amz-archive-id"] = vault["archive_id"]
headers["x-amz-sha256-tree-hash"] = vault["sha256"]
return 201, headers, ""
def vault_archive_individual_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
return self._vault_archive_individual_response(request, full_url, headers)
def _vault_archive_individual_response(self, request, full_url, headers):
method = request.method
vault_name = full_url.split("/")[-3]
archive_id = full_url.split("/")[-1]
if method == "DELETE":
vault = self.glacier_backend.get_vault(vault_name)
vault.delete_archive(archive_id)
return 204, headers, ""
def vault_jobs_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
return self._vault_jobs_response(request, full_url, headers)
def _vault_jobs_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
account_id = full_url.split("/")[1]
vault_name = full_url.split("/")[-2]
if method == "GET":
jobs = self.glacier_backend.list_jobs(vault_name)
headers["content-type"] = "application/json"
return (
200,
headers,
json.dumps(
{"JobList": [job.to_dict() for job in jobs], "Marker": None}
),
)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
job_type = json_body["Type"]
archive_id = None
if "ArchiveId" in json_body:
archive_id = json_body["ArchiveId"]
if "Tier" in json_body:
tier = json_body["Tier"]
else:
tier = "Standard"
job_id = self.glacier_backend.initiate_job(
vault_name, job_type, tier, archive_id
)
headers["x-amz-job-id"] = job_id
headers["Location"] = "/{0}/vaults/{1}/jobs/{2}".format(
account_id, vault_name, job_id
)
return 202, headers, ""
def vault_jobs_individual_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
return self._vault_jobs_individual_response(full_url, headers)
def _vault_jobs_individual_response(self, full_url, headers):
vault_name = full_url.split("/")[-3]
archive_id = full_url.split("/")[-1]
job = self.glacier_backend.describe_job(vault_name, archive_id)
return 200, headers, json.dumps(job.to_dict())
def vault_jobs_output_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
return self._vault_jobs_output_response(full_url, headers)
def _vault_jobs_output_response(self, full_url, headers):
vault_name = full_url.split("/")[-4]
job_id = full_url.split("/")[-2]
output = self.glacier_backend.get_job_output(vault_name, job_id)
if output is None:
return 404, headers, "404 Not Found"
if isinstance(output, dict):
headers["content-type"] = "application/json"
return 200, headers, json.dumps(output)
else:
headers["content-type"] = "application/octet-stream"
return 200, headers, output
| 38.713415
| 83
| 0.630966
|
b785813a9aab9e2dc337b5b53e89a871a4ff34ad
| 2,547
|
py
|
Python
|
examples/dfp/v201411/suggested_ad_unit_service/approve_all_suggested_ad_units.py
|
cmm08/googleads-python-lib
|
97743df32eff92cf00cb8beaddcda42dfa0a37f4
|
[
"Apache-2.0"
] | 1
|
2018-09-06T18:50:58.000Z
|
2018-09-06T18:50:58.000Z
|
examples/dfp/v201411/suggested_ad_unit_service/approve_all_suggested_ad_units.py
|
cmm08/googleads-python-lib
|
97743df32eff92cf00cb8beaddcda42dfa0a37f4
|
[
"Apache-2.0"
] | null | null | null |
examples/dfp/v201411/suggested_ad_unit_service/approve_all_suggested_ad_units.py
|
cmm08/googleads-python-lib
|
97743df32eff92cf00cb8beaddcda42dfa0a37f4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example approves all suggested ad units with 50 or more requests.
This feature is only available to DFP premium solution networks.
"""
# Import appropriate modules from the client library.
from googleads import dfp
THRESHOLD_NUMBER_OF_REQUESTS = '50'
def main(client):
# Initialize appropriate service.
suggested_ad_unit_service = client.GetService(
'SuggestedAdUnitService', version='v201411')
values = [{
'key': 'numRequests',
'value': {
'xsi_type': 'NumberValue',
'value': THRESHOLD_NUMBER_OF_REQUESTS
}
}]
query = 'WHERE numRequests > :numRequests'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
num_approved_suggested_ad_units = 0
# Get suggested ad units by statement.
while True:
response = suggested_ad_unit_service.getSuggestedAdUnitsByStatement(
statement.ToStatement())
if 'results' in response:
# Print suggested ad units that will be approved.
for suggested_ad_unit in response['results']:
print ('Suggested ad unit with id \'%s\', and number of requests \'%s\''
' will be approved.' % (suggested_ad_unit['id'],
suggested_ad_unit['numRequests']))
# Approve suggested ad units.
result = suggested_ad_unit_service.performSuggestedAdUnitAction(
{'xsi_type': 'ApproveSuggestedAdUnit'},
statement.ToStatement())
if result and int(result['numChanges']) > 0:
num_approved_suggested_ad_units += int(result['numChanges'])
else:
break
if num_approved_suggested_ad_units > 0:
print ('Number of suggested ad units approved: %s' %
num_approved_suggested_ad_units)
else:
print 'No suggested ad units were approved.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| 32.240506
| 80
| 0.698076
|
eab3c48fe3187d1064f781a68228840d686d1dca
| 1,322
|
py
|
Python
|
open_bus_siri_requester/health_daemon.py
|
hasadna/open-bus-siri-requester
|
4c1fa315eec54a5340b9334de0ebff28a3ddea3e
|
[
"MIT"
] | null | null | null |
open_bus_siri_requester/health_daemon.py
|
hasadna/open-bus-siri-requester
|
4c1fa315eec54a5340b9334de0ebff28a3ddea3e
|
[
"MIT"
] | 1
|
2022-02-08T15:08:46.000Z
|
2022-02-09T13:24:33.000Z
|
open_bus_siri_requester/health_daemon.py
|
hasadna/open-bus-siri-requester
|
4c1fa315eec54a5340b9334de0ebff28a3ddea3e
|
[
"MIT"
] | null | null | null |
from http.server import ThreadingHTTPServer
from http.server import BaseHTTPRequestHandler
from . import config
from . import storage
class HealthDaemonHTTPRequestHandler(BaseHTTPRequestHandler):
def _send_ok(self, msg='OK'):
self.send_response(200)
self.send_header("Content-type", "application/json; charset=utf-8")
self.end_headers()
self.wfile.write(msg.encode())
def _send_error(self, error='Server Error', status_code=500):
self.send_response(status_code)
self.end_headers()
self.wfile.write(error.encode())
def do_GET(self):
seconds_since_last_snapshot = storage.get_seconds_since_last_snapshot()
msg = '{} seconds since last snapshot'.format(seconds_since_last_snapshot)
if seconds_since_last_snapshot > config.HEALTH_DAEMON_MAX_SECONDS_SINCE_LAST_SNAPSHOT:
self._send_error(error='ERROR: ' + msg)
else:
self._send_ok(msg='OK: ' + msg)
class HealthDaemonHTTPServer(ThreadingHTTPServer):
def __init__(self):
print("Starting health daemon on port {}".format(config.HEALTH_DAEMON_PORT))
super(HealthDaemonHTTPServer, self).__init__(('0.0.0.0', config.HEALTH_DAEMON_PORT), HealthDaemonHTTPRequestHandler)
def start():
HealthDaemonHTTPServer().serve_forever()
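# Hedged usage sketch: run start() and probe the endpoint, e.g.
#   curl http://localhost:$HEALTH_DAEMON_PORT/
# which returns "OK: N seconds since last snapshot" (HTTP 200), or an
# "ERROR: ..." body (HTTP 500) once the configured threshold is exceeded.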
| 33.897436
| 124
| 0.717852
|
b7136b7d2c69e94485568e4bbba78e187cb3294b
| 5,265
|
py
|
Python
|
hill2.py
|
Ziggareto/national_cipher_challenge
|
afac5a4b3c31ec78e6c8ef211ba9dd664a4070f7
|
[
"MIT"
] | null | null | null |
hill2.py
|
Ziggareto/national_cipher_challenge
|
afac5a4b3c31ec78e6c8ef211ba9dd664a4070f7
|
[
"MIT"
] | null | null | null |
hill2.py
|
Ziggareto/national_cipher_challenge
|
afac5a4b3c31ec78e6c8ef211ba9dd664a4070f7
|
[
"MIT"
] | null | null | null |
#hill2
import basics
import genetic_algorithm as ga
import math
import numpy as np
import random
""" Hill Cipher - uses a matrix as a key, and splits text into small chunks
[1, 2, 3] ['a'][1] [14] ['n']
[4, 5, 6] ['b'][2] = [32] mod(26) = ['f']
[7, 8, 9] ['c'][3] [50] ['x']
"""
def encrypt_text(text, key_matrix):
"""Returns the encrypted by key_matrix text"""
length = len(key_matrix)
chunks = split(text, length)
comp = ''
for num in range(math.ceil(len(text)/length)):
comp += convert_chunk(chunks[num], key_matrix)
return(comp)
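# Hedged example of encrypt_text (assumes basics.alphabet is 'a'..'z'): with the classic
# textbook key [[3, 3], [2, 5]] the plaintext 'help' splits into 'he', 'lp' and maps to
# 'hiat', e.g.
#   key = np.array([[3, 3], [2, 5]])
#   encrypt_text('help', key)   # -> 'hiat'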
def decrypt_text2(text, key):
dm1m26 = key.T % 26
new = (key * dm1m26).T
length = len(key)
chunks = split(text, length)
comp = ''
for num in range(math.ceil(len(text)/length)):
comp += convert_chunk(chunks[num], key)
return(comp)
def decrypt_text(text, key):
length = len(key)
chunks = split(text, length)
chunknums = [chunk_to_num(chunk) for chunk in chunks][:-1]
posses = []
for numpair in chunknums:
posses.append(solve_equation([key[0][0], key[0][1], numpair[0]], [key[1][0], key[1][1], numpair[1]]))
return(posses)
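    # NOTE: the early return above leaves the reconstruction code below unreachable.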
comp = ''
for thing in posses:
for mini in thing:
comp += basics.alphabet[mini[0]]
return(comp)
def solve2(eq1, eq2):
e1, e2 = np.array(eq1), np.array(eq2)
def split(text, length):
"""Splits the text into chunks of length length, returning the list of
chunks e.g. text='abcdefghi', length=3 -> ['abc', 'def', 'ghi']
If the text isn't long enough, the last chunk might be shorter
"""
chunks = []
count = 0
iterations = math.floor(len(text)/length)
while count < iterations:
chunks.append(text[length*count : length*(count+1)])
count += 1
#Catches the tail end which might be shorter than length
chunks.append(text[length*count:])
if len(chunks[-1]) > 0: #If there was some leftover
for num in range(length-len(chunks[-1])):#pad with 'a' as necessary
chunks[-1] += 'a'
return(chunks)
def convert_chunk(chunk, key_matrix):
a = np.array([basics.alphabet.index(letter) for letter in chunk])
b = np.dot(key_matrix, a)
b = b % 26
comp = ''.join([basics.alphabet[int(index)] for index in b])
return(comp)
def chunk_to_num(chunk):
return([basics.alphabet.index(char) for char in chunk])
def get_key(th, he):
thnum = chunk_to_num(th)
henum = chunk_to_num(he)
poss1 = solve_equation([19, 7, thnum[0]], [7, 4, henum[0]])
poss2 = solve_equation([19, 7, thnum[1]], [7, 4, henum[1]])
return(poss1 + poss2)
def solve_equation(eq1, eq2):
"""Takes in e.g. eq1 = [q, w, e], eq2 = [r, t, y] where
qa + wb = e mod(26) and ra + tb = y mod(26). Returns values of a and b
"""
e1, e2 = solve_half_p1(eq1, eq2), solve_half_p1([eq1[1], eq1[0], eq1[2]], [eq2[1], eq2[0], eq2[2]])
aposs, bposs = find_possibilities(e1[0], e1[2]), find_possibilities(e2[0], e2[2])
return([aposs, bposs])
def solve_half_p1(eq1, eq2):
#Works by elimination
e1, e2 = np.array(eq1), np.array(eq2)
e1x = e1 * e2[1]
e2x = e2 * e1[1]
e3 = (e1x - e2x) % 26
return(e3)
def find_possibilities(a, b):
"""Given ax = b mod(26), finds possibilities for a and b"""
prac = np.array([x for x in range(26)])
prac *= a
prac = prac % 26
indices = []
for index in range(26):
if prac[index] == b:
indices.append(index)
return(indices)
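# Hedged worked example: find_possibilities(2, 4) solves 2x = 4 (mod 26) by brute force
# and returns [2, 15], since 2*2 = 4 and 2*15 = 30 = 4 (mod 26); even coefficients
# generally admit two solutions because gcd(2, 26) = 2.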
class hill_cipher_node(ga.node):
def reproduce(self, another):
#Returns a node, whose keyword is a breeding of self and another's
length = len(self.key)
baby_key = np.zeros([length, length], int)
for rownum in range(length):
for colnum in range(length):
if random.randint(0, 1):
out = self.key[rownum][colnum]
else:
                    out = another.key[rownum][colnum]
if random.randint(0, 12) == 12:
out = random.randint(0, 25)
baby_key[rownum][colnum] = out
return(hill_cipher_node(baby_key))
class hill_cipher_algorithm(ga.genetic_algorithm):
def __init__(self, text, population_size, breeding_times, node_class, key_matrix_side_length):
super(type(self), self).__init__(text, population_size, breeding_times, node_class)
self.key_matrix_side_length = key_matrix_side_length
self.initialize_population()
def decrypt(self, text, key):
return(encrypt_text(text, key))
def initialize_population(self):
for num in range(self.population_size):
self.population.append(self.node(self.make_key(self.key_matrix_side_length)))
@staticmethod
def make_key(length):
key = np.zeros([length, length], int)
for rownum in range(length):
for colnum in range(length):
key[rownum][colnum] = random.randint(0, 25)
return(key)
| 32.103659
| 110
| 0.576068
|
65b6f3454d483db1b9e007039aafb9d0b3b155b2
| 1,550
|
py
|
Python
|
src/utils/UtilsDecorators.py
|
Gabvaztor/tensorflowCode
|
e206ea4544552b87c2d43274cea3182f6b385a87
|
[
"Apache-2.0"
] | 4
|
2019-12-14T08:06:18.000Z
|
2020-09-12T10:09:31.000Z
|
src/utils/UtilsDecorators.py
|
Gabvaztor/tensorflowCode
|
e206ea4544552b87c2d43274cea3182f6b385a87
|
[
"Apache-2.0"
] | null | null | null |
src/utils/UtilsDecorators.py
|
Gabvaztor/tensorflowCode
|
e206ea4544552b87c2d43274cea3182f6b385a87
|
[
"Apache-2.0"
] | 2
|
2020-09-12T10:10:07.000Z
|
2021-09-15T11:58:37.000Z
|
"""
To manage Errors
"""
import logging
def logger():
"""
Creates a logging object and returns it
"""
logger = logging.getLogger("example_logger")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(r"/path/to/test.log")
fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(fmt)
fh.setFormatter(formatter)
# add handler to logger object
logger.addHandler(fh)
return logger
def exception(logger):
"""
A decorator that wraps the passed in function and logs
exceptions should one occur
@param logger: The logging object
"""
def decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
# log the exception
err = "There was an exception in "
err += func.__name__
logger.exception(err)
necessary_to_raise = False
# TODO (@gabvaztor) Send flag to check if it is necessary to raise
if necessary_to_raise:
# re-raise the exception
raise
return wrapper
return decorator
def for_all_methods(decorator):
def decorate(cls):
for attr in cls.__dict__: # there's propably a better way to do this
if callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
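# Hedged usage sketch (names below are illustrative, not part of this module):
#
#   log = logger()
#
#   @for_all_methods(exception(log))
#   class Repository:
#       def load(self, path):
#           return open(path).read()   # any exception raised here is logged to test.log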
| 26.271186
| 83
| 0.582581
|
fb275daf0b33717026cc90c37243e29d2aa198c3
| 633
|
py
|
Python
|
generators/skip_first.py
|
CodyKochmann/generators
|
a637bf9cb5e48251aa800753ba0aa79b3ca18dcf
|
[
"MIT"
] | 6
|
2017-12-21T04:32:35.000Z
|
2022-02-15T07:06:45.000Z
|
generators/skip_first.py
|
CodyKochmann/generators
|
a637bf9cb5e48251aa800753ba0aa79b3ca18dcf
|
[
"MIT"
] | 21
|
2017-09-08T13:02:18.000Z
|
2020-03-28T19:10:01.000Z
|
generators/skip_first.py
|
CodyKochmann/generators
|
a637bf9cb5e48251aa800753ba0aa79b3ca18dcf
|
[
"MIT"
] | 2
|
2018-09-30T16:16:10.000Z
|
2019-05-06T02:16:11.000Z
|
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2018-02-17 10:42:15
# @Last Modified by: Cody Kochmann
# @Last Modified time: 2018-02-17 12:02:24
from .skip import skip
def skip_first(pipe, items=1):
''' this is an alias for skip to parallel the dedicated skip_last function
to provide a little more readability to the code. the action of actually
skipping does not occur until the first iteration is done
'''
pipe = iter(pipe)
for i in skip(pipe, items):
yield i
if __name__ == '__main__':
l = list(range(10))
print(l)
for i in skip_first(l, 5):
print(i)
| 27.521739
| 80
| 0.64139
|
39cedbe14b7715f4709ca8efcfac99d1ef86d3c3
| 11,772
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/aio/_network_management_client_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/aio/_network_management_client_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/aio/_network_management_client_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import NetworkManagementClientConfiguration
from .operations_async import ApplicationGatewaysOperations
from .operations_async import NetworkManagementClientOperationsMixin
from .operations_async import ExpressRouteCircuitAuthorizationsOperations
from .operations_async import ExpressRouteCircuitPeeringsOperations
from .operations_async import ExpressRouteCircuitsOperations
from .operations_async import ExpressRouteServiceProvidersOperations
from .operations_async import LoadBalancersOperations
from .operations_async import NetworkInterfacesOperations
from .operations_async import NetworkSecurityGroupsOperations
from .operations_async import SecurityRulesOperations
from .operations_async import NetworkWatchersOperations
from .operations_async import PacketCapturesOperations
from .operations_async import PublicIPAddressesOperations
from .operations_async import RouteFiltersOperations
from .operations_async import RouteFilterRulesOperations
from .operations_async import RouteTablesOperations
from .operations_async import RoutesOperations
from .operations_async import BgpServiceCommunitiesOperations
from .operations_async import UsagesOperations
from .operations_async import VirtualNetworksOperations
from .operations_async import SubnetsOperations
from .operations_async import VirtualNetworkPeeringsOperations
from .operations_async import VirtualNetworkGatewaysOperations
from .operations_async import VirtualNetworkGatewayConnectionsOperations
from .operations_async import LocalNetworkGatewaysOperations
from .. import models
class NetworkManagementClient(NetworkManagementClientOperationsMixin):
"""Network Client.
:ivar application_gateways: ApplicationGatewaysOperations operations
:vartype application_gateways: azure.mgmt.network.v2017_03_01.aio.operations_async.ApplicationGatewaysOperations
:ivar express_route_circuit_authorizations: ExpressRouteCircuitAuthorizationsOperations operations
:vartype express_route_circuit_authorizations: azure.mgmt.network.v2017_03_01.aio.operations_async.ExpressRouteCircuitAuthorizationsOperations
:ivar express_route_circuit_peerings: ExpressRouteCircuitPeeringsOperations operations
:vartype express_route_circuit_peerings: azure.mgmt.network.v2017_03_01.aio.operations_async.ExpressRouteCircuitPeeringsOperations
:ivar express_route_circuits: ExpressRouteCircuitsOperations operations
:vartype express_route_circuits: azure.mgmt.network.v2017_03_01.aio.operations_async.ExpressRouteCircuitsOperations
:ivar express_route_service_providers: ExpressRouteServiceProvidersOperations operations
:vartype express_route_service_providers: azure.mgmt.network.v2017_03_01.aio.operations_async.ExpressRouteServiceProvidersOperations
:ivar load_balancers: LoadBalancersOperations operations
:vartype load_balancers: azure.mgmt.network.v2017_03_01.aio.operations_async.LoadBalancersOperations
:ivar network_interfaces: NetworkInterfacesOperations operations
:vartype network_interfaces: azure.mgmt.network.v2017_03_01.aio.operations_async.NetworkInterfacesOperations
:ivar network_security_groups: NetworkSecurityGroupsOperations operations
:vartype network_security_groups: azure.mgmt.network.v2017_03_01.aio.operations_async.NetworkSecurityGroupsOperations
:ivar security_rules: SecurityRulesOperations operations
:vartype security_rules: azure.mgmt.network.v2017_03_01.aio.operations_async.SecurityRulesOperations
:ivar network_watchers: NetworkWatchersOperations operations
:vartype network_watchers: azure.mgmt.network.v2017_03_01.aio.operations_async.NetworkWatchersOperations
:ivar packet_captures: PacketCapturesOperations operations
:vartype packet_captures: azure.mgmt.network.v2017_03_01.aio.operations_async.PacketCapturesOperations
:ivar public_ip_addresses: PublicIPAddressesOperations operations
:vartype public_ip_addresses: azure.mgmt.network.v2017_03_01.aio.operations_async.PublicIPAddressesOperations
:ivar route_filters: RouteFiltersOperations operations
:vartype route_filters: azure.mgmt.network.v2017_03_01.aio.operations_async.RouteFiltersOperations
:ivar route_filter_rules: RouteFilterRulesOperations operations
:vartype route_filter_rules: azure.mgmt.network.v2017_03_01.aio.operations_async.RouteFilterRulesOperations
:ivar route_tables: RouteTablesOperations operations
:vartype route_tables: azure.mgmt.network.v2017_03_01.aio.operations_async.RouteTablesOperations
:ivar routes: RoutesOperations operations
:vartype routes: azure.mgmt.network.v2017_03_01.aio.operations_async.RoutesOperations
:ivar bgp_service_communities: BgpServiceCommunitiesOperations operations
:vartype bgp_service_communities: azure.mgmt.network.v2017_03_01.aio.operations_async.BgpServiceCommunitiesOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.network.v2017_03_01.aio.operations_async.UsagesOperations
:ivar virtual_networks: VirtualNetworksOperations operations
:vartype virtual_networks: azure.mgmt.network.v2017_03_01.aio.operations_async.VirtualNetworksOperations
:ivar subnets: SubnetsOperations operations
:vartype subnets: azure.mgmt.network.v2017_03_01.aio.operations_async.SubnetsOperations
:ivar virtual_network_peerings: VirtualNetworkPeeringsOperations operations
:vartype virtual_network_peerings: azure.mgmt.network.v2017_03_01.aio.operations_async.VirtualNetworkPeeringsOperations
:ivar virtual_network_gateways: VirtualNetworkGatewaysOperations operations
:vartype virtual_network_gateways: azure.mgmt.network.v2017_03_01.aio.operations_async.VirtualNetworkGatewaysOperations
:ivar virtual_network_gateway_connections: VirtualNetworkGatewayConnectionsOperations operations
:vartype virtual_network_gateway_connections: azure.mgmt.network.v2017_03_01.aio.operations_async.VirtualNetworkGatewayConnectionsOperations
:ivar local_network_gateways: LocalNetworkGatewaysOperations operations
:vartype local_network_gateways: azure.mgmt.network.v2017_03_01.aio.operations_async.LocalNetworkGatewaysOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = NetworkManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.application_gateways = ApplicationGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_authorizations = ExpressRouteCircuitAuthorizationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_peerings = ExpressRouteCircuitPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuits = ExpressRouteCircuitsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_service_providers = ExpressRouteServiceProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancers = LoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interfaces = NetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_security_groups = NetworkSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.security_rules = SecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_watchers = NetworkWatchersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.packet_captures = PacketCapturesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_addresses = PublicIPAddressesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filters = RouteFiltersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filter_rules = RouteFilterRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_tables = RouteTablesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.routes = RoutesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.bgp_service_communities = BgpServiceCommunitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_networks = VirtualNetworksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.subnets = SubnetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_peerings = VirtualNetworkPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateways = VirtualNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateway_connections = VirtualNetworkGatewayConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.local_network_gateways = LocalNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "NetworkManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
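# Hedged usage sketch (credential wiring is illustrative, not part of this module):
#
#   from azure.identity.aio import DefaultAzureCredential
#
#   async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#       async for vnet in client.virtual_networks.list_all():
#           print(vnet.name)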
| 65.4
| 180
| 0.797316
|
1e36a1fe1b7b56dc2324075659ea2b63af794882
| 35,061
|
py
|
Python
|
py_trees/blackboard.py
|
fjulian/py_trees
|
63fffdfb581f390546c375f13d064b10fdace786
|
[
"BSD-3-Clause"
] | null | null | null |
py_trees/blackboard.py
|
fjulian/py_trees
|
63fffdfb581f390546c375f13d064b10fdace786
|
[
"BSD-3-Clause"
] | null | null | null |
py_trees/blackboard.py
|
fjulian/py_trees
|
63fffdfb581f390546c375f13d064b10fdace786
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# License: BSD
# https://raw.githubusercontent.com/splintered-reality/py_trees/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
Blackboards are not a necessary component of behaviour tree implementations,
but are, nonetheless, a fairly common mechanism for sharing data between
behaviours in the tree. See, for example, the `design notes`_
for blackboards in Unreal Engine.
.. image:: images/blackboard.jpg
:width: 300px
:align: center
Implementations vary widely depending on the needs of
the framework using them. The simplest implementations take the
form of a key-value store with global access, while more
rigorous implementations scope access and form a secondary
graph overlaying the tree graph connecting data ports between behaviours.
The implementation here strives to remain simple to use
(so 'rapid development' does not become just 'development'), yet
sufficiently featured so that the magic behind the scenes (i.e. the
data sharing on the blackboard) is exposed and helpful in debugging
tree applications.
To be more concrete, the following is a list of features that this
implementation either embraces or does not.
* [+] Centralised key-value store
* [+] Client based usage with registration of read/write intentions at construction
* [+] Activity stream that tracks read/write operations by behaviours
* [-] Sharing between tree instances
* [-] Exclusive locks for reading/writing
* [-] Priority policies for variable instantiations
.. include:: weblinks.rst
"""
##############################################################################
# Imports
##############################################################################
import enum
import operator
import re
import typing
import uuid
from . import console
from . import utilities
##############################################################################
# Classes
##############################################################################
class KeyMetaData(object):
"""
Stores the aggregated metadata for a key on the blackboard.
"""
def __init__(self):
self.read = set()
self.write = set()
class ActivityType(enum.Enum):
"""An enumerator representing the operation on a blackboard variable"""
READ = "READ"
"""Read from the blackboard"""
INITIALISED = "INITIALISED"
"""Initialised a key-value pair on the blackboard"""
WRITE = "WRITE"
"""Wrote to the blackboard."""
ACCESSED = "ACCESSED"
"""Key accessed, either for reading, or modification of the value's internal attributes (e.g. foo.bar)."""
ACCESS_DENIED = "ACCESS_DENIED"
"""Client did not have access to read/write a key."""
NO_KEY = "NO_KEY"
"""Tried to access a key that does not yet exist on the blackboard."""
NO_OVERWRITE = "NO_OVERWRITE"
"""Tried to write but variable already exists and a no-overwrite request was respected."""
UNSET = "UNSET"
"""Key was removed from the blackboard"""
class ActivityItem(object):
"""
Recorded data pertaining to activity on the blackboard.
Args:
key: name of the variable on the blackboard
client_name: convenient name of the client performing the operation
client_id: unique id of the client performing the operation
activity_type: type of activity
previous_value: of the given key (None if this field is not relevant)
current_value: current value for the given key (None if this field is not relevant)
"""
def __init__(
self,
key,
client_name: str,
client_id: uuid.UUID,
activity_type: ActivityType,
previous_value: typing.Any=None,
current_value: typing.Any=None):
# TODO validity checks for values passed/not passed on the
# respective activity types. Note: consider using an enum
# for 'no value here' since None is a perfectly acceptable
# value for a key
self.key = key
self.client_name = client_name
self.client_id = client_id
self.activity_type = activity_type
self.previous_value = previous_value
self.current_value = current_value
class ActivityStream(object):
"""
Storage container with convenience methods for manipulating the stored
activity stream.
Attributes:
        data (typing.List[ActivityItem]): list of activity items, earliest first
maximum_size (int): pop items if this size is exceeded
"""
def __init__(self, maximum_size: int=500):
"""
Initialise the stream with a maximum storage limit.
Args:
maximum_size: pop items from the stream if this size is exceeded
"""
self.data = []
self.maximum_size = maximum_size
def push(self, activity_item: ActivityItem):
"""
Push the next activity item to the stream.
Args:
activity_item: new item to append to the stream
"""
if len(self.data) > self.maximum_size:
self.data.pop()
self.data.append(activity_item)
def clear(self):
"""
Delete all activities from the stream.
"""
self.data = []
class Blackboard(object):
"""
Centralised key-value store for sharing data between behaviours.
This class is a coat-hanger for the centralised data store, metadata
    for its administration and static methods for interacting with it.
This api is intended for authors of debugging and introspection
tools on the blackboard. Users should make use of the :class:`BlackboardClient`.
Attributes:
Blackboard.clients (typing.Dict[uuid.UUID, Blackboard]): clients, gathered by uuid
Blackboard.storage (typing.Dict[str, typing.Any]): key-value data store
Blackboard.metadata (typing.Dict[str, KeyMetaData]): key associated metadata
Blackboard.activity_stream (ActivityStream): logged activity
"""
storage = {} # Dict[str, Any] / key-value storage
metadata = {} # Dict[ str, KeyMetaData ] / key-metadata information
clients = {} # Dict[ uuid.UUID, Blackboard] / id-client information
activity_stream = None
@staticmethod
def keys() -> typing.Set[str]:
"""
Get the set of blackboard keys.
Returns:
the complete set of keys registered by clients
"""
# return registered keys, those on the blackboard are not
# necessarily written to yet
return Blackboard.metadata.keys()
@staticmethod
def get(variable_name: str) -> typing.Any:
"""
        Extract the value associated with the given variable name,
can be nested, e.g. battery.percentage. This differs from the
client get method in that it doesn't pass through the client access
checks. To be used for utility tooling (e.g. display methods) and not by
users directly.
Args:
variable_name: of the variable to get, can be nested, e.g. battery.percentage
Raises:
            KeyError: if the variable or its nested attributes do not yet exist on the blackboard
Return:
The stored value for the given variable
"""
# convenience, just in case they used slashes instead of .'s
if '/' in variable_name:
variable_name = ".".join(variable_name.split('/'))
name_components = variable_name.split('.')
key = name_components[0]
key_attributes = '.'.join(name_components[1:])
# can raise KeyError
value = Blackboard.storage[key]
if key_attributes:
try:
value = operator.attrgetter(key_attributes)(value)
except AttributeError:
raise KeyError("Key exists, but does not have the specified nested attributes [{}]".format(variable_name))
return value
@staticmethod
def set(variable_name: str, value: typing.Any):
"""
        Set the value associated with the given variable name,
        can be nested, e.g. battery.percentage. This differs from the
        client set method in that it doesn't pass through the client access
checks. To be used for utility tooling (e.g. display methods) and not by
users directly.
Args:
variable_name: of the variable to set, can be nested, e.g. battery.percentage
Raises:
            AttributeError: if it is attempting to set a nested attribute that does not exist.
"""
name_components = variable_name.split('.')
key = name_components[0]
key_attributes = '.'.join(name_components[1:])
if not key_attributes:
Blackboard.storage[key] = value
else:
setattr(Blackboard.storage[key], key_attributes, value)
@staticmethod
def unset(key: str):
"""
For when you need to completely remove a blackboard variable (key-value pair),
this provides a convenient helper method.
Args:
key: name of the variable to remove
Returns:
True if the variable was removed, False if it was already absent
"""
try:
del Blackboard.storage[key]
return True
except KeyError:
return False
@staticmethod
def keys_filtered_by_regex(regex: str) -> typing.Set[str]:
"""
Get the set of blackboard keys filtered by regex.
Args:
regex: a python regex string
Returns:
subset of keys that have been registered and match the pattern
"""
pattern = re.compile(regex)
return [key for key in Blackboard.metadata.keys() if pattern.search(key) is not None]
@staticmethod
def keys_filtered_by_clients(client_ids: typing.Union[typing.List[str], typing.Set[str]]) -> typing.Set[str]:
"""
Get the set of blackboard keys filtered by client ids.
Args:
client_ids: set of client uuid's.
Returns:
subset of keys that have been registered by the specified clients
"""
# convenience for users
if type(client_ids) == list:
client_ids = set(client_ids)
keys = set()
for key in Blackboard.metadata.keys():
# for sets, | is union, & is intersection
key_clients = set(Blackboard.metadata[key].read) | set(Blackboard.metadata[key].write)
if key_clients & client_ids:
keys.add(key)
return keys
@staticmethod
def enable_activity_stream(maximum_size: int=500):
"""
Enable logging of activities on the blackboard.
Args:
maximum_size: pop items from the stream if this size is exceeded
Raises:
RuntimeError if the activity stream is already enabled
"""
if Blackboard.activity_stream is None:
Blackboard.activity_stream = ActivityStream(maximum_size)
else:
RuntimeError("activity stream is already enabled for this blackboard")
@staticmethod
def disable_activity_stream():
"""
Disable logging of activities on the blackboard
"""
Blackboard.activity_stream = None
@staticmethod
def clear():
"""
Completely clear all key, value and client information from the blackboard.
Also deletes the activity stream.
"""
Blackboard.storage.clear()
Blackboard.metadata.clear()
Blackboard.clients.clear()
Blackboard.activity_stream = None
class BlackboardClient(object):
"""
Client to the key-value store for sharing data between behaviours.
**Examples**
Blackboard clients will accept a user-friendly name / unique identifier for
registration on the centralised store or create them for you if none is provided.
.. code-block:: python
provided = py_trees.blackboard.BlackboardClient(
name="Provided",
unique_identifier=uuid.uuid4()
)
print(provided)
generated = py_trees.blackboard.BlackboardClient()
print(generated)
.. figure:: images/blackboard_client_instantiation.png
:align: center
Client Instantiation
Register read/write access for keys on the blackboard. Note, registration is
not initialisation.
.. code-block:: python
blackboard = py_trees.blackboard.BlackboardClient(
name="Client",
read={"foo"},
write={"bar"}
)
blackboard.register_key(key="foo", write=True)
blackboard.foo = "foo"
print(blackboard)
.. figure:: images/blackboard_read_write.png
:align: center
Variable Read/Write Registration
Disconnected instances will discover the centralised
key-value store.
.. code-block:: python
def check_foo():
blackboard = py_trees.blackboard.BlackboardClient(name="Reader", read={"foo"})
print("Foo: {}".format(blackboard.foo))
blackboard = py_trees.blackboard.BlackboardClient(name="Writer", write={"foo"})
blackboard.foo = "bar"
check_foo()
To respect an already initialised key on the blackboard:
.. code-block:: python
blackboard = BlackboardClient(name="Writer", read={"foo"))
result = blackboard.set("foo", "bar", overwrite=False)
Store complex objects on the blackboard:
.. code-block:: python
class Nested(object):
def __init__(self):
self.foo = None
self.bar = None
def __str__(self):
return str(self.__dict__)
writer = py_trees.blackboard.BlackboardClient(
name="Writer",
write={"nested"}
)
reader = py_trees.blackboard.BlackboardClient(
name="Reader",
read={"nested"}
)
writer.nested = Nested()
writer.nested.foo = "foo"
writer.nested.bar = "bar"
foo = reader.nested.foo
print(writer)
print(reader)
.. figure:: images/blackboard_nested.png
:align: center
Log and display the activity stream:
.. code-block:: python
py_trees.blackboard.Blackboard.enable_activity_stream(maximum_size=100)
blackboard_reader = py_trees.blackboard.BlackboardClient(name="Reader", read={"foo"})
blackboard_writer = py_trees.blackboard.BlackboardClient(name="Writer", write={"foo"})
blackboard_writer.foo = "bar"
blackboard_writer.foo = "foobar"
unused_result = blackboard_reader.foo
print(py_trees.display.unicode_blackboard_activity_stream())
py_trees.blackboard.Blackboard.activity_stream.clear()
.. figure:: images/blackboard_activity_stream.png
:align: center
Display the blackboard on the console, or part thereof:
.. code-block:: python
writer = py_trees.blackboard.BlackboardClient(
name="Writer",
write={"foo", "bar", "dude", "dudette"}
)
reader = py_trees.blackboard.BlackboardClient(
name="Reader",
read={"foo", "bBlackboardClient( )
writer.foo = "foo"
writer.bar = "bar"
writer.dude = "bob"
# all key-value pairs
print(py_trees.display.unicode_blackboard())
# various filtered views
print(py_trees.display.unicode_blackboard(key_filter={"foo"}))
print(py_trees.display.unicode_blackboard(regex_filter="dud*"))
print(py_trees.display.unicode_blackboard(client_filter={reader.unique_identifier}))
# list the clients associated with each key
print(py_trees.display.unicode_blackboard(display_only_key_metadata=True))
.. figure:: images/blackboard_display.png
:align: center
Behaviours register their own blackboard clients with the same name/id as the
behaviour itself. This helps associate blackboard variables with behaviours, enabling
various introspection and debugging capabilities on the behaviour trees.
Creating a custom behaviour with blackboard variables:
.. code-block:: python
class Foo(py_trees.behaviours.Behaviour):
def __init__(self, name):
super().__init__(name=name)
self.blackboard.register_key("foo", read=True)
def update(self):
self.feedback_message = self.blackboard.foo
                return py_trees.common.Status.SUCCESS
Rendering a dot graph for a behaviour tree, complete with blackboard variables:
.. code-block:: python
# in code
py_trees.display.render_dot_tree(py_trees.demos.blackboard.create_root())
# command line tools
py-trees-render --with-blackboard-variables py_trees.demos.blackboard.create_root
.. graphviz:: dot/demo-blackboard.dot
:align: center
And to demonstrate that it doesn't become a tangled nightmare at scale, an example of
a more complex tree:
.. graphviz:: dot/blackboard-with-variables.dot
:align: center
With judicious use of the display methods / activity stream around the ticks
of a tree (refer to :class:`py_trees.visitors.DisplaySnapshotVisitor` for
    exemplar code):
.. figure:: images/blackboard_trees.png
:align: center
.. seealso::
* :ref:`py-trees-demo-blackboard <py-trees-demo-blackboard-program>`
* :class:`py_trees.visitors.DisplaySnapshotVisitor`
* :class:`py_trees.behaviours.SetBlackboardVariable`
* :class:`py_trees.behaviours.UnsetBlackboardVariable`
* :class:`py_trees.behaviours.CheckBlackboardVariableExists`
* :class:`py_trees.behaviours.WaitForBlackboardVariable`
* :class:`py_trees.behaviours.CheckBlackboardVariableValue`
* :class:`py_trees.behaviours.WaitForBlackboardVariableValue`
Attributes:
name (str): client's convenient, but not necessarily unique identifier
unique_identifier (uuid.UUID): client's unique identifier
read (typing.List[str]): keys this client has permission to read
write (typing.List[str]): keys this client has permission to write
"""
def __init__(
self, *,
name: str=None,
unique_identifier: uuid.UUID=None,
read: typing.Set[str]=None,
write: typing.Set[str]=None):
"""
Args:
name: client's convenient identifier (stringifies the uuid if None)
unique_identifier: client's unique identifier (auto-generates if None)
read: list of keys this client has permission to read
write: list of keys this client has permission to write
Raises:
TypeError: if the provided name/unique identifier is not of type str/uuid.UUID
ValueError: if the unique identifier has already been registered
"""
# unique identifier
if unique_identifier is None:
super().__setattr__("unique_identifier", uuid.uuid4())
else:
if type(unique_identifier) != uuid.UUID:
raise TypeError("provided unique identifier is not of type uuid.UUID")
super().__setattr__("unique_identifier", unique_identifier)
if super().__getattribute__("unique_identifier") in Blackboard.clients.keys():
raise ValueError("this unique identifier has already been registered")
# name
if name is None or not name:
name = utilities.truncate(
original=str(super().__getattribute__("unique_identifier")).replace('-', '_'),
length=7
)
super().__setattr__("name", name)
else:
if not isinstance(name, str):
raise TypeError("provided name is not of type str [{}]".format(type(name)))
super().__setattr__("name", name)
# read
if read is None:
super().__setattr__("read", set())
elif type(read) is list:
super().__setattr__("read", set(read))
else:
super().__setattr__("read", read)
for key in super().__getattribute__("read"):
Blackboard.metadata.setdefault(key, KeyMetaData())
Blackboard.metadata[key].read.add(
super().__getattribute__("unique_identifier")
)
# write
if write is None:
super().__setattr__("write", set())
elif type(write) is list:
super().__setattr__("write", set(write))
else:
super().__setattr__("write", write)
for key in super().__getattribute__("write"):
Blackboard.metadata.setdefault(key, KeyMetaData())
Blackboard.metadata[key].write.add(
super().__getattribute__("unique_identifier")
)
Blackboard.clients[
super().__getattribute__("unique_identifier")
] = self
def __setattr__(self, name: str, value: typing.Any):
"""
Convenience attribute style referencing with checking against
permissions.
Raises:
AttributeError: if the client does not have write access to the variable
"""
# print("__setattr__ [{}][{}]".format(name, value))
if name not in super().__getattribute__("write"):
if Blackboard.activity_stream is not None:
Blackboard.activity_stream.push(
self._generate_activity_item(name, ActivityType.ACCESS_DENIED)
)
raise AttributeError("client '{}' does not have write access to '{}'".format(self.name, name))
if Blackboard.activity_stream is not None:
if name in Blackboard.storage.keys():
Blackboard.activity_stream.push(
self._generate_activity_item(
key=name,
activity_type=ActivityType.WRITE,
previous_value=Blackboard.storage[name],
current_value=value
)
)
else:
Blackboard.activity_stream.push(
self._generate_activity_item(
key=name,
activity_type=ActivityType.INITIALISED,
current_value=value
)
)
Blackboard.storage[name] = value
def __getattr__(self, name: str):
"""
Convenience attribute style referencing with checking against
permissions.
Raises:
AttributeError: if the client does not have read access to the variable
KeyError: if the variable does not yet exist on the blackboard
"""
# print("__getattr__ [{}]".format(name))
if name not in (super().__getattribute__("read") | super().__getattribute__("write")):
if Blackboard.activity_stream is not None:
Blackboard.activity_stream.push(
self._generate_activity_item(name, ActivityType.ACCESS_DENIED)
)
raise AttributeError("client '{}' does not have read/write access to '{}'".format(self.name, name))
try:
if name in super().__getattribute__("write"):
if Blackboard.activity_stream is not None:
Blackboard.activity_stream.push(
self._generate_activity_item(
key=name,
activity_type=ActivityType.ACCESSED,
current_value=Blackboard.storage[name],
)
)
return Blackboard.storage[name]
if name in super().__getattribute__("read"):
if Blackboard.activity_stream is not None:
Blackboard.activity_stream.push(
self._generate_activity_item(
key=name,
activity_type=ActivityType.READ,
current_value=Blackboard.storage[name],
)
)
return Blackboard.storage[name]
except KeyError as e:
if Blackboard.activity_stream is not None:
Blackboard.activity_stream.push(
self._generate_activity_item(name, ActivityType.NO_KEY)
)
raise KeyError("client '{}' tried to access '{}' but it does not yet exist on the blackboard".format(self.name, name)) from e
def set(self, name: str, value: typing.Any, overwrite: bool=True) -> bool:
"""
Set, conditionally depending on whether the variable already exists or otherwise.
This is most useful when initialising variables and multiple elements
seek to do so. A good policy to adopt for your applications in these situations is
a first come, first served policy. Ensure global configuration has the first
opportunity followed by higher priority behaviours in the tree and so forth.
Lower priority behaviours would use this to respect the pre-configured
setting and at most, just validate that it is acceptable to the functionality
        of its own behaviour.
Args:
name: name of the variable to set
value: value of the variable to set
            overwrite: if False, do not overwrite the variable if it already exists on the blackboard
Returns:
success or failure (overwrite is False and variable already set)
Raises:
AttributeError: if the client does not have write access to the variable
KeyError: if the variable does not yet exist on the blackboard
"""
name_components = name.split('.')
key = name_components[0]
key_attributes = '.'.join(name_components[1:])
if key not in super().__getattribute__("write"):
if Blackboard.activity_stream is not None:
Blackboard.activity_stream.push(
self._generate_activity_item(name, ActivityType.ACCESS_DENIED)
)
raise AttributeError("client '{}' does not have write access to '{}'".format(self.name, name))
if not overwrite:
if key in Blackboard.storage:
if Blackboard.activity_stream is not None:
Blackboard.activity_stream.push(
self._generate_activity_item(
key=key,
activity_type=ActivityType.NO_OVERWRITE,
current_value=Blackboard.storage[name])
)
return False
if not key_attributes:
setattr(self, name, value)
return True
else:
blackboard_object = getattr(self, key)
try:
setattr(blackboard_object, key_attributes, value)
return True
except AttributeError: # when the object doesn't have the attributes
return False
def exists(self, name: str) -> bool:
"""
Check if the specified variable exists on the blackboard.
Args:
name: name of the variable to get, can be nested, e.g. battery.percentage
Raises:
AttributeError: if the client does not have read access to the variable
"""
try:
unused_value = self.get(name)
return True
except KeyError:
return False
def get(self, name: str) -> typing.Any:
"""
Method based accessor to the blackboard variables (as opposed to simply using
'.<name>').
Args:
name: name of the variable to get, can be nested, e.g. battery.percentage
Raises:
AttributeError: if the client does not have read access to the variable
            KeyError: if the variable or its nested attributes do not yet exist on the blackboard
"""
# key attributes is an empty string if not a nested variable name
name_components = name.split('.')
key = name_components[0]
key_attributes = '.'.join(name_components[1:])
value = getattr(self, key) # will run through client access checks in __getattr__
if key_attributes:
try:
value = operator.attrgetter(key_attributes)(value)
except AttributeError:
raise KeyError("Key exists, but does not have the specified nested attributes [{}]".format(name))
return value
def unset(self, key: str):
"""
For when you need to completely remove a blackboard variable (key-value pair),
this provides a convenient helper method.
Args:
key: name of the variable to remove
Returns:
True if the variable was removed, False if it was already absent
"""
if Blackboard.activity_stream is not None:
Blackboard.activity_stream.push(
self._generate_activity_item(key, ActivityType.UNSET)
)
# Three means of handling a non-existent key - 1) raising a KeyError, 2) catching
# the KeyError and passing, 3) catch the KeyError and return True/False.
# Option 1) is inconvenient - requires a redundant try/catch 99% of cases
# Option 2) hides information - bad
# Option 3) no extra code necessary and information is there if desired
try:
del Blackboard.storage[key]
return True
except KeyError:
return False
def __str__(self):
indent = " "
s = console.green + "Blackboard Client" + console.reset + "\n"
s += console.white + indent + "Client Data" + console.reset + "\n"
keys = ["name", "unique_identifier", "read", "write"]
s += self._stringify_key_value_pairs(keys, self.__dict__, 2 * indent)
s += console.white + indent + "Variables" + console.reset + "\n"
keys = self.read | self.write
s += self._stringify_key_value_pairs(keys, Blackboard.storage, 2 * indent)
return s
def _generate_activity_item(self, key, activity_type, previous_value=None, current_value=None):
return ActivityItem(
key=key,
client_name=super().__getattribute__("name"),
client_id=super().__getattribute__("unique_identifier"),
activity_type=activity_type,
previous_value=previous_value,
current_value=current_value
)
def _stringify_key_value_pairs(self, keys, key_value_dict, indent):
s = ""
max_length = 0
for key in keys:
max_length = len(key) if len(key) > max_length else max_length
for key in keys:
try:
value = key_value_dict[key]
lines = ('{0}'.format(value)).split('\n')
if len(lines) > 1:
s += console.cyan + indent + '{0: <{1}}'.format(key, max_length + 1) + console.reset + ":\n"
for line in lines:
s += console.yellow + indent + " {0}\n".format(line) + console.reset
else:
s += console.cyan + indent + '{0: <{1}}'.format(key, max_length + 1) + console.reset + ": " + console.yellow + '{0}\n'.format(value) + console.reset
except KeyError:
s += console.cyan + indent + '{0: <{1}}'.format(key, max_length + 1) + console.reset + ": " + console.yellow + "-\n" + console.reset
s += console.reset
return s
def unregister(self, clear: bool=True):
"""
Unregister this blackboard client and if requested, clear key-value pairs if this
client is the last user of those variables.
Args:
clear: remove key-values pairs from the blackboard
"""
self.unregister_all_keys(clear)
del Blackboard.clients[super().__getattribute__("unique_identifier")]
def unregister_all_keys(self, clear: bool=True):
"""
Unregister all keys currently registered by this blackboard client and if requested,
clear key-value pairs if this client is the last user of those variables.
Args:
clear: remove key-values pairs from the blackboard
"""
for key in self.read:
Blackboard.metadata[key].read.remove(super().__getattribute__("unique_identifier"))
for key in self.write:
Blackboard.metadata[key].write.remove(super().__getattribute__("unique_identifier"))
if clear:
for key in (set(self.read) | set(self.write)):
if not (set(Blackboard.metadata[key].read) | set(Blackboard.metadata[key].write)):
try:
del Blackboard.storage[key]
except KeyError:
                        pass # perfectly acceptable for a key to not exist on the blackboard yet
del Blackboard.metadata[key]
def register_key(self, key: str, read: bool=False, write: bool=False):
"""
Register a key on the blackboard to associate with this client.
Args:
key: key to register
read: permit/track read access
write: permit/track write access
"""
Blackboard.metadata.setdefault(key, KeyMetaData())
if read:
super().__getattribute__("read").add(key)
Blackboard.metadata[key].read.add(super().__getattribute__("unique_identifier"))
if write:
super().__getattribute__("write").add(key)
Blackboard.metadata[key].write.add(super().__getattribute__("unique_identifier"))
def unregister_key(self, key: str, clear: bool=True):
"""
        Unregister a key associated with this client.
Args:
key: key to unregister
clear: remove key-values pairs from the blackboard
Raises:
KeyError if the key has not been previously registered
"""
super().__getattribute__("read").discard(key) # doesn't throw exceptions if it not present
super().__getattribute__("write").discard(key)
Blackboard.metadata[key].read.discard(super().__getattribute__("unique_identifier"))
Blackboard.metadata[key].write.discard(super().__getattribute__("unique_identifier"))
if not (Blackboard.metadata[key].read | Blackboard.metadata[key].write):
del Blackboard.metadata[key]
if clear:
try:
del Blackboard.storage[key]
except KeyError:
pass # perfectly legitimate for a registered key to not exist on the blackboard
| 38.109783
| 168
| 0.609823
|
7436fba3286534bf59a40571433f71e1782e5e42
| 951
|
py
|
Python
|
AnalyticsVidhya/AmExpert2021/bhaTrain1.py
|
BharathC15/bharathML
|
ab0460eace3bc83a6b9a7ba7c40e9721baead09a
|
[
"MIT"
] | null | null | null |
AnalyticsVidhya/AmExpert2021/bhaTrain1.py
|
BharathC15/bharathML
|
ab0460eace3bc83a6b9a7ba7c40e9721baead09a
|
[
"MIT"
] | null | null | null |
AnalyticsVidhya/AmExpert2021/bhaTrain1.py
|
BharathC15/bharathML
|
ab0460eace3bc83a6b9a7ba7c40e9721baead09a
|
[
"MIT"
] | null | null | null |
# on XGBoosting [Not working]
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
import xgboost as xgb
if __name__=='__main__':
print('Starting the Program')
df = pd.read_csv('trainData.csv')
print(df.head())
print(df.info())
# Extraction of Track columns
TargetCol = df.columns[df.columns.str.startswith('Target')]
targetDF = df[TargetCol]
df.drop(TargetCol,inplace=True,axis=1)
X_train, X_test, y_train, y_test = train_test_split(df, targetDF, test_size = 0.2)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model = xgb.XGBClassifier(objective='multi:softprob')
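    # Note (hedged guess at the "[Not working]" above): targetDF keeps every column that
    # starts with 'Target', so y_train is 2-D; XGBClassifier's sklearn API expects a single
    # 1-D label vector, so the fit below likely needs one target column (or one model per target).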
model.fit(X_train, y_train)
print(model)
expected_y = y_test
predicted_y = model.predict(X_test)
print(metrics.classification_report(expected_y, predicted_y))
print(metrics.confusion_matrix(expected_y, predicted_y))
| 28.818182
| 86
| 0.722397
|
2c95dd2bd5bc65d7dfea362155f92d527594014b
| 10,377
|
py
|
Python
|
pointnet2_ops_lib/pointnet2_ops/pointnet2_utils.py
|
liruiw/Pointnet2_PyTorch
|
e803915c929b3b69bafe4c07e1f2c322e7a20aae
|
[
"Unlicense"
] | null | null | null |
pointnet2_ops_lib/pointnet2_ops/pointnet2_utils.py
|
liruiw/Pointnet2_PyTorch
|
e803915c929b3b69bafe4c07e1f2c322e7a20aae
|
[
"Unlicense"
] | null | null | null |
pointnet2_ops_lib/pointnet2_ops/pointnet2_utils.py
|
liruiw/Pointnet2_PyTorch
|
e803915c929b3b69bafe4c07e1f2c322e7a20aae
|
[
"Unlicense"
] | 1
|
2021-01-07T03:15:04.000Z
|
2021-01-07T03:15:04.000Z
|
import torch
import torch.nn as nn
import warnings
from torch.autograd import Function
try:
import pointnet2_ops._ext as _ext
except ImportError:
from torch.utils.cpp_extension import load
import glob
import os.path as osp
import os
warnings.warn("Unable to load pointnet2_ops cpp extension. JIT Compiling.")
_ext_src_root = osp.join(osp.dirname(__file__), "_ext-src")
_ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob(
osp.join(_ext_src_root, "src", "*.cu")
)
_ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*"))
os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5"
_ext = load(
"_ext",
sources=_ext_sources,
extra_include_paths=[osp.join(_ext_src_root, "include")],
extra_cflags=["-O3"],
extra_cuda_cflags=["-O3", "-Xfatbin", "-compress-all"],
with_cuda=True,
)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
out = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(out)
return out
@staticmethod
def backward(ctx, grad_out):
return ()
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
ctx.save_for_backward(idx, features)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, features = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
        unknown : torch.Tensor
            (B, n, 3) tensor of unknown features
        known : torch.Tensor
            (B, m, 3) tensor of known features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
dist = torch.sqrt(dist2)
ctx.mark_non_differentiable(dist, idx)
return dist, idx
@staticmethod
def backward(ctx, grad_dist, grad_idx):
return ()
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
        Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
ctx.save_for_backward(idx, weight, features)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, features = ctx.saved_tensors
m = features.size(2)
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, torch.zeros_like(idx), torch.zeros_like(weight)
three_interpolate = ThreeInterpolate.apply
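# Hedged usage sketch of the feature-propagation path (shapes are illustrative):
#   dist, idx = three_nn(unknown_xyz, known_xyz)              # (B, n, 3) each
#   weight = 1.0 / (dist + 1e-8)
#   weight = weight / torch.sum(weight, dim=2, keepdim=True)  # normalised inverse-distance weights
#   upsampled = three_interpolate(known_feats, idx, weight)   # (B, c, n)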
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
ctx.save_for_backward(idx, features)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, features = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, torch.zeros_like(idx)
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
output = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_out):
return ()
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
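# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal example of how QueryAndGroup is typically used: pick a set of
# centroids, then gather the xyz offsets and features of each centroid's ball
# neighbourhood. It assumes the compiled `_ext` CUDA extension is importable
# and that the tensors live on a CUDA device; the shapes below are arbitrary.
def _example_query_and_group():
    B, N, npoint, C = 2, 1024, 128, 32
    xyz = torch.rand(B, N, 3).cuda()           # (B, N, 3) point coordinates
    new_xyz = xyz[:, :npoint, :].contiguous()  # stand-in for FPS-sampled centroids
    features = torch.rand(B, C, N).cuda()      # (B, C, N) per-point descriptors
    grouper = QueryAndGroup(radius=0.2, nsample=32, use_xyz=True)
    return grouper(xyz, new_xyz, features)     # (B, C + 3, npoint, 32)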
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
| 27.307895
| 103
| 0.571552
|
a589d8c2054f0126a76102c6f5735a4762a2c530
| 661
|
py
|
Python
|
commitizen/factory.py
|
blaggacao/commitizen
|
a108872bafebd601358b282d0385760b5fca036d
|
[
"MIT"
] | null | null | null |
commitizen/factory.py
|
blaggacao/commitizen
|
a108872bafebd601358b282d0385760b5fca036d
|
[
"MIT"
] | null | null | null |
commitizen/factory.py
|
blaggacao/commitizen
|
a108872bafebd601358b282d0385760b5fca036d
|
[
"MIT"
] | null | null | null |
from commitizen import BaseCommitizen, out
from commitizen.config import BaseConfig
from commitizen.cz import registry
from commitizen.error_codes import NO_COMMITIZEN_FOUND
def commiter_factory(config: BaseConfig) -> BaseCommitizen:
"""Return the correct commitizen existing in the registry."""
name: str = config.settings["name"]
try:
_cz = registry[name](config)
except KeyError:
msg_error = (
"The committer has not been found in the system.\n\n"
f"Try running 'pip install {name}'\n"
)
out.error(msg_error)
raise SystemExit(NO_COMMITIZEN_FOUND)
else:
return _cz
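# --- Hedged usage sketch (not part of the original module) -------------------
# Resolving the built-in conventional-commits committer. How a BaseConfig is
# normally populated varies between commitizen versions; updating the settings
# mapping with a "name" key is an assumption made purely for illustration.
def _example_commiter_factory() -> BaseCommitizen:
    config = BaseConfig()
    config.settings.update({"name": "cz_conventional_commits"})
    return commiter_factory(config)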
| 31.47619
| 65
| 0.6823
|
7431b7d2acf46186b050a80bf16be78881218d54
| 2,229
|
py
|
Python
|
sigstore/_internal/set.py
|
trailofbits/sigstore-python
|
1772dba7b416b4537f4d71605ab0ac2cf8491899
|
[
"Apache-2.0"
] | 5
|
2022-03-15T18:38:45.000Z
|
2022-03-24T20:59:29.000Z
|
sigstore/_internal/set.py
|
trailofbits/sigstore-python
|
1772dba7b416b4537f4d71605ab0ac2cf8491899
|
[
"Apache-2.0"
] | 9
|
2022-03-22T01:55:06.000Z
|
2022-03-27T09:21:59.000Z
|
sigstore/_internal/set.py
|
trailofbits/sigstore-python
|
1772dba7b416b4537f4d71605ab0ac2cf8491899
|
[
"Apache-2.0"
] | 1
|
2022-03-21T21:36:17.000Z
|
2022-03-21T21:36:17.000Z
|
# Copyright 2022 The Sigstore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for verifying Signed Entry Timestamps.
"""
import base64
from importlib import resources
from typing import cast
import cryptography.hazmat.primitives.asymmetric.ec as ec
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from securesystemslib.formats import encode_canonical
from sigstore._internal.rekor import RekorEntry
REKOR_ROOT_PUBKEY = resources.read_binary("sigstore._store", "rekor.pub")
class InvalidSetError(Exception):
pass
def verify_set(entry: RekorEntry) -> None:
"""Verify the Signed Entry Timestamp for a given Rekor entry"""
# Put together the payload
#
# This involves removing any non-required fields (verification and attestation) and then
# canonicalizing the remaining JSON in accordance with IETF's RFC 8785.
raw_data = entry.raw_data.copy()
del raw_data["verification"]
del raw_data["attestation"]
canon_data: bytes = encode_canonical(raw_data).encode()
# Decode the SET field
signed_entry_ts: bytes = base64.b64decode(
entry.verification["signedEntryTimestamp"].encode()
)
# Load the Rekor public key
rekor_key = load_pem_public_key(REKOR_ROOT_PUBKEY)
rekor_key = cast(ec.EllipticCurvePublicKey, rekor_key)
# Validate the SET
try:
rekor_key.verify(
signature=signed_entry_ts,
data=canon_data,
signature_algorithm=ec.ECDSA(hashes.SHA256()),
)
except InvalidSignature as inval_sig:
raise InvalidSetError from inval_sig
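# --- Hedged standalone sketch (not part of the original module) --------------
# The same verify-or-raise control flow exercised against a locally generated
# ECDSA P-256 key, which can be handy for testing without a live Rekor entry.
# The helper name and payload below are illustrative assumptions.
def _example_verify_or_raise() -> None:
    private_key = ec.generate_private_key(ec.SECP256R1())
    payload = b"canonicalized-rekor-entry"
    signature = private_key.sign(payload, ec.ECDSA(hashes.SHA256()))
    try:
        private_key.public_key().verify(
            signature=signature,
            data=payload,
            signature_algorithm=ec.ECDSA(hashes.SHA256()),
        )
    except InvalidSignature as inval_sig:
        raise InvalidSetError from inval_sig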
| 32.779412
| 92
| 0.750112
|
376bd4205672e05c29d60f03c82229cf96c4eeda
| 46,038
|
py
|
Python
|
ivy/functional/ivy/general.py
|
VedPatwardhan/ivy
|
7b2105fa8cf38879444a1029bfaa7f0b2f27717a
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/ivy/general.py
|
VedPatwardhan/ivy
|
7b2105fa8cf38879444a1029bfaa7f0b2f27717a
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/ivy/general.py
|
VedPatwardhan/ivy
|
7b2105fa8cf38879444a1029bfaa7f0b2f27717a
|
[
"Apache-2.0"
] | null | null | null |
"""Collection of general Ivy functions."""
# global
import gc
import math
import einops
import inspect
import numpy as np
from numbers import Number
from typing import Callable, Any, Union, List, Tuple, Dict, Iterable, Optional
# local
import ivy
from ivy.functional.ivy.device import dev
from ivy.backend_handler import current_backend as _cur_backend
from ivy.func_wrapper import (
infer_device,
inputs_to_native_arrays,
to_native_arrays_and_back,
handle_out_argument,
)
FN_CACHE = dict()
INF = float("inf")
TIMEOUT = 15.0
TMP_DIR = "/tmp"
def get_referrers_recursive(
item, depth=0, max_depth=None, seen_set=None, local_set=None
):
"""Summary.
Parameters
----------
item
depth
(Default value = 0)
max_depth
(Default value = None)
seen_set
(Default value = None)
local_set
(Default value = None)
"""
seen_set = ivy.default(seen_set, set())
local_set = ivy.default(local_set, set())
ret_cont = ivy.Container(
repr=str(item).replace(" ", ""),
alphabetical_keys=False,
keyword_color_dict={"repr": "magenta"},
)
referrers = [
ref
for ref in gc.get_referrers(item)
if not (
isinstance(ref, dict)
and min([k in ref for k in ["depth", "max_depth", "seen_set", "local_set"]])
)
]
local_set.add(str(id(referrers)))
for ref in referrers:
ref_id = str(id(ref))
if ref_id in local_set or hasattr(ref, "cell_contents"):
continue
seen = ref_id in seen_set
seen_set.add(ref_id)
refs_rec = lambda: get_referrers_recursive(
ref, depth + 1, max_depth, seen_set, local_set
)
this_repr = "tracked" if seen else str(ref).replace(" ", "")
if not seen and (not max_depth or depth < max_depth):
val = ivy.Container(
repr=this_repr,
alphabetical_keys=False,
keyword_color_dict={"repr": "magenta"},
)
refs = refs_rec()
for k, v in refs.items():
val[k] = v
else:
val = this_repr
ret_cont[str(ref_id)] = val
return ret_cont
def is_native_array(x: Any, exclusive: bool = False) -> bool:
"""Determines whether the input x is a Native Array.
Parameters
----------
x
The input to check
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is an array.
"""
try:
return _cur_backend(x).is_native_array(x, exclusive)
except ValueError:
return False
def is_ivy_array(x: Any, exclusive: bool = False) -> bool:
"""Determines whether the input x is an Ivy Array.
Parameters
----------
x
The input to check
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is an array.
Examples
--------
>>> x = [0., 1., 2.]
>>> ivy.is_ivy_array(x)
False
>>> x = ivy.array([0., 1., 2.])
>>> ivy.is_ivy_array(x)
True
"""
return isinstance(x, ivy.Array) and ivy.is_native_array(x.data, exclusive)
def is_array(x: Any, exclusive: bool = False) -> bool:
"""Determines whether the input x is either an Ivy Array or a Native Array.
Parameters
----------
x
The input to check
exclusive
Whether to check if the data type is exclusively an array, rather than a
variable or traced array.
Returns
-------
ret
Boolean, whether or not x is an array.
"""
return ivy.is_ivy_array(x, exclusive) or ivy.is_native_array(x, exclusive)
def is_ivy_container(x: Any) -> bool:
"""Determines whether the input x is an Ivy Container.
Parameters
----------
x
The input to check
Returns
-------
ret
Boolean, whether or not x is an ivy container.
"""
return isinstance(x, ivy.Container)
@to_native_arrays_and_back
@handle_out_argument
def copy_array(
x: Union[ivy.Array, ivy.NativeArray]
) -> Union[ivy.Array, ivy.NativeArray]:
"""Copy an array.
Returns
-------
ret
a copy of the input array ``x``.
Examples
--------
>>> x = ivy.array([-1, 0, 1])
>>> y = ivy.copy_array(x)
>>> print(y)
ivy.array([-1, 0, 1])
"""
return _cur_backend(x).copy_array(x)
@inputs_to_native_arrays
def array_equal(
x0: Union[ivy.Array, ivy.NativeArray], x1: Union[ivy.Array, ivy.NativeArray]
) -> bool:
"""Determines whether two input arrays are equal across all elements.
Parameters
----------
x0
The first input array to compare.
x1
The second input array to compare.
dtype
array data type
Returns
-------
ret
Boolean, whether or not the input arrays are equal across all elements.
Examples
--------
>>> x = ivy.array([1,0,1])
>>> y = ivy.array([1,0,-1])
>>> z = ivy.array_equal(x,y)
>>> print(z)
False
>>> a = ivy.array([1, 2])
>>> b = ivy.array([1, 2])
>>> c = ivy.array_equal(a,b)
>>> print(c)
True
>>> i = ivy.array([1, 2])
>>> j = ivy.array([1, 2, 3])
>>> k = ivy.array_equal(i,j)
>>> print(k)
False
"""
return _cur_backend(x0).array_equal(x0, x1)
@inputs_to_native_arrays
def arrays_equal(xs: List[Union[ivy.Array, ivy.NativeArray]]) -> bool:
"""Determines whether input arrays are equal across all elements.
Parameters
----------
xs
Sequence of arrays to compare for equality
dtype
list data type
Returns
-------
ret
Boolean, whether or not all of the input arrays are equal across all elements.
Functional Examples
-------------------
With :code:`ivy.Array` input:
>>> i = ivy.array([1, 2])
>>> j = ivy.arrays_equal([i])
>>> print(j)
True
>>> x = ivy.array([0, 1, 2])
>>> y = ivy.array([1, 0, 2])
>>> z = ivy.array([0, 1, 2])
>>> w = ivy.arrays_equal([x, y, z])
>>> print(w)
False
>>> a = ivy.array([-1, 0, 1])
>>> b = ivy.array([-1, 0, 1])
>>> c = ivy.array([-1, 0, 1])
>>> d = ivy.arrays_equal([a, b, c])
>>> print(d)
True
>>> x = ivy.array([0.1, 1.1])
>>> y = ivy.array([0.1, 1.1, 2.1])
>>> z = ivy.array([0.1, 1.1])
>>> w = ivy.arrays_equal([x, y, z])
>>> print(w)
False
With :code:`ivy.NativeArray` input:
>>> m = ivy.native_array([1.1, 0.2, 1.3])
>>> n = ivy.native_array([1.1, 0.2, 1.4])
>>> o = ivy.arrays_equal([m, n])
>>> print(o)
False
>>> a = ivy.native_array([1, 2, 3, 0, -1])
>>> b = ivy.array([1, 2, 3, 0, -1])
>>> c = ivy.arrays_equal([a,b])
>>> print(c)
True
>>> a = ivy.native_array([1, 2, 3, 0, -1])
>>> b = ivy.array([1, 2, 3, 0, -2])
>>> c = ivy.arrays_equal([a,b])
>>> print(c)
False
With :code:`ivy.Container` input:
>>> r = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> s = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> t = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([6., 7., 8.]))
>>> print(ivy.arrays_equal([r,s,t]))
{
a: true,
b: false
}
>>> x = ivy.Container(a=ivy.array([0, 1, 2]), b=ivy.array([3, 4, 5]))
>>> y = ivy.array([0,1,2])
>>> z = ivy.arrays_equal([x,y])
>>> print(z)
{
a: true,
b: false
}
"""
x0 = xs[0]
for x in xs[1:]:
if not array_equal(x0, x):
return False
return True
@to_native_arrays_and_back
def all_equal(
*xs: Iterable[Any], equality_matrix: bool = False
) -> Union[bool, Union[ivy.Array, ivy.NativeArray]]:
"""Determines whether the inputs are all equal.
Parameters
----------
xs
inputs to compare.
equality_matrix
Whether to return a matrix of equalities comparing each input with every other.
Default is False.
Returns
-------
ret
Boolean, whether or not the inputs are equal, or matrix array of booleans if
equality_matrix=True is set.
"""
equality_fn = ivy.array_equal if ivy.is_native_array(xs[0]) else lambda a, b: a == b
if equality_matrix:
num_arrays = len(xs)
mat = [[None for _ in range(num_arrays)] for _ in range(num_arrays)]
for i, xa in enumerate(xs):
for j_, xb in enumerate(xs[i:]):
j = j_ + i
res = equality_fn(xa, xb)
if ivy.is_native_array(res):
# noinspection PyTypeChecker
res = ivy.to_scalar(res)
# noinspection PyTypeChecker
mat[i][j] = res
# noinspection PyTypeChecker
mat[j][i] = res
return ivy.array(mat)
x0 = xs[0]
for x in xs[1:]:
if not equality_fn(x0, x):
return False
return True
@inputs_to_native_arrays
def to_numpy(x: Union[ivy.Array, ivy.NativeArray]) -> np.ndarray:
"""Converts an array into a numpy array.
Parameters
----------
x
input array
Returns
-------
ret
a numpy array copying all the element of the array ``x``.
Examples
--------
With :code:`ivy.Array` input:
>>> x = ivy.array([-1, 0, 1])
>>> y = ivy.to_numpy(x)
>>> print(y)
np.ndarray([-1 0 1])
>>> print(type(y))
<class 'numpy.ndarray'>
>>> x = ivy.array([[-1, 0, 1],[-1, 0, 1], [1,0,-1]])
>>> y = ivy.to_numpy(x)
>>> print(y)
np.ndarray([[-1 0 1] \
[-1 0 1] \
[1 0 -1]])
>>> print(type(y))
<class 'numpy.ndarray'>
>>> x = ivy.array([[[-1, 0, 1], [1, 0, -1]], [[1, -1, 0], [1, 0, -1]]])
>>> y = ivy.to_numpy(x)
>>> print(y)
np.ndarray([[[-1 0 1] \
[1 0 -1]] \
[[1 -1 0] \
[1 0 -1]]])
>>> print(type(y))
<class 'numpy.ndarray'>
With :code:`ivy.NativeArray` input:
>>> x = ivy.native_array([-1, 0, 1])
>>> y = ivy.to_numpy(x)
>>> print(y)
np.ndarray([-1 0 1])
>>> print(type(y))
<class 'numpy.ndarray'>
>>> x = ivy.native_array([[-1, 0, 1],[-1, 0, 1], [1,0,-1]])
>>> y = ivy.to_numpy(x)
>>> print(y)
np.ndarray([[-1 0 1] \
[-1 0 1] \
[1 0 -1]])
>>> print(type(y))
<class 'numpy.ndarray'>
>>> x = ivy.native_array([[[-1, 0, 1], [1, 0, -1]], [[1, -1, 0], [1, 0, -1]]])
>>> y = ivy.to_numpy(x)
>>> print(y)
np.ndarray([[[-1 0 1] \
[1 0 -1]] \
[[1 -1 0] \
[1 0 -1]]])
>>> print(type(y))
<class 'numpy.ndarray'>
With a mix of :code:`ivy.Container` and :code:`ivy.NativeArray` input:
>>> x = ivy.Container(ivy.native_array([-1, 0, 1]))
>>> y = ivy.to_numpy(x)
>>> print(y)
{
np.ndarray([-1 0 1])
}
>>> x = ivy.Container(ivy.native_array([[-1, 0, 1],[-1, 0, 1], [1,0,-1]]))
>>> y = ivy.to_numpy(x)
>>> print(y)
{
np.ndarray([[-1 0 1] \
[-1 0 1] \
[1 0 -1]])
}
>>> x = ivy.Container(ivy.native_array([[[-1, 0, 1], [1, 0, -1]],
... [[1, -1, 0], [1, 0, -1]]]))
>>> y = ivy.to_numpy(x)
>>> print(y)
{
np.ndarray([[[-1 0 1] \
[1 0 -1]] \
[[1 -1 0] \
[1 0 -1]]])
}
With a mix of :code:`ivy.Container` and :code:`ivy.Array` input:
>>> x = ivy.Container(ivy.array([-1, 0, 1]))
>>> y = ivy.to_numpy(x)
>>> print(y)
{
np.ndarray([-1 0 1])
}
>>> x = ivy.Container(ivy.array([[-1, 0, 1],[-1, 0, 1], [1,0,-1]]))
>>> y = ivy.to_numpy(x)
>>> print(y)
{
np.ndarray([[-1 0 1] \
[-1 0 1] \
[1 0 -1]])
}
>>> x = ivy.Container(ivy.array([[[-1, 0, 1], [1, 0, -1]],
... [[1, -1, 0], [1, 0, -1]]]))
>>> y = ivy.to_numpy(x)
>>> print(y)
{
np.ndarray([[[-1 0 1] \
[1 0 -1]] \
[[1 -1 0] \
[1 0 -1]]])
}
"""
return _cur_backend(x).to_numpy(x)
@inputs_to_native_arrays
def to_scalar(x: Union[ivy.Array, ivy.NativeArray]) -> Number:
"""Converts an array with a single element into a scalar.
Parameters
----------
x
Input array with a single element.
Returns
-------
ret
a scalar copying the element of the array ``x``.
Examples
--------
>>> x = ivy.array([-1])
>>> y = ivy.to_scalar(x)
>>> print(y)
-1
>>> print(ivy.is_int_dtype(y))
True
"""
return _cur_backend(x).to_scalar(x)
@inputs_to_native_arrays
def to_list(x: Union[ivy.Array, ivy.NativeArray]) -> List:
"""Creates a (possibly nested) list from input array.
Parameters
----------
x
Input array.
Returns
-------
ret
A list representation of the input array ``x``.
Examples
--------
>>> x = ivy.array([-1, 0, 1])
>>> y = ivy.to_list(x)
>>> print(y)
[-1, 0, 1]
>>> print(isinstance(y, list))
True
"""
return _cur_backend(x).to_list(x)
def clip_vector_norm(
x: Union[ivy.Array, ivy.NativeArray],
max_norm: float,
p: float = 2.0,
*,
out: Optional[ivy.Array] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Clips (limits) the vector p-norm of an array.
Parameters
----------
x
array, input array containing elements to clip.
max_norm
float, the maximum value of the array norm.
p
optional float, the p-value for computing the p-norm. Default is 2.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
An array with the vector norm downscaled to the max norm if needed.
"""
norm = ivy.vector_norm(x, keepdims=True, ord=p)
ratio = ivy.stable_divide(max_norm, norm)
if ratio < 1:
ret = ratio * x
else:
ret = x
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
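# --- Hedged numeric sketch (not part of the original API) --------------------
# The clipping rule above in plain NumPy: if the p-norm of x exceeds max_norm,
# the whole vector is rescaled by max_norm / ||x||_p, otherwise it is returned
# unchanged (the stable-divide guard is omitted here for clarity).
def _clip_vector_norm_numpy(x_np, max_norm, p=2.0):
    norm = np.linalg.norm(x_np.ravel(), ord=p)
    return x_np * (max_norm / norm) if norm > max_norm else x_np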
@to_native_arrays_and_back
def clip_matrix_norm(
x: Union[ivy.Array, ivy.NativeArray],
max_norm: float,
p: float = 2.0,
out: Optional[ivy.Array] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Clips (limits) the matrix norm of an array.
Parameters
----------
x
Input array containing elements to clip.
max_norm
The maximum value of the array norm.
p
The p-value for computing the p-norm. Default is 2.
Returns
-------
ret
An array with the matrix norm downscaled to the max norm if needed.
"""
norms = ivy.matrix_norm(x, p, keepdims=True)
ratios = ivy.maximum(ivy.stable_divide(max_norm, norms), 1.0)
return ivy.multiply(ratios, x, out=out)
@to_native_arrays_and_back
@handle_out_argument
def floormod(
x: Union[ivy.Array, ivy.NativeArray],
y: Union[ivy.Array, ivy.NativeArray],
*,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Returns element-wise remainder of division.
Parameters
----------
x
array, input to floormod
y
array, denominator input for floormod.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
An array of the same shape and type as x, with the elements floor modded.
"""
return _cur_backend(x).floormod(x, y, out=out)
@to_native_arrays_and_back
def unstack(
x: Union[ivy.Array, ivy.NativeArray], axis: int, keepdims: bool = False
) -> Union[ivy.Array, ivy.NativeArray]:
"""Unpacks the given dimension of a rank-R array into rank-(R-1) arrays.
Parameters
----------
x
Input array to unstack.
axis
Axis for which to unpack the array.
keepdims
Whether to keep dimension 1 in the unstack dimensions. Default is False.
Returns
-------
ret
List of arrays, unpacked along specified dimensions.
"""
return _cur_backend(x).unstack(x, axis, keepdims)
@to_native_arrays_and_back
def fourier_encode(
x: Union[ivy.Array, ivy.NativeArray],
max_freq: Union[float, Union[ivy.Array, ivy.NativeArray]],
num_bands: int = 4,
linear: bool = False,
concat: bool = True,
flatten: bool = False,
) -> Union[ivy.Array, ivy.NativeArray, Tuple]:
"""Pads an array with fourier encodings.
Parameters
----------
x
Input array to encode.
max_freq
The maximum frequency of the encoding.
num_bands
The number of frequency bands for the encoding. Default is 4.
linear
Whether to space the frequency bands linearly as opposed to geometrically.
Default is False.
concat
        Whether to concatenate the position, sin and cos values, or return separately.
Default is True.
flatten
Whether to flatten the position dimension into the batch dimension. Default is
False.
Returns
-------
ret
New array with the final dimension expanded, and the encodings stored in this
channel.
"""
x_in = x
dim = x.shape[-1]
x = ivy.expand_dims(x, -1)
orig_x = x
if linear:
scales = ivy.linspace(1.0, max_freq / 2, num_bands, device=dev(x))
else:
if ivy.backend == "torch" and isinstance(max_freq, float):
scales = ivy.logspace(
0.0,
ivy.log(ivy.array(max_freq / 2)) / math.log(10),
num_bands,
base=10,
device=dev(x),
)
else:
scales = ivy.logspace(
0.0,
ivy.log(max_freq / 2) / math.log(10),
num_bands,
base=10,
device=dev(x),
)
scales = ivy.astype(scales, ivy.dtype(x))
scales = scales[(*((None,) * (len(x.shape) - len(scales.shape))), Ellipsis)]
x = x * scales * math.pi
sin_x = ivy.sin(x)
cos_x = ivy.cos(x)
if flatten:
orig_x = x_in
sin_x = ivy.reshape(sin_x, [-1, num_bands * dim])
cos_x = ivy.reshape(cos_x, [-1, num_bands * dim])
if concat:
return ivy.concat([orig_x, sin_x, cos_x], -1)
return sin_x, cos_x
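# --- Hedged numeric sketch (not part of the original API) --------------------
# The core of the encoding above for a single scalar position, in plain NumPy
# and using the linear band spacing: the position is multiplied by a set of
# frequency bands, and the sin/cos of the result are appended to the raw value.
def _fourier_encode_scalar_numpy(pos, max_freq, num_bands=4):
    scales = np.linspace(1.0, max_freq / 2, num_bands)
    angles = pos * scales * np.pi
    return np.concatenate(([pos], np.sin(angles), np.cos(angles)))  # length 1 + 2 * num_bands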
@inputs_to_native_arrays
def value_is_nan(
x: Union[ivy.Array, ivy.NativeArray, Number], include_infs: bool = True
) -> bool:
"""Determine whether the single valued array or scalar is of nan type.
Parameters
----------
x
        The input array or scalar to check.
include_infs
Whether to include infs and -infs in the check. Default is True.
Returns
-------
ret
Boolean as to whether the input value is a nan or not.
"""
x_scalar = ivy.to_scalar(x) if ivy.is_native_array(x) else x
if not x_scalar == x_scalar:
return True
    if include_infs and (x_scalar == INF or x_scalar == -INF):
return True
return False
@inputs_to_native_arrays
def has_nans(x: Union[ivy.Array, ivy.NativeArray], include_infs: bool = True) -> bool:
"""Determine whether the array contains any nans, as well as infs or -infs if
specified.
Parameters
----------
x
Input array.
include_infs
Whether to include infs and -infs in the check. Default is True.
Returns
-------
ret
Boolean as to whether the array contains nans.
"""
return value_is_nan(ivy.sum(x), include_infs)
def exists(x: Any) -> bool:
"""Simple check as to whether the input is None or not.
Parameters
----------
x
Input to check.
Returns
-------
ret
True if x is not None, else False.
"""
return x is not None
def default(
x: Any,
default_val: Any,
catch_exceptions: bool = False,
rev: bool = False,
with_callable: bool = False,
) -> Any:
"""Returns x provided it exists (is not None), else returns default value.
Parameters
----------
x
Input which may or may not exist (be None).
default_val
The default value.
catch_exceptions
Whether to catch exceptions from callable x. Default is False.
rev
Whether to reverse the input x and default_val. Default is False.
with_callable
Whether either of the arguments might be callable functions. Default is False.
Returns
-------
ret
x if x exists (is not None), else default.
"""
with_callable = catch_exceptions or with_callable
if rev:
tmp = x
x = default_val
default_val = tmp
if with_callable:
x_callable = callable(x)
default_callable = callable(default_val)
else:
x_callable = False
default_callable = False
if catch_exceptions:
# noinspection PyBroadException
try:
x = x() if x_callable else x
except Exception:
return default_val() if default_callable else default_val
else:
x = x() if x_callable else x
return x if exists(x) else default_val() if default_callable else default_val
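# --- Illustrative usage (not part of the original API) -----------------------
# `default` is typically used to fall back to a value when an optional argument
# was left as None; with catch_exceptions=True a failing callable also falls
# back to the default instead of raising.
def _example_default():
    assert default(None, 10) == 10
    assert default(5, 10) == 5
    assert default(lambda: 1 / 0, 10, catch_exceptions=True) == 10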
def shape_to_tuple(shape: Union[int, Tuple[int], List[int]]):
"""Returns a tuple representation of the input shape.
Parameters
----------
shape
The shape input to convert to tuple representation.
Returns
-------
The shape in tuple representation
"""
if isinstance(shape, int):
return (shape,)
else:
return tuple(shape)
def try_else_none(fn):
"""Try and return the function, otherwise return None if an exception was raised
during function execution.
Parameters
----------
fn
Function to try and call and return.
"""
return default(fn, None, True)
def arg_names(receiver):
"""Get the expected keyword arguments for a function or class constructor.
Parameters
----------
receiver
"""
return list(inspect.signature(receiver).parameters.keys())
def match_kwargs(kwargs, *receivers, allow_duplicates=False):
"""Match keyword arguments to either class or function receivers.
Parameters
----------
kwargs
Keyword arguments to match.
receivers
Functions and/or classes to match the keyword arguments to.
allow_duplicates
Whether to allow one keyword argument to be used for multiple receivers.
Default is False.
Returns
-------
ret
Sequence of keyword arguments split as best as possible.
"""
split_kwargs = list()
for receiver in receivers:
expected_kwargs = arg_names(receiver)
found_kwargs = {k: v for k, v in kwargs.items() if k in expected_kwargs}
if not allow_duplicates:
for k in found_kwargs.keys():
del kwargs[k]
split_kwargs.append(found_kwargs)
if len(split_kwargs) == 1:
return split_kwargs[0]
return split_kwargs
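# --- Illustrative usage (not part of the original API) -----------------------
# Splitting one kwargs dict between two receivers with disjoint signatures; the
# receiver functions below are made up for the example.
def _example_match_kwargs():
    def conv(filters, kernel_size):
        return filters, kernel_size

    def dense(units):
        return units

    kwargs = {"filters": 16, "kernel_size": 3, "units": 10}
    conv_kwargs, dense_kwargs = match_kwargs(kwargs, conv, dense)
    return conv_kwargs, dense_kwargs  # ({'filters': 16, 'kernel_size': 3}, {'units': 10})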
def cache_fn(func: Callable) -> Callable:
"""Wrap a function, such that when cache=True is passed as an argument, a previously
cached output is returned.
Parameters
----------
func
The function to wrap, whose output should be cached for later.
Returns
-------
ret
The newly cache wrapped function.
"""
global FN_CACHE
if func not in FN_CACHE:
FN_CACHE[func] = dict()
def cached_fn(*args, **kwargs):
"""Summary.
Parameters
----------
*args
**kwargs
"""
key = "".join(
[str(i) + ", " for i in args]
+ [" kw, "]
+ [str(i) + ", " for i in sorted(kwargs.items())]
)
cache = FN_CACHE[func]
if key in cache:
return cache[key]
ret = func(*args, **kwargs)
cache[key] = ret
return ret
return cached_fn
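# --- Illustrative usage (not part of the original API) -----------------------
# Wrapping an expensive pure function so that repeated calls with the same
# arguments reuse the first result; the wrapped function below is made up.
def _example_cache_fn():
    calls = []

    def slow_square(n):
        calls.append(n)
        return n * n

    cached_square = cache_fn(slow_square)
    assert cached_square(4) == 16
    assert cached_square(4) == 16
    assert calls == [4]  # the underlying function only ran once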
def current_backend_str() -> Union[str, None]:
"""Summary.
Returns
-------
ret
The framework string.
"""
fw = _cur_backend()
if fw is None:
return None
return fw.current_backend_str()
@to_native_arrays_and_back
def einops_rearrange(
x: Union[ivy.Array, ivy.NativeArray],
pattern: str,
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> ivy.Array:
"""Perform einops rearrange operation on input array x.
Parameters
----------
x
Input array to be re-arranged.
pattern
Rearrangement pattern.
axes_lengths
Any additional specifications for dimensions.
Returns
-------
ret
New array with einops.rearrange having been applied.
"""
ret = einops.rearrange(x, pattern, **axes_lengths)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@to_native_arrays_and_back
def einops_reduce(
x: Union[ivy.Array, ivy.NativeArray],
pattern: str,
reduction: Union[str, Callable],
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> ivy.Array:
"""Perform einops reduce operation on input array x.
Parameters
----------
x
Input array to be reduced.
pattern
Reduction pattern.
reduction
One of available reductions ('min', 'max', 'sum', 'mean', 'prod'), or callable.
axes_lengths
Any additional specifications for dimensions.
Returns
-------
ret
New array with einops.reduce having been applied.
"""
ret = einops.reduce(x, pattern, reduction, **axes_lengths)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
@to_native_arrays_and_back
def einops_repeat(
x: Union[ivy.Array, ivy.NativeArray],
pattern: str,
out: Optional[ivy.Array] = None,
**axes_lengths: Dict[str, int],
) -> Union[ivy.Array, ivy.NativeArray]:
"""Perform einops repeat operation on input array x.
Parameters
----------
x
Input array to be repeated.
pattern
Rearrangement pattern.
axes_lengths
Any additional specifications for dimensions.
Returns
-------
ret
New array with einops.repeat having been applied.
"""
ret = einops.repeat(x, pattern, **axes_lengths)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def get_min_denominator() -> float:
"""Get the global minimum denominator used by ivy for numerically stable division.
Returns
-------
ret
A float number of the global minimum denominator.
Examples
--------
>>> x = ivy.get_min_denominator()
>>> print(x)
1e-12
"""
return ivy._MIN_DENOMINATOR
def set_min_denominator(val: float) -> None:
"""Set the global minimum denominator used by ivy for numerically stable division.
Parameters
----------
val
The new value to set the minimum denominator to.
"""
ivy._MIN_DENOMINATOR = val
def get_min_base() -> float:
"""Get the global minimum base used by ivy for numerically stable power raising."""
# noinspection PyProtectedMember
return ivy._MIN_BASE
def set_min_base(val: float) -> None:
"""Set the global minimum base used by ivy for numerically stable power raising.
Parameters
----------
val
The new value to set the minimum base to.
"""
ivy._MIN_BASE = val
def stable_divide(
numerator: Any, denominator: Any, min_denominator: float = None
) -> Any:
"""Divide the numerator by the denominator, with min denominator added to the
denominator for numerical stability.
Parameters
----------
numerator
The numerator of the division.
denominator
The denominator of the division.
min_denominator
The minimum denominator to use, use global ivy._MIN_DENOMINATOR by default.
Returns
-------
ret
The new item following the numerically stable division.
"""
# noinspection PyProtectedMember
return numerator / (denominator + default(min_denominator, ivy._MIN_DENOMINATOR))
def stable_pow(base: Any, exponent: Any, min_base: float = None) -> Any:
"""Raise the base by the power, with MIN_BASE added to the base when exponent > 1
for numerical stability.
Parameters
----------
base
        The base of the power operation.
    exponent
        The exponent of the power operation.
min_base
The minimum base to use, use global ivy._MIN_BASE by default.
Returns
-------
ret
        The new item following the numerically stable power operation.
"""
# noinspection PyProtectedMember
return (base + default(min_base, ivy._MIN_BASE)) ** exponent
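# --- Hedged numeric sketch (not part of the original API) --------------------
# What the stability terms do in plain arithmetic: with a small min_denominator
# a zero denominator yields a huge-but-finite quotient instead of raising, and
# stable_pow nudges the base away from exact zero before exponentiation.
def _example_stable_ops():
    big = stable_divide(1.0, 0.0, min_denominator=1e-12)  # ~1e12, no ZeroDivisionError
    small = stable_pow(0.0, 2.0, min_base=1e-5)           # (0 + 1e-5) ** 2, ~1e-10
    return big, small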
def get_all_arrays_in_memory():
"""Gets all arrays which are currently alive."""
all_arrays = list()
for obj in gc.get_objects():
# noinspection PyBroadException
try:
if ivy.is_native_array(obj):
all_arrays.append(obj)
except Exception:
pass
return all_arrays
def num_arrays_in_memory():
"""Returns the number of arrays which are currently alive."""
return len(get_all_arrays_in_memory())
def print_all_arrays_in_memory():
"""Prints all arrays which are currently alive."""
for arr in get_all_arrays_in_memory():
print(type(arr), arr.shape)
def set_queue_timeout(timeout):
"""Set the global queue timeout values (in seconds). Default value without this
function being called is 10 seconds.
Parameters
----------
timeout
The timeout to set in seconds.
"""
global TIMEOUT
TIMEOUT = timeout
def queue_timeout():
"""Get the global queue timeout values (in seconds).
Default value without this function being called is 10 seconds.
"""
global TIMEOUT
return TIMEOUT
def tmp_dir():
""""""
return TMP_DIR
def set_tmp_dir(tmp_dr):
"""Set the directory for saving temporary files.
Parameters
----------
tmp_dr
"""
global TMP_DIR
TMP_DIR = tmp_dr
def container_types():
"""Summary.
Returns
-------
ret
a key-value structure, and exposes public methods .keys(), .values() and
        .items().
"""
# noinspection PyBroadException
try:
return _cur_backend().container_types()
except ValueError:
return []
def inplace_arrays_supported(f=None):
"""Determine whether inplace arrays are supported for the current backend framework.
Parameters
----------
f
(Default value = None)
Returns
-------
ret
Boolean, whether or not inplace arrays are supported.
"""
return _cur_backend().inplace_arrays_supported()
def inplace_variables_supported(f=None):
"""Determine whether inplace variables are supported for the current backend
framework.
Parameters
----------
f
(Default value = None)
Returns
-------
ret
Boolean, whether or not inplace variables are supported.
"""
return _cur_backend().inplace_variables_supported()
@inputs_to_native_arrays
def supports_inplace(x):
"""Determine whether inplace operations are supported for the data type of x.
Parameters
----------
x
Input variable or array to check for inplace support for.
Returns
-------
ret
Boolean, whether or not inplace operations are supported for x.
"""
if ivy.is_variable(x):
return ivy.inplace_variables_supported()
elif ivy.is_native_array(x):
return ivy.inplace_arrays_supported()
raise Exception("Input x must be either a variable or an array.")
@inputs_to_native_arrays
def assert_supports_inplace(x):
"""Asserts that inplace operations are supported for x, else raises exception.
Parameters
----------
x
Input variable or array to check for inplace support for.
Returns
-------
ret
True if support, raises exception otherwise
"""
if not ivy.supports_inplace(x):
raise Exception(
"Inplace operations are not supported {} types with {} backend".format(
type(x), ivy.current_backend_str()
)
)
return True
def inplace_update(
x: Union[ivy.Array, ivy.NativeArray],
val: Union[ivy.Array, ivy.NativeArray],
ensure_in_backend: bool = False,
) -> ivy.Array:
"""Perform in-place update for the input array. This will always be performed on
    ivy.Array instances passed in the input, and will also be performed on the native
array classes in the backend when the backend supports this. If the backend does
not natively support inplace updates, and x is an ivy.NativeArray instance,
then an exception will be thrown.
Parameters
----------
x
The variable to update.
val
The array to update the variable with.
ensure_in_backend
Whether or not to ensure that the `ivy.NativeArray` is also inplace updated.
In cases where it should be, backends which do not natively support inplace
updates will raise an exception.
Returns
-------
ret
The array following the in-place update.
"""
return _cur_backend(x).inplace_update(x, val, ensure_in_backend)
def inplace_decrement(x, val):
"""Perform in-place decrement for the input array.
Parameters
----------
x
The array to decrement.
val
The array to decrement the variable with.
Returns
-------
ret
The array following the in-place decrement.
"""
return _cur_backend(x).inplace_decrement(x, val)
def inplace_increment(x, val):
"""Perform in-place increment for the input array.
Parameters
----------
x
The array to increment.
val
The array to increment the variable with.
Returns
-------
ret
The array following the in-place increment.
"""
return _cur_backend(x).inplace_increment(x, val)
@to_native_arrays_and_back
@handle_out_argument
def cumsum(
x: Union[ivy.Array, ivy.NativeArray],
axis: int = 0,
*,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Returns the cumulative sum of the elements along a given axis.
Parameters
----------
x
Input array.
axis
int, Axis along which the cumulative sum is computed. By default 0.
out
optional output array, for writing the result to.
Returns
-------
ret
Input array with cumulatively summed elements along axis
"""
return _cur_backend(x).cumsum(x, axis, out=out)
@to_native_arrays_and_back
@handle_out_argument
def cumprod(
x: Union[ivy.Array, ivy.NativeArray],
axis: int = 0,
exclusive: Optional[bool] = False,
*,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Returns the cumulative product of the elements along a given axis.
Parameters
----------
x
Input array.
axis
int , axis along which the cumulative product is computed. By default 0.
exclusive
optional bool, Whether to perform the cumprod exclusively. Defaults is False.
Returns
-------
ret
Input array with cumulatively multiplied elements along axis.
"""
return _cur_backend(x).cumprod(x, axis, exclusive, out=out)
@to_native_arrays_and_back
@handle_out_argument
@infer_device
def scatter_flat(
indices: Union[ivy.Array, ivy.NativeArray],
updates: Union[ivy.Array, ivy.NativeArray],
size: Optional[int] = None,
tensor: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
reduction: str = "sum",
*,
device: Union[ivy.Device, ivy.NativeDevice] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Scatter flat updates into a new flat array according to flat indices.
Parameters
----------
indices
Indices for the new values to occupy.
updates
Values for the new array to hold.
size
The size of the result.
tensor
The tensor in which to scatter the results, default is None, in which case the
        size is used to scatter into a zeros array.
reduction
The reduction method for the scatter, one of 'sum', 'min', 'max' or 'replace'
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as
updates if None.
Returns
-------
ret
New array of given shape, with the values scattered at the indices.
"""
return _cur_backend(indices).scatter_flat(
indices, updates, size, tensor, reduction, device=device
)
@to_native_arrays_and_back
@handle_out_argument
@infer_device
def scatter_nd(
indices: Union[ivy.Array, ivy.NativeArray],
updates: Union[ivy.Array, ivy.NativeArray],
shape: Optional[Iterable[int]] = None,
tensor: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
reduction: str = "sum",
*,
device: Union[ivy.Device, ivy.NativeDevice] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Scatter updates into a new array according to indices.
Parameters
----------
indices
Indices for the new values to occupy.
updates
Values for the new array to hold.
shape
The shape of the result. Default is None, in which case tensor argument must be
provided.
tensor
The tensor in which to scatter the results, default is None, in which case the
        shape arg is used to scatter into a zeros array.
reduction
The reduction method for the scatter, one of 'sum', 'min', 'max' or 'replace'
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as
updates if None.
Returns
-------
ret
New array of given shape, with the values scattered at the indices.
"""
return _cur_backend(indices).scatter_nd(
indices, updates, shape, tensor, reduction, device=device
)
@to_native_arrays_and_back
@handle_out_argument
@infer_device
def gather(
params: Union[ivy.Array, ivy.NativeArray],
indices: Union[ivy.Array, ivy.NativeArray],
axis: int = -1,
*,
device: Union[ivy.Device, ivy.NativeDevice] = None,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Gather slices from params at axis according to indices.
Parameters
----------
params
array, the array from which to gather values.
indices
array, index array.
axis
optional int, the axis from which to gather from. Default is -1.
device
optional ivy.Device, device on which to create the array 'cuda:0', 'cuda:1',
'cpu' etc. Same as x if None.
out
optional output array, for writing the result to.
Returns
-------
ret
New array with the values gathered at the specified indices along the specified
axis.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :code:`ivy.Container`
instances in place of any of the arguments.
Functional Examples
-------------------
With :code:`ivy.Array` input:
>>> x = ivy.array([0., 1., 2.])
>>> y = ivy.array([0, 1])
>>> print(ivy.gather(x, y))
ivy.array([0., 1.])
>>> x = ivy.array([[0., 1., 2.], \
[3., 4., 5.]])
>>> y = ivy.array([[0, 1], \
[1, 2]])
>>> z = ivy.array([[0., 0.], \
[0., 0.]])
>>> ivy.gather(x, y, device='cpu', out=z)
>>> print(z)
ivy.array([[0., 1.],
[4., 5.]])
>>> x = ivy.array([[[0., 1.], [2., 3.]], \
[[4., 5.], [6., 7.]], \
[[8., 9.], [10., 11.]]])
>>> y = ivy.array([[[0, 1]], \
[[1, 2]], \
[[2, 0]]])
>>> ivy.gather(x, y, axis=0, out=x)
>>> print(x)
ivy.array([[[ 0., 5.],
[ 2., 7.]],
[[ 4., 9.],
[ 6., 11.]],
[[ 8., 1.],
[10., 3.]]])
With :code:`ivy.NativeArray` input:
>>> x = ivy.native_array([0., 1., 2.])
>>> y = ivy.native_array([0, 1])
>>> print(ivy.gather(x, y))
ivy.array([0., 1.])
With a mix of :code:`ivy.Array` and :code:`ivy.NativeArray` inputs:
>>> x = ivy.native_array([0., 1., 2.])
>>> y = ivy.array([0, 1])
>>> print(ivy.gather(x, y))
ivy.array([0., 1.])
With a mix of :code:`ivy.Array` and :code:`ivy.Container` inputs:
>>> x = ivy.Container(a = ivy.array([0., 1., 2.]), \
b = ivy.array([4., 5., 6.]))
>>> y = ivy.array([0, 1])
>>> print(ivy.gather(x, y))
{
a: ivy.array([0., 1.]),
b: ivy.array([4., 5.])
}
With :code:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.array([0., 1., 2.]), \
b = ivy.array([4., 5., 6.]))
>>> y = ivy.Container(a = ivy.array([0, 1]), \
b = ivy.array([1, 2]))
>>> print(ivy.gather(x, y))
{
a: ivy.array([0., 1.]),
b: ivy.array([5., 6.])
}
Instance Method Examples
------------------------
Using :code:`ivy.Array` instance method:
>>> x = ivy.array([0., 1., 2.])
>>> y = ivy.array([0, 1])
>>> print(x.gather(y))
ivy.array([0., 1.])
Using :code:`ivy.Container` instance method:
>>> x = ivy.Container(a = ivy.array([0., 1., 2.]), \
b = ivy.array([4., 5., 6.]))
>>> y = ivy.Container(a = ivy.array([0, 1]), \
b = ivy.array([1, 2]))
>>> print(x.gather(y))
{
a: {
a: ivy.array([0., 1.]),
b: ivy.array([1., 2.])
},
b: {
a: ivy.array([4., 5.]),
b: ivy.array([5., 6.])
}
}
"""
return _cur_backend(params).gather(params, indices, axis, device=device, out=out)
@to_native_arrays_and_back
@handle_out_argument
@infer_device
def gather_nd(
params: Union[ivy.Array, ivy.NativeArray],
indices: Union[ivy.Array, ivy.NativeArray],
*,
device: Union[ivy.Device, ivy.NativeDevice] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Gather slices from params into a array with shape specified by indices.
Parameters
----------
params
The array from which to gather values.
indices
Index array.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if
None.
Returns
-------
ret
New array of given shape, with the values gathered at the indices.
"""
return _cur_backend(params).gather_nd(params, indices, device=device)
def multiprocessing(context: str = None):
"""Return backend-specific multiprocessing module.
Parameters
----------
context
The context of the multiprocessing, either fork, forkserver or spawn.
Default is None.
Returns
-------
ret
Multiprocessing module
"""
return _cur_backend().multiprocessing(context)
@to_native_arrays_and_back
@handle_out_argument
def indices_where(
x: Union[ivy.Array, ivy.NativeArray]
) -> Union[ivy.Array, ivy.NativeArray]:
"""Returns indices or true elements in an input boolean array.
Parameters
----------
x
Boolean array, for which indices are desired.
Returns
-------
ret
Indices for where the boolean array is True.
"""
return _cur_backend(x).indices_where(x)
@to_native_arrays_and_back
@handle_out_argument
@infer_device
def one_hot(
indices: Union[ivy.Array, ivy.NativeArray],
depth: int,
*,
device: Union[ivy.Device, ivy.NativeDevice] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Returns a one-hot array.
Parameters
----------
indices
Indices for where the ones should be scattered *[batch_shape, dim]*
depth
Scalar defining the depth of the one-hot dimension.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if
None.
Returns
-------
ret
        Tensor with ones at the positions given by ``indices`` along a new final
        dimension of size ``depth``, and zeros elsewhere.
"""
return _cur_backend(indices).one_hot(indices, depth, device=device)
@inputs_to_native_arrays
def shape(
x: Union[ivy.Array, ivy.NativeArray], as_array: bool = False
) -> Iterable[int]:
"""Returns the shape of the array ``x``.
Parameters
----------
x
Input array to infer the shape of.
as_array
        Whether to return the shape as an array, default False.
Returns
-------
ret
Shape of the array ``x``.
Examples
--------
>>> x = ivy.array([[-1, 0, 1],[1,0,-1]])
>>> y_tuple = ivy.shape(x)
    >>> y_tensor = ivy.shape(x, as_array=True)
>>> print(y_tuple)
(2, 3)
>>> print(y_tensor)
ivy.array([2, 3])
"""
return _cur_backend(x).shape(x, as_array)
@inputs_to_native_arrays
def get_num_dims(x: Union[ivy.Array, ivy.NativeArray], as_array: bool = False) -> int:
"""Returns the number of dimensions of the array x.
Parameters
----------
x
Input array to infer the number of dimensions for.
as_array
        Whether to return the shape as an array, default False.
Returns
-------
ret
        Number of dimensions of the array.
"""
return _cur_backend(x).get_num_dims(x, as_array)
| 24.449283
| 88
| 0.576828
|
9017787151f97b256be69ed090bb5eae68f16749
| 6,320
|
py
|
Python
|
nova/tests/test_hooks.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 7
|
2015-09-22T11:27:16.000Z
|
2015-11-02T12:33:46.000Z
|
nova/tests/test_hooks.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 9
|
2015-05-20T11:20:17.000Z
|
2017-07-27T08:21:33.000Z
|
nova/tests/test_hooks.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 13
|
2015-05-05T09:34:04.000Z
|
2017-11-08T02:03:46.000Z
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for hook customization."""
import stevedore
from nova import hooks
from nova import test
class SampleHookA(object):
name = "a"
def _add_called(self, op, kwargs):
called = kwargs.get('called', None)
if called is not None:
called.append(op + self.name)
def pre(self, *args, **kwargs):
self._add_called("pre", kwargs)
class SampleHookB(SampleHookA):
name = "b"
def post(self, rv, *args, **kwargs):
self._add_called("post", kwargs)
class SampleHookC(SampleHookA):
name = "c"
def pre(self, f, *args, **kwargs):
self._add_called("pre" + f.__name__, kwargs)
def post(self, f, rv, *args, **kwargs):
self._add_called("post" + f.__name__, kwargs)
class SampleHookExceptionPre(SampleHookA):
name = "epre"
exception = Exception()
def pre(self, f, *args, **kwargs):
raise self.exception
class SampleHookExceptionPost(SampleHookA):
name = "epost"
exception = Exception()
def post(self, f, rv, *args, **kwargs):
raise self.exception
class MockEntryPoint(object):
def __init__(self, cls):
self.cls = cls
def load(self):
return self.cls
class MockedHookTestCase(test.BaseHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return []
def setUp(self):
super(MockedHookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
class HookTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookA), SampleHookA, SampleHookA()),
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookB), SampleHookB, SampleHookB()),
]
def setUp(self):
super(HookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
@hooks.add_hook('test_hook')
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['test_hook']
self.assert_has_hook('test_hook', self._hooked)
self.assertEqual(2, len(mgr.extensions))
self.assertEqual(SampleHookA, mgr.extensions[0].plugin)
self.assertEqual(SampleHookB, mgr.extensions[1].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
self.assertEqual(['prea', 'preb', 'postb'], called_order)
class HookTestCaseWithFunction(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('function_hook',
MockEntryPoint(SampleHookC), SampleHookC, SampleHookC()),
]
@hooks.add_hook('function_hook', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['function_hook']
self.assert_has_hook('function_hook', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookC, mgr.extensions[0].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
self.assertEqual(['pre_hookedc', 'post_hookedc'], called_order)
class HookFailPreTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('fail_pre',
MockEntryPoint(SampleHookExceptionPre),
SampleHookExceptionPre, SampleHookExceptionPre()),
]
@hooks.add_hook('fail_pre', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_hook_fail_should_still_return(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['fail_pre']
self.assert_has_hook('fail_pre', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookExceptionPre, mgr.extensions[0].plugin)
def test_hook_fail_should_raise_fatal(self):
self.stubs.Set(SampleHookExceptionPre, 'exception',
hooks.FatalHookException())
self.assertRaises(hooks.FatalHookException,
self._hooked, 1)
class HookFailPostTestCase(MockedHookTestCase):
def _mock_load_plugins(self, iload, *iargs, **ikwargs):
return [
stevedore.extension.Extension('fail_post',
MockEntryPoint(SampleHookExceptionPost),
SampleHookExceptionPost, SampleHookExceptionPost()),
]
@hooks.add_hook('fail_post', pass_function=True)
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_hook_fail_should_still_return(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['fail_post']
self.assert_has_hook('fail_post', self._hooked)
self.assertEqual(1, len(mgr.extensions))
self.assertEqual(SampleHookExceptionPost, mgr.extensions[0].plugin)
def test_hook_fail_should_raise_fatal(self):
self.stubs.Set(SampleHookExceptionPost, 'exception',
hooks.FatalHookException())
self.assertRaises(hooks.FatalHookException,
self._hooked, 1)
| 30.679612
| 78
| 0.65269
|
c81b12b22bcb7e655d8f57ac6b96803b6dfd3b23
| 948
|
py
|
Python
|
lib/tree.py
|
Lucas2012/ProbablisticNeuralProgramedNetwork
|
56e33dae1ec01580bca267011adf6c45c1fd505d
|
[
"Apache-2.0"
] | 3
|
2020-07-12T02:15:27.000Z
|
2021-07-18T06:00:49.000Z
|
lib/tree.py
|
Lucas2012/ProbablisticNeuralProgramedNetwork
|
56e33dae1ec01580bca267011adf6c45c1fd505d
|
[
"Apache-2.0"
] | 1
|
2021-07-18T06:00:10.000Z
|
2021-07-24T11:51:17.000Z
|
lib/tree.py
|
Lucas2012/ProbablisticNeuralProgramedNetwork
|
56e33dae1ec01580bca267011adf6c45c1fd505d
|
[
"Apache-2.0"
] | null | null | null |
# tree object from stanfordnlp/treelstm
class Tree(object):
def __init__(self):
self.parent = None
self.num_children = 0
self.children = list()
def add_child(self, child):
child.parent = self
self.num_children += 1
self.children.append(child)
def size(self):
        if getattr(self, '_size', None) is not None:
return self._size
count = 1
        for i in range(self.num_children):
count += self.children[i].size()
self._size = count
return self._size
def depth(self):
        if getattr(self, '_depth', None) is not None:
return self._depth
count = 0
if self.num_children > 0:
            for i in range(self.num_children):
child_depth = self.children[i].depth()
if child_depth > count:
count = child_depth
count += 1
self._depth = count
return self._depth
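# Illustrative usage (not part of the original file): a root with two leaf
# children has size 3 and depth 1.
def _example_tree():
    root, left, right = Tree(), Tree(), Tree()
    root.add_child(left)
    root.add_child(right)
    return root.size(), root.depth()  # (3, 1)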
| 27.882353
| 54
| 0.543249
|
0e4dfe809517ffd0df1d6dbc145fa6e8b583edf1
| 1,008
|
py
|
Python
|
graph_a2_vs_alpha.py
|
Thefalas/disksMD
|
1f3a0a1814baf1fd8905da2e88d2244de90d14ec
|
[
"MIT"
] | null | null | null |
graph_a2_vs_alpha.py
|
Thefalas/disksMD
|
1f3a0a1814baf1fd8905da2e88d2244de90d14ec
|
[
"MIT"
] | null | null | null |
graph_a2_vs_alpha.py
|
Thefalas/disksMD
|
1f3a0a1814baf1fd8905da2e88d2244de90d14ec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 19:25:21 2018
@author: malopez
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data_folder = "C:/Users/malopez/Desktop/disksMD/data"
output_image = './plot.png'
alphas = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
size_X_inches = 8
size_Y_inches = 6
size_figure = (size_X_inches, size_Y_inches)
fig, ax = plt.subplots(figsize=size_figure, dpi=250)
ax.set_xlim([0,1])
a2s = []
for alpha in alphas:
file_name = data_folder + "/a2_alpha"+str(alpha)+".dat"
    data = pd.read_table(file_name, sep=r'\s+',
header = None, names =['x', 'y'])
    # Average over the last 2000 collisions
mean_a2 = data.iloc[-2000:,1].mean()
a2s.append(mean_a2)
plt.xkcd()
plt.scatter(alphas, a2s, marker='o')
# Fit
plt.plot(np.unique(alphas), np.poly1d(np.polyfit(alphas, a2s, 10))(np.unique(alphas)))
plt.axhline(y=0.0, color='black', linestyle='-')
plt.show()
fig.savefig(output_image)
plt.close()
| 24.585366
| 86
| 0.65377
|
fc13bed2a48225602a9bbbe7473d967a524dfa98
| 1,178
|
py
|
Python
|
deepgmap/post_train_tools/PCA.py
|
koonimaru/DeepGMAP
|
7daac354229fc25fba81649b741921345dc5db05
|
[
"Apache-2.0"
] | 11
|
2018-06-27T11:45:47.000Z
|
2021-07-01T15:32:56.000Z
|
deepgmap/post_train_tools/PCA.py
|
koonimaru/DeepGMAP
|
7daac354229fc25fba81649b741921345dc5db05
|
[
"Apache-2.0"
] | 3
|
2020-01-28T21:45:15.000Z
|
2020-04-20T02:40:48.000Z
|
deepgmap/post_train_tools/PCA.py
|
koonimaru/DeepGMAP
|
7daac354229fc25fba81649b741921345dc5db05
|
[
"Apache-2.0"
] | 1
|
2018-10-19T19:43:27.000Z
|
2018-10-19T19:43:27.000Z
|
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
#print X.shape
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
#print X_ipca.shape
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| 28.047619
| 75
| 0.662988
|
1f06cc574f69e68730da19fc4a384ccf417369e3
| 42
|
py
|
Python
|
instapush/version.py
|
adamwen829/instapush-py
|
6b7c186af9671aea94085f18a95946844df63349
|
[
"MIT"
] | 21
|
2015-01-12T04:46:01.000Z
|
2017-12-19T02:56:20.000Z
|
instapush/version.py
|
adamwen829/instapush-py
|
6b7c186af9671aea94085f18a95946844df63349
|
[
"MIT"
] | 4
|
2015-01-13T11:46:14.000Z
|
2021-04-21T01:10:23.000Z
|
instapush/version.py
|
adamwen829/instapush-py
|
6b7c186af9671aea94085f18a95946844df63349
|
[
"MIT"
] | 9
|
2015-03-26T14:50:51.000Z
|
2020-02-18T21:36:51.000Z
|
# -*- coding: utf-8 -*-
VERSION = '0.1.2'
| 14
| 23
| 0.47619
|
f59d40f75338d27845a11075a124001fc7a0346b
| 734
|
py
|
Python
|
src/common/methods/root_separation/segment_tabulation.py
|
GirZ0n/Methods-of-Computation
|
65e0c5c965d80a8f7c3875621460324d208b1934
|
[
"Unlicense"
] | 2
|
2021-09-14T07:10:34.000Z
|
2021-09-28T21:41:34.000Z
|
src/common/methods/root_separation/segment_tabulation.py
|
GirZ0n/Methods-of-Computation
|
65e0c5c965d80a8f7c3875621460324d208b1934
|
[
"Unlicense"
] | 3
|
2021-09-27T19:21:32.000Z
|
2021-09-28T18:46:37.000Z
|
src/common/methods/root_separation/segment_tabulation.py
|
GirZ0n/Methods-of-Computation
|
65e0c5c965d80a8f7c3875621460324d208b1934
|
[
"Unlicense"
] | null | null | null |
from typing import Callable, List
from src.common.model.line_segment import LineSegment
from src.common.model.root_separator import RootSeparator
class Tabulator(RootSeparator):
number_of_parts: int
def __init__(self, number_of_parts: int):
self.number_of_parts = number_of_parts
def separate(self, *, f: Callable, line_segment: LineSegment) -> List[LineSegment]:
segments = line_segment.split_into_segments(self.number_of_parts)
found_segments = []
for segment in segments:
left_value = f(segment.left)
right_value = f(segment.right)
if left_value * right_value <= 0:
found_segments.append(segment)
return found_segments
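# --- Hedged usage sketch (not part of the original module) -------------------
# Bracketing the roots of f(x) = x^2 - 2 on [-3, 3] with 60 sub-segments; the
# LineSegment(left, right) constructor is assumed here for illustration only.
def _example_tabulator():
    tabulator = Tabulator(number_of_parts=60)
    return tabulator.separate(
        f=lambda x: x * x - 2,
        line_segment=LineSegment(-3, 3),
    )  # expected: two short segments, one around -sqrt(2) and one around +sqrt(2)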
| 29.36
| 87
| 0.69346
|
faeeb68996e7ae910b138fbb2fad26e9d16f2d65
| 4,565
|
py
|
Python
|
CNN - RNN/Embedding.py
|
sajR/SAD
|
9d4cee51e6f5a85cc8ffe724fe6078ef4a35c782
|
[
"MIT"
] | null | null | null |
CNN - RNN/Embedding.py
|
sajR/SAD
|
9d4cee51e6f5a85cc8ffe724fe6078ef4a35c782
|
[
"MIT"
] | null | null | null |
CNN - RNN/Embedding.py
|
sajR/SAD
|
9d4cee51e6f5a85cc8ffe724fe6078ef4a35c782
|
[
"MIT"
] | null | null | null |
# The code below obtains embeddings/features from existing CNN models.
# Due to a high number of images the pre-processing (getting features from the existing models) can take time and power.
# Thus, the pre-processing is done beforehand and the features are stored as CSV, which avoids regenerating features each time a model is experimented with.
# Images are read in the order non-speech then speech. This keeps the saved features in a known order and makes it possible to generate sequences for the images/features later.
# By default the embedding length is 2048 for Xception.
import os
from keras import applications
from keras.preprocessing import image
import numpy as np
#images directories
train_speech_dir="/Train/speach"
train_nonspeech_dir="/Train/nonspeech"
test_speech_dir="/Test/speach"
test_nonspeech_dir="/Test/nonspeech"
val_speech_dir="/Validation/speach"
val_nonspeech_dir="/Validation/nonspeech"
class Embeddings():
def __init__(self, model_name):
# Initialising key variables and obtaining relevant models for pre-processing and obtaining features with a pretrained model
# include_top=False removes the fully connected layer at the end/top of the network
# This allows to get the feature vector as opposed to a classification
self.model_name=model_name
if model_name == 'Xception':
self.model=applications.xception
self.pretrained_model=applications.xception.Xception(weights='imagenet', include_top=False, pooling='avg')
if model_name == 'VGG16':
            self.model=applications.vgg16
self.pretrained_model=applications.vgg16.VGG16(weights='imagenet', include_top=False, pooling='avg')
if model_name == 'InceptionV3':
self.model=applications.inception_v3
self.pretrained_model=applications.inception_v3.InceptionV3(weights='imagenet', include_top=False, pooling='avg')
def getEmbeddings(self,imagePath,filename):
#Obtains features for every image in a given folder and stores as train,val and test.
print (filename)
imageFormat=".png"
label=""
fileList=[os.path.join(imagePath,f) for f in os.listdir(imagePath) if f.endswith(imageFormat)]
for imageName in fileList:
img = image.load_img(imageName, target_size=(100, 100))
if img is not None:
processed_array=self.formatEmbedding(img)
# obtains features by predicting the processed/formatted image array to the existing model
features=self.pretrained_model.predict(processed_array)
# Adds a "1" or "0" depending on speech/non-speech to establish feature as a speech/non-speech
if "nospeaking" in imageName:
label="0,"
else:
label="1,"
with open(filename,"a") as file:
file.write(label)
# Each line is written with a label followed by feature
np.savetxt(file,features,delimiter=",")
else:
print ("error with obtaining image")
def formatEmbedding(self,img):
# formats the image as array and pre-processes the array based on the model.
image_array=image.img_to_array(img)
# converts image array to numpy array and alters the shape of the array, expected by the model
image_array=np.expand_dims(image_array,axis=0)
# pre-proces the image array based on the model, either 0-255 or -1 to +1 etc
processed_array=self.model.preprocess_input(image_array)
return processed_array
if __name__ == '__main__':
# flow of the program. model name is defined, embeddings for each image directory, names for the CSV files.
XceptionEmbeddings=Embeddings('Xception')
model=XceptionEmbeddings.pretrained_model
model.summary()
XceptionEmbeddings.getEmbeddings(train_speech_dir,("train"+XceptionEmbeddings.model_name+'.csv'))
XceptionEmbeddings.getEmbeddings(train_nonspeech_dir,("train"+XceptionEmbeddings.model_name+'.csv'))
XceptionEmbeddings.getEmbeddings(val_speech_dir,("val"+XceptionEmbeddings.model_name+'.csv'))
XceptionEmbeddings.getEmbeddings(val_nonspeech_dir,("val"+XceptionEmbeddings.model_name+'.csv'))
XceptionEmbeddings.getEmbeddings(test_speech_dir,("test"+XceptionEmbeddings.model_name+'.csv'))
XceptionEmbeddings.getEmbeddings(test_nonspeech_dir,("test"+XceptionEmbeddings.model_name+'.csv'))
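# The CSV layout written above is one row per image: a leading 0/1 label followed by the
# 2048 Xception feature values. A minimal sketch of reading such a file back for training
# follows; the file name is a hypothetical example, and numpy (imported above as np) is
# assumed to be enough for the simple "label,feat1,...,featN" rows written by getEmbeddings().
def load_embeddings(csv_path):
    data = np.loadtxt(csv_path, delimiter=",")
    labels = data[:, 0]      # first column: 1 = speech, 0 = non-speech
    features = data[:, 1:]   # remaining columns: CNN embedding
    return features, labels
# Example (hypothetical file produced by the code above):
# X_train, y_train = load_embeddings("trainXception.csv")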
| 53.081395
| 214
| 0.704053
|
ee390e1221183e53ece2275c1e18f40c94651ebf
| 500
|
py
|
Python
|
output/models/nist_data/list_pkg/normalized_string/schema_instance/nistschema_sv_iv_list_normalized_string_length_4_xsd/nistschema_sv_iv_list_normalized_string_length_4.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/normalized_string/schema_instance/nistschema_sv_iv_list_normalized_string_length_4_xsd/nistschema_sv_iv_list_normalized_string_length_4.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/normalized_string/schema_instance/nistschema_sv_iv_list_normalized_string_length_4_xsd/nistschema_sv_iv_list_normalized_string_length_4.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-normalizedString-length-4-NS"
@dataclass
class NistschemaSvIvListNormalizedStringLength4:
class Meta:
name = "NISTSchema-SV-IV-list-normalizedString-length-4"
namespace = "NISTSchema-SV-IV-list-normalizedString-length-4-NS"
value: List[str] = field(
default_factory=list,
metadata={
"length": 8,
"tokens": True,
}
)
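# A minimal usage sketch (not part of the generated module): the xs:list type maps to a plain
# Python list of string tokens, and the schema's 8-token length facet is recorded in the field
# metadata above rather than enforced by the dataclass itself.
example = NistschemaSvIvListNormalizedStringLength4(
    value=["t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8"]
)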
| 25
| 72
| 0.666
|
29220a4026fef3d918c471e137e490605a18dd2e
| 2,294
|
py
|
Python
|
publish_firmware.py
|
bpapesh/testTravis
|
12ef3d79c279ca5065e2f2aedca2222099917d45
|
[
"Apache-2.0"
] | null | null | null |
publish_firmware.py
|
bpapesh/testTravis
|
12ef3d79c279ca5065e2f2aedca2222099917d45
|
[
"Apache-2.0"
] | null | null | null |
publish_firmware.py
|
bpapesh/testTravis
|
12ef3d79c279ca5065e2f2aedca2222099917d45
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import sys
from os.path import basename
from platformio import util
Import('env')
project_config = util.load_project_config()
bintray_config = {k: v for k, v in project_config.items("bintray")}
version = project_config.get("common", "release_version")
#
# Push new firmware to the Bintray storage using API
#
def publish_firmware(source, target, env):
firmware_path = str(source[0])
firmware_name = basename(firmware_path)
print("Uploading {0} to Bintray. Version: {1}".format(
firmware_name, version))
url = "/".join([
"https://api.bintray.com", "content",
bintray_config.get("user"),
bintray_config.get("repository"),
bintray_config.get("package"), version, firmware_name
])
headers = {
"Content-type": "application/octet-stream",
"X-Bintray-Publish": "1",
"X-Bintray-Override": "1"
}
r = None
try:
r = requests.put(url,
data=open(firmware_path, "rb"),
headers=headers,
auth=(bintray_config.get("user"),
bintray_config.get("api_token")))
r.raise_for_status()
except requests.exceptions.RequestException as e:
sys.stderr.write("Failed to submit package: %s\n" %
("%s\n%s" % (r.status_code, r.text) if r else str(e)))
env.Exit(1)
print("The firmware has been successfuly published at Bintray.com!")
# Custom upload command and program name
env.Replace(PROGNAME="firmware_v_%s" % version, UPLOADCMD=publish_firmware)
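# For reference, this script assumes a platformio.ini roughly like the following (shown as
# comments since it is an ini file, not Python; all values are hypothetical placeholders):
#
#   [common]
#   release_version = 1.2.3
#
#   [bintray]
#   user = your_bintray_user
#   repository = your_repository
#   package = your_package
#   api_token = your_api_token
#
#   [env:release]
#   extra_scripts = publish_firmware.py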
| 33.246377
| 80
| 0.637751
|
2e5355211f6cdb22e4aebc41ea4cb69cdb442deb
| 79,416
|
py
|
Python
|
DtsShape_Blender.py
|
pchan126/Blender_DTS_30
|
730dabe3d620b088811b86e34583e92ed30dd184
|
[
"MIT"
] | null | null | null |
DtsShape_Blender.py
|
pchan126/Blender_DTS_30
|
730dabe3d620b088811b86e34583e92ed30dd184
|
[
"MIT"
] | null | null | null |
DtsShape_Blender.py
|
pchan126/Blender_DTS_30
|
730dabe3d620b088811b86e34583e92ed30dd184
|
[
"MIT"
] | null | null | null |
'''
Dts.Shape_Blender.py
Copyright (c) 2005 - 2006 James Urquhart(j_urquhart@btinternet.com)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
bl_info = {
"name": "Torque Shape (.dts)...",
"author": "Paul Jan",
"version": (1, 0),
"blender": (2, 75, 0),
"location": "File > Export > Torque (.dts)",
"description": "Export to Torque (.dts) format.",
"warning": "",
"wiki_url": "",
"category": "Export",
}
from . import DTSPython
from .DTSPython import *
from . import DtsMesh_Blender
from .DtsMesh_Blender import *
import bpy
# from bpy import NMesh, Armature, Scene, Object, Material, Texture
# from bpy import Mathutils as bMath
from . import DtsPoseUtil
import gc
'''
Util functions used by class as well as exporter gui
'''
# -------------------------------------------------------------------------------------------------
# Helpful function to make a map of curve names
def BuildCurveMap(ipo):
curvemap = {}
ipocurves = ipo.getCurves()
for i in range(ipo.getNcurves()):
curvemap[ipocurves[i].getName()] = i
return curvemap
# Function to determine what animation is present in a curveMap
def getCMapSupports(curveMap):
try:
foo = curveMap['LocX']
has_loc = True
except KeyError:
has_loc = False
try:
foo = curveMap['QuatX']
has_rot = True
except KeyError:
has_rot = False
try:
foo = curveMap['SizeX']
has_scale = True
except KeyError:
has_scale = False
return has_loc, has_rot, has_scale
# gets the highest frame in an action
def getHighestActFrame(act):
actFrames = act.getFrameNumbers()
highest = 0
for fr in actFrames:
if fr > highest:
highest = int(fr)
return highest
'''
Shape Class (Blender Export)
'''
# -------------------------------------------------------------------------------------------------
class BlenderShape(DtsShape):
def __init__(self, prefs):
DtsShape.__init__(self)
self.preferences = prefs
# Add Root Node to catch meshes and vertices not assigned to the armature
n = Node(self.addName("Exp-Catch-Root"), -1)
# Add default translations and rotations for this bone
self.defaultTranslations.append(Vector(0, 0, 0))
self.defaultRotations.append(Quaternion(0, 0, 0, 1))
self.nodes.append(n)
# Detail level counts
self.numBaseDetails = 0
self.numCollisionDetails = 0
self.numLOSCollisionDetails = 0
self.subshapes.append(SubShape(0, 0, 0, 1, 0, 0)) # Regular meshes
self.subshapes.append(SubShape(0, 0, 0, 1, 0, 0)) # Collision meshes
self.addedArmatures = [] # Armature object, Armature matrix
self.externalSequences = []
self.scriptMaterials = []
# temp container that holds the raw rest transforms, including default scale
self.restTransforms = None
# set rest frame before initializing transformUtil
# Blender.Set('curframe', prefs['RestFrame'])
bpy.context.scene.frame_set(prefs['RestFrame'])
        # this object is the interface through which we get blender transform data
# for object and bone nodes
self.transformUtil = DtsPoseUtil.NodeTransformUtil(self.preferences['ExportScale'])
# extra book keeping for armature modifier warning (see long note/explanation in Dts_Blender.py)
self.badArmatures = []
gc.enable()
def __del__(self):
DtsShape.__del__(self)
del self.addedArmatures
del self.externalSequences
del self.scriptMaterials
# Find an existing dts object in the shape
def findDtsObject(self, dtsObjName):
# get/add dts object to shape
masterObject = None
for dObj in self.objects:
if self.sTable.get(dObj.name).upper() == dtsObjName.upper():
masterObject = dObj
return masterObject
# Adds a dts object to the shape
def addDtsObject(self, dtsObjName, pNodeIdx):
masterObject = dObject(self.addName(dtsObjName), -1, -1, pNodeIdx)
masterObject.tempMeshes = []
self.objects.append(masterObject)
return masterObject
# Adds a mesh to a dts object
def addMesh(self, o, masterObject):
hasArmatureDeform = False
armParentDeform = False
# Check for armature modifier
for mod in o.modifiers:
if mod.type == 'ARMATURE':
hasArmatureDeform = True
# Check for an armature parent
try:
if o.parentType == 'ARMATURE':
hasArmatureDeform = True
armParentDeform = True
except:
pass
# does the object have geometry?
# do we even have any modifiers? If not, we can skip copying the display data.
hasMultiRes = False
try:
for modifier in o.modifiers:
if modifier.type == "MULTIRES":
hasMultiRes = True
except AttributeError:
hasMultiRes = False
hasModifiers = False
for mod in o.modifiers:
# skip armature modifiers
if mod.type == 'ARMATURE': continue
# skip modifiers that are "OK" if we know they can't affect the number
# of verts in the mesh.
# undocumented implicit "Collision" modifier
if mod.type == 23: continue
# if we've got a skinned mesh, we can safely bake some modifiers
# into the mesh's root pose.
if hasArmatureDeform:
if mod.type == 'CURVE' \
                        or mod.type == 'LATTICE' \
or mod.type == 'WAVE' \
                        or mod.type == 'DISPLACE' \
or mod.type == 'SMOOTH' \
or mod.type == 'CAST': continue
# if we made it here we've got at least one valid (non-armature) modifier on the mesh
Torque_Util.dump_writeln("hasModifiers:" + modifier.type)
hasModifiers = True
break
# if a mesh has multires, treat it as if it has modifiers
if hasMultiRes: hasModifiers = True
mesh_data = None
# Get display data for non-skinned mesh with modifiers
if (not hasArmatureDeform) and (hasModifiers or (o.type in DtsGlobals.needDisplayDataTypes)):
# print "mesh:", o.name, "has modifers but not armature deform or is not a true mesh."
try:
temp_obj = bpy.data.objects["DTSExpObj_Tmp"]
except:
                me = bpy.data.meshes.new("DTSExpMshObj_Tmp")
                temp_obj = bpy.data.objects.new("DTSExpObj_Tmp", me)
try:
                mesh_data = bpy.data.meshes["DTSExpMshObj_Tmp"]
except:
mesh_data = bpy.data.meshes.new("DTSExpMshObj_Tmp")
# try to get the raw display data
try:
mesh_data.getFromObject(o)
temp_obj.link(mesh_data)
except:
# todo - warn when we couldn't get mesh data?
pass
# Get display data for skinned mesh without (additional) modifiers
elif hasArmatureDeform and not (hasModifiers or (o.type in DtsGlobals.needDisplayDataTypes)):
# print "mesh:", o.name, "has armature deform but no modifiers."
# originalMesh = o.getData(False, True)
if o.type == "MESH":
originalMesh = o.data
for modifier in o.modifiers:
print(modifier.type)
if modifier.type == 'MULTIRES':
originalMesh = modifier
# get vertex weight info
influences = {}
for v in originalMesh.vertices:
# for k in v.groups.values():
# Torque_Util.dump_writeln('Vertex %d has a weight of %f for bone %s' % (v.index, k.weight, o.vertex_groups[k.group].name))
influences[v.index] = v.groups.items()
arm = bpy.data.objects['Armature']
# obj_verts = originalMesh.vertices
# obj_group_names = [g.name for g in o.vertex_groups]
# for x in obj_verts:
# influences[x.index] = []
# for bone in arm.pose.bones:
# if bone.name not in obj_group_names:
# continue
#
# gidx = o.vertex_groups[bone.name].index
#
# bone_verts = [v for v in obj_verts if gidx in [g.group for g in v.groups]]
#
# for v in bone_verts:
# w = v.groups[gidx].weight
# # influences[v.index].append([bone.name,w])
# Torque_Util.dump_writeln('Vertex %d has a weight of %f for bone %s' % (v.index, w, bone.name))
groups = []
for gps in o.vertex_groups:
groups.append(gps.name)
# -----------------------------
# apply armature modifier
try:
temp_obj = bpy.data.objects["DTSExpObj_Tmp"]
except:
me = bpy.data.meshes.new("DTSExpMshObj_Tmp")
temp_obj = bpy.data.objects.new("DTSExpObj_Tmp", me)
try:
                mesh_data = bpy.data.meshes["DTSExpMshObj_Tmp"]
except:
mesh_data = bpy.data.meshes.new("DTSExpMshObj_Tmp")
# try to get the raw display data
try:
mesh_data.getFromObject(o)
temp_obj.link(mesh_data)
except:
# todo - warn when we couldn't get mesh data?
pass
# -----------------------------
# remove any existing groups if we are recycling a datablock
# if len(mesh_data.getVertGroupNames()) != 0:
# for group in mesh_data.getVertGroupNames():
# mesh_data.removeVertsFromGroup(group)
o.vertex_groups.clear()
# mesh_data.update()
o.data.update()
# add vertex weights back in
# existingNames = mesh_data.getVertGroupNames()
# for group in groups:
# if not group in existingNames:
# mesh_data.addVertGroup(group)
# recreate vertex groups
# for vIdx in list(influences.keys()):
# for inf in influences[vIdx]:
# group, weight = inf
# mesh_data.assignVertsToGroup(group, [vIdx], weight, add)
# Get (non-display) mesh data for ordinary mesh with no armature deform or modifiers
elif (not hasArmatureDeform) and not (hasModifiers or (o.type in DtsGlobals.needDisplayDataTypes)):
# print "mesh:", o.name, "has no modifiers and no armature deform"
for modifier in o.modifiers:
Torque_Util.dump_writeln("modtype:" + modifier.type)
if modifier.type == "MULTIRES":
mesh_data = modifier
# mesh_data = o.getData(False, True)
temp_obj = None
if o.type == "MESH":
mesh_data = o.data
Torque_Util.dump_writeln("object type " + o.type)
# if not hasArmatureDeform:
# Torque_Util.dump_writeln("not hasArmatureDeform")
# if not o.type in DtsGlobals.needDisplayDataTypes:
# Torque_Util.dump_writeln("not hasModifiers")
# if not hasArmatureDeform:
# Torque_Util.dump_writeln("not o.type in DtsGlobals.needDisplayDataTypes")
if mesh_data is None:
raise Exception("no multires not hasArmatureDeform")
# Give error message if we've got a skinned mesh with additional modifiers
elif hasArmatureDeform and (hasModifiers or (o.type in DtsGlobals.needDisplayDataTypes)):
# we can't support this, since the number of verts in the mesh may have been changed
            # by one of the modifiers, it is impossible to reconstruct vertex groups.
print("Can't reconstruct vertex group for skinned mesh with additional modifiers!")
for modifier in o.modifiers:
Torque_Util.dump_writeln("modtype:" + modifier.type)
if modifier.type == "MULTIRES":
mesh_data = modifier
# mesh_data = o.getData(False, True)
if o.type == "MESH":
mesh_data = o.data
Torque_Util.dump_writeln("huh1")
if mesh_data is None:
raise Exception("no multires hasArmatureDeform")
temp_obj = None
else:
# unknown mesh configuration?!
raise Exception("Unknown mesh configuration!!!")
# Get Object's Matrix
mat = self.collapseBlenderTransform(o)
# print "mat = \n", str(mat)
# Get armatures targets if mesh is skinned
armTargets = DtsGlobals.SceneInfo.getSkinArmTargets(o)
# Import Mesh, process flags
try:
x = self.preferences['PrimType']
except KeyError:
self.preferences['PrimType'] = "Tris"
if mesh_data is None:
raise Exception("empty mesh_data")
if type(o) is BlenderShape:
raise Exception("huh")
tmsh = BlenderMesh(self, o, o.name, mesh_data, -1, 1.0, mat, o.scale, hasArmatureDeform, armTargets, False,
(self.preferences['PrimType'] == "TriLists" or self.preferences['PrimType'] == "TriStrips"))
# Add mesh flags based on blender object game properties.
if len(o.game.properties) > 0:
propNames = []
for prop in o.game.properties:
if (prop.type == 'BOOL' and prop.value == True) \
or (prop.type == 'INT' and prop.value != 0) \
or (prop.type == 'FLOAT' and prop.value != 0.0) \
or (prop.type == 'STRING' and prop.value.lower() == "true"):
propNames.append(prop.getName())
tmsh.setBlenderMeshFlags(propNames)
# If we ended up being a Sorted Mesh, sort the faces
if tmsh.mtype == tmsh.T_Sorted:
tmsh.sortMesh(self.preferences['AlwaysWriteDepth'], self.preferences['ClusterDepth'])
# Increment polycount metric
polyCount = tmsh.getPolyCount()
masterObject.tempMeshes.append(tmsh)
# clean up temporary objects
try:
            bpy.context.scene.objects.unlink(bpy.data.objects["DTSExpObj_Tmp"])
except:
pass
del mesh_data
del temp_obj
return polyCount
# todo - combine with addMesh and paramatize
def addCollisionMesh(self, o, masterObject):
mesh_data = o.data;
# Get Object's Matrix
mat = self.collapseBlenderTransform(o)
# Import Mesh, process flags
tmsh = BlenderMesh(self, o, o.name, mesh_data, -1, 1.0, mat, o.scale, False, None, True)
# Increment polycount metric
polyCount = tmsh.getPolyCount()
masterObject.tempMeshes.append(tmsh)
return polyCount
# Adds all meshes, detail levels, and dts objects to the shape.
# this should be called after nodes are added.
def addAllDetailLevels(self, dtsObjects, sortedObjects):
# get lists of visible and collision detail levels
sortedDetailLevels = DtsGlobals.Prefs.getSortedDLNames()
visDetailLevels = []
colDetailLevels = []
for dlName in sortedDetailLevels:
strippedDLName = DtsGlobals.Prefs.getTextPortion(dlName)
if strippedDLName.upper() == "DETAIL":
visDetailLevels.append(dlName)
else:
colDetailLevels.append(dlName)
# add visible detail levels
self.addDetailLevels(dtsObjects, sortedObjects, visDetailLevels)
# add collision detail levels
colDLObjects = self.getCollisionDLObjects(dtsObjects, sortedObjects)
self.addDetailLevels(dtsObjects, colDLObjects, colDetailLevels)
# gets a list of objects that exist in visible dls.
def getVisibleDLObjects(self, dtsObjects, sortedObjects):
sortedDetailLevels = DtsGlobals.Prefs.getSortedDLNames()
dtsObjList = dtsObjects
visObjList = []
for dlName in sortedDetailLevels:
# --------------------------------------------
strippedDLName = DtsGlobals.Prefs.getTextPortion(dlName)
if strippedDLName.upper() != "DETAIL":
continue
for dtsObjName in sortedObjects:
# get nodeinfo struct for the current DL and dts object
ni = dtsObjects[dtsObjName][dlName]
if (ni != None) and (dtsObjName not in visObjList):
visObjList.append(dtsObjName)
return visObjList
# gets a list of objects that exist in collision dls.
def getCollisionDLObjects(self, dtsObjects, sortedObjects):
sortedDetailLevels = DtsGlobals.Prefs.getSortedDLNames()
dtsObjList = dtsObjects
colObjList = []
for dlName in sortedDetailLevels:
# --------------------------------------------
strippedDLName = DtsGlobals.Prefs.getTextPortion(dlName)
if strippedDLName.upper() == "DETAIL":
continue
for dtsObjName in sortedObjects:
# get nodeinfo struct for the current DL and dts object
ni = dtsObjects[dtsObjName][dlName]
if (ni != None) and (dtsObjName not in colObjList):
colObjList.append(dtsObjName)
return colObjList
# Adds all meshes, detail levels, and dts objects to the shape.
# this should be called after nodes are added.
def addDetailLevels(self, dtsObjects, sortedObjects, sortedDLWorkingList):
sortedDetailLevels = DtsGlobals.Prefs.getSortedDLNames()
# set current frame to rest frame
restFrame = self.preferences['RestFrame']
# if Blender.Get('curframe') == restFrame: Blender.Set('curframe',restFrame+1)
# Blender.Set('curframe', restFrame)
if bpy.context.scene.frame_current == restFrame: bpy.context.scene.frame_set(restFrame + 1)
bpy.context.scene.frame_set(restFrame)
# dtsObjList = dtsObjects.keys()
dtsObjList = dtsObjects
# add each detail level
for dlName in sortedDLWorkingList:
# --------------------------------------------
numAddedMeshes = 0
polyCount = 0
size = DtsGlobals.Prefs.getTrailingNumber(dlName)
# loop through each dts object, add dts objects and meshes to the shape.
for dtsObjName in sortedObjects:
# get nodeinfo struct for the current DL and dts object
ni = dtsObjects[dtsObjName][dlName]
# get parent node index for dts object
pNodeNI = None
# find the actual parent node
for dln in sortedDetailLevels:
if dtsObjects[dtsObjName][dln] != None:
pNodeNI = dtsObjects[dtsObjName][dln].getGoodMeshParentNI()
break
if pNodeNI == None:
pNodeIdx = -1
else:
pNodeIdx = -1
for node in self.nodes:
if self.sTable.get(node.name).upper() == pNodeNI.dtsNodeName.upper():
pNodeIdx = node.name
break
# get/add dts object to shape
masterObject = self.findDtsObject(dtsObjName)
if masterObject == None:
masterObject = self.addDtsObject(dtsObjName, pNodeIdx)
if ni == None:
# add a null mesh if there's no mesh for this dl
masterObject.tempMeshes.append(DtsMesh(DtsMesh.T_Null))
else:
# otherwise add a regular mesh
o = ni.getBlenderObj()
if dlName[0:3].upper() == "DET":
polyCount += self.addMesh(o, masterObject)
elif dlName[0:3].upper() == "COL" or dlName[0:3].upper() == "LOS":
polyCount += self.addCollisionMesh(o, masterObject)
numAddedMeshes += 1
# Modify base subshape if required
if self.numBaseDetails == 0:
self.subshapes[0].firstObject = len(self.objects) - numAddedMeshes
self.subshapes[0].numObjects = numAddedMeshes
# Get name, do housekeeping
strippedDLName = DtsGlobals.Prefs.getTextPortion(dlName)
self.numBaseDetails += 1
if strippedDLName.upper() == "DETAIL":
detailName = "Detail-%d" % (self.numBaseDetails)
elif strippedDLName.upper() == "COLLISION":
self.numCollisionDetails += 1
detailName = "Collision-%d" % (self.numCollisionDetails)
elif strippedDLName.upper() == "LOSCOLLISION":
self.numLOSCollisionDetails += 1
LOSTrailingNumber = -int(DtsGlobals.Prefs.getTrailingNumber(dlName))
# look for a matching collision detail level
i = 0
found = False
for dl in self.detaillevels:
dln = self.sTable.get(dl.name)
if dln[0:3].lower() == "col":
tn = -int(DtsGlobals.Prefs.getTrailingNumber(dln))
# print self.sTable.get(dl.name), tn
if tn == LOSTrailingNumber:
detailName = "LOS-%d" % (i + 9)
found = True
i += 1
# if we didn't find a matching collision detail level,
# pick a name. Need to make sure we don't match any col
# dls and that we don't have a name collision with another
# loscollision detail level.
if not found:
detailName = "LOS-%d" % (len(sortedDetailLevels) + 9 + self.numLOSCollisionDetails)
# Store constructed detail level info into shape
self.detaillevels.append(
DetailLevel(self.addName(detailName), 0, self.numBaseDetails - 1, size, -1, -1, polyCount))
# --------------------------------------------
def addBillboardDetailLevel(self, dispDetail, equator, polar, polarangle, dim, includepoles, size):
self.numBaseDetails += 1
bb = DetailLevel(self.addName("BILLBOARD-%d" % (self.numBaseDetails)), -1,
encodeBillBoard(
equator,
polar,
polarangle,
dispDetail,
dim,
includepoles),
size, -1, -1, 0)
self.detaillevels.insert(self.numBaseDetails - self.numLOSCollisionDetails - self.numCollisionDetails - 1, bb)
# create triangle strips
def stripMeshes(self, maxsize):
subshape = self.subshapes[0]
for obj in self.objects[subshape.firstObject:subshape.firstObject + (
subshape.numObjects - (self.numCollisionDetails + self.numLOSCollisionDetails))]:
for i in range(obj.firstMesh, (obj.firstMesh + obj.numMeshes)):
tmsh = self.meshes[i]
if len(tmsh.primitives) == 0: continue
tmsh.windStrip(maxsize)
return True
# this should probably be called before the other finalize functions
def finalizeMaterials(self):
# Go through materials, strip ".ignore", add IFL image frames.
for i in range(0, len(self.materials.materials)):
mat = self.materials.materials[i]
if mat.flags & mat.IFLMaterial != 0:
# remove the trailing numbers from the IFL material
mntp = self.preferences.getTextPortion(mat.name)
# add a name for our IflMaterial into the string table
si = self.sTable.addString(mntp + ".ifl")
# create an IflMaterial object and append it to the shape
iflMat = IflMaterial(si, i, 0, 0, 0)
self.iflmaterials.append(iflMat)
mat.name = finalizeImageName(mat.name, False)
def finalizeObjects(self):
# Go through objects, add meshes, set transforms
for o in self.objects:
o.numMeshes = len(o.tempMeshes)
o.firstMesh = len(self.meshes)
# Initial animation frame
# (We could add other frames here for visibility / material / vertex animation)
self.objectstates.append(ObjectState(1.0, 0, 0))
# Get node from first mesh
if len(o.tempMeshes) == 0:
Torque_Util.dump_writeWarning("Warning: Object '%s' has no meshes!" % self.sTable.get(o.name));
continue
isSkinned = False
# if o.tempMeshes[0].mtype != o.tempMeshes[0].T_Null: o.mainMaterial = o.tempMeshes[0].mainMaterial
# else: o.mainMaterial = None
# Determine mesh type for these objects (assumed by first entry)
if o.tempMeshes[0].mtype != o.tempMeshes[0].T_Skin:
if o.tempMeshes[0].getNodeIndex(0) != None:
o.node = o.tempMeshes[0].getNodeIndex(0)
elif o.node < 1:
o.node = 0
else:
# o.node = -1
o.node = 0
isSkinned = True
Torque_Util.dump_writeln("Object %s, Skinned" % (self.sTable.get(o.name)))
for tmsh in o.tempMeshes:
'''
We need to assign nodes to objects and set transforms.
Rigid meshes can be attached to a single node, in which
case we need to transform the vertices into the node's
local space.
'''
if not isSkinned:
# Transform the mesh into node space. The Mesh vertices
# must all be relative to the bone they're attached to
world_trans, world_rot = self.getNodeWorldPosRot(o.node)
tmsh.translate(-world_trans)
tmsh.rotate(world_rot.inverse())
if tmsh.mtype == tmsh.T_Skin:
tmsh.mtype = tmsh.T_Standard
Torque_Util.dump_writeWarning(
"Warning: Invalid skinned mesh in rigid object '%s'!" % (self.sTable.get(o.name)))
else:
for n in range(0, tmsh.getNodeIndexCount()):
# The node transform must take us from shape space to bone space
world_trans, world_rot = self.getNodeWorldPosRot(tmsh.getNodeIndex(n))
tmsh.setNodeTransform(n, world_trans, world_rot)
self.meshes.append(tmsh)
# To conclude, remove subshape's and details we don't need
if self.subshapes[1].numObjects == 0: del self.subshapes[1]
if self.subshapes[0].numObjects == 0: del self.subshapes[0]
count = 0
while count != len(self.detaillevels):
if self.detaillevels[count].subshape >= len(self.subshapes):
del self.detaillevels[count]
else:
count += 1
# Calculate bounds and sizes
if len(self.detaillevels) == 0:
Torque_Util.dump_writeErr(" Error : Shape contains no meshes (no valid detail levels)!")
self.calcSmallestSize() # Get smallest size where shape is visible
# Calculate the bounds,
# If we have an object in blender called "Bounds" of type "Mesh", use that.
try:
bound_obj = bpy.data.objects["Bounds"]
matf = self.collapseBlenderTransform(bound_obj)
if bound_obj.type == "MESH":
bmesh = bound_obj.data
self.bounds.max = Vector(-10e30, -10e30, -10e30)
self.bounds.min = Vector(10e30, 10e30, 10e30)
for v in bmesh.vertices:
real_vert = matf.passPoint(v)
self.bounds.min[0] = min(self.bounds.min.x(), real_vert[0])
self.bounds.min[1] = min(self.bounds.min.y(), real_vert[1])
self.bounds.min[2] = min(self.bounds.min.z(), real_vert[2])
self.bounds.max[0] = max(self.bounds.max.x(), real_vert[0])
self.bounds.max[1] = max(self.bounds.max.y(), real_vert[1])
self.bounds.max[2] = max(self.bounds.max.z(), real_vert[2])
# The center...
self.center = self.bounds.max.midpoint(self.bounds.min)
# Tube Radius.
dist = self.bounds.max - self.center
self.tubeRadius = Vector2(dist[0], dist[1]).length()
# Radius...
self.radius = (self.bounds.max - self.center).length()
else:
self.calculateBounds()
self.calculateCenter()
self.calculateRadius()
self.calculateTubeRadius()
except KeyError:
self.calculateBounds()
self.calculateCenter()
self.calculateRadius()
self.calculateTubeRadius()
# Converts a blender matrix to a Torque_Util.MatrixF
def toTorqueUtilMatrix(self, blendermatrix):
return MatrixF([blendermatrix[0][0], blendermatrix[0][1], blendermatrix[0][2], blendermatrix[0][3],
blendermatrix[1][0], blendermatrix[1][1], blendermatrix[1][2], blendermatrix[1][3],
blendermatrix[2][0], blendermatrix[2][1], blendermatrix[2][2], blendermatrix[2][3],
blendermatrix[3][0], blendermatrix[3][1], blendermatrix[3][2], blendermatrix[3][3]])
# Creates a matrix that transforms to shape space
def collapseBlenderTransform(self, object):
cmat = self.toTorqueUtilMatrix(object.matrix_world)
# add on scaling factor
exportScale = self.preferences['ExportScale']
scaleMat = MatrixF([exportScale, 0.0, 0.0, 0.0,
0.0, exportScale, 0.0, 0.0,
0.0, 0.0, exportScale, 0.0,
0.0, 0.0, 0.0, exportScale])
return scaleMat * cmat
    # Creates a cumulative scaling ratio for an object
def collapseBlenderScale(self, object):
csize = object.getSize()
csize = [csize[0], csize[1], csize[2]]
parent = object.getParent()
while parent != None:
nsize = parent.getSize()
csize[0], csize[1], csize[2] = csize[0] * nsize[0], csize[1] * nsize[1], csize[2] * nsize[2]
parent = parent.getParent()
exportScale = self.preferences['ExportScale']
# add on export scale factor
csize[0], csize[1], csize[2] = csize[0] * exportScale, csize[1] * exportScale, csize[2] * exportScale
return csize
# A utility method that gets the min and max positions of the nodes in an armature
# within a passed-in ordered list.
def getMinMax(self, rootNode, nodeOrder, nodeOrderDict, warning=False):
# find the current node in our ordered list
try:
pos = nodeOrderDict[rootNode.dtsNodeName]
minPos, maxPos = pos, pos
except:
minPos, maxPos = 99999, -99999
cMin = []
cMax = []
nnames = []
for child in [x for x in list(self.transformUtil.nodes.values()) if x.getGoodNodeParentNI() == rootNode]:
nnames.append(child.dtsNodeName)
start, end, warning = self.getMinMax(child, nodeOrder, nodeOrderDict, warning)
if start == None and end == None: continue
if end > maxPos: maxPos = end
if start < minPos: minPos = start
cMin.append(start)
cMax.append(end)
# check all children of the current root bone to make sure their min/max values don't
# overlap.
for i in range(0, len(cMin)):
for j in range(i + 1, len(cMin)):
if (cMin[i] <= cMax[j] and cMin[i] >= cMin[j]) \
or (cMax[i] <= cMax[j] and cMax[i] >= cMin[j]) \
or (cMin[j] <= cMax[i] and cMin[j] >= cMin[i]) \
or (cMax[j] <= cMax[i] and cMax[j] >= cMin[i]):
Torque_Util.dump_writeWarning(
"-\nWarning: Invalid Traversal - Node hierarchy cannot be matched with the")
Torque_Util.dump_writeln(" node ordering specified in the NodeOrder text buffer.")
Torque_Util.dump_writeln(" Details:")
Torque_Util.dump_writeln(" node tree with root node \'%s\'" % nnames[i])
Torque_Util.dump_writeln(" overlaps sibling tree with root node \'%s\'" % nnames[j])
Torque_Util.dump_writeln(" in the NodeOrder text buffer.")
Torque_Util.dump_writeln(" cMin[i], cMax[i] = %i, %i" % (cMin[i], cMax[i]))
Torque_Util.dump_writeln(" cMin[j], cMax[j] = %i, %i\n-" % (cMin[j], cMax[j]))
warning = True
return minPos, maxPos, warning
def createOrderedNodeList(self):
orderedNodeList = []
# read in desired node ordering from a text buffer, if it exists.
no = None
try:
            noTxt = bpy.data.texts["NodeOrder"]
            no = [line.body for line in noTxt.lines]
Torque_Util.dump_writeln("NodeOrder text buffer found, attempting to export nodes in the order specified.")
except:
no = None
nodeOrderDict = {}
if no != None:
# build a dictionary for fast order compares
i = 0
for n in no:
nodeOrderDict[n] = i
i += 1
boneTree = []
# Validate the node ordering against the bone hierarchy in our armatures.
#
# Validation rules:
#
# 1. Child nodes must come after their parents in the node order
# list.
#
# 2. The min and max positions of all child nodes in a given bone
# tree should not overlap the min/max positions of other bone
# trees on the same level of the overall tree.
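            # A small illustration of these rules (hypothetical NodeOrder contents): with the
            # buffer listing Root, Spine, ArmL, HandL, ArmR, HandR, rule 1 holds because every
            # child appears after its parent; it would be broken if HandL were listed before
            # ArmL. Rule 2 holds because the ArmL subtree occupies positions 2-3 and the ArmR
            # subtree occupies positions 4-5; interleaving them (ArmL, ArmR, HandL, HandR)
            # would make the two subtree ranges overlap, so the order could not be honoured.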
# Test Rule #1
for nodeInfo in list(self.transformUtil.nodes.values()):
if nodeInfo.getGoodNodeParentNI() != None:
if nodeInfo.dtsNodeName in list(nodeOrderDict.keys()) \
and nodeInfo.getGoodNodeParentNI() != None \
and nodeInfo.getGoodNodeParentNI().dtsNodeName in list(nodeOrderDict.keys()):
if nodeOrderDict[nodeInfo.dtsNodeName] < nodeOrderDict[
nodeInfo.getGoodNodeParentNI().dtsNodeName]:
Torque_Util.dump_writeWarning(
"-\nWarning: Invalid node order, child bone \'%s\' comes before" % nodeInfo.dtsNodeName)
Torque_Util.dump_writeln(
" parent bone \'%s\' in the NodeOrder text buffer\n-" % nodeInfo.getGoodNodeParentNI().dtsNodeName)
# Test Rule #2
start, end, warning = self.getMinMax(None, no, nodeOrderDict)
if not warning:
# export in the specified order
orderedNodeList = self.walkNodeTreeInOrder(None, nodeOrderDict, [])
else:
# otherwise export in natural order
orderedNodeList = self.walkNodeTree(None, [])
else:
# get list of nodes in natural order
orderedNodeList = self.walkNodeTree(None, [])
return orderedNodeList
# Walks the node tree recursively and returns a list of nodes in natural order
def walkNodeTree(self, nodeInfo, nodeOrderList):
thisLevel = [x for x in list(self.transformUtil.nodes.values()) if x.getGoodNodeParentNI() == nodeInfo]
thisLevel.sort(key=lambda x: x.dtsNodeName)
for child in thisLevel:
if not child.isBanned(): nodeOrderList.append(child.dtsNodeName)
nodeOrderList = self.walkNodeTree(child, nodeOrderList)
return nodeOrderList
# Walks the node tree recursively and returns a list of nodes in the specified order, if possible
def walkNodeTreeInOrder(self, nodeInfo, nodeOrderDict, nodeOrderList):
childList = [x for x in list(self.transformUtil.nodes.values()) if x.getGoodNodeParentNI() == nodeInfo]
orderedChildList = [x for x in childList if x.dtsNodeName in list(nodeOrderDict.keys())]
extraChildList = [x for x in childList if not (x.dtsNodeName in list(nodeOrderDict.keys()))]
orderedChildList.sort(key=lambda x: nodeOrderDict[x.dtsNodeName])
for child in orderedChildList:
if not child.isBanned(): nodeOrderList.append(child.dtsNodeName)
nodeOrderList = self.walkNodeTreeInOrder(child, nodeOrderDict, nodeOrderList)
for child in extraChildList:
if not child.isBanned(): nodeOrderList.append(child.dtsNodeName)
nodeOrderList = self.walkNodeTreeInOrder(child, nodeOrderDict, nodeOrderList)
return nodeOrderList
# adds all object and bone nodes to the shape using the poseUtil tree
def addAllNodes(self):
orderedNodeList = self.createOrderedNodeList()
# add armatures to our list
for arm in [x for x in bpy.context.scene.objects if x.type == 'ARMATURE']:
self.addedArmatures.append(arm)
# build a dict of node indices for lookup
nodeIndices = {}
i = 1
for nodeName in orderedNodeList:
nodeInfo = self.transformUtil.nodes[nodeName]
if not nodeInfo.isBanned():
nodeIndices[nodeName] = i
i += 1
# add nodes in order
for nodeName in orderedNodeList:
nodeInfo = self.transformUtil.nodes[nodeName]
if not nodeInfo.isBanned():
if nodeInfo.getGoodNodeParentNI() != None:
parentNodeIndex = nodeIndices[nodeInfo.getGoodNodeParentNI().dtsNodeName]
else:
parentNodeIndex = -1
n = Node(self.sTable.addString(nodeInfo.dtsNodeName), parentNodeIndex)
try:
n.armName = nodeInfo.armParentNI.dtsNodeName
except:
n.armName = None
n.obj = nodeInfo.getBlenderObj()
self.nodes.append(n)
nodeIndex = len(self.nodes) - 1
self.subshapes[0].numNodes += 1
# dump node transforms for the rest frame
# Blender.Set('curframe', self.preferences['RestFrame'])
bpy.context.scene.frame_set(self.preferences['RestFrame'])
self.restTransforms = self.transformUtil.dumpReferenceFrameTransforms(orderedNodeList,
self.preferences['RestFrame'],
self.preferences[
'SequenceExportTwoPassMode'])
# Set up default translations and rotations for nodes.
i = 0
for nname in orderedNodeList:
pos = self.restTransforms[i][0]
rot = self.restTransforms[i][2]
self.defaultTranslations.append(pos)
self.defaultRotations.append(rot)
i += 1
# adds a ground frame to a sequence
def addGroundFrame(self, sequence, frame_idx, boundsStartMat):
# quit trying to export ground frames if we have had an error.
try:
x = self.GroundFrameError
except:
self.GroundFrameError = False
if self.GroundFrameError: return
# Add ground frames if enabled
if sequence.has_ground:
# Check if we have any more ground frames to add
if sequence.ground_target != sequence.numGroundFrames:
# Ok, we can add a ground frame, but do we add it now?
duration = sequence.numKeyFrames / sequence.ground_target
if frame_idx >= (duration * (sequence.numGroundFrames + 1)) - 1:
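                    # Worked example (hypothetical numbers): with numKeyFrames = 20 and
                    # ground_target = 4, duration = 5.0, so the condition first fires at
                    # frame_idx 4, 9, 14 and 19 - four evenly spaced ground frame samples.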
# We are ready, lets stomp!
try:
bound_obj = bpy.data.objects["Bounds"]
matf = self.collapseBlenderTransform(bound_obj)
pos = Vector(matf.get(3, 0), matf.get(3, 1), matf.get(3, 2))
pos = pos - Vector(boundsStartMat.get(3, 0), boundsStartMat.get(3, 1), boundsStartMat.get(3, 2))
matf = boundsStartMat.inverse() * matf
rot = Quaternion().fromMatrix(matf).inverse()
self.groundTranslations.append(pos)
self.groundRotations.append(rot)
sequence.numGroundFrames += 1
                    except (KeyError, ValueError):
# record the error state so we don't repeat ourselves.
self.GroundFrameError = True
sequence.has_ground = False # <- nope, no ground frames.
Torque_Util.dump_writeErr("Error: Could not get ground frames for sequence %s." % sequence.name)
Torque_Util.dump_writeln(
" You must have an object named Bounds in your scene to export ground frames.")
# Adds a generic sequence
def addSequence(self, seqName, seqPrefs, scene=None, action=None):
numFrameSamples = self.preferences.getSeqNumFrames(seqName)
visIsValid = validateVisibility(seqName, seqPrefs)
IFLIsValid = validateIFL(seqName, seqPrefs)
# ActionIsValid = validateAction(seqName, seqPrefs)
ActionIsValid = True
if numFrameSamples < 1:
ActionIsValid = False
visIsValid = False
IFLIsValid = False
# Did we have any valid animations at all for the sequence?
if not (visIsValid or IFLIsValid or ActionIsValid):
Torque_Util.dump_writeln(
" Skipping sequence %s, no animation types were valid for the sequence. " % seqName)
return None
# We've got something to export, so lets start off with the basic sequence
sequence = Sequence(self.sTable.addString(seqName))
sequence.name = seqName
sequence.numTriggers = 0
sequence.firstTrigger = -1
sequence.numKeyFrames = 0
sequence.has_vis = False
sequence.has_ifl = False
sequence.has_loc = False
sequence.has_rot = False
sequence.has_scale = False
sequence.has_ground = False
sequence.frames = []
for n in self.nodes:
sequence.matters_translation.append(False)
sequence.matters_rotation.append(False)
sequence.matters_scale.append(False)
sequence.frames.append(0)
# apply common sequence settings
sequence.fps = seqPrefs['FPS']
if seqPrefs['Cyclic']:
sequence.flags |= sequence.Cyclic
sequence.duration = seqPrefs['Duration']
sequence.priority = seqPrefs['Priority']
lastFrameRemoved = False
if ActionIsValid:
# startTime = Blender.sys.time()
sequence, lastFrameRemoved = self.addNodeSeq(sequence, action, numFrameSamples, scene, seqPrefs)
# endTime = Blender.sys.time()
# print "Sequence export finished in:", str(endTime-startTime)
# if we had to remove the last frame from a cyclic action, and the original action
# frame samples was the same as the overall number of frames for the sequence, adjust
# the overall sequence length.
if lastFrameRemoved:
numFrameSamples -= 1
if visIsValid:
sequence = self.addSequenceVisibility(sequence, numFrameSamples, seqPrefs, int(seqPrefs['StartFrame']),
int(seqPrefs['EndFrame']))
if IFLIsValid:
sequence = self.addSequenceIFL(sequence, getNumIFLFrames(seqName, seqPrefs), seqPrefs)
self.sequences.append(sequence)
# add triggers
if len(seqPrefs['Triggers']) != 0:
self.addSequenceTriggers(sequence, seqPrefs['Triggers'], numFrameSamples)
return sequence
# Import an action
def addNodeSeq(self, sequence, action, numOverallFrames, scene, seqPrefs):
'''
This adds an action to a shape as a sequence.
Sequences are added on a one-by-one basis.
The first part of the function determines if the action is worth exporting - if not, the function fails,
otherwise it is setup.
The second part of the function determines what the action animates.
The third part of the function adds the keyframes, making heavy use of the getPoseTransform function. You can control the
amount of frames exported via the 'FrameSamples' option.
Finally, the sequence data is dumped to the shape. Additionally, if the sequence has been marked as a dsq,
the dsq writer function is invoked - the data for that particular sequence is then removed from the shape.
NOTE: this function needs to be called AFTER all calls to addArmature/addNode, for obvious reasons.
'''
# build ordered node list.
orderedNodeList = []
for nodeIndex in range(1, len(self.nodes)):
orderedNodeList.append(self.sTable.get(self.nodes[nodeIndex].name))
# Get a list of armatures that need to be checked in order to issue
# warnings regarding armature modifiers (see long note in Dts_Blender.py)
checkArmatureNIs = DtsGlobals.SceneInfo.getArmaturesOfConcern()
# Add sequence flags
if seqPrefs['Blend']:
isBlend = True
sequence.flags |= sequence.Blend
else:
isBlend = False
if seqPrefs['NumGroundFrames'] != 0:
sequence.has_ground = True
sequence.ground_target = seqPrefs['NumGroundFrames']
sequence.flags |= sequence.MakePath
else:
sequence.has_ground = False
# Determine the number of key frames. Takes into account channels for bones that are
        # not being exported, as they may still affect the animation through IK or other constraints.
# sequence.numKeyFrames = getNumFrames(action.getAllChannelIpos().values(), False)
sequence.numKeyFrames = numOverallFrames
interpolateInc = 1
Torque_Util.dump_writeln(" Frames: %d " % numOverallFrames)
# Depending on what we have, set the bases accordingly
if sequence.has_ground:
sequence.firstGroundFrame = len(self.groundTranslations)
else:
sequence.firstGroundFrame = -1
# this is the number of real action frames we are exporting.
numFrameSamples = numOverallFrames
removeLast = False
baseTransforms = []
useAction = None
useFrame = None
if isBlend:
# Need to build a list of node transforms to use as the
# base transforms for nodes in our blend animation.
# useAction = seqPrefs['Action']['BlendRefPoseAction']
refFrame = seqPrefs['BlendRefPoseFrame']
baseTransforms = self.transformUtil.dumpBlendRefFrameTransforms(orderedNodeList, refFrame, self.preferences[
'SequenceExportTwoPassMode'])
if baseTransforms == None:
Torque_Util.dump_writeln("Error getting base Transforms!!!!!")
# *** special processing for the first frame:
# store off the default position of the bounds box
try:
# Blender.Set('curframe', self.preferences['RestFrame'])
bpy.context.scene.frame_set(self.preferences['RestFrame'])
bound_obj = bpy.data.objects["Bounds"]
boundsStartMat = self.collapseBlenderTransform(bound_obj)
        except (KeyError, ValueError):  # no "Bounds" object in the scene
boundsStartMat = MatrixF()
# For blend animations, we need to reset the pose to the reference pose instead of the default
# transforms. Otherwise, we won't be able to tell reliably which bones have actually moved
# during the blend sequence.
if isBlend:
# Set the current frame in blender
pass
        # For normal animations, loop through each node and reset its transforms.
# This avoids transforms carrying over from other action animations.
else:
# need to cycle through ALL bones and reset the transforms.
for armOb in bpy.context.scene.objects:
if (armOb.type != 'ARMATURE'): continue
tempPose = armOb.pose
armDb = armOb.data
for bonename in list(armDb.bones.keys()):
# for bonename in self.poseUtil.armBones[armOb.name].keys():
# reset the bone's transform
tempPose.bones[bonename].quat = bMath.Quaternion().identity()
tempPose.bones[bonename].size = bMath.Vector(1.0, 1.0, 1.0)
tempPose.bones[bonename].loc = bMath.Vector(0.0, 0.0, 0.0)
# update the pose.
tempPose.update()
# create blank frames for each node
for nodeIndex in range(1, len(self.nodes)):
sequence.frames[nodeIndex] = []
# get transforms for every frame in a big nested list.
if isBlend:
transforms = self.transformUtil.dumpBlendFrameTransforms(orderedNodeList, seqPrefs['StartFrame'],
seqPrefs['EndFrame'],
self.preferences['SequenceExportTwoPassMode'])
else:
transforms = self.transformUtil.dumpFrameTransforms(orderedNodeList, seqPrefs['StartFrame'],
seqPrefs['EndFrame'],
self.preferences['SequenceExportTwoPassMode'])
# if this is a blend animation, calculate deltas
if isBlend and baseTransforms != None:
transforms = self.transformUtil.getDeltasFromRef(baseTransforms, transforms, orderedNodeList)
# loop through each frame and transcribe transforms
for frameTransforms in transforms:
for nodeIndex in range(1, len(self.nodes)):
if isBlend:
# print "nodeIndex=", nodeIndex
baseTransform = baseTransforms[nodeIndex - 1]
else:
baseTransform = None
loc, scale, rot = frameTransforms[nodeIndex - 1]
sequence.frames[nodeIndex].append([loc, rot, scale])
# add ground frames
for frame in range(0, numOverallFrames):
self.addGroundFrame(sequence, frame, boundsStartMat)
# calculate matters
for nodeIndex in range(1, len(self.nodes)):
# get root transforms
if not isBlend:
rootLoc = self.defaultTranslations[nodeIndex]
rootRot = self.defaultRotations[nodeIndex]
else:
rootLoc = Vector(0.0, 0.0, 0.0)
rootRot = Quaternion(0.0, 0.0, 0.0, 1.0)
for fr in range(0, len(transforms)):
# check deltas from base transforms
if sequence.matters_translation[nodeIndex] == False:
if not rootLoc.eqDelta(transforms[fr][nodeIndex - 1][0], 0.02):
# print "LOC detected:"
# print " rootLoc=", rootLoc
# print " loc =", transforms[fr][nodeIndex-1][0]
sequence.matters_translation[nodeIndex] = True
sequence.has_loc = True
if sequence.matters_rotation[nodeIndex] == False:
# print "angle between quats is:", rootRot.angleBetween(transforms[fr][nodeIndex-1][2])
# if not rootRot.eqDelta(transforms[fr][nodeIndex-1][2], 0.02):
if rootRot.angleBetween(transforms[fr][nodeIndex - 1][2]) > 0.008:
# print "ROT detected:"
# print " rootRot=", rootRot
# print " rot =", transforms[fr][nodeIndex-1][2]
sequence.matters_rotation[nodeIndex] = True
sequence.has_rot = True
if sequence.matters_scale[nodeIndex] == False:
if not Vector(1.0, 1.0, 1.0).eqDelta(transforms[fr][nodeIndex - 1][1], 0.02):
# print "Scale detected:"
# print " deltaScale=", transforms[fr][nodeIndex-1][1]
sequence.matters_scale[nodeIndex] = True
sequence.has_scale = True
# if nothing was actually animated abandon exporting the action.
if not (sequence.has_loc or sequence.has_rot or sequence.has_scale):
# don't write this warning anymore, not needed.
# Torque_Util.dump_writeWarning("Warning: Action has no keyframes, aborting export for this animation.")
return sequence, False
# set the aligned scale flag if we have scale.
if sequence.has_scale: sequence.flags |= Sequence.AlignedScale
# It should be safe to add this sequence to the list now.
# self.sequences.append(sequence)
# Now that we have all the transforms for each node at
# every frame, remove the ones that we don't need. This is much faster than doing
# two passes through the blender frames to determine what's animated and what's not.
for nodeIndex in range(1, len(self.nodes)):
if not sequence.matters_translation[nodeIndex]:
for frame in range(0, numOverallFrames):
sequence.frames[nodeIndex][frame][0] = None
if not sequence.matters_rotation[nodeIndex]:
for frame in range(0, numOverallFrames):
sequence.frames[nodeIndex][frame][1] = None
if not sequence.matters_scale[nodeIndex]:
for frame in range(0, numOverallFrames):
sequence.frames[nodeIndex][frame][2] = None
remove_translation, remove_rotation, remove_scale = True, True, True
if seqPrefs['Cyclic']:
for nodeIndex in range(1, len(self.nodes)):
# If we added any new translations, and the first frame is equal to the last,
# allow the next pass of nodes to happen, to remove the last frame.
# (This fixes the "dead-space" issue)
if len(sequence.frames[nodeIndex]) != 0:
if (sequence.frames[nodeIndex][0][0] != None) and (
sequence.frames[nodeIndex][-1][0] != None) and not sequence.frames[nodeIndex][0][
0].eqDelta(
sequence.frames[nodeIndex][-1][0], 0.01):
remove_translation = False
if (sequence.frames[nodeIndex][0][1] != None) and (
sequence.frames[nodeIndex][-1][1] != None) and not sequence.frames[nodeIndex][0][
1].eqDelta(
sequence.frames[nodeIndex][-1][1], 0.01):
remove_rotation = False
if (sequence.frames[nodeIndex][0][2] != None) and (
sequence.frames[nodeIndex][-1][2] != None) and not sequence.frames[nodeIndex][0][
2].eqDelta(
sequence.frames[nodeIndex][-1][2], 0.01):
remove_scale = False
# Determine if the change has affected all that we animate
if (remove_translation) and (remove_rotation) and (remove_scale):
removeLast = True
Torque_Util.dump_write(" Animates:")
if sequence.has_loc: Torque_Util.dump_write("loc")
if sequence.has_rot: Torque_Util.dump_write("rot")
if sequence.has_scale: Torque_Util.dump_write("scale")
if sequence.has_ground: Torque_Util.dump_write("ground")
Torque_Util.dump_writeln("")
# We can now reveal the true number of keyframes
# sequence.numKeyFrames = numFrameSamples-1
# Do a second pass on the nodes to remove the last frame for cyclic anims
if removeLast:
# Go through list of frames for nodes animated in sequence and delete the last frame from all of them
for nodeIndex in range(len(self.nodes)):
if sequence.matters_translation[nodeIndex] or sequence.matters_rotation[nodeIndex] or \
sequence.matters_scale[nodeIndex]:
del sequence.frames[nodeIndex][-1]
sequence.numKeyFrames -= 1
Torque_Util.dump_writeln(" Note: Duplicate frames removed, (was %d, now %d)" % (
sequence.numKeyFrames + 1, sequence.numKeyFrames))
# Calculate Bases
if sequence.has_loc:
sequence.baseTranslation = len(self.nodeTranslations)
else:
sequence.baseTranslation = -1
if sequence.has_rot:
sequence.baseRotation = len(self.nodeRotations)
else:
sequence.baseRotation = -1
if sequence.has_scale:
sequence.baseScale = len(self.nodeAlignedScales)
else:
sequence.baseScale = -1
# To simplify things, we now assume everything is internal and just dump the sequence
# Dump Frames
for node in sequence.frames:
if node == 0: continue
for frame in node:
if frame[0]:
self.nodeTranslations.append(frame[0])
if frame[1]:
self.nodeRotations.append(frame[1])
if frame[2]:
self.nodeAlignedScales.append(frame[2])
# Clean out temporary junk
del sequence.frames
return sequence, removeLast
def addSequenceTriggers(self, sequence, unsortedTriggers, nFrames):
print("addSequenceTriggers called!!!")
if sequence.firstTrigger == -1:
sequence.firstTrigger = len(self.triggers)
# Sort triggers by position
triggers = []
for u in unsortedTriggers:
if len(triggers) == 0:
triggers.append(u)
continue
for i in range(0, len(triggers)):
if (triggers[i][1] <= u[1]):
triggers.insert(i, u)
break
elif (i == (len(triggers) - 1)):
triggers.append(u)
triggers.reverse()
# Check for triggers with both on and off states
triggerState = []
for t in triggers:
triggerState.append(False)
for comp in triggers:
if (t[0] == comp[0]) and (t[2] != comp[2]):
                    # Trigger controls a state that is turned on and off, so the state must reverse
# if played backwards
triggerState[-1] = True
break
for i in range(0, len(triggers)):
# [ state(1-32), position(0-1.0), on(True/False) ]
if triggers[i][1] <= 1:
realPos = 0.0
else:
realPos = float(triggers[i][1] - 1) / (nFrames - 1)
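            # Worked example (hypothetical numbers): with nFrames = 31, a trigger placed at
            # frame 16 maps to realPos = (16 - 1) / (31 - 1) = 0.5, i.e. halfway through the
            # sequence; positions <= 1 are clamped to 0.0 by the branch above.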
print("realPos=", realPos)
self.triggers.append(Trigger(triggers[i][0], triggers[i][2], realPos, triggerState[i]))
del triggerState
sequence.numTriggers += len(triggers)
sequence.flags |= sequence.MakePath
# Add sequence matters for IFL animation.
def addSequenceIFL(self, sequence, numFrameSamples, sequenceKey):
sequence.matters_ifl = [False] * len(self.materials.materials)
if sequence.baseObjectState == -1:
sequence.baseObjectState = len(self.objectstates)
# Now we can dump each frame for the objects
# Sequence matters_ifl indexes iflmaterials.
for i in range(0, len(self.iflmaterials)):
mat = self.iflmaterials[i]
IFLMatName = self.sTable.get(mat.name)
# must strip last four chars from IFLMatName (".ifl")
if self.preferences.getTextPortion(sequenceKey['IFL']['Material']) == IFLMatName[0:len(IFLMatName) - 4]:
sequence.matters_ifl[i] = True
else:
pass
sequence.has_ifl = True
return sequence
# Processes a material ipo and incorporates it into the Action
def addSequenceVisibility(self, sequence, numOverallFrames, sequenceKey, startFrame, endFrame):
'''
This adds ObjectState tracks to the sequence.
Since blender's Actions don't support object or material ipo's, we need to use the ipos directly.
        In addition, a starting point and end point must be defined, which is useful when using the ipo in more than one animation track.
If the visibility subsequence is shorter than the other subsequences belonging to the same sequence,
sampling of the IPOs will continue past the end of the subsequence until the full sequence is finished.
If the visibility sequence is longer than the other subsequences, other subsequences will be sampled past the end of their
runs until the full sequence is finished.
NOTE: this function needs to be called AFTER finalizeObjects, for obvious reasons.
'''
scene = bpy.context.scene
sequence.matters_vis = [False] * len(self.objects)
# includes last frame
# numVisFrames = int((startFrame - endFrame) + 1)
# Just do it.
for i in range(0, len(self.objects)):
dObj = self.objects[i]
dObjName = self.sTable.get(dObj.name)
try:
keyedObj = sequenceKey['Vis']['Tracks'][dObjName]
except:
sequence.matters_vis[i] = False
continue
# skip this object if the vis track is not enabled
if not keyedObj['hasVisTrack']: continue
try:
if keyedObj['IPOType'] == "Object":
bObj = bpy.data.objects[keyedObj['IPOObject']]
elif keyedObj['IPOType'] == "Material":
bObj = bpy.data.objects[keyedObj['IPOObject']]
bIpo = bObj.getIpo()
IPOCurveName = getBlenderIPOChannelConst(keyedObj['IPOType'], keyedObj['IPOChannel'])
IPOCurve = None
IPOCurveConst = bIpo.curveConsts[IPOCurveName]
IPOCurve = bIpo[IPOCurveConst]
if IPOCurve == None: raise TypeError
except:
Torque_Util.dump_writeErr(
"Error: Could not get animation curve for visibility animation: %s " % sequence.name)
continue
sequence.matters_vis[i] = True
if sequence.baseObjectState == -1:
sequence.baseObjectState = len(self.objectstates)
# add the object states, include the last frame
for fr in range(startFrame, numOverallFrames + startFrame):
val = IPOCurve[int(fr)]
if val > 1.0:
val = 1.0
elif val < 0.0:
val = 0.0
# Make sure we're still in the user defined frame range.
if fr <= endFrame:
self.objectstates.append(ObjectState(val, 0, 0))
print("appending vis frame with val of:", val)
# If we're past the user defined frame range, pad out object states
# with copies of the good last frame state.
else:
val = IPOCurve[int(endFrame)]
if val > 1.0:
val = 1.0
elif val < 0.0:
val = 0.0
self.objectstates.append(ObjectState(val, 0, 0))
print("appending vis frame with val of:", val)
sequence.has_vis = True
return sequence
def convertAndDumpSequenceToDSQ(self, sequence, filename, version):
# Write entry for this in shape script, if necessary
self.externalSequences.append(self.sTable.get(sequence.nameIndex))
# Simple task of opening the file and dumping sequence data
dsq_file = open(filename, "wb")
self.writeDSQSequence(dsq_file, sequence, version) # Write only current sequence data
dsq_file.close()
# Remove anything we added (using addNodeSeq or addSequenceTrigger only) to the main list
if sequence.baseTranslation != -1: del self.nodeTranslations[
sequence.baseTranslation - 1:sequence.baseTranslation + sequence.numKeyFrames]
if sequence.baseRotation != -1: del self.nodeRotations[
sequence.baseRotation - 1:sequence.baseRotation + sequence.numKeyFrames]
if sequence.baseScale != -1: del self.nodeAlignedScales[
sequence.baseScale - 1:sequence.baseScale + sequence.numKeyFrames]
if sequence.firstTrigger != -1: del self.triggers[
sequence.firstTrigger - 1:sequence.firstTrigger + sequence.numTriggers]
if sequence.firstGroundFrame != -1:
del self.groundTranslations[
sequence.firstGroundFrame - 1:sequence.firstGroundFrame + sequence.numGroundFrames]
del self.groundRotations[sequence.firstGroundFrame - 1:sequence.firstGroundFrame + sequence.numGroundFrames]
# ^^ Add other data here once exporter has support for it.
# Remove sequence from list
for i in range(0, len(self.sequences)):
if self.sequences[i] == sequence:
del self.sequences[i]
break
return True
# Generic object addition
def addObject(self, object):
if object.type == "ARMATURE":
return self.addArmature(object)
elif object.type == "CAMERA":
return self.addNode(object)
elif object.type == "MESH":
return self.addDetailLevel([object], -1)
else:
Torque_Util.dump_writeln("addObject() failed for type %s!" % object.type)
return False
# Material addition
def addMaterial(self, imageName):
if imageName == None:
Torque_Util.dump_writeln("addMaterial() returning none")
return None
# find the material associated with the texture image file name
material = None
try:
Torque_Util.dump_writeln("test1")
mat = self.preferences['Materials'][imageName]
Torque_Util.dump_writeln("test2")
flags = 0x00000000
if mat['SWrap'] == True: flags |= dMaterial.SWrap
if mat['TWrap'] == True: flags |= dMaterial.TWrap
if mat['Translucent'] == True: flags |= dMaterial.Translucent
if mat['Additive'] == True: flags |= dMaterial.Additive
if mat['Subtractive'] == True: flags |= dMaterial.Subtractive
if mat['SelfIlluminating'] == True: flags |= dMaterial.SelfIlluminating
if mat['NeverEnvMap'] == True: flags |= dMaterial.NeverEnvMap
if mat['NoMipMap'] == True: flags |= dMaterial.NoMipMap
if mat['MipMapZeroBorder'] == True: flags |= dMaterial.MipMapZeroBorder
if mat['IFLMaterial'] == True: flags |= dMaterial.IFLMaterial
material = dMaterial(mat['BaseTex'], flags, -1, -1, -1, (1.0 / mat['detailScale']), mat['reflectance'])
# Must have something in the reflectance slot to prevent TGE from
# crashing when env mapping without a reflectance map.
material.reflectance = len(self.materials.materials)
if mat['DetailMapFlag'] == True and mat['DetailTex'] != None:
dmFlags = 0x00000000
if mat['SWrap'] == True: dmFlags |= dMaterial.SWrap
if mat['TWrap'] == True: dmFlags |= dMaterial.TWrap
dmFlags |= dMaterial.DetailMap
# detail_map = dMaterial(mat['DetailTex'], dmFlags,-1,-1,-1,1.0,mat['reflectance'])
detail_map = dMaterial(mat['DetailTex'], dmFlags, -1, -1, -1, 1.0, 0.0)
material.detail = self.materials.add(detail_map)
if mat['NeverEnvMap'] == False:
Torque_Util.dump_writeWarning(
" Warning: Material (%s) is using environment mapping with a detail map, strange things may happen!" % imageName)
if mat['BumpMapFlag'] == True and mat['BumpMapTex'] != None:
bmFlags = 0x00000000
if mat['SWrap'] == True: bmFlags |= dMaterial.SWrap
if mat['TWrap'] == True: bmFlags |= dMaterial.TWrap
bmFlags |= dMaterial.BumpMap
bump_map = dMaterial(mat['BumpMapTex'], bmFlags, -1, -1, -1, 1.0, mat['reflectance'])
material.bump = self.materials.add(bump_map)
if mat['ReflectanceMapFlag'] == True and mat['RefMapTex'] != None:
rmFlags = 0x00000000
if mat['SWrap'] == True: rmFlags |= dMaterial.SWrap
if mat['TWrap'] == True: rmFlags |= dMaterial.TWrap
rmFlags |= dMaterial.ReflectanceMap
refl_map = dMaterial(mat['RefMapTex'], rmFlags, -1, -1, -1, 1.0, mat['reflectance'])
material.reflectance = self.materials.add(refl_map)
except KeyError:
Torque_Util.dump_writeWarning(
" Warning: Texture Image (%s) is used on a mesh but could not be found in the material list!" % imageName)
return None
material.name = imageName
retVal = self.materials.add(material)
if self.preferences['TSEMaterial']:
self.addTGEAMaterial(imageName)
return retVal
# Material addition (TGEA mode)
def addTGEAMaterial(self, imageName):
mat = self.preferences['Materials'][imageName]
# Build the material string.
materialString = "new Material(%s)\n{\n" % (
finalizeImageName(SceneInfoClass.stripImageExtension(imageName), True))
materialString += "// Rendering Stage 0\n"
materialString += "baseTex[0] = \"./%s\";\n" % (
finalizeImageName(SceneInfoClass.stripImageExtension(imageName)))
if mat['DetailMapFlag'] == True and mat['DetailTex'] != None:
materialString += "detailTex[0] = \"./%s\";\n" % (mat['DetailTex'])
if mat['BumpMapFlag'] == True and mat['BumpMapTex'] != None:
materialString += "bumpTex[0] = \"./%s\";\n" % (mat['BumpMapTex'])
if mat['SelfIlluminating'] == True:
materialString += "emissive[0] = true;\n"
if mat['Translucent'] == True:
materialString += "translucent[0] = true;\n"
if mat['Additive'] == True:
materialString += "TranslucentBlendOp[0] = Add;\n" # <- not sure if it's Add or $Add, docs incomplete...
elif mat['Subtractive'] == True:
materialString += "TranslucentBlendOp[0] = Sub;\n" # <- ditto
# need to set a default blend op? Which one?
materialString += "};\n\n"
self.scriptMaterials.append(materialString)
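# Illustrative output of addTGEAMaterial above (assumptions for clarity: stripImageExtension()
# drops the ".png" suffix and finalizeImageName() returns the bare name) for a hypothetical
# material "rock.png" with SelfIlluminating and Translucent set and no detail or bump maps:
#
#   new Material(rock)
#   {
#   // Rendering Stage 0
#   baseTex[0] = "./rock";
#   emissive[0] = true;
#   translucent[0] = true;
#   };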
# Finalizes shape
def finalize(self, writeShapeScript=False):
pathSep = "/"
if "\\" in self.preferences['exportBasepath']: pathSep = "\\"
# Write out shape script
if writeShapeScript:
Torque_Util.dump_writeln(" Writing script%s%s%s.cs" % (
self.preferences['exportBasepath'], pathSep, self.preferences['exportBasename']))
shapeScript = open(
"%s%s%s.cs" % (self.preferences['exportBasepath'], pathSep, self.preferences['exportBasename']), "w")
shapeScript.write("datablock TSShapeConstructor(%sDts)\n" % self.preferences['exportBasename'])
shapeScript.write("{\n")
# don't need to write out the full path, in fact, it causes problems to do so. We'll just assume
# that the player is putting their shape script in the same folder as the .dts.
shapeScript.write(" baseShape = \"./%s\";\n" % (self.preferences['exportBasename'] + ".dts"))
count = 0
for sequence in self.externalSequences:
# shapeScript.write(" sequence%d = \"./%s_%s.dsq %s\";\n" % (count,self.preferences['exportBasepath'],sequence,sequence))
shapeScript.write(" sequence%d = \"./%s.dsq %s\";\n" % (count, sequence, sequence))
count += 1
shapeScript.write("};")
shapeScript.close()
# Write out TGEA Material Script
if self.preferences['TSEMaterial']:
Torque_Util.dump_writeln(
" Writing material script %s%smaterials.cs" % (self.preferences['exportBasepath'], pathSep))
materialScript = open("%s%smaterials.cs" % (self.preferences['exportBasepath'], pathSep), "w")
materialScript.write("// Script automatically generated by Blender DTS Exporter\n\n")
for materialDef in self.scriptMaterials:
materialScript.write(materialDef)
materialScript.write("// End of generated script\n")
materialScript.close()
# Write out IFL File
# Now we can dump each frame
for seqName in list(self.preferences['Sequences'].keys()):
seqPrefs = self.preferences['Sequences'][seqName]
if seqPrefs['IFL']['Enabled'] and validateIFL(seqName, seqPrefs) and seqPrefs['IFL']['WriteIFLFile']:
iflName = self.preferences.getTextPortion(seqPrefs['IFL']['Material'])
Torque_Util.dump_writeln(
" Writing IFL script %s%s%s.ifl" % (self.preferences['exportBasepath'], pathSep, iflName))
IFLScript = open("%s%s%s.ifl" % (self.preferences['exportBasepath'], pathSep, iflName), "w")
for frame in seqPrefs['IFL']['IFLFrames']:
IFLScript.write("%s %i\n" % (frame[0], frame[1]))
IFLScript.close()
def dumpShapeInfo(self):
Torque_Util.dump_writeln(" > Nodes")
for n in range(0, len(self.nodes)):
if self.nodes[n].parent == -1:
self.dumpShapeNode(n)
Torque_Util.dump_writeln(" > Objects")
for obj in self.objects:
Torque_Util.dump_write(" '%s' :" % self.sTable.get(obj.name))
for mesh in self.meshes[obj.firstMesh:obj.firstMesh + obj.numMeshes]:
if mesh.mtype == mesh.T_Standard:
Torque_Util.dump_write("Standard")
elif mesh.mtype == mesh.T_Skin:
Torque_Util.dump_write("Skinned")
elif mesh.mtype == mesh.T_Decal:
Torque_Util.dump_write("Decal")
elif mesh.mtype == mesh.T_Sorted:
Torque_Util.dump_write("Sorted")
elif mesh.mtype == mesh.T_Null:
Torque_Util.dump_write("Null")
else:
Torque_Util.dump_write("Unknown")
Torque_Util.dump_writeln("")
Torque_Util.dump_writeln(" > Materials")
for mat in self.materials.materials:
Torque_Util.dump_writeln(" %s" % mat.name)
Torque_Util.dump_writeln(" > Detail Levels")
for detail in self.detaillevels:
Torque_Util.dump_writeln(" %s (size : %d)" % (self.sTable.get(detail.name), detail.size))
if len(self.detaillevels) > 0:
Torque_Util.dump_writeln(" Smallest : %s (size : %d)" % (
self.sTable.get(self.detaillevels[self.mSmallestVisibleDL].name), self.mSmallestVisibleSize))
Torque_Util.dump_writeln(" > Internal Sequences")
for sequence in self.sequences:
Torque_Util.dump_writeln(" - %s" % self.sTable.get(sequence.nameIndex))
Torque_Util.dump_write(" Animates:")
if sequence.has_ifl: Torque_Util.dump_write("ifl")
if sequence.has_vis: Torque_Util.dump_write("vis")
if sequence.has_loc: Torque_Util.dump_write("loc")
if sequence.has_rot: Torque_Util.dump_write("rot")
if sequence.has_scale: Torque_Util.dump_write("scale")
if sequence.has_ground: Torque_Util.dump_write("ground")
Torque_Util.dump_writeln("")
Torque_Util.dump_writeln(" Frames: %d" % sequence.numKeyFrames)
Torque_Util.dump_writeln(" Ground Frames: %d" % sequence.numGroundFrames)
Torque_Util.dump_writeln(" Triggers: %d" % sequence.numTriggers)
def dumpShapeNode(self, nodeIdx, indent=0):
Torque_Util.dump_write(" " * (indent + 4))
Torque_Util.dump_writeln(
"^^ Bone [%s] (parent %d)" % (self.sTable.get(self.nodes[nodeIdx].name), self.nodes[nodeIdx].parent))
for n in range(0, len(self.nodes)):
if self.nodes[n].parent == nodeIdx:
self.dumpShapeNode(n, indent + 1)
| 45.328767
| 143
| 0.574431
|
9510f7e2ff5506dbdfe8e62a51d7e9b2d05c4878
| 7,012
|
py
|
Python
|
algorithms/POMDP/3-DRQN-Store-State-HeavenHellSimple/train.py
|
zhihanyang2022/drqn
|
ac2482e3b42094e6242c042583dbbd9c98e4750b
|
[
"MIT"
] | 5
|
2021-03-28T14:12:40.000Z
|
2021-11-19T20:46:10.000Z
|
algorithms/POMDP/3-DRQN-Store-State-HeavenHellSimple/train.py
|
zhihanyang2022/drqn
|
ac2482e3b42094e6242c042583dbbd9c98e4750b
|
[
"MIT"
] | null | null | null |
algorithms/POMDP/3-DRQN-Store-State-HeavenHellSimple/train.py
|
zhihanyang2022/drqn
|
ac2482e3b42094e6242c042583dbbd9c98e4750b
|
[
"MIT"
] | null | null | null |
import os
import sys
import random
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from model import DRQN
from memory import Memory
# from tensorboardX import SummaryWriter
import argparse
import wandb
from heaven_hell_simple import HeavenHellSimple
# ==================================================
# hyper-parameters that need tuning
"""
python algorithms/POMDP/3-DRQN-Store-State-HeavenHellSimple/train.py \
--lr=0.00001 \
--use_experts=0 \
--debug_mode=0 \
--device_str=cpu \
--use_deeper_net=1 \
--use_early_stopping=1 \
--seed=1
"""
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, help='learning rate (e.g., 0.001)')
parser.add_argument('--use_experts', type=int, help='whether to use two experts to guide exploration (1 for on; 0 for off)')
parser.add_argument('--seed', type=int, help='seed for np.random.seed and torch.manual_seed (e.g., 42)')
parser.add_argument('--debug_mode', type=int)
parser.add_argument('--device_str', type=str)
parser.add_argument('--use_deeper_net', type=int)
parser.add_argument('--use_early_stopping', type=int)
args = parser.parse_args()
lr = args.lr
use_experts = bool(args.use_experts)
seed = args.seed
debug_mode = bool(args.debug_mode)
device = torch.device(args.device_str)
use_deeper_net = bool(args.use_deeper_net)
use_early_stopping = bool(args.use_early_stopping)
if debug_mode: print('Running debug mode (i.e., without wandb)')
# ==================================================
# ==================================================
# fixed hyper-parameters
gamma = 0.99
sequence_length = 8
max_episodes = int(10 * 1e3) # 10k episodes; less than or equal to 10k * 20 = 200k steps
epsilon = 1.0 # initial uniform exploration
terminal_epsilon = 0.1
decay_over_episodes = int(3 * 1e3) # 3k episodes
decay_per_episode = (epsilon - terminal_epsilon) / decay_over_episodes
replay_memory_capacity = max_episodes # one slot per episode, i.e. up to 10k stored episodes
batch_size = 32
update_target = 1000 # once per 1000 steps
log_interval = 10 # one console log per 10 episodes
target_score = 0.99
patience = 3
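# With the values above, decay_per_episode = (1.0 - 0.1) / 3000 = 3e-4, so epsilon
# reaches terminal_epsilon (0.1) after roughly 3k episodes and stays there.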
# ==================================================
# ==================================================
# logging settings
if debug_mode is False:
group_name = f"lr={lr} use_experts={use_experts} use_deeper_net={use_deeper_net} use_early_stopping={use_early_stopping}"
run_name = f"seed={seed}"
print('Group name:')
print(group_name)
print('Run name:')
print(run_name)
wandb.init(
project="drqn",
entity='pomdpr',
group=group_name,
settings=wandb.Settings(_disable_stats=True),
name=run_name
)
# ==================================================
def get_action(obs, target_net, epsilon, env, hidden, expert_actions=None):
action, hidden = target_net.get_action(obs, hidden)
if np.random.rand() <= epsilon:
if expert_actions is None:
return env.action_space.sample(), hidden
else:
return np.random.choice(expert_actions), hidden
else:
return action, hidden
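# Note on get_action above: with probability epsilon the greedy action from target_net is
# discarded in favour of a random one; when expert_actions is supplied (use_experts=1) that
# random choice is restricted to the experts' suggestions. In every branch the recurrent
# hidden state returned by the network is propagated.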
def update_target_model(online_net, target_net):
# Target <- Net
target_net.load_state_dict(online_net.state_dict())
def one_hot_encode_obs(obs:int):
one_hot_repr = np.zeros((HeavenHellSimple().observation_space_dim, ))
one_hot_repr[obs] = 1
return one_hot_repr
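# For an observation space of size N, one_hot_encode_obs(k) returns a length-N vector with a
# single 1.0 at index k, e.g. (assuming N=4 for illustration) one_hot_encode_obs(2) -> [0, 0, 1, 0].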
env = HeavenHellSimple()
np.random.seed(seed)
torch.manual_seed(seed)
num_inputs = env.observation_space_dim
num_actions = env.action_space_dim
print('observation size:', num_inputs)
print('action size:', num_actions)
online_net = DRQN(num_inputs, num_actions, sequence_length, use_deeper_net)
target_net = DRQN(num_inputs, num_actions, sequence_length, use_deeper_net)
update_target_model(online_net, target_net)
optimizer = optim.Adam(online_net.parameters(), lr=lr)
# if use_experts is False:
# writer = SummaryWriter('logs/normal')
# else:
# writer = SummaryWriter('logs/experts')
online_net.to(device)
target_net.to(device)
online_net.train()
target_net.train()
memory = Memory(replay_memory_capacity, sequence_length)
steps = 0 # number of actions taken in the environment / number of parameter updates
loss = 0
running_score = 0
converged = False
patience_used = 0
for e in range(max_episodes):
done = False
obs = env.reset()
obs = one_hot_encode_obs(obs)
obs = torch.Tensor(obs).to(device)
hidden = (torch.Tensor().new_zeros(1, 1, 16), torch.Tensor().new_zeros(1, 1, 16))
episode_len = 0 # incremented per action taken
trajectory = []
while not done:
if use_experts is False: # do the normal thing
action, new_hidden = get_action(obs, target_net, epsilon, env, hidden)
else:
action, new_hidden = get_action(obs, target_net, epsilon, env, hidden, expert_actions=env.get_expert_actions())
episode_len += 1
trajectory.append(
(episode_len, int(torch.argmax(obs)), int(action))
)
next_obs, reward, done = env.step(action)
next_obs = one_hot_encode_obs(next_obs)
next_obs = torch.Tensor(next_obs)
mask = 0 if done else 1
if use_early_stopping is False:
memory.push(obs, next_obs, action, reward, mask, hidden)
else:
if converged is False:
memory.push(obs, next_obs, action, reward, mask, hidden)
hidden = new_hidden
obs = next_obs
if len(memory) > batch_size and (use_early_stopping is False or converged is False):
# Truth table for (use_early_stopping is False or converged is False):
# use_early_stopping | converged | result
#        True        |   True    | False -> skip the update
#        True        |   False   | True  -> do the update
#        False       |   True    | True  -> do the update
#        False       |   False   | True  -> do the update
batch = memory.sample(batch_size)
loss = DRQN.train_model(online_net, target_net, optimizer, batch, batch_size, sequence_length, gamma, use_deeper_net)
if steps % update_target == 0:
update_target_model(online_net, target_net)
steps += 1
trajectory.append(
(episode_len + 1, int(torch.argmax(obs)), None)
)
if epsilon > terminal_epsilon:
epsilon -= decay_per_episode
# wandb logging
if debug_mode is False:
wandb.log({
"return": reward,
"episode_len": episode_len
})
# console logging
running_score = 0.95 * running_score + 0.05 * reward
if running_score >= target_score:
patience_used += 1
if patience_used >= patience:
converged = True
else:
patience_used = 0
if e % log_interval == 0:
print(f'Iteration {e} / {max_episodes} | Running score {round(running_score, 2)} | Epsilon {round(epsilon, 2)}')
print(trajectory)
| 29.462185
| 129
| 0.645893
|
fddc481bae7c46b47a263326b7e3cdac41e70998
| 710
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/scattermapbox/marker/colorbar/tickformatstop/_dtickrange.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/Lib/site-packages/plotly/validators/scattermapbox/marker/colorbar/tickformatstop/_dtickrange.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/Lib/site-packages/plotly/validators/scattermapbox/marker/colorbar/tickformatstop/_dtickrange.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(
self,
plotly_name="dtickrange",
parent_name="scattermapbox.marker.colorbar.tickformatstop",
**kwargs
):
super(DtickrangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
items=kwargs.pop(
"items",
[
{"editType": "calc", "valType": "any"},
{"editType": "calc", "valType": "any"},
],
),
**kwargs
)
| 29.583333
| 75
| 0.529577
|
5fa5642b9baa29634c4d96e766d13516614b24d8
| 8,161
|
py
|
Python
|
tools/utils_3d.py
|
sunnyln/birdnet2
|
d1a2b703475345d887c325c135013ed9f72d3a57
|
[
"Apache-2.0"
] | null | null | null |
tools/utils_3d.py
|
sunnyln/birdnet2
|
d1a2b703475345d887c325c135013ed9f72d3a57
|
[
"Apache-2.0"
] | null | null | null |
tools/utils_3d.py
|
sunnyln/birdnet2
|
d1a2b703475345d887c325c135013ed9f72d3a57
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import cv2
import csv
import pdb
import math
from tools.utils_calib import Calibration
# color_dict = {'Car':[0.2, 0.2, 0.9],
# 'Van':[0.4, 0.2, 0.4],
# 'Truck':[0.6, 0.2, 0.6],
# 'Pedestrian':[0.9, 0.2, 0.2],
# 'Person_sitting':[0.6, 0.2, 0.4],
# 'Cyclist':[0.2, 0.9, 0.2],
# 'Tram':[0.2, 0.6, 0.2],
# 'Misc':[0.2, 0.4, 0.2],
# 'DontCare':[0.2, 0.2, 0.2]}
color_dict = {'Car':[216, 216, 100],
'Pedestrian':[0, 210, 0],
'Cyclist':[0, 128, 255]}
def dottedline(img,p0,p1,linepoints,width,color):
xpts = np.linspace(p0[0],p1[0],linepoints)
ypts = np.linspace(p0[1],p1[1],linepoints)
for xp,yp in zip(xpts,ypts):
cv2.circle(img,(int(xp),int(yp)),width,color,-1)
return img
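# dottedline draws `linepoints` filled circles of radius `width` evenly spaced between p0 and
# p1, e.g. (hypothetical values) dottedline(img, (0, 0), (100, 0), 5, 2, (0, 255, 255)).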
def _draw_projection_obstacle_to_cam(obs, calib_file, bvres, only_front, draw, img=None, bv_img=None, is_kitti_gt=False, n=None, is_kitti_ann=False):
'''
This function fulfils two complementary tasks:
* Obtain the 2D bbox in the camera view
* Draw BV and camera predictions in 3D
Which of them is performed depends entirely on the draw parameter.
'''
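# A hypothetical call (argument values are illustrative only): with draw=True the box is
# painted into both images and the clipped 2D bbox is returned as well; with draw=False only
# the bbox [u1, v1, u2, v2] (or None when fully off-screen) is computed.
#
#   img, bv_img, bbox = _draw_projection_obstacle_to_cam(
#       obs, 'calib/000000.txt', bvres=0.05, only_front=True,
#       draw=True, img=cam_img, bv_img=bev_img)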
calib_c = Calibration(calib_file)
yaw = -obs.yaw + math.pi/2
if is_kitti_gt or is_kitti_ann:
xyz = calib_c.project_rect_to_velo(np.array([[obs.location.x,obs.location.y,obs.location.z]]))
x = xyz[0][0]
y = xyz[0][1]
z = xyz[0][2] + obs.height / 2
else:
x = obs.location.x
y = obs.location.y
z = obs.location.z + obs.height / 2
l = obs.length
h = obs.height
w = obs.width
centroid = np.array([x, y])
corners = np.array([
[x - l / 2., y + w / 2.],
[x + l / 2., y + w / 2.],
[x + l / 2., y - w / 2.],
[x - l / 2., y - w / 2.]
])
# Compute rotation matrix
c, s = np.cos(yaw), np.sin(yaw)
R = np.array([[c, -s], [s, c]])
# Rotate all corners at once by yaw
rot_corners = np.dot(corners - centroid, R.T) + centroid
if draw:
# Convert the rotated corners from world coordinates to BV image coordinates
xs = bv_img.shape[1] / 2 - rot_corners[:, 1] / bvres
ys = (bv_img.shape[0] if only_front else bv_img.shape[0]/2) - rot_corners[:, 0] / bvres
xsc = bv_img.shape[1] / 2 - centroid[1] / bvres
ysc = (bv_img.shape[0] if only_front else bv_img.shape[0]/2) - centroid[0] / bvres
pt1 = np.array([xs[0], ys[0]])
pt2 = np.array([xs[1], ys[1]])
pt3 = np.array([xs[2], ys[2]])
pt4 = np.array([xs[3], ys[3]])
ctr = np.array([pt1, pt2, pt3, pt4]).reshape((-1, 1, 2)).astype(np.int32)
for j in range(4):
k = (j + 1) % 4
if is_kitti_gt:
# print(ctr[j][0],ctr[k][0])
bv_img = dottedline(bv_img,ctr[j][0],ctr[k][0],5,2,(0,135,135))
else:
cv2.line(bv_img,( int(ctr[j][0][0]), int(ctr[j][0][1])),
( int(ctr[k][0][0]), int(ctr[k][0][1])),
color_dict[obs.kind_name],3)
arrow_len = (l/bvres)/2.+10
xs1 = arrow_len*math.cos(obs.yaw)
ys1 = arrow_len*math.sin(obs.yaw)
bv_img = cv2.arrowedLine(bv_img, (int(xsc),int(ysc)), (int(xs1+xsc),int(ys1+ysc)),
color_dict[obs.kind_name],3)
x1 = rot_corners[0,0]
x2 = rot_corners[1,0]
x3 = rot_corners[2,0]
x4 = rot_corners[3,0]
y1 = rot_corners[0,1]
y2 = rot_corners[1,1]
y3 = rot_corners[2,1]
y4 = rot_corners[3,1]
# Project the 8 vertices of the prism
vertices = []
vertices.append([x1, y1, z+h/2])
vertices.append([x2, y2, z+h/2])
vertices.append([x3, y3, z+h/2])
vertices.append([x4, y4, z+h/2])
vertices.append([x1, y1, z-h/2])
vertices.append([x2, y2, z-h/2])
vertices.append([x3, y3, z-h/2])
vertices.append([x4, y4, z-h/2])
image_pts = calib_c.project_velo_to_image(np.array(vertices))
# 3D draw: front and back faces plus the connecting side edges
if draw:
if is_kitti_gt:
for j in np.arange(0,8,2):
# print(image_pts[j][0],[image_pts[j][0],image_pts[j][1]],[image_pts[j+1][0],image_pts[j+1][1]])
img = dottedline(img,[image_pts[j][0],image_pts[j][1]],[image_pts[j+1][0],image_pts[j+1][1]],10,2,(0,255,255))
img = dottedline(img,[image_pts[0][0],image_pts[0][1]],[image_pts[4][0],image_pts[4][1]],7,2,(0,255,255))
img = dottedline(img,[image_pts[4][0],image_pts[4][1]],[image_pts[7][0],image_pts[7][1]],7,2,(0,255,255))
img = dottedline(img,[image_pts[7][0],image_pts[7][1]],[image_pts[3][0],image_pts[3][1]],7,2,(0,255,255))
img = dottedline(img,[image_pts[3][0],image_pts[3][1]],[image_pts[0][0],image_pts[0][1]],7,2,(0,255,255))
img = dottedline(img,[image_pts[1][0],image_pts[1][1]],[image_pts[5][0],image_pts[5][1]],7,2,(0,255,255))
img = dottedline(img,[image_pts[5][0],image_pts[5][1]],[image_pts[6][0],image_pts[6][1]],7,2,(0,255,255))
img = dottedline(img,[image_pts[6][0],image_pts[6][1]],[image_pts[2][0],image_pts[2][1]],7,2,(0,255,255))
img = dottedline(img,[image_pts[2][0],image_pts[2][1]],[image_pts[1][0],image_pts[1][1]],7,2,(0,255,255))
else:
for j in range(0,3):
#0,0-3,0
cv2.line(img,( int(np.ceil(image_pts[j][0])), int(np.ceil(image_pts[j][1]))),
( int(np.ceil(image_pts[j+1][0])), int(np.ceil(image_pts[j+1][1]))),
color_dict[obs.kind_name],3)
#4,0-7,0
cv2.line(img,( int(np.ceil(image_pts[j+4][0])), int(np.ceil(image_pts[j+4][1]))),
( int(np.ceil(image_pts[j+5][0])), int(np.ceil(image_pts[j+5][1]))),
color_dict[obs.kind_name],3)
cv2.line(img,( int(np.ceil(image_pts[0][0])), int(np.ceil(image_pts[0][1]))),
( int(np.ceil(image_pts[3][0])), int(np.ceil(image_pts[3][1]))),
color_dict[obs.kind_name],3)
cv2.line(img,( int(np.ceil(image_pts[4][0])), int(np.ceil(image_pts[4][1]))),
( int(np.ceil(image_pts[7][0])), int(np.ceil(image_pts[7][1]))),
color_dict[obs.kind_name],3)
for j in range(0,2):
cv2.line(img,( int(np.ceil(image_pts[j*2][0])), int(np.ceil(image_pts[j*2][1]))),
( int(np.ceil(image_pts[j*2+4][0])), int(np.ceil(image_pts[j*2+4][1]))),
color_dict[obs.kind_name],3)
cv2.line(img,( int(np.ceil(image_pts[j*2+1][0])), int(np.ceil(image_pts[j*2+1][1]))),
( int(np.ceil(image_pts[j*2+5][0])), int(np.ceil(image_pts[j*2+5][1]))),
color_dict[obs.kind_name],3)
# Extreme object points in the image
image_u1 = np.min(image_pts[:, 0]) # Limits for kitti dataset
image_v1 = np.min(image_pts[:, 1])
image_u2 = np.max(image_pts[:, 0])
image_v2 = np.max(image_pts[:, 1])
if (image_u1 <= 0 and image_u2 <= 0) or \
(image_u1 >= 1242. - 1 and image_u2 >= 1242. - 1) or \
(image_v1 <= 0 and image_v2 <= 0) or \
(image_v1 >= 375. - 1 and image_v2 >= 375. - 1):
return img, bv_img, None
image_u1 = np.min((np.max((image_u1,0.)),1242.))
image_v1 = np.min((np.max((image_v1,0.)),375.))
image_u2 = np.min((np.max((image_u2,0.)),1242.))
image_v2 = np.min((np.max((image_v2,0.)),375.))
# if draw:
# #2D draw: front-side
# if is_kitti_gt:
# cv2.rectangle(img, (int(image_u1), int(image_v1)),
# (int(image_u2), int(image_v2)),
# (255,255,255), 2)
# else:
# cv2.rectangle(img, (int(image_u1), int(image_v1)),
# (int(image_u2), int(image_v2)),
# (100,100,100), 4)
return img, bv_img, [image_u1, image_v1, image_u2, image_v2]
| 42.952632
| 149
| 0.513908
|
f03a52935db0e45d557a3a1b737f409064966208
| 1,085
|
py
|
Python
|
cogs/info/prefix.py
|
FevenKitsune/Fox-Utilities
|
95bc63897a35977989378cf54d5aa8356b238ec8
|
[
"MIT"
] | 3
|
2020-07-19T15:11:21.000Z
|
2021-07-01T13:40:48.000Z
|
cogs/info/prefix.py
|
FevenKitsune/Fox-Utilities
|
95bc63897a35977989378cf54d5aa8356b238ec8
|
[
"MIT"
] | 23
|
2019-05-03T15:42:14.000Z
|
2021-05-16T03:19:59.000Z
|
cogs/info/prefix.py
|
FevenKitsune/Fox-Utilities
|
95bc63897a35977989378cf54d5aa8356b238ec8
|
[
"MIT"
] | 1
|
2020-07-23T19:46:56.000Z
|
2020-07-23T19:46:56.000Z
|
from discord import Embed
from discord.ext.commands import Cog, command
from config.globals import message_color
from utils.generators import generate_footer
class Prefix(Cog):
category = "info"
def __init__(self, client):
self.client = client
@command(
name="prefix",
brief="Information about setting Fox Utilities prefix.",
usage="",
help="Returns information on how to use the Fox Utilities prefix tags to configure the prefix used in your "
"guild."
)
async def prefix(self, ctx):
"""Gives the user information on prefix tags. This allows per-guild prefix settings."""
em = Embed(
title="Fox Utilities Prefix Tags",
description="Create a server role with the syntax `fox_prefix:desired_prefix`. Assign this role to the "
"Fox Utilities bot to give it a new prefix.",
color=message_color)
em.set_footer(text=generate_footer(ctx))
await ctx.send(embed=em)
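# Example based on the embed text above: a guild role named "fox_prefix:!" assigned to the
# bot would (hypothetically) make "!" the command prefix for that guild.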
def setup(client):
client.add_cog(Prefix(client))
| 31
| 116
| 0.652535
|
e670da0bc9798434e0da10a75c20e777a2367847
| 3,567
|
py
|
Python
|
pyarray/pyarray.py
|
AlanCristhian/pyarray
|
632407c71cdcb884347f43848fdb1b9deb40bdea
|
[
"MIT"
] | null | null | null |
pyarray/pyarray.py
|
AlanCristhian/pyarray
|
632407c71cdcb884347f43848fdb1b9deb40bdea
|
[
"MIT"
] | null | null | null |
pyarray/pyarray.py
|
AlanCristhian/pyarray
|
632407c71cdcb884347f43848fdb1b9deb40bdea
|
[
"MIT"
] | null | null | null |
from array import array
__all__ = ["Int8", "UInt8", "Int16", "UInt16", "Int32", "UInt32", "Int64",
"UInt64", "Float32", "Float64", ]
_TEMPLATE = """
class {name}Meta(type):
def __call__(cls, *initializer):
if len(initializer) == 1:
if isinstance(initializer[0], (list, tuple)):
initializer = initializer[0]
{name}_array = array("{typecode}", initializer)
class {name}Class:
# Methods:
append = {name}_array.append
buffer_info = {name}_array.buffer_info
byteswap = {name}_array.byteswap
count = {name}_array.count
extend = {name}_array.extend
fromfile = {name}_array.fromfile
fromlist = {name}_array.fromlist
frombytes = {name}_array.frombytes
index = {name}_array.index
insert = {name}_array.insert
pop = {name}_array.pop
remove = {name}_array.remove
reverse = {name}_array.reverse
tofile = {name}_array.tofile
tolist = {name}_array.tolist
tobytes = {name}_array.tobytes
# Attributes:
typecode = {name}_array.typecode
itemsize = {name}_array.itemsize
# Methods defined here:
__add__ = {name}_array.__add__
__contains__ = {name}_array.__contains__
__copy__ = {name}_array.__copy__
__deepcopy__ = {name}_array.__deepcopy__
__delitem__ = {name}_array.__delitem__
__eq__ = {name}_array.__eq__
__ge__ = {name}_array.__ge__
__getattribute__ = {name}_array.__getattribute__
__getitem__ = {name}_array.__getitem__
__gt__ = {name}_array.__gt__
__iadd__ = {name}_array.__iadd__
__imul__ = {name}_array.__imul__
__iter__ = {name}_array.__iter__
__le__ = {name}_array.__le__
__len__ = {name}_array.__len__
__lt__ = {name}_array.__lt__
__mul__ = {name}_array.__mul__
__ne__ = {name}_array.__ne__
__reduce_ex__ = {name}_array.__reduce_ex__
__rmul__ = {name}_array.__rmul__
__setitem__ = {name}_array.__setitem__
__sizeof__ = {name}_array.__sizeof__
def __repr__(self):
return {name}_array.__repr__() \
.replace("array('{typecode}', ", "{name}(")
cls.__parent = {name}Class
cls.__typecode = {name}_array.__repr__().split("'")[1]
return {name}Class()
def __instancecheck__(self, other):
return (self.__parent in type(other).__mro__) \
and (self.__typecode == '{typecode}')
class {name}(metaclass={name}Meta):
pass
"""
# I define this just to calm down linters. Just ignore the next 10 lines
Int8 = array
UInt8 = array
Int16 = array
UInt16 = array
Int32 = array
UInt32 = array
Int64 = array
UInt64 = array
Float32 = array
Float64 = array
# Here is the real definition
exec(_TEMPLATE.format(name="Int8", typecode="b"))
exec(_TEMPLATE.format(name="UInt8", typecode="B"))
exec(_TEMPLATE.format(name="Int16", typecode="h"))
exec(_TEMPLATE.format(name="UInt16", typecode="H"))
exec(_TEMPLATE.format(name="Int32", typecode="l"))
exec(_TEMPLATE.format(name="UInt32", typecode="L"))
exec(_TEMPLATE.format(name="Int64", typecode="q"))
exec(_TEMPLATE.format(name="UInt64", typecode="Q"))
exec(_TEMPLATE.format(name="Float32", typecode="f"))
exec(_TEMPLATE.format(name="Float64", typecode="d"))
| 33.336449
| 74
| 0.599383
|
9117ca6e2b0d75549d2a60d5508ffa79a7b724d2
| 329
|
py
|
Python
|
invenio_formatter/context_processors/__init__.py
|
torchingloom/invenio-formatter
|
e9b8b35ef53734c0500b5528ae3e9eb8ab0f2ccf
|
[
"MIT"
] | 1
|
2015-08-19T12:49:20.000Z
|
2015-08-19T12:49:20.000Z
|
invenio_formatter/context_processors/__init__.py
|
torchingloom/invenio-formatter
|
e9b8b35ef53734c0500b5528ae3e9eb8ab0f2ccf
|
[
"MIT"
] | 39
|
2015-08-12T11:25:27.000Z
|
2021-04-30T14:45:13.000Z
|
invenio_formatter/context_processors/__init__.py
|
torchingloom/invenio-formatter
|
e9b8b35ef53734c0500b5528ae3e9eb8ab0f2ccf
|
[
"MIT"
] | 32
|
2015-08-12T07:44:07.000Z
|
2021-11-25T09:13:10.000Z
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Context processors for Invenio."""
from __future__ import absolute_import, print_function
| 27.416667
| 72
| 0.732523
|
cf7551891fac09044c2a380e85116a24a445f9b0
| 445
|
py
|
Python
|
fossil/auth/models.py
|
Remember-Fossil/Fossil-Server
|
45b9002a8431fa9377ee3eba23ab01aeb564559a
|
[
"MIT"
] | null | null | null |
fossil/auth/models.py
|
Remember-Fossil/Fossil-Server
|
45b9002a8431fa9377ee3eba23ab01aeb564559a
|
[
"MIT"
] | null | null | null |
fossil/auth/models.py
|
Remember-Fossil/Fossil-Server
|
45b9002a8431fa9377ee3eba23ab01aeb564559a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from google.appengine.ext import db
class User(db.Model):
facebook_id = db.StringProperty()
name = db.StringProperty()
profile_image = db.StringProperty()
created_at = db.DateTimeProperty(auto_now_add=True)
class FacebookSession(db.Model):
user = db.ReferenceProperty(User)
token = db.StringProperty()
expires = db.IntegerProperty()
created_at = db.DateTimeProperty(auto_now_add=True)
| 26.176471
| 55
| 0.716854
|
ac4a1ebab001735fdd08a4a7600654fd465fe6bd
| 737
|
py
|
Python
|
setup.py
|
Cracky5457/ebrains-drive
|
d5c08d44c423c38e91e246859ab34e2eeca109b1
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Cracky5457/ebrains-drive
|
d5c08d44c423c38e91e246859ab34e2eeca109b1
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Cracky5457/ebrains-drive
|
d5c08d44c423c38e91e246859ab34e2eeca109b1
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
from version_query import predict_version_str
__version__ = predict_version_str()
setup(name='ebrains-drive',
version=__version__,
license='Apache-2.0 License',
description='Python client interface for EBrains Collaboratory Seafile storage',
author='Ebrains, CNRS',
author_email='support@ebrains.eu',
url='http://seafile.com',
platforms=['Any'],
packages=find_packages(),
install_requires=['requests'],
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python'],
)
| 33.5
| 86
| 0.641791
|