| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
XtDac/ChandraUtils/configuration.py
|
giacomov/XtDac
| 0
|
12782451
|
<reponame>giacomov/XtDac
import yaml
import os
from XtDac.ChandraUtils.sanitize_filename import sanitize_filename
class ReadOnlyContainer(object):
def __init__(self, dictionary):
self._dict = dict(dictionary)
def __getitem__(self, item):
return self._dict[item]
def get_configuration(filename):
filename = sanitize_filename(filename)
assert os.path.exists(filename), "Configuration file %s does not exist!" % filename
try:
with open(filename, "r") as f:
configuration_dict = yaml.safe_load(f)
except Exception:
raise IOError("Couldn't read configuration file %s. File is not readable, or wrong format." % (filename))
configuration = ReadOnlyContainer(configuration_dict)
return configuration
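# --- Editor's illustrative sketch, not part of the original module. ---
# Assumes a hypothetical YAML file "xtdac_config.yml" containing a hypothetical
# key "work_directory"; both names are placeholders for illustration only.
if __name__ == "__main__":
    config = get_configuration("xtdac_config.yml")
    print(config["work_directory"])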
| 2.703125
| 3
|
DS&Algo Programs in Python/inserting_heap.py
|
prathimacode-hub/HacktoberFest-2020
| 386
|
12782452
|
<reponame>prathimacode-hub/HacktoberFest-2020
import heapq
H = [21,1,45,78,3,5]
# Convert to a heap
heapq.heapify(H)
print(H)
# Add element
heapq.heappush(H,8)
print(H)
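# Editor's illustrative addition: heappop removes and returns the smallest
# element while preserving the heap invariant, so repeated pops yield the
# elements in sorted order.
print(heapq.heappop(H))  # 1, the current minimum
print(H)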
| 2.546875
| 3
|
fiftyone/core/view.py
|
seantrue/fiftyone
| 0
|
12782453
|
<reponame>seantrue/fiftyone
"""
Dataset views.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from collections import OrderedDict
from copy import copy, deepcopy
import numbers
from bson import ObjectId
import fiftyone.core.aggregations as foa
import fiftyone.core.collections as foc
import fiftyone.core.media as fom
import fiftyone.core.sample as fos
class DatasetView(foc.SampleCollection):
"""A view into a :class:`fiftyone.core.dataset.Dataset`.
Dataset views represent ordered collections of subsets of samples in a
dataset.
Operations on dataset views are designed to be chained together to yield
the desired subset of the dataset, which is then iterated over to directly
access the sample views. Each stage in the pipeline defining a dataset view
is represented by a :class:`fiftyone.core.stages.ViewStage` instance.
The stages of a dataset view specify:
- what subset of samples (and their order) should be included
- what "parts" (fields and their elements) of the sample should be
included
Samples retrieved from dataset views are returned as
:class:`fiftyone.core.sample.SampleView` objects, as opposed to
:class:`fiftyone.core.sample.Sample` objects, since they may contain a
subset of the sample's content.
Example use::
# Print paths for 5 random samples from the test split of a dataset
view = dataset.match_tag("test").take(5)
for sample in view:
print(sample.filepath)
Args:
dataset: a :class:`fiftyone.core.dataset.Dataset`
"""
def __init__(self, dataset):
self._dataset = dataset
self._stages = []
def __len__(self):
return self.aggregate(foa.Count()).count
def __getitem__(self, sample_id):
if isinstance(sample_id, numbers.Integral):
raise KeyError(
"Accessing samples by numeric index is not supported. "
"Use sample IDs or slices"
)
if isinstance(sample_id, slice):
return self._slice(sample_id)
view = self.match({"_id": ObjectId(sample_id)})
try:
return view.first()
except ValueError:
raise KeyError("No sample found with ID '%s'" % sample_id)
def __copy__(self):
view = self.__class__(self._dataset)
view._stages = deepcopy(self._stages)
return view
@property
def media_type(self):
"""The media type of the underlying dataset."""
return self._dataset.media_type
@property
def name(self):
"""The name of the view."""
return self.dataset_name + "-view"
@property
def dataset_name(self):
"""The name of the underlying dataset."""
return self._dataset.name
@property
def info(self):
"""The :meth:`fiftyone.core.dataset.Dataset.info` dict of the
underlying dataset.
"""
return self._dataset.info
@property
def stages(self):
"""The list of :class:`fiftyone.core.stages.ViewStage` instances in
this view's pipeline.
"""
return self._stages
def summary(self):
"""Returns a string summary of the view.
Returns:
a string summary
"""
aggs = self.aggregate(
[foa.Count(), foa.Distinct("tags")], _attach_frames=False
)
elements = [
"Dataset: %s" % self.dataset_name,
"Media type: %s" % self.media_type,
"Num samples: %d" % aggs[0].count,
"Tags: %s" % aggs[1].values,
"Sample fields:",
self._dataset._to_fields_str(self.get_field_schema()),
]
if self.media_type == fom.VIDEO:
elements.extend(
[
"Frame fields:",
self._dataset._to_fields_str(
self.get_frame_field_schema()
),
]
)
if self._stages:
pipeline_str = " " + "\n ".join(
[
"%d. %s" % (idx, str(d))
for idx, d in enumerate(self._stages, 1)
]
)
else:
pipeline_str = " ---"
elements.extend(["Pipeline stages:", pipeline_str])
return "\n".join(elements)
def iter_samples(self):
"""Returns an iterator over the samples in the view.
Returns:
an iterator over :class:`fiftyone.core.sample.SampleView` instances
"""
selected_fields, excluded_fields = self._get_selected_excluded_fields()
filtered_fields = self._get_filtered_fields()
for d in self._aggregate(hide_frames=True):
try:
frames = d.pop("_frames", [])
doc = self._dataset._sample_dict_to_doc(d)
sample = fos.SampleView(
doc,
self._dataset,
selected_fields=selected_fields,
excluded_fields=excluded_fields,
filtered_fields=filtered_fields,
)
if self.media_type == fom.VIDEO:
sample.frames._set_replacements(frames)
yield sample
except Exception as e:
raise ValueError(
"Failed to load sample from the database. This is likely "
"due to an invalid stage in the DatasetView"
) from e
def get_field_schema(
self, ftype=None, embedded_doc_type=None, include_private=False
):
"""Returns a schema dictionary describing the fields of the samples in
the view.
Args:
ftype (None): an optional field type to which to restrict the
returned schema. Must be a subclass of
:class:`fiftyone.core.fields.Field`
embedded_doc_type (None): an optional embedded document type to
which to restrict the returned schema. Must be a subclass of
:class:`fiftyone.core.odm.BaseEmbeddedDocument`
include_private (False): whether to include fields that start with
`_` in the returned schema
Returns:
an ``OrderedDict`` mapping field names to field types
"""
field_schema = self._dataset.get_field_schema(
ftype=ftype,
embedded_doc_type=embedded_doc_type,
include_private=include_private,
)
return self._get_filtered_schema(field_schema)
def get_frame_field_schema(
self, ftype=None, embedded_doc_type=None, include_private=False
):
"""Returns a schema dictionary describing the fields of the frames of
the samples in the view.
Only applicable for video datasets.
Args:
ftype (None): an optional field type to which to restrict the
returned schema. Must be a subclass of
:class:`fiftyone.core.fields.Field`
embedded_doc_type (None): an optional embedded document type to
which to restrict the returned schema. Must be a subclass of
:class:`fiftyone.core.odm.BaseEmbeddedDocument`
include_private (False): whether to include fields that start with
`_` in the returned schema
Returns:
a dictionary mapping field names to field types, or ``None`` if
the dataset is not a video dataset
"""
field_schema = self._dataset.get_frame_field_schema(
ftype=ftype,
embedded_doc_type=embedded_doc_type,
include_private=include_private,
)
return self._get_filtered_schema(field_schema, frames=True)
def get_tags(self):
"""Returns the list of unique tags of samples in the view.
Returns:
a list of tags
"""
return self.aggregate(foa.Distinct("tags")).values
def create_index(self, field):
"""Creates a database index on the given field, enabling efficient
sorting on that field.
Args:
field: the name of the field to index
"""
self._dataset.create_index(field)
def to_dict(self, rel_dir=None, frame_labels_dir=None, pretty_print=False):
"""Returns a JSON dictionary representation of the view.
Args:
rel_dir (None): a relative directory to remove from the
``filepath`` of each sample, if possible. The path is converted
to an absolute path (if necessary) via
``os.path.abspath(os.path.expanduser(rel_dir))``. The typical
use case for this argument is that your source data lives in
a single directory and you wish to serialize relative, rather
than absolute, paths to the data within that directory
frame_labels_dir (None): a directory in which to write per-sample
JSON files containing the frame labels for video samples. If
omitted, frame labels will be included directly in the returned
JSON dict (which can be quite large for video datasets
containing many frames). Only applicable to video datasets
pretty_print (False): whether to render frame labels JSON in human
readable format with newlines and indentations. Only applicable
to video datasets when a ``frame_labels_dir`` is provided
Returns:
a JSON dict
"""
d = super().to_dict(
rel_dir=rel_dir,
frame_labels_dir=frame_labels_dir,
pretty_print=pretty_print,
)
samples = d.pop("samples") # hack so that `samples` is last in JSON
d["stages"] = [s._serialize() for s in self._stages]
d["samples"] = samples
return d
def _pipeline(
self,
pipeline=None,
hide_frames=False,
squash_frames=False,
attach_frames=True,
):
_pipeline = []
for s in self._stages:
_pipeline.extend(s.to_mongo(self))
if pipeline is not None:
_pipeline.extend(pipeline)
return self._dataset._pipeline(
pipeline=_pipeline,
hide_frames=hide_frames,
squash_frames=squash_frames,
attach_frames=attach_frames,
)
def _aggregate(
self,
pipeline=None,
hide_frames=False,
squash_frames=False,
attach_frames=True,
):
_pipeline = self._pipeline(
pipeline=pipeline,
hide_frames=hide_frames,
squash_frames=squash_frames,
attach_frames=attach_frames,
)
return self._dataset._sample_collection.aggregate(_pipeline)
@property
def _doc(self):
return self._dataset._doc
def _get_pipeline(self):
pipeline = []
for s in self._stages:
pipeline.extend(s.to_mongo())
return pipeline
def _serialize(self):
return [s._serialize() for s in self._stages]
def _slice(self, s):
if s.step is not None and s.step != 1:
raise ValueError(
"Unsupported slice '%s'; step is not supported" % s
)
_len = None
start = s.start
if start is not None:
if start < 0:
_len = len(self)
start += _len
if start <= 0:
start = None
stop = s.stop
if stop is not None and stop < 0:
if _len is None:
_len = len(self)
stop += _len
if start is None:
if stop is None:
return self
return self.limit(stop)
if stop is None:
return self.skip(start)
return self.skip(start).limit(stop - start)
def _add_view_stage(self, stage):
stage.validate(self)
view = copy(self)
view._stages.append(stage)
return view
def _get_filtered_schema(self, schema, frames=False):
selected_fields, excluded_fields = self._get_selected_excluded_fields(
frames=frames
)
if selected_fields is not None:
schema = OrderedDict(
{fn: f for fn, f in schema.items() if fn in selected_fields}
)
if excluded_fields is not None:
schema = OrderedDict(
{
fn: f
for fn, f in schema.items()
if fn not in excluded_fields
}
)
return schema
def _get_selected_excluded_fields(self, frames=False):
selected_fields = None
excluded_fields = set()
for stage in self._stages:
_selected_fields = stage.get_selected_fields(frames=frames)
if _selected_fields:
if selected_fields is None:
selected_fields = set(_selected_fields)
else:
selected_fields.intersection_update(_selected_fields)
_excluded_fields = stage.get_excluded_fields(frames=frames)
if _excluded_fields:
excluded_fields.update(_excluded_fields)
if selected_fields is not None:
selected_fields.difference_update(excluded_fields)
excluded_fields = None
return selected_fields, excluded_fields
def _get_filtered_fields(self):
filtered_fields = set()
for stage in self._stages:
_filtered_fields = stage.get_filtered_list_fields()
if _filtered_fields:
filtered_fields.update(_filtered_fields)
return filtered_fields
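# --- Editor's illustrative sketch, not part of the original module. ---
# Shows how __getitem__/_slice above translate Python slices into view stages,
# assuming a hypothetical `dataset` whose view() method returns a DatasetView
# (illustration only; nothing is executed here).
#
#     view = dataset.view()
#     view[10:20]   # equivalent to view.skip(10).limit(10)
#     view[:5]      # equivalent to view.limit(5)
#     view[5:]      # equivalent to view.skip(5)
#     view[::2]     # raises ValueError: step is not supported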
| 2.921875
| 3
|
billy/scrape/actions.py
|
paultag/billy
| 0
|
12782454
|
<filename>billy/scrape/actions.py
import re
from collections import namedtuple, defaultdict, Iterable
class Rule(namedtuple('Rule', 'regexes types stop attrs')):
'''If any of ``regexes`` matches the action text, the resulting
action's types should include ``types``.
If stop is true, no other rules should be tested after this one;
in other words, this rule conclusively determines the action's
types and attrs.
The resulting action should contain ``attrs``, which basically
enables overwriting certain attributes, like the chamber if
the action was listed in the wrong column.
'''
def __new__(_cls, regexes, types=None, stop=False,
flexible_whitespace=True, **kwargs):
'Create new instance of Rule(regexes, types, stop, attrs)'
# Regexes can be a string, regex, or sequence.
if isinstance(regexes, basestring) or hasattr(regexes, 'match'):
regexes = (regexes,)
compiled_regexes = []
# pre-compile any string regexes
for regex in regexes:
if isinstance(regex, basestring):
if flexible_whitespace:
regex = re.sub('\s{1,4}', '\s{,10}', regex)
compiled_regexes.append(re.compile(regex))
else:
compiled_regexes.append(regex)
# Types can be a string or a sequence.
if isinstance(types, basestring):
types = set([types])
types = set(types or [])
return tuple.__new__(_cls, (compiled_regexes, types, stop, kwargs))
def match(self, text):
attrs = {}
matched = False
for regex in self.regexes:
m = regex.search(text)
if m:
matched = True
# add any matched attrs
attrs.update(m.groupdict())
if matched:
return attrs
else:
# return None if no regexes matched
return None
class BaseCategorizer(object):
'''A class that exposes a main categorizer function
and before and after hooks, in case categorization requires specific
steps that make use of action or category info. The return
value is a 2-tuple of category types and a dictionary of
attributes to overwrite on the target action object.
'''
rules = []
def __init__(self):
pass
def categorize(self, text):
# run pre-categorization hook on text
text = self.pre_categorize(text)
types = set()
return_val = defaultdict(set)
for rule in self.rules:
attrs = rule.match(text)
# matched if attrs is not None - empty attr dict means a match
if attrs is not None:
# add types, rule attrs and matched attrs
types |= rule.types
# Also add its specified attrs.
for k, v in attrs.items():
return_val[k].add(v)
return_val.update(**rule.attrs)
# break if there was a match and rule says so, otherwise
# continue testing against other rules
if rule.stop:
break
# set type
return_val['type'] = list(types)
# run post-categorize hook
return_val = self.post_categorize(return_val)
return self.finalize(return_val)
def finalize(self, return_val):
'''Before the types and attrs get passed to the
importer they need to be altered by converting sets to
lists, etc.
'''
attrs = return_val
return_val = {}
# Get rid of defaultdict.
for k, v in attrs.items():
# Skip empties.
if not isinstance(v, Iterable):
continue
v = filter(None, v)
# Get rid of sets.
if isinstance(v, set):
v = list(v)
# Some vals should be strings, not seqs.
if k == 'actor' and len(v) == 1:
v = v.pop()
return_val[k] = v
return return_val
def pre_categorize(self, text):
'''A precategorization hook. Takes & returns text. '''
return text
def post_categorize(self, return_val):
'''A post-categorization hook. Takes & returns attrs dict. '''
return return_val
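# --- Editor's illustrative sketch, not part of the original module. ---
# A minimal categorizer built from the classes above, assuming the Python 2
# environment this module targets (it relies on `basestring`). The rule
# patterns and type labels below are hypothetical examples, not billy's
# real action rules.
class ExampleCategorizer(BaseCategorizer):
    rules = [
        Rule(r'Referred to (?P<committees>.+)', types='committee:referred'),
        Rule(r'(?i)passed', types='bill:passed', stop=True),
    ]

if __name__ == '__main__':
    categorizer = ExampleCategorizer()
    # Prints the matched types plus the captured `committees` attribute.
    print(categorizer.categorize('Referred to Committee on Finance'))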
| 2.875
| 3
|
wallet_one/__init__.py
|
everhide/wallet-one-payments
| 0
|
12782455
|
<reponame>everhide/wallet-one-payments
from typing import Dict
from collections import defaultdict
import binascii
from hashlib import md5, sha1
from enum import Enum
class TypeCrypt(Enum):
"""Types of crypts."""
MD5 = 1
SHA1 = 2
class Payment(object):
"""Wallet one payments."""
def __init__(
self,
merchant_id: str,
amount: float,
secret: str,
description: str,
url_success: str,
url_fail: str,
currency: int = 643,
override_fields: Dict = None,
crypto_type: int = TypeCrypt.MD5.value,
):
"""Wallet one init."""
self._secret = secret
self._crypto_type = crypto_type
self._form = {
'WMI_MERCHANT_ID': merchant_id,
'WMI_PAYMENT_AMOUNT': str(round(amount, 1)),
'WMI_CURRENCY_ID': str(currency),
'WMI_DESCRIPTION': description if description else 'Products',
'WMI_SUCCESS_URL': url_success,
'WMI_FAIL_URL': url_fail,
}
if override_fields and isinstance(override_fields, dict):
self._form.update(override_fields)
def _params(self) -> str:
"""Returns ordered form params as valued string."""
params = [
(pname, pval)
for pname, pval in self._form.items()
if pname != 'WMI_SIGNATURE'
]
lcase = lambda s: str(s).lower() # noqa
lists_by_keys = defaultdict(list)
for key, value in params:
lists_by_keys[key].append(value)
buffer = ''
for key in sorted(lists_by_keys, key=lcase):
for value in sorted(lists_by_keys[key], key=lcase):
buffer += str(value)
return buffer
def _sign_data(self, raw_data: bytes) -> str:
"""Returns utf-decoded signature from form params as bytes."""
eds = md5 if self._crypto_type == TypeCrypt.MD5.value else sha1
base_view = binascii.b2a_base64(eds(raw_data).digest())[:-1]
return base_view.decode('utf-8')
@property
def form(self) -> Dict:
"""Returns form data with signature for request."""
form_data = dict(self._form)
form_data.update({'WMI_SIGNATURE': self.signature})
return form_data
@property
def signature(self) -> str:
"""Returns form signature for request."""
params = f'{self._params().encode().decode("1251")}{self._secret}'
return self._sign_data(params.encode())
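# --- Editor's illustrative sketch, not part of the original module. ---
# All argument values below are hypothetical placeholders; `form` returns the
# payment fields together with the computed WMI_SIGNATURE.
if __name__ == '__main__':
    payment = Payment(
        merchant_id='123456789',
        amount=100.0,
        secret='merchant-secret-key',
        description='Test order',
        url_success='https://example.com/success',
        url_fail='https://example.com/fail',
    )
    print(payment.form)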
| 2.9375
| 3
|
src/algorithms/simulated_annealing.py
|
amasiukevich/ALHE
| 0
|
12782456
|
<gh_stars>0
from .base_algorithm import BaseAlgorithm
import numpy as np
class SimulatedAnnealing(BaseAlgorithm):
def __init__(self,
begin_curr_idx,
end_curr_idx,
num_currs,
rates_data,
random_state,
next_state_method="all",
num_iterations=1000,
population_size=10,
heat=10):
super().__init__(begin_curr_idx, end_curr_idx, num_currs, rates_data, random_state, next_state_method)
self.num_iterations = num_iterations
self.population_size = population_size
self.heat = heat
self.random_state = random_state
self.population = self.generate_population()
self.log = list(self.population)
self.best_state = self.find_best(self.population)
def calc_heat(self, price1, price2):
return np.exp((-1) * (np.abs(price1 - price2) / self.heat))
def generate_population(self):
population = []
for i in range(self.population_size):
population.append(self.init_state())
return population
def optimize(self):
for i in range(self.num_iterations):
new_state = self.next_state(list(self.best_state))
if self.calc_price(new_state) > self.calc_price(self.best_state):
self.best_state = list(new_state)
elif self.random_state.uniform() < self.calc_heat(
self.calc_price(new_state),
self.calc_price(self.best_state)
):
self.best_state = list(new_state)
self.log.append(new_state)
best_state = self.find_best(self.log)
return best_state, self.calc_price(best_state)
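# --- Editor's illustrative sketch, not part of the original module. ---
# Reproduces the acceptance rule used in optimize(): a worse state is still
# accepted with probability exp(-|price_new - price_best| / heat), so larger
# `heat` values accept worse moves more often. The prices and heat below are
# hypothetical numbers chosen only to illustrate the formula.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    heat = 10.0
    price_best, price_new = 105.0, 100.0  # the candidate state is worse
    accept_prob = np.exp(-abs(price_new - price_best) / heat)  # ~0.61
    print(accept_prob, rng.uniform() < accept_prob)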
| 3.09375
| 3
|
npreadtext/_loadtxt.py
|
rossbar/npreadtext
| 0
|
12782457
|
<filename>npreadtext/_loadtxt.py
import numpy as np
from ._readers import read
def _loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding='bytes', max_rows=None, **kwargs):
"""
Monkeypatched version of `np.loadtxt`. Unlike loadtxt it allows some
additional keyword arguments, such as `quote='"'`.
Please check `npreadtext.read` for details.
"""
if delimiter is None:
delimiter = ''
elif isinstance(delimiter, bytes):
delimiter.decode("latin1")
if dtype is None:
dtype = np.float64
if ndmin is None:
ndmin = 0
if ndmin not in [0, 1, 2]:
raise ValueError(f'Illegal value of ndmin keyword: {ndmin}')
comment = comments
# Type conversions for Py3 convenience
if comment is None:
comment = ''
else:
if isinstance(comment, (str, bytes)):
comment = [comment]
comment = [x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
# Disable quoting unless passed:
quote = kwargs.pop('quote', '')
arr = read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
converters=converters, skiprows=skiprows, usecols=usecols,
unpack=unpack, ndmin=ndmin, encoding=encoding,
max_rows=max_rows, quote=quote, **kwargs)
return arr
try:
# Try giving some reasonable docs, but __doc__ could be None.
_loadtxt.__doc__ += np.loadtxt.__doc__
except:
pass
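# --- Editor's illustrative sketch, not part of the original module. ---
# Demonstrates the extra `quote` keyword that plain np.loadtxt does not accept.
# The CSV content and temporary-file handling are hypothetical; how `read`
# treats quoted fields is taken from the docstring above, not verified here.
if __name__ == "__main__":
    import os
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as f:
        f.write('1,"2",3\n4,"5",6\n')
    try:
        print(_loadtxt(f.name, delimiter=",", quote='"'))
    finally:
        os.remove(f.name)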
| 2.6875
| 3
|
prosemirror/schema/list/__init__.py
|
p7g/prosemirror-py
| 18
|
12782458
|
from .schema_list import * # noqa
| 1
| 1
|
backend/api/db/schemas/users.py
|
kkevinn114/Yacht
| 1
|
12782459
|
from fastapi_users import models
| 1.101563
| 1
|
tests/test_index.py
|
alytle/local-lambda-toolkit
| 0
|
12782460
|
import mock
import unittest
from mock import patch, Mock, MagicMock
import boto3
from botocore.stub import Stubber
import sys
sys.path.append("..")
import awslambda
class TestHandler(unittest.TestCase):
def test_handler(self):
"""
Test the handler operates as expected.
"""
pass
# test_event = MagicMock()
# test_context = MagicMock()
# aws_account_id.return_value = '1234567890'
# index.handler(test_event, test_context)
def main():
unittest.main()
if __name__ == '__main__':
main()
| 2.625
| 3
|
py3canvas/apis/brand_configs.py
|
tylerclair/py3canvas
| 0
|
12782461
|
<reponame>tylerclair/py3canvas
"""BrandConfigs API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
class BrandConfigsAPI(BaseCanvasAPI):
"""BrandConfigs API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for BrandConfigsAPI."""
super(BrandConfigsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.BrandConfigsAPI")
def get_brand_config_variables_that_should_be_used_for_this_domain(self):
"""
Get the brand config variables that should be used for this domain.
Will redirect to a static json file that has all of the brand
variables used by this account. Even though this is a redirect,
do not store the redirected url since if the account makes any changes
it will redirect to a new url. Needs no authentication.
"""
path = {}
data = {}
params = {}
self.logger.debug(
"GET /api/v1/brand_variables with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/brand_variables".format(**path),
data=data,
params=params,
no_data=True,
)
| 2.5
| 2
|
lang/Python/draw-a-cuboid-1.py
|
ethansaxenian/RosettaDecode
| 1
|
12782462
|
def _pr(t, x, y, z):
txt = '\n'.join(''.join(t[(n,m)] for n in range(3+x+z)).rstrip()
for m in reversed(range(3+y+z)))
return txt
def cuboid(x,y,z):
t = {(n,m):' ' for n in range(3+x+z) for m in range(3+y+z)}
xrow = ['+'] + ['%i' % (i % 10) for i in range(x)] + ['+']
for i,ch in enumerate(xrow):
t[(i,0)] = t[(i,1+y)] = t[(1+z+i,2+y+z)] = ch
if _debug: print(_pr(t, x, y, z))
ycol = ['+'] + ['%i' % (j % 10) for j in range(y)] + ['+']
for j,ch in enumerate(ycol):
t[(0,j)] = t[(x+1,j)] = t[(2+x+z,1+z+j)] = ch
zdepth = ['+'] + ['%i' % (k % 10) for k in range(z)] + ['+']
if _debug: print(_pr(t, x, y, z))
for k,ch in enumerate(zdepth):
t[(k,1+y+k)] = t[(1+x+k,1+y+k)] = t[(1+x+k,k)] = ch
return _pr(t, x, y, z)
_debug = False
if __name__ == '__main__':
for dim in ((2,3,4), (3,4,2), (4,2,3)):
print("CUBOID%r" % (dim,), cuboid(*dim), sep='\n')
| 2.90625
| 3
|
TZ/normalization/test.py
|
dreamiond/TZ-KPrototypes
| 0
|
12782463
|
from kmodes.util.dissim import num_TZ_dissim,cat_TZ_dissim
from sklearn.decomposition import PCA
import numpy
centroid = [
[1,2,3],
[5,6,6]
]
Xnum = [
[54,2,44],
[89,6,4],
[1.5,0,-5],
[5346,874,212]
]
centroid = numpy.array(centroid)
Xnum = numpy.array(Xnum)
x = numpy.array([[1,2,3],[2,3,3],[12938,9999,666],[54,11,21354]])
pca = PCA(n_components=1)
newx = pca.fit_transform(x)
print(newx)
| 2.59375
| 3
|
tests/int_rep/test_ops.py
|
thomashopkins32/LEAP
| 0
|
12782464
|
<reponame>thomashopkins32/LEAP<gh_stars>0
"""Unit tests for operators in the integer representation package."""
from collections import Counter
import pytest
from scipy import stats
import toolz
from leap_ec.individual import Individual
import leap_ec.ops
import leap_ec.int_rep.ops as intrep_ops
from leap_ec import statistical_helpers as stat
##############################
# Tests for mutate_randint
##############################
def collect_two_gene_mutation_counts(mutator, N: int):
"""Helper to collect the distribution of results when we
apply mutation to two small individuals."""
# Set up arrays to collect the values of 4 different loci after mutation
ind0_gene0_values = []
ind0_gene1_values = []
ind1_gene0_values = []
ind1_gene1_values = []
for _ in range(N):
# Set up two parents with fixed genomes, two genes each
ind1 = Individual([0, 0])
ind2 = Individual([1, 1])
population = iter([ind1, ind2])
# Mutate the parents
result = mutator(population)
result = list(result) # Pulse the iterator
# Collect the values of each of the genes after mutation
ind0_gene0_values.append(result[0].genome[0])
ind0_gene1_values.append(result[0].genome[1])
ind1_gene0_values.append(result[1].genome[0])
ind1_gene1_values.append(result[1].genome[1])
# Count the number of times that each gene value occurs at each locus
ind0_gene0_counts = Counter(ind0_gene0_values)
ind0_gene1_counts = Counter(ind0_gene1_values)
ind1_gene0_counts = Counter(ind1_gene0_values)
ind1_gene1_counts = Counter(ind1_gene1_values)
return [ [ ind0_gene0_counts, ind0_gene1_counts ],
[ ind1_gene0_counts, ind1_gene1_counts ] ]
@pytest.mark.stochastic
def test_mutate_randint1():
"""If you send me two individuals with two genes each and ask for 1 gene to
be mutated on average, then on average each gene has a probability
of 0.5 of being mutated."""
N = 1000 # We'll sample 1,000 independent genomes
mutator = intrep_ops.mutate_randint(bounds=[(0, 1), (0, 1)], expected_num_mutations=1)
observed = collect_two_gene_mutation_counts(mutator, N)
# Expected distribution of mutations.
# We arrive at this by the following reasoning: each gene has a 1/L = 0.5
# chance of not being mutated, in which case it keeps its original value.
# Otherwise, its value is sampled uniformly from the set {0, 1}.
expected_ind0_gene0 = { 0: 0.5*N + 0.25*N, 1: 0.25*N }
expected_ind0_gene1 = expected_ind0_gene0
expected_ind1_gene0 = { 0: 0.25*N, 1: 0.5*N + 0.25*N }
expected_ind1_gene1 = expected_ind1_gene0
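    # Editor's note, illustrative arithmetic consistent with the reasoning above:
    # P(gene keeps its original value) = P(not mutated) + P(mutated) * P(resampled to same value)
    #                                  = 0.5 + 0.5 * 0.5 = 0.75
    # hence 0.5*N + 0.25*N expected counts for the original value and 0.25*N for the other.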
# Use a chi2 test to see whether the observed gene-value counts
# differ significantly from the expected distributions.
p = 0.001
assert(stat.stochastic_equals(expected_ind0_gene0, observed[0][0], p=p))
assert(stat.stochastic_equals(expected_ind0_gene1, observed[0][1], p=p))
assert(stat.stochastic_equals(expected_ind1_gene0, observed[1][0], p=p))
assert(stat.stochastic_equals(expected_ind1_gene1, observed[1][1], p=p))
@pytest.mark.stochastic
def test_mutate_randint2():
"""If we set the expected number of mutations to 2 when our genomes have
only 2 genes, then each gene is always mutated, meaning individuals are
completely resampled from a uniform distribution."""
N = 1000 # We'll sample 1,000 independent genomes
mutator = intrep_ops.mutate_randint(bounds=[(0, 1), (0, 1)], expected_num_mutations=2)
observed = collect_two_gene_mutation_counts(mutator, N)
# Expected distribution of mutations.
# We arrive at this by the following reasoning: since we only have
# two genes, our mutation probability is 2/L = 1.0. So all four genes
# should be sampled uniformly from the set {0, 1}.
expected = { 0: 0.5*N, 1: 0.5*N }
p = 0.001
assert(stat.stochastic_equals(expected, observed[0][0], p=p))
assert(stat.stochastic_equals(expected, observed[0][1], p=p))
assert(stat.stochastic_equals(expected, observed[1][0], p=p))
assert(stat.stochastic_equals(expected, observed[1][1], p=p))
@pytest.mark.stochastic
def test_mutate_randint3():
"""If you send me two individuals with two genes each and ask for a mutations
probability of 0.2, then that's what will happen."""
N = 1000 # We'll sample 1,000 independent genomes
mutator = intrep_ops.mutate_randint(bounds=[(0, 1), (0, 1)], probability=0.2)
observed = collect_two_gene_mutation_counts(mutator, N)
# Expected distribution of mutations.
# We arrive at this by the following reasoning: each gene has a 0.8
# chance of not being mutated, in which case it keeps its original value.
# Otherwise, its value is sampled uniformly from the set {0, 1}.
expected_ind0_gene0 = { 0: 0.8*N + 0.1*N, 1: 0.1*N }
expected_ind0_gene1 = expected_ind0_gene0
expected_ind1_gene0 = { 0: 0.1*N, 1: 0.8*N + 0.1*N }
expected_ind1_gene1 = expected_ind1_gene0
# Use a chi2 test to see whether the observed gene-value counts
# differ significantly from the expected distributions.
p = 0.001
assert(stat.stochastic_equals(expected_ind0_gene0, observed[0][0], p=p))
assert(stat.stochastic_equals(expected_ind0_gene1, observed[0][1], p=p))
assert(stat.stochastic_equals(expected_ind1_gene0, observed[1][0], p=p))
assert(stat.stochastic_equals(expected_ind1_gene1, observed[1][1], p=p))
@pytest.mark.stochastic
def test_mutate_randint4():
"""If you send me two individuals with two genes each and ask for a mutations
probability of 1.0, then all genes should be completely resampled from a
uniform distribution."""
N = 1000 # We'll sample 1,000 independent genomes
mutator = intrep_ops.mutate_randint(bounds=[(0, 1), (0, 1)], probability=1.0)
observed = collect_two_gene_mutation_counts(mutator, N)
# Expected distribution of mutations.
# We arrive at this by the following reasoning: with probability=1.0 every
# gene is mutated, so each gene's value is sampled uniformly from the
# set {0, 1}.
expected = { 0: 0.5*N, 1: 0.5*N }
# Use a chi2 test to see whether the observed gene-value counts
# differ significantly from the expected distributions.
p = 0.001
assert(stat.stochastic_equals(expected, observed[0][0], p=p))
assert(stat.stochastic_equals(expected, observed[0][1], p=p))
assert(stat.stochastic_equals(expected, observed[1][0], p=p))
assert(stat.stochastic_equals(expected, observed[1][1], p=p))
def test_mutate_randint5():
"""If we fail to provide either expected_num_mutations or a probability parameter,
an exception should occur when the operator is used."""
mutator = intrep_ops.mutate_randint(bounds=[(0, 1), (0, 1)])
ind1 = Individual([0, 0])
ind2 = Individual([1, 1])
population = iter([ind1, ind2])
result = mutator(population)
with pytest.raises(ValueError):
# Pulse the iterator so mutation gets executed
result = list(result)
def test_mutate_randint6():
"""If we provide a value for both expected_num_mutations and the probability parameter,
an exception should occur when the operator is used."""
mutator = intrep_ops.mutate_randint(bounds=[(0, 1), (0, 1)],
expected_num_mutations=1,
probability=0.1)
ind1 = Individual([0, 0])
ind2 = Individual([1, 1])
population = iter([ind1, ind2])
result = mutator(population)
with pytest.raises(ValueError):
# Pulse the iterator so mutation gets executed
result = list(result)
def test_mutate_randint_pipe():
""" This tests pipeline integration
"""
ind1 = Individual([0, 0, 0])
ind2 = Individual([1, 1, 1])
population = iter([ind1, ind2])
bounds = [(-100, 100), (0, 25), (-10, 10)]
# Test that mutate_randint can be plugged into a pipeline since we
# were experiencing an error when trying to do this. The error turned out
# to be that `bounds=` wasn't included in the call, which meant that python
# tried to immediately invoke the `mutate_randint` instead of delaying
# execution per the pipeline calls.
results = toolz.pipe(population,
leap_ec.ops.clone,
intrep_ops.mutate_randint(bounds=bounds, expected_num_mutations=1),
# intrep_ops.mutate_randint(bounds), INCORRECT USAGE
leap_ec.ops.pool(size=2))
assert len(results)
##############################
# Tests for mutate_binomial
##############################
def test_mutate_binomial_bounds():
"""If we apply a wide mutation distribution repeatedly, it should never stray
outside of the provided bounds.
This test runs the stochastic function repeatedly, but we don't mark it as a
stochastic test because it should never fail unless there is actually a
fault."""
operator = intrep_ops.mutate_binomial(std=20, bounds=[(0, 10), (2, 20)],
expected_num_mutations=1)
N = 100
for i in range(N):
population = iter([ Individual([5,10]) ])
mutated = next(operator(population))
assert(mutated.genome[0] >= 0)
assert(mutated.genome[0] <= 10)
assert(mutated.genome[1] >= 2)
assert(mutated.genome[1] <= 20)
@pytest.mark.stochastic
def test_mutate_binomial_dist():
"""When we apply binomial mutation repeatedly, the resulting distribution
of offspring should follow the expected theoretical distribution."""
N = 5000 # Number of mutants to generate
binom_n = 10000 # "coin flips" parameter for the binomial
std = 2.5 # Standard deviation of the mutation distribution
# We'll set up our operator with infinite bounds, so we needn't worry about clipping
operator = intrep_ops.mutate_binomial(std=std, expected_num_mutations=2,
bounds=[(-float('inf'), float('inf')), (-float('inf'), float('inf'))])
# Any value could appear, but we'll focus on measuring just a few
# nearby values
genome = [5, 10]
gene0_observed_dist = { '3': 0, '4': 0, '5': 0, '6': 0, '7':0 }
gene1_observed_dist = { '8': 0, '9': 0, '10': 0, '11': 0, '12': 0 }
# Count the observed mutations in N trials
for i in range(N):
population = iter([ Individual(genome) ])
mutated = next(operator(population))
gene0, gene1 = mutated.genome
gene0, gene1 = str(gene0), str(gene1)
# Count the observed values of the first gene
if gene0 in gene0_observed_dist.keys():
gene0_observed_dist[gene0] += 1
# Count the observed values of the second gene
if gene1 in gene1_observed_dist.keys():
gene1_observed_dist[gene1] += 1
# Set up the expected distribution by using SciPy's binomial PMF function
binom_p = intrep_ops._binomial_p_from_std(binom_n, std)
binom = stats.binom(binom_n, binom_p)
mu = binom_n * binom_p # Mean of a binomial distribution is n*p
gene0_expected_dist = { k: int(N*binom.pmf(int(mu - (genome[0] - int(k))))) for k in gene0_observed_dist.keys() }
gene1_expected_dist = { k: int(N*binom.pmf(int(mu - (genome[1] - int(k))))) for k in gene1_observed_dist.keys() }
# Toss all the other values under one value
gene0_observed_dist['other'] = N - sum(gene0_observed_dist.values())
gene1_observed_dist['other'] = N - sum(gene1_observed_dist.values())
gene0_expected_dist['other'] = N - sum(gene0_expected_dist.values())
gene1_expected_dist['other'] = N - sum(gene1_expected_dist.values())
p = 0.01
assert(stat.stochastic_equals(gene0_expected_dist, gene0_observed_dist, p=p))
assert(stat.stochastic_equals(gene1_expected_dist, gene1_observed_dist, p=p))
def test_mutate_binomial_err1():
"""If we fail to provide either expected_num_mutations or a probability parameter,
an exception should occur when the operator is used."""
mutator = intrep_ops.mutate_binomial(std=1, bounds=[(0, 1), (0, 1)])
ind1 = Individual([0, 0])
ind2 = Individual([1, 1])
population = iter([ind1, ind2])
result = mutator(population)
with pytest.raises(ValueError):
# Pulse the iterator so mutation gets executed
result = list(result)
def test_mutate_binomial_err2():
"""If we provide a value for both expected_num_mutations and the probability parameter,
an exception should occur when the operator is used."""
mutator = intrep_ops.mutate_binomial(std=1, bounds=[(0, 1), (0, 1)],
expected_num_mutations=1,
probability=0.1)
ind1 = Individual([0, 0])
ind2 = Individual([1, 1])
population = iter([ind1, ind2])
result = mutator(population)
with pytest.raises(ValueError):
# Pulse the iterator so mutation gets executed
result = list(result)
| 2.734375
| 3
|
src/grpc_router.py
|
Biano-AI/serving-compare-middleware
| 6
|
12782465
|
<filename>src/grpc_router.py
# -*- encoding: utf-8 -*-
# ! python3
from __future__ import annotations
import io
import logging
from pathlib import Path
import platform
from typing import BinaryIO, cast, Final
import aiofiles
import grpc
import httpx
import numpy as np
from devtools import debug
from fastapi import APIRouter, Depends, File, UploadFile
from fastapi.responses import PlainTextResponse
from PIL import Image
from tensorflow import make_tensor_proto
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
from src.dependencies import get_random_image, get_settings, httpx_client
from src.factories import make_triton_grpc_client
from src.inference import AVAILABLE_SERVINGS
from src.torchserve_grpc_inference_client import inference_pb2, inference_pb2_grpc
from src.types import Servings
logger: Final = logging.getLogger(__name__)
router: Final = APIRouter()
try:
from tritonclient.grpc import InferInput, InferRequestedOutput
except ImportError as e:
if platform.system() == "Windows":
logger.warning(
"Triton is not available on Windows. "
"Inference will not work on this server, "
"but at least the whole project will run."
)
InferInput = object()
InferRequestedOutput = object()
else:
raise RuntimeError("Failed to import Triton client") from e
@router.post(
"/infer/{serving_type}",
response_class=PlainTextResponse,
summary="Performs GRPC inference for an image",
)
async def _(
*,
serving_type: Servings,
client: httpx.AsyncClient = Depends(httpx_client),
image: UploadFile = File(...),
) -> str:
"""
Performs inference for the input image (JPEG).
Based on the value of `serving_type`, it selects the correct
backend implementation for the model and calls this backend.
The output of the model is discarded.
"""
image_content = io.BytesIO(cast(bytes, await image.read()))
inference_function, service_url = AVAILABLE_SERVINGS[serving_type]
await inference_function(client=client, image_content=image_content, url=service_url)
return "OK"
@router.get(
"/randinfer/{serving_type}",
response_class=PlainTextResponse,
summary="Performs GRPC inference for an image",
)
async def _(*, serving_type: Servings, random_image: Path = Depends(get_random_image)) -> str:
"""
Performs inference for the input image (JPEG).
Based on the value of `serving_type`, it selects the correct
backend implementation for the model and calls this backend.
The output of the model is discarded.
"""
async with aiofiles.open(random_image, mode="rb") as f:
image_content = io.BytesIO(await f.read())
# TODO This is ugly
if serving_type == Servings.torchserve:
await torchserve_grpc(image_content)
elif serving_type == Servings.tfserving:
await tfserving_grpc(image_content)
elif serving_type == Servings.triton_pytorch:
await triton_pytorch_grpc(image_content)
elif serving_type == Servings.triton_tensorflow:
await triton_tensorflow_grpc(image_content)
return "OK"
async def triton_pytorch_grpc(image_content: BinaryIO) -> None:
triton_client = make_triton_grpc_client()
jpeg_rgb = Image.open(image_content).convert("RGB")
jpeg_rgb = jpeg_rgb.resize((224, 224))
normalized_jpeg = (np.array(jpeg_rgb) - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
normalized_jpeg = np.expand_dims(np.einsum("ijk->kij", np.array(normalized_jpeg)), 0).astype(np.float32)
infer_input = InferInput("input__0", [1, 3, 224, 224], "FP32")
infer_input.set_data_from_numpy(normalized_jpeg)
output = InferRequestedOutput("output__0")
response = triton_client.infer("resnet-50-torch", model_version="1", inputs=[infer_input], outputs=[output])
assert response.as_numpy("output__0").tolist()
async def triton_tensorflow_grpc(image_content: BinaryIO) -> None:
triton_client = make_triton_grpc_client()
jpeg_rgb = Image.open(image_content).convert("RGB")
jpeg_rgb = jpeg_rgb.resize((224, 224))
jpeg_rgb = np.expand_dims(np.array(jpeg_rgb) / 255.0, 0).astype(np.float32)
infer_input = InferInput("input_1", [1, 224, 224, 3], "FP32")
infer_input.set_data_from_numpy(jpeg_rgb)
output = InferRequestedOutput("activation_49")
response = triton_client.infer("resnet-50-tensorflow", model_version="1", inputs=[infer_input], outputs=[output])
assert response.as_numpy("activation_49").tolist()
async def tfserving_grpc(image_content: BinaryIO) -> None:
async with grpc.aio.insecure_channel(get_settings().tfserving_grpc_host) as channel:
stub_tf = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = "resnet_50_classification"
request.model_spec.signature_name = "serving_default"
jpeg_rgb = Image.open(image_content).convert("RGB")
jpeg_rgb = np.expand_dims(np.array(jpeg_rgb) / 255.0, 0)
jpeg_rgb = jpeg_rgb.astype(np.float32)
request.inputs["input_1"].CopyFrom(make_tensor_proto(jpeg_rgb))
response = await stub_tf.Predict(request)
debug(response)
async def torchserve_grpc(image_content: BinaryIO) -> None:
async with grpc.aio.insecure_channel(get_settings().torchserve_grpc_host) as channel:
stub = inference_pb2_grpc.InferenceAPIsServiceStub(channel)
input_data = {"data": image_content.read()}
response = await stub.Predictions(inference_pb2.PredictionsRequest(model_name="resnet-50", input=input_data))
debug(response)
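# --- Editor's illustrative sketch, not part of the original module. ---
# One way to exercise the /infer/{serving_type} route from a client. The host,
# port, image path, and the path value "tfserving" (assumed to be a value of
# the Servings enum) are all hypothetical placeholders.
#
#     import httpx
#     with open("cat.jpg", "rb") as f:
#         response = httpx.post(
#             "http://localhost:8000/infer/tfserving",
#             files={"image": ("cat.jpg", f, "image/jpeg")},
#         )
#     print(response.status_code, response.text)  # expected: 200 OK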
| 2.078125
| 2
|
scripts/generate-framework-agreement-signature-pages.py
|
jonodrew/digitalmarketplace-scripts
| 0
|
12782466
|
<gh_stars>0
#!/usr/bin/env python
"""
PREREQUISITE: You'll need wkhtmltopdf installed for this to work (http://wkhtmltopdf.org/)
Generate framework agreement signature pages from supplier "about you" information for suppliers
who successfully applied to a framework.
This script supersedes the old "generate-agreement-signature-pages.py" which uses framework-specific templates.
Instead, this script uses the core 'framework' record from the API which contains a 'frameworkAgreementDetails' dict
providing extra information needed for generating framework-specific agreements.
The 'frameworkAgreementDetails' JSON object from the API should look something like this:
{
"contractNoticeNumber": "2016/S 217-395765",
"frameworkAgreementVersion": "RM1043iv",
"frameworkExtensionLength": "12 months",
"frameworkRefDate": "16-01-2017",
"frameworkURL": "https://www.gov.uk/government/publications/digital-outcomes-and-specialists-2-framework-agreement",
"lotDescriptions": {
"digital-outcomes": "Lot 1: digital outcomes",
"digital-specialists": "Lot 2: digital specialists",
"user-research-participants": "Lot 4: user research participants",
"user-research-studios": "Lot 3: user research studios"
},
"lotOrder": [
"digital-outcomes",
"digital-specialists",
"user-research-studios",
"user-research-participants"
],
"pageTotal": 45,
"signaturePageNumber": 3
}
If supplied, <supplier_id_file> is a text file with one supplier ID per line; framework agreement pages will only be
generated for these suppliers.
Usage:
scripts/generate-framework-agreement-signature-pages.py <stage> <framework_slug> <output_folder>
<path_to_agreements_repo> [<supplier_id_file>]
"""
import os
import shutil
import sys
import tempfile
sys.path.insert(0, '.')
from docopt import docopt
from dmscripts.export_framework_applicant_details import get_csv_rows
from dmscripts.helpers.auth_helpers import get_auth_token
from dmscripts.helpers.framework_helpers import find_suppliers_with_details_and_draft_service_counts
from dmscripts.helpers.supplier_data_helpers import get_supplier_ids_from_file
from dmscripts.generate_framework_agreement_signature_pages import render_html_for_successful_suppliers, \
render_pdf_for_each_html_page
from dmapiclient import DataAPIClient
from dmutils.env_helpers import get_api_endpoint_from_stage
if __name__ == '__main__':
args = docopt(__doc__)
framework_slug = args['<framework_slug>']
client = DataAPIClient(get_api_endpoint_from_stage(args['<stage>']), get_auth_token('api', args['<stage>']))
framework = client.get_framework(framework_slug)['frameworks']
framework_lot_slugs = tuple([lot['slug'] for lot in client.get_framework(framework_slug)['frameworks']['lots']])
supplier_id_file = args['<supplier_id_file>']
supplier_ids = get_supplier_ids_from_file(supplier_id_file)
html_dir = tempfile.mkdtemp()
records = find_suppliers_with_details_and_draft_service_counts(client, framework_slug, supplier_ids)
headers, rows = get_csv_rows(records, framework_slug, framework_lot_slugs, count_statuses=("submitted",))
render_html_for_successful_suppliers(
rows, framework, os.path.join(args['<path_to_agreements_repo>'], 'documents', framework['slug']), html_dir
)
html_pages = os.listdir(html_dir)
html_pages.remove('framework-agreement-signature-page.css')
render_pdf_for_each_html_page(html_pages, html_dir, args['<output_folder>'])
shutil.rmtree(html_dir)
| 2.390625
| 2
|
conf_site/cms/__init__.py
|
jasongrout/conf_site
| 0
|
12782467
|
<filename>conf_site/cms/__init__.py
default_app_config = "conf_site.cms.apps.CmsConfig"
| 1.070313
| 1
|
dl_markup/UndoRedo.py
|
ekuptsov/dl_markup
| 0
|
12782468
|
from abc import ABC, abstractmethod
from PyQt5 import QtWidgets
from .Scene import Scene
class ICommand(ABC):
"""Abstract command."""
@abstractmethod
def execute(self):
"""Execute command."""
pass
@abstractmethod
def un_execute(self):
"""Undo command execution."""
pass
class AddCommand(ICommand):
"""Command adding new item to scene."""
def __init__(
self,
item: QtWidgets.QGraphicsItem,
scene: Scene):
"""Initialize new command.
:param item: item to be added to scene
:param scene: scene to operate with
"""
self.__item = item
self.__scene = scene
def execute(self):
"""Add item to scene."""
if self.__item not in self.__scene.items():
self.__scene.addItem(self.__item)
def un_execute(self):
"""Remove item from scene."""
self.__scene.removeItem(self.__item)
class UndoRedo:
"""Class for saving drawing history and performing undo/redo functionality."""
def __init__(self, scene: Scene):
"""Initialize UndoRedo object.
:param scene: scene for drawing
"""
self.__undo_commands = []
self.__redo_commands = []
self.__container = scene
def undo(self, levels: int):
"""Undo last actions.
:param levels: number of actions to undo
"""
for _ in range(levels):
if not self.__undo_commands:
break
command = self.__undo_commands.pop()
command.un_execute()
self.__redo_commands.append(command)
def redo(self, levels: int):
"""Redo actions, which were undone.
:param levels: number of actions to redo
"""
for _ in range(levels):
if not self.__redo_commands:
break
command = self.__redo_commands.pop()
command.execute()
self.__undo_commands.append(command)
def insert_in_undo_redo(self, command: ICommand):
"""Insert command to history.
:param command: command to be inserted
"""
self.__undo_commands.append(command)
self.__redo_commands.clear()
def insert_in_undo_redo_add(
self,
item: QtWidgets.QGraphicsItem):
"""Insert and execute AddCommand.
:param item: item to wrap in an AddCommand that is executed and added to history
"""
command = AddCommand(item, self.__container)
command.execute()
self.insert_in_undo_redo(command)
def clear(self):
"""Clear all history."""
self.__undo_commands.clear()
self.__redo_commands.clear()
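# --- Editor's illustrative sketch, not part of the original module. ---
# Demonstrates the undo/redo stacks with a minimal custom ICommand. It avoids
# AddCommand so no Qt scene has to be constructed; passing scene=None is an
# assumption that only works because this command never touches the container.
if __name__ == "__main__":
    class AppendCommand(ICommand):
        """Append or remove a value on a plain list."""
        def __init__(self, target, value):
            self._target = target
            self._value = value

        def execute(self):
            self._target.append(self._value)

        def un_execute(self):
            self._target.remove(self._value)

    items = []
    history = UndoRedo(scene=None)
    for value in (1, 2, 3):
        command = AppendCommand(items, value)
        command.execute()
        history.insert_in_undo_redo(command)
    history.undo(2)  # items is now [1]
    history.redo(1)  # items is now [1, 2]
    print(items)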
| 3.265625
| 3
|
Scripts/sims4communitylib/utils/sims/common_buff_utils.py
|
ColonolNutty/Sims4CommunityLibrary
| 118
|
12782469
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Union, List, Tuple, Iterator
from buffs.buff import Buff
from distributor.shared_messages import IconInfoData
from protocolbuffers.Localization_pb2 import LocalizedString
from server_commands.argument_helpers import TunableInstanceParam, OptionalTargetParam
from sims.sim_info import SimInfo
from sims4.commands import Command, CommandType, CheatOutput
from sims4.resources import Types
from sims4communitylib.enums.buffs_enum import CommonBuffId
from sims4communitylib.enums.strings_enum import CommonStringId
from sims4communitylib.enums.types.component_types import CommonComponentType
from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler
from sims4communitylib.logging.has_class_log import HasClassLog
from sims4communitylib.mod_support.mod_identity import CommonModIdentity
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.notifications.common_basic_notification import CommonBasicNotification
from sims4communitylib.utils.common_component_utils import CommonComponentUtils
from sims4communitylib.utils.localization.common_localization_utils import CommonLocalizationUtils
from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
class CommonBuffUtils(HasClassLog):
"""Utilities for manipulating Buffs on Sims.
"""
# noinspection PyMissingOrEmptyDocstring
@classmethod
def get_mod_identity(cls) -> CommonModIdentity:
return ModInfo.get_identity()
# noinspection PyMissingOrEmptyDocstring
@classmethod
def get_log_identifier(cls) -> str:
return 'common_buff_utils'
@staticmethod
def has_fertility_boosting_buff(sim_info: SimInfo) -> bool:
"""has_fertility_boosting_buff(sim_info)
Determine if any fertility boosting buffs are currently active on a sim.
.. note::
Fertility Boosting Buffs:
- Fertility Potion
- Fertility Potion Masterwork
- Fertility Potion Normal
- Fertility Potion Outstanding
- Massage Table Fertility Boost
- Massage Table Fertility Boost Incense
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if they have any fertility boosting buffs. False, if not.
:rtype: bool
"""
buff_ids = (
CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION,
CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_MASTERWORK,
CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_NORMAL,
CommonBuffId.OBJECT_HERBALIST_POTION_FERTILITY_POTION_OUTSTANDING,
CommonBuffId.OBJECT_MASSAGE_TABLE_FERTILITY_BOOST,
CommonBuffId.OBJECT_MASSAGE_TABLE_FERTILITY_BOOST_INCENSE
)
return CommonBuffUtils.has_buff(sim_info, *buff_ids)
@staticmethod
def has_morning_person_buff(sim_info: SimInfo) -> bool:
"""has_morning_person_buff(sim_info)
Determine if any Morning Person Trait buffs are currently active on a Sim.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if they have any morning person buffs. False, if not.
:rtype: bool
"""
buff_ids = (
CommonBuffId.TRAIT_MORNING_PERSON,
CommonBuffId.TRAIT_MORNING_PERSON_ACTIVE,
CommonBuffId.TRAIT_MORNING_PERSON_CHECK_ACTIVE
)
return CommonBuffUtils.has_buff(sim_info, *buff_ids)
@staticmethod
def has_night_owl_buff(sim_info: SimInfo) -> bool:
"""has_night_owl_buff(sim_info)
Determine if any Night Owl Trait buffs are currently active on a sim.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if they have any night owl buffs. False, if not.
:rtype: bool
"""
buff_ids = (
CommonBuffId.TRAIT_NIGHT_OWL,
CommonBuffId.TRAIT_NIGHT_OWL_ACTIVE,
CommonBuffId.TRAIT_NIGHT_OWL_CHECK_ACTIVE
)
return CommonBuffUtils.has_buff(sim_info, *buff_ids)
@staticmethod
def has_buff(sim_info: SimInfo, *buffs: Union[int, CommonBuffId, Buff]) -> bool:
"""has_buff(sim_info, *buffs)
Determine if any of the specified buffs are currently active on a sim.
:param sim_info: The sim being checked.
:type sim_info: SimInfo
:param buffs: The identifiers of Buffs.
:type buffs: Union[int, CommonBuffId, Buff]
:return: True, if the sim has any of the specified buffs.
:rtype: bool
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
return False
if not buffs:
return False
buff_ids = [CommonBuffUtils.get_buff_id(buff) for buff in buffs]
sim_buff_ids = CommonBuffUtils.get_buff_ids(sim_info)
for sim_buff_id in sim_buff_ids:
if sim_buff_id in buff_ids:
return True
return False
@staticmethod
def get_buffs(sim_info: SimInfo) -> List[Buff]:
"""get_buffs(sim_info)
Retrieve all buffs currently active on a Sim.
:param sim_info: The Sim to retrieve the buffs of.
:type sim_info: SimInfo
:return: A collection of currently active buffs on the Sim.
:rtype: Tuple[Buff]
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
return list()
from objects.components.buff_component import BuffComponent
buff_component: BuffComponent = CommonComponentUtils.get_component(sim_info, CommonComponentType.BUFF)
buffs = list()
for buff in buff_component:
if buff is None or not isinstance(buff, Buff):
continue
buffs.append(buff)
return buffs
@staticmethod
def get_buff_ids(sim_info: SimInfo) -> List[int]:
"""get_buff_ids(sim_info)
Retrieve decimal identifiers for all Buffs of a sim.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: A collection of Buff identifiers on a Sim.
:rtype: List[int]
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
return list()
buff_ids = list()
sim_buffs = CommonBuffUtils.get_buffs(sim_info)
for buff in sim_buffs:
buff_id = CommonBuffUtils.get_buff_id(buff)
if buff_id is None:
continue
buff_ids.append(buff_id)
return buff_ids
@classmethod
def add_buff(cls, sim_info: SimInfo, *buffs: Union[int, CommonBuffId], buff_reason: Union[int, str, LocalizedString, CommonStringId]=None) -> bool:
"""add_buff(sim_info, *buffs, buff_reason=None)
Add the specified buffs to a sim.
:param sim_info: The sim to add the specified buffs to.
:type sim_info: SimInfo
:param buffs: An iterable of identifiers of buffs being added.
:type buffs: Union[int, CommonBuffId, Buff]
:param buff_reason: The text that will display when the player hovers over the buffs. What caused the buffs to be added.
:type buff_reason: Union[int, str, LocalizedString, CommonStringId], optional
:return: True, if all of the specified buffs were successfully added. False, if not.
:rtype: bool
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
cls.get_log().format_with_message('Failed to add Buff to Sim. They did not have a Buff component!', buffs=buffs, sim=sim_info, buff_reason=buff_reason)
return False
localized_buff_reason = None
if buff_reason is not None:
localized_buff_reason = CommonLocalizationUtils.create_localized_string(buff_reason)
has_any = False
success = True
for buff_id in buffs:
buff = CommonBuffUtils.load_buff_by_id(buff_id)
if buff is None:
cls.get_log().format_with_message('No buff found using identifier.', buffs=buffs, sim=sim_info, buff_reason=buff_reason, buff_id=buff_id)
continue
if not sim_info.add_buff_from_op(buff, buff_reason=localized_buff_reason):
cls.get_log().format_with_message('Failed to add buff for unknown reasons.', buff=buff, sim=sim_info, buff_reason=buff_reason)
success = False
else:
cls.get_log().format_with_message('Successfully added buff.', buff=buff, sim=sim_info, buff_reason=buff_reason)
has_any = True
cls.get_log().format_with_message('Finished adding buffs to Sim.', buffs=buffs, sim=sim_info, buff_reason=buff_reason, success=success, has_any=has_any)
return success and has_any
@staticmethod
def remove_buff(sim_info: SimInfo, *buffs: Union[int, CommonBuffId, Buff]) -> bool:
"""remove_buff(sim_info, *buffs)
Remove the specified buffs from a sim.
:param sim_info: The sim to remove the specified buffs from.
:type sim_info: SimInfo
:param buffs: An iterable of identifiers of buffs being removed.
:type buffs: Union[int, CommonBuffId, Buff]
:return: True, if all of the specified buffs were successfully removed. False, if not.
:rtype: bool
"""
if sim_info is None:
raise AssertionError('Argument sim_info was None')
if not CommonComponentUtils.has_component(sim_info, CommonComponentType.BUFF):
return False
has_any = False
success = True
for buff in buffs:
buff = CommonBuffUtils.load_buff_by_id(buff)
if buff is None:
continue
sim_info.remove_buff_by_type(buff)
has_any = True
if CommonBuffUtils.has_buff(sim_info, buff):
success = False
return success and has_any
@staticmethod
def get_buff_id(buff_identifier: Union[int, Buff]) -> Union[int, None]:
"""get_buff_id(buff_identifier)
Retrieve the decimal identifier of a Buff.
:param buff_identifier: The identifier or instance of a Buff.
:type buff_identifier: Union[int, Buff]
:return: The decimal identifier of the Buff or None if the Buff does not have an id.
:rtype: Union[int, None]
"""
if isinstance(buff_identifier, int):
return buff_identifier
return getattr(buff_identifier, 'guid64', None)
@staticmethod
def get_buff_name(buff: Buff) -> Union[str, None]:
"""get_buff_name(buff)
Retrieve the Name of a Buff.
:param buff: An instance of a Buff.
:type buff: Buff
:return: The name of a Buff or None if a problem occurs.
:rtype: Union[str, None]
"""
if buff is None:
return None
# noinspection PyBroadException
try:
return buff.__class__.__name__ or ''
except:
return ''
@staticmethod
def get_buff_names(buffs: Iterator[Buff]) -> Tuple[str]:
"""get_buff_names(buffs)
Retrieve the Names of a collection of Buffs.
:param buffs: A collection of Buff instances.
:type buffs: Iterator[Buff]
:return: A collection of names for all specified Buffs.
:rtype: Tuple[str]
"""
if buffs is None or not buffs:
return tuple()
names: List[str] = []
for buff in buffs:
# noinspection PyBroadException
try:
name = CommonBuffUtils.get_buff_name(buff)
if not name:
continue
except:
continue
names.append(name)
return tuple(names)
@staticmethod
def load_buff_by_id(buff: Union[int, CommonBuffId, Buff]) -> Union[Buff, None]:
"""load_buff_by_id(buff)
Load an instance of a Buff by its identifier.
:param buff: The identifier of a Buff.
:type buff: Union[int, CommonBuffId, Buff]
:return: An instance of a Buff matching the decimal identifier or None if not found.
:rtype: Union[Buff, None]
"""
if isinstance(buff, Buff):
return buff
# noinspection PyBroadException
try:
buff: int = int(buff)
except:
buff: Buff = buff
return buff
from sims4.resources import Types
from sims4communitylib.utils.common_resource_utils import CommonResourceUtils
return CommonResourceUtils.load_instance(Types.BUFF, buff)
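# Illustrative sketch (not part of the original file): load_buff_by_id accepts an int,
# a CommonBuffId, or an already-loaded Buff, so a caller could resolve and inspect a
# buff like this (the identifier below is a placeholder):
#   buff = CommonBuffUtils.load_buff_by_id(12345)
#   name = CommonBuffUtils.get_buff_name(buff) if buff is not None else None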
@Command('s4clib.add_buff', command_type=CommandType.Live)
def _common_add_buff(buff: TunableInstanceParam(Types.BUFF), opt_sim: OptionalTargetParam=None, buff_reason: str=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
if buff is None:
output('Failed, Buff not specified or Buff did not exist! s4clib.add_buff <buff_name_or_id> [opt_sim=None]')
return
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
sim_name = CommonSimNameUtils.get_full_name(sim_info)
output('Adding buff {} to Sim {}'.format(str(buff), sim_name))
try:
if CommonBuffUtils.add_buff(sim_info, buff, buff_reason=buff_reason):
output('Successfully added buff.')
else:
output('Failed to add buff.')
except Exception as ex:
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Failed to add buff {} to Sim {}.'.format(str(buff), sim_name), exception=ex)
output('Failed to add buff {} to Sim {}. {}'.format(str(buff), sim_name, str(ex)))
@Command('s4clib.remove_buff', command_type=CommandType.Live)
def _common_remove_buff(buff: TunableInstanceParam(Types.BUFF), opt_sim: OptionalTargetParam=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
if buff is None:
output('Failed, Buff not specified or Buff did not exist! s4clib.remove_buff <buff_name_or_id> [opt_sim=None]')
return
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
sim_name = CommonSimNameUtils.get_full_name(sim_info)
output('Removing buff {} from Sim {}'.format(str(buff), sim_name))
try:
if CommonBuffUtils.remove_buff(sim_info, buff):
output('Successfully removed buff.')
else:
output('Failed to remove buff.')
except Exception as ex:
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Failed to remove buff {} from Sim {}.'.format(str(buff), sim_name), exception=ex)
output('Failed to remove buff {} from Sim {}. {}'.format(str(buff), sim_name, str(ex)))
@Command('s4clib.show_active_buffs', command_type=CommandType.Live)
def _common_show_active_buffs(opt_sim: OptionalTargetParam=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
sim = get_optional_target(opt_sim, _connection)
sim_info = CommonSimUtils.get_sim_info(sim)
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
sim_name = CommonSimNameUtils.get_full_name(sim_info)
output('Showing active buffs of Sim {}'.format(sim_name))
try:
sim_buff_strings: List[str] = list()
for buff in CommonBuffUtils.get_buffs(sim_info):
buff_name = CommonBuffUtils.get_buff_name(buff)
buff_id = CommonBuffUtils.get_buff_id(buff)
sim_buff_strings.append('{} ({})'.format(buff_name, buff_id))
sim_buff_strings = sorted(sim_buff_strings, key=lambda x: x)
sim_buffs = ', '.join(sim_buff_strings)
text = ''
text += 'Active Buffs:\n{}\n\n'.format(sim_buffs)
CommonBasicNotification(
CommonLocalizationUtils.create_localized_string('{} Active Buffs ({})'.format(sim_name, CommonSimUtils.get_sim_id(sim_info))),
CommonLocalizationUtils.create_localized_string(text)
).show(
icon=IconInfoData(obj_instance=CommonSimUtils.get_sim_instance(sim_info))
)
except Exception as ex:
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Failed to show active buffs of Sim {}.'.format(sim_name), exception=ex)
output('Failed to show active buffs of Sim {}. {}'.format(sim_name, str(ex)))
| 1.445313
| 1
|
src/robot/pythonpathsetter.py
|
gdw2/robot-framework
| 0
|
12782470
|
# Copyright 2008-2010 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that adds directories needed by Robot to sys.path when imported."""
import sys
import os
import fnmatch
def add_path(path, to_beginning=False, force=False):
if _should_be_added(path, force):
if to_beginning:
sys.path.insert(0, path)
else:
sys.path.append(path)
def remove_path(path):
path = _normpath(path)
sys.path = [p for p in sys.path if _normpath(p) != path]
def _should_be_added(path, force):
if (not path) or _find_in_syspath_normalized(path):
return False
return force or os.path.exists(path)
def _find_in_syspath_normalized(path):
path = _normpath(path)
for element in sys.path:
if _normpath(element) == path:
return element
return None
def _normpath(path):
return os.path.normcase(os.path.normpath(path))
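# Illustrative note (not part of the original module): paths are compared after
# os.path.normcase/os.path.normpath, so add_path() skips entries already on sys.path in a
# different case or separator style, appends existing directories by default, and only
# appends missing directories when force=True, e.g.:
#   add_path('/opt/robot/libraries')          # added only if the directory exists
#   add_path('/opt/robot/extra', force=True)  # added even if the directory is missing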
ROBOTDIR = os.path.dirname(os.path.abspath(__file__))
PARENTDIR = os.path.dirname(ROBOTDIR)
add_path(os.path.join(ROBOTDIR, 'libraries'), to_beginning=True,
force=True)
add_path(PARENTDIR, to_beginning=True)
# Handles egg installations
if fnmatch.fnmatchcase(os.path.basename(PARENTDIR), 'robotframework-*.egg'):
add_path(os.path.dirname(PARENTDIR), to_beginning=True)
# Remove ROBOTDIR dir to disallow importing robot internal modules directly
remove_path(ROBOTDIR)
# Elements from PYTHONPATH. By default it is not processed in Jython and in
# Python valid non-absolute paths may be ignored.
PYPATH = os.environ.get('PYTHONPATH')
if PYPATH:
for path in PYPATH.split(os.pathsep):
add_path(path)
del path
# Current dir (it seems to be in Jython by default so let's be consistent)
add_path('.')
del _find_in_syspath_normalized, _normpath, add_path, remove_path, ROBOTDIR, PARENTDIR, PYPATH
| 2.1875
| 2
|
teste.py
|
j0nathan-calist0/Aula-18_03
| 0
|
12782471
|
<reponame>j0nathan-calist0/Aula-18_03
import pytest
from principal import somar
from principal import subtrair
def test_somar():
    assert somar(2, 4) == 6
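# A commented sketch of a matching test for subtrair, which is imported above but never
# exercised; it assumes subtrair(a, b) returns a - b (principal.py is not shown here):
# def test_subtrair():
#     assert subtrair(4, 2) == 2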
| 1.992188
| 2
|
MetaScreener/analyze_results/Get_histogram/PlipGraph.py
|
bio-hpc/metascreener
| 8
|
12782472
|
<reponame>bio-hpc/metascreener
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import subprocess
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .Tools import *
from .debug import BColors
TAG = "plipGraph.py :"
color = BColors.GREEN
class PlipGraph(object):
def __init__(self, cfg ):
self.cfg = cfg
self.best_poses = self.cfg.best_poses
self.generate_plip()
def generate_plip(self):
for pose in self.best_poses:
prefix_out = os.path.join(self.cfg.OUTPUT_DIRS['interacciones'], pose.file_name)
if bool(self.cfg.file_target_pdb and not self.cfg.file_target_pdb.isspace()) :
cmd = '{} {} {} {} {} {}'.format(
self.cfg.python_exe,
self.cfg.ligand_plip,
pose.file_ori_target,
pose.file_result,
prefix_out,
self.cfg.file_target_pdb
)
else:
cmd = '{} {} {} {} {}'.format(
self.cfg.python_exe,
self.cfg.ligand_plip,
pose.file_ori_target,
pose.file_result,
prefix_out,
)
self.cfg.execute("Plip Interactions", cmd)
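# Illustrative shape of the command assembled in generate_plip above, with the optional
# target PDB appended only when cfg.file_target_pdb is set:
#   <python_exe> <ligand_plip> <file_ori_target> <file_result> <prefix_out> [<file_target_pdb>]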
| 2.03125
| 2
|
tests/units/utils/test_log_linear_exp.py
|
nilin/vmcnet
| 17
|
12782473
|
"""Tests for log_linear_exp function."""
import chex
import jax
import jax.numpy as jnp
import numpy as np
from vmcnet.utils.log_linear_exp import log_linear_exp
import vmcnet.utils.slog_helpers as slog_helpers
def test_log_linear_exp_shape():
"""Test output shape of log linear exp."""
signs = jnp.ones((5, 2, 4, 3))
vals = jnp.zeros((5, 2, 4, 3))
weights = jnp.ones((2, 7))
out = log_linear_exp(signs, vals, weights, axis=-3)
out_no_weights = log_linear_exp(signs, vals, axis=-3)
desired_shape = (5, 7, 4, 3)
desired_shape_no_weights = (5, 1, 4, 3)
chex.assert_shape(out, desired_shape)
chex.assert_shape(out_no_weights, desired_shape_no_weights)
def test_log_linear_exp_no_overflow():
"""Test that the log-linear-exp trick avoids overflow when any vals are big."""
signs = jnp.array([-1.0, -1.0, 1.0, 1.0])
vals = jnp.array([300.0, 100.0, 3000.0, 1.5])
weights = jnp.reshape(jnp.array([-1.0, 2.0, 0.5, 0.6]), (4, 1))
sign_out, log_out = log_linear_exp(signs, vals, weights, axis=0)
# the output should be sign_out=1.0, log_out=log|0.5 * exp(3000) + tinier stuff|
assert jnp.isfinite(log_out)
np.testing.assert_allclose(sign_out, 1.0)
np.testing.assert_allclose(log_out, jnp.log(0.5) + 3000.0)
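# Sketch of the semantics these tests rely on (inferred from the usage here, not from the
# implementation itself): log_linear_exp(signs, vals, weights, axis) returns the sign and
# log-magnitude of sum_i weights[i, j] * signs[i] * exp(vals[i]) along the given axis,
# evaluated stably (e.g. by shifting by max(vals)) so that neither exp(3000.0) nor
# exp(-1234.5) is computed directly.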
def test_log_linear_exp_no_underflow():
"""Test that the log-linear-exp trick avoids underflow when all vals are small."""
signs = jnp.array([-1.0, -1.0, 1.0, 1.0])
vals = jnp.array([-4000.0, -5500.0, -3000.0, -1234.5])
sign_out, log_out = log_linear_exp(signs, vals, axis=0)
# the output should be sign_out=1.0, log_out=log|exp(-1234.5) + tinier stuff|
np.testing.assert_allclose(sign_out, 1.0)
np.testing.assert_allclose(log_out, -1234.5)
def test_log_linear_equals_log_linear_exp_log():
"""Test that log-linear-exp of sign(x), log|x| is just log-linear."""
key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)
x = jax.random.normal(subkey, (9, 5))
sign_x, log_x = slog_helpers.array_to_slog(x)
key, subkey = jax.random.split(key)
kernel = jax.random.normal(subkey, (5, 7))
sign_linear_out, log_linear_out = slog_helpers.array_to_slog(jnp.dot(x, kernel))
sign_linear_exp_log_out, log_linear_exp_log_out = log_linear_exp(
sign_x, log_x, kernel, axis=-1
)
np.testing.assert_allclose(sign_linear_exp_log_out, sign_linear_out)
np.testing.assert_allclose(log_linear_exp_log_out, log_linear_out, rtol=1e-5)
| 2.59375
| 3
|
tests/gamestonk_terminal/stocks/discovery/test_disc_api.py
|
minhhoang1023/GamestonkTerminal
| 1
|
12782474
|
<filename>tests/gamestonk_terminal/stocks/discovery/test_disc_api.py<gh_stars>1-10
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
# IMPORTATION INTERNAL
from gamestonk_terminal.helper_classes import ModelsNamespace as _models
from gamestonk_terminal.stocks.discovery import disc_api
def test_models():
assert isinstance(disc_api.models, _models)
| 1.742188
| 2
|
tests/datamodules/util/test_predict_dataset.py
|
DIVA-DIA/DIVA-DAF
| 3
|
12782475
|
<filename>tests/datamodules/util/test_predict_dataset.py
import pytest
import torch
from torchvision.transforms import ToTensor
from src.datamodules.utils.dataset_predict import DatasetPredict
from src.datamodules.utils.misc import ImageDimensions
from tests.test_data.dummy_data_hisdb.dummy_data import data_dir
@pytest.fixture
def file_path_list(data_dir):
test_data_path = data_dir / 'test' / 'data'
return [str(p) for p in test_data_path.iterdir()]
@pytest.fixture
def predict_dataset(file_path_list):
return DatasetPredict(image_path_list=file_path_list, image_dims=ImageDimensions(width=487, height=649))
def test__load_data_and_gt(predict_dataset):
img = predict_dataset._load_data_and_gt(index=0)
assert img.size == (487, 649)
assert img.mode == 'RGB'
assert torch.equal(ToTensor()(img), predict_dataset[0][0])
def test__apply_transformation(predict_dataset):
img = predict_dataset._load_data_and_gt(index=0)
img_tensor = predict_dataset._apply_transformation(img)
assert torch.equal(img_tensor, predict_dataset[0][0])
assert img_tensor.shape == torch.Size((3, 649, 487))
| 2.1875
| 2
|
proliantutils/ipa_hw_manager/hardware_manager.py
|
mail2nsrajesh/proliantutils
| 12
|
12782476
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic_python_agent import hardware
from proliantutils.hpssa import manager as hpssa_manager
from proliantutils.hpsum import hpsum_controller
class ProliantHardwareManager(hardware.GenericHardwareManager):
HARDWARE_MANAGER_VERSION = "3"
def get_clean_steps(self, node, ports):
"""Return the clean steps supported by this hardware manager.
This method returns the clean steps that are supported by
proliant hardware manager. This method is invoked on every
hardware manager by Ironic Python Agent to give this information
back to Ironic.
:param node: A dictionary of the node object
:param ports: A list of dictionaries containing information of ports
for the node
:returns: A list of dictionaries, each item containing the step name,
interface and priority for the clean step.
"""
return [{'step': 'create_configuration',
'interface': 'raid',
'priority': 0},
{'step': 'delete_configuration',
'interface': 'raid',
'priority': 0},
{'step': 'erase_devices',
'interface': 'deploy',
'priority': 0},
{'step': 'update_firmware',
'interface': 'management',
'priority': 0}]
def evaluate_hardware_support(cls):
return hardware.HardwareSupport.SERVICE_PROVIDER
def create_configuration(self, node, ports):
"""Create RAID configuration on the bare metal.
This method creates the desired RAID configuration as read from
node['target_raid_config'].
:param node: A dictionary of the node object
:param ports: A list of dictionaries containing information of ports
for the node
:returns: The current RAID configuration of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
}
"""
target_raid_config = node.get('target_raid_config', {}).copy()
return hpssa_manager.create_configuration(
raid_config=target_raid_config)
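    # Illustrative (assumed) shape of node['target_raid_config'] consumed by
    # create_configuration above; the field names mirror the raid_config format
    # documented in its docstring:
    #   {'logical_disks': [{'size_gb': 100,
    #                       'raid_level': 1,
    #                       'physical_disks': ['5I:0:1', '5I:0:2'],
    #                       'controller': 'Smart array controller'}]}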
def delete_configuration(self, node, ports):
"""Deletes RAID configuration on the bare metal.
This method deletes all the RAID disks on the bare metal.
:param node: A dictionary of the node object
:param ports: A list of dictionaries containing information of ports
for the node
"""
return hpssa_manager.delete_configuration()
def erase_devices(self, node, port):
"""Erase the drives on the bare metal.
        This method erases all drives that support sanitize, as well as drives
        that are not part of any logical volume on the bare metal. It calls the
        generic erase method after the sanitize disk erase succeeds.
:param node: A dictionary of the node object.
:param port: A list of dictionaries containing information of ports
for the node.
:raises exception.HPSSAOperationError, if there is a failure on the
erase operation on the controllers.
:returns: The dictionary of controllers with the drives and erase
status for each drive.
"""
result = {}
result['Disk Erase Status'] = hpssa_manager.erase_devices()
result.update(super(ProliantHardwareManager,
self).erase_devices(node, port))
return result
def update_firmware(self, node, port):
"""Performs HPSUM based firmware update on the bare metal node.
This method performs firmware update on all or some of the firmware
components on the bare metal node.
:returns: A string with return code and the statistics of
updated/failed components.
:raises: HpsumOperationError, when the hpsum firmware update operation
on the node fails.
"""
return hpsum_controller.update_firmware(node)
| 2.140625
| 2
|
Day_07/part1.py
|
Uklusi/AdventOfCode2021
| 0
|
12782477
|
from AoCUtils import *
result = 0
partNumber = "1"
writeToLog = False
if writeToLog:
logFile = open("log" + partNumber + ".txt", "w")
else:
logFile = "stdout"
printLog = printLogFactory(logFile)
with open("input.txt", "r") as inputFile:
lines = inputFile.read().strip().split("\n")
for line in lines:
line = line.strip()
positions = [int(n) for n in line.split(",")]
positions.sort()
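    # Part 1: the total fuel sum(|t - n|) is minimized when t is the median position.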
    median = positions[len(positions) // 2]
    result = sum(abs(median - n) for n in positions)
with open("output" + partNumber + ".txt", "w") as outputFile:
outputFile.write(str(result))
print(str(result))
if writeToLog:
cast(TextIOWrapper, logFile).close()
| 2.953125
| 3
|
tests/test_mdtoc.py
|
scottfrazer/mdtoc
| 2
|
12782478
|
<filename>tests/test_mdtoc.py
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import os
import tempfile
import textwrap
import pytest
import mdtoc.main
@pytest.mark.parametrize(
"i,out",
[
("# Simple markdown", "Simple markdown"),
(" # Leading space", "Leading space"),
("### Ending hash ##", "Ending hash"),
(
"# This is markdown, I promise \\### ",
"This is markdown, I promise \\#",
),
],
)
def test_strip(i, out):
assert mdtoc.main._strip(i) == out
@pytest.mark.parametrize(
"header,out",
[
("# This is an L1 header", "this-is-an-l1-header"),
("# Spaces here ... ", "spaces-here-"),
(" ## Three leading spaces", "three-leading-spaces"),
("# THis is CAPS!!!", "this-is-caps"),
("## this is an l2 header", "this-is-an-l2-header"),
("### This is ... an L3 header??", "this-is--an-l3-header"),
(
"#### This is a Spicy Jalapeño Header! :)",
"this-is-a-spicy-jalapeño-header-",
),
(
"# Чемезов заявил об уничтожении поврежденных штормом ракет С-400 для Китая", # noqa
"чемезов-заявил-об-уничтожении-поврежденных-штормом-ракет-с-400-для-китая", # noqa
),
("### This has (some parens) in it", "this-has-some-parens-in-it"),
("## What Happens to Me? #####", "what-happens-to-me"),
("## foo ##", "foo"),
("# foo ##################################", "foo"),
("##### foo ##", "foo"),
("### foo ### ", "foo"),
("### foo ### b", "foo--b"),
("### foo \\###", "foo-"),
("## foo #\\##", "foo-"),
("# foo \\#", "foo-"),
(
r"### Here is All Punctuation '!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ At Once",
"here-is-all-punctuation--_-at-once",
),
("## Err ... ##", "err-"),
("##### Redirect `stderr` _and_ `stdout`", "redirect-stderr-and-stdout"),
("##### Redirect `stderr` *_and_* `stdout`", "redirect-stderr-and-stdout"),
("##### Redirect `stderr` **and** `stdout`", "redirect-stderr-and-stdout"),
("##### Redirect `stderr` *_and_* `stdout`", "redirect-stderr-and-stdout"),
],
)
def test_as_link(header, out):
assert mdtoc.main.as_link(header) == out
def test_header_pat():
headers = textwrap.dedent(
"""\
# Header 1 here
## Header 3 here ...
### Header 4 Here ##"""
)
for line in headers.split("\n"):
assert mdtoc.main.HEADER_PAT.match(line)
def test_repeated_headers():
s = """\
# foo
## foo
### foo"""
assert (
mdtoc.main.toc(s)
== "* [foo](#foo)\n * [foo](#foo-1)\n * [foo](#foo-2)"
)
@pytest.mark.parametrize(
"s",
[
" # Header 1 - too many leading spaces",
"####### Header 2 - too many hash signs",
"\\## Escaped initial hash",
"#missingspace",
],
)
def test_header_pat_neg(s):
assert not mdtoc.main.HEADER_PAT.match(s)
@pytest.mark.parametrize(
"i,out",
[
(
"[link here](https://github.com/scottfrazer/mdtoc/)",
("link here", "https://github.com/scottfrazer/mdtoc/"),
),
(
"[multi parens??](https://google.com/co(mp)uting(iscool))",
("multi parens??", "https://google.com/co(mp)uting(iscool)"),
),
],
)
def test_md_link_pat(i, out):
match = mdtoc.main.MD_LINK_PAT.search(i)
assert match
assert match.group(1) == out[0]
assert match.group(2) == out[1]
# ------------------
# Input/output pairs
# ------------------
_bad_markdown = (
"""\
# Welcome To Hell
<!---toc start-->
xxx
<!---toc end-->
## Okay so far
## Wait, This is Not a Header!!!
## Err ... ##
### Header 3
xxx""",
"""\
# Welcome To Hell
<!---toc start-->
* [Welcome To Hell](#welcome-to-hell)
* [Okay so far](#okay-so-far)
* [Err ...](#err-)
* [Header 3](#header-3)
<!---toc end-->
## Okay so far
## Wait, This is Not a Header!!!
## Err ... ##
### Header 3
xxx""",
)
_good_markdown = (
"""\
# Welcome to Heaven
<!---toc start-->
<!---toc end-->
xxx
## Wow, Isn't This Neat!
xyz
```python
# Hopefully no one ever sees this
def f():
return f(f()) - f()
```
All done.""",
"""\
# Welcome to Heaven
<!---toc start-->
* [Welcome to Heaven](#welcome-to-heaven)
* [Wow, Isn't This Neat!](#wow-isnt-this-neat)
<!---toc end-->
xxx
## Wow, Isn't This Neat!
xyz
```python
# Hopefully no one ever sees this
def f():
return f(f()) - f()
```
All done.""",
)
_missing_delims = (
"""\
x""",
"""\
x""",
)
def _mimic_cli(s):
"""Mimic reading & over-writing temporary files as done via CLI."""
assert len(s) == 2, "Improper test tuple"
i, out = s
tmp = tempfile.NamedTemporaryFile(delete=False, mode="w")
name = tmp.name
tmp.write(i)
tmp.close()
# tmp.name is absolute path
try:
mdtoc.main.modify_and_write(name)
tmp.close()
with open(name) as f:
assert f.read() == out
finally:
tmp.close()
if os.path.isfile(name):
os.remove(name)
@pytest.mark.parametrize("s", [_good_markdown, _bad_markdown])
def test_modify_and_write(s):
_mimic_cli(s)
def test_modify_and_write_raises_no_delim():
with pytest.raises(Exception):
_mimic_cli(_missing_delims)
| 2.53125
| 3
|
examples/demo_array.py
|
bmerry/pyopencl
| 7
|
12782479
|
import pyopencl as cl
import pyopencl.array as cl_array
import numpy
import numpy.linalg as la
a = numpy.random.rand(50000).astype(numpy.float32)
b = numpy.random.rand(50000).astype(numpy.float32)
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
a_dev = cl_array.to_device(queue, a)
b_dev = cl_array.to_device(queue, b)
dest_dev = cl_array.empty_like(a_dev)
prg = cl.Program(ctx, """
__kernel void sum(__global const float *a,
__global const float *b, __global float *c)
{
int gid = get_global_id(0);
c[gid] = a[gid] + b[gid];
}
""").build()
prg.sum(queue, a.shape, None, a_dev.data, b_dev.data, dest_dev.data)
print(la.norm((dest_dev - (a_dev+b_dev)).get()))
| 2.1875
| 2
|
File Handling/ReadLines.py
|
UgainJain/LearnPythonByDoing
| 5
|
12782480
|
f = open("Writelist.txt", "r")
lines = f.readlines()
for line in lines:
print(line , end = '')
f.close()
| 3.328125
| 3
|
callbacks.py
|
chansoopark98/MobileNet-SSD
| 2
|
12782481
|
import tensorflow as tf
class Scalar_LR(tf.keras.callbacks.Callback):
def __init__(self, name, TENSORBOARD_DIR):
super().__init__()
self.name = name
# self.previous_loss = None
self.file_writer = tf.summary.create_file_writer(TENSORBOARD_DIR)
self.file_writer.set_as_default()
# def on_epoch_begin(self, epoch, logs=None):
# logs['learning rate'] = self.model.optimizer.lr
# tf.summary.scalar("lr", logs['learning rate'], step=epoch)
def on_epoch_end(self, epoch, logs=None):
logs['learning rate'] = self.model.optimizer.lr
# with self.file_writer.as_default():
# # img = self.model.predict(dummy_data)
# # y_pred = self.model.predict(self.validation_data[0])
# tf.summary.image("Training data", img, step=0)
tf.summary.scalar("end_lr", logs['learning rate'], step=epoch)
#
#
# #self.previous_loss = logs['loss']
#
# def on_train_batch_begin(self, batch, logs=None):
# logs['learning rate'] = self.model.optimizer.lr
# # tf.summary.scalar("my_metric", logs['learning rate'], step=batch)
# #
# def on_train_batch_end(self, batch, logs=None):
# print('test')
#
# # tensor = self.model.get_layer('block3b_add').output
# # tensor = self.model.layers[0].output
# # tensor = tensor[0,:,:,:]
# # print(tensor)
# # plt.imshow(tensor)
# # plt.show()
#
# # intermediate_layer_model = tf.keras.Model(inputs=self.model.input,
# # outputs=self.model.get_layer('block3b_add').output)
# # intermediate_output = intermediate_layer_model.predict(self.validation_data[0])
# # print(intermediate_output)
#
# # output_images = tf.cast(self.model.call(self.data['image']),dtype=tf.float32)
# # output_images *= 255
# # print(output_images)
#
# # tf.summary.image('test', tensor, step=batch, max_outputs=1)
#
#
| 2.6875
| 3
|
scrape.py
|
gontzalm/cycling-seer
| 0
|
12782482
|
#!/home/gontz/miniconda3/envs/ih/bin/python3
from itertools import product
import click
from conf import RACES
from src import dbops, procyclingstats
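# RACES (imported from conf) is assumed to map race slugs to ((start_year, stop_year), no_races),
# e.g. {"tour-de-france": ((2010, 2021), 21)}; that is the shape unpacked in the stages branch below.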
@click.command()
@click.argument("items")
@click.option("-v", "--verbose", is_flag=True)
def scrape(items, verbose):
"""Scrape ITEMS from procyclingstats.com."""
items = items.lower()
# Invalid argument
if items not in ["riders", "stages"]:
raise click.UsageError("ITEMS must be STAGES or RIDERS")
# Scrape stages
if items == "stages":
for race, params in RACES.items():
(start_year, stop_year), no_races = params
iter_years = range(start_year, stop_year)
iter_number = range(1, no_races + 1)
for year, number in product(iter_years, iter_number):
stage = [race, year, number]
if dbops.check_exists(stage):
if verbose:
click.echo(f"{stage} already in database.")
continue
stage_data = procyclingstats.get_stage(race, year, number)
# HTTP error
if isinstance(stage_data, int):
if verbose:
click.echo(f"{stage} could not be retrieved. Status code: {stage_data}")
continue
# Is TTT
if not stage_data:
if verbose:
click.echo(f"{stage} is a team time trial. Skipping...")
continue
inserted_id = dbops.insert_stage(stage_data)
if verbose:
click.echo(f"{stage} inserted with ID: {inserted_id}")
    # Scrape riders
else:
stages = dbops.fetch_stages(project="result")
riders = [rider for stage in stages for rider in stage["result"]]
for rider in riders:
if dbops.check_exists(rider):
if verbose:
click.echo(f"{rider} already in database.")
continue
rider_data = procyclingstats.get_rider(rider)
# HTTP error
if isinstance(rider_data, int):
if verbose:
click.echo(f"{rider} could not be retrieved. Status code: {rider_data}")
continue
inserted_id = dbops.insert_rider(rider_data)
if verbose:
click.echo(f"{rider} inserted with ID: {inserted_id}")
if __name__ == "__main__":
scrape()
| 2.65625
| 3
|
ec2objects/ec2object/networkinterface.py
|
zorani/aws-ec2-objects
| 0
|
12782483
|
from __future__ import annotations
from dataclasses import dataclass
from dataclasses import field
from ..ec2common.ec2exceptions import *
@dataclass
class NetworkInterfaceAttributes:
Association: object = None
Attachment: object = None
Description: str = None
Groups: object = field(default_factory=list)
Ipv6Addresses: object = field(default_factory=list)
MacAddress: str = None
NetworkInterfaceId: str = None
OwnerId: str = None
PrivateDnsName: str = None
PrivateIpAddress: str = None
PrivateIpAddresses: object = field(default_factory=list)
SourceDestCheck: bool = None
Status: str = None
SubnetId: str = None
VpcId: str = None
InterfaceType: str = None
Ipv4Prefixes: object = field(default_factory=list)
Ipv6Prefixes: object = field(default_factory=list)
class NetworkInterfaceManager:
def __init__(self):
pass
# def dict_to_networkinterface(self, dict):
# new_networkinterface = NetworkInterface()
# new_networkinterface.attributes = NetworkInterfaceAttributes(**dict)
# return new_networkinterface
# def dict_list_to_networkinterface_list(self, dict_list):
# networkinterface_list = []
# for dict_item in dict_list:
# networkinterface_list.append(self.dict_to_networkinterface(dict_item))
# return networkinterface_list
class NetworkInterface:
def __init__(self):
self.attributes = NetworkInterfaceAttributes()
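# Illustrative sketch (mirroring the commented-out manager helpers above): attributes could
# be populated from a describe-network-interfaces style dict, e.g. with placeholder values:
#   nic = NetworkInterface()
#   nic.attributes = NetworkInterfaceAttributes(**{'NetworkInterfaceId': 'eni-0abc', 'Status': 'in-use'})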
| 2.4375
| 2
|
main.py
|
SunwardTree/LinearRegression
| 0
|
12782484
|
<filename>main.py
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 22 15:25:07 2017
@author: Jack
"""
import matplotlib.pyplot as plt
import functions as fun
# Read dataset
x, y = [], []
for sample in open("./data/prices.txt", "r"):
xx, yy = sample.split(",")
x.append(float(xx))
y.append(float(yy))
x, y = fun.np.array(x), fun.np.array(y)
# Perform normalization
x = (x - x.mean()) / x.std()
# Scatter dataset
plt.figure()
plt.scatter(x, y, c="r", s=20)
plt.show()
# Set degrees
test_set = (1, 4, 10)
# show line's x
s_x = fun.np.linspace(-2, 4, 100)
# Visualize results
plt.scatter(x, y, c="g", s=20)
for d in test_set:
s_y = fun.get_model(d, x, y, s_x)()
plt.plot(s_x, s_y, label="degree = {}".format(d))
plt.xlim(-2, 4)
plt.ylim(1e5, 8e5)
plt.legend()
plt.show()
#cost
print('cost')
for d in test_set:
print(fun.get_cost(d, x, y))
| 3.421875
| 3
|
Yellow_Pages_USA/unit_tests.py
|
Jay4C/Web-Scraping
| 1
|
12782485
|
from bs4 import BeautifulSoup
import requests
import time
import pymysql.cursors
import unittest
from validate_email import validate_email
class UnitTestsDataMinerYellowPagesUsa(unittest.TestCase):
def test_web_scraper_email_usa(self):
activites = [
{'id': '1',
'url': 'https://www.yellowpages.com/search?search_terms=Temporary+Employment+Agencies&geo_location_terms='},
{'id': '2', 'url': 'https://www.yellowpages.com/search?search_terms=real+estate&geo_location_terms='},
{'id': '3', 'url': 'https://www.yellowpages.com/search?search_terms=Recruiter&geo_location_terms='},
{'id': '4', 'url': 'https://www.yellowpages.com/search?search_terms=software&geo_location_terms='},
{'id': '5', 'url': 'https://www.yellowpages.com/search?search_terms=hotel&geo_location_terms='},
{'id': '6',
'url': 'https://www.yellowpages.com/search?search_terms=social+landlord&geo_location_terms='},
{'id': '7', 'url': 'https://www.yellowpages.com/search?search_terms=cleaning&geo_location_terms='},
{'id': '8', 'url': 'https://www.yellowpages.com/search?search_terms=Charities&geo_location_terms='},
{'id': '9', 'url': 'https://www.yellowpages.com/search?search_terms=financial&geo_location_terms='},
{'id': '10', 'url': 'https://www.yellowpages.com/search?search_terms=restaurant&geo_location_terms='},
{'id': '11', 'url': 'https://www.yellowpages.com/search?search_terms=building&geo_location_terms='},
{'id': '12', 'url': 'https://www.yellowpages.com/search?search_terms=hairdresser&geo_location_terms='},
{'id': '13', 'url': 'https://www.yellowpages.com/search?search_terms=florist&geo_location_terms='},
{'id': '14', 'url': 'https://www.yellowpages.com/search?search_terms=locksmith&geo_location_terms='},
{'id': '15', 'url': 'https://www.yellowpages.com/search?search_terms=bakery&geo_location_terms='},
{'id': '16', 'url': 'https://www.yellowpages.com/search?search_terms=insurance&geo_location_terms='},
{'id': '17', 'url': 'https://www.yellowpages.com/search?search_terms=Pharmacies&geo_location_terms='},
{'id': '18', 'url': 'https://www.yellowpages.com/search?search_terms=movers&geo_location_terms='},
{'id': '19', 'url': 'https://www.yellowpages.com/search?search_terms=electricity&geo_location_terms='},
{'id': '20', 'url': 'https://www.yellowpages.com/search?search_terms=plumbing&geo_location_terms='},
{'id': '21', 'url': 'https://www.yellowpages.com/search?search_terms=security&geo_location_terms='},
{'id': '22', 'url': 'https://www.yellowpages.com/search?search_terms=attorney&geo_location_terms='},
{'id': '23', 'url': 'https://www.yellowpages.com/search?search_terms=bank&geo_location_terms='},
{'id': '24', 'url': 'https://www.yellowpages.com/search?search_terms=mechanic&geo_location_terms='},
{'id': '25', 'url': 'https://www.yellowpages.com/search?search_terms=dentist&geo_location_terms='},
{'id': '26', 'url': 'https://www.yellowpages.com/search?search_terms=doctor&geo_location_terms='},
{'id': '27', 'url': 'https://www.yellowpages.com/search?search_terms=accountant&geo_location_terms='},
{'id': '28',
'url': 'https://www.yellowpages.com/search?search_terms=Grocery+Stores&geo_location_terms='},
{'id': '29', 'url': 'https://www.yellowpages.com/search?search_terms=notary&geo_location_terms='},
{'id': '30', 'url': 'https://www.yellowpages.com/search?search_terms=jewellery&geo_location_terms='},
{'id': '31', 'url': 'https://www.yellowpages.com/search?search_terms=tailors&geo_location_terms='},
{'id': '32', 'url': 'https://www.yellowpages.com/search?search_terms=butcher&geo_location_terms='},
{'id': '33', 'url': 'https://www.yellowpages.com/search?search_terms=library&geo_location_terms='},
{'id': '34', 'url': 'https://www.yellowpages.com/search?search_terms=Architects&geo_location_terms='}
]
capitales_du_monde = [
{'id': '2', 'nom': 'New+York%2C+NY'},
# {'id': '4', 'nom': 'Chicago%2C+IL'},
# {'id': '5', 'nom': 'Atlanta%2C+GA'},
# {'id': '6', 'nom': 'Houston%2C+TX'},
# {'id': '7', 'nom': 'Los+Angeles%2C+CA'},
# {'id': '9', 'nom': 'Albany%2C+NY'},
# {'id': '36', 'nom': 'Montgomery%2C+AL'},
# {'id': '37', 'nom': 'Birmingham%2C+AL'},
# {'id': '38', 'nom': 'Juneau%2C+AK'},
# {'id': '39', 'nom': 'Anchorage%2C+AK'},
# {'id': '40', 'nom': 'Phoenix%2C+AZ'},
# {'id': '41', 'nom': 'Little+Rock%2C+AR'},
# {'id': '42', 'nom': 'Sacramento%2C+CA'},
# {'id': '43', 'nom': 'Denver%2C+CO'},
# {'id': '44', 'nom': 'Hartford%2C+CT'},
# {'id': '45', 'nom': 'Bridgeport%2C+CT'},
# {'id': '46', 'nom': 'Dover%2C+DE'},
# {'id': '47', 'nom': 'Wilmington%2C+DE'},
# {'id': '48', 'nom': 'Tallahassee%2C+FL'},
# {'id': '49', 'nom': 'Jacksonville%2C+FL'},
# {'id': '50', 'nom': 'Honolulu%2C+HI'},
# {'id': '51', 'nom': 'Boise%2C+ID'},
# {'id': '52', 'nom': 'Springfield%2C+IL'},
# {'id': '53', 'nom': 'Indianapolis%2C+IN'},
# {'id': '54', 'nom': 'Des+Moines%2C+IA'},
# {'id': '55', 'nom': 'Topeka%2C+KS'},
# {'id': '56', 'nom': 'Wichita%2C+KS'},
# {'id': '57', 'nom': 'Frankfort%2C+KY'},
# {'id': '58', 'nom': 'Louisville%2C+KY'},
# {'id': '59', 'nom': 'Baton+Rouge%2C+LA'},
# {'id': '60', 'nom': 'New+Orleans%2C+LA'},
# {'id': '61', 'nom': 'Augusta%2C+ME'},
# {'id': '62', 'nom': 'Portland%2C+ME'},
# {'id': '63', 'nom': 'Annapolis%2C+MD'},
# {'id': '64', 'nom': 'Baltimore%2C+MD'},
# {'id': '65', 'nom': 'Boston%2C+MA'},
# {'id': '66', 'nom': 'Lansing%2C+MI'},
# {'id': '67', 'nom': 'Detroit%2C+MI'},
# {'id': '68', 'nom': 'Saint+Paul%2C+MN'},
# {'id': '69', 'nom': 'Minneapolis%2C+MN'},
# {'id': '70', 'nom': 'Jackson%2C+MS'},
# {'id': '71', 'nom': 'Jefferson+City%2C+MO'},
# {'id': '72', 'nom': 'Kansas+City%2C+MO'},
# {'id': '73', 'nom': 'Helena%2C+MT'},
# {'id': '74', 'nom': 'Billings%2C+MT'},
# {'id': '75', 'nom': 'Lincoln%2C+NE'},
# {'id': '76', 'nom': 'Omaha%2C+NE'},
# {'id': '77', 'nom': 'Carson+City%2C+NV'},
# {'id': '78', 'nom': 'Las+Vegas%2C+NV'},
# {'id': '79', 'nom': 'Concord%2C+NH'},
# {'id': '80', 'nom': 'Manchester%2C+NH'}
# {'id': '81', 'nom': 'Trenton%2C+NJ'},
# {'id': '82', 'nom': 'Newark%2C+NJ'},
# {'id': '83', 'nom': 'Santa+Fe%2C+NM'},
# {'id': '84', 'nom': 'Albuquerque%2C+NM'},
# {'id': '85', 'nom': 'Raleigh%2C+NC'},
# {'id': '86', 'nom': 'Charlotte%2C+NC'},
# {'id': '87', 'nom': 'Bismarck%2C+ND'},
# {'id': '88', 'nom': 'Columbus%2C+OH'},
# {'id': '89', 'nom': 'Oklahoma+City%2C+OK'},
# {'id': '90', 'nom': 'Salem%2C+OR'},
# {'id': '91', 'nom': 'Portland%2C+OR'},
# {'id': '92', 'nom': 'Harrisburg%2C+PA'},
# {'id': '93', 'nom': 'Philadelphia%2C+PA'},
# {'id': '94', 'nom': 'Providence%2C+RI'},
# {'id': '95', 'nom': 'Columbia%2C+SC'},
# {'id': '96', 'nom': 'Pierre%2C+SD'},
# {'id': '97', 'nom': 'Sioux+Falls%2C+SD'},
# {'id': '98', 'nom': 'Nashville%2C+TN'},
# {'id': '99', 'nom': 'Memphis%2C+TN'},
# {'id': '100', 'nom': 'Austin%2C+TX'},
# {'id': '101', 'nom': 'Salt+Lake+City%2C+UT'},
# {'id': '102', 'nom': 'Montpelier%2C+VT'},
# {'id': '103', 'nom': 'Burlington%2C+VT'},
# {'id': '104', 'nom': 'Richmond%2C+VA'},
# {'id': '105', 'nom': 'Olympia%2C+WA'},
# {'id': '106', 'nom': 'Seattle%2C+WA'},
# {'id': '107', 'nom': 'Charleston%2C+WV'},
# {'id': '108', 'nom': 'Madison%2C+WI'},
# {'id': '109', 'nom': 'Milwaukee%2C+WI'},
# {'id': '110', 'nom': 'Cheyenne%2C+WY'}
]
try:
for capitale_du_monde in capitales_du_monde:
for activite in activites:
i_1 = 0
i = 1
var = 1
while var == 1 and i < 102:
try:
url = activite.get('url') + capitale_du_monde.get('nom') + "&page=" + str(i)
# Request the content of a page from the url
html = requests.get(url)
time.sleep(3)
# Parse the content of html_doc
soup = BeautifulSoup(html.content, 'html.parser')
print(url)
                            if soup.find("a", {"class": "business-name"}) is None:
print('sorry there is nothing')
break
else:
try:
for link in soup.find_all("a", {"class": "business-name"}):
i_1 += 1
# Request the content of a page from the url
url_page = "https://www.yellowpages.com" + link.get('href')
html_doc = requests.get(url_page)
time.sleep(3)
# Parse the content of html_doc
soup_link = BeautifulSoup(html_doc.content, 'html.parser')
if soup_link.find("a", {"class": "email-business"}) is not None:
email_business = soup_link.select(".email-business")[0].get('href')[7:]
suffixes = [
"info@"
]
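                                            # The scraped address keeps only its domain; each generic suffix (currently just 'info@') is prepended to it below.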
for suffix in suffixes:
email = str(suffix + email_business.split("@")[1])
try:
is_valid = validate_email(
email_address=email,
check_regex=True,
check_mx=True,
from_address='',
helo_host='',
smtp_timeout=10,
dns_timeout=10,
use_blacklist=True
)
if is_valid:
try:
# Connect to the database
connection = pymysql.connect(
host='localhost',
port=3306,
user='',
password='',
db='contacts_professionnels',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
with connection.cursor() as cursor:
try:
sql = "INSERT INTO `emails` (" \
"`id_activite`, " \
"`id_capitale_du_monde`, " \
"`email`) VALUE (%s, %s, %s)"
cursor.execute(sql, (
activite.get('id'),
capitale_du_monde.get('id'),
email))
connection.commit()
print(str(i_1) + " The record is stored : "
+ str(email))
connection.close()
except Exception as e:
print(str(i_1) + " The record already exists : "
+ str(email) + " " + str(e))
connection.close()
except Exception as e:
print("Problem connection MySQL : " + str(e))
else:
print(
str(i_1) + " The email : " + email + " doesn't exist.")
except Exception as e:
print(str(
i_1) + " An error with the email : " + email + " " + str(e))
else:
print(str(i_1) + " no email business")
except Exception as e:
print("There is an error connection at url_page : " + str(e))
except Exception as e:
print("There is an error connection at url : " + str(e))
i += 1
finally:
print('done')
if __name__ == '__main__':
unittest.main()
| 2.5625
| 3
|
pygameUoN-hy-dev/restructured/walldata.py
|
moumou666/collection
| 0
|
12782486
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 19:44:15 2020
@author: Zane
"""
# width = 50,height = 40
walls=[{"x":0,"y":430},
{"x":50,"y":430},
{"x":100,"y":430},
{"x":145,"y":385},
{"x":195,"y":385},
{"x":245,"y":385},
{"x":285,"y":345},
{"x":335,"y":345},
{"x":385,"y":345},
{"x":435,"y":345},
{"x":470,"y":295},
{"x":515,"y":295},
{"x":515,"y":345},
{"x":560,"y":385},
{"x":610,"y":385},
{"x":655,"y":255},
{"x":695,"y":255},
{"x":655,"y":430},
{"x":705,"y":430},
{"x":755,"y":430},
{"x":805,"y":430},
{"x":855,"y":430},
{"x":885,"y":430},
{"x":800,"y":300},
{"x":850,"y":300},
{"x":900,"y":300},
{"x":935,"y":300},
{"x":985,"y":385},
{"x":1035,"y":385},
{"x":1080,"y":340},
{"x":1130,"y":340},
{"x":1180,"y":340},
{"x":1220,"y":300},
{"x":1230,"y":340},
{"x":1265,"y":300},
{"x":1265,"y":340},
{"x":1265,"y":385},
{"x":1315,"y":385},
{"x":935,"y":470},
{"x":985,"y":470},
{"x":1035,"y":470},
{"x":1085,"y":470},
{"x":1135,"y":470},
{"x":1185,"y":470},
{"x":1235,"y":470},
{"x":1285,"y":470},
{"x":1335,"y":470},
{"x":1385,"y":470},
{"x":1435,"y":470},
{"x":1455,"y":470},
{"x":1365,"y":385},
{"x":1405,"y":385},
{"x":1412,"y":342},
{"x":1462,"y":342},
{"x":1512,"y":342},
{"x":1562,"y":342},
{"x":1612,"y":342},
{"x":1625,"y":342},
{"x":1675,"y":470},
{"x":1625,"y":470},
{"x":1575,"y":470},
{"x":1555,"y":470},
{"x":1725,"y":430},
{"x":1775,"y":385},
{"x":1775,"y":300},
{"x":1725,"y":300},
{"x":1810,"y":300},
{"x":1820,"y":430},
{"x":1870,"y":430},
{"x":1920,"y":430},
{"x":1970,"y":430},
{"x":1918,"y":340},
{"x":2015,"y":385},
{"x":2065,"y":385},
{"x":2110,"y":342},
{"x":2160,"y":342},
{"x":2210,"y":342},
{"x":2260,"y":342},
{"x":2310,"y":342},
{"x":2360,"y":342},
{"x":2410,"y":342},
{"x":2460,"y":342},
{"x":2510,"y":342},
{"x":2540,"y":297},
{"x":2590,"y":297},
{"x":2640,"y":297},
{"x":2680,"y":297},
{"x":2685,"y":255},
{"x":2735,"y":255},
{"x":2775,"y":255},
{"x":2735,"y":340},
{"x":2785,"y":340},
{"x":2835,"y":340},
{"x":2885,"y":340},
{"x":2885,"y":210},
{"x":2915,"y":210},
{"x":2935,"y":340},
{"x":2985,"y":340},
{"x":3015,"y":340},
{"x":3065,"y":340},
{"x":3115,"y":340},
{"x":3030,"y":168},
{"x":3060,"y":168},
{"x":3170,"y":125},
{"x":3215,"y":125},
{"x":3240,"y":125},
{"x":3165,"y":340},
{"x":3215,"y":340},
{"x":3265,"y":340},
{"x":3300,"y":298},
{"x":3350,"y":298},
{"x":3380,"y":298},
{"x":3305,"y":340},
{"x":3355,"y":340},
{"x":3405,"y":340},
{"x":3455,"y":340},
{"x":3505,"y":340},
{"x":3555,"y":340},
{"x":3605,"y":340},
{"x":3655,"y":340},
{"x":3705,"y":340},
{"x":3755,"y":340},
{"x":3805,"y":340},
{"x":3840,"y":298},
{"x":3885,"y":255},
{"x":3935,"y":255},
{"x":3985,"y":255},
{"x":4050,"y":255},
{"x":4050,"y":212},
{"x":4050,"y":295},
{"x":4105,"y":340},
{"x":4155,"y":340},
{"x":4205,"y":340},
{"x":4255,"y":340},
{"x":4305,"y":340},
{"x":4355,"y":340},
{"x":4370,"y":295},
{"x":4415,"y":255},
{"x":4460,"y":210},
{"x":4465,"y":255},
{"x":4515,"y":255},
{"x":4540,"y":255},
{"x":4455,"y":295},
{"x":4505,"y":340},
{"x":4555,"y":340},
{"x":4605,"y":340},
{"x":4655,"y":340},
{"x":4705,"y":340},
{"x":4725,"y":300},
{"x":4725,"y":255},
{"x":4680,"y":255},
{"x":4645,"y":255},
{"x":4755,"y":340},
{"x":4805,"y":340},
{"x":4855,"y":340},
{"x":4905,"y":340},
{"x":4955,"y":340},
{"x":4775,"y":210},
{"x":4825,"y":210},
{"x":4875,"y":210},
{"x":4900,"y":210},
{"x":4950,"y":255},
{"x":5000,"y":340},
{"x":5050,"y":340},
{"x":5100,"y":340},
{"x":5150,"y":340},
{"x":5200,"y":340},
{"x":5250,"y":340},
{"x":5300,"y":340},
{"x":5350,"y":340},
{"x":5365,"y":295},
{"x":5410,"y":255},
{"x":5450,"y":255}]
| 2.078125
| 2
|
core/main.py
|
DploY707/static-apk-analyzer
| 0
|
12782487
|
<gh_stars>0
import os
import pickle
import utils
from code_extractor import CodeExtractor
DATASET_ROOT_PATH = '/root/workDir/data'
RESULT_ROOT_PATH = '/root/result'
def print_analyzing_status(index, dataSetSize, dataSetDir, targetAPK) :
print(utils.set_string_colored('[' + str(index+1) + ' / ' + str(dataSetSize) + ']', utils.Color.GREEN.value) + ' in "' + dataSetDir + '" Analyzing...... "' + targetAPK + '"')
def print_progress_directories(index, totalDirList) :
print('\n' + utils.set_string_colored('[' + totalDirList[index] + ']', utils.Color.GREEN.value) + ' in ' + str(totalDirList))
def print_count_completed_apk(apkNum) :
print('Complete analyzing for ' + str(apkNum) + ' apks :)')
def save_result(resultPath, data) :
result = open(resultPath, 'wb')
pickle.dump(data, result)
result.close()
def generate_methodLists_from_dataSet(dataDir, resultDir) :
dataSet = os.listdir(dataDir)
apkNum = len(dataSet)
for i in range(0, apkNum) :
print_analyzing_status(i, apkNum, dataDir, dataSet[i])
APKFilePath = dataDir + '/' + dataSet[i]
resultFilePath_methodList = resultDir + '/methodInfo/' + str(i) + '_' + dataSet[i] + '_byte_method.pickle'
resultFilePath_referenceList = resultDir + '/referenceInfo/' + str(i) + '_' + dataSet[i] + '_byte_call.pickle'
ce = CodeExtractor(APKFilePath)
save_result(resultFilePath_methodList, ce.get_methodInfoList())
save_result(resultFilePath_referenceList, ce.get_referenceInfoList())
print_count_completed_apk(apkNum)
if __name__ == '__main__' :
datasetDirList = utils.get_endpointDirList(DATASET_ROOT_PATH)
resultDirList = utils.replace_string_in_list(datasetDirList, DATASET_ROOT_PATH, RESULT_ROOT_PATH)
for resultDir in resultDirList :
utils.generate_directories_to_endpoint(resultDir)
for i in range(len(datasetDirList)) :
print_progress_directories(i, datasetDirList)
generate_methodLists_from_dataSet(datasetDirList[i], resultDirList[i])
| 2.28125
| 2
|
src/python/grpcio_testing/grpc_testing/_channel/_channel_state.py
|
clsater/grpc
| 0
|
12782488
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import threading
from grpc_testing import _common
from grpc_testing._channel import _rpc_state
class TimeoutException(Exception):
pass
class State(_common.ChannelHandler):
def __init__(self):
self._condition = threading.Condition()
self._rpc_states = collections.defaultdict(list)
def invoke_rpc(self, method_full_rpc_name, invocation_metadata, requests,
requests_closed, timeout):
rpc_state = _rpc_state.State(invocation_metadata, requests,
requests_closed)
with self._condition:
self._rpc_states[method_full_rpc_name].append(rpc_state)
self._condition.notify_all()
return rpc_state
def take_rpc_state(self, method_descriptor, timeout):
method_full_rpc_name = '/{}/{}'.format(
method_descriptor.containing_service.full_name,
method_descriptor.name)
with self._condition:
while True:
method_rpc_states = self._rpc_states[method_full_rpc_name]
if method_rpc_states:
return method_rpc_states.pop(0)
else:
if not self._condition.wait(timeout=timeout):
raise TimeoutException("Timeout while waiting for rpc.")
| 2.109375
| 2
|
scripts/modules.d/199-Keyboard-Default.py
|
pauljeremyturner/gizmod
| 0
|
12782489
|
<gh_stars>0
#***
#*********************************************************************
#*************************************************************************
#***
#*** GizmoDaemon Config Script
#*** Keyboard Default config
#***
#*****************************************
#*****************************************
#***
"""
Copyright (c) 2007, Gizmo Daemon Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
############################
# Imports
##########################
from GizmoDaemon import *
from GizmoScriptDefault import *
import subprocess
ENABLED = True
VERSION_NEEDED = 3.2
INTERESTED_CLASSES = [GizmoEventClass.Standard]
############################
# KeyboardDefault Class definition
##########################
class KeyboardDefault(GizmoScriptDefault):
"""
Default Fancy Keyboard Event Mapping
"""
############################
# Public Functions
##########################
def onDeviceEvent(self, Event, Gizmo = None):
"""
Called from Base Class' onEvent method.
See GizmodDispatcher.onEvent documention for an explanation of this function
"""
# process the key
if Event.Code == GizmoKey.KEY_EJECTCD or \
Event.Code == GizmoKey.KEY_EJECTCLOSECD:
subprocess.Popen(["eject", "/dev/dvd"])
subprocess.Popen(["eject", "/dev/cdrom"])
return True
elif Event.Code == GizmoKey.KEY_CLOSECD:
subprocess.Popen(["mount", "/media/dvd"])
subprocess.Popen(["mount", "/media/cdrom"])
subprocess.Popen(["mount", "/mnt/dvd"])
subprocess.Popen(["mount", "/mnt/cdrom"])
return True
elif Event.Code == GizmoKey.KEY_WWW:
subprocess.Popen(["firefox", "http://gizmod.sf.net"])
return True
elif Event.Code == GizmoKey.KEY_VOLUMEUP:
Gizmod.DefaultMixerVolume.VolumePlayback = Gizmod.DefaultMixerVolume.VolumePlayback + 1
return True
elif Event.Code == GizmoKey.KEY_VOLUMEDOWN:
Gizmod.DefaultMixerVolume.VolumePlayback = Gizmod.DefaultMixerVolume.VolumePlayback - 1
return True
elif Event.Code == GizmoKey.KEY_MUTE:
Gizmod.toggleMuteAllCards()
return True
elif Event.Code == GizmoKey.KEY_NEXTSONG:
# ctrl alt right (switch workspace in compiz)
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHT, [GizmoKey.KEY_LEFTCTRL, GizmoKey.KEY_LEFTALT])
return True
elif Event.Code == GizmoKey.KEY_PREVIOUSSONG:
# ctrl alt left (switch workspace in compiz)
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFT, [GizmoKey.KEY_LEFTCTRL, GizmoKey.KEY_LEFTALT])
return True
else:
# unmatched event, keep processing
return False
############################
# Private Functions
##########################
def __init__(self):
"""
Default Constructor
"""
GizmoScriptDefault.__init__(self, ENABLED, VERSION_NEEDED, INTERESTED_CLASSES)
############################
# KeyboardDefault class end
##########################
# register the user script
KeyboardDefault()
| 1.492188
| 1
|
ansible/lib/ansible/modules/core/network/nxos/nxos_gir.py
|
kiv-box/kafka
| 0
|
12782490
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_gir
version_added: "2.2"
short_description: Trigger a graceful removal or insertion (GIR) of the switch.
description:
- Trigger a graceful removal or insertion (GIR) of the switch.
extends_documentation_fragment: nxos
author:
- <NAME> (@GGabriele)
notes:
- C(state) has effect only in combination with
C(system_mode_maintenance_timeout) or
C(system_mode_maintenance_on_reload_reset_reason).
- Using C(system_mode_maintenance) and
C(system_mode_maintenance_dont_generate_profile) would make the module
fail, but the system mode will be triggered anyway.
options:
system_mode_maintenance:
description:
- When C(system_mode_maintenance=true) it puts all enabled
protocols in maintenance mode (using the isolate command).
When C(system_mode_maintenance=false) it puts all enabled
protocols in normal mode (using the no isolate command).
required: false
default: null
choices: ['true','false']
system_mode_maintenance_dont_generate_profile:
description:
- When C(system_mode_maintenance_dont_generate_profile=true) it
prevents the dynamic searching of enabled protocols and executes
commands configured in a maintenance-mode profile.
Use this option if you want the system to use a maintenance-mode
profile that you have created.
When C(system_mode_maintenance_dont_generate_profile=false) it
prevents the dynamic searching of enabled protocols and executes
commands configured in a normal-mode profile. Use this option if
you want the system to use a normal-mode profile that
you have created.
required: false
default: null
choices: ['true','false']
system_mode_maintenance_timeout:
description:
- Keeps the switch in maintenance mode for a specified
number of minutes. Range is 5-65535.
required: false
default: null
system_mode_maintenance_shutdown:
description:
- Shuts down all protocols, vPC domains, and interfaces except
the management interface (using the shutdown command).
This option is disruptive while C(system_mode_maintenance)
(which uses the isolate command) is not.
required: false
default: null
choices: ['true','false']
system_mode_maintenance_on_reload_reset_reason:
description:
- Boots the switch into maintenance mode automatically in the
event of a specified system crash.
required: false
default: null
choices: ['hw_error','svc_failure','kern_failure','wdog_timeout',
'fatal_error','lc_failure','match_any','manual_reload']
state:
description:
- Specify desired state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Trigger system maintenance mode
- nxos_gir:
system_mode_maintenance: true
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ <PASSWORD> }}"
# Trigger system normal mode
- nxos_gir:
system_mode_maintenance: false
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ <PASSWORD> }}"
# Configure on-reload reset-reason for maintenance mode
- nxos_gir:
system_mode_maintenance_on_reload_reset_reason: manual_reload
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ <PASSWORD> }}"
# Add on-reload reset-reason for maintenance mode
- nxos_gir:
system_mode_maintenance_on_reload_reset_reason: hw_error
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ <PASSWORD> }}"
# Remove on-reload reset-reason for maintenance mode
- nxos_gir:
system_mode_maintenance_on_reload_reset_reason: manual_reload
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ <PASSWORD> }}"
# Set timeout for maintenance mode
- nxos_gir:
system_mode_maintenance_timeout: 30
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ <PASSWORD> }}"
# Remove timeout for maintenance mode
- nxos_gir:
system_mode_maintenance_timeout: 30
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ <PASSWORD> }}"
'''
RETURN = '''
final_system_mode:
description: describe the last system mode
returned: verbose mode
type: string
sample: normal
updates:
description: commands sent to the device
returned: verbose mode
type: list
sample: ["terminal dont-ask", "system mode maintenance timeout 10"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule, NetworkError
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
def execute_show(cmds, module, command_type=None):
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
try:
            if command_type:
                command_type = command_type_map.get(command_type)
            module.cli.add_commands(cmds, output=command_type)
            response = module.cli.run_commands()
except NetworkError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show_ascii'):
cmds = [command]
if module.params['transport'] == 'cli':
body = execute_show(cmds, module)
elif module.params['transport'] == 'nxapi':
body = execute_show(cmds, module, command_type=command_type)
return body
def execute_config_command(commands, module):
try:
module.configure(commands)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
except AttributeError:
try:
module.config.load_config(commands)
except NetworkError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
def get_system_mode(module):
command = 'show system mode'
body = execute_show_command(command, module)[0]
if 'normal' in body.lower():
mode = 'normal'
else:
mode = 'maintenance'
return mode
def get_maintenance_timeout(module):
command = 'show maintenance timeout'
body = execute_show_command(command, module)[0]
timeout = body.split()[4]
return timeout
def get_reset_reasons(module):
command = 'show maintenance on-reload reset-reasons'
body = execute_show_command(command, module)[0]
return body
def get_commands(module, state, mode):
commands = list()
system_mode = ''
if module.params['system_mode_maintenance'] is True and mode == 'normal':
commands.append('system mode maintenance')
elif (module.params['system_mode_maintenance'] is False and
mode == 'maintenance'):
commands.append('no system mode maintenance')
elif (module.params[
'system_mode_maintenance_dont_generate_profile'] is True and
mode == 'normal'):
commands.append('system mode maintenance dont-generate-profile')
elif (module.params[
'system_mode_maintenance_dont_generate_profile'] is False and
mode == 'maintenance'):
commands.append('no system mode maintenance dont-generate-profile')
elif module.params['system_mode_maintenance_timeout']:
timeout = get_maintenance_timeout(module)
if (state == 'present' and
timeout != module.params['system_mode_maintenance_timeout']):
commands.append('system mode maintenance timeout {0}'.format(
module.params['system_mode_maintenance_timeout']))
elif (state == 'absent' and
timeout == module.params['system_mode_maintenance_timeout']):
commands.append('no system mode maintenance timeout {0}'.format(
module.params['system_mode_maintenance_timeout']))
elif module.params['system_mode_maintenance_shutdown'] is True:
commands.append('system mode maintenance shutdown')
elif module.params['system_mode_maintenance_on_reload_reset_reason']:
reset_reasons = get_reset_reasons(module)
if (state == 'present' and
module.params[
'system_mode_maintenance_on_reload_reset_reason'].lower() not
in reset_reasons.lower()):
commands.append('system mode maintenance on-reload '
'reset-reason {0}'.format(
module.params[
'system_mode_maintenance_on_reload_reset_reason']))
elif (state == 'absent' and
module.params[
'system_mode_maintenance_on_reload_reset_reason'].lower() in
reset_reasons.lower()):
commands.append('no system mode maintenance on-reload '
'reset-reason {0}'.format(
module.params[
'system_mode_maintenance_on_reload_reset_reason']))
if commands:
commands.insert(0, 'terminal dont-ask')
return commands
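# Illustrative sketch (hypothetical parameter values, not taken from the module
# documentation): with state='present', the device in 'normal' mode and
# system_mode_maintenance=True, get_commands() returns
# ['terminal dont-ask', 'system mode maintenance'].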
def main():
argument_spec = dict(
system_mode_maintenance=dict(required=False, type='bool'),
system_mode_maintenance_dont_generate_profile=dict(required=False,
type='bool'),
system_mode_maintenance_timeout=dict(required=False, type='str'),
system_mode_maintenance_shutdown=dict(required=False, type='bool'),
system_mode_maintenance_on_reload_reset_reason=dict(required=False,
choices=['hw_error','svc_failure','kern_failure',
'wdog_timeout','fatal_error','lc_failure',
'match_any','manual_reload']),
state=dict(choices=['absent', 'present', 'default'],
default='present', required=False)
)
module = get_network_module(argument_spec=argument_spec,
mutually_exclusive=[[
'system_mode_maintenance',
'system_mode_maintenance_dont_generate_profile',
'system_mode_maintenance_timeout',
'system_mode_maintenance_shutdown',
'system_mode_maintenance_on_reload_reset_reason'
]],
required_one_of=[[
'system_mode_maintenance',
'system_mode_maintenance_dont_generate_profile',
'system_mode_maintenance_timeout',
'system_mode_maintenance_shutdown',
'system_mode_maintenance_on_reload_reset_reason'
]],
supports_check_mode=True)
state = module.params['state']
mode = get_system_mode(module)
commands = get_commands(module, state, mode)
changed = False
if commands:
if module.check_mode:
module.exit_json(changed=True, commands=commands)
else:
execute_config_command(commands, module)
changed = True
result = {}
result['connected'] = module.connected
result['changed'] = changed
if module._verbosity > 0:
final_system_mode = get_system_mode(module)
result['final_system_mode'] = final_system_mode
result['updates'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
| 1.320313
| 1
|
Calendars.py
|
noahwc/gtfs-pyparse
| 0
|
12782491
|
<reponame>noahwc/gtfs-pyparse<gh_stars>0
# This file models the data of a single line of a calendar_dates.txt gtfs file
from Reader import Reader
from pathlib import Path
from collections import defaultdict
class Calendar:
def __init__(self, line):
self.start_date = line["start_date"]
self.service_id = line["service_id"]
self.end_date = line["end_date"]
self.mon = line["monday"]
self.tues = line["tuesday"]
self.wed = line["wednesday"]
self.thurs = line["thursday"]
self.fri = line["friday"]
self.sat = line["saturday"]
self.sun = line["sunday"]
class Calendars:
def __init__(self, path):
read_calendars = Reader(path / "calendar.txt")
self.calendars_list = defaultdict(list)
line = read_calendars.get_line()
while line:
service = Calendar(line)
service_info = {"start" : service.start_date, "end" : service.end_date, "id" : service.service_id}
if service.mon == "1":
self.calendars_list["monday"].append(service_info)
if service.tues == "1":
self.calendars_list["tuesday"].append(service_info)
if service.wed == "1":
self.calendars_list["wednesday"].append(service_info)
if service.thurs == "1":
self.calendars_list["thursday"].append(service_info)
if service.fri == "1":
self.calendars_list["friday"].append(service_info)
if service.sat == "1":
self.calendars_list["saturday"].append(service_info)
if service.sun == "1":
self.calendars_list["sunday"].append(service_info)
line = read_calendars.get_line()
read_calendars.end()
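# Hypothetical usage sketch (the feed directory below is an assumption, not a
# path shipped with this project): build the per-weekday service index and
# list the services that run on Mondays.
#
# from pathlib import Path
# calendars = Calendars(Path("gtfs_feed"))
# for service in calendars.calendars_list["monday"]:
#     print(service["id"], service["start"], service["end"])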
| 3.34375
| 3
|
scripts/experiments/rpy_PELT.py
|
JackKelly/slicedpy
| 3
|
12782492
|
<filename>scripts/experiments/rpy_PELT.py
from __future__ import print_function, division
from rpy2.robjects.packages import importr
from rpy2.robjects import FloatVector
import numpy as np
from matplotlib.pyplot import plot, scatter  # the script calls plot/scatter directly (pylab style)
from os import path
from pda.channel import Channel
"""
Documentation for changepoint R package:
http://cran.r-project.org/web/packages/changepoint/changepoint.pdf
"""
cpt = importr('changepoint')
# data = FloatVector(np.concatenate([np.random.normal( 0,1,100),
# np.random.normal( 5,1,100),
# np.random.normal( 0,1,100),
# np.random.normal(10,1,100)]))
DATA_DIR = '/data/mine/domesticPowerData/BellendenRd/wattsUp'
SIG_DATA_FILENAME = 'breadmaker1.csv'
#SIG_DATA_FILENAME = 'washingmachine1.csv'
#SIG_DATA_FILENAME = 'kettle1.csv'
chan = Channel()
chan.load_wattsup(path.join(DATA_DIR, SIG_DATA_FILENAME))
data = chan.series.values# [142:1647]# [:1353][:153]
data = FloatVector(data)
changepoints = cpt.PELT_mean_norm(data, pen=100000 * np.log(len(data)))
plot(data)
for point in changepoints:
    plot([point, point], [0, 3000], color='k')
scatter(list(changepoints), [0] * len(changepoints))
print(changepoints)
| 2.453125
| 2
|
likert_field/templatetags/likert_fa_stars.py
|
juliandehne/django-likert-field
| 17
|
12782493
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import template
from .likert_star_tools import render_stars
from django.utils.safestring import mark_safe
register = template.Library()
# Font-awesome stars ver 3
star_set_3 = {
'star': "<i class='icon-star likert-star'></i>",
'unlit': "<i class='icon-star-empty likert-star'></i>",
'noanswer': "<i class='icon-ban-circle likert-star'></i>"
}
# Font-awesome stars ver 4
star_set_4 = {
'star': "<i class='fa fa-star likert-star'></i>",
'unlit': "<i class='fa fa-star-o likert-star'></i>",
'noanswer': "<i class='fa fa-ban likert-star'></i>"
}
def fa_stars3(num, max_stars=5):
"""
Stars for Font Awesome 3
If num is not None, the returned string will contain num solid stars
followed by max_stars - num empty stars
"""
return mark_safe(render_stars(num, max_stars, star_set_3))
register.filter('fa_stars3', fa_stars3)
def fa_stars4(num, max_stars=5):
"""
Stars for Font Awesome 4
If num is not None, the returned string will contain num solid stars
followed by max_stars - num empty stars
"""
return mark_safe(render_stars(num, max_stars, star_set_4))
register.filter('fa_stars4', fa_stars4)
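# Hedged usage sketch: in a Django template, after loading this tag library
# (the library name below assumes it matches this module's filename), a stored
# answer can be rendered as five Font Awesome 4 stars:
#
# {% load likert_fa_stars %}
# {{ answer.score|fa_stars4:5 }}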
| 2.359375
| 2
|
model.py
|
nachewigkeit/StegaStamp_pytorch
| 33
|
12782494
|
import sys
sys.path.append("PerceptualSimilarity\\")
import os
import utils
import torch
import numpy as np
from torch import nn
import torchgeometry
from kornia import color
import torch.nn.functional as F
from torchvision import transforms
class Dense(nn.Module):
def __init__(self, in_features, out_features, activation='relu', kernel_initializer='he_normal'):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.activation = activation
self.kernel_initializer = kernel_initializer
self.linear = nn.Linear(in_features, out_features)
# initialization
if kernel_initializer == 'he_normal':
nn.init.kaiming_normal_(self.linear.weight)
else:
raise NotImplementedError
def forward(self, inputs):
outputs = self.linear(inputs)
if self.activation is not None:
if self.activation == 'relu':
outputs = nn.ReLU(inplace=True)(outputs)
return outputs
class Conv2D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, activation='relu', strides=1):
super(Conv2D, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.activation = activation
self.strides = strides
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, strides, int((kernel_size - 1) / 2))
# default: using he_normal as the kernel initializer
nn.init.kaiming_normal_(self.conv.weight)
def forward(self, inputs):
outputs = self.conv(inputs)
if self.activation is not None:
if self.activation == 'relu':
outputs = nn.ReLU(inplace=True)(outputs)
else:
raise NotImplementedError
return outputs
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, input):
return input.view(input.size(0), -1)
class StegaStampEncoder(nn.Module):
def __init__(self):
super(StegaStampEncoder, self).__init__()
self.secret_dense = Dense(100, 7500, activation='relu', kernel_initializer='he_normal')
self.conv1 = Conv2D(6, 32, 3, activation='relu')
self.conv2 = Conv2D(32, 32, 3, activation='relu', strides=2)
self.conv3 = Conv2D(32, 64, 3, activation='relu', strides=2)
self.conv4 = Conv2D(64, 128, 3, activation='relu', strides=2)
self.conv5 = Conv2D(128, 256, 3, activation='relu', strides=2)
self.up6 = Conv2D(256, 128, 3, activation='relu')
self.conv6 = Conv2D(256, 128, 3, activation='relu')
self.up7 = Conv2D(128, 64, 3, activation='relu')
self.conv7 = Conv2D(128, 64, 3, activation='relu')
self.up8 = Conv2D(64, 32, 3, activation='relu')
self.conv8 = Conv2D(64, 32, 3, activation='relu')
self.up9 = Conv2D(32, 32, 3, activation='relu')
self.conv9 = Conv2D(70, 32, 3, activation='relu')
self.residual = Conv2D(32, 3, 1, activation=None)
def forward(self, inputs):
secrect, image = inputs
secrect = secrect - .5
image = image - .5
secrect = self.secret_dense(secrect)
secrect = secrect.reshape(-1, 3, 50, 50)
secrect_enlarged = nn.Upsample(scale_factor=(8, 8))(secrect)
inputs = torch.cat([secrect_enlarged, image], dim=1)
conv1 = self.conv1(inputs)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
up6 = self.up6(nn.Upsample(scale_factor=(2, 2))(conv5))
merge6 = torch.cat([conv4, up6], dim=1)
conv6 = self.conv6(merge6)
up7 = self.up7(nn.Upsample(scale_factor=(2, 2))(conv6))
merge7 = torch.cat([conv3, up7], dim=1)
conv7 = self.conv7(merge7)
up8 = self.up8(nn.Upsample(scale_factor=(2, 2))(conv7))
merge8 = torch.cat([conv2, up8], dim=1)
conv8 = self.conv8(merge8)
up9 = self.up9(nn.Upsample(scale_factor=(2, 2))(conv8))
merge9 = torch.cat([conv1, up9, inputs], dim=1)
conv9 = self.conv9(merge9)
residual = self.residual(conv9)
return residual
class SpatialTransformerNetwork(nn.Module):
def __init__(self):
super(SpatialTransformerNetwork, self).__init__()
self.localization = nn.Sequential(
Conv2D(3, 32, 3, strides=2, activation='relu'),
Conv2D(32, 64, 3, strides=2, activation='relu'),
Conv2D(64, 128, 3, strides=2, activation='relu'),
Flatten(),
Dense(320000, 128, activation='relu'),
nn.Linear(128, 6)
)
self.localization[-1].weight.data.fill_(0)
self.localization[-1].bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0])
def forward(self, image):
theta = self.localization(image)
theta = theta.view(-1, 2, 3)
grid = F.affine_grid(theta, image.size(), align_corners=False)
transformed_image = F.grid_sample(image, grid, align_corners=False)
return transformed_image
class StegaStampDecoder(nn.Module):
def __init__(self, secret_size=100):
super(StegaStampDecoder, self).__init__()
self.secret_size = secret_size
self.stn = SpatialTransformerNetwork()
self.decoder = nn.Sequential(
Conv2D(3, 32, 3, strides=2, activation='relu'),
Conv2D(32, 32, 3, activation='relu'),
Conv2D(32, 64, 3, strides=2, activation='relu'),
Conv2D(64, 64, 3, activation='relu'),
Conv2D(64, 64, 3, strides=2, activation='relu'),
Conv2D(64, 128, 3, strides=2, activation='relu'),
Conv2D(128, 128, 3, strides=2, activation='relu'),
Flatten(),
Dense(21632, 512, activation='relu'),
Dense(512, secret_size, activation=None))
def forward(self, image):
image = image - .5
transformed_image = self.stn(image)
return torch.sigmoid(self.decoder(transformed_image))
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
Conv2D(3, 8, 3, strides=2, activation='relu'),
Conv2D(8, 16, 3, strides=2, activation='relu'),
Conv2D(16, 32, 3, strides=2, activation='relu'),
Conv2D(32, 64, 3, strides=2, activation='relu'),
Conv2D(64, 1, 3, activation=None))
def forward(self, image):
x = image - .5
x = self.model(x)
output = torch.mean(x)
return output, x
def transform_net(encoded_image, args, global_step):
sh = encoded_image.size()
ramp_fn = lambda ramp: np.min([global_step / ramp, 1.])
rnd_bri = ramp_fn(args.rnd_bri_ramp) * args.rnd_bri
rnd_hue = ramp_fn(args.rnd_hue_ramp) * args.rnd_hue
rnd_brightness = utils.get_rnd_brightness_torch(rnd_bri, rnd_hue, args.batch_size) # [batch_size, 3, 1, 1]
jpeg_quality = 100. - torch.rand(1)[0] * ramp_fn(args.jpeg_quality_ramp) * (100. - args.jpeg_quality)
rnd_noise = torch.rand(1)[0] * ramp_fn(args.rnd_noise_ramp) * args.rnd_noise
contrast_low = 1. - (1. - args.contrast_low) * ramp_fn(args.contrast_ramp)
contrast_high = 1. + (args.contrast_high - 1.) * ramp_fn(args.contrast_ramp)
contrast_params = [contrast_low, contrast_high]
rnd_sat = torch.rand(1)[0] * ramp_fn(args.rnd_sat_ramp) * args.rnd_sat
# blur
N_blur = 7
f = utils.random_blur_kernel(probs=[.25, .25], N_blur=N_blur, sigrange_gauss=[1., 3.], sigrange_line=[.25, 1.],
wmin_line=3)
if args.cuda:
f = f.cuda()
encoded_image = F.conv2d(encoded_image, f, bias=None, padding=int((N_blur - 1) / 2))
# noise
noise = torch.normal(mean=0, std=rnd_noise, size=encoded_image.size(), dtype=torch.float32)
if args.cuda:
noise = noise.cuda()
encoded_image = encoded_image + noise
encoded_image = torch.clamp(encoded_image, 0, 1)
# contrast & brightness
contrast_scale = torch.Tensor(encoded_image.size()[0]).uniform_(contrast_params[0], contrast_params[1])
contrast_scale = contrast_scale.reshape(encoded_image.size()[0], 1, 1, 1)
if args.cuda:
contrast_scale = contrast_scale.cuda()
rnd_brightness = rnd_brightness.cuda()
encoded_image = encoded_image * contrast_scale
encoded_image = encoded_image + rnd_brightness
encoded_image = torch.clamp(encoded_image, 0, 1)
# saturation
sat_weight = torch.FloatTensor([.3, .6, .1]).reshape(1, 3, 1, 1)
if args.cuda:
sat_weight = sat_weight.cuda()
encoded_image_lum = torch.mean(encoded_image * sat_weight, dim=1).unsqueeze_(1)
encoded_image = (1 - rnd_sat) * encoded_image + rnd_sat * encoded_image_lum
# jpeg
encoded_image = encoded_image.reshape([-1, 3, 400, 400])
if not args.no_jpeg:
encoded_image = utils.jpeg_compress_decompress(encoded_image, rounding=utils.round_only_at_0,
quality=jpeg_quality)
return encoded_image
def get_secret_acc(secret_true, secret_pred):
if 'cuda' in str(secret_pred.device):
secret_pred = secret_pred.cpu()
secret_true = secret_true.cpu()
secret_pred = torch.round(secret_pred)
correct_pred = torch.sum((secret_pred - secret_true) == 0, dim=1)
str_acc = 1.0 - torch.sum((correct_pred - secret_pred.size()[1]) != 0).numpy() / correct_pred.size()[0]
bit_acc = torch.sum(correct_pred).numpy() / secret_pred.numel()
return bit_acc, str_acc
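# Worked example (not part of the original code): for a batch of two 4-bit
# secrets where the first sample is fully recovered and the second has one
# wrong bit, correct_pred is [4, 3], so bit_acc = 7/8 = 0.875 and
# str_acc = 1 - 1/2 = 0.5:
#
# true = torch.tensor([[1., 0., 1., 1.], [0., 1., 0., 0.]])
# pred = torch.tensor([[.9, .1, .8, .7], [.2, .9, .6, .1]])
# bit_acc, str_acc = get_secret_acc(true, pred)  # -> 0.875, 0.5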
def build_model(encoder, decoder, discriminator, lpips_fn, secret_input, image_input, l2_edge_gain,
borders, secret_size, M, loss_scales, yuv_scales, args, global_step, writer):
test_transform = transform_net(image_input, args, global_step)
input_warped = torchgeometry.warp_perspective(image_input, M[:, 1, :, :], dsize=(400, 400), flags='bilinear')
mask_warped = torchgeometry.warp_perspective(torch.ones_like(input_warped), M[:, 1, :, :], dsize=(400, 400),
flags='bilinear')
input_warped += (1 - mask_warped) * image_input
residual_warped = encoder((secret_input, input_warped))
encoded_warped = residual_warped + input_warped
residual = torchgeometry.warp_perspective(residual_warped, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
if borders == 'no_edge':
encoded_image = image_input + residual
elif borders == 'black':
encoded_image = residual_warped + input_warped
encoded_image = torchgeometry.warp_perspective(encoded_image, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
input_unwarped = torchgeometry.warp_perspective(image_input, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
elif borders.startswith('random'):
mask = torchgeometry.warp_perspective(torch.ones_like(residual), M[:, 0, :, :], dsize=(400, 400),
flags='bilinear')
        # `input_unwarped` is only assigned two lines below; using `input_warped`
        # here (as in the other border branches) is assumed to be the intent.
        encoded_image = residual_warped + input_warped
encoded_image = torchgeometry.warp_perspective(encoded_image, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
input_unwarped = torchgeometry.warp_perspective(input_warped, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
ch = 3 if borders.endswith('rgb') else 1
encoded_image += (1 - mask) * torch.ones_like(residual) * torch.rand([ch])
elif borders == 'white':
mask = torchgeometry.warp_perspective(torch.ones_like(residual), M[:, 0, :, :], dsize=(400, 400),
flags='bilinear')
encoded_image = residual_warped + input_warped
encoded_image = torchgeometry.warp_perspective(encoded_image, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
input_unwarped = torchgeometry.warp_perspective(input_warped, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
encoded_image += (1 - mask) * torch.ones_like(residual)
elif borders == 'image':
mask = torchgeometry.warp_perspective(torch.ones_like(residual), M[:, 0, :, :], dsize=(400, 400),
flags='bilinear')
encoded_image = residual_warped + input_warped
encoded_image = torchgeometry.warp_perspective(encoded_image, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
encoded_image += (1 - mask) * torch.roll(image_input, 1, 0)
if borders == 'no_edge':
D_output_real, _ = discriminator(image_input)
D_output_fake, D_heatmap = discriminator(encoded_image)
else:
D_output_real, _ = discriminator(input_warped)
D_output_fake, D_heatmap = discriminator(encoded_warped)
transformed_image = transform_net(encoded_image, args, global_step)
decoded_secret = decoder(transformed_image)
bit_acc, str_acc = get_secret_acc(secret_input, decoded_secret)
normalized_input = image_input * 2 - 1
normalized_encoded = encoded_image * 2 - 1
lpips_loss = torch.mean(lpips_fn(normalized_input, normalized_encoded))
cross_entropy = nn.BCELoss()
if args.cuda:
cross_entropy = cross_entropy.cuda()
secret_loss = cross_entropy(decoded_secret, secret_input)
size = (int(image_input.shape[2]), int(image_input.shape[3]))
gain = 10
falloff_speed = 4
falloff_im = np.ones(size)
    for i in range(int(falloff_im.shape[0] / falloff_speed)):  # e.g. 100 rows for a 400-px image with falloff_speed 4
        falloff_im[-i, :] *= (np.cos(4 * np.pi * i / size[0] + np.pi) + 1) / 2  # (cos(4*pi*i/size + pi) + 1) / 2
        falloff_im[i, :] *= (np.cos(4 * np.pi * i / size[0] + np.pi) + 1) / 2
for j in range(int(falloff_im.shape[1] / falloff_speed)):
falloff_im[:, -j] *= (np.cos(4 * np.pi * j / size[0] + np.pi) + 1) / 2
falloff_im[:, j] *= (np.cos(4 * np.pi * j / size[0] + np.pi) + 1) / 2
falloff_im = 1 - falloff_im
falloff_im = torch.from_numpy(falloff_im).float()
if args.cuda:
falloff_im = falloff_im.cuda()
falloff_im *= l2_edge_gain
encoded_image_yuv = color.rgb_to_yuv(encoded_image)
image_input_yuv = color.rgb_to_yuv(image_input)
im_diff = encoded_image_yuv - image_input_yuv
im_diff += im_diff * falloff_im.unsqueeze_(0)
yuv_loss = torch.mean((im_diff) ** 2, axis=[0, 2, 3])
yuv_scales = torch.Tensor(yuv_scales)
if args.cuda:
yuv_scales = yuv_scales.cuda()
image_loss = torch.dot(yuv_loss, yuv_scales)
D_loss = D_output_real - D_output_fake
G_loss = D_output_fake
loss = loss_scales[0] * image_loss + loss_scales[1] * lpips_loss + loss_scales[2] * secret_loss
if not args.no_gan:
loss += loss_scales[3] * G_loss
writer.add_scalar('loss/image_loss', image_loss, global_step)
writer.add_scalar('loss/lpips_loss', lpips_loss, global_step)
writer.add_scalar('loss/secret_loss', secret_loss, global_step)
writer.add_scalar('loss/G_loss', G_loss, global_step)
writer.add_scalar('loss/loss', loss, global_step)
writer.add_scalar('metric/bit_acc', bit_acc, global_step)
writer.add_scalar('metric/str_acc', str_acc, global_step)
if global_step % 20 == 0:
writer.add_image('input/image_input', image_input[0], global_step)
writer.add_image('input/image_warped', input_warped[0], global_step)
writer.add_image('encoded/encoded_warped', encoded_warped[0], global_step)
writer.add_image('encoded/residual_warped', residual_warped[0] + 0.5, global_step)
writer.add_image('encoded/encoded_image', encoded_image[0], global_step)
writer.add_image('transformed/transformed_image', transformed_image[0], global_step)
writer.add_image('transformed/test', test_transform[0], global_step)
return loss, secret_loss, D_loss, bit_acc, str_acc
| 2.546875
| 3
|
fuse/utils/rand/tests/test_param_sampler.py
|
alexgo1/fuse-med-ml
| 0
|
12782495
|
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
import unittest
import random
from fuse.utils import Uniform, Choice, RandInt, RandBool, draw_samples_recursively, Seed
class TestParamSampler(unittest.TestCase):
"""
Test ParamSampleBase sub classes
"""
def test_uniform(self):
Seed.set_seed(0)
min = random.random() * 1000
max = random.random() * 1000 + min
uniform = Uniform(min, max)
value = uniform.sample()
# test range
self.assertGreaterEqual(value, min)
self.assertLessEqual(uniform.sample(), max)
# test generate more than a single number
self.assertNotEqual(value, uniform.sample())
# test fixed per seed
Seed.set_seed(1234)
value0 = uniform.sample()
Seed.set_seed(1234)
value1 = uniform.sample()
self.assertEqual(value0, value1)
def test_randint(self):
Seed.set_seed(0)
min = random.randint(0, 1000)
max = random.randint(0, 1000) + min
randint = RandInt(min, max)
value = randint.sample()
# test range
self.assertGreaterEqual(value, min)
self.assertLessEqual(randint.sample(), max)
# test generate more than a single number
self.assertNotEqual(value, randint.sample())
# test fixed per seed
Seed.set_seed(1234)
value0 = randint.sample()
Seed.set_seed(1234)
value1 = randint.sample()
self.assertEqual(value0, value1)
def test_randbool(self):
Seed.set_seed(0)
randbool = RandBool(0.5)
value = randbool.sample()
# test range
self.assertIn(value, [True, False])
# test generate more than a single number
Seed.set_seed(0)
values = [randbool.sample() for _ in range(4)]
self.assertIn(True, values)
self.assertIn(False, values)
# test fixed per seed
Seed.set_seed(1234)
value0 = randbool.sample()
Seed.set_seed(1234)
value1 = randbool.sample()
self.assertEqual(value0, value1)
# test probs
Seed.set_seed(0)
randbool = RandBool(0.99)
count = 0
for _ in range(1000):
if randbool.sample() == True:
count += 1
self.assertGreaterEqual(count, 980)
def test_choice(self):
Seed.set_seed(0)
lst = list(range(1000))
choice = Choice(lst)
value = choice.sample()
# test range
self.assertIn(value, lst)
# test generate more than a single number
self.assertNotEqual(value, choice.sample())
# test fixed per seed
Seed.set_seed(1234)
value0 = choice.sample()
Seed.set_seed(1234)
value1 = choice.sample()
self.assertEqual(value0, value1)
# test probs
Seed.set_seed(0)
probs = [0.01 / 999] * 1000
probs[5] = 0.99
choice = Choice(lst, probs)
count = 0
for _ in range(1000):
if choice.sample() == 5:
count += 1
self.assertGreaterEqual(count, 980)
def test_draw_samples_recursively(self):
Seed.set_seed(0)
a = {"a": 5, "b": [3, RandInt(1, 5), 9], "c": {"d": 3, "f": [1, 2, RandBool(0.5), {"h": RandInt(10, 15)}]}, "e": {"g": Choice([6, 7, 8])}}
        b = draw_samples_recursively(a)
        self.assertEqual(b["a"], a["a"])
self.assertEqual(b["b"][0], a["b"][0])
self.assertEqual(b["b"][2], a["b"][2])
self.assertEqual(b["c"]["d"], a["c"]["d"])
self.assertEqual(b["c"]["f"][1], a["c"]["f"][1])
self.assertIn(b["b"][1], [1, 2, 3, 4, 5])
self.assertIn(b["c"]["f"][2], [True, False])
self.assertIn(b["c"]["f"][3]["h"], [10, 11, 12, 13, 14, 15])
self.assertIn(b["e"]["g"], [6, 7, 8])
if __name__ == '__main__':
unittest.main()
| 2.5625
| 3
|
sa/profiles/Huawei/VRP/get_interface_status_ex.py
|
prorevizor/noc
| 84
|
12782496
|
<gh_stars>10-100
# ---------------------------------------------------------------------
# Huawei.VRP.get_interface_status_ex
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_interface_status_ex import Script as BaseScript
from noc.sa.interfaces.igetinterfacestatusex import IGetInterfaceStatusEx
class Script(BaseScript):
name = "Huawei.VRP.get_interface_status_ex"
interface = IGetInterfaceStatusEx
def get_iftable(self, oid, ifindex=None):
if self.is_cx200X:
ifindex = None
return super().get_iftable(oid, ifindex=ifindex)
| 2.125
| 2
|
pDESy/model/organization.py
|
swanaka/pDESy
| 0
|
12782497
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base_organization import BaseOrganization
from .base_team import BaseTeam
from typing import List
class Organization(BaseOrganization):
    def __init__(self, team_list: List[BaseTeam]):
super().__init__(team_list)
| 2.71875
| 3
|
Part8/merge_features.py
|
usc-cs599-group5/Content_Enrichment
| 0
|
12782498
|
<reponame>usc-cs599-group5/Content_Enrichment<gh_stars>0
from itertools import izip
import json
from datetime import datetime
def main():
start_time = datetime.now()
data_files_paths = [
'E:\Sem2\CSCI599\Assignment2\Others\Part8\outputs\doi.json',
'E:\Sem2\CSCI599\Assignment2\Others\Part8\outputs\grobid.json',
'E:\Sem2\CSCI599\Assignment2\Others\Part8\outputs\geotopic.json',
'E:\Sem2\CSCI599\Assignment2\Others\Part8\outputs\exif.json',
'E:\Sem2\CSCI599\Assignment2\Others\Part8\outputs\file_size_data.json']
error_file = open('errors.txt', 'w+')
merged_features_file = open('merged_features.json', 'w+')
i = 0
with open(data_files_paths[0]) as opennlp_data, open(data_files_paths[1]) as grobid_data, open(
data_files_paths[2]) as geotopic_data, open(data_files_paths[3]) as exiftool_data, open(
data_files_paths[4]) as file_size_data:
        for a, b, c, d, g in izip(opennlp_data, grobid_data, geotopic_data, exiftool_data, file_size_data):
a = json.loads(a.strip())
b = json.loads(b.strip())
c = json.loads(c.strip())
d = json.loads(d.strip())
e = json.loads(g.strip())
i += 1
if a['id'] == b['id'] and b['id'] == c['id'] and c['id'] == d['id'] and d['id'] == e['id'] and a['Content-Type'] == b['Content-Type'] and b['Content-Type'] == c[
'Content-Type'] and c['Content-Type'] == d['Content-Type'] and d['Content-Type'] == e['Content-Type']:
merged_json = {}
for key, value in a.iteritems():
merged_json[key] = value
for key, value in b.iteritems():
merged_json[key] = value
for key, value in c.iteritems():
merged_json[key] = value
for key, value in d.iteritems():
merged_json[key] = value
for key, value in e.iteritems():
merged_json[key] = value
json.dump(merged_json, merged_features_file)
merged_features_file.write('\n')
else:
error_file.write('Error in line ' + str(i) + ':\n')
error_file.write(
'opennlp_data[id]: ' + a['id'] + ' - opennlp_data[Content-Type]:' + a['Content-Type'] + '\n')
error_file.write(
'grobid_data[id]: ' + b['id'] + ' - grobid_data[Content-Type]:' + b['Content-Type'] + '\n')
error_file.write(
'geotopic_data[id]: ' + c['id'] + ' - geotopic_data[Content-Type]:' + c['Content-Type'] + '\n')
error_file.write(
'exiftool_data[id]: ' + d['id'] + ' - exiftool_data[Content-Type]:' + d['Content-Type'] + '\n')
error_file.write(
'file_size_data[id]: ' + e['id'] + ' - file_size_data[Content-Type]:' + e['Content-Type'] + '\n')
error_file.close()
end_time = datetime.now()
print(end_time - start_time)
print('Merged ' + str(i) + ' files')
if __name__ == '__main__':
main()
| 2.265625
| 2
|
python/orthomcl/tree-for-codeml.py
|
lotharwissler/bioinformatics
| 10
|
12782499
|
#!/usr/bin/python
import os, sys
import string
def usage():
print >> sys.stderr, "usage: " + sys.argv[0] + " orthomcl.out base.tree"
sys.exit(1)
def plausi():
if len(sys.argv) != 3: usage()
inOrtho, inTree = sys.argv[1:3]
return inOrtho, inTree
class OrthoCluster():
def __init__(self, line):
descr, genedefs = line.split("\t")
genedefs = genedefs.split()
self.name = descr[:descr.index('(')].lower()
self.geneHash = {}
self.speciesHash = {}
for genedef in genedefs:
geneid = genedef[:genedef.index('(')]
species = genedef[genedef.index('(')+1:-1] + "1"
self.geneHash[geneid] = species
if self.speciesHash.has_key(species): self.speciesHash[species].append(geneid)
else: self.speciesHash[species] = [geneid]
def get_name(self): return self.name
def get_count(self): return len(self.geneHash)
def get_gene_hash(self): return self.geneHash
def get_species_hash(self): return self.speciesHash
def get_species_from_first_line(inFile):
fo = open(inFile)
line = fo.readline()
o = OrthoCluster(line.rstrip())
fo.close()
species = o.get_species_hash().keys()
species.sort()
return species
def parse_orthocml_out(inFile, tree):
fo = open(inFile)
for line in fo:
o = OrthoCluster(line.rstrip())
speciesHash = o.get_species_hash()
name = o.get_name()
for species, genelist in speciesHash.iteritems():
if len(genelist) > 1: break
replacement = '(' + species[:-1] + '1 #1,' + species[:-1] + '2)'
tree_repl_1 = tree.replace(species, replacement)
replacement = '(' + species[:-1] + '1,' + species[:-1] + '2 #1)'
tree_repl_2 = tree.replace(species, replacement)
fw = open(name + ".tree.1", "w")
fw.write(tree_repl_1)
fw.close()
fw = open(name + ".tree.2", "w")
fw.write(tree_repl_2)
fw.close()
fo.close()
def read_tree_from_file(file):
fo = open(file)
tree = ""
for line in fo:
tree += line.strip()
fo.close()
return tree
def main():
inOrtho, inTree = plausi()
tree = read_tree_from_file(inTree)
parse_orthocml_out(inOrtho, tree)
main()
| 2.921875
| 3
|
eve_station/database/models/core/api_key.py
|
narthollis/eve-station
| 0
|
12782500
|
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import ARRAY
from ...database import Base
class ApiKey(Base):
api_key_id = Column(Integer, primary_key=True, autoincrement=False)
key = Column(String)
vcode = Column(String)
mask = Column(Integer)
character_1_id = Column(Integer, ForeignKey('character.character_id'))
character_2_id = Column(Integer, ForeignKey('character.character_id'))
character_3_id = Column(Integer, ForeignKey('character.character_id'))
corporation_id = Column(Integer, ForeignKey('corporation.corporation_id'))
| 2.703125
| 3
|
Exercicios/Resposta-EstruturaDeRepeticao/Exerc_46.py
|
ThaisAlves7/Exercicios_PythonBrasil
| 0
|
12782501
|
<reponame>ThaisAlves7/Exercicios_PythonBrasil<filename>Exercicios/Resposta-EstruturaDeRepeticao/Exerc_46.py
# In a long-jump competition each athlete is entitled to five jumps. At the end of each athlete's series of jumps,
# the best and worst results are discarded, and the athlete's score is the average of the three remaining values.
# Write a program that reads the athlete's name and the five distances achieved in the jumps, then reports the
# average as described above (drop the best and the worst jump, then compute the average of the rest). Use a list
# to store the jumps. The jumps are entered in execution order and are therefore not sorted. The program ends when
# no athlete name is entered. The program output should follow the example below:
# Athlete: <NAME>
# First jump: 6.5 m
# Second jump: 6.1 m
# Third jump: 6.2 m
# Fourth jump: 5.4 m
# Fifth jump: 5.3 m
# Best jump: 6.5 m
# Worst jump: 5.3 m
# Average of the remaining jumps: 5.9 m
# Final result:
# <NAME>: 5.9 m
import numpy as np
nome_atleta = 'NomeAtleta'
nota_saltos = []
while nome_atleta != '':
nome_atleta = input('Informe o nome do Atleta: ')
if nome_atleta == '':
print('Programa encerrado')
else:
        nota_saltos = []  # reset the jump list for each new athlete
        for i in range(1, 6):
valor_salto = float(input(f'Informe a {i}° distância do Salto: '))
nota_saltos.append(valor_salto)
print()
melhor_salto = max(nota_saltos)
pior_salto = min(nota_saltos)
print(f'Atleta: {nome_atleta}')
for key, valor in enumerate(nota_saltos, start=1):
print(f'{key}° salto: {valor:.1f}m')
nota_saltos.remove(melhor_salto)
nota_saltos.remove(pior_salto)
media = np.mean(nota_saltos)
print()
print(f'Melhor Salto {melhor_salto:.1f}m')
print(f'Pior Salto {pior_salto:.1f}m')
print(f'Média dos demais saltos {media:.1f}m')
print()
print('Resultado Final:')
print(f'{nome_atleta}: {media:.1f}m')
print()
print('Informe o nome do próximo atleta ou deixe em branco para finalizar o sistema')
| 3.65625
| 4
|
src/annalist_root/annalist/tests/test_render_bool_checkbox.py
|
gklyne/annalist
| 18
|
12782502
|
<gh_stars>10-100
"""
Tests for boolean value rendering as checkbox
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
import unittest
import re
from utils.py3porting import is_string, to_unicode
from annalist.views.fields.render_bool_checkbox import (
get_bool_checkbox_renderer,
BoolCheckboxValueMapper
)
from .field_rendering_support import FieldRendererTestSupport
class BooleanCheckboxRenderingTest(FieldRendererTestSupport):
def setUp(self):
return
def tearDown(self):
return
def test_RenderBoolCheckboxValue(self):
def expect_render(val):
if is_string(val):
valtext = val
valbool = val.lower() in ["true", "yes"]
else:
valtext = "Yes" if val else "No"
valbool = val
checked = ''' checked="checked"''' if valbool else ''''''
render_view = '''<span>%s</span>'''%valtext
render_edit = (
'''<input type="checkbox" '''+
'''name="repeat_prefix_test_field" '''+
('''value="%s"%s'''%(valtext,checked)) +
''' />'''+
''' <span class="value-placeholder">(test placeholder)</span>'''
)
return {'view': render_view, 'edit': render_edit}
test_value_context_renders = (
[ (self._make_test_context(None), expect_render(False))
, (self._make_test_context(False), expect_render(False))
, (self._make_test_context(True), expect_render(True))
, (self._make_test_context("False"), expect_render("False"))
, (self._make_test_context("True"), expect_render("True"))
, (self._make_test_context("Yes"), expect_render("Yes"))
, (self._make_test_context("No"), expect_render("No"))
, (self._make_test_context(u"yes"), expect_render("yes"))
, (self._make_test_context(u"no"), expect_render("no"))
])
renderer = get_bool_checkbox_renderer()
for render_context, expect_render in test_value_context_renders:
# print(repr(render_context['field']['field_value']))
self._check_value_renderer_results(
renderer,
context=render_context,
expect_rendered_view=expect_render['view'],
expect_rendered_edit=expect_render['edit']
)
return
def test_DecodeBoolCheckboxValue(self):
# Any text other than None or empty string indicates the box is checked, returns True
test_decode_values = (
{ None: False
, "": False
, "checked": True
, "No": True
, "Yes": True
, "False": True
, "True": True
, "false": True
, "true": True
})
for valtext, expect_valdata in test_decode_values.items():
valdata = BoolCheckboxValueMapper.decode(valtext)
self.assertEqual(
valdata, expect_valdata,
"Value decode(%s) = %r, expect %r"%(valtext, valdata, expect_valdata)
)
return
# End.
if __name__ == "__main__":
# import django
# django.setup() # Needed for template loader
# Runtests in this module
# runner = unittest.TextTestRunner(verbosity=2)
# tests = unittest.TestSuite()
# tests = getSuite(select=sel)
# if tests: runner.run(tests)
unittest.main()
| 2.359375
| 2
|
tests/stresstest/map_test.py
|
realead/tighthash
| 0
|
12782503
|
import sys
sys.path.append('..')
from tighthash import pmap
from testutils import testing_map
testing_map("TightHashMap", pmap)
| 1.296875
| 1
|
Calculate_GridShape.py
|
Valentin-Aslanyan/ASOT
| 0
|
12782504
|
<reponame>Valentin-Aslanyan/ASOT
plot_defined_region=False #Red line showing defining regions
target_R=1.0
target_phi=1.0
import sys
sys.path[:0]=['/Change/This/Path']
from ASOT_Functions_Python import *
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# rleft rrigh tleft trigh pleft prigh grdscl
grid_params=[ 1.0, 3.0, 0.0625, 0.9375, -1.0, +1.0, 7.0e+10]
# nblockr nblockt nblockp
init_data=[ 4, 9, 20]
# dmv1l dmv1r dmv2l dmv2r dmv3l dmv3r lrefdmv
refine_data=[[7.0e+10, 2.1e+11, 0.2, +0.7, -0.45, 0.5, 2],
[7.0e+10, 9.1e+10, 0.15, +0.35, -0.25, 0.1, 4],
[7.0e+10, 9.1e+10, 0.15, +0.4, 0.0, 0.4, 4],
[7.0e+10, 7.026e+10, 0.4, +0.5, -0.15, 0.1, 5],
[7.0e+10, 7.8e+10, 0.13, +0.42, -0.3, 0.4, 5],
[7.8e+10, 1.0e+11, 0.15, +0.4, -0.3, 0.4, 5]]
# NOTE: this second refine_data definition overrides the one above; only it takes effect.
refine_data=[[7.0e+10, 2.1e+11, 0.2, +0.7, -0.45, 0.5, 2],
[7.0e+10, 9.1e+10, 0.15, +0.35, -0.25, 0.1, 4],
[7.0e+10, 9.1e+10, 0.15, +0.4, 0.0, 0.4, 4],
[7.0e+10, 7.026e+10, 0.15, +0.5, -0.3, 0.1, 5],
[7.0e+10, 7.026e+10, 0.15, +0.45, 0.0, 0.45, 5]]
#Preliminaries
max_ref_level=1
refine_data_proc=[]
for idx_ref in range(len(refine_data)):
temp=refine_data[idx_ref].copy()
temp[0]=np.log(temp[0]/grid_params[6])-np.log(grid_params[0])
temp[1]=np.log(temp[1]/grid_params[6])-np.log(grid_params[0])
temp[2]=temp[2]-grid_params[2]
temp[3]=temp[3]-grid_params[2]
temp[4]=temp[4]-grid_params[4]
temp[5]=temp[5]-grid_params[4]
refine_data_proc.append(temp)
if max_ref_level<refine_data[idx_ref][6]:
max_ref_level=refine_data[idx_ref][6]
#Basic grid, use units of Minimum Spacing (i.e. 1MS = size of most refined block)
r_width=np.log(grid_params[1])-np.log(grid_params[0])
t_width=grid_params[3]-grid_params[2]
p_width=grid_params[5]-grid_params[4]
r_spacing=r_width/init_data[0]/2**(max_ref_level-1)
t_spacing=t_width/init_data[1]/2**(max_ref_level-1)
p_spacing=p_width/init_data[2]/2**(max_ref_level-1)
max_r=2**(max_ref_level-1)*init_data[0]
max_t=2**(max_ref_level-1)*init_data[1]
max_p=2**(max_ref_level-1)*init_data[2]
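# Worked example (using the init_data and refine_data defined above): with
# max_ref_level = 5, max_r = 2**4 * 4 = 64, max_t = 2**4 * 9 = 144 and
# max_p = 2**4 * 20 = 320, so each unrefined block spans 16 MS per axis.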
coord_r=[]
coord_t=[]
coord_p=[]
ref_level=[]
for idx_r in range(init_data[0]):
for idx_t in range(init_data[1]):
for idx_p in range(init_data[2]):
step_size=2**(max_ref_level-1)
coord_r.append([idx_r*step_size,(idx_r+1)*step_size])
coord_t.append([idx_t*step_size,(idx_t+1)*step_size])
coord_p.append([idx_p*step_size,(idx_p+1)*step_size])
ref_level.append(1)
#Adjust refinement specs to MS units; pad spec to next (up or down) block of that size
for idx_ref in range(len(refine_data)):
current_spacing=2**(max_ref_level+1-refine_data_proc[idx_ref][6])
refine_data_proc[idx_ref][0]=int(max(0,np.floor(refine_data_proc[idx_ref][0]/r_spacing/current_spacing)*current_spacing))
refine_data_proc[idx_ref][1]=int(min(max_r,np.ceil(refine_data_proc[idx_ref][1]/r_spacing/current_spacing)*current_spacing))
refine_data_proc[idx_ref][2]=int(max(0,np.floor(refine_data_proc[idx_ref][2]/t_spacing/current_spacing)*current_spacing))
refine_data_proc[idx_ref][3]=int(min(max_t,np.ceil(refine_data_proc[idx_ref][3]/t_spacing/current_spacing)*current_spacing))
refine_data_proc[idx_ref][4]=int(max(0,np.floor(refine_data_proc[idx_ref][4]/p_spacing/current_spacing)*current_spacing))
refine_data_proc[idx_ref][5]=int(min(max_p,np.ceil(refine_data_proc[idx_ref][5]/p_spacing/current_spacing)*current_spacing))
#Refinement
for idx_l in range(2,max_ref_level+1):
new_spacing=2**(max_ref_level-idx_l)
for idx_ref in range(len(refine_data_proc)):
new_ref_level=refine_data_proc[idx_ref][6]
if new_ref_level>=idx_l:
#Adjust r to buffer adjacent blocks
r_ref_low=refine_data_proc[idx_ref][0]
r_ref_high=refine_data_proc[idx_ref][1]
for idx3 in range(new_ref_level-1,idx_l-1,-1):
current_spacing=2**(max_ref_level-idx3)
if r_ref_low % (current_spacing*2)==0:
r_ref_low-=2*current_spacing
else:
r_ref_low-=1*current_spacing
if r_ref_high % (current_spacing*2)==0:
r_ref_high+=2*current_spacing
else:
r_ref_high+=1*current_spacing
r_ref_low=max(0,r_ref_low)
r_ref_high=min(max_r,r_ref_high)
#Adjust t to buffer adjacent blocks
t_ref_low=refine_data_proc[idx_ref][2]
t_ref_high=refine_data_proc[idx_ref][3]
for idx3 in range(new_ref_level-1,idx_l-1,-1):
current_spacing=2**(max_ref_level-idx3)
if t_ref_low % (current_spacing*2)==0:
t_ref_low-=2*current_spacing
else:
t_ref_low-=1*current_spacing
if t_ref_high % (current_spacing*2)==0:
t_ref_high+=2*current_spacing
else:
t_ref_high+=1*current_spacing
t_ref_low=max(0,t_ref_low)
t_ref_high=min(max_t,t_ref_high)
#Adjust p to buffer adjacent blocks
p_ref_low=refine_data_proc[idx_ref][4]
p_ref_high=refine_data_proc[idx_ref][5]
for idx3 in range(new_ref_level-1,idx_l-1,-1):
current_spacing=2**(max_ref_level-idx3)
if p_ref_low % (current_spacing*2)==0:
p_ref_low-=2*current_spacing
else:
p_ref_low-=1*current_spacing
if p_ref_high % (current_spacing*2)==0:
p_ref_high+=2*current_spacing
else:
p_ref_high+=1*current_spacing
p_ref_low=max(0,p_ref_low)
p_ref_high=min(max_p,p_ref_high)
for idx in range(len(coord_r)-1,-1,-1):
if ref_level[idx]<idx_l and r_ref_low<=coord_r[idx][0] and r_ref_high>=coord_r[idx][1] and t_ref_low<=coord_t[idx][0] and t_ref_high>=coord_t[idx][1] and p_ref_low<=coord_p[idx][0] and p_ref_high>=coord_p[idx][1]:
r_start=coord_r[idx][0]
t_start=coord_t[idx][0]
p_start=coord_p[idx][0]
coord_r.pop(idx)
coord_t.pop(idx)
coord_p.pop(idx)
ref_level.pop(idx)
for idx_r in range(2):
for idx_t in range(2):
for idx_p in range(2):
r_low=r_start+idx_r*new_spacing
r_high=r_start+(idx_r+1)*new_spacing
t_low=t_start+idx_t*new_spacing
t_high=t_start+(idx_t+1)*new_spacing
p_low=p_start+idx_p*new_spacing
p_high=p_start+(idx_p+1)*new_spacing
coord_r.append([r_low,r_high])
coord_t.append([t_low,t_high])
coord_p.append([p_low,p_high])
ref_level.append(idx_l)
print("Num blocks: ",len(coord_r))
#Convert to real units
coord_logR=[]
coord_theta=[]
coord_phi=[]
for idx in range(len(coord_r)):
coord_logR.append([coord_r[idx][0]*r_spacing+np.log(grid_params[0]),coord_r[idx][1]*r_spacing+np.log(grid_params[0])])
coord_theta.append([(1.0-(coord_t[idx][1]*t_spacing+grid_params[2]))*np.pi,(1.0-(coord_t[idx][0]*t_spacing+grid_params[2]))*np.pi])
coord_phi.append([(coord_p[idx][0]*p_spacing+grid_params[4])*np.pi,(coord_p[idx][1]*p_spacing+grid_params[4])*np.pi])
plt.figure("Phi slice",figsize=(3.1*1.5,6*1.5))
for idx in range(len(coord_logR)):
if coord_phi[idx][0]<=target_phi/180.0*np.pi and coord_phi[idx][1]>=target_phi/180.0*np.pi:
x1=np.exp(coord_logR[idx][0])*np.sin(coord_theta[idx][0])
z1=np.exp(coord_logR[idx][0])*np.cos(np.pi-coord_theta[idx][0])
x2=np.exp(coord_logR[idx][1])*np.sin(coord_theta[idx][0])
z2=np.exp(coord_logR[idx][1])*np.cos(np.pi-coord_theta[idx][0])
x3=np.exp(coord_logR[idx][1])*np.sin(coord_theta[idx][1])
z3=np.exp(coord_logR[idx][1])*np.cos(np.pi-coord_theta[idx][1])
x4=np.exp(coord_logR[idx][0])*np.sin(coord_theta[idx][1])
z4=np.exp(coord_logR[idx][0])*np.cos(np.pi-coord_theta[idx][1])
plt.plot([x1,x2],[z1,z2],color="grey",linewidth=1)
plt.plot([x3,x4],[z3,z4],color="grey",linewidth=1)
anglearc=np.linspace(coord_theta[idx][0],coord_theta[idx][1],num=100)
arc1x=np.exp(coord_logR[idx][0])*np.sin(anglearc)
arc1y=np.exp(coord_logR[idx][0])*np.cos(np.pi-anglearc)
arc2x=np.exp(coord_logR[idx][1])*np.sin(anglearc)
arc2y=np.exp(coord_logR[idx][1])*np.cos(np.pi-anglearc)
plt.plot(arc1x,arc1y,color="grey",linewidth=1)
plt.plot(arc2x,arc2y,color="grey",linewidth=1)
plt.axis('off')
plt.xlim([0,3.1])
plt.ylim([-3,3])
plt.savefig("Predicted_Grid_XZ.pdf", format="pdf", dpi=100,bbox_inches='tight',pad_inches=0.1)
plt.figure("R slice",figsize=(20,10))
for idx in range(len(coord_logR)):
if np.exp(coord_logR[idx][0])<=target_R and np.exp(coord_logR[idx][1])>=target_R:
th1=coord_theta[idx][0]/np.pi*180.0
ph1=coord_phi[idx][0]/np.pi*180.0
th2=coord_theta[idx][1]/np.pi*180.0
ph2=coord_phi[idx][1]/np.pi*180.0
plt.plot([ph1,ph1,ph2,ph2,ph1],[th1,th2,th2,th1,th1],color="grey",linewidth=1)
if plot_defined_region:
for idx_ref in range(len(refine_data)):
if refine_data[idx_ref][0]/grid_params[6]<=target_R and refine_data[idx_ref][1]/grid_params[6]>=target_R:
th1=(1.0-refine_data[idx_ref][2])*180.0
ph1=refine_data[idx_ref][4]*180.0
th2=(1.0-refine_data[idx_ref][3])*180.0
ph2=refine_data[idx_ref][5]*180.0
plt.plot([ph1,ph1,ph2,ph2,ph1],[th1,th2,th2,th1,th1],color="red",linewidth=1)
"""
for idx_ref in range(len(refine_data)):
th1=(1.0-refine_data_proc[idx_ref][2]*t_spacing-grid_params[2])*180.0
ph1=(refine_data_proc[idx_ref][4]*p_spacing+grid_params[4])*180.0
th2=(1.0-refine_data_proc[idx_ref][3]*t_spacing-grid_params[2])*180.0
ph2=(refine_data_proc[idx_ref][5]*p_spacing+grid_params[4])*180.0
plt.plot([ph1,ph1,ph2,ph2,ph1],[th1,th2,th2,th1,th1],color="green",linewidth=1)
for idx_ref in range(len(refine_data)):
th1=(1.0-t_ref_low*t_spacing-grid_params[2])*180.0
ph1=(p_ref_low*p_spacing+grid_params[4])*180.0
th2=(1.0-t_ref_high*t_spacing-grid_params[2])*180.0
ph2=(p_ref_high*p_spacing+grid_params[4])*180.0
plt.plot([ph1,ph1,ph2,ph2,ph1],[th1,th2,th2,th1,th1],color="blue",linewidth=1)
"""
plt.axis('off')
plt.savefig("Predicted_Grid_PhiTheta.pdf", format="pdf", dpi=100,bbox_inches='tight',pad_inches=0.1)
plt.show()
| 1.9375
| 2
|
test/api/test_scope.py
|
SAEON/Open-Data-Platform
| 0
|
12782505
|
<filename>test/api/test_scope.py
from random import randint
import pytest
from sqlalchemy import select
from odp import ODPScope
from odp.db import Session
from odp.db.models import Scope
from test.api import all_scopes, all_scopes_excluding, assert_forbidden
from test.factories import ScopeFactory
@pytest.fixture
def scope_batch():
"""Create and commit a batch of Scope instances."""
return [
ScopeFactory()
for _ in range(randint(3, 5))
]
def assert_db_state(scopes):
"""Verify that the DB scope table contains the given scope batch."""
Session.expire_all()
result = Session.execute(select(Scope)).scalars().all()
assert set((row.id, row.type) for row in result) \
== set((scope.id, scope.type) for scope in scopes)
def assert_json_result(response, json, scope):
"""Verify that the API result matches the given scope object."""
assert response.status_code == 200
assert json['id'] == scope.id
assert json['type'] == scope.type
def assert_json_results(response, json, scopes):
"""Verify that the API result list matches the given scope batch."""
items = json['items']
assert json['total'] == len(items) == len(scopes)
items.sort(key=lambda i: i['id'])
scopes.sort(key=lambda s: s.id)
for n, scope in enumerate(scopes):
assert_json_result(response, items[n], scope)
@pytest.mark.parametrize('scopes', [
[ODPScope.SCOPE_READ],
[],
all_scopes,
all_scopes_excluding(ODPScope.SCOPE_READ),
])
def test_list_scopes(api, scope_batch, scopes):
authorized = ODPScope.SCOPE_READ in scopes
# add the parameterized scopes to the batch of expected scopes,
# as they will be created by the api fixture
scope_batch += [ScopeFactory.build(id=s.value, type='odp') for s in scopes]
r = api(scopes).get('/scope/')
if authorized:
assert_json_results(r, r.json(), scope_batch)
else:
assert_forbidden(r)
assert_db_state(scope_batch)
| 2.40625
| 2
|
Source/Utility/GameMetrics.py
|
MoritzGrundei/Informaticup
| 0
|
12782506
|
import json
import random
import numpy as np
from Source.Utility.Pathfinding.Graph import Graph
def get_distance_to_players(game_state):
own_player = game_state["players"][str(game_state["you"])]
distances = [0, 0, 0, 0, 0, 0]
current_position = (own_player["x"], own_player["y"])
if game_state["players"][str(game_state["you"])]["active"]:
for i in range(6):
if i + 1 == game_state["you"]:
distances[i] = 0
else:
try:
if game_state["players"][str(i + 1)]["active"]:
enemy_position = (game_state["players"][str(i + 1)]["x"], game_state["players"][str(i + 1)]["y"])
distance = np.sqrt(np.power(current_position[0] - enemy_position[0], 2) + np.power(
current_position[1] - enemy_position[1], 2))
distances[i] = distance
else:
distances[i] = 0
except KeyError:
distances[i] = 0
max_distance = np.sqrt(np.power(game_state["width"], 2) + np.power(game_state["height"], 2))
for i in range(len(distances)):
distances[i] = distances[i] / max_distance
return distances
def get_average_distance(distances):
sum = counter = 0.0
for i in range(len(distances)):
if distances[i] == 0:
pass
else:
sum += distances[i]
counter += 1
if counter == 0:
return 0
else:
return sum / counter
def get_free_spaces(new_position, game_state):
own_player = game_state["players"][str(game_state["you"])]
speed = own_player["speed"]
number_of_free_spaces = 0
for i in range(-2, 3):
for j in range(-2, 3):
try:
if game_state["cells"][new_position[1] + i][new_position[0] + j] == 0:
number_of_free_spaces += 1
except IndexError:
pass
normalised_num = (number_of_free_spaces - speed) / 25.0
return normalised_num
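# Illustrative note (interpretation, not documented by the original authors):
# the 5x5 window around the new position contains at most 25 cells, so
# subtracting the current speed and dividing by 25.0 gives a roughly
# normalised feature, e.g. 20 free cells at speed 2 -> (20 - 2) / 25.0 = 0.72.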
def get_avg_speed(game_state):
sum = 0.0
counter = 0.0
avg = 0.0
if game_state["players"][str(game_state["you"])]["active"]:
for i in range(6):
if i + 1 == game_state["you"]:
pass
else:
try:
if game_state["players"][str(i + 1)]["active"]:
sum += game_state["players"][str(i + 1)]["speed"]
counter += 1
except KeyError:
pass
if counter > 0:
avg = sum / counter
norm_avg = avg / 10
return norm_avg
def get_num_living_players(game_state):
num = 0
for i in range (6):
if game_state["players"][str(i+1)]["active"]:
num += 1
return num
def get_player_data(game_state, id):
x = game_state["players"][str(id + 1)]["x"]
y = game_state["players"][str(id + 1)]["y"]
speed = game_state["players"][str(id + 1)]["speed"]
return x, y, speed
def get_distances_to_borders(game_state, id):
board_height = game_state["height"]
board_width = game_state["width"]
position = game_state["players"][str(id + 1)]["x"], game_state["players"][str(id + 1)]["y"]
top_distance = position[1] - 1
bottom_distance = (board_height - 1) - (position[1] - 1)
right_distance = (board_width - 1) - (position[0] - 1)
left_distance = position[0] - 1
return top_distance, bottom_distance, right_distance, left_distance
def get_own_speed(game_state):
own_player = game_state["players"][str(game_state["you"])]
speed = own_player["speed"]
return speed
def get_connected_fields_for_new_position( x, y, new_direction, game_state, field_size):
game_state = json.loads(game_state)
graph = Graph(game_state["cells"],x,y, game_state["width"], game_state["height"], new_direction, field_size)
return len(graph.get_connected_components())
| 3.09375
| 3
|
settings.py
|
moki9/flickrize
| 0
|
12782507
|
FLICKR_FEED='https://api.flickr.com/services/feeds/photos_public.gne'
| 1.140625
| 1
|
authentik/outposts/controllers/k8s/service_monitor.py
|
BeryJu/passbook
| 15
|
12782508
|
<reponame>BeryJu/passbook
"""Kubernetes Prometheus ServiceMonitor Reconciler"""
from dataclasses import asdict, dataclass, field
from typing import TYPE_CHECKING
from dacite import from_dict
from kubernetes.client import ApiextensionsV1Api, CustomObjectsApi
from authentik.outposts.controllers.base import FIELD_MANAGER
from authentik.outposts.controllers.k8s.base import KubernetesObjectReconciler
if TYPE_CHECKING:
from authentik.outposts.controllers.kubernetes import KubernetesController
@dataclass
class PrometheusServiceMonitorSpecEndpoint:
"""Prometheus ServiceMonitor endpoint spec"""
port: str
path: str = field(default="/metrics")
@dataclass
class PrometheusServiceMonitorSpecSelector:
"""Prometheus ServiceMonitor selector spec"""
# pylint: disable=invalid-name
matchLabels: dict
@dataclass
class PrometheusServiceMonitorSpec:
"""Prometheus ServiceMonitor spec"""
endpoints: list[PrometheusServiceMonitorSpecEndpoint]
# pylint: disable=invalid-name
selector: PrometheusServiceMonitorSpecSelector
@dataclass
class PrometheusServiceMonitorMetadata:
"""Prometheus ServiceMonitor metadata"""
name: str
namespace: str
labels: dict = field(default_factory=dict)
@dataclass
class PrometheusServiceMonitor:
"""Prometheus ServiceMonitor"""
# pylint: disable=invalid-name
apiVersion: str
kind: str
metadata: PrometheusServiceMonitorMetadata
spec: PrometheusServiceMonitorSpec
CRD_NAME = "servicemonitors.monitoring.coreos.com"
CRD_GROUP = "monitoring.coreos.com"
CRD_VERSION = "v1"
CRD_PLURAL = "servicemonitors"
class PrometheusServiceMonitorReconciler(KubernetesObjectReconciler[PrometheusServiceMonitor]):
"""Kubernetes Prometheus ServiceMonitor Reconciler"""
def __init__(self, controller: "KubernetesController") -> None:
super().__init__(controller)
self.api_ex = ApiextensionsV1Api(controller.client)
self.api = CustomObjectsApi(controller.client)
@property
def noop(self) -> bool:
return (not self._crd_exists()) or (self.is_embedded)
def _crd_exists(self) -> bool:
"""Check if the Prometheus ServiceMonitor exists"""
return bool(
len(
self.api_ex.list_custom_resource_definition(
field_selector=f"metadata.name={CRD_NAME}"
).items
)
)
def get_reference_object(self) -> PrometheusServiceMonitor:
"""Get service monitor object for outpost"""
return PrometheusServiceMonitor(
apiVersion=f"{CRD_GROUP}/{CRD_VERSION}",
kind="ServiceMonitor",
metadata=PrometheusServiceMonitorMetadata(
name=self.name,
namespace=self.namespace,
labels=self.get_object_meta().labels,
),
spec=PrometheusServiceMonitorSpec(
endpoints=[
PrometheusServiceMonitorSpecEndpoint(
port="http-metrics",
)
],
selector=PrometheusServiceMonitorSpecSelector(
matchLabels=self.get_object_meta(name=self.name).labels,
),
),
)
def create(self, reference: PrometheusServiceMonitor):
return self.api.create_namespaced_custom_object(
group=CRD_GROUP,
version=CRD_VERSION,
plural=CRD_PLURAL,
namespace=self.namespace,
body=asdict(reference),
field_manager=FIELD_MANAGER,
)
def delete(self, reference: PrometheusServiceMonitor):
return self.api.delete_namespaced_custom_object(
group=CRD_GROUP,
version=CRD_VERSION,
namespace=self.namespace,
plural=CRD_PLURAL,
name=self.name,
)
def retrieve(self) -> PrometheusServiceMonitor:
return from_dict(
PrometheusServiceMonitor,
self.api.get_namespaced_custom_object(
group=CRD_GROUP,
version=CRD_VERSION,
namespace=self.namespace,
plural=CRD_PLURAL,
name=self.name,
),
)
def update(self, current: PrometheusServiceMonitor, reference: PrometheusServiceMonitor):
return self.api.patch_namespaced_custom_object(
group=CRD_GROUP,
version=CRD_VERSION,
namespace=self.namespace,
plural=CRD_PLURAL,
name=self.name,
body=asdict(reference),
field_manager=FIELD_MANAGER,
)
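# A minimal, illustrative sketch of how the dataclasses above serialize; the names
# used here are made up, but `create` and `update` build their request bodies from
# such an object via asdict() exactly as shown.
if __name__ == "__main__":
    _monitor = PrometheusServiceMonitor(
        apiVersion=f"{CRD_GROUP}/{CRD_VERSION}",
        kind="ServiceMonitor",
        metadata=PrometheusServiceMonitorMetadata(name="example-outpost", namespace="example-ns"),
        spec=PrometheusServiceMonitorSpec(
            endpoints=[PrometheusServiceMonitorSpecEndpoint(port="http-metrics")],
            selector=PrometheusServiceMonitorSpecSelector(matchLabels={"app": "example-outpost"}),
        ),
    )
    print(asdict(_monitor))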
| 1.914063
| 2
|
app/views.py
|
TheMoonWalker1/HackTJ8.0
| 0
|
12782509
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.http import HttpResponseForbidden
from django.contrib.auth.decorators import login_required
from .forms import *
from .models import *
from urllib.request import urlopen, Request
import json
import random
# Create your views here.
def landing(request):
return render(request, 'start.html')
def signup(request):
if request.user.is_authenticated:
return redirect('home')
if request.method == "POST":
form = UserRegisterForm(request.POST)
if form.is_valid():
user = form.save()
prof = Profile(user=user)
prof.save()
messages.success(
request, f"Your account has been created! You are now able to log in"
)
return redirect("login")
else:
form = UserRegisterForm()
return render(request, 'signup.html', {'form': form})
def home(request):
if request.method == "POST":
form = CarForm(request.POST)
if form.is_valid():
vin = form.cleaned_data['vin']
thing = Request(f'https://vpic.nhtsa.dot.gov/api/vehicles/decodevin/{vin}?format=json')
            results = json.loads(urlopen(thing).read())['Results']
            response_body = {
                d['Variable']: d['Value'].upper()
                for d in results
                if d['Value'] not in ('0', 0, None, '', 'Not Applicable')
                and d['Variable'] != 'Error Text'
            }
return render(request, 'results.html', {'info': response_body})
messages.error(request, 'This VIN is invalid. It must be 17 characters long.')
vins = ['3GNEC12078G276688', '5GRGN23U83H132227', '1C3CDFBB5FD165342', '1FMZU62E12ZC12617', '2FAFP71W93X138259', '1FTPW12V17FA12080', '1FT8W3BT4FEA96950', 'WDXPD944555842209', 'WVGBV7AX6CW559712', 'SCFAC23302B500083', 'JH4KA7630NC013822', 'JH4DA175XGS009825', '1GCEK14K8RE106083', '4F2CU08102KM50866', 'JH4DA1850HS006058', '5N1AN0NU6BC506916', '1FVACYDC37HW59012']
return render(request, 'home.html', {'form': CarForm(), 'vin': random.choice(vins)})
| 2.390625
| 2
|
py3status/modules/timer.py
|
ChoiZ/py3status
| 0
|
12782510
|
<filename>py3status/modules/timer.py
# -*- coding: utf-8 -*-
"""
A simple countdown timer.
This is a very basic countdown timer. You can change the timer length as well
as pausing, restarting and resetting it. Currently this is more of a demo of a
composite.
Each part of the timer (hours, minutes, seconds) can be changed independently using
mouse buttons 4 and 5 (scroll wheel).
Button 1 starts/pauses the countdown.
Button 2 resets timer.
Configuration parameters:
sound: path to a sound file that will be played when the timer expires.
(default None)
time: how long in seconds for the timer
(default 60)
"""
from time import time
from threading import Timer
class Py3status:
"""
"""
# available configuration parameters
sound = None
time = 60
def __init__(self):
self.running = False
self.end_time = None
self.time_left = None
self.color = None
self.alarm_timer = None
self.alarm = False
self.done = False
def _time_up(self):
"""
Called when the timer expires
"""
self.running = False
self.color = '#FF0000'
self.time_left = 0
self.done = True
if self.sound:
self.py3.play_sound(self.sound)
self.alarm = True
self.timer()
    def timer(self, i3s_output_list=None, i3s_config=None):
        def make_2_digit(value):
value = str(value)
if len(value) == 1:
value = '0' + value
return value
if self.running or self.done:
t = int(self.end_time - time())
if t <= 0:
t = 0
else:
if self.time_left:
t = self.time_left
else:
t = self.time
# Hours
hours, t = divmod(t, 3600)
# Minutes
mins, t = divmod(t, 60)
# Seconds
seconds = t
if self.running:
cached_until = time() + 1
else:
cached_until = self.py3.CACHE_FOREVER
response = {
'cached_until': cached_until,
'composite': [
{
'color': '#CCCCCC',
'full_text': 'Timer ',
},
{
'color': self.color,
'full_text': str(hours),
'index': 'hours',
},
{
'color': '#CCCCCC',
'full_text': ':',
},
{
'color': self.color,
                    'full_text': make_2_digit(mins),
'index': 'mins',
},
{
'color': '#CCCCCC',
'full_text': ':',
},
{
'color': self.color,
                    'full_text': make_2_digit(seconds),
'index': 'seconds',
},
]
}
return response
def on_click(self, i3s_output_list, i3s_config, event):
deltas = {
'hours': 3600,
'mins': 60,
'seconds': 1
}
index = event['index']
button = event['button']
        # If an alarm sound was played, cancel the sound on any button press
if self.alarm:
self.py3.stop_sound()
self.alarm = False
return
if button == 1:
if self.running:
# pause timer
self.running = False
self.time_left = int(self.end_time - time())
self.color = '#FFFF00'
if self.alarm_timer:
self.alarm_timer.cancel()
else:
# start/restart timer
self.running = True
if self.time_left:
self.end_time = time() + self.time_left
else:
self.end_time = time() + self.time
self.color = '#00FF00'
if self.alarm_timer:
self.alarm_timer.cancel()
self.done = False
self.alarm_timer = Timer(self.time_left or self.time, self._time_up)
self.alarm_timer.start()
if button == 2:
self.running = False
self.time_left = None
self.color = None
self.done = False
if self.alarm_timer:
self.alarm_timer.cancel()
if not self.running:
self.done = False
# change timer section HH:MM:SS
if self.time_left:
t = self.time_left
else:
t = self.time
if button == 4:
t += deltas.get(index, 0)
if button == 5:
t -= deltas.get(index, 0)
if t < 0:
t = 0
if self.time_left:
self.time_left = t
else:
self.time = t
def kill(self):
# remove any timer
if self.alarm_timer:
self.alarm_timer.cancel()
if __name__ == "__main__":
x = Py3status()
config = {
'color_good': '#00FF00',
'color_bad': '#FF0000',
}
print(x.timer([], config))
| 3.921875
| 4
|
App/models/users.py
|
MaiXiaochai/SnailAPI
| 0
|
12782511
|
<reponame>MaiXiaochai/SnailAPI
# -*- coding: utf-8 -*-
"""
--------------------------------------
@File : users.py
@Author : maixiaochai
@Email : <EMAIL>
@Created on : 2020/5/22 15:42
--------------------------------------
"""
| 0.949219
| 1
|
sentry/plugins/sentry_redmine/models.py
|
m0sth8/django-sentry
| 1
|
12782512
|
<gh_stars>1-10
"""
sentry.plugins.sentry_redmine.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django import forms
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import models
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
from sentry.models import GroupedMessage
from sentry.plugins import GroupActionProvider
from sentry.plugins.sentry_redmine import conf
from sentry.utils import json
import base64
import urllib
import urllib2
class RedmineIssue(models.Model):
group = models.ForeignKey(GroupedMessage)
issue_id = models.PositiveIntegerField()
class RedmineIssueForm(forms.Form):
subject = forms.CharField(max_length=200)
description = forms.CharField(widget=forms.Textarea())
class CreateRedmineIssue(GroupActionProvider):
title = 'Create Redmine Issue'
def actions(self, request, action_list, group):
if 'redmine' not in group.data:
action_list.append((self.title, self.__class__.get_url(group.pk)))
return action_list
def view(self, request, group):
if request.POST:
form = RedmineIssueForm(request.POST)
if form.is_valid():
data = json.dumps({
'key': conf.REDMINE_API_KEY,
'issue': {
'subject': form.cleaned_data['subject'],
'description': form.cleaned_data['description'],
}
})
url = conf.REDMINE_URL + '/projects/' + conf.REDMINE_PROJECT_SLUG + '/issues.json'
req = urllib2.Request(url, urllib.urlencode({
'key': conf.REDMINE_API_KEY,
}), headers={
'Content-type': 'application/json',
})
if conf.REDMINE_USERNAME and conf.REDMINE_PASSWORD:
authstring = base64.encodestring('%s:%s' % (conf.REDMINE_USERNAME, conf.REDMINE_PASSWORD))[:-1]
req.add_header("Authorization", "Basic %s" % authstring)
try:
response = urllib2.urlopen(req, data).read()
except urllib2.HTTPError, e:
if e.code == 422:
data = json.loads(e.read())
form.errors['__all__'] = 'Missing or invalid data'
for message in data:
for k, v in message.iteritems():
if k in form.fields:
form.errors.setdefault(k, []).append(v)
else:
form.errors['__all__'] += '; %s: %s' % (k, v)
else:
form.errors['__all__'] = 'Bad response from Redmine: %s %s' % (e.code, e.msg)
except urllib2.URLError, e:
form.errors['__all__'] = 'Unable to reach Redmine host: %s' % (e.reason,)
else:
data = json.loads(response)
RedmineIssue.objects.create(group=group, issue_id=data['issue']['id'])
group.data['redmine'] = {'issue_id': data['issue']['id']}
group.save()
return HttpResponseRedirect(reverse('sentry-group', args=[group.pk]))
else:
description = 'Sentry Message: %s' % request.build_absolute_uri(group.get_absolute_url())
description += '\n\n<pre>' + (group.traceback or group.message) + '</pre>'
form = RedmineIssueForm(initial={
'subject': group.error(),
'description': description,
})
global_errors = form.errors.get('__all__')
BASE_TEMPLATE = "sentry/group/details.html"
context = locals()
context.update(csrf(request))
return render_to_response('sentry/plugins/redmine/create_issue.html', context)
def tags(self, request, tags, group):
if 'redmine' in group.data:
issue_id = group.data['redmine']['issue_id']
tags.append(mark_safe('<a href="%s">#%s</a>' % (
'%s/issues/%s' % (conf.REDMINE_URL, issue_id),
issue_id,
)))
return tags
| 1.976563
| 2
|
rsatoolbox/data/noise.py
|
smazurchuk/rsatoolbox
| 80
|
12782513
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions for estimating the precision matrix based on the covariance of
either the residuals (temporal based precision matrix) or of the measurements
(instance based precision matrix)
"""
from collections.abc import Iterable
import numpy as np
from rsatoolbox.data import average_dataset_by
from rsatoolbox.util.data_utils import get_unique_inverse
def _check_demean(matrix):
"""
checks that an input has 2 or 3 dimensions and subtracts the mean.
returns a 2D matrix for covariance/precision computation and the
degrees of freedom
Args:
matrix (np.ndarray):
n_conditions x n_channels
Returns:
numpy.ndarray:
demeaned matrix
"""
assert isinstance(matrix, np.ndarray), "input must be ndarray"
if matrix.ndim in [1, 2]:
matrix = matrix - np.mean(matrix, axis=0, keepdims=True)
dof = matrix.shape[0] - 1
elif matrix.ndim == 3:
matrix -= np.mean(matrix, axis=2, keepdims=True)
dof = (matrix.shape[0] - 1) * matrix.shape[2]
matrix = matrix.transpose(0, 2, 1).reshape(
matrix.shape[0] * matrix.shape[2], matrix.shape[1])
else:
raise ValueError('Matrix for covariance estimation has wrong # of dimensions!')
return matrix, dof
def _estimate_covariance(matrix, dof, method):
""" calls the right covariance estimation function based on the ""method" argument
Args:
matrix (np.ndarray):
n_conditions x n_channels
dof (int):
degrees of freedom
method (string):
which estimator to use
Returns:
numpy.ndarray, numpy.ndarray:
cov_mat: n_channels x n_channels sample covariance matrix
"""
matrix, dof_nat = _check_demean(matrix)
if dof is None:
dof = dof_nat
# calculate sample covariance matrix s
if method == 'shrinkage_eye':
cov_mat = _covariance_eye(matrix, dof)
elif method == 'shrinkage_diag':
cov_mat = _covariance_diag(matrix, dof)
elif method == 'diag':
cov_mat = _variance(matrix, dof)
    elif method == 'full':
        cov_mat = _covariance_full(matrix, dof)
    else:
        raise ValueError('covariance estimation method not recognized: ' + str(method))
    return cov_mat
def _variance(matrix, dof):
"""
returns the vector of variances per measurement channel.
The formula used here implies that the mean was already removed.
Args:
matrix (np.ndarray):
n_conditions x n_channels
Returns:
numpy.ndarray:
variance vector
"""
return np.diag(np.einsum('ij, ij-> j', matrix, matrix) / dof)
def _covariance_full(matrix, dof):
"""
computes the sample covariance matrix from a 2d-array.
matrix should be demeaned before!
Args:
matrix (np.ndarray):
n_conditions x n_channels
Returns:
numpy.ndarray, numpy.ndarray:
s_mean: n_channels x n_channels sample covariance matrix
"""
return np.einsum('ij, ik-> jk', matrix, matrix) / dof
def _covariance_eye(matrix, dof):
"""
computes the sample covariance matrix from a 2d-array.
matrix should be demeaned before!
Computes an optimal shrinkage estimate of a sample covariance matrix
as described by the following publication:
<NAME> Wolfe (2004): "A well-conditioned
estimator for large-dimensional covariance matrices"
Args:
matrix (np.ndarray):
n_conditions x n_channels
Returns:
numpy.ndarray, numpy.ndarray:
s_mean: n_channels x n_channels sample covariance matrix
xt_x:
Einstein summation form of the matrix product
of the 2d-array with itself
"""
s_sum = np.zeros((matrix.shape[1], matrix.shape[1]))
s2_sum = np.zeros((matrix.shape[1], matrix.shape[1]))
for m_line in matrix:
xt_x = np.outer(m_line, m_line)
s_sum += xt_x
s2_sum += xt_x ** 2
s = s_sum / matrix.shape[0]
b2 = np.sum(s2_sum / matrix.shape[0] - s * s) / matrix.shape[0]
# calculate the scalar estimators to find the optimal shrinkage:
# m, d^2, b^2 as in Ledoit & Wolfe paper
m = np.sum(np.diag(s)) / s.shape[0]
d2 = np.sum((s - m * np.eye(s.shape[0])) ** 2)
b2 = min(d2, b2)
# shrink covariance matrix
s_shrink = b2 / d2 * m * np.eye(s.shape[0]) \
+ (d2-b2) / d2 * s
# correction for degrees of freedom
s_shrink = s_shrink * matrix.shape[0] / dof
return s_shrink
def _covariance_diag(matrix, dof, mem_threshold=(10**9)/8):
"""
computes the sample covariance matrix from a 2d-array.
matrix should be demeaned before!
Computes an optimal shrinkage estimate of a sample covariance matrix
as described by the following publication:
<NAME>., & <NAME>. (2005). "A Shrinkage Approach to Large-Scale
Covariance Matrix Estimation and Implications for Functional Genomics.""
Args:
matrix (np.ndarray):
n_conditions x n_channels
Returns:
numpy.ndarray, numpy.ndarray:
s_mean: n_channels x n_channels sample covariance matrix
xt_x:
Einstein summation form of the matrix product
of the 2d-array with itself
"""
s_sum = np.zeros((matrix.shape[1], matrix.shape[1]))
s2_sum = np.zeros((matrix.shape[1], matrix.shape[1]))
for m_line in matrix:
xt_x = np.outer(m_line, m_line)
s_sum += xt_x
s2_sum += xt_x ** 2
s = s_sum / dof
var = np.diag(s)
std = np.sqrt(var)
s_mean = s_sum / np.expand_dims(std, 0) / np.expand_dims(std, 1) / (matrix.shape[0] - 1)
s2_mean = s2_sum / np.expand_dims(var, 0) / np.expand_dims(var, 1) / (matrix.shape[0] - 1)
var_hat = matrix.shape[0] / dof ** 2 \
* (s2_mean - s_mean ** 2)
    mask = ~np.eye(s.shape[0], dtype=bool)
lamb = np.sum(var_hat[mask]) / np.sum(s_mean[mask] ** 2)
lamb = max(min(lamb, 1), 0)
scaling = np.eye(s.shape[0]) + (1-lamb) * mask
s_shrink = s * scaling
return s_shrink
def sample_covariance_3d(tensor):
"""
computes the sample covariance matrix from a tensor by estimating the
sample covariance for each slice along the third dimension and averaging
the estimated covariance matrices.
Args:
tensor (numpy.ndarray):
n_conditions x n_channels x n_measurements
Returns:
numpy.ndarray:
s_mean: n_channels x n_channels expected sample covariance matrix
"""
xt_x = np.einsum('ij, ik-> ijk', tensor, tensor)
s = np.mean(xt_x, axis=0)
return s, xt_x
def cov_from_residuals(residuals, dof=None, method='shrinkage_diag'):
"""
Estimates a covariance matrix from measurements. Allows for shrinkage estimates.
Use 'method' to choose which estimation method is used.
Args:
residuals(numpy.ndarray or list of these): n_residuals x n_channels
matrix of residuals
dof(int or list of int): degrees of freedom for covariance estimation
defaults to n_res - 1, should be corrected for the number
of regressors in a GLM if applicable.
method(str): which estimate to use:
'diag': provides a diagonal matrix, i.e. univariate noise normalizer
'full': computes the sample covariance without shrinkage
'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.
Returns:
numpy.ndarray (or list): sigma_p: covariance matrix over channels
"""
if not isinstance(residuals, np.ndarray) or len(residuals.shape) > 2:
cov_mat = []
for i, residual in enumerate(residuals):
if dof is None:
cov_mat.append(cov_from_residuals(
residual, method=method))
elif isinstance(dof, Iterable):
cov_mat.append(cov_from_residuals(
                    residual, method=method, dof=dof[i]))
else:
cov_mat.append(cov_from_residuals(
residual, method=method, dof=dof))
else:
cov_mat = _estimate_covariance(residuals, dof, method)
return cov_mat
def prec_from_residuals(residuals, dof=None, method='shrinkage_diag'):
"""
Estimates the covariance matrix from residuals and finds its multiplicative
inverse (= the precision matrix)
Use 'method' to choose which estimation method is used.
Args:
residuals(numpy.ndarray or list of these): n_residuals x n_channels
matrix of residuals
dof(int or list of int): degrees of freedom for covariance estimation
defaults to n_res - 1, should be corrected for the number
of regressors in a GLM if applicable.
method(str): which estimate to use:
'diag': provides a diagonal matrix, i.e. univariate noise normalizer
'full': computes the sample covariance without shrinkage
'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.
Returns:
numpy.ndarray (or list): sigma_p: precision matrix over channels
"""
cov = cov_from_residuals(residuals=residuals, dof=dof, method=method)
if not isinstance(cov, np.ndarray):
prec = [None] * len(cov)
for i, cov_i in enumerate(cov):
prec[i] = np.linalg.inv(cov_i)
elif len(cov.shape) > 2:
prec = np.zeros(cov.shape)
for i, cov_i in enumerate(cov):
prec[i] = np.linalg.inv(cov_i)
else:
prec = np.linalg.inv(cov)
return prec
def cov_from_measurements(dataset, obs_desc, dof=None, method='shrinkage_diag'):
"""
Estimates a covariance matrix from measurements. Allows for shrinkage estimates.
Use 'method' to choose which estimation method is used.
Args:
dataset(data.Dataset):
rsatoolbox Dataset object
dof(int or list of int): degrees of freedom for covariance estimation
defaults to n_res - 1, should be corrected for the number
of regressors in a GLM if applicable.
method(str): which estimate to use:
'diag': provides a diagonal matrix, i.e. univariate noise normalizer
'full': computes the sample covariance without shrinkage
'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.
Returns:
numpy.ndarray (or list): sigma_p: covariance matrix over channels
"""
if isinstance(dataset, Iterable):
cov_mat = []
for i, dat in enumerate(dataset):
if dof is None:
                cov_mat.append(cov_from_measurements(
                    dat, obs_desc=obs_desc, method=method))
            elif isinstance(dof, Iterable):
                cov_mat.append(cov_from_measurements(
                    dat, obs_desc=obs_desc, method=method, dof=dof[i]))
            else:
                cov_mat.append(cov_from_measurements(
                    dat, obs_desc=obs_desc, method=method, dof=dof))
else:
assert "Dataset" in str(type(dataset)), "Provided object is not a dataset"
assert obs_desc in dataset.obs_descriptors.keys(), \
"obs_desc not contained in the dataset's obs_descriptors"
tensor, _ = dataset.get_measurements_tensor(obs_desc)
# calculate sample covariance matrix s
cov_mat = _estimate_covariance(tensor, dof, method)
return cov_mat
def prec_from_measurements(dataset, obs_desc, dof=None, method='shrinkage_diag'):
"""
Estimates the covariance matrix from measurements and finds its multiplicative
inverse (= the precision matrix)
Use 'method' to choose which estimation method is used.
Args:
residuals(numpy.ndarray or list of these): n_residuals x n_channels
matrix of residuals
dof(int or list of int): degrees of freedom for covariance estimation
defaults to n_res - 1, should be corrected for the number
of regressors in a GLM if applicable.
method(str): which estimate to use:
'diag': provides a diagonal matrix, i.e. univariate noise normalizer
'full': computes the sample covariance without shrinkage
'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.
Returns:
numpy.ndarray (or list): sigma_p: precision matrix over channels
"""
cov = cov_from_measurements(dataset, obs_desc, dof=dof, method=method)
if not isinstance(cov, np.ndarray):
prec = [None] * len(cov)
for i, cov_i in enumerate(cov):
prec[i] = np.linalg.inv(cov_i)
elif len(cov.shape) > 2:
prec = np.zeros(cov.shape)
for i, cov_i in enumerate(cov):
prec[i] = np.linalg.inv(cov_i)
else:
prec = np.linalg.inv(cov)
return prec
def cov_from_unbalanced(dataset, obs_desc, dof=None, method='shrinkage_diag'):
"""
Estimates a covariance matrix from an unbalanced dataset, i.e. from a
dataset that contains different numbers of samples for different
stimuli.
Args:
dataset(data.Dataset):
rsatoolbox Dataset object
dof(int or list of int): degrees of freedom for covariance estimation
defaults to n_measurements - n_stimuli, should be corrected
if this is not the case
method(str): which estimate to use:
'diag': provides a diagonal matrix, i.e. univariate noise normalizer
'full': computes the sample covariance without shrinkage
'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.
Returns:
numpy.ndarray (or list): sigma_p: covariance matrix over channels
"""
if isinstance(dataset, Iterable):
cov_mat = []
for i, dat in enumerate(dataset):
if dof is None:
cov_mat.append(cov_from_unbalanced(
dat, obs_desc=obs_desc, method=method))
elif isinstance(dof, Iterable):
cov_mat.append(cov_from_unbalanced(
dat, obs_desc=obs_desc, method=method, dof=dof[i]))
else:
cov_mat.append(cov_from_unbalanced(
dat, obs_desc=obs_desc, method=method, dof=dof))
else:
assert "Dataset" in str(type(dataset)), "Provided object is not a dataset"
assert obs_desc in dataset.obs_descriptors.keys(), \
"obs_desc not contained in the dataset's obs_descriptors"
matrix = dataset.measurements
means, values, _ = average_dataset_by(dataset, obs_desc)
values, inverse = get_unique_inverse(dataset.obs_descriptors[obs_desc])
matrix -= means[inverse]
# calculate sample covariance matrix s
if dof is None:
dof = matrix.shape[0] - len(values)
cov_mat = _estimate_covariance(matrix, dof, method)
return cov_mat
def prec_from_unbalanced(dataset, obs_desc, dof=None, method='shrinkage_diag'):
"""
Estimates the covariance matrix from measurements and finds its multiplicative
inverse (= the precision matrix)
Use 'method' to choose which estimation method is used.
Args:
residuals(numpy.ndarray or list of these): n_residuals x n_channels
matrix of residuals
dof(int or list of int): degrees of freedom for covariance estimation
defaults to n_res - 1, should be corrected for the number
of regressors in a GLM if applicable.
method(str): which estimate to use:
'diag': provides a diagonal matrix, i.e. univariate noise normalizer
'full': computes the sample covariance without shrinkage
'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.
Returns:
numpy.ndarray (or list): sigma_p: precision matrix over channels
"""
cov = cov_from_unbalanced(dataset, obs_desc, dof=dof, method=method)
if not isinstance(cov, np.ndarray):
prec = [None] * len(cov)
for i, cov_i in enumerate(cov):
prec[i] = np.linalg.inv(cov_i)
elif len(cov.shape) > 2:
prec = np.zeros(cov.shape)
for i, cov_i in enumerate(cov):
prec[i] = np.linalg.inv(cov_i)
else:
prec = np.linalg.inv(cov)
return prec
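# A minimal usage sketch for the estimators above, assuming residuals shaped
# (n_residuals, n_channels); the random array below is purely illustrative.
if __name__ == "__main__":
    _residuals = np.random.randn(200, 30)
    _sigma = cov_from_residuals(_residuals, method='shrinkage_diag')
    _prec = prec_from_residuals(_residuals, method='shrinkage_diag')
    print(_sigma.shape, _prec.shape)  # both (30, 30)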
| 2.9375
| 3
|
src/node.py
|
mattianeroni/MSTOP
| 0
|
12782514
|
<reponame>mattianeroni/MSTOP
"""
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This file is part of the collaboration with Universitat Oberta de Catalunya (UOC) on
Multi-Source Team Orienteering Problem (MSTOP).
The objective of the project is to develop an efficient algorithm to solve this extension
of the classic team orienteering problem, in which the vehicles / paths may start from
several different sources.
Author: <NAME>, Ph.D., Eng.
Contact: <EMAIL>
Date: January 2022
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
import collections
class Node:
"""
An instance of this class represents a node to visit or
a source some vehicles are starting from.
It is used for the depot too.
"""
def __init__(self, id, x, y, revenue, *, color='#FDDD71', issource=False, vehicles=0, isdepot=False):
"""
Initialise.
:param id: The unique id of the node.
        :param x: The x-coordinate of the node.
:param y: The y-coordinate of the node.
:param revenue: The revenue.
:param issource: A boolean variable that says if the node is a source.
:param isdepot: A boolean variable that says if the node is the depot.
:param vehicles: The number of vehicles starting from this node (it is 0
if the node is not a source).
*** Parameters used by the Mapper ***
        :attr assigned: True if the node is assigned to a source and False otherwise
:attr preferences: Used in case of source node for the round-robin process.
:attr nodes: Used in case of source for keeping the nodes assigned to it.
*** Parameters used by the PJS ***
:attr from_source: The length of the current path from the source to this node.
:attr to_depot: The length of the current path from this node to the depot.
:attr route: The current route corresponding to the node.
:attr link_left: True if the node is linked to the source, False otherwise.
:attr link_right: True if the node is linked to the depot, False otherwise.
"""
self.id = id
self.x = x
self.y = y
self.revenue = revenue
self.issource = issource
self.vehicles = vehicles
self.isdepot = isdepot
# Attributes used by the Mapper
self.assigned = False
self.preferences = collections.deque()
self.nodes = collections.deque()
# Attributes used by the PJS
self.from_source = 0
self.to_depot = 0
self.route = None
self.link_left = False
self.link_right = False
def __copy__(self):
obj = Node.__new__(self.__class__)
obj.__dict__.update(self.__dict__)
return obj
def __repr__(self):
return f"Node {self.id}"
def __hash__(self):
return self.id
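# A minimal usage sketch; the ids, coordinates and revenues below are
# illustrative only.
if __name__ == "__main__":
    source = Node(0, 0.0, 0.0, 0, issource=True, vehicles=2)
    depot = Node(1, 10.0, 10.0, 0, isdepot=True)
    customer = Node(2, 5.0, 4.0, 30)
    source.nodes.append(customer)
    print(source, depot, customer, len(source.nodes))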
| 3.515625
| 4
|
utils.py
|
YunzhuLi/CompositionalKoopmanOperators
| 56
|
12782515
|
import sys
import h5py
import numpy as np
import torch
from torch.autograd import Variable
def print_args(args):
print("===== Experiment Configuration =====")
options = vars(args)
for key, value in options.items():
print(f'{key}: {value}')
print("====================================")
def rand_float(lo, hi):
return np.random.rand() * (hi - lo) + lo
def rand_int(lo, hi):
return np.random.randint(lo, hi)
def calc_dis(a, b):
return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def norm(x, p=2):
return np.power(np.sum(x ** p), 1. / p)
def store_data(data_names, data, path):
hf = h5py.File(path, 'w')
for i in range(len(data_names)):
hf.create_dataset(data_names[i], data=data[i])
hf.close()
def load_data(data_names, path):
hf = h5py.File(path, 'r')
data = []
for i in range(len(data_names)):
d = np.array(hf.get(data_names[i]))
data.append(d)
hf.close()
return data
def combine_stat(stat_0, stat_1):
mean_0, std_0, n_0 = stat_0[:, 0], stat_0[:, 1], stat_0[:, 2]
mean_1, std_1, n_1 = stat_1[:, 0], stat_1[:, 1], stat_1[:, 2]
mean = (mean_0 * n_0 + mean_1 * n_1) / (n_0 + n_1)
std = np.sqrt(
(std_0 ** 2 * n_0 + std_1 ** 2 * n_1 + (mean_0 - mean) ** 2 * n_0 + (mean_1 - mean) ** 2 * n_1) / (n_0 + n_1))
n = n_0 + n_1
return np.stack([mean, std, n], axis=-1)
def init_stat(dim):
# mean, std, count
return np.zeros((dim, 3))
def var_norm(x):
return torch.sqrt((x ** 2).sum()).item()
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_flat(x, keep_dim=False):
if keep_dim:
return x.reshape(torch.Size([1, x.size(0) * x.size(1)]) + x.size()[2:])
return x.reshape(torch.Size([x.size(0) * x.size(1)]) + x.size()[2:])
def to_var(tensor, use_gpu, requires_grad=False):
if use_gpu:
return Variable(torch.FloatTensor(tensor).cuda(), requires_grad=requires_grad)
else:
return Variable(torch.FloatTensor(tensor), requires_grad=requires_grad)
def to_np(x):
return x.detach().cpu().numpy()
def mix_iters(iters):
table = []
for i, iter in enumerate(iters):
table += [i] * len(iter)
np.random.shuffle(table)
for i in table:
        yield next(iters[i])
class Tee(object):
def __init__(self, name, mode):
self.file = open(name, mode)
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
def flush(self):
self.file.flush()
def close(self):
self.__del__()
class AverageMeter(object):
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
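# A minimal usage sketch of the statistics and HDF5 helpers above; the path and
# array shapes are illustrative only.
if __name__ == "__main__":
    stat_a = np.stack([np.zeros(3), np.ones(3), np.full(3, 10.)], axis=-1)  # mean, std, count
    stat_b = np.stack([np.ones(3), np.ones(3), np.full(3, 30.)], axis=-1)
    print(combine_stat(stat_a, stat_b))
    store_data(['positions'], [np.random.rand(5, 2)], '/tmp/example_stats.h5')
    print(load_data(['positions'], '/tmp/example_stats.h5')[0].shape)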
| 2.125
| 2
|
decode-vbe.py
|
Hong5489/TwoReal
| 2
|
12782516
|
#!/usr/bin/env python
__description__ = 'Decode VBE script'
__author__ = '<NAME>'
__version__ = '0.0.2'
__date__ = '2016/03/29'
"""
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2016/03/28: start
2016/03/29: 0.0.2 added support for ZIP files and literal arguments with File2StringHash
Todo:
Reference:
https://gallery.technet.microsoft.com/Encode-and-Decode-a-VB-a480d74c
"""
import optparse
import sys
import os
import signal
import textwrap
import re
import zipfile
import binascii
MALWARE_PASSWORD = '<PASSWORD>'
def PrintManual():
manual = '''
Manual:
This program reads from the given file or standard input, and converts the encoded VBE script to VBS.
The provided file can be a password protected ZIP file (with password infected) containing the VBE script.
The content of the VBE script can also be passed as a literal argument. This is similar to a Here Document in Unix.
Start the argument (the "filename") with character # to pass a literal argument.
Example: decode-vbe.py "##@~^DgAAAA==\ko$K6,JCV^GJqAQAAA==^#~@"
Result: MsgBox "Hello"
It's also possible to use hexadecimal (prefix #h#) or base64 (prefix #b#) to pass a literal argument.
Example: decode-vbe.py #h#23407E5E4467414141413D3D5C6B6F244B362C4A437F565E474A7141514141413D3D5E237E40
Result: MsgBox "Hello"
Example: decode-vbe.py #b#I<KEY>=
Result: MsgBox "Hello"
'''
for line in manual.split('\n'):
print(textwrap.fill(line))
#Convert 2 Bytes If Python 3
def C2BIP3(string):
if sys.version_info[0] > 2:
return bytes([ord(x) for x in string])
else:
return string
def File2String(filename):
try:
f = open(filename, 'rb')
except:
return None
try:
return f.read()
except:
return None
finally:
f.close()
def File2StringHash(filename):
decoded = None
if filename.startswith('#h#'):
try:
decoded = binascii.a2b_hex(filename[3:])
finally:
return decoded
elif filename.startswith('#b#'):
try:
decoded = binascii.a2b_base64(filename[3:])
finally:
return decoded
elif filename.startswith('#'):
return filename[1:]
elif filename.lower().endswith('.zip'):
oZipfile = zipfile.ZipFile(filename, 'r')
if len(oZipfile.infolist()) == 1:
oZipContent = oZipfile.open(oZipfile.infolist()[0], 'r', C2BIP3(MALWARE_PASSWORD))
data = oZipContent.read()
oZipContent.close()
else:
data = File2String(filename)
oZipfile.close()
return data
else:
return File2String(filename)
def FixPipe():
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except:
pass
#Fix for http://bugs.python.org/issue11395
def StdoutWriteChunked(data):
while data != '':
sys.stdout.write(data[0:10000])
sys.stdout.flush()
data = data[10000:]
def Decode(data):
dDecode = {}
dDecode[9] = '\x57\x6E\x7B'
dDecode[10] = '\x4A\x4C\x41'
dDecode[11] = '\x0B\x0B\x0B'
dDecode[12] = '\x0C\x0C\x0C'
dDecode[13] = '\x4A\x4C\x41'
dDecode[14] = '\x0E\x0E\x0E'
dDecode[15] = '\x0F\x0F\x0F'
dDecode[16] = '\x10\x10\x10'
dDecode[17] = '\x11\x11\x11'
dDecode[18] = '\x12\x12\x12'
dDecode[19] = '\x13\x13\x13'
dDecode[20] = '\x14\x14\x14'
dDecode[21] = '\x15\x15\x15'
dDecode[22] = '\x16\x16\x16'
dDecode[23] = '\x17\x17\x17'
dDecode[24] = '\x18\x18\x18'
dDecode[25] = '\x19\x19\x19'
dDecode[26] = '\x1A\x1A\x1A'
dDecode[27] = '\x1B\x1B\x1B'
dDecode[28] = '\x1C\x1C\x1C'
dDecode[29] = '\x1D\x1D\x1D'
dDecode[30] = '\x1E\x1E\x1E'
dDecode[31] = '\x1F\x1F\x1F'
dDecode[32] = '\x2E\x2D\x32'
dDecode[33] = '\x47\x75\x30'
dDecode[34] = '\x7A\x52\x21'
dDecode[35] = '\x56\x60\x29'
dDecode[36] = '\x42\x71\x5B'
dDecode[37] = '\x6A\x5E\x38'
dDecode[38] = '\x2F\x49\x33'
dDecode[39] = '\x26\x5C\x3D'
dDecode[40] = '\x49\x62\x58'
dDecode[41] = '\x41\x7D\x3A'
dDecode[42] = '\x34\x29\x35'
dDecode[43] = '\x32\x36\x65'
dDecode[44] = '\x5B\x20\x39'
dDecode[45] = '\x76\x7C\x5C'
dDecode[46] = '\x72\x7A\x56'
dDecode[47] = '\x43\x7F\x73'
dDecode[48] = '\x38\x6B\x66'
dDecode[49] = '\x39\x63\x4E'
dDecode[50] = '\x70\x33\x45'
dDecode[51] = '\x45\x2B\x6B'
dDecode[52] = '\x68\x68\x62'
dDecode[53] = '\x71\x51\x59'
dDecode[54] = '\x4F\x66\x78'
dDecode[55] = '\x09\x76\x5E'
dDecode[56] = '\x62\x31\x7D'
dDecode[57] = '\x44\x64\x4A'
dDecode[58] = '\x23\x54\x6D'
dDecode[59] = '\x75\x43\x71'
dDecode[60] = '\x4A\x4C\x41'
dDecode[61] = '\x7E\x3A\x60'
dDecode[62] = '\x4A\x4C\x41'
dDecode[63] = '\x5E\x7E\x53'
dDecode[64] = '\x40\x4C\x40'
dDecode[65] = '\x77\x45\x42'
dDecode[66] = '\x4A\x2C\x27'
dDecode[67] = '\x61\x2A\x48'
dDecode[68] = '\x5D\x74\x72'
dDecode[69] = '\x22\x27\x75'
dDecode[70] = '\x4B\x37\x31'
dDecode[71] = '\x6F\x44\x37'
dDecode[72] = '\x4E\x79\x4D'
dDecode[73] = '\x3B\x59\x52'
dDecode[74] = '\x4C\x2F\x22'
dDecode[75] = '\x50\x6F\x54'
dDecode[76] = '\x67\x26\x6A'
dDecode[77] = '\x2A\x72\x47'
dDecode[78] = '\x7D\x6A\x64'
dDecode[79] = '\x74\x39\x2D'
dDecode[80] = '\x54\x7B\x20'
dDecode[81] = '\x2B\x3F\x7F'
dDecode[82] = '\x2D\x38\x2E'
dDecode[83] = '\x2C\x77\x4C'
dDecode[84] = '\x30\x67\x5D'
dDecode[85] = '\x6E\x53\x7E'
dDecode[86] = '\x6B\x47\x6C'
dDecode[87] = '\x66\x34\x6F'
dDecode[88] = '\x35\x78\x79'
dDecode[89] = '\x25\x5D\x74'
dDecode[90] = '\x21\x30\x43'
dDecode[91] = '\x64\x23\x26'
dDecode[92] = '\x4D\x5A\x76'
dDecode[93] = '\x52\x5B\x25'
dDecode[94] = '\x63\x6C\x24'
dDecode[95] = '\x3F\x48\x2B'
dDecode[96] = '\x7B\x55\x28'
dDecode[97] = '\x78\x70\x23'
dDecode[98] = '\x29\x69\x41'
dDecode[99] = '\x28\x2E\x34'
dDecode[100] = '\x73\x4C\x09'
dDecode[101] = '\x59\x21\x2A'
dDecode[102] = '\x33\x24\x44'
dDecode[103] = '\x7F\x4E\x3F'
dDecode[104] = '\x6D\x50\x77'
dDecode[105] = '\x55\x09\x3B'
dDecode[106] = '\x53\x56\x55'
dDecode[107] = '\x7C\x73\x69'
dDecode[108] = '\x3A\x35\x61'
dDecode[109] = '\x5F\x61\x63'
dDecode[110] = '\x65\x4B\x50'
dDecode[111] = '\x46\x58\x67'
dDecode[112] = '\x58\x3B\x51'
dDecode[113] = '\x31\x57\x49'
dDecode[114] = '\x69\x22\x4F'
dDecode[115] = '\x6C\x6D\x46'
dDecode[116] = '\x5A\x4D\x68'
dDecode[117] = '\x48\x25\x7C'
dDecode[118] = '\x27\x28\x36'
dDecode[119] = '\x5C\x46\x70'
dDecode[120] = '\x3D\x4A\x6E'
dDecode[121] = '\x24\x32\x7A'
dDecode[122] = '\x79\x41\x2F'
dDecode[123] = '\x37\x3D\x5F'
dDecode[124] = '\x60\x5F\x4B'
dDecode[125] = '\x51\x4F\x5A'
dDecode[126] = '\x20\x42\x2C'
dDecode[127] = '\x36\x65\x57'
dCombination = {}
dCombination[0] = 0
dCombination[1] = 1
dCombination[2] = 2
dCombination[3] = 0
dCombination[4] = 1
dCombination[5] = 2
dCombination[6] = 1
dCombination[7] = 2
dCombination[8] = 2
dCombination[9] = 1
dCombination[10] = 2
dCombination[11] = 1
dCombination[12] = 0
dCombination[13] = 2
dCombination[14] = 1
dCombination[15] = 2
dCombination[16] = 0
dCombination[17] = 2
dCombination[18] = 1
dCombination[19] = 2
dCombination[20] = 0
dCombination[21] = 0
dCombination[22] = 1
dCombination[23] = 2
dCombination[24] = 2
dCombination[25] = 1
dCombination[26] = 0
dCombination[27] = 2
dCombination[28] = 1
dCombination[29] = 2
dCombination[30] = 2
dCombination[31] = 1
dCombination[32] = 0
dCombination[33] = 0
dCombination[34] = 2
dCombination[35] = 1
dCombination[36] = 2
dCombination[37] = 1
dCombination[38] = 2
dCombination[39] = 0
dCombination[40] = 2
dCombination[41] = 0
dCombination[42] = 0
dCombination[43] = 1
dCombination[44] = 2
dCombination[45] = 0
dCombination[46] = 2
dCombination[47] = 1
dCombination[48] = 0
dCombination[49] = 2
dCombination[50] = 1
dCombination[51] = 2
dCombination[52] = 0
dCombination[53] = 0
dCombination[54] = 1
dCombination[55] = 2
dCombination[56] = 2
dCombination[57] = 0
dCombination[58] = 0
dCombination[59] = 1
dCombination[60] = 2
dCombination[61] = 0
dCombination[62] = 2
dCombination[63] = 1
result = ''
index = -1
for char in data.replace('@&', chr(10)).replace('@#', chr(13)).replace('@*', '>').replace('@!', '<').replace('@$', '@'):
byte = ord(char)
if byte < 128:
index = index + 1
if (byte == 9 or byte > 31 and byte < 128) and byte != 60 and byte != 62 and byte != 64:
char = [c for c in dDecode[byte]][dCombination[index % 64]]
result += char
return result
def DecodeVBE(filename, options):
FixPipe()
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
if filename == '':
content = sys.stdin.read()
else:
content = File2StringHash(filename)
oMatch = re.search(r'#@~\^......==(.+)......==\^#~@', content)
    if oMatch is None:
print('No encoded script found!')
else:
StdoutWriteChunked(Decode(oMatch.groups()[0]))
def Main():
oParser = optparse.OptionParser(usage='usage: %prog [options] [file]\n' + __description__, version='%prog ' + __version__)
oParser.add_option('-m', '--man', action='store_true', default=False, help='Print manual')
(options, args) = oParser.parse_args()
if options.man:
oParser.print_help()
PrintManual()
return
if len(args) > 1:
oParser.print_help()
print('')
print(' Source code put in the public domain by <NAME>, no Copyright')
print(' Use at your own risk')
print(' https://DidierStevens.com')
return
elif len(args) == 0:
DecodeVBE('', options)
else:
DecodeVBE(args[0], options)
if __name__ == '__main__':
Main()
| 2.765625
| 3
|
utils.py
|
STEELISI/SENMO
| 1
|
12782517
|
import os
import time
import numpy as np
import pandas as pd
from nltk import word_tokenize
from nltk.util import ngrams
import tensorflow as tf
from transformers import TFBertModel
from transformers import BertTokenizer
from tensorflow.keras.layers import Dense, Flatten
bert_model_name = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=True)
print('...BERT tokenizer loading complete')
cols_to_use = [
'Note',
'LGBTQ',
'ADULT_CONTENT',
'HEALTH',
'DRUGS_ALCOHOL_GAMBLING',
'RACE',
'VIOLENCE_CRIME',
'POLITICS',
'RELATION',
'LOCATION'
]
label_cols = cols_to_use[1:] #exclude note (input)
class BertClassifier(tf.keras.Model):
def __init__(self, bert: TFBertModel, num_classes: int):
super().__init__()
self.bert = bert
self.classifier = Dense(num_classes, activation='sigmoid')
@tf.function
def call(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
cls_output = outputs[1]
cls_output = self.classifier(cls_output)
return cls_output
def read_df(path):
df = pd.read_csv(path)
df = df[cols_to_use]
print('Number of all sentences: {}'.format(len(df)))
df['Note'] = df.Note.replace('NA',np.nan)
df = df.dropna().sample(frac=1).reset_index(drop=True)
print('Number of non-empty sentences: {}'.format(len(df)))
return df
def get_ids_masks(sentences, MAX_LEN):
ids = []
masks = []
for sent in sentences:
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens = True, # Add '[CLS]' and '[SEP]'
truncation = 'longest_first',
max_length = MAX_LEN, # Pad & truncate all sentences.
padding = 'max_length',
return_attention_mask = True, # Construct attn. masks.
)
ids.append(encoded_dict['input_ids'])
masks.append(encoded_dict['attention_mask'])
return ids, masks
def create_dataset(data_tuple, epochs=1, batch_size=32, buffer_size=100, train=True):
dataset = tf.data.Dataset.from_tensor_slices(data_tuple)
if train:
dataset = dataset.shuffle(buffer_size=buffer_size)
dataset = dataset.batch(batch_size)
if train:
dataset = dataset.prefetch(1)
return dataset
def get_keywords():
# retrieve keywords if exist
if os.path.exists("./data/keyword_list.txt"):
print("...Keyword list loading complete")
with open("./data/keyword_list.txt", 'r') as keyword_file:
keywords = set()
for word in keyword_file.readlines():
keywords.add(word.strip())
return keywords
# construct keywords if not exist
keywords_path = "./data/Lexicon/"
filenames = [os.path.join(keywords_path, f) for f in os.listdir(keywords_path) if os.path.isfile(os.path.join(keywords_path, f))]
keywords = set()
for fn in filenames:
with open(fn, 'r') as keyword_file:
for line in keyword_file.readlines():
word = line.strip()
if word:
keywords.add(word.lower())
with open("./data/keyword_list.txt", 'w') as keyword_file:
for word in keywords:
keyword_file.write("{}\n".format(word))
print("...Keyword list building complete")
return keywords
def not_in_keywords(note, keywords):
unigrams = word_tokenize(note)
bigrams = ngrams(unigrams, 2)
bigrams = [' '.join(bg) for bg in bigrams]
trigrams = ngrams(unigrams, 3)
trigrams = [' '.join(tg) for tg in trigrams]
tokens = unigrams + bigrams + trigrams
for t in tokens:
if t in keywords:
return False
return True
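# A minimal usage sketch of the tokenization and dataset helpers above; the
# sentences, labels and batch size are illustrative only.
if __name__ == "__main__":
    sentences = ["patient reports mild pain", "prefers evening visits"]
    ids, masks = get_ids_masks(sentences, 64)
    labels = np.zeros((len(sentences), len(label_cols)), dtype=np.float32)
    dataset = create_dataset((np.array(ids), np.array(masks), labels),
                             batch_size=2, train=False)
    for batch in dataset:
        print([t.shape for t in batch])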
| 2.59375
| 3
|
Amazon.py
|
Shaur-Repositories/PS5-Bots
| 0
|
12782518
|
<reponame>Shaur-Repositories/PS5-Bots
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, TimeoutException
import time
options = webdriver.ChromeOptions()
prefs = {'profile.default_content_setting_values': { 'images': 2, 'javascript': 2,
'plugins': 2, 'popups': 2, 'geolocation': 2,
'notifications': 2, 'auto_select_certificate': 2, 'fullscreen': 2,
'mouselock': 2, 'mixed_script': 2, 'media_stream': 2,
'media_stream_mic': 2, 'media_stream_camera': 2, 'protocol_handlers': 2,
'ppapi_broker': 2, 'automatic_downloads': 2, 'midi_sysex': 2,
'push_messaging': 2, 'ssl_cert_decisions': 2, 'metro_switch_to_desktop': 2,
'protected_media_identifier': 2, 'app_banner': 2, 'site_engagement': 2,
'durable_storage': 2 }}
options.add_experimental_option('prefs', prefs)
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
driver= webdriver.Chrome(options=options,executable_path=r"\Drivers\chromedriver91.exe") #driver path
driver.get("https://www.amazon.in/gp/product/B08FV5GC28")#product page
first=driver.find_element_by_id("nav-link-accountList")
first.click()
email=driver.find_element_by_id("ap_email")
email.send_keys("<EMAIL>")#email id
continuee=driver.find_element_by_id("continue")
continuee.click()
password=driver.find_element_by_id("ap_password")
password.send_keys("<PASSWORD>")#password
remme=driver.find_element_by_name("rememberMe")
remme.click()
siginbtn=driver.find_element_by_id("signInSubmit")
siginbtn.click()
while True:
try:
buynow=driver.find_element_by_id("buy-now-button")
except NoSuchElementException:
print("not yet")
driver.refresh()
else:
print("yes")
buynow.click()
break
address=driver.find_element_by_id("address-book-entry-0")
addresbtn=address.find_element_by_link_text("Deliver to this address")
addresbtn.click()
addcvv=driver.find_element_by_name("addCreditCardVerificationNumber0")
addcvv.send_keys("123")#cvv no.
revieworder=driver.find_element_by_name("ppw-widgetEvent:SetPaymentPlanSelectContinueEvent")
revieworder.click()
try:
    placeorder = WebDriverWait(driver, 30).until(
        EC.presence_of_element_located((By.ID, "placeYourOrder"))
    )
    placeorder.click()
except TimeoutException:
    print("Timed out waiting for the place order button")
print(driver.title)
| 1.9375
| 2
|
mockingbird/unstructured_data_document/docx_document.py
|
openraven/mockingbird
| 15
|
12782519
|
#
# Copyright 2021 Open Raven Inc. and the Mockingbird project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import final
from docx import Document
from mockingbird.__base import __BaseDocument
from .__base import __BaseUnstructuredDataType
class DOCXDocument(__BaseDocument):
def __init__(self):
super().__init__(extension="docx")
# Create a list of docx formats we're going to export.
self._docx_styles = []
active_styles = self._configurable_dict["unstructured_data"]["docx_document"]["active_styles"]
if active_styles["paragraph_style"]:
self._docx_styles.append(_DocxParagraphStyle)
if active_styles["footer_style"]:
self._docx_styles.append(_DocxFooterStyle)
if active_styles["bullet_point_style"]:
self._docx_styles.append(_DocxBulletPointStyle)
if active_styles["chat_style"]:
self._docx_styles.append(_DocxChatStyle)
@final
def save(self, save_path: str) -> None:
for style in self._docx_styles:
instantiated_style = style()
instantiated_style.clone_sensitive_data(other=self)
instantiated_style.save(save_path=save_path)
self._meta_data_object.add_other_meta_data(instantiated_style._meta_data_object)
class _DocxParagraphStyle(__BaseUnstructuredDataType):
"""
Writes a simple paragraph containing sensitive-soup.
"""
def __init__(self):
super().__init__(extension="docx")
@final
def save(self, save_path: str) -> None:
"""
"""
save_file = self.setup_save_file(save_path=save_path, extension=self.extension)
document = Document()
document.add_heading('Paragraph Styled Document', 0)
sensitive_soup = self._get_sensitive_soup()
document.add_paragraph(sensitive_soup)
document.save(save_file)
self._log_save(save_file)
class _DocxFooterStyle(__BaseUnstructuredDataType):
"""
Writes a simple document with sensitive-soup in the footer.
"""
def __init__(self):
super().__init__(extension="docx")
@final
def save(self, save_path: str) -> None:
"""
"""
save_file = self.setup_save_file(save_path=save_path, extension=self.extension)
sensitive_soup = self._get_sensitive_soup()
document = Document()
document.add_heading('Sensitive-Data in Footer Styled Document', 0)
section = document.sections[0]
footer = section.footer
footer.paragraphs[0].text = sensitive_soup
document.save(save_file)
self._log_save(save_file)
class _DocxBulletPointStyle(__BaseUnstructuredDataType):
"""
    Writes a simple document with sensitive-soup organized as bullet points.
"""
def __init__(self):
super().__init__(extension="docx")
@final
def save(self, save_path: str) -> None:
"""
"""
save_file = self.setup_save_file(save_path=save_path, extension=self.extension)
enumerated_groups = self._get_enumerated_style()
document = Document()
document.add_heading('Sensitive Data Stored in Bullet Points', 0)
for group in enumerated_groups:
key, enumerated_items = group
document.add_heading(key, level=1)
for item in enumerated_items:
document.add_paragraph(item, style="List Bullet")
document.save(save_file)
self._log_save(save_file)
class _DocxChatStyle(__BaseUnstructuredDataType):
"""
    Writes a simple document with sensitive-soup presented as a chat log.
"""
def __init__(self):
super().__init__(extension="docx")
@final
def save(self, save_path: str) -> None:
"""
"""
save_file = self.setup_save_file(save_path=save_path, extension=self.extension)
chat_log = self._get_chat_log()
document = Document()
document.add_heading('A chat between two people', 0)
for line in chat_log:
document.add_paragraph(line)
document.save(save_file)
self._log_save(save_file)
| 1.78125
| 2
|
src/secondaires/tags/editeurs/selection_tags.py
|
vlegoff/tsunami
| 14
|
12782520
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit le contexte-éditeur 'SelectionTags'."""
from primaires.interpreteur.editeur.selection import Selection
from primaires.format.fonctions import supprimer_accents
class SelectionTags(Selection):
"""Contexte-éditeur pour la sélection de tags."""
nom = "editeur:tags:selection"
def __init__(self, pere, objet=None, attribut=None, liste=None,
tagge=None):
Selection.__init__(self, pere, objet, attribut, liste)
self.tagge = tagge
@staticmethod
def afficher_apercu(apercu, objet, valeur, liste=None, tagge=None):
"""Affichage de l'aperçu."""
return Selection.afficher_apercu(apercu, objet, valeur, liste)
def interpreter(self, msg):
"""Interprétation du contexte"""
nom = msg
msg_sa = supprimer_accents(msg).lower()
liste = getattr(self.objet, self.attribut)
cles = list(self.liste)
cles_sa = [supprimer_accents(c).lower() for c in cles]
if msg_sa in cles_sa:
cle = cles[cles_sa.index(msg_sa)]
if cle in liste:
while cle in liste:
liste.remove(cle)
else:
liste.append(cle)
# Ajout des évènements à l'objet taggé
tag = importeur.tags.tags[cle]
script = tag.script
for evenement in script.evenements.values():
evt = self.tagge.script[evenement.nom]
evt.copier_depuis(evenement)
self.pere << "Copie de l'évènement {}.".format(
evenement.nom)
liste[:] = [e for e in liste if e]
self.actualiser()
else:
self.pere << "|err|La clé {} est introuvable.|ff|".format(
repr(msg))
| 1.414063
| 1
|
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py
|
sreecodeslayer/cookiecutter-flask-restful
| 5
|
12782521
|
<gh_stars>1-10
from passlib.hash import pbkdf2_sha256
from flask_jwt_extended import JWTManager
from flask_marshmallow import Marshmallow
from flask_mongoengine import MongoEngine
db = MongoEngine()
jwt = JWTManager()
ma = Marshmallow()
pwd_context = pbkdf2_sha256
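# A minimal usage sketch: pbkdf2_sha256 exposes hash()/verify(), so the rest of
# the app can do the following (the secret below is illustrative only).
if __name__ == "__main__":
    hashed = pwd_context.hash("example-secret")
    assert pwd_context.verify("example-secret", hashed)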
| 1.84375
| 2
|
formidable/migrations/0009_field_parameters.py
|
jayvdb/django-formidable
| 11
|
12782522
|
<gh_stars>10-100
# Generated by Django 1.11.6 on 2018-07-23 10:37
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('formidable', '0008_formidable_item_value_field_size'),
]
operations = [
migrations.AddField(
model_name='field',
name='parameters',
field=jsonfield.fields.JSONField(
blank=True, default={}, null=True
),
),
]
| 1.5625
| 2
|
custom_email_user/models.py
|
garyburgmann/django-custom-email-user
| 2
|
12782523
|
<filename>custom_email_user/models.py
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractUser
from django.core.validators import EmailValidator
from django.contrib.auth.validators import UnicodeUsernameValidator
class UserManager(BaseUserManager):
def create_user(self, email, password=<PASSWORD>, **kwargs):
"""
Creates and saves a User with the given:
email, password
"""
        existing_email = self.model.objects.filter(
email=email
).first()
if existing_email:
raise Exception('This email is already assigned to another User')
user = self.model(
email=self.normalize_email(email),
username=kwargs.get('username', '')
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password=<PASSWORD>, **kwargs):
"""
Creates and saves a Superuser with the given:
email, password
"""
existing_email = self.model.objects.filter(
email=email
).first()
if existing_email:
raise Exception('This email is already assigned to another User')
user = self.model(
email=self.normalize_email(email),
username=kwargs.get('username', '')
)
user.set_password(password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractUser):
email = models.EmailField(
verbose_name="email address",
error_messages={
'unique': "A user with that email already exists.",
},
help_text="Required. 150 characters or fewer.",
max_length=150,
unique=True,
validators=[EmailValidator],
)
username = models.CharField(
verbose_name="username",
max_length=150,
blank=True,
help_text="150 characters or fewer. Letters, digits and @/./+/-/_ only.",
validators=[UnicodeUsernameValidator]
)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
abstract = True
| 2.78125
| 3
|
next/apps/AppDashboard.py
|
sumeetsk/NEXT-1
| 0
|
12782524
|
<reponame>sumeetsk/NEXT-1
import json
import numpy
import numpy.random
from datetime import datetime
from datetime import timedelta
import next.utils as utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import mpld3
MAX_SAMPLES_PER_PLOT = 100
class AppDashboard(object):
def __init__(self, db, ell):
self.db = db
self.ell = ell
def basic_info(self,app,butler):
"""
returns basic statistics like number of queries, participants, etc.
"""
experiment_dict = butler.experiment.get()
#git_hash = rm.get_git_hash_for_exp_uid(exp_uid)
git_hash = experiment_dict.get('git_hash','None')
# start_date = utils.str2datetime(butler.admin.get(uid=app.exp_uid)['start_date'])
start_date = experiment_dict.get('start_date','Unknown')+' UTC'
# participant_uids = rm.get_participant_uids(exp_uid)
participants = butler.participants.get(pattern={'exp_uid':app.exp_uid})
num_participants = len(participants)
queries = butler.queries.get(pattern={'exp_uid':app.exp_uid})
num_queries = len(queries)
return_dict = {'git_hash':git_hash,
'exp_start_data':start_date,
'num_participants':num_participants,
'num_queries':num_queries,
'meta':{'last_dashboard_update':'<1 minute ago'}}
return return_dict
def api_activity_histogram(self, app, butler):
"""
Description: returns the data to plot all API activity (for all algorithms) in a histogram with respect to time for any task in {getQuery,processAnswer,predict}
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
queries = butler.queries.get(pattern={'exp_uid':app.exp_uid})
#self.db.get_docs_with_filter(app_id+':queries',{'exp_uid':exp_uid})
start_date = utils.str2datetime(butler.admin.get(uid=app.exp_uid)['start_date'])
numerical_timestamps = [(utils.str2datetime(item['timestamp_query_generated'])-start_date).total_seconds()
for item in queries]
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#FFFFFF'),figsize=(12,1.5))
ax.hist(numerical_timestamps,min(int(1+4*numpy.sqrt(len(numerical_timestamps))),300),alpha=0.5,color='black')
ax.set_frame_on(False)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.get_yaxis().set_visible(False)
ax.set_xlim(0, max(numerical_timestamps))
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
def compute_duration_multiline_plot(self, app, butler, task):
"""
Description: Returns multiline plot where there is a one-to-one mapping lines to
algorithms and each line indicates the durations to complete the task (wrt to the api call)
Expected input:
(string) task : must be in {'getQuery','processAnswer','predict'}
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
alg_list = butler.experiment.get(key='args')['alg_list']
x_min = numpy.float('inf')
x_max = -numpy.float('inf')
y_min = numpy.float('inf')
y_max = -numpy.float('inf')
list_of_alg_dicts = []
for algorithm in alg_list:
alg_label = algorithm['alg_label']
list_of_log_dict,didSucceed,message = butler.ell.get_logs_with_filter(app.app_id+':ALG-DURATION',
{'exp_uid':app.exp_uid,'alg_label':alg_label,'task':task})
list_of_log_dict = sorted(list_of_log_dict, key=lambda item: utils.str2datetime(item['timestamp']) )
x = []
y = []
t = []
k=0
for item in list_of_log_dict:
k+=1
x.append(k)
y.append( item.get('app_duration',0.) + item.get('duration_enqueued',0.) )
t.append(str(item['timestamp'])[:-3])
x = numpy.array(x)
y = numpy.array(y)
t = numpy.array(t)
num_items = len(list_of_log_dict)
multiplier = min(num_items,MAX_SAMPLES_PER_PLOT)
incr_inds = [ r*num_items/multiplier for r in range(multiplier)]
max_inds = list(numpy.argsort(-y)[0:multiplier])
final_inds = sorted(set(incr_inds + max_inds))
x = list(x[final_inds])
y = list(y[final_inds])
t = list(t[final_inds])
alg_dict = {}
alg_dict['legend_label'] = alg_label
alg_dict['x'] = x
alg_dict['y'] = y
alg_dict['t'] = t
try:
x_min = min(x_min,min(x))
x_max = max(x_max,max(x))
y_min = min(y_min,min(y))
y_max = max(y_max,max(y))
except:
pass
list_of_alg_dicts.append(alg_dict)
return_dict = {}
return_dict['data'] = list_of_alg_dicts
return_dict['plot_type'] = 'multi_line_plot'
return_dict['x_label'] = 'API Call'
return_dict['x_min'] = x_min
return_dict['x_max'] = x_max
return_dict['y_label'] = 'Duration (s)'
return_dict['y_min'] = y_min
return_dict['y_max'] = y_max
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))
for alg_dict in list_of_alg_dicts:
ax.plot(alg_dict['x'],alg_dict['y'],label=alg_dict['legend_label'])
ax.set_xlabel('API Call')
ax.set_ylabel('Duration (s)')
ax.set_xlim([x_min,x_max])
ax.set_ylim([y_min,y_max])
ax.grid(color='white', linestyle='solid')
ax.set_title(task, size=14)
legend = ax.legend(loc=2,ncol=3,mode="expand")
for label in legend.get_texts():
label.set_fontsize('small')
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
def compute_duration_detailed_stacked_area_plot(self,app,butler,task,alg_label,detailedDB=False):
"""
Description: Returns a stacked area plot for a particular algorithm and task where the durations
are broken down into compute, db_set, db_get (for cpu, database_set, database_get)
Expected input:
(string) task : must be in {'getQuery','processAnswer','predict'}
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
list_of_log_dict,didSucceed,message = butler.ell.get_logs_with_filter(app.app_id+':ALG-DURATION',
{'exp_uid':app.exp_uid,'alg_label':alg_label,'task':task})
list_of_log_dict = sorted(list_of_log_dict, key=lambda item: utils.str2datetime(item['timestamp']) )
y = []
for item in list_of_log_dict:
y.append( item.get('app_duration',0.) + item.get('duration_enqueued',0.) )
y = numpy.array(y)
num_items = len(list_of_log_dict)
multiplier = min(num_items,MAX_SAMPLES_PER_PLOT)
incr_inds = [ k*num_items/multiplier for k in range(multiplier)]
max_inds = list(numpy.argsort(-y)[0:multiplier])
final_inds = sorted(set(incr_inds + max_inds))
x = []
t = []
enqueued = []
admin = []
dbGet = []
dbSet = []
compute = []
max_y_value = 0.
min_y_value = float('inf')
for idx in final_inds:
item = list_of_log_dict[idx]
x.append(idx+1)
t.append(str(item.get('timestamp','')))
_alg_duration = item.get('duration',0.)
_alg_duration_dbGet = item.get('duration_dbGet',0.)
_alg_duration_dbSet = item.get('duration_dbSet',0.)
_duration_enqueued = item.get('duration_enqueued',0.)
_app_duration = item.get('app_duration',0.)
if (_app_duration+_duration_enqueued) > max_y_value:
max_y_value = _app_duration + _duration_enqueued
if (_app_duration+_duration_enqueued) < min_y_value:
min_y_value = _app_duration + _duration_enqueued
enqueued.append(_duration_enqueued)
admin.append(_app_duration-_alg_duration)
dbSet.append(_alg_duration_dbSet)
dbGet.append(_alg_duration_dbGet)
compute.append( _alg_duration - _alg_duration_dbSet - _alg_duration_dbGet )
try:
min_x = min(x)
max_x = max(x)
except:
min_x = 0.
max_x = 0.
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))
stack_coll = ax.stackplot(x,compute,dbGet,dbSet,admin,enqueued, alpha=.5)
ax.set_xlabel('API Call')
ax.set_ylabel('Duration (s)')
ax.set_xlim([min_x,max_x])
ax.set_ylim([0.,max_y_value])
ax.grid(color='white', linestyle='solid')
ax.set_title(alg_label+' - '+task, size=14)
proxy_rects = [plt.Rectangle((0, 0), 1, 1, alpha=.5,fc=pc.get_facecolor()[0]) for pc in stack_coll]
legend = ax.legend(proxy_rects, ['compute','dbGet','dbSet','admin','enqueued'],loc=2,ncol=3,mode="expand")
for label in legend.get_texts():
label.set_fontsize('small')
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
def response_time_histogram(self,app,butler,alg_label):
"""
Description: returns the data to plot the response time histogram of processAnswer for each algorithm
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
list_of_query_dict,didSucceed,message = self.db.get_docs_with_filter(app.app_id+':queries',{'exp_uid':app.exp_uid,'alg_label':alg_label})
t = []
for item in list_of_query_dict:
try:
t.append(item['response_time'])
except:
pass
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#FFFFFF'))
ax.hist(t, bins=min(len(t), MAX_SAMPLES_PER_PLOT), range=(0,30),alpha=0.5,color='black')
ax.set_xlim(0, 30)
ax.set_axis_off()
ax.set_xlabel('Durations (s)')
ax.set_ylabel('Count')
ax.set_title(alg_label + " - response time", size=14)
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
def network_delay_histogram(self, app, butler, alg_label):
"""
Description: returns the data to plot the network delay histogram, i.e. the time it takes to getQuery+processAnswer, for each algorithm
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
list_of_query_dict,didSucceed,message = self.db.get_docs_with_filter(app.app_id+':queries',{'exp_uid':app.exp_uid,'alg_label':alg_label})
t = []
for item in list_of_query_dict:
try:
t.append(item['network_delay'])
except:
pass
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#FFFFFF'))
ax.hist(t,MAX_SAMPLES_PER_PLOT,range=(0,5),alpha=0.5,color='black')
ax.set_xlim(0, 5)
ax.set_axis_off()
ax.set_xlabel('Durations (s)')
ax.set_ylabel('Count')
ax.set_title(alg_label + " - network delay", size=14)
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
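# Illustrative sketch, not part of the original module: each method above
# returns an mpld3 figure dictionary, so a caller could serialize one for a
# web dashboard roughly like this (the `app`/`butler` objects are assumed to
# come from the surrounding NEXT infrastructure):
#
#   dashboard = AppDashboard(db, ell)
#   plot_dict = dashboard.api_activity_histogram(app, butler)
#   payload = json.dumps(plot_dict)  # handed to mpld3.draw_figure() in the browser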
| 2.328125
| 2
|
signal_ocean/vessels/models.py
|
SignalOceanSdk/SignalSDK
| 10
|
12782525
|
"""Models instantiated by the vessels api."""
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
@dataclass(frozen=True)
class VesselClass:
"""Vessel class characteristics.
Detailed characteristics of each vessel class, including its defining
measurement and the range that corresponds to this vessel class.
Attributes:
id: The vessel class id e.g. 81 (refers to Panamax), 86 (Aframax), 85
(Suezmax).
vessel_type_id: Numeric ID corresponding to the different values of the
VesselType field. 1 -> Tanker, 3 -> Dry, 4 -> Containers,
5 -> LNG (Liquified Natural gas), 6 -> LPG (Liquified Petroleum Gas).
from_size: The minimum value that corresponds to this vessel class
(Deadweight/TEU/CubicSize).
to_size: The maximum value that corresponds to this vessel class
(Deadweight/TEU/CubicSize).
name: The vessel class e.g. Panamax, Aframax, Suezmax.
vessel_type: Description of the type of the vessel, based on the
carried cargo. Main categories are Tankers, Dry (bulk carriers),
Containers, LNG and LPG.
defining_size: The attribute(DeadWeight, TEU, CubicSize) that defines
the size of the vesselClass.
size: The units of the DefiningSize attribute. DeadWeight->
kt(kilotons), TEU-> TEU, CubicSize-> cbm(cubic meters).
"""
id: int
vessel_type_id: int
from_size: int
to_size: int
name: Optional[str] = None
vessel_type: Optional[str] = None
defining_size: Optional[str] = None
size: Optional[str] = None
@dataclass(frozen=True)
class VesselType:
"""A vessel type.
Attributes:
id: The vessel type id, e.g. 1 -> Tanker, 3 -> Dry, 4 -> Containers,
5 -> LNG (Liquified Natural gas),
6-> LPG (Liquified Petroleum Gas).
name: The vessel type name, e.g. Tanker, Dry, Containers,
LNG (Liquified Natural gas), LPG (Liquified Petroleum Gas).
"""
id: int
name: str
@dataclass(frozen=True)
class Vessel:
"""Contains all details of a vessel.
Attributes:
imo: A seven-digits number that uniquely identifies a ship and does not
change when the ship's owner, country of registry or name of the
vessel changes.
vessel_type_id: Numeric ID corresponding to the different values of the
VesselType field. 1 -> Tanker, 3 -> Dry, 4 -> Containers, 5 ->
LNG (Liquified Natural gas), 6 -> LPG (Liquified Petroleum Gas).
built_for_trade_id: Numeric ID corresponding to the different values of
the BuiltForTrade field. 1 -> Crude, 2 -> Product, 3 -> Chemical.
trade_id: Numeric ID that takes the same values as the BuiltForTradeID
field. 1 -> Crude, 2 -> Product, 3 -> Chemical.
vessel_class_id: Numeric ID corresponding to the different vessel
classes of a certain vessel type.
commercial_operator_id: Numeric ID corresponding to the maritime
company that manages the vessel commercially.
deadweight: Numeric, measured in tonnes [t], often shortened as DWT,
denotes the total carrying capacity of the vessel including cargo,
ballast water, stores, provisions, crew and so on.
breadth_extreme: Numeric, measured in meters [m], denotes the width of
a ship over the outside of all planking or plating at the widest
frame.
gross_rated_tonnage: Numeric, measured in register tons, often
shortened as GRT, denotes the sum of all the closed and/or closable
spaces.
reduced_gross_tonnage: Numeric, measured in register tons, often
shortened as RGT, denotes a measure applicable for open-top
container ships and tankers with a double hull (ships equipped with
segregated ballast tanks). This quantity can be used to compute
various tonnage-based fees.
net_rated_tonnage: Numeric, measured in register tons, often shortened
as NRT, denotes the difference between the GRT and the sum of all
spaces which are not used for the purpose for which the ship is
built.
draught: Numeric, measured in meters [m], denotes the distance between
the ship’s keel and the waterline of the vessel. As the
instantaneous draught of a vessel is a function of the vessel's
loading status, this vessel characteristics refers to the maximum
draught of the vessel.
length_overall: Numeric, measured in meters [m], denotes the vessel's
maximum length between the extremes points, forward and aft.
moulded_depth: Numeric, measured in meters [m], denotes the vertical
distance between the moulded base line and the top of the beams of
the uppermost continuous deck.
year_built: Numeric, year format, the year the vessel was built.
geared: Boolean, denotes whether the vessel has cranes installed for
handling its cargo or not.
clean_dirty_willing: Boolean, indicates whether a tanker vessel is
‘willing’ to compete in the market complementary to the one shown
in Trade. For example an LR willing dirty will have Trade=Product
and CleanDirtyWilling=true.
main_engine_manufacturer_id: Numeric ID corresponding to the different
values of the MainEngine field. 1 -> MAN B&W, 2 -> Wartsila, 3 ->
Mitsubishi.
classification_register_id: The id of the classification register.
Default value: -2.
updated_date: Date, format YYYY-MM-DD HH:MM:SS, corresponding to the
latest update.
vessel_name: The current vessel name corresponding to that IMO.
call_sign: Alphanumeric code that uniquely identifies a vessel and is
used for radio communication with land based operators or stations
and between the vessels.
vessel_type: Description of the type of the vessel, based on the
carried cargo. Main categories are Tankers, Dry (bulk carriers),
Containers, LNG and LPG.
built_for_trade: Additional attribute to specify a Tanker vessel with
finer granularity. This classification is derived by the vessel
characteristics only. It indicates the initial cargo the vessel was
designed for, here called "trade". For example, an LR2 is a vessel
of VesselClass Aframax and BuiltForTrade Clean.
trade: Time-dependent version of the attribute BuiltForTrade. It is
specified by the last cargo carried by the vessel at the time of
query. For example, an LR2 with fueloil as last cargo has
BuiltForTrade = Crude and Trade = Product.
vessel_class: Name of the vessel class the vessel belongs to.
Assignment of a vessel to a certain VesselClass is based on the
VesselType and the value of its Deadweight (if Tanker or Dry), its
LiquidCap (if LNG/LPG) or its TEU (if Containers). For example, an
Aframax is a Tanker vessel with Deadweight within the range 82kt -
125kt, while a Capesize is a Dry vessel with Deadweight within the
range 120kt-220kt. LR2 are defined as Aframax, as only Deadweight
is used to define vessel classes.
flag_code: ISO 3166-1 alpha-2 code representing the vessel's country of
registration.
flag: The country where the vessel has been registered and to whose law
it is subject.
commercial_operator: Name of the maritime company that manages the
vessel commercially.
built_country_code: Two letters code representing the country where the
vessel was built.
built_country_name: String, the name of the country where the vessel
was built.
scrapped_date: Date, with format YYYY-MM-DD, indicates when the vessel
was scrapped. If the vessel is active, ScrappedDate is null.
shipyard_built_id: Numeric ID corresponding to the geo location where
the vessel was built, for example the specific shipyard.
shipyard_built_name: String, the name of the shipyard where the vessel
was built, e.g. Hyundai Heavy Industries Co.
ice_class: Alphanumeric code that denotes the vessel's additional level
of strengthening as well as other arrangements that make navigation
through frozen seas possible. For example 1A, 1D, etc.
cranes_ton_capacity: Numeric, measured in tonnes [t], denotes the
capacity of the vessel's cranes whenever applicable.
teu: Numeric, measured in TEU (Twenty-Foot Equivalent Unit), denotes a
volumetric measure of a container's cargo carrying capacity. Used
for Containers, that is vessels with VesselType=4.
te_u14: Numeric, denotes the capacity of the vessel measured in twenty-
foot equivalent units (TEU) loaded at 14 tons.
reefers: Numeric, denotes the capacity of the vessel measured in
refrigerated twenty-foot equivalent units (TEU), i.e., the maximum
number of refrigerated containers that could be carried.
panama_canal_net_tonnage: Numeric, measured in register tons,
volumetric measure derived by the NRT (NetRatedTonnage) and
modified for Panama Canal purposes. Often used to compute tonnage-
based fees.
cubic_size: Numeric, measured in cubic meters [cbm] denotes the
carrying capacity of Gas vessels (LNG, LPG). For tankers it is the
volume of cargo tanks.
scrubbers_date: Date, format YYYY-MM-DD HH:MM:SS, best estimate of the
scrubbers installation date.
summer_tpc: Numeric, measured in [metric tonnes/cm], acronym of Summer
Tonnes Per Centimeter, denotes the cargo in metric tonnes (10^3 kg)
needed to further increase the vessel's salt water draught by one
centimeter.
lightship_tonnes: The weight of the vessels without any cargo or
bunkers. It is an important parameter to estimate the scrap value
of the vessel as it represents the amount of steel that can be
recycled.
main_engine_manufacturer: String denoting the brand of the vessel's
main engine.
delivery_date: Date, with format YYYY-MM-DD, indicates when the vessel
was delivered to the owner and commenced its first voyage.
classification_register: The name of the organization that issued the
vessel's classification certificate. Default value: Not set.
number_of_holds: Numeric, the number of separate enclosed spaces within
a ship designed for storing cargo.
grain_capacity: This is the space available for a liquid-type cargo,
like bulk grain, which can flow into every corner.
bale_capacity: This is the space available for solid cargo. Bale space
is usually about 7–10% less than grain space.
"""
imo: int
vessel_type_id: int
built_for_trade_id: int
trade_id: int
vessel_class_id: int
commercial_operator_id: int
deadweight: int
breadth_extreme: int
gross_rated_tonnage: int
reduced_gross_tonnage: int
net_rated_tonnage: int
draught: float
length_overall: float
moulded_depth: float
year_built: int
geared: bool
clean_dirty_willing: bool
main_engine_manufacturer_id: int
classification_register_id: int
updated_date: datetime
vessel_name: Optional[str] = None
call_sign: Optional[str] = None
vessel_type: Optional[str] = None
built_for_trade: Optional[str] = None
trade: Optional[str] = None
vessel_class: Optional[str] = None
flag_code: Optional[str] = None
flag: Optional[str] = None
commercial_operator: Optional[str] = None
built_country_code: Optional[str] = None
built_country_name: Optional[str] = None
scrapped_date: Optional[datetime] = None
shipyard_built_id: Optional[int] = None
shipyard_built_name: Optional[str] = None
ice_class: Optional[str] = None
cranes_ton_capacity: Optional[int] = None
teu: Optional[int] = None
te_u14: Optional[int] = None
reefers: Optional[int] = None
panama_canal_net_tonnage: Optional[int] = None
cubic_size: Optional[int] = None
scrubbers_date: Optional[datetime] = None
summer_tpc: Optional[float] = None
lightship_tonnes: Optional[int] = None
main_engine_manufacturer: Optional[str] = None
delivery_date: Optional[datetime] = None
classification_register: Optional[str] = None
number_of_holds: Optional[int] = None
grain_capacity: Optional[int] = None
bale_capacity: Optional[int] = None
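# Illustrative sketch, not part of the original module: these frozen
# dataclasses are normally populated from API responses, but they can be
# constructed directly for tests (the numeric values below are made up for
# illustration):
#
#   aframax = VesselClass(id=86, vessel_type_id=1, from_size=82000,
#                         to_size=124999, name="Aframax", vessel_type="Tanker",
#                         defining_size="DeadWeight", size="kt")
#   tanker = VesselType(id=1, name="Tanker")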
| 3.46875
| 3
|
omnipresence/connection.py
|
kxz/omnipresence
| 0
|
12782526
|
<reponame>kxz/omnipresence<gh_stars>0
# -*- test-case-name: omnipresence.test.test_connection -*-
"""Core IRC connection protocol class and supporting machinery."""
from collections import defaultdict
import re
from weakref import WeakSet
from twisted.internet import reactor
from twisted.internet.defer import (DeferredList, maybeDeferred,
inlineCallbacks, returnValue)
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.logger import Logger
from twisted.words.protocols.irc import IRCClient
from . import __version__, __source__
from .case_mapping import CaseMapping, CaseMappedDict
from .compat import length_hint
from .hostmask import Hostmask
from .message import Message, MessageType
from .message.buffering import ReplyBuffer, truncate_unicode
from .message.parser import IRCV2_PARSER
from .plugin import UserVisibleError
from .settings import ConnectionSettings, PRIVATE_CHANNEL
#: The maximum length of a single command reply, in bytes.
MAX_REPLY_LENGTH = 288
class ConnectionBase(IRCClient, object):
"""Provides fundamental functionality for connection mixins."""
log = Logger()
# Instance variables handled by IRCClient.
versionName = 'Omnipresence'
versionNum = __version__
sourceURL = __source__
#: The maximum acceptable lag, in seconds. If this amount of time
#: elapses following a PING from the client with no PONG response
#: from the server, the connection has timed out. (The timeout
#: check only occurs at every `.heartbeatInterval`, so actual
#: disconnection intervals may vary by up to one heartbeat.)
max_lag = 150
#: The number of seconds to wait between sending successive PINGs
#: to the server. This overrides a class variable in Twisted's
#: implementation, hence the unusual capitalization.
heartbeatInterval = 60
def __init__(self):
#: The `.ConnectionFactory` that created this client, if any.
self.factory = None
#: The settings in use on this client.
self.settings = ConnectionSettings()
self.nickname = self.versionName
#: The reactor in use on this client. This may be overridden
#: when a deterministic clock is needed, such as in unit tests.
self.reactor = reactor
#: The time of the last PONG seen from the server.
self.last_pong = None
#: An `~twisted.internet.interfaces.IDelayedCall` used to detect
#: timeouts that occur after connecting to the server, but
#: before receiving the ``RPL_WELCOME`` message that starts the
#: normal PING heartbeat.
self.signon_timeout = None
self.log.info('Assuming default CASEMAPPING "rfc1459"')
#: The `.CaseMapping` currently in effect on this connection.
#: Defaults to ``rfc1459`` if none is explicitly provided by the
#: server.
self.case_mapping = CaseMapping.by_name('rfc1459')
# Utility methods
def _case_mapped_dict(self, initial=None):
"""Return a `.CaseMappedDict` using this connection's current
case mapping."""
return CaseMappedDict(initial, case_mapping=self.case_mapping)
def _lower(self, string):
"""Convenience alias for ``self.case_mapping.lower``."""
return self.case_mapping.lower(string)
def _upper(self, string):
"""Convenience alias for ``self.case_mapping.upper``."""
return self.case_mapping.upper(string)
def is_channel(self, name):
"""Return `True` if *name* belongs to a channel, according to
the server-provided list of channel prefixes, or `False`
otherwise."""
# We can assume the CHANTYPES feature will always be present,
# since Twisted gives it a default value.
return name[0] in self.supported.getFeature('CHANTYPES')
# Connection maintenance
def connectionMade(self):
"""See `IRCClient.connectionMade`."""
self.log.info('Connected to server')
super(ConnectionBase, self).connectionMade()
self.signon_timeout = self.reactor.callLater(
self.max_lag, self.signon_timed_out)
def signon_timed_out(self):
"""Called when a timeout occurs after connecting to the server,
but before receiving the ``RPL_WELCOME`` message that starts the
normal PING heartbeat."""
self.log.info('Sign-on timeout ({log_source.max_lag} seconds); '
'disconnecting')
self.transport.abortConnection()
def _createHeartbeat(self):
heartbeat = super(ConnectionBase, self)._createHeartbeat()
heartbeat.clock = self.reactor
return heartbeat
def _sendHeartbeat(self):
lag = self.reactor.seconds() - self.last_pong
if lag > self.max_lag:
self.log.info('Ping timeout ({lag} > {log_source.max_lag} '
'seconds); disconnecting', lag=lag)
self.transport.abortConnection()
return
super(ConnectionBase, self)._sendHeartbeat()
def startHeartbeat(self):
self.last_pong = self.reactor.seconds()
super(ConnectionBase, self).startHeartbeat()
def irc_PONG(self, prefix, secs):
self.last_pong = self.reactor.seconds()
def connectionLost(self, reason):
"""See `IRCClient.connectionLost`."""
self.log.info('Disconnected from server')
super(ConnectionBase, self).connectionLost(reason)
# Callbacks inherited from IRCClient
def isupport(self, options):
"""See `IRCClient.isupport`."""
# Update the connection case mapping if one is available.
case_mappings = self.supported.getFeature('CASEMAPPING')
if case_mappings:
name = case_mappings[0]
try:
case_mapping = CaseMapping.by_name(name)
except ValueError:
self.log.info('Ignoring unsupported server CASEMAPPING '
'"{name}"', name=name)
else:
if self.case_mapping != case_mapping:
self.case_mapping = case_mapping
self.settings.set_case_mapping(self.case_mapping)
self.log.info('Using server-provided CASEMAPPING '
'"{name}"', name=name)
def privmsg(self, prefix, channel, message):
"""See `IRCClient.privmsg`."""
if not self.is_channel(channel):
self.log.info('Message from {prefix} for {channel}: {message}',
prefix=prefix, channel=channel, message=message)
def joined(self, channel):
"""See `IRCClient.joined`."""
self.log.info('Successfully joined {channel}', channel=channel)
def left(self, channel):
"""See `IRCClient.left`."""
self.log.info('Leaving {channel}', channel=channel)
def noticed(self, prefix, channel, message):
"""See `IRCClient.noticed`."""
if not self.is_channel(channel):
self.log.info('Notice from {prefix} for {channel}: {message}',
prefix=prefix, channel=channel, message=message)
def signedOn(self):
"""See `IRCClient.signedOn`."""
self.log.info('Successfully signed on to server')
if self.signon_timeout:
self.signon_timeout.cancel()
# Resetting the connection delay when a successful connection is
# made, instead of at IRC sign-on, overlooks situations such as
# host bans where the server accepts a connection and then
# immediately disconnects the client. In these cases, the delay
# should continue to increase, especially if the problem is that
# there are too many connections!
if self.factory:
self.factory.resetDelay()
def kickedFrom(self, channel, kicker, message):
"""See `IRCClient.kickedFrom`."""
self.log.info('Kicked from {channel} by {kicker}: {message}',
channel=channel, kicker=kicker, message=message)
def join(self, channel, key=None):
"""See `IRCClient.join`."""
self.log.info('Joining {channel}', channel=channel)
super(ConnectionBase, self).join(channel, key=key)
#
# Command replies
#
def reply(self, string, request, tail=''):
"""Send a reply *string*, truncated to `MAX_REPLY_LENGTH`
characters, with `tail` appended. If the request venue is a
channel, send the reply to the venue as a standard message
addressed to *request*'s `~.Message.target`, formatted using the
`~.Message.venue`'s reply format. Otherwise, send the reply as
a notice to *request*'s `~.Message.actor`."""
if not string:
return
string = string.replace('\n', ' / ')
if isinstance(string, unicode):
encoding = request.encoding
truncated = truncate_unicode(string, MAX_REPLY_LENGTH, encoding)
if truncated != string:
truncated += u'...'
string = truncated.encode(encoding)
else:
if len(string) > MAX_REPLY_LENGTH:
string = string[:MAX_REPLY_LENGTH] + '...'
string += tail
if request.private:
self.log.info('Private reply for {request.actor.nick}: {string}',
request=request, string=string)
self.notice(request.actor.nick, string)
return
self.log.info('Reply for {request.actor.nick} in {request.venue}: '
'{string}', request=request, string=string)
reply_format = request.settings.get(
'reply_format', default='\x0314{target}: {message}')
self.msg(request.venue, reply_format.format(
target=request.target, message=string))
def reply_from_error(self, failure, request):
"""Call `.reply` with information on a *failure* that occurred
in the callback invoked to handle a command request. If
*failure* wraps a `.UserVisibleError`, or the ``show_errors``
configuration option is true, reply with its exception string.
Otherwise, log the error and reply with a generic message.
This method is automatically called whenever an unhandled
exception occurs in a plugin's command callback, and should
never need to be invoked manually.
"""
error_request = request._replace(target=request.actor.nick)
if failure.check(UserVisibleError):
self.reply(failure.getErrorMessage(), error_request)
return
message = 'Command \x02{}\x02 encountered an error'.format(
request.subaction)
if request.settings.get('show_errors', default=False):
message += ': \x02{}\x02'.format(failure.getErrorMessage())
self.log.failure('Error during command callback: '
'{request.subaction} {request.content}',
failure=failure, request=request)
self.reply(message + '.', error_request)
#
# State tracking
#
USER_MODE_PREFIX = re.compile(r'^[^A-Za-z0-9\-\[\]\\`^{}]+')
class VenueUserInfo(object):
"""A container for information about a user's state in a particular
venue."""
def __init__(self):
#: This user's current channel reply buffer.
self.reply_buffer = []
class VenueInfo(object):
"""A container for information about a venue."""
def __init__(self, case_mapping=None):
#: A dictionary mapping nicks to `.VenueUserInfo` objects.
self.nicks = CaseMappedDict(case_mapping=case_mapping)
#: This channel's topic, or the empty string if none is set.
self.topic = ''
#: A mapping of modes currently active on this channel to one of
#: `False` (not set or invalid), `True` (set, for modes that
#: take no arguments), a single number or string, or a set of
#: `Hostmask` objects.
#
# TODO: Resolve how to store unset modes that aren't just
# on/off. What's a sane default value for a numeric or string
# mode argument?
self.modes = defaultdict(bool)
def add_nick(self, nick):
self.nicks.setdefault(nick, VenueUserInfo())
def remove_nick(self, nick):
self.nicks.pop(nick, None)
class StateTrackingMixin(object):
"""A connection mixin providing venue state tracking."""
def __init__(self):
super(StateTrackingMixin, self).__init__()
self._clear_venues()
def _clear_venues(self):
"""Reset this mixin's venue information."""
#: A mapping of venue names to `VenueInfo` objects.
self.venues = CaseMappedDict(case_mapping=self.case_mapping)
self.venues[PRIVATE_CHANNEL] = VenueInfo(
case_mapping=self.case_mapping)
def isupport(self, options):
"""See `IRCClient.isupport`."""
# If the case mapping changed, update any CaseMappedDict objects
# we know about.
old_case_mapping = self.case_mapping
super(StateTrackingMixin, self).isupport(options)
if self.case_mapping != old_case_mapping:
self.venues = CaseMappedDict(self.venues,
case_mapping=self.case_mapping)
for venue_info in self.venues.itervalues():
venue_info.nicks = CaseMappedDict(
venue_info.nicks, case_mapping=self.case_mapping)
def joined(self, channel):
"""See `IRCClient.joined`."""
super(StateTrackingMixin, self).joined(channel)
self.venues[channel] = VenueInfo(case_mapping=self.case_mapping)
def modeChanged(self, user, channel, enable, modes, args):
"""See `IRCClient.modeChanged`."""
# TODO: This needs to inspect CHANMODES and PREFIX to determine
# how to handle a particular channel mode. Right now, it treats
# all modes as binary, which is obviously wrong.
for mode in modes:
self.venues[channel].modes[mode] = enable
def irc_RPL_NAMREPLY(self, prefix, params):
channel = params[2]
names = params[3].split()
self.names_arrived(channel, names)
def names_arrived(self, channel, names):
# Liberally strip out all user mode prefixes such as @%+. Some
# networks support more prefixes, so this removes any prefixes
# with characters not valid in nicknames.
for nick in names:
nick = USER_MODE_PREFIX.sub('', nick)
self.venues[channel].add_nick(nick)
def userJoined(self, prefix, channel):
"""See `IRCClient.userJoined`."""
super(StateTrackingMixin, self).userJoined(prefix, channel)
self.venues[channel].add_nick(Hostmask.from_string(prefix).nick)
def userLeft(self, prefix, channel):
"""See `IRCClient.userLeft`."""
super(StateTrackingMixin, self).userLeft(prefix, channel)
self.venues[channel].remove_nick(Hostmask.from_string(prefix).nick)
def userQuit(self, nick, quitMessage):
"""See `IRCClient.userQuit`."""
super(StateTrackingMixin, self).userQuit(nick, quitMessage)
for venue_info in self.venues.itervalues():
venue_info.remove_nick(nick)
def userKicked(self, kickee, channel, kicker, message):
"""See `IRCClient.userKicked`."""
super(StateTrackingMixin, self).userKicked(
kickee, channel, kicker, message)
# Our own kicks are echoed back to us, so we don't need to do
# anything special for them.
del self.venues[channel].nicks[kickee]
def _renamed(self, old, new):
"""Called when a user changes nicknames."""
for venue_info in self.venues.itervalues():
if old in venue_info.nicks:
venue_info.nicks[new] = venue_info.nicks[old]
venue_info.remove_nick(old)
else: # must have been asleep at the wheel
venue_info.add_nick(new)
def userRenamed(self, old, new):
"""See `IRCClient.userRenamed`."""
super(StateTrackingMixin, self).userRenamed(old, new)
self._renamed(old, new)
def setNick(self, new):
"""See `IRCClient.setNick`."""
super(StateTrackingMixin, self).setNick(new)
self._renamed(self.nickname, new)
def topicUpdated(self, nick, channel, topic):
"""See `IRCClient.topicUpdated`."""
self.venues[channel].topic = topic
def left(self, channel):
"""See `IRCClient.left`."""
super(StateTrackingMixin, self).left(channel)
del self.venues[channel]
def kickedFrom(self, channel, kicker, message):
"""See `IRCClient.kickedFrom`."""
super(StateTrackingMixin, self).kickedFrom(channel, kicker, message)
del self.venues[channel]
def quit(self, message=''):
"""See `IRCClient.quit`."""
super(StateTrackingMixin, self).quit(message)
self._clear_venues()
#
# Join suspension
#
class JoinSuspensionMixin(object):
"""A connection mixin providing join suspension."""
def __init__(self):
super(JoinSuspensionMixin, self).__init__()
#: If joins are suspended, a list of channels to join when joins
#: are resumed. Otherwise, `None`.
self.suspended_joins = None
def suspend_joins(self):
"""Suspend all channel joins until `.resume_joins` is called."""
# If suspended_joins is not None, then we've already suspended
# joins for this client, and we shouldn't clobber the queue.
if self.suspended_joins is not None:
return
self.log.info('Suspending channel joins')
self.suspended_joins = []
def resume_joins(self):
"""Resume immediate joining of channels after suspending it with
`.suspend_joins`, and perform any channel joins that have been
queued in the interim."""
if self.suspended_joins is None:
return
self.log.info('Resuming channel joins')
suspended_joins = self.suspended_joins
self.suspended_joins = None
for channel in suspended_joins:
self.join(channel)
def join(self, channel):
"""Join the given *channel*. If joins have been suspended with
`.suspend_joins`, add the channel to the join queue and actually
join it when `.resume_joins` is called."""
if self.suspended_joins is not None:
self.log.info('Adding {channel} to join queue', channel=channel)
self.suspended_joins.append(channel)
return
super(JoinSuspensionMixin, self).join(channel)
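# Illustrative sketch, not part of the original module: join suspension queues
# channel joins until configuration has settled (`conn` below stands for any
# connection object using this mixin):
#
#   conn.suspend_joins()
#   conn.join('#alpha')    # queued, nothing sent yet
#   conn.join('#beta')     # queued
#   conn.resume_joins()    # both JOIN commands are sent here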
#
# Mix it all together
#
class Connection(StateTrackingMixin,
JoinSuspensionMixin,
ConnectionBase):
"""Omnipresence's core IRC client protocol."""
def __init__(self):
super(Connection, self).__init__()
#: The `.RawMessageParser` being used on this connection.
self.parser = IRCV2_PARSER
#: If the bot is currently firing callbacks, a queue of
#: `.Message` objects for which the bot has yet to fire
#: callbacks. Otherwise, `None`.
self.message_queue = None
def signedOn(self):
"""See `IRCClient.signedOn`."""
super(Connection, self).signedOn()
self.respond_to(Message(self, False, 'connected'))
for channel in self.settings.autojoin_channels:
self.join(channel)
def after_reload(self):
"""Join or part channels after a settings reload."""
for channel in self.settings.autojoin_channels:
if channel not in self.venues:
self.join(channel)
for channel in self.settings.autopart_channels:
if channel in self.venues:
self.leave(channel)
def connectionLost(self, reason):
"""See `IRCClient.connectionLost`."""
self.respond_to(Message(self, False, 'disconnected'))
super(Connection, self).connectionLost(reason)
#
# Event plugin hooks
#
# These are defined down here because they need StateTrackingMixin.
#
def respond_to(self, msg):
"""Start the appropriate event plugin callbacks for *msg*, and
return a `~twisted.internet.defer.DeferredList`."""
if self.message_queue is not None:
# We're already firing callbacks. Bail.
self.message_queue.append(msg)
return
self.message_queue = [msg]
deferreds = []
while self.message_queue:
msg = self.message_queue.pop(0)
# Build the set of plugins that should be fired.
plugins = set()
if msg.action is MessageType.command:
plugins.update(
msg.settings.plugins_by_keyword(msg.subaction))
elif msg.venue:
plugins.update(msg.settings.active_plugins().iterkeys())
elif msg.actor:
# Forward the message only to plugins enabled in at
# least one channel where the actor is present.
#
# This implementation does this by creating a synthetic
# message for every one of those channels and asking the
# settings object for each of those message's active
# plugins. This is an ugly hack and should be replaced
# with something less unsightly.
for channel, venue_info in self.venues.iteritems():
if msg.actor.nick not in venue_info.nicks:
continue
channel_msg = msg._replace(venue=channel)
plugins.update(
channel_msg.settings.active_plugins().iterkeys())
else:
# Neither a venue nor an actor. Forward the message to
# every plugin active on this connection.
plugins.update(self.settings.loaded_plugins.itervalues())
for plugin in plugins:
deferred = plugin.respond_to(msg)
if msg.action is MessageType.command:
deferred.addCallback(self.buffer_and_reply, msg)
deferred.addErrback(self.reply_from_error, msg)
else:
deferred.addErrback(lambda f: self.log.failure(
'Error in plugin {name} responding to {msg}',
failure=f, name=plugin.__class__.name, msg=msg))
deferreds.append(deferred)
# Extract any command invocations and fire events for them.
command_prefixes = tuple( # immutable copy, to be safe
msg.settings.get('command_prefixes', default=[]))
if msg.settings.get('direct_addressing', default=True):
command_prefixes += (self.nickname + ':',
self.nickname + ',')
command_msg = msg.extract_command(prefixes=command_prefixes)
if command_msg is not None:
# Get the command message in immediately after the
# current privmsg, as they come from the same event.
self.message_queue.insert(0, command_msg)
self.message_queue = None
return DeferredList(deferreds)
@inlineCallbacks
def buffer_and_reply(self, response, request):
"""Add the :ref:`command reply <command-replies>` *response* to
the appropriate user's reply buffer according to the invocation
`.Message` *request*, and reply with the first message."""
venue = PRIVATE_CHANNEL if request.private else request.venue
venue_info = self.venues[venue]
if response is None:
if request.actor.nick in venue_info.nicks:
del venue_info.nicks[request.actor.nick]
returnValue(None)
buf = ReplyBuffer(response, request)
reply_string = (yield maybeDeferred(next, buf, None)) or 'No results.'
remaining = length_hint(buf)
tail = ' (+{} more)'.format(remaining) if remaining else ''
venue_info.nicks.setdefault(request.actor.nick, VenueUserInfo())
venue_info.nicks[request.actor.nick].reply_buffer = buf
self.reply(reply_string, request, tail=tail)
def _lineReceived(self, line):
# Twisted doesn't like it when `lineReceived` returns a value,
# but we need to do so for some unit tests.
deferred = self.respond_to(self.parser.parse(self, False, line))
super(Connection, self).lineReceived(line)
return deferred
def lineReceived(self, line):
"""Overrides `.IRCClient.lineReceived`."""
self._lineReceived(line)
def sendLine(self, line):
"""Overrides `.IRCClient.sendLine`."""
deferred = self.respond_to(self.parser.parse(
self, True, line, actor=self.nickname))
super(Connection, self).sendLine(line)
return deferred
class ConnectionFactory(ReconnectingClientFactory):
"""Creates `.Connection` instances."""
protocol = Connection
log = Logger()
def __init__(self):
#: The `ConnectionSettings` object associated with this factory.
self.settings = ConnectionSettings()
#: A `WeakSet` containing associated `Connection` objects.
self.protocols = WeakSet()
def startedConnecting(self, connector):
self.log.info('Attempting to connect to server')
def buildProtocol(self, addr):
protocol = ReconnectingClientFactory.buildProtocol(self, addr)
protocol.settings = self.settings
# Set various properties defined by Twisted's IRCClient.
protocol.nickname = self.settings.nickname or protocol.nickname
protocol.password = self.settings.password or protocol.password
protocol.realname = self.settings.realname or protocol.realname
protocol.username = self.settings.username or protocol.username
protocol.userinfo = self.settings.userinfo or protocol.userinfo
self.protocols.add(protocol)
return protocol
def reload_settings(self, dct):
"""Update this connection's settings using *dct*, then call
`after_reload` on each of this factory's active connections."""
self.log.info('Reloading settings')
self.settings.replace(dct)
for protocol in self.protocols:
protocol.after_reload()
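# Illustrative sketch, not part of the original module: a factory like the one
# above is normally wired into a Twisted reactor by the surrounding
# application; the contents of the settings dictionary here are hypothetical:
#
#   factory = ConnectionFactory()
#   factory.reload_settings(settings_dict)  # settings_dict loaded elsewhere
#   reactor.connectTCP('irc.example.org', 6667, factory)
#   reactor.run()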
| 1.992188
| 2
|
px/nmt/utils/diverse_decoder_utils.py
|
jmrf/active-qa
| 0
|
12782527
|
<reponame>jmrf/active-qa<filename>px/nmt/utils/diverse_decoder_utils.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for decoding a seq2seq model using a diverse decoder."""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
from tensorflow.contrib.seq2seq.python.ops import beam_search_decoder
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = ["DiverseBeamSearchDecoder"]
def _check_maybe(t):
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
class DiverseBeamSearchDecoder(beam_search_decoder.BeamSearchDecoder):
"""Diverse Beam Search decoder."""
def __init__(self, decoder_scope, maximum_iterations, decoding_iterations,
*args, **kwargs):
"""Initialize the DiverseBeamSearchDecoder.
Args:
decoder_scope: Scope.
maximum_iterations: int, Maximum number of decoding iterations.
decoding_iterations: number of sequential beam search decodings.
*args: Other arguments to apply to the BeamSearchDecoder class.
**kwargs: Keyword arguments to apply to the BeamSearchDecoder class.
"""
super(DiverseBeamSearchDecoder, self).__init__(*args, **kwargs)
self._maximum_iterations = maximum_iterations
self._decoding_iterations = decoding_iterations
self._decoding_iterations_remaining = decoding_iterations
self._decoder_scope = decoder_scope
self._forbidden_tokens = None
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`. The
sequence lengths determined for each beam during decode. **NOTE** These
are ignored; the updated sequence lengths are stored in
`final_state.lengths`.
Returns:
outputs: An instance of `FinalBeamSearchDecoderOutput` where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of `BeamSearchDecoderState`.
"""
del sequence_lengths
self._decoding_iterations_remaining -= 1 # Decrease counter.
# Get max_sequence_length across all beams for each batch.
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(final_state.lengths, axis=1))
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids,
outputs.parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=self._end_token)
if self._reorder_tensor_arrays:
# pylint: disable=g-long-lambda
# pylint: disable=line-too-long
final_state = final_state._replace(
cell_state=nest.map_structure(
lambda t: self._maybe_sort_array_beams(t, outputs.parent_ids, final_state.lengths),
final_state.cell_state))
# pylint: enable=g-long-lambda
# pylint: enable=line-too-long
if self._decoding_iterations_remaining >= 1:
# Transpose to [batch_size, time, beam_width]
new_forbidden_tokens = tf.transpose(predicted_ids, perm=[1, 0, 2])
# Reshape to [batch_size, time * beam_width]
new_forbidden_tokens = tf.reshape(
new_forbidden_tokens, shape=[tf.shape(new_forbidden_tokens)[0], -1])
if self._forbidden_tokens is not None:
self._forbidden_tokens = tf.concat(
[self._forbidden_tokens, new_forbidden_tokens], axis=1)
else:
self._forbidden_tokens = new_forbidden_tokens
new_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
self,
maximum_iterations=self._maximum_iterations,
output_time_major=True,
swap_memory=True,
scope=self._decoder_scope)
all_scores = tf.concat(
[outputs.scores, new_outputs.beam_search_decoder_output.scores],
axis=2)
all_predicted_ids = tf.concat([
outputs.predicted_ids,
new_outputs.beam_search_decoder_output.predicted_ids
],
axis=2)
all_parent_ids = tf.concat([
outputs.parent_ids, new_outputs.beam_search_decoder_output.parent_ids
],
axis=2)
outputs = beam_search_decoder.BeamSearchDecoderOutput(
scores=all_scores,
predicted_ids=all_predicted_ids,
parent_ids=all_parent_ids)
# Append eos token ids in case predicted_ids is shorter than new
# predicted_ids, and vice-versa.
predicted_ids = pad(
x=predicted_ids,
max_size=tf.shape(new_outputs.predicted_ids)[0],
value=self._end_token)
new_predicted_ids = pad(
x=new_outputs.predicted_ids,
max_size=tf.shape(predicted_ids)[0],
value=self._end_token)
predicted_ids = tf.concat([predicted_ids, new_predicted_ids], axis=2)
outputs = beam_search_decoder.FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
coverage_penalty_weight = self._coverage_penalty_weight
with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = nest.map_structure(self._maybe_merge_batch_beams, cell_state,
self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = nest.map_structure(
self._maybe_split_batch_beams, next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
if self._forbidden_tokens is not None:
def mask_forbidden(arr, forbidden_indices, end_token):
"""Replaces the elements in `arr` with a low constant.
Args:
arr: a numpy array of shape [batch_size, beam_width, vocab_size].
forbidden_indices: a numpy array of shape [batch_size,
num_forbidden_tokens].
end_token: a int32 scalar representing the eos token id.
Returns:
a numpy array of shape [batch_size, beam_width, vocab_size].
"""
batch_indices = np.arange(arr.shape[0]).repeat(
forbidden_indices.shape[1])
# Remove eos token from indices.
mask = (forbidden_indices != end_token).flatten()
batch_indices = batch_indices[mask]
forbidden_indices = forbidden_indices.flatten()[mask]
# Set a very low logit value so it is never selected by the decoder.
arr[batch_indices, :,
forbidden_indices.reshape((-1))] = np.minimum(
arr[batch_indices, :,
forbidden_indices.reshape((-1))], -1e7)
return arr
# It is faster to mask out the logits in numpy than executing the
# equivalent, but more complicated, tensorflow operations.
cell_outputs = tf.py_func(
func=mask_forbidden,
inp=[cell_outputs, self._forbidden_tokens, end_token],
Tout=(tf.float32),
name="mask_forbidden")
(beam_search_output, beam_search_state) = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = control_flow_ops.cond(
math_ops.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (beam_search_output, beam_search_state, next_inputs, finished)
def pad(x, max_size, value=0.0):
"""Makes the first dimension of x to be at least max_size.
Args:
x: a 3-D tensor.
max_size: an int32 or int64 tensor.
value: the value that the new elements of x will have.
Returns:
The expanded tensor with shape
[max(x.shape[0], max_size), x.shape[1], x.shape[2]].
"""
fill = tf.fill(
dims=[
tf.maximum(max_size - tf.shape(x)[0], 0),
tf.shape(x)[1],
tf.shape(x)[2]
],
value=value)
return tf.concat([x, fill], axis=0)
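# Illustrative example, not part of the original module: pad() only ever grows
# the time dimension. A tensor of shape [3, batch, beam] padded with
# max_size=5 becomes [5, batch, beam]; with max_size=2 it stays [3, batch, beam].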
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight,
coverage_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume that all
beams are equal and consider only the first beam for continuations.
logits: Logits at the current time step. A tensor of shape `[batch_size,
beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search. An instance of
`BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
Returns:
A new beam state.
"""
static_batch_size = tensor_util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
not_finished = math_ops.logical_not(previously_finished)
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = nn_ops.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape[-1].value or array_ops.shape(logits)[-1]
lengths_to_add = array_ops.one_hot(
indices=array_ops.fill([batch_size, beam_width], end_token),
depth=vocab_size,
on_value=np.int64(0),
off_value=np.int64(1),
dtype=dtypes.int64)
add_mask = math_ops.to_int64(not_finished)
lengths_to_add *= array_ops.expand_dims(add_mask, 2)
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
# Calculate the accumulated attention probabilities if coverage penalty is
# enabled.
accumulated_attention_probs = None
attention_probs = get_attention_probs(next_cell_state,
coverage_penalty_weight)
if attention_probs is not None:
attention_probs *= array_ops.expand_dims(math_ops.to_float(not_finished), 2)
accumulated_attention_probs = (
beam_state.accumulated_attention_probs + attention_probs)
# Calculate the scores for each beam
scores = _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight,
coverage_penalty_weight=coverage_penalty_weight,
finished=previously_finished,
accumulated_attention_probs=accumulated_attention_probs)
time = ops.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_flat = array_ops.reshape(scores, [batch_size, -1])
# Pick the next beams according to the specified successors function
next_beam_size = ops.convert_to_tensor(
beam_width, dtype=dtypes.int32, name="beam_width")
next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1],
name="next_beam_probs")
# Note: just doing the following
# math_ops.to_int32(word_indices % vocab_size,
# name="next_beam_word_ids")
# would be a lot cleaner but for reasons unclear, that hides the results of
# the op which prevents capturing it with tfdbg debug ops.
raw_next_word_ids = math_ops.mod(
word_indices, vocab_size, name="next_beam_word_ids")
next_word_ids = math_ops.to_int32(raw_next_word_ids)
next_beam_ids = math_ops.to_int32(
word_indices / vocab_size, name="next_beam_parent_ids")
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = math_ops.logical_or(
previously_finished,
math_ops.equal(next_word_ids, end_token),
name="next_beam_finished")
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged.
# 2. Beams that are now finished (EOS predicted) have their length
# increased by 1.
# 3. Beams that are not yet finished have their length increased by 1.
lengths_to_add = math_ops.to_int64(math_ops.logical_not(previously_finished))
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
next_accumulated_attention_probs = ()
if accumulated_attention_probs is not None:
next_accumulated_attention_probs = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=accumulated_attention_probs,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1],
name="next_accumulated_attention_probs")
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = beam_search_decoder.BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished,
accumulated_attention_probs=next_accumulated_attention_probs)
output = beam_search_decoder.BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
def get_attention_probs(next_cell_state, coverage_penalty_weight):
"""Get attention probabilities from the cell state.
Args:
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
Returns:
The attention probabilities with shape `[batch_size, beam_width, max_time]`
if coverage penalty is enabled. Otherwise, returns None.
Raises:
ValueError: If no cell is attentional but coverage penalty is enabled.
"""
if coverage_penalty_weight == 0.0:
return None
# Attention probabilities of each attention layer. Each with shape
# `[batch_size, beam_width, max_time]`.
probs_per_attn_layer = []
if isinstance(next_cell_state, attention_wrapper.AttentionWrapperState):
probs_per_attn_layer = [attention_probs_from_attn_state(next_cell_state)]
elif isinstance(next_cell_state, tuple):
for state in next_cell_state:
if isinstance(state, attention_wrapper.AttentionWrapperState):
probs_per_attn_layer.append(attention_probs_from_attn_state(state))
if not probs_per_attn_layer:
raise ValueError(
"coverage_penalty_weight must be 0.0 if no cell is attentional.")
if len(probs_per_attn_layer) == 1:
attention_probs = probs_per_attn_layer[0]
else:
# Calculate the average attention probabilities from all attention layers.
attention_probs = [
array_ops.expand_dims(prob, -1) for prob in probs_per_attn_layer
]
attention_probs = array_ops.concat(attention_probs, -1)
attention_probs = math_ops.reduce_mean(attention_probs, -1)
return attention_probs
def _get_scores(log_probs, sequence_lengths, length_penalty_weight,
coverage_penalty_weight, finished, accumulated_attention_probs):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape `[batch_size, beam_width,
vocab_size]`.
sequence_lengths: The array of sequence lengths.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
coverage_penalty_weight: Float weight to penalize the coverage of source
sentence. Disabled with 0.0.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
accumulated_attention_probs: Accumulated attention probabilities up to the
current time step, with shape `[batch_size, beam_width, max_time]` if
coverage_penalty_weight is not 0.0.
Returns:
The scores normalized by the length_penalty and coverage_penalty.
Raises:
ValueError: accumulated_attention_probs is None when coverage penalty is
enabled.
"""
length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
scores = log_probs / length_penalty_
coverage_penalty_weight = ops.convert_to_tensor(
coverage_penalty_weight, name="coverage_penalty_weight")
if coverage_penalty_weight.shape.ndims != 0:
raise ValueError("coverage_penalty_weight should be a scalar, "
"but saw shape: %s" % coverage_penalty_weight.shape)
if tensor_util.constant_value(coverage_penalty_weight) == 0.0:
return scores
if accumulated_attention_probs is None:
raise ValueError(
"accumulated_attention_probs can be None only if coverage penalty is "
"disabled.")
# Add source sequence length mask before computing coverage penalty.
accumulated_attention_probs = array_ops.where(
math_ops.equal(accumulated_attention_probs, 0.0),
array_ops.ones_like(accumulated_attention_probs),
accumulated_attention_probs)
# coverage penalty =
# sum over `max_time` {log(min(accumulated_attention_probs, 1.0))}
coverage_penalty = math_ops.reduce_sum(
math_ops.log(math_ops.minimum(accumulated_attention_probs, 1.0)), 2)
# Apply coverage penalty to finished predictions.
coverage_penalty *= math_ops.to_float(finished)
weighted_coverage_penalty = coverage_penalty * coverage_penalty_weight
# Reshape from [batch_size, beam_width] to [batch_size, beam_width, 1]
weighted_coverage_penalty = array_ops.expand_dims(weighted_coverage_penalty,
2)
return scores + weighted_coverage_penalty
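# Illustrative sketch (not part of the original source): for a single hypothesis with
# log_prob = -6.0, length 8 and length_penalty_weight = 0.6, the length-normalized score is
#   score = -6.0 / ((5 + 8) / 6) ** 0.6  ~  -6.0 / 1.59  ~  -3.77
# and, when coverage penalty is enabled, the weighted coverage penalty is added on top of this.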
def attention_probs_from_attn_state(attention_state):
"""Calculates the average attention probabilities.
Args:
attention_state: An instance of `AttentionWrapperState`.
Returns:
The attention probabilities in the given AttentionWrapperState.
If there are multiple attention mechanisms, return the average value from
all attention mechanisms.
"""
# Attention probabilities over time steps, with shape
# `[batch_size, beam_width, max_time]`.
attention_probs = attention_state.alignments
if isinstance(attention_probs, tuple):
attention_probs = [
array_ops.expand_dims(prob, -1) for prob in attention_probs
]
attention_probs = array_ops.concat(attention_probs, -1)
attention_probs = math_ops.reduce_mean(attention_probs, -1)
return attention_probs
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty.
See https://arxiv.org/abs/1609.08144.
Returns the length penalty tensor:
```
[(5+sequence_lengths)/6]**penalty_factor
```
where all operations are performed element-wise.
Args:
sequence_lengths: `Tensor`, the sequence lengths of each hypotheses.
penalty_factor: A scalar that weights the length penalty.
Returns:
If the penalty is `0`, returns the scalar `1.0`. Otherwise returns
the length penalty factor, a tensor with the same shape as
`sequence_lengths`.
"""
penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div((5. + math_ops.to_float(sequence_lengths))
**penalty_factor, (5. + 1.)**penalty_factor)
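# Illustrative sketch (not from the original source): with penalty_factor = 0.6,
#   sequence_lengths = [4, 8]  ->  ((5 + [4, 8]) / 6) ** 0.6  ~  [1.28, 1.59]
# so longer hypotheses are divided by a larger factor when their scores are normalized.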
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = array_ops.shape(probs)[2]
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = array_ops.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=ops.convert_to_tensor(0., dtype=probs.dtype),
off_value=probs.dtype.min)
finished_probs = array_ops.tile(
array_ops.reshape(finished_row, [1, 1, -1]),
array_ops.concat([array_ops.shape(finished), [1]], 0))
finished_mask = array_ops.tile(
array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
return array_ops.where(finished_mask, finished_probs, probs)
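# Illustrative sketch (not from the original source): with vocab_size = 3 and eos_token = 2,
# a finished beam's row becomes [dtype.min, dtype.min, 0.0], i.e. all probability mass on EOS,
# while unfinished beams keep their original log probabilities.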
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
This applies _tensor_gather_helper when the gather_from dims is at least as
big as the length of gather_shape. This is used in conjunction with nest so
that we don't apply _tensor_gather_helper to inapplicable values like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
if isinstance(gather_from, tensor_array_ops.TensorArray):
return gather_from
_check_maybe(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices,
gather_from,
batch_size,
range_size,
gather_shape,
name=None):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
name: The tensor name for set of operations. By default this is
'tensor_gather_helper'. The final output is named 'output'.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
with ops.name_scope(name, "tensor_gather_helper"):
range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
gather_indices = array_ops.reshape(gather_indices + range_, [-1])
output = array_ops.gather(
array_ops.reshape(gather_from, gather_shape), gather_indices)
final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tensor_util.constant_value(batch_size)
final_static_shape = (
tensor_shape.TensorShape([static_batch_size]).concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = array_ops.reshape(output, final_shape, name="output")
output.set_shape(final_static_shape)
return output
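# Illustrative sketch (not from the original source): with batch_size = 2 and range_size = 3,
# gather_indices [[0, 2], [1, 1]] are offset by [[0], [3]] and flattened to [0, 2, 4, 4],
# so each batch entry only gathers from its own block of beam entries in the flattened tensor.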
| 1.875
| 2
|
src/extern/cnn_on_lstm.py
|
wxy1224/cs224n_project
| 1
|
12782528
|
<filename>src/extern/cnn_on_lstm.py<gh_stars>1-10
'''
Example of an LSTM model with GloVe embeddings along with magic features
Tested under Keras 2.0 with Tensorflow 1.0 backend
Single model may achieve LB scores at around 0.18+, average ensembles can get 0.17+
'''
########################################
## import packages
########################################
import os
import re
import csv
import codecs
import numpy as np
import pandas as pd
from string import punctuation
from collections import defaultdict
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GaussianDropout, Flatten
from keras.layers.wrappers import Bidirectional
from keras.layers.merge import concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.pooling import MaxPooling1D
from sklearn.preprocessing import StandardScaler
import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
########################################
## set directories and parameters
########################################
BASE_DIR = './input/'
EMBEDDING_FILE = BASE_DIR + 'glove.6B.300d.txt'#'glove.840B.300d.txt' # 'facebookfasttext.vec' # # #
TRAIN_DATA_FILE = BASE_DIR + 'train.csv'
TEST_DATA_FILE = BASE_DIR + 'test.csv'
MAX_SEQUENCE_LENGTH = 30
MAX_NB_WORDS = 200000
EMBEDDING_DIM = 300
VALIDATION_SPLIT = 0.1
num_lstm = np.random.randint(175, 275)
num_dense = np.random.randint(100, 150)
rate_drop_lstm = 0.15 + np.random.rand() * 0.25
rate_drop_dense = 0.15 + np.random.rand() * 0.25
act = 'relu'
re_weight = True # whether to re-weight classes to fit the 17.5% share in test set
STAMP = './extern_repo/lstm_%d_%d_%.2f_%.2f' % (num_lstm, num_dense, rate_drop_lstm, \
rate_drop_dense)
########################################
## index word vectors
########################################
print('Indexing word vectors')
embeddings_index = {}
f = open(EMBEDDING_FILE)
count = 0
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %d word vectors of glove.' % len(embeddings_index))
########################################
## process texts in datasets
########################################
print('Processing text dataset')
# The function "text_to_wordlist" is from
# https://www.kaggle.com/currie32/quora-question-pairs/the-importance-of-cleaning-text
def text_to_wordlist(text, remove_stopwords=False, stem_words=False):
# Clean the text, with the option to remove stopwords and to stem words.
# Convert words to lower case and split them
text = text.lower().split()
# Optionally, remove stop words
if remove_stopwords:
stops = set(stopwords.words("english"))
text = [w for w in text if not w in stops]
text = " ".join(text)
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"'", " ", text)
text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
text = re.sub(r":", " : ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r" u s ", " american ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e - mail", "email", text)
text = re.sub(r"j k", "jk", text)
text = re.sub(r"\s{2,}", " ", text)
# Optionally, shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
# Return a list of words
return (text)
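# Illustrative sketch (not part of the original script): the cleaning pass above maps, e.g.,
#   text_to_wordlist("What's a 10k run?")  ->  "what is a 10000 run" (modulo trailing whitespace)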
texts_1 = []
texts_2 = []
labels = []
with codecs.open(TRAIN_DATA_FILE, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
header = next(reader)
for values in reader:
texts_1.append(text_to_wordlist(values[3]))
texts_2.append(text_to_wordlist(values[4]))
labels.append(int(values[5]))
print('Found %s texts in train.csv' % len(texts_1))
test_texts_1 = []
test_texts_2 = []
test_ids = []
with codecs.open(TEST_DATA_FILE, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
header = next(reader)
for values in reader:
# print('values: ', values)  # debug output for every test row; very noisy on the full test set
test_texts_1.append(text_to_wordlist(values[1]))
test_texts_2.append(text_to_wordlist(values[2]))
test_ids.append(values[0])
print('Found %s texts in test.csv' % len(test_texts_1))
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts_1 + texts_2 + test_texts_1 + test_texts_2)
sequences_1 = tokenizer.texts_to_sequences(texts_1)
sequences_2 = tokenizer.texts_to_sequences(texts_2)
test_sequences_1 = tokenizer.texts_to_sequences(test_texts_1)
test_sequences_2 = tokenizer.texts_to_sequences(test_texts_2)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
data_1 = pad_sequences(sequences_1, maxlen=MAX_SEQUENCE_LENGTH)
data_2 = pad_sequences(sequences_2, maxlen=MAX_SEQUENCE_LENGTH)
labels = np.array(labels)
print('Shape of data tensor:', data_1.shape)
print('Shape of label tensor:', labels.shape)
test_data_1 = pad_sequences(test_sequences_1, maxlen=MAX_SEQUENCE_LENGTH)
test_data_2 = pad_sequences(test_sequences_2, maxlen=MAX_SEQUENCE_LENGTH)
test_ids = np.array(test_ids)
########################################
## generate leaky features
########################################
train_df = pd.read_csv(TRAIN_DATA_FILE)
test_df = pd.read_csv(TEST_DATA_FILE)
ques = pd.concat([train_df[['question1', 'question2']], \
test_df[['question1', 'question2']]], axis=0).reset_index(drop='index')
q_dict = defaultdict(set)
for i in range(ques.shape[0]):
q_dict[ques.question1[i]].add(ques.question2[i])
q_dict[ques.question2[i]].add(ques.question1[i])
def q1_freq(row):
return (len(q_dict[row['question1']]))
def q2_freq(row):
return (len(q_dict[row['question2']]))
def q1_q2_intersect(row):
return (len(set(q_dict[row['question1']]).intersection(set(q_dict[row['question2']]))))
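# Illustrative note (not part of the original script): q1_q2_intersect counts how many distinct
# questions have been paired with *both* question1 and question2 anywhere in train+test; a large
# overlap of such "neighbour" questions is a strong (leaky) duplicate signal.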
train_df['q1_q2_intersect'] = train_df.apply(q1_q2_intersect, axis=1, raw=True)
train_df['q1_freq'] = train_df.apply(q1_freq, axis=1, raw=True)
train_df['q2_freq'] = train_df.apply(q2_freq, axis=1, raw=True)
test_df['q1_q2_intersect'] = test_df.apply(q1_q2_intersect, axis=1, raw=True)
test_df['q1_freq'] = test_df.apply(q1_freq, axis=1, raw=True)
test_df['q2_freq'] = test_df.apply(q2_freq, axis=1, raw=True)
leaks = train_df[['q1_q2_intersect', 'q1_freq', 'q2_freq']]
test_leaks = test_df[['q1_q2_intersect', 'q1_freq', 'q2_freq']]
ss = StandardScaler()
ss.fit(np.vstack((leaks, test_leaks)))
leaks = ss.transform(leaks)
test_leaks = ss.transform(test_leaks)
########################################
## prepare embeddings
########################################
print('Preparing embedding matrix')
nb_words = min(MAX_NB_WORDS, len(word_index)) + 1
embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))
########################################
## sample train/validation data
########################################
# np.random.seed(1234)
perm = np.random.permutation(len(data_1))
idx_train = perm[:int(len(data_1) * (1 - VALIDATION_SPLIT))]
idx_val = perm[int(len(data_1) * (1 - VALIDATION_SPLIT)):]
data_1_train = np.vstack((data_1[idx_train], data_2[idx_train]))
data_2_train = np.vstack((data_2[idx_train], data_1[idx_train]))
leaks_train = np.vstack((leaks[idx_train], leaks[idx_train]))
labels_train = np.concatenate((labels[idx_train], labels[idx_train]))
data_1_val = np.vstack((data_1[idx_val], data_2[idx_val]))
data_2_val = np.vstack((data_2[idx_val], data_1[idx_val]))
leaks_val = np.vstack((leaks[idx_val], leaks[idx_val]))
labels_val = np.concatenate((labels[idx_val], labels[idx_val]))
weight_val = np.ones(len(labels_val))
if re_weight:
weight_val *= 0.472001959
weight_val[labels_val == 0] = 1.309028344
########################################
## define the model structure
########################################
embedding_layer = Embedding(nb_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
lstm_layer = Bidirectional(LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm))
sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences_1 = embedding_layer(sequence_1_input)
x1 = lstm_layer(embedded_sequences_1)
sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences_2 = embedding_layer(sequence_2_input)
y1 = lstm_layer(embedded_sequences_2)
leaks_input = Input(shape=(leaks.shape[1],))
leaks_dense = Dense(num_dense // 2, activation=act)(leaks_input)
conv_layer = Conv1D(filters=128, kernel_size=3, padding='valid', activation='relu')
con_1 = conv_layer(embedded_sequences_1)
con_1 = MaxPooling1D(4)(con_1)
con_1 = Dropout(0.2)(con_1)
con_1 = Flatten()(con_1)
con_1 = Dense(300)(con_1)
con_2 = conv_layer(embedded_sequences_2)
con_2 = MaxPooling1D(4)(con_2)
con_2 = Dropout(0.2)(con_2)
con_2 = Flatten()(con_2)
con_2 = Dense(300)(con_2)
merged = concatenate([x1, y1, con_1, con_2, leaks_dense])
merged = BatchNormalization()(merged)
merged = Dropout(rate_drop_dense)(merged)
merged = Dense(num_dense, activation=act)(merged)
merged = BatchNormalization()(merged)
merged = Dropout(rate_drop_dense)(merged)
preds = Dense(1, activation='sigmoid')(merged)
########################################
## add class weight
########################################
if re_weight:
class_weight = {0: 1.309028344, 1: 0.472001959}
else:
class_weight = None
########################################
## train the model
########################################
model = Model(inputs=[sequence_1_input, sequence_2_input, leaks_input], \
outputs=preds)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['acc'])
# model.summary()
print(STAMP)
early_stopping = EarlyStopping(monitor='val_loss', patience=3)
bst_model_path = STAMP + '.h5'
model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)
model.summary()
hist = model.fit([data_1_train, data_2_train, leaks_train], labels_train, \
validation_data=([data_1_val, data_2_val, leaks_val], labels_val, weight_val), \
epochs=200, batch_size=2048, shuffle=True, \
class_weight=class_weight, callbacks=[early_stopping, model_checkpoint])
model.load_weights(bst_model_path)
bst_val_score = min(hist.history['val_loss'])
########################################
## make the submission
########################################
print('Start making the submission before fine-tuning')
preds = model.predict([test_data_1, test_data_2, test_leaks], batch_size=2048, verbose=1)
preds += model.predict([test_data_2, test_data_1, test_leaks], batch_size=2048, verbose=1)
preds /= 2
submission = pd.DataFrame({'test_id': test_ids, 'is_duplicate': preds.ravel()})
submission.to_csv('%.4f_' % (bst_val_score) + STAMP + '.csv', index=False)
| 2.25
| 2
|
tests/recipes/test_libffi.py
|
syrykh/python-for-android
| 6,278
|
12782529
|
import unittest
from tests.recipes.recipe_lib_test import BaseTestForMakeRecipe
class TestLibffiRecipe(BaseTestForMakeRecipe, unittest.TestCase):
"""
An unittest for recipe :mod:`~pythonforandroid.recipes.libffi`
"""
recipe_name = "libffi"
sh_command_calls = ["./autogen.sh", "autoreconf", "./configure"]
def test_get_include_dirs(self):
list_of_includes = self.recipe.get_include_dirs(self.arch)
self.assertIsInstance(list_of_includes, list)
self.assertTrue(list_of_includes[0].endswith("include"))
| 2.46875
| 2
|
create_trainingset_and_classifier/analyse_gt_aa_nb.py
|
ganzri/Tracking-Pixels
| 0
|
12782530
|
<reponame>ganzri/Tracking-Pixels<filename>create_trainingset_and_classifier/analyse_gt_aa_nb.py
# Copyright (C) 2022 <NAME>, ETH Zürich, Information Security Group
# Released under the MIT License
"""
Similar to analyse.py, but intended for the larger data set (200 samples, or all of them). Because track.hubspot.com and a few other domains are extremely common in this data set, they are filtered out here; they have already been dealt with in the 20-sample set.
Prints the samples in the JSON to the console for manual analysis, together with the filter-list decision (which list would block the request, and based on which rule).
Additionally gives a rough estimate of how much the two data sets differ.
<gt_data> the actual data one wants to analyse: ground truth vs blocked or not
<pred_data> the same comparison but predicted vs blocked or not
e.g. if gt_data is gt_aa_not_blocked.json then pred_data should be pred_aa_not_blocked.json
Usage:
analyse.py <gt_data> <pred_data>
"""
import json
from typing import List, Dict, Any, Tuple
from docopt import docopt
import os
from abp_blocklist_parser import BlockListParser
from urllib import parse
import re
def is_third_party(url, first_party_domain) -> bool:
pixel_domain = url_to_uniform_domain(parse.urlsplit(url).netloc)
website_domain = url_to_uniform_domain(parse.urlsplit(first_party_domain).netloc)
return (pixel_domain not in website_domain)
def url_to_uniform_domain(url: str) -> str:
"""
Takes a URL or a domain string and transforms it into a uniform format.
Examples: {"www.example.com", "https://example.com/", ".example.com"} --> "example.com"
:param url: URL to clean and bring into uniform format
"""
new_url = url.strip()
new_url = re.sub("^http(s)?://", "", new_url)
new_url = re.sub("^www([0-9])?", "", new_url)
new_url = re.sub("^\\.", "", new_url)
new_url = re.sub("/$", "", new_url)
return new_url
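# Illustrative sketch (not part of the original script):
#   url_to_uniform_domain("https://www.example.com/")  ->  "example.com"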
def main() -> None:
argv = None
cargs = docopt(__doc__, argv=argv)
dat_path: str = cargs["<gt_data>"]
dat_path_2: str = cargs["<pred_data>"]
if not os.path.exists(dat_path):
print(f"datapath is not a valid path")
return
if not os.path.exists(dat_path_2):
print(f"datapath is not a valid path")
return
gt: Dict[str, Dict[str, Any]] = dict()
pred: Dict[str, Dict[str, Any]] = dict()
with open(dat_path) as fd:
gt = json.load(fd)
print(f"Nr of gt samples loaded: {len(gt)}")
with open(dat_path_2) as fd:
pred = json.load(fd)
print(f"Nr of pred samples loaded: {len(pred)}")
a = set()
b = set()
for k in gt:
a.add(k)
for k in pred:
b.add(k)
print(len(a-b))
print(len(b-a))
print(len(a))
print(len(b))
"""
print("only in gt")
keys_gt_only = a-b
for k in keys_gt_only:
print(gt[k])
print("\n")
"""
"""
print("only in pred")
keys_in_pred_only = b-a
for k in keys_in_pred_only:
print(pred[k])
print("\n")
"""
privacy = "./abp_blocklist_parser/easyprivacy.txt"
easylist = "./abp_blocklist_parser/whole_list.txt"
blocklist_parser = BlockListParser(privacy)
blocklist_parser_2 = BlockListParser(easylist)
options = dict()
options["image"] = 1
#used to count and exclude hubspot
i = 1
hub = 0
outbr = 0
for k in gt:
#print(k)
sample = gt[k]
url = sample["url"]
if "track.hubspot.com" in url:
hub += 1
elif "tr.outbrain.com" in url:
outbr += 1
else:
dom = url_to_uniform_domain(parse.urlsplit(sample["triggering_origin"]).netloc)
options["domain"] = dom
if is_third_party(sample["url"], sample["first_party_domain"]):
options["third-party"] = 1
else:
options["third-party"] = 0
print(i)
i += 1
print(sample)
print(blocklist_parser.should_block_with_items(sample["url"], options))
print(blocklist_parser_2.should_block_with_items(sample["url"],options))
print(sample["matched"])
print(sample["img_size"])
print("\n")
print(f"hubspot found {hub} times")
print(f"outbrain found {outbr} times")
if __name__ == "__main__":
exit(main())
| 3.21875
| 3
|
main.py
|
Brocenzo0599/print_notes
| 0
|
12782531
|
<reponame>Brocenzo0599/print_notes<gh_stars>0
import os, sys
import win32print, win32api
import datetime as dt
win32print.SetDefaultPrinter(win32print.GetDefaultPrinter())
#Gets list of already printed documents from printed.txt
printed_docs = []
f = open("printed.txt", "a")
f1 = open("printed.txt", "r")
print ("already printed documents: ")
for line in f1.readlines():
if line.startswith("C"):
print (line)
printed_docs.append(line.replace("\n", ""))
f1.close()
#Gets date and adds path according to semester and year (FORMAT : "\SEMESTER YEAR")
def findpath():
path = "C:\\Users\\vince\\Google Drive\\Mcgill"
print(path.endswith("Mcgill"))
date = dt.date.today()
if date.month > 6 and path.endswith("Mcgill"):
path += "\\Fall " + str(date.year)
elif date.month <=6:
path += "\\Winter " + str(date.year)
return path
for dir in os.listdir(findpath()):
path = findpath()
print ("dir",dir)
path += "\\%s" %dir + "\\notes"
for file in os.listdir(path):
file_path = path + "\\%s" %file
if file.endswith(".pdf") and file_path not in printed_docs:
print (file_path)
printed_docs.append(file_path)
f.write("\n"+ file_path)
win32api.ShellExecute(0, "print",file_path, None, ".", 0)
f.close()
x = input()
| 3.15625
| 3
|
src/rule_part_widget.py
|
DavidKnodt/heuristica
| 0
|
12782532
|
from PySide2 import QtWidgets, QtGui, QtCore
class RulePartWidget(QtWidgets.QWidget):
def __init__(self, parent=None, feature_ranges={'feature X': [0, 100]}, rule_number=-1):
QtWidgets.QWidget.__init__(self, parent)
self.layout = QtWidgets.QHBoxLayout(self)
# combobox
self.combo_box = QtWidgets.QComboBox()
self.feature_ranges = feature_ranges
self.features = list(self.feature_ranges.keys())
print(self.features)
for feature in self.features:
self.combo_box.addItem(feature)
# create widgets
feat_range = self.feature_ranges[self.features[0]]
self.min_box = QtWidgets.QDoubleSpinBox()
self.min_box.valueChanged.connect(self._min_box_changed)
self.max_box = QtWidgets.QDoubleSpinBox()
self.max_box.valueChanged.connect(self._max_box_changed)
self.min_box.setRange(*feat_range)
self.max_box.setRange(*feat_range)
self.min_box.setValue(feat_range[0])
self.max_box.setValue(feat_range[1])
self.rule_id = f'Rule Part {rule_number if rule_number != -1 else "X"}'
# add to layout
self.layout.addWidget(QtWidgets.QLabel(self.rule_id, self))
self.layout.addWidget(self.combo_box)
self.layout.addWidget(QtWidgets.QLabel('between', self))
self.layout.addWidget(self.min_box)
self.layout.addWidget(QtWidgets.QLabel('and', self))
self.layout.addWidget(self.max_box)
self.combo_box.activated.connect(self.feature_change)
def feature_change(self):
print('Current Feature:', self.combo_box.currentText())
selected_feature = self.combo_box.currentText()
feat_range = self.feature_ranges[selected_feature]
self.min_box.setRange(*feat_range)
self.max_box.setRange(*feat_range)
self.min_box.setValue(feat_range[0])
self.max_box.setValue(feat_range[1])
def _min_box_changed(self, val):
selected_feature = self.combo_box.currentText()
feat_range = self.feature_ranges[selected_feature]
# limit by chosen minimum
self.max_box.setRange(val, feat_range[1])
def _max_box_changed(self, val):
selected_feature = self.combo_box.currentText()
feat_range = self.feature_ranges[selected_feature]
# limit by chosen maximum
self.min_box.setRange(feat_range[0], val)
def get_rule(self):
selected_feature = self.combo_box.currentText()
min_val = self.min_box.value()
max_val = self.max_box.value()
return {'rule_id': self.rule_id, 'feature': selected_feature, 'range': [min_val, max_val]}
| 2.484375
| 2
|
src/LinConGauss/core/linear_constraints.py
|
alpiges/LinConGauss
| 9
|
12782533
|
import numpy as np
class LinearConstraints():
def __init__(self, A, b, mode='Intersection'):
"""
Defines linear functions f(x) = Ax + b.
The integration domain is the set where all of these functions are non-negative when mode='Intersection',
or the set where any of the functions is non-negative when mode='Union'.
:param A: matrix A with shape (M, D) where M is the number of constraints and D the dimension
:param b: offset, shape (M, 1)
:param mode: either 'Intersection' (default) or 'Union'
"""
self.A = A
self.b = b
self.N_constraints = b.shape[0]
self.N_dim = A.shape[1]
self.mode = mode
def evaluate(self, x):
"""
Evaluate linear functions at N locations x
:param x: location, shape (D, N)
:return: Ax + b
"""
return np.dot(self.A, x) + self.b
def integration_domain(self, x):
"""
is 1 if x is in the integration domain, else 0
:param x: location, shape (D, N)
:return: either self.indicator_union or self.indicator_intersection, depending on setting of self.mode
"""
if self.mode == 'Union':
return self.indicator_union(x)
elif self.mode == 'Intersection':
return self.indicator_intersection(x)
else:
raise NotImplementedError
def indicator_intersection(self, x):
"""
Intersection of indicator functions taken to be 1 when the linear function is >= 0
:param x: location, shape (D, N)
:return: 1 if all linear functions are >= 0, else 0.
"""
return np.where(self.evaluate(x) >= 0, 1, 0).prod(axis=0)
def indicator_union(self, x):
"""
Union of indicator functions taken to be 1 when the linear function is >= 0
:param x: location, shape (D, N)
:return: 1 if any of the linear functions is >= 0, else 0.
"""
return 1 - (np.where(self.evaluate(x) >= 0, 0, 1)).prod(axis=0)
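# Illustrative usage sketch (not part of the original source), for the single constraint x_1 >= 1
# in 2D: with A = np.array([[1.0, 0.0]]) and b = np.array([[-1.0]]),
#   LinearConstraints(A, b).integration_domain(np.array([[0.5, 2.0], [0.0, 0.0]]))  ->  array([0, 1])
# i.e. the first column (x_1 = 0.5) lies outside the domain and the second (x_1 = 2.0) inside.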
class ShiftedLinearConstraints(LinearConstraints):
def __init__(self, A, b, shift):
"""
Class for shifted linear constraints that appear in multilevel splitting method
:param A: matrix A with shape (M, D) where M is the number of constraints and D the dimension
:param b: offset, shape (M, 1)
:param shift: (positive) scalar value denoting the shift
"""
self.shift = shift
super(ShiftedLinearConstraints, self).__init__(A, b + shift)
| 3.578125
| 4
|
paperswithcode/models/method.py
|
Kabongosalomon/paperswithcode-client
| 78
|
12782534
|
from typing import List, Optional
from tea_client.models import TeaClientModel
from paperswithcode.models.page import Page
class Method(TeaClientModel):
"""Method object.
Attributes:
id (str): Method ID.
name (str): Method short name.
full_name (str): Method full name.
description (str): Method description.
paper (str, optional): ID of the paper that describes the method.
"""
id: str
name: str
full_name: str
description: str
paper: Optional[str]
class Methods(Page):
"""Object representing a paginated page of methods.
Attributes:
count (int): Number of elements matching the query.
next_page (int, optional): Number of the next page.
previous_page (int, optional): Number of the previous page.
results (List[Method]): List of methods on this page.
"""
results: List[Method]
| 2.828125
| 3
|
gs/content/form/base/__init__.py
|
groupserver/gs.content.form.base
| 0
|
12782535
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
#lint:disable
from .checkbox import multi_check_box_widget
from .form import SiteForm
from .radio import radio_widget
from .select import select_widget
from .disabledtextwidget import disabled_text_widget
#lint:enable
| 1.015625
| 1
|
setup.py
|
Etaoni/qiagen-clinical-insights-requests-api
| 0
|
12782536
|
<reponame>Etaoni/qiagen-clinical-insights-requests-api
from setuptools import setup
with open('README.md', 'r') as readme:
long_description = readme.read()
setup(
name='qci-api',
version='0.1.1',
packages=['qci'],
url='https://github.com/Etaoni/qci-api',
license='MIT License',
author='<NAME>',
author_email='<EMAIL>',
description='A Python interface for Qiagen Clinical Insight\'s REST API',
long_description=long_description,
long_description_content_type='text/markdown',
)
| 1.179688
| 1
|
examples/demo_purge.py
|
shirui-japina/tensorboardX
| 5,378
|
12782537
|
from time import sleep
from tensorboardX import SummaryWriter
with SummaryWriter(logdir='runs/purge') as w:
for i in range(100):
w.add_scalar('purgetest', i, i)
sleep(1.0)
with SummaryWriter(logdir='runs/purge', purge_step=42) as w:
# events from the previous run with step >= 42 are purged (steps 42-99, inclusive)
for i in range(42, 100):
w.add_scalar('purgetest', 42, i)
| 2.125
| 2
|
meeshkan/nlp/ids/gib_detect.py
|
meeshkan/meeshkan-nlp
| 1
|
12782538
|
<filename>meeshkan/nlp/ids/gib_detect.py
"""The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# !/usr/bin/python
import math
import os
import pickle
class GibberishDetector:
_accepted_chars = "abcdefghijklmnopqrstuvwxyz "
_char_positions = dict([(char, idx) for idx, char in enumerate(_accepted_chars)])
def __init__(self):
with open(os.path.join(os.path.dirname(__file__), "gib_model.pki"), "rb") as f:
self.model_data = pickle.load(f)
def is_gibberish(self, item):
model_mat = self.model_data["mat"]
threshold = self.model_data["thresh"]
return self._avg_transition_prob(item, model_mat) <= threshold
def _avg_transition_prob(self, l, log_prob_mat):
""" Return the average transition prob from l through log_prob_mat. """
log_prob = 0.0
transition_ct = 0
for a, b in self._ngram(2, l):
log_prob += log_prob_mat[self._char_positions[a]][self._char_positions[b]]
transition_ct += 1
# The exponentiation translates from log probs to probs.
return math.exp(log_prob / (transition_ct or 1))
def _ngram(self, n, l):
""" Return all n grams from l after normalizing """
filtered = self._normalize(l)
for start in range(0, len(filtered) - n + 1):
yield "".join(filtered[start : start + n])
def _normalize(self, line):
""" Return only the subset of chars from accepted_chars.
This helps keep the model relatively small by ignoring punctuation,
infrequent symbols, etc. """
return [c.lower() for c in line if c.lower() in self._accepted_chars]
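# Illustrative usage sketch (not part of the original source): with the bundled gib_model.pki,
#   GibberishDetector().is_gibberish("zxcvwerkjh")   # expected True for keyboard mashing
#   GibberishDetector().is_gibberish("hello world")  # expected False for normal English
# The exact outcome depends on the trained transition matrix and threshold stored in the pickle.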
| 1.804688
| 2
|
Packages/LiveReload/server/SimpleResourceServer.py
|
kangTaehee/st3
| 4
|
12782539
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class SimpleResourceServer(object):
"""SimpleResourceServer"""
def __init__(self):
self.static_files = []
def has_file(self, path):
"""Traverse added static_files return object"""
for l_file in self.static_files:
if path == l_file['path']:
return l_file
return False
| 2.921875
| 3
|
utility/surface.py
|
strevol-mpi-mis/RAFFT
| 1
|
12782540
|
<reponame>strevol-mpi-mis/RAFFT<gh_stars>1-10
"""Draw a surface from a set of structures
"""
import argparse
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LightSource
from scipy import interpolate
from matplotlib.pyplot import rcParams
import numpy as np
from sklearn import manifold
from RNA import bp_distance
from numpy import zeros, meshgrid, array, mgrid
from numpy.random import RandomState
from random import uniform
def get_distance_matrix(structures):
matrix = zeros((len(structures), len(structures)))
for si, (structi, nrji) in enumerate(structures):
for sj, (structj, nrjj) in enumerate(structures[si+1:], start=si+1):
dist = bp_distance(structi, structj)
matrix[si, sj] = dist
matrix[sj, si] = dist
return matrix
def parse_rafft_output(infile):
results = []
seen = set()
with open(infile) as rafft_out:
seq = rafft_out.readline().strip()
for l in rafft_out:
if not l.startswith("#"):
struct, nrj = l.strip().split()
if struct not in seen:
results += [(struct, float(nrj))]
seen.add(struct)
return results, seq
def parse_barrier_output(infile):
results = []
with open(infile) as barrier_out:
seq = barrier_out.readline().strip()
for l in barrier_out:
val = l.strip().split()
struct, nrj = val[1], float(val[2])
results += [(struct, float(nrj))]
return results, seq
def parse_subopt_output(infile, prob=1.0):
results = []
with open(infile) as barrier_out:
seq = barrier_out.readline().strip()
for l in barrier_out:
val = l.strip().split()
if uniform(0, 1) <= prob:
struct, nrj = val[0], float(val[1])
results += [(struct, float(nrj))]
return results, seq
def parse_arguments():
"""Parsing command line
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('rafft_out', help="rafft_output")
parser.add_argument('--out', "-o", help="outfile")
parser.add_argument('--bar', action="store_true",
help="read barrier output")
parser.add_argument('--sub', action="store_true",
help="read barrier output")
parser.add_argument('--samp_prob', "-sp", type=float,
help="sample from subopt file", default=1.0)
return parser.parse_args()
def main():
args = parse_arguments()
if args.bar:
structures, seq = parse_barrier_output(args.rafft_out)
elif args.sub:
structures, seq = parse_subopt_output(args.rafft_out, args.samp_prob)
else:
structures, seq = parse_rafft_output(args.rafft_out)
dist_mat = get_distance_matrix(structures)
plt.rcParams["font.family"] = "serif"
fsize = 13
plt.rcParams["font.size"] = fsize
plt.rcParams['text.usetex'] = True
fig, ax = plt.subplots()
# map the structures on a plan
seed = RandomState(seed=3)
mds = manifold.MDS(n_components=2, max_iter=5000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=20)
pos = mds.fit_transform(dist_mat)
min_id, mfe_id = 0, min(list(enumerate(structures)),
key=lambda el: el[1][1])[0]
nrjs = [nrj for st, nrj in structures]
ti = np.linspace(np.min(pos) - 1, np.max(pos) + 1, 300)
XI, YI = np.meshgrid(ti, ti)
nrj_ = interpolate.Rbf(pos[:, 0], pos[:, 1], nrjs, function="thin_plate")
p1, p2 = meshgrid(ti, ti)
nrj_c = nrj_(p1, p2)
surf = ax.contour(p1, p2, nrj_c, colors="k", linewidths=0.5, levels=7)
surf = ax.contourf(p1, p2, nrj_c, cmap=cm.coolwarm, alpha=0.3, levels=7)
surf = ax.scatter(pos[:, 0], pos[:, 1], c=nrjs, s=30, lw=0, label='MDS',
cmap=cm.coolwarm, alpha=1.0)
ax.scatter(pos[[min_id, mfe_id], 0], pos[[min_id, mfe_id], 1],
c="black", s=80, lw=0, alpha=1.0)
ax.scatter(pos[[min_id, mfe_id], 0], pos[[min_id, mfe_id], 1],
c=array(nrjs)[[min_id, mfe_id]], s=30, lw=0,
label='MDS', cmap=cm.coolwarm, alpha=1.0)
if args.out:
plt.savefig(args.out, dpi=300, transparent=True)
else:
plt.show()
if __name__ == '__main__':
main()
| 2.640625
| 3
|
src/aadcUserPython/ParkingTrajectory.py
|
LITdrive/aadc2018
| 5
|
12782541
|
# Imports
import math
import numpy as np
import matplotlib.pyplot as plt
class ParkingTrajectoryGenerator:
# Class Variables
# Vehicle Parameters
__l = 0.356 # length between front and rear axle in m
__b = 0.37 # width of car in m
__l_1 = 0.12 # length between front axle and bumper in m
__l_2 = 0.108 # length between rear axle and bumper in m
__alpha_max = math.radians(45) # maximum steering angle in rad
# alpha_c = alpha_max # constant steering angle in rad
__rho_min = 1/math.tan(__alpha_max) # radius of the turning cycle of the car in m
# Driving lane and parking spot parameters
__h_cd = 0.974-2*0.03 # width of driving lane in m
__h_pd = (0.96-3*0.02)/2 # width of parking space in m
__h_pw = 0.85 # depth of parking space in m
__h_ps = (__h_pd - __b)/2 # = h_pr = h_pl = h_ps -> for symmetrical parking -> space between car and parking space boundaries in m
# Parameters for calculation of the Trajectory Points
__num_points_per_segment = 100
__pull_out_left_straight_offset = 0.2
__r_B2 = math.sqrt((__l + __l_1)**2 + (__rho_min + __b/2)**2)
__s_m = -math.sqrt((__rho_min - __b/2)**2 - (__rho_min - __h_pd/2)**2)
__s_max = __h_cd - __r_B2
__s = max(abs(__s_m), abs(__s_max))
# Points of Parking Trajectory
__parkingTrajectoryPoints_x_rear_axle = np.zeros(2*__num_points_per_segment)
__parkingTrajectoryPoints_y_rear_axle = np.zeros(2*__num_points_per_segment)
#__parkingTrajectoryPoints_x_front_axle = np.zeros(2*__num_points_per_segment)
#__parkingTrajectoryPoints_y_front_axle = np.zeros(2*__num_points_per_segment)
__pullOutLeftTrajectoryPoints_x_rear_axle = np.zeros(2*__num_points_per_segment)
__pullOutLeftTrajectoryPoints_y_rear_axle = np.zeros(2*__num_points_per_segment)
#__pullOutLeftTrajectoryPoints_x_front_axle = np.zeros(2*__num_points_per_segment)
#__pullOutLeftTrajectoryPoints_y_front_axle = np.zeros(2*__num_points_per_segment)
# Heading of Parking Trajectory
__parkingTrajectoryHeading_rear_axle = np.zeros(2*__num_points_per_segment)
# Parameter for Representing Circle Arc as Polynomial (Bezier)
__c = 0.55191502449
# Parameters of Steering Angle Controller (Saturated Control) from Paper
__K_t = 8
__K = 5.85
__a_0 = 0.17
__u = np.tan(__alpha_max)/__l
# Vehicle Heading for test purposes (idealised)
__theta = np.zeros(2*__num_points_per_segment)
# Constructor
def __init__(self, targetParkingSpot_x = 0, targetParkingSpot_y = 0):
self.__targetPoint_x_rear_axle = targetParkingSpot_x + self.__h_pw - self.__l_2 - self.__h_ps
self.__targetPoint_y_rear_axle = targetParkingSpot_y
self.__targetPoint_x_front_axle = targetParkingSpot_x + self.__h_pw - self.__l_2 - self.__h_ps - self.__l
self.__targetPoint_y_front_axle = targetParkingSpot_y
self.calcParkingTrajectory()
self.calcPullOutLeftTrajectory()
# Setter
def setTargetParkingSpot(self, targetParkingSpot_x = 0, targetParkingSpot_y = 0):
self.__targetPoint_x_rear_axle = targetParkingSpot_x + self.__h_pw - self.__l_2 - self.__h_ps
self.__targetPoint_y_rear_axle = targetParkingSpot_y
self.__targetPoint_x_front_axle = targetParkingSpot_x + self.__h_pw - self.__l_2 - self.__h_ps - self.__l
self.__targetPoint_y_front_axle = targetParkingSpot_y
self.calcParkingTrajectory()
self.calcPullOutLeftTrajectory()
# Getter
def getParkingStartPoint(self):
return self.__parkingTrajectoryPoints_x_rear_axle[-1], self.__parkingTrajectoryPoints_y_rear_axle[-1]
def getParkingEndPoint(self):
return self.__targetPoint_x_rear_axle, self.__targetPoint_y_rear_axle
def getParkingTrajectoryPolynomials(self):
return self.__parkingTrajectory_polynomial_coefficients_circle_arc_x, self.__parkingTrajectory_polynomial_coefficients_circle_arc_y, self.__parkingTrajectory_polynomial_coefficients_straight_x, self.__parkingTrajectory_polynomial_coefficients_straight_y
def getPullOutLeftTrajectoryPolynomials(self):
return self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_x, self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_y, self.__pullOutLeftTrajectory_polynomial_coefficients_straight_x, self.__pullOutLeftTrajectory_polynomial_coefficients_straight_y
# Functions
def calcParkingTrajectory(self):
# = Pull Out Right Trajectory
# Target Point Rear End of the Parking Spot (Rear end of the axle)
S_x_rear_axle = self.__targetPoint_x_rear_axle - self.__h_pw + self.__l_2 + self.__h_ps + self.__s
S_y_rear_axle = self.__targetPoint_y_rear_axle
#S_x_front_axle = self.targetPoint_x_front_axle - self.h_pw + self.l_2 + self.h_ps + self.s + self.l
#S_y_front_axle = self.targetPoint_y_front_axle
O_x_rear_axle = S_x_rear_axle
O_y_rear_axle = S_y_rear_axle + self.__rho_min
#O_x_front_axle = S_x_front_axle
#O_y_front_axle = S_y_front_axle + self.rho_min
# Points on Unit circle with Origin O
P_0_circle_arc_x = O_x_rear_axle
P_0_circle_arc_y = O_y_rear_axle - 1
P_1_circle_arc_x = O_x_rear_axle - self.__c
P_1_circle_arc_y = O_y_rear_axle - 1
P_2_circle_arc_x = O_x_rear_axle - 1
P_2_circle_arc_y = O_y_rear_axle - self.__c
P_3_circle_arc_x = O_x_rear_axle - 1
P_3_circle_arc_y = O_y_rear_axle
# Polynomial of the circle arc
self.__parkingTrajectory_polynomial_coefficients_circle_arc_x = np.poly1d([self.__rho_min*(P_3_circle_arc_x + 3.*P_1_circle_arc_x - 3.*P_2_circle_arc_x - P_0_circle_arc_x), self.__rho_min*3*(P_2_circle_arc_x - 2*P_1_circle_arc_x + P_0_circle_arc_x), self.__rho_min*3*(P_1_circle_arc_x - P_0_circle_arc_x), self.__rho_min*P_0_circle_arc_x])
self.__parkingTrajectory_polynomial_coefficients_circle_arc_y = np.poly1d([self.__rho_min*(P_3_circle_arc_y + 3.*P_1_circle_arc_y - 3.*P_2_circle_arc_y - P_0_circle_arc_y), self.__rho_min*3*(P_2_circle_arc_y - 2*P_1_circle_arc_y + P_0_circle_arc_y), self.__rho_min*3*(P_1_circle_arc_y - P_0_circle_arc_y), self.__rho_min*P_0_circle_arc_y])
# Polynomial of the straight
self.__parkingTrajectory_polynomial_coefficients_straight_x = np.poly1d([0, 0, S_x_rear_axle - self.__targetPoint_x_rear_axle, self.__targetPoint_x_rear_axle])
self.__parkingTrajectory_polynomial_coefficients_straight_y = np.poly1d([0, 0, S_y_rear_axle - self.__targetPoint_y_rear_axle, self.__targetPoint_y_rear_axle])
self.__parkingTrajectoryPoints_x_rear_axle[ : self.__num_points_per_segment] = np.linspace(self.__targetPoint_x_rear_axle, S_x_rear_axle, self.__num_points_per_segment)
self.__parkingTrajectoryPoints_y_rear_axle[ : self.__num_points_per_segment] = np.ones(self.__num_points_per_segment)*self.__targetPoint_y_rear_axle
#self.__parkingTrajectoryHeading_rear_axle[ : self.__num_points_per_segment] = np.ones(self.__num_points_per_segment)*math.pi
#self.parkingTrajectoryPoints_x_front_axle[0 : self.num_points_per_segment] = np.linspace(self.targetPoint_x_front_axle, S_x_front_axle, self.num_points_per_segment)
#self.parkingTrajectoryPoints_y_front_axle[0 : self.num_points_per_segment] = np.ones(self.num_points_per_segment)*self.targetPoint_y_front_axle
circle_arc_angle = np.linspace(math.pi, math.pi*(3/2), self.__num_points_per_segment)
#heading_angle = np.linspace(math.pi, math.pi/2, self.__num_points_per_segment)
# Vehicle Heading for test
self.__theta[ : self.__num_points_per_segment] = math.pi
self.__theta[self.__num_points_per_segment : ] = np.linspace(math.pi, math.pi/2, self.__num_points_per_segment)
#i = self.__num_points_per_segment
#for angle in circle_arc_angle :
self.__parkingTrajectoryPoints_x_rear_axle[self.__num_points_per_segment : ] = self.__rho_min*np.cos(circle_arc_angle) + O_x_rear_axle
self.__parkingTrajectoryPoints_y_rear_axle[self.__num_points_per_segment : ] = self.__rho_min*np.sin(circle_arc_angle) + O_y_rear_axle
#self.__parkingTrajectoryPoints_x_front_axle[ : self.__num_points_per_segment] = self.__parkingTrajectoryPoints_x_rear_axle[ : self.__num_points_per_segment] - self.__l
#self.__parkingTrajectoryPoints_y_front_axle[ : self.__num_points_per_segment] = self.__parkingTrajectoryPoints_y_rear_axle[ : self.__num_points_per_segment]
#self.__parkingTrajectoryPoints_x_front_axle[self.__num_points_per_segment : ] = self.__parkingTrajectoryPoints_x_rear_axle[self.__num_points_per_segment : ] + np.cos(self.__theta[self.__num_points_per_segment : ])*self.__l
#self.__parkingTrajectoryPoints_y_front_axle[self.__num_points_per_segment : ] = self.__parkingTrajectoryPoints_y_rear_axle[self.__num_points_per_segment : ] + np.sin(self.__theta[self.__num_points_per_segment : ])*self.__l
#self.__parkingTrajectoryHeading_rear_axle[self.__num_points_per_segment : ] = heading_angle
#self.parkingTrajectoryPoints_x_front_axle[i] = self.rho_min*math.cos(angle) + O_x_front_axle
#self.parkingTrajectoryPoints_y_front_axle[i] = self.rho_min*math.sin(angle) + O_y_front_axle
# i += 1
# Printing
#t = np.linspace(0, 1, 100)
#poly_circle_arc_x = self.__parkingTrajectory_polynomial_coefficients_circle_arc_x(t)
#poly_circle_arc_y = self.__parkingTrajectory_polynomial_coefficients_circle_arc_y(t)
#poly_straight_x = self.__parkingTrajectory_polynomial_coefficients_straight_x(t)
#poly_straight_y = self.__parkingTrajectory_polynomial_coefficients_straight_y(t)
#plt.plot(self.__parkingTrajectoryPoints_x_rear_axle, self.__parkingTrajectoryPoints_y_rear_axle, 'b.')
#plt.plot(poly_circle_arc_x, poly_circle_arc_y, 'r.')
#plt.plot(poly_straight_x, poly_straight_y, 'r.')
#plt.show()
#plt.stem(self.__parkingTrajectoryHeading_rear_axle)
#plt.show()
return self.__parkingTrajectory_polynomial_coefficients_circle_arc_x, self.__parkingTrajectory_polynomial_coefficients_circle_arc_y, self.__parkingTrajectory_polynomial_coefficients_straight_x, self.__parkingTrajectory_polynomial_coefficients_straight_y
def calcPullOutLeftTrajectory(self):
# Target Point Rear End of the Parking Spot (Rear end of the axle)
S_x_rear_axle = self.__targetPoint_x_rear_axle - self.__h_pw + self.__l_2 + self.__h_ps + self.__s - self.__pull_out_left_straight_offset
S_y_rear_axle = self.__targetPoint_y_rear_axle
#S_x_front_axle = self.targetPoint_x_front_axle - self.h_pw + self.l_2 + self.h_ps + self.s + self.l
#S_y_front_axle = self.targetPoint_y_front_axle
O_x_rear_axle = S_x_rear_axle
O_y_rear_axle = S_y_rear_axle - self.__rho_min
#O_x_front_axle = S_x_front_axle
#O_y_front_axle = S_y_front_axle + self.rho_min
# Points on Unit circle with Origin O
P_0_circle_arc_x = O_x_rear_axle - 1
P_0_circle_arc_y = O_y_rear_axle
P_1_circle_arc_x = O_x_rear_axle - 1
P_1_circle_arc_y = O_y_rear_axle + self.__c
P_2_circle_arc_x = O_x_rear_axle - self.__c
P_2_circle_arc_y = O_y_rear_axle + 1
P_3_circle_arc_x = O_x_rear_axle
P_3_circle_arc_y = O_y_rear_axle + 1
# Polynomial of the circle arc
self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_x = np.poly1d([self.__rho_min*(P_3_circle_arc_x + 3.*P_1_circle_arc_x - 3.*P_2_circle_arc_x - P_0_circle_arc_x), self.__rho_min*3*(P_2_circle_arc_x - 2*P_1_circle_arc_x + P_0_circle_arc_x), self.__rho_min*3*(P_1_circle_arc_x - P_0_circle_arc_x), self.__rho_min*P_0_circle_arc_x])
self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_y = np.poly1d([self.__rho_min*(P_3_circle_arc_y + 3.*P_1_circle_arc_y - 3.*P_2_circle_arc_y - P_0_circle_arc_y), self.__rho_min*3*(P_2_circle_arc_y - 2*P_1_circle_arc_y + P_0_circle_arc_y), self.__rho_min*3*(P_1_circle_arc_y - P_0_circle_arc_y), self.__rho_min*P_0_circle_arc_y])
# Polynomial of the straight
self.__pullOutLeftTrajectory_polynomial_coefficients_straight_x = np.poly1d([0, 0, S_x_rear_axle - self.__targetPoint_x_rear_axle, self.__targetPoint_x_rear_axle])
self.__pullOutLeftTrajectory_polynomial_coefficients_straight_y = np.poly1d([0, 0, S_y_rear_axle - self.__targetPoint_y_rear_axle, self.__targetPoint_y_rear_axle])
self.__pullOutLeftTrajectoryPoints_x_rear_axle[0 : self.__num_points_per_segment] = np.linspace(self.__targetPoint_x_rear_axle, S_x_rear_axle, self.__num_points_per_segment)
self.__pullOutLeftTrajectoryPoints_y_rear_axle[0 : self.__num_points_per_segment] = np.ones(self.__num_points_per_segment)*self.__targetPoint_y_rear_axle
#self.parkingTrajectoryPoints_x_front_axle[0 : self.num_points_per_segment] = np.linspace(self.targetPoint_x_front_axle, S_x_front_axle, self.num_points_per_segment)
#self.parkingTrajectoryPoints_y_front_axle[0 : self.num_points_per_segment] = np.ones(self.num_points_per_segment)*self.targetPoint_y_front_axle
circle_arc_angle = np.linspace(math.pi, math.pi/2, self.__num_points_per_segment)
i = self.__num_points_per_segment
for angle in circle_arc_angle :
self.__pullOutLeftTrajectoryPoints_x_rear_axle[i] = self.__rho_min*np.cos(angle) + O_x_rear_axle
self.__pullOutLeftTrajectoryPoints_y_rear_axle[i] = self.__rho_min*np.sin(angle) + O_y_rear_axle
#self.parkingTrajectoryPoints_x_front_axle[i] = self.rho_min*math.cos(angle) + O_x_front_axle
#self.parkingTrajectoryPoints_y_front_axle[i] = self.rho_min*math.sin(angle) + O_y_front_axle
i += 1
# Printing
#t = np.linspace(0, 1, 100)
#poly_circle_arc_x = self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_x(t)
#poly_circle_arc_y = self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_y(t)
#poly_straight_x = self.__pullOutLeftTrajectory_polynomial_coefficients_straight_x(t)
#poly_straight_y = self.__pullOutLeftTrajectory_polynomial_coefficients_straight_y(t)
#plt.plot(self.__parkingTrajectoryPoints_x_rear_axle, self.__parkingTrajectoryPoints_y_rear_axle, 'b.')
#plt.plot(self.__pullOutLeftTrajectoryPoints_x_rear_axle, self.__pullOutLeftTrajectoryPoints_y_rear_axle, 'b.')
#plt.plot(poly_circle_arc_x, poly_circle_arc_y, 'r.')
#plt.plot(poly_straight_x, poly_straight_y, 'r.')
#plt.show()
return self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_x, self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_y, self.__pullOutLeftTrajectory_polynomial_coefficients_straight_x, self.__pullOutLeftTrajectory_polynomial_coefficients_straight_y
def getSteeringAngle(self, actualPoint_y, vehicle_heading):
theta = vehicle_heading - math.pi
print(theta)
v = self.__K*(theta - self.__a_0*actualPoint_y)
alpha = np.arctan(self.__l*self.__u*np.tanh(self.__K_t*v))
return alpha
ParkingTrajectoryGenerator1 = ParkingTrajectoryGenerator()
[a, b, c, d] = ParkingTrajectoryGenerator1.getParkingTrajectoryPolynomials()
print(a)
print(b)
print(c)
print(d)
#plt.plot(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_x_front_axle, ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_front_axle, 'b.', ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_x_rear_axle, ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle, 'r.')
#plt.show()
steering_angle = np.zeros(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle.size)
i = 0
for elem in ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle :
steering_angle[i] = ParkingTrajectoryGenerator1.getSteeringAngle(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle[i], ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__theta[i])
i += 1
plt.stem(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__theta)
plt.show()
plt.stem(np.degrees(steering_angle))
plt.show()
#ParkingTrajectoryGenerator1.calcPullOutLeftTrajectory()
| 2.984375
| 3
|
asset/test.py
|
745184532/cmdb
| 251
|
12782542
|
<reponame>745184532/cmdb
import time
print(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
| 2.21875
| 2
|
test1.py
|
Aqua5lad/CDs-Sample-Python-Code
| 0
|
12782543
|
<reponame>Aqua5lad/CDs-Sample-Python-Code
# testing commands & logic by <NAME>
# Using While, If, Else, Print.
n = 20
while n <= 100:
if (n % 2 == 0):
print("this number", n, "is even")
else:
print("this number", n, "is odd")
n = n + 1
| 3.765625
| 4
|
Image_Stitching/testVideoCode.py
|
RogerZhangsc/VDAS
| 0
|
12782544
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 13:07:35 2018
@author: Sunny
"""
import numpy as np
import cv2
print(cv2.__version__)
TOTAL_CAMERAS=1
HEIGHT = 240
WIDTH = 320
RECORD_WIDTH = WIDTH*3
RECORD_HEIGHT = HEIGHT
FPS = 90
cam = []
frame = []
ret = []
rgb = []
i = 0
rgb_current=0
cam = cv2.VideoCapture(0)
#cam1 = cv2.VideoCapture(1)
#cam2 = cv2.VideoCapture(2)
cam.set(3,WIDTH)
cam.set(4,HEIGHT)
cam.set(cv2.CAP_PROP_FPS,FPS)
print(cam.get(3))
print(cam.get(4))
print(cam.get(5))
print(cam.get(cv2.CAP_PROP_FPS))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('C:\\Users\\Sunny\\Desktop\\saveOutput.avi',fourcc, FPS, (RECORD_WIDTH,RECORD_HEIGHT))
x=0
rgb_previous = 0
cv2.namedWindow("Live Feed")
flag_record=True
while True:
final_frame=0
j = 0
ret_current, frame_current = cam.read()
# Our operations on the frame come here
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rgb_current = cv2.cvtColor(frame_current, cv2.COLOR_RGBA2RGB)
rgb_current = cv2.resize(rgb_current,(WIDTH,HEIGHT),interpolation=cv2.INTER_CUBIC);
horizontal_img = cv2.flip(rgb_current, 1 )
# Display the resulting frame
numpy_horizontal = np.hstack((horizontal_img, rgb_current, horizontal_img))
numpy_vertical = np.vstack((numpy_horizontal,numpy_horizontal))
if(flag_record == True ):
out.write(numpy_horizontal)
cv2.imshow("Live Feed",numpy_horizontal)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
elif key == ord('r'):
# read waitKey once per frame so the 'r' toggle is not silently missed by a second call
flag_record = not flag_record
cam.release()
if(flag_record==True):
out.release()
cv2.destroyAllWindows()
| 2.515625
| 3
|
src/biking/views.py
|
AlexDevelop/seen-movies
| 0
|
12782545
|
<gh_stars>0
from rest_framework.viewsets import ModelViewSet
from biking.models import BikeRide
from biking.serializers import BikeRideSerializer
class BikeRideViewSet(ModelViewSet):
    # ModelViewSet already bundles the list/retrieve/create/update/destroy
    # actions, so no additional base class is needed.
    serializer_class = BikeRideSerializer
    queryset = BikeRide.objects.all()
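# Typical wiring (sketch, not part of this file; the route prefix and URL path
# are assumptions for illustration): in urls.py the viewset would be registered
# with a DRF router, e.g.
#   from rest_framework import routers
#   from django.urls import include, path
#   router = routers.DefaultRouter()
#   router.register(r'bike-rides', BikeRideViewSet)
#   urlpatterns = [path('api/', include(router.urls))]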
| 1.867188
| 2
|
appengine/monorail/framework/servlet_helpers.py
|
allaparthi/monorail
| 2
|
12782546
|
<reponame>allaparthi/monorail
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Helper functions used by the Monorail servlet base class."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import calendar
import datetime
import logging
import urllib
from framework import framework_bizobj
from framework import framework_helpers
from framework import permissions
from framework import template_helpers
from framework import urls
from framework import xsrf
_ZERO = datetime.timedelta(0)
class _UTCTimeZone(datetime.tzinfo):
"""UTC"""
def utcoffset(self, _dt):
return _ZERO
def tzname(self, _dt):
return "UTC"
def dst(self, _dt):
return _ZERO
_UTC = _UTCTimeZone()
def GetBannerTime(timestamp):
"""Converts a timestamp into EZT-ready data so it can appear in the banner.
Args:
timestamp: timestamp expressed in the following format:
[year,month,day,hour,minute,second]
e.g. [2009,3,20,21,45,50] represents March 20 2009 9:45:50 PM
Returns:
EZT-ready data used to display the time inside the banner message.
"""
if timestamp is None:
return None
ts = datetime.datetime(*timestamp, tzinfo=_UTC)
return calendar.timegm(ts.timetuple())
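# Illustrative example (not part of the original module): the docstring's sample
# input [2009, 3, 20, 21, 45, 50] is interpreted as 2009-03-20 21:45:50 UTC, so
#   GetBannerTime([2009, 3, 20, 21, 45, 50])
# should return the epoch timestamp 1237585550, while GetBannerTime(None)
# returns None.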
def AssertBasePermissionForUser(user, user_view):
"""Verify user permissions and state.
Args:
user: user_pb2.User protocol buffer for the user
user_view: framework.views.UserView for the user
"""
if permissions.IsBanned(user, user_view):
raise permissions.BannedUserException(
'You have been banned from using this site')
def AssertBasePermission(mr):
"""Make sure that the logged in user can view the requested page.
Args:
mr: common information parsed from the HTTP request.
Returns:
Nothing
Raises:
BannedUserException: If the user is banned.
    PermissionException: If the user does not have permission to view.
"""
AssertBasePermissionForUser(mr.auth.user_pb, mr.auth.user_view)
if mr.project_name and not CheckPerm(mr, permissions.VIEW):
logging.info('your perms are %r', mr.perms)
raise permissions.PermissionException(
'User is not allowed to view this project')
def CheckPerm(mr, perm, art=None, granted_perms=None):
"""Convenience method that makes permission checks easier.
Args:
mr: common information parsed from the HTTP request.
perm: A permission constant, defined in module framework.permissions
art: Optional artifact pb
granted_perms: optional set of perms granted specifically in that artifact.
Returns:
A boolean, whether the request can be satisfied, given the permission.
"""
return mr.perms.CanUsePerm(
perm, mr.auth.effective_ids, mr.project,
permissions.GetRestrictions(art), granted_perms=granted_perms)
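# Illustrative call (sketch; the permission constant and artifact are assumed
# for the example, not taken from this file):
#   if CheckPerm(mr, permissions.EDIT_ISSUE, art=issue):
#     ...  # the signed-in user may edit this issue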
def CheckPermForProject(mr, perm, project, art=None):
"""Convenience method that makes permission checks for projects easier.
Args:
mr: common information parsed from the HTTP request.
perm: A permission constant, defined in module framework.permissions
project: The project to enforce permissions for.
art: Optional artifact pb
Returns:
A boolean, whether the request can be satisfied, given the permission.
"""
perms = permissions.GetPermissions(
mr.auth.user_pb, mr.auth.effective_ids, project)
return perms.CanUsePerm(
perm, mr.auth.effective_ids, project, permissions.GetRestrictions(art))
def ComputeIssueEntryURL(mr, config):
"""Compute the URL to use for the "New issue" subtab.
Args:
mr: commonly used info parsed from the request.
config: ProjectIssueConfig for the current project.
Returns:
A URL string to use. It will be simply "entry" in the non-customized
    case. Otherwise it will be a fully qualified URL that includes some
query string parameters.
"""
if not config.custom_issue_entry_url:
return '/p/%s/issues/entry' % (mr.project_name)
base_url = config.custom_issue_entry_url
sep = '&' if '?' in base_url else '?'
token = xsrf.GenerateToken(
mr.auth.user_id, '/p/%s%s%s' % (mr.project_name, urls.ISSUE_ENTRY, '.do'))
role_name = framework_helpers.GetRoleName(mr.auth.effective_ids, mr.project)
continue_url = urllib.quote(framework_helpers.FormatAbsoluteURL(
mr, urls.ISSUE_ENTRY + '.do'))
return '%s%stoken=%s&role=%s&continue=%s' % (
base_url, sep, urllib.quote(token),
urllib.quote(role_name or ''), continue_url)
def IssueListURL(mr, config, query_string=None):
"""Make an issue list URL for non-members or members."""
url = '/p/%s%s' % (mr.project_name, urls.ISSUE_LIST)
if query_string:
url += '?' + query_string
elif framework_bizobj.UserIsInProject(mr.project, mr.auth.effective_ids):
if config and config.member_default_query:
url += '?q=' + urllib.quote_plus(config.member_default_query)
return url
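# Illustrative behaviour (assuming urls.ISSUE_LIST == '/issues/list'):
#   IssueListURL(mr, config, query_string='q=is%3Aopen')
# returns '/p/<project>/issues/list?q=is%3Aopen', while a project member with
# config.member_default_query == 'owner:me' and no query string gets
# '/p/<project>/issues/list?q=owner%3Ame'.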
| 1.882813
| 2
|
app/account/migrations/0001_initial.py
|
newer027/hlf_backend
| 0
|
12782547
|
# Generated by Django 2.2.3 on 2019-09-20 05:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contract', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('usernamename', models.CharField(max_length=20)),
('realname', models.CharField(max_length=20)),
('passwordword', models.CharField(max_length=20)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('telphone_num', models.CharField(max_length=20)),
('comment', models.TextField(null=True)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='settings', to=settings.AUTH_USER_MODEL)),
('vendor', models.ManyToManyField(related_name='user_settings', to='contract.Vendor')),
],
options={
'verbose_name_plural': 'User Settings',
},
),
]
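# This initial migration would normally be applied with Django's standard
# command (illustrative, assuming the app label is 'account'):
#   python manage.py migrate account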
| 1.765625
| 2
|
sancus/lib/cogs/filter/filter.py
|
Solar-Productions/sancus
| 1
|
12782548
|
import discord
from discord import Embed
from discord.errors import NotFound
from discord.ext import commands
import requests
import asyncio
from lib.bot import bot
class Filter(commands.Cog):
def __init__(self, client: bot):
self.client = client
@commands.Cog.listener()
async def on_message(self, message):
        if not message.author.bot and message.guild is not None:
            # Ask the PurgoMalum API whether the message contains profanity;
            # passing the text through params lets requests URL-encode it.
            check = requests.get(
                "https://www.purgomalum.com/service/containsprofanity",
                params={"text": message.content})
            check = check.text
if str(message.guild.id) in self.client.guilds_:
guild = self.client.guilds_[str(message.guild.id)]
filter = guild["filter"]
words = guild["filterWords"]
delete = guild["filterDelete"]
else:
return
if filter:
                if check == 'true':
                    if delete:
                        try:
                            await message.delete()
                        except NotFound:
                            return
                    else:
                        # Reply with a random insult instead of deleting the message.
                        response = requests.get(
                            "https://insult.mattbas.org/api/insult")
                        embed = Embed(
                            colour=0xff0000,
                            description=response.text
                        )
                        await message.channel.send(embed=embed)
                elif message.content in words:
                    if delete:
                        try:
                            await message.delete()
                        except NotFound:
                            return
                    else:
                        response = requests.get(
                            "https://insult.mattbas.org/api/insult")
                        embed = Embed(
                            colour=0xff0000,
                            description=response.text
                        )
                        await message.channel.send(embed=embed)
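# Assumed shape of the self.client.guilds_ entries, inferred from the lookups
# above (illustrative only):
#   {"filter": True, "filterWords": ["badword"], "filterDelete": False}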
| 2.703125
| 3
|
dlotter/arguments.py
|
dmidk/dlotter
| 0
|
12782549
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Master module for dlotter.arguments
Called from dlotter.__main__
"""
import sys
import argparse
from argparse import ArgumentDefaultsHelpFormatter
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
class arguments:
def __init__(self) -> None:
return
def get_args(self, sysargs):
parent_parser = MyParser(
description='Plot data quick and dirty from NWP output',
formatter_class=ArgumentDefaultsHelpFormatter)
subparser = parent_parser.add_subparsers(dest="cmd")
# Parser for NWP Deterministic
parser_plot = subparser.add_parser('plot', help='Plot NWP output')
parser_plot.add_argument('-p',
'--parameters',
metavar='PARAMETERS',
type=str,
                                 help='Parameters to plot. Separate with ":", e.g. "t2m:w10m:precip:slp:td2m:tcc:lmhc".',
required=True)
parser_plot.add_argument('-f',
'--filetype',
metavar='FILETYPE',
type=str,
help='What filetype are we using? (Options are: grib2)',
default='grib2',
required=False)
parser_plot.add_argument('-d',
'--directory',
type=str,
help='directory to read data from',
default='.')
parser_plot.add_argument('--prefix',
type=str,
help='Set to prefix of files if any',
default='',
required=False)
parser_plot.add_argument('--postfix',
type=str,
help='Set to postfix of files if any',
default='',
required=False)
parser_plot.add_argument('-o',
'--output-dir',
metavar='OUTDIR',
type=str,
help='Directory to place output into',
default='.',
required=False)
parser_plot.add_argument('-l',
'--limit-files',
metavar='LIMIT',
type=int,
                                 help='Only use the first LIMIT files. If set to 0, no limit is used. If LIMIT > 0, files will be sorted by name first.',
default=0,
required=False)
parser_plot.add_argument('-a',
'--area',
metavar='AREA',
type=str,
help='Over which area to plot (Options are: dk, neu, sjalland, disko)',
default="dk",
required=False)
parser_plot.add_argument('--verbose',
action='store_true',
help='Verbose output',
default=False)
        if len(sysargs) == 1:
            parent_parser.print_help()
            sys.exit(2)
        # Parse the supplied argument vector (sysargs[0] is the program name).
        args = parent_parser.parse_args(sysargs[1:])
        return args
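# Typical call site (sketch): the class is driven from dlotter.__main__ with
# something like
#   args = arguments().get_args(sys.argv)
# e.g. for a command line such as
#   dlotter plot -p t2m:precip -d /path/to/grib -o ./plots -a dk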
| 2.703125
| 3
|
navmenu/io/__init__.py
|
rashidsh/navmenu
| 0
|
12782550
|
from navmenu.io.base import BaseIO
from navmenu.io.console import ConsoleIO
from navmenu.io.telegram import TelegramIO
from navmenu.io.vk import VKIO
__all__ = ['BaseIO', 'ConsoleIO', 'TelegramIO', 'VKIO']
| 1.203125
| 1
|