content stringlengths 5 1.05M |
|---|
from .optimize import minimize_variational
__all__ = ['minimize_variational']
|
"""This module defines all the Lambda functions to be deployed to AWS.
The function name would be AWS Lambda function name as well. Everything to be
packaged to Lambda must be contained *inside* the function. Module level
imports are for local testing or packaging use only.
Use ``bert.constants.AWS_LAMBDA_FUNCTION`` to check if function is running
locally or in AWS.
"""
from bert import binding, utils
@binding.follow('noop')
def bert_tess_fullframe_worker_2():
    """Collect light curve data from individual full frame images.

    Use the data to build a light curve file.
    Then, upload the file to S3 bucket.

    TESS FFI Light Curve Format documented at
    https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf#page=32
    """
    import os
    from datetime import datetime

    import boto3
    from astropy.table import Table

    # work_queue yields incoming events; ologger is the bound logger.
    work_queue, done_queue, ologger = utils.comm_binders(
        bert_tess_fullframe_worker_2)

    s3 = boto3.resource('s3')
    # Bucket holding the per-frame CSV fragments produced upstream.
    inbucket = s3.Bucket(name=os.environ.get('CACHEBUCKETNAME'))
    # Destination bucket for the assembled light-curve FITS file.
    bucket_name = os.environ.get('AWSBUCKETNAME')
    bucket = s3.Bucket(name=bucket_name)
    homedir = os.environ.get('HOME')

    # NOTE: To test this, use the example below as the manual test event.
    # DEBUG=true setting will grab it (cannot use DynamoDB but irrelevant
    # for now).
    #
    # Example event:
    # {
    #     "tic_id": "25155310",
    #     "sector": 1,
    #     "camera": 4,
    #     "ccd": 1,
    #     "radius": 2.5,
    #     "cutout_width": 30,
    #     "use_cache": "true"
    # }
    for event in work_queue:
        tic_id = event['tic_id']
        sector = int(event['sector'])
        camera = int(event['camera'])
        ccd = int(event['ccd'])
        radius = float(event['radius'])
        cutout_width = int(event['cutout_width'])

        # e.g. "s0001-4-1" for sector 1, camera 4, CCD 1.
        sec_id = f's{sector:04}-{camera}-{ccd}'
        # Prefix under which this target's per-frame fragments live.
        in_pfx = f'tic{tic_id:0>12}/{sec_id}/r{radius}/w{cutout_width}'
        basename = f'tic{tic_id:0>12}_{sec_id}_lcc.fits'
        s3key = f'tic{tic_id:0>12}/{basename}'
        outfilename = os.path.join(homedir, basename)

        # Use cached LC generated by previous run and skip recalculations.
        # Skipping also means BLS Lambda listening for S3 upload will not run.
        use_cache = event['use_cache'] == 'true'

        # If this output exists and user wants to use the cache, there is
        # nothing to do.
        if use_cache:
            try:
                s3.Object(bucket_name, s3key).load()
            except Exception:  # Does not exist
                pass
            else:  # It exists; nothing to do
                ologger.info(f'{s3key} exists, skipping...')
                continue

        # Table header
        lc_meta = {
            'TELESCOP': 'TESS',
            'CAMERA': camera,
            'SECTOR': sector,
            'CCD': ccd,
            'OBJECT': f'TIC {tic_id}',
            'RADESYS': 'ICRS',
            'AP_RAD': radius,
            'SKYWIDTH': cutout_width,
            'DATE': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}

        # f4 = np.float32, f8 = np.float64, i4 = np.int32
        lc_tab = Table(names=('TIME', 'SAP_FLUX', 'SAP_BKG', 'QUALITY'),
                       dtype=('f8', 'f4', 'f4', 'i4'),
                       meta=lc_meta)

        # Grab all the light curve data points and piece them together.
        for obj in inbucket.objects.filter(
                Prefix=in_pfx, RequestPayer='requester'):
            filename = os.path.join(homedir, os.path.basename(obj.key))
            inbucket.download_file(
                obj.key, filename, ExtraArgs={"RequestPayer": "requester"})
            # Each fragment is a single CSV row:
            # time,flux,background,quality,x,y,ra,dec
            with open(filename, 'r') as fin:
                row = fin.read().split(',')
            # Clean up
            os.remove(filename)
            midtime = float(row[0])
            signal = float(row[1])
            background = float(row[2])
            dqflag = int(row[3])
            xpos = int(row[4])
            ypos = int(row[5])
            ra = float(row[6])
            dec = float(row[7])
            lc_tab.add_row((midtime, signal, background, dqflag))

        # NOTE(review): if the prefix matched no objects, ra/dec/xpos/ypos
        # are unbound below and this raises NameError — confirm upstream
        # always produces at least one fragment per target.

        # Sort table by observation time.
        lc_tab.sort('TIME')

        # More metadata (position of the last fragment read above).
        lc_tab.meta.update({
            'RA_OBJ': ra,
            'DEC_OBJ': dec,
            'APCEN_X': xpos,
            'APCEN_Y': ypos})

        # Write locally to FITS table.
        # Table data and metadata will go to EXT 1.
        lc_tab.write(outfilename, format='fits')
        ologger.info(f'Light Curve File[{outfilename}]')

        # Upload to S3 bucket.
        try:
            bucket.upload_file(
                outfilename, s3key, ExtraArgs={"RequestPayer": "requester"})
        except Exception as exc:
            ologger.error(str(exc))
        else:
            ologger.info(f'Uploaded {s3key} to S3')
        finally:
            # Clean up
            os.remove(outfilename)
|
#
# this is the app/__init__.py file
#
# the presence of this file, even if empty,
# ... facilitates local imports from one file to another,
# ... including importing app code for testing purposes
#
|
# Log Parser for RTI Connext.
#
# Copyright 2016 Real-Time Innovations, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ignore and export unmatched logs into a file for debugging.
Functions:
+ on_unmatched_message: write into a file the unmatched log
+ on_ignored_message: ignore this matched log.
"""
# Disable warnings about unused arguments
# pylint: disable=W0613
# File that accumulates every log line no parser expression matched.
UNMATCHED_LOG_FILENAME = "unmatched.txt"


def on_unmatched_message(match, state, logger):
    """Append the unmatched log line to the debug capture file."""
    line = match[0] + "\n"
    with open(UNMATCHED_LOG_FILENAME, "a") as unmatched:
        unmatched.write(line)
def on_ignored_message(match, state, logger):
    """Deliberately discard this matched log entry (no-op handler)."""
    return None
|
import colorsys
# Named colors as RGB triples with each component in the 0.0-1.0 range.
black = (0,0,0)
white = (1,1,1)
whiteish = (0.9, 0.9, 0.9)
grey = (0.5,0.5,0.5)
darkgrey = (0.32, 0.32, 0.32)
lightgrey = (0.75,0.75,0.75)
red = (1,0,0)
redish = (0.5, 0, 0)
green = (0,1,0)
greenish = (0, 0.5, 0)
blue = (0,0,1)
blueish = (0, 0, 0.5)
darkblue = (0, 0, 0.2)
yellow = (1,1,0)
yellowish = (0.5, 0.5, 0)
cyan = (0,1,1)
cyanish = (0, 0.5, 0.5)
magenta = (1,0,1)
magentish = (0.5, 0, 0.5)
dirtyYellow = (0.5,0.5,0)
orange = (1,0.6,0)
nightshade = (0.12, 0.12, 0.12)
def gray(luminance):
    """Return an achromatic RGB triple with the given luminance."""
    return (luminance,) * 3
def rgb_to_hsv(T):
    """Convert an RGB (or RGBA) tuple to HSV; any alpha component is dropped."""
    rgb = T[:3] if len(T) > 3 else T
    return colorsys.rgb_to_hsv(*rgb)
def hsv_to_rgb(T):
    """Convert an HSV (or HSVA) tuple to RGB; any alpha component is dropped."""
    hsv = T[:3] if len(T) > 3 else T
    return colorsys.hsv_to_rgb(*hsv)
def rgb_to_grayscale(T):
    """Collapse an RGB (or RGBA) color to a gray triple using luma weights."""
    rgb = T[:3] if len(T) > 3 else T
    # Weighted sum approximating perceived brightness (R, G, B weights).
    luminance = 0.21*rgb[0] + 0.71*rgb[1] + 0.07*rgb[2]
    return (luminance,) * 3
|
# 335-self-crossing.py
class Solution(object):
    """LeetCode 335 "Self Crossing".

    The path moves x[0] north, x[1] west, x[2] south, x[3] east and so on
    (counter-clockwise).  O(n) time / O(1) space: track the last few edge
    lengths and whether the spiral has started contracting.
    """

    def isSelfCrossing(self, x):
        """Return True if the path crosses (or touches) itself.

        :type x: List[int]
        :rtype: bool
        """
        n = len(x)
        if n < 4:
            # Fewer than four edges can never meet a previous edge.
            return False
        shrinking = False   # Has the spiral started to contract?
        prev4 = 0           # edge length four steps back
        prev3 = 0           # edge length three steps back
        prev2 = x[0]        # edge length two steps back (the bound to beat)
        prev1 = x[1]        # previous edge length
        # BUG FIX: the original used Python 2's `xrange`, which is a
        # NameError on Python 3 (this file already uses print() calls).
        for i in range(2, n):
            curr = x[i]
            if not shrinking:
                if curr <= prev2:
                    # This edge starts the contraction phase.
                    shrinking = True
                    if curr < prev2 - prev4:
                        # Falls short of the edge four back: only the
                        # previous edge bounds the next move.
                        prev2 = prev1
                    else:
                        # May touch the edge three back: tighten the bound.
                        prev2 = prev1 - prev3
                    prev1 = curr
                else:
                    # Still growing outward: slide the window of lengths.
                    prev4 = prev3
                    prev3 = prev2
                    prev2 = prev1
                    prev1 = curr
            else:
                # Once contracting, reaching the bounding edge two steps
                # back means the path crosses itself.
                if curr >= prev2:
                    return True
                prev2 = prev1
                prev1 = curr
        return False
if __name__ == "__main__":
    # Quick smoke test; guarded so importing this module has no side effects.
    s = Solution()
    print(s.isSelfCrossing([3, 3, 4, 2, 2]))
|
import rospy
from mavros_msgs.msg import PositionTarget
from geometry_msgs.msg import PoseStamped, Point
from arion.offboard import OffboardControl
from arion.subscriber.point_subscriber import PointSubscriber
from arion.subscriber.position_subscriber import CurrentPositionSubscriber
class PositionControlRawNode(OffboardControl, CurrentPositionSubscriber, PointSubscriber):
    """ROS node that republishes smoothed target points as raw MAVROS setpoints.

    Combines offboard control, a current-position subscriber and a target
    point subscriber (via the mixin base classes) and streams
    ``PositionTarget`` messages to ``mavros/setpoint_raw/local``.
    """

    # type_mask applied to every PositionTarget message.
    # NOTE(review): presumably a PX4 loiter-style mask value — confirm.
    LOITER = 12288

    def __init__(self):
        # NOTE(review): rospy.get_param/Publisher are called here, but
        # rospy.init_node only happens later in run() — confirm the node is
        # initialized before this constructor executes.
        topic_in = rospy.get_param('~raw_point_topic', '/arion/raw_point')
        self.rate = rospy.get_param('~raw_point_rate', 20)
        self.message_pub = rospy.Publisher('mavros/setpoint_raw/local', PositionTarget, queue_size=10)
        self.target_position = PositionTarget()
        self.seq = 1
        # Wire up the mixin subscribers/controllers.
        self.start_point(topic_in)
        self.start_offboard()
        self.start_current_position()
        self.smooth_factor = 0.9
        self.mask = PositionControlRawNode.LOITER

    def publish_position_message(self):
        """Publish one PositionTarget built from the current smoothed point self.p."""
        self.target_position.header.stamp = rospy.Time.now()
        self.target_position.header.seq = self.seq
        self.target_position.header.frame_id = "enu_world"
        self.target_position.coordinate_frame = PositionTarget.FRAME_LOCAL_NED
        self.target_position.position.x = self.p.x
        self.target_position.position.y = self.p.y
        self.target_position.position.z = self.p.z
        self.target_position.type_mask = self.mask
        self.target_position.yaw = 0
        self.target_position.yaw_rate = 1
        self.message_pub.publish(self.target_position)
        self.seq = self.seq + 1

    def warm_position(self, rate):
        """Pre-stream ~100 setpoints at the current pose before taking control."""
        for i in range(100):
            p = self.current_position.pose.position
            self.smooth_point(p.x, p.y, p.z, self.smooth_factor)
            self.publish_position_message()
            rate.sleep()

    def run(self):
        """Initialize the node, warm up, then publish setpoints until shutdown."""
        rospy.init_node('control_arion', anonymous=True, log_level= rospy.INFO)
        r = rospy.Rate(self.rate)
        self.warm_position(r)
        self.take_control(self.publish_position_message)
        while not rospy.is_shutdown():
            self.publish_position_message()
            r.sleep()
        self.release_control()
|
import json
import os
import slurm
from assertpy import assert_that
from jsonschema import validate
from slurm.pcluster_slurm_config_generator import generate_slurm_config_files
# JSON schema for the cluster-configuration input consumed by the slurm
# config generator.  Top level is {"cluster": {...}}; queue_settings maps
# queue names to their compute resources, and scaling/default_queue/label
# are all required.
INPUT_SCHEMA = {
    "type": "object",
    "properties": {
        "cluster": {
            "type": "object",
            "properties": {
                "queue_settings": {
                    "type": "object",
                    # Queue names: alphanumeric plus '-' and '_'.
                    "patternProperties": {
                        "^[a-zA-Z0-9-_]+$": {
                            "type": "object",
                            "properties": {
                                "compute_resource_settings": {
                                    "type": "object",
                                    # Compute resource names: alphanumeric plus '-'.
                                    "patternProperties": {
                                        "^[a-zA-Z0-9-]+$": {
                                            "type": "object",
                                            "properties": {
                                                "instance_type": {"type": "string"},
                                                "min_count": {"type": "integer"},
                                                "max_count": {"type": "integer"},
                                                "vcpus": {"type": "integer"},
                                                "gpus": {"type": "integer"},
                                                "spot_price": {"type": "number"},
                                                "enable_efa": {"type": "boolean"},
                                            },
                                            "additionalProperties": False,
                                            "required": ["instance_type", "min_count", "max_count", "vcpus", "gpus"],
                                        }
                                    },
                                },
                                "placement_group": {"type": ["string", "null"]},
                                "enable_efa": {"type": "boolean"},
                                "disable_hyperthreading": {"type": "boolean"},
                                "compute_type": {"type": "string"},
                            },
                            "additionalProperties": False,
                            "required": ["compute_resource_settings"],
                        }
                    },
                    "additionalProperties": False,
                },
                "scaling": {
                    "type": "object",
                    "properties": {"scaledown_idletime": {"type": "integer"}},
                    "required": ["scaledown_idletime"],
                },
                "default_queue": {"type": "string"},
                "label": {"type": "string"},
            },
            "additionalProperties": False,
            "required": ["queue_settings", "scaling", "default_queue", "label"],
        }
    },
    "additionalProperties": False,
    "required": ["cluster"],
}
def _test_input_file_format(input_file):
    """Validate that the sample input JSON conforms to INPUT_SCHEMA.

    Raises jsonschema.ValidationError when the file does not match.
    BUG FIX: the file handle was previously opened inline and never closed;
    use a context manager so it is released deterministically.
    """
    with open(input_file) as config_file:
        cluster_config = json.load(config_file)
    validate(instance=cluster_config, schema=INPUT_SCHEMA)
def test_generate_slurm_config_files(mocker, test_datadir, tmpdir):
    """End-to-end check of generate_slurm_config_files against fixture outputs."""
    input_file = str(test_datadir / "sample_input.json")
    # Fail early with a schema error rather than a confusing template error.
    _test_input_file_format(input_file)
    # Pin hostname/head-node IP lookups so the generated configs are deterministic.
    mocker.patch("slurm.pcluster_slurm_config_generator.gethostname", return_value="ip-1-0-0-0", autospec=True)
    mocker.patch(
        "slurm.pcluster_slurm_config_generator._get_head_node_private_ip", return_value="ip.1.0.0.0", autospec=True
    )
    template_directory = os.path.dirname(slurm.__file__) + "/templates"
    generate_slurm_config_files(tmpdir, template_directory, input_file, dryrun=False)
    # Per-queue partition and gres files.
    for queue in ["efa", "gpu", "multiple_spot"]:
        for file_type in ["partition", "gres"]:
            file_name = f"pcluster/slurm_parallelcluster_{queue}_{file_type}.conf"
            _assert_files_are_equal(tmpdir / file_name, test_datadir / "expected_outputs" / file_name)
    # Top-level configuration files.
    for file in ["slurm_parallelcluster.conf", "slurm_parallelcluster_gres.conf"]:
        _assert_files_are_equal(tmpdir / file, test_datadir / "expected_outputs" / file)
    _assert_files_are_equal(
        tmpdir / "pcluster/instance_name_type_mappings.json",
        test_datadir / "expected_outputs/pcluster/instance_name_type_mappings.json",
    )
def _assert_files_are_equal(file, expected_file):
    """Assert a generated file matches its expected fixture.

    Any ``<DIR>`` placeholder in the fixture is replaced with the generated
    file's directory before comparing.
    """
    with open(file, "r") as actual_fh, open(expected_file, "r") as expected_fh:
        expected_content = expected_fh.read().replace("<DIR>", os.path.dirname(file))
        assert_that(actual_fh.read()).is_equal_to(expected_content)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LMDBIOTensor."""
import os
import sys
import shutil
import tempfile
import numpy as np
import tensorflow_io as tfio
def test_lmdb_read_from_file():
    """test_read_from_file"""
    # Copy database out because we need the path to be writable to use locks.
    path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_lmdb", "data.mdb"
    )
    tmp_path = tempfile.mkdtemp()
    filename = os.path.join(tmp_path, "data.mdb")
    shutil.copy(path, filename)
    lmdb = tfio.IOTensor.from_lmdb(filename)
    # Keys in the fixture are the stringified integers "0".."9" (as bytes).
    assert np.all([key.numpy() for key in lmdb] == [str(i).encode() for i in range(10)])
    # Each value maps key i to the letter chr(ord('a') + i).
    for key in lmdb:
        assert lmdb[key].numpy() == str(chr(ord("a") + int(key.numpy()))).encode()
    # TODO: Not working for Windows yet
    if sys.platform in ("linux", "darwin"):
        shutil.rmtree(tmp_path)
if __name__ == "__main__":
    # BUG FIX: the original called `test.main()`, but no `test` module is
    # imported anywhere in this file, so running it as a script raised
    # NameError.  Invoke the test function directly instead.
    test_lmdb_read_from_file()
|
"""
This is the main class and the entry point for the programmer.
It provides a mechanism to treat multiple dictionaries as if they were one
dictionary.
Also provided is the ability to reference parts and still maintain the idea of it
being one dictionary.
With the ability to delete from the dictionary and the ability to convert values
on access.
"""
from .versioning import versioned_iterable, versioned_value
from .converter import Converters, Converter
from .not_found import NotFound
from .joiner import dot_joiner
from .path import Path
from collections.abc import Mapping
import logging
log = logging.getLogger("delfick_project.option_merge.merge")
class KeyValuePairsConverter(object):
    """Build a MergedOptions out of an iterable of (key, value) pairs.

    Each key is a sequence whose first element is the top-level name and
    whose remainder describes the nested path to the value.
    """

    def __init__(self, pairs, source=None):
        self.pairs = pairs
        self.source = source

    def convert(self):
        """Return us a MergedOptions from our pairs"""
        from . import helper as hp

        dicts = [hp.make_dict(key[0], key[1:], value) for key, value in self.pairs]
        return MergedOptions().using(*dicts, source=self.source)
class AttributesConverter(object):
    """Convert the attributes of an arbitrary object into a MergedOptions.

    When no explicit ``attributes`` list is given, ``dir(obj)`` is used and
    underscore-prefixed names are skipped unless ``include_underlined`` is set.
    Values found in ``ignoreable_values`` are dropped, and the whole result
    can optionally be nested under the ``lift`` path.
    """

    def __init__(
        self,
        obj,
        attributes=None,
        include_underlined=False,
        lift=None,
        ignoreable_values=None,
        source=None,
    ):
        self.obj = obj
        self.lift = lift
        self.source = source
        self.attributes = attributes
        self.ignoreable_values = ignoreable_values
        self.include_underlined = include_underlined
        # A single string lift path is normalised to a one-element list.
        if isinstance(self.lift, str):
            self.lift = [self.lift]

    def convert(self):
        """Return us a MergedOptions from our attributes"""
        names = self.attributes
        if not names:
            names = dir(self.obj)
            if not self.include_underlined:
                names = [name for name in names if not name.startswith("_")]

        options = MergedOptions()
        for name in names:
            if not hasattr(self.obj, name):
                continue
            value = getattr(self.obj, name)
            if self.ignoreable_values and value in self.ignoreable_values:
                continue
            options[name] = value

        if not self.lift:
            return options

        lifted = MergedOptions()
        lifted.storage.add(Path.convert(self.lift), options)
        return lifted
class ConverterProperty(object):
    """Descriptor exposing a converter class as a ready-made factory.

    Accessing the property yields a callable; invoking that callable
    instantiates the converter with the given arguments and immediately
    returns the result of its ``convert()`` method.
    """

    def __init__(self, converter):
        self.converter = converter

    def __get__(self, obj=None, owner=None):
        def make(*args, **kwargs):
            return self.converter(*args, **kwargs).convert()

        return make
class MergedOptions(dict, Mapping):
    """
    Wrapper around multiple dictionaries to behave as one.

    Usage::

        options = MergedOptions.using(options1, options2, source="SomePlace")

    Is equivalent to::

        options = MergedOptions()
        options.update(options1, source="SomePlace")
        options.update(options2, source="SomePlace")

    The later an option is added, the more influence it has.
    i.e. when a key is accessed, later options are looked at first.

    When you delete a key, it removes it from the first dictionary it can find.
    This means a key can change value when deleted rather than disappearing altogether

    It will also merge deeply.  So::

        options1 = {'a':{'b':1, 'c':3}, 'b':5}
        options2 = {'a':{'b':4}, 'd':7}
        merged = MergedOptions.using(options1, options2)
        merged['a'] == MergedOptions(prefix='a', <same_options>)
        merged['a']['b'] == 4
        merged['a']['c'] == 3
        merged['d'] == 7

    You can also change deeply nested keys::

        # You can get keys with "a.b" but setting them must separate the parts of the structure
        merged[["a", "b"]] = 5
        merged["a"].as_dict() == {"b": 5, "c": 3}

    .. note:: MergedOptions uses a cache system to avoid having to repeatedly iterate
        through the underlying data structures.

        A side effect of this caching is that changes in the underlying structures won't
        cause a cache invalidation in the MergedOptions object.

        If you wish for changes to be made, make them on the MergedOptions object.
        (Note that changing a merged options object is an additive operation and will not change
        the underlying data)
    """

    # Property-style factories for the two converter helpers above.
    Attributes = ConverterProperty(AttributesConverter)
    KeyValuePairs = ConverterProperty(KeyValuePairsConverter)

    def __init__(
        self, prefix=None, storage=None, dont_prefix=None, converters=None, ignore_converters=False
    ):
        self.prefix_list = prefix
        self.converters = converters
        self.dont_prefix = dont_prefix
        self.ignore_converters = ignore_converters
        if not self.dont_prefix:
            self.dont_prefix = []
        if not self.prefix_list:
            self.prefix_list = []
        if not self.converters:
            self.converters = Converters()
        # A plain string prefix is treated as a single-element path.
        if isinstance(self.prefix_list, str):
            self.prefix_list = [self.prefix_list]
        self.prefix_string = dot_joiner(self.prefix_list, list)
        self.storage = storage
        # Imported here rather than at module level — presumably to avoid a
        # circular import with .storage.
        from .storage import Storage

        if self.storage is None:
            self.storage = Storage()

    @classmethod
    def using(cls, *options, **kwargs):
        """
        Convenience for calling update multiple times

        .. code-block:: python

            m = MergedOptions.using({"a": 1}, {"b": 2})

        is equivalent to:

        .. code-block:: python

            m = MergedOptions()
            m.update({"a": 1})
            m.update({"b": 2})

        Any kwargs given to ``using`` is passed into ``update`` for
        each provided dictionary.
        """
        prefix = kwargs.get("prefix")
        storage = kwargs.get("storage")
        converters = kwargs.get("converters")
        dont_prefix = kwargs.get("dont_prefix")
        ignore_converters = kwargs.get("ignore_converters")
        merged = cls(
            prefix=prefix,
            storage=storage,
            dont_prefix=dont_prefix,
            converters=converters,
            ignore_converters=ignore_converters,
        )
        for opts in options:
            merged.update(opts, **kwargs)
        return merged

    @property
    def version(self):
        # Proxy the storage's version counter (used by the caching decorators).
        return self.storage.version

    def update(self, options, source=None, **kwargs):
        """
        Add new options to the storage under this prefix.

        The later options are added, the more influence they have.
        """
        if options is None:
            return
        self.storage.add(Path(self.prefix_list), options, source=source)

    @versioned_value
    def __getitem__(self, path, ignore_converters=False):
        """
        Access some path

        Return the first value it comes across

        Raise KeyError if nothing has the specified key

        for example:

        .. code-block:: python

            m = MergedOptions.using({"a": {"b": 2}})
            assert m['a.b'] == 2
        """
        path = self.converted_path(
            path,
            ignore_converters=ignore_converters
            or self.ignore_converters
            or getattr(path, "ignore_converters", False),
        )
        for val, return_as_is in self.values_for(path, ignore_converters=path.ignore_converters):
            if return_as_is:
                # Already-converted values are handed back untouched.
                return val
            if any(isinstance(val, unprefixed) for unprefixed in self.dont_prefix):
                return val
            elif type(val) in (dict, MergedOptions) or isinstance(val, dict):
                # Dictionaries are returned as a MergedOptions view at this path.
                return self.prefixed(path, already_prefixed=True)
            else:
                return val
        raise KeyError(path)

    def __contains__(self, path):
        """
        Ask storage if it has a path

        .. code-block:: python

            m = MergedOptions.using({"a": 1})
            assert "a" in m
            assert "b" not in m
        """
        try:
            self.storage.get(self.converted_path(path))
            return True
        except KeyError:
            return False

    def get(self, path, default=None, ignore_converters=False):
        """
        Get some path or return default value

        .. code-block:: python

            m = MergedOptions.using({"a": 1})
            assert m.get("a") == 1
            assert m.get("b") == None
            assert m.get("b", 2) == 2

        You may also specify ``ignore_converters`` and it won't take the
        the converters into account.

        .. code-block:: python

            m = MergedOptions.using({"a": 1})
            m.add_converter(Converter(convert=add_one, convert_path=["a"]))
            m.converters.activate()
            assert m["a"] == 2
            assert m.get("a", ignore_converters=True) == 1
        """
        try:
            return self.__getitem__(path, ignore_converters=ignore_converters)
        except KeyError:
            return default

    def source_for(self, path, chain=None):
        """
        Proxy self.storage.source_for

        Source is specifying in calls to ``__init__`` and ``update`` and this
        will find the entries for the specified path an return the first source
        it finds.
        """
        path = Path.convert(path, self).ignoring_converters(True)
        return self.storage.source_for(path, chain)

    def __setitem__(self, path, value):
        """
        Set a key in the storage

        This takes into account the prefix on this option as well as the provided
        path.

        .. code-block:: python

            m = MergedOptions.using({"a": 1})
            assertEqual(m.as_dict(), {"a": 1})
            a = m["a"]
            a['b'] = 2
            assertEqual(m.as_dict(), {"a": 1, "b": 2})
        """
        if isinstance(path, str):
            path = [path]
        self.storage.add(self.converted_path(path), value)

    def __delitem__(self, path):
        """
        Delete a key from the storage

        .. code-block:: python

            m = MergedOptions.using({"a": 1}, {"a": 2})
            assert m['a'] == 2
            del m['a']
            assert m['a'] == 1
        """
        self.storage.delete(self.converted_path(path))

    def __iter__(self):
        """Iterate over the keys"""
        return iter(self.keys())

    def __len__(self):
        """Get number of keys we have"""
        return len(list(self.keys()))

    def __eq__(self, other):
        """Equal to another merged options if has same storage and prefix"""
        return (
            isinstance(other, self.__class__)
            and other.storage is self.storage
            and other.prefix_list == self.prefix_list
        )

    @versioned_iterable
    def values_for(self, path, ignore_converters=False):
        """Get all known values for some path"""
        path = self.converted_path(
            path, ignore_converters=ignore_converters or getattr(path, "ignore_converters", False)
        )
        if not path.ignore_converters and not path.waiting():
            if path.converted():
                # Cached conversion result: yield it flagged "return as is".
                yield path.converted_val(), True
                return
            if path.find_converter()[1]:
                # A converter applies: convert the raw value once and yield it.
                untouched = self[path.ignoring_converters()]
                yield path.do_conversion(untouched)
                return
        for info in self.storage.get_info(path):
            try:
                yield info.value_after(path), False
            except NotFound:
                pass

    def prefixed(self, path, ignore_converters=False, already_prefixed=False):
        """Return a MergedOptions prefixed to this path"""
        # NOTE(review): already_prefixed is accepted but unused here —
        # presumably kept for interface compatibility; confirm.
        return self.__class__(
            self.converted_path(path, ignore_converters=ignore_converters),
            storage=self.storage,
            dont_prefix=self.dont_prefix,
            converters=self.converters,
            ignore_converters=ignore_converters,
        )

    def root(self):
        """Return a MergedOptions looking at the root of the storage"""
        return self.__class__(
            "",
            storage=self.storage,
            dont_prefix=self.dont_prefix,
            converters=self.converters,
            ignore_converters=self.ignore_converters,
        )

    def wrapped(self):
        """
        Return a MergedOptions with this inside

        Equivalent to:

        .. code-block:: python

            m = MergedOptions.using("a")
            wrapped = MergedOptions.using(m, converters=m.converters, dont_prefix=m.dont_prefix)
        """
        return self.__class__.using(self, converters=self.converters, dont_prefix=self.dont_prefix)

    def keys(self, ignore_converters=False):
        """Return a de-duplicated list of the keys we know about"""
        return self.storage.keys_after(self.prefix_string, ignore_converters=ignore_converters)

    # Key order is storage-defined, so reversal is a no-op alias.
    reversed_keys = keys

    def items(self, ignore_converters=False):
        """Iterate over [(key, value), ...] pairs"""
        for key in self.keys(ignore_converters=ignore_converters):
            yield key, self.__getitem__(key, ignore_converters=ignore_converters)

    @versioned_iterable
    def values(self):
        """Return the values in the MergedOptions."""
        for key in self.keys():
            yield self[key]

    def __repr__(self):
        return f"MergedOptions({self.prefix_string})"

    def path(self, path, **kwargs):
        """Return us a path instance"""
        # Fill in defaults from this instance without clobbering caller kwargs.
        for key, val in (
            ("configuration", self),
            ("converters", self.converters),
            ("ignore_converters", self.ignore_converters),
        ):
            if key not in kwargs:
                kwargs[key] = val
        return Path(path, **kwargs)

    def converted_path(self, path, ignore_converters=False, converters=None):
        """Convert a path into a Path object with a prefixed path"""
        if converters is None:
            converters = self.converters
        path_type = type(path)
        isPath = path_type is Path
        # Fast path: an already-suitable Path object is returned untouched.
        if isPath and path.ignore_converters is ignore_converters and path.converters is converters:
            return path
        if isPath:
            joined = path.joined()
        else:
            from . import helper as hp

            if path_type in (list, tuple):
                path, joined = hp.prefixed_path_list(path, self.prefix_list)
            else:
                path, joined = hp.prefixed_path_string(path, self.prefix_string)
        return Path.convert(path, self, converters=converters, joined=joined).ignoring_converters(
            ignore_converters
        )

    def add_converter(self, converter):
        """Add a converter to our collection"""
        if converter not in self.converters:
            self.converters.append(converter)

    def install_converters(self, converters, make_converter):
        """
        For each specified converter, make a converter function and install
        a converter for that name.

        .. code-block:: python

            def make_converter(name, transformer):
                def convert(path, val):
                    return transformer(val)
                return convert

            m = MergedOptions.using({"a": 1, "b": 2})
            m.install_converters({"a": lambda v: v+1, "b": lambda v: v*2}, make_converter)
            m.converters.activate()
            assert m['a'] == 2
            assert m['b'] == 4
        """
        for name, spec in converters.items():
            convert = make_converter(name, spec)
            self.add_converter(Converter(convert=convert, convert_path=[name]))

    def as_dict(self, key="", ignore_converters=True, seen=None, ignore=None):
        """Collapse the storage at this prefix into a single dictionary"""
        return self.storage.as_dict(
            self.converted_path(key, ignore_converters=ignore_converters), seen=seen, ignore=ignore
        )
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import os
import subprocess
class ServerHandler(BaseHTTPRequestHandler):
    """Toy HTTP handler: serves a static HTML page, runs Python/PHP scripts,
    and streams JPG images from the sibling ``create-own-server`` directory.
    """

    def _set_headers(self, header='text/html'):
        """Send a 200 response with the given Content-type header."""
        self.send_response(200)
        self.send_header('Content-type', header)
        self.end_headers()

    def do_GET(self):
        """Route GET requests by path: HTML, Python, PHP, JPG, else 404 page."""
        rt = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'create-own-server')
        # HTML pages routing----------------
        if self.path == '/abc.html' or self.path == '/':
            self.path = '/abc.html'
            filename = rt + self.path
            self._set_headers()
            with open(filename, 'rb') as fh:
                html = fh.read()
            self.wfile.write(html)
        # Python pages routing---------------------------
        elif self.path == '/xyz.py':
            filename = rt + self.path
            self._set_headers()
            # BUG FIX: passing a list together with shell=True runs only
            # "python" through the shell and drops the filename argument.
            # Use the plain list form (shell=False) so the script executes.
            html = subprocess.check_output(["python", filename])
            self.wfile.write(html)
        # PHP pages routing----------------
        elif self.path == '/test.php':
            filename = rt + self.path
            self._set_headers()
            # BUG FIX: same list+shell=True problem as above.
            html = subprocess.check_output(["php", filename])
            self.wfile.write(html)
        # JPG images routing----------------
        elif self.path.endswith(".jpg"):
            self._set_headers('image/jpg')
            filename = rt + self.path
            with open(filename, 'rb') as fh:
                html = fh.read()
            self.wfile.write(html)
        # NOT FOUND pages routing----------------
        else:
            self._set_headers()
            # BUG FIX: the 404 markup was malformed (unclosed <div> and a
            # truncated "</html" closing tag).
            self.wfile.write(
                "<!DOCTYPE html><html><body><div><h1>404 NOT FOUND</h1></div></body></html>".encode())

    def do_HEAD(self):
        """HEAD: just the headers."""
        self._set_headers()

    def do_POST(self):
        """POST: acknowledge with a fixed page."""
        self._set_headers()
        # BUG FIX: wfile expects bytes; writing str raised TypeError on Python 3.
        self.wfile.write("<html><body><h1>POST!</h1></body></html>".encode())
def run(server_class=HTTPServer, handler_class=ServerHandler, port=80):
    """Build the HTTP server on the given port and serve until Ctrl-C."""
    address = ('', port)
    httpd = server_class(address, handler_class)
    print('Start working server...')
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.server_close()


if __name__ == "__main__":
    run()
|
from django.contrib import messages
from django.views.generic import FormView, TemplateView, View
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout
from django.utils.translation import ugettext as __
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
from .forms import RegistrationForm
# Create your views here.
class LoginView(FormView):
    """Render and process the standard Django authentication form."""
    form_class = AuthenticationForm
    template_name = 'authentication/login.html'
class RegistrationView(FormView):
    """Sign-up view: saves the registration form and redirects with a flash message."""
    form_class = RegistrationForm
    template_name = 'authentication/registration.html'

    def form_valid(self, form):
        """Persist the new user; flash success or failure and redirect accordingly."""
        try:
            form.save()
            messages.success(self.request, __('User account successfully created'))
            return HttpResponseRedirect(reverse_lazy('login'))
        except Exception as exception:
            print("Exception occurred --> {}".format(exception.__str__()))
            # BUG FIX: the failure path used messages.success, flashing an
            # error text at the success level; use messages.error instead.
            messages.error(self.request, __('User account could not be created, please try again'))
            return HttpResponseRedirect(reverse_lazy('registration'))
class LogoutView(View):
    """Log the current user out and send them back to the homepage."""
    def get(self, request, *args, **kwargs):
        logout(request)
        return HttpResponseRedirect(reverse_lazy('homepage'))
|
import matplotlib.pyplot as plt
from pytrends.request import TrendReq
from datetime import datetime
import json
import pandas as pd
import pprint
# Fetch relative popularity of several programming languages from Google
# Trends (Japan, last five years).
pytrends = TrendReq(hl='ja-JP', tz=360)
kw_list = ["Java","Python","Ruby","Javascript","PHP"]
pytrends.build_payload(kw_list, timeframe='today 5-y', geo='JP')
interest_over_time_df = pytrends.interest_over_time()
print(interest_over_time_df)

# Plot the interest-over-time data and save the chart to disk.
interest_over_time_df.plot(figsize=(10, 2))
plt.savefig('./test.png')
plt.close('all')

# Convert from pandas to JSON (kept for reference).
# interest_over_time_json = interest_over_time_df.to_json()
# print(interest_over_time_json)
|
version = "2020-05-27" |
# Copyright (c) 2013-2014 Parallels, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import os
from oslo.config import cfg
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from pcsnovadriver.pcs import prlsdkapi_proxy
# Shortcut to the prlsdkapi constants namespace.
pc = prlsdkapi_proxy.consts

# Driver-specific oslo.config options.
pcs_vif_opts = [
    cfg.BoolOpt('pcs_use_dhcp',
                default=False,
                help='Use DHCP agent for network configuration.'),
]

CONF = cfg.CONF
CONF.register_opts(pcs_vif_opts)

LOG = logging.getLogger(__name__)
def get_bridge_ifaces(bridge):
    """Return the names of the interfaces enslaved to the given Linux bridge.

    Reads the kernel's sysfs view; raises OSError if the bridge does not exist.
    """
    brif_dir = os.path.join('/sys', 'class', 'net', bridge, 'brif')
    return os.listdir(brif_dir)
def pcs_create_ovs_vif_port(bridge, dev, iface_id, iface_name,
                            mac, instance_id):
    """Attach *dev* to the OVS *bridge*, tagging the port with the Neutron
    iface id/name, MAC and owning instance UUID via external-ids.
    """
    utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
                  bridge, dev,
                  '--', 'set', 'Interface', dev,
                  'external-ids:iface-id=%s' % iface_id,
                  'external-ids:iface-name=%s' % iface_name,
                  'external-ids:iface-status=active',
                  'external-ids:attached-mac=%s' % mac,
                  'external-ids:vm-uuid=%s' % instance_id,
                  run_as_root=True)
class PCSVIFDriver(object):
    """Dispatches VIF setup/plug/unplug to a type-specific helper class."""

    def get_firewall_required(self):
        """Nova's firewall is deprecated, let's assume, that we
        always use neutron's firewall and connect instances to
        integration bridge via intermediate linux bridge.
        """
        return True

    def _get_vif_class(self, instance, vif):
        # Only OVS-backed VIFs are supported; anything else is an error.
        # NOTE(review): VifOvsHybrid/VifOvsEthernet are presumably defined
        # later in this module — confirm.
        if vif['type'] is None:
            raise exception.NovaException(
                _("vif_type parameter must be present "
                  "for this vif_driver implementation"))
        elif vif['type'] == network_model.VIF_TYPE_OVS:
            if self.get_firewall_required():
                return VifOvsHybrid()
            else:
                return VifOvsEthernet()
        else:
            raise exception.NovaException(
                _("Unexpected vif_type=%s") % vif['type'])

    def setup_dev(self, driver, instance, sdk_ve, vif):
        """This method is called before VE start and should
        do all work, that can't be done on running VE.
        """
        LOG.info("vif.setup_dev: %s:%s" % (instance['name'], vif['devname']))
        vif_class = self._get_vif_class(instance, vif)
        vif_class.setup_dev(driver, instance, sdk_ve, vif)

    def plug(self, driver, instance, sdk_ve, vif):
        """Plug the VIF into the running VE's network device."""
        LOG.info("plug: %s:%s" % (instance['name'], vif['devname']))
        vif_class = self._get_vif_class(instance, vif)
        vif_class.plug(driver, instance, sdk_ve, vif)

    def unplug(self, driver, instance, sdk_ve, vif):
        """Detach the VIF from the VE's network device."""
        LOG.info("unplug: %s:%s" % (instance['name'], vif['devname']))
        vif_class = self._get_vif_class(instance, vif)
        vif_class.unplug(driver, instance, sdk_ve, vif)
class BaseVif:
    """Common helpers shared by the concrete VIF plugging implementations."""

    def get_ovs_interfaceid(self, vif):
        # Neutron may supply an explicit OVS interface id; fall back to the
        # port UUID otherwise.
        return vif.get('ovs_interfaceid') or vif['id']

    def get_br_name(self, iface_id):
        """Name of the per-port intermediate linux bridge."""
        return ('pcsbr-' + iface_id)[:network_model.NIC_NAME_LEN]

    def get_veth_pair_names(self, iface_id):
        """Names for the veth pair linking linux bridge and OVS bridge."""
        return (('pcsvb-' + iface_id)[:network_model.NIC_NAME_LEN],
                ('pcsvo-' + iface_id)[:network_model.NIC_NAME_LEN])

    def get_bridge_name(self, vif):
        return vif['network']['bridge']

    def get_prl_name(self, sdk_ve, netdev):
        """Return the host-side name Parallels assigns to this adapter."""
        if sdk_ve.get_vm_type() == pc.PVT_VM:
            return "vme%08x.%d" % (sdk_ve.get_env_id(), netdev.get_index())
        else:
            return "veth%d.%d" % (sdk_ve.get_env_id(), netdev.get_index())

    def get_prl_dev(self, driver, sdk_ve, mac):
        """Return first network device with given MAC address
        or None, if it's not found.
        """
        mac = netaddr.EUI(mac)
        ndevs = sdk_ve.get_devs_count_by_type(pc.PDE_GENERIC_NETWORK_ADAPTER)
        # 'range' instead of Python-2-only 'xrange' (works on 2 and 3).
        for i in range(ndevs):
            netdev = sdk_ve.get_dev_by_type(pc.PDE_GENERIC_NETWORK_ADAPTER, i)
            if netdev.get_emulated_type() == pc.PNA_ROUTED:
                # Routed adapters have no bridge end; skip them.
                continue
            if netaddr.EUI(netdev.get_mac_address()) == mac:
                return netdev
        return None

    def create_prl_dev(self, driver, sdk_ve, vif):
        """Add network device to VE and set MAC address.
        Set virtual network to some unexistent value, so that
        device will not be plugged into any bridged and we can
        do it by ourselves.
        """
        srv_config = driver.psrv.get_srv_config().wait()[0]
        sdk_ve.begin_edit().wait()
        netdev = sdk_ve.add_default_device_ex(srv_config,
                                              pc.PDE_GENERIC_NETWORK_ADAPTER)
        # Store the MAC in bare (separator-free) form as the SDK expects.
        mac = netaddr.EUI(vif['address'])
        mac.dialect = netaddr.mac_bare
        netdev.set_mac_address(str(mac))
        netdev.set_virtual_network_id('_fake_unexistent')
        sdk_ve.commit().wait()
        return netdev

    def setup_prl_dev(self, driver, sdk_ve, vif):
        """Sets up device in VE, so that one end will be inside
        VE with given MAC. Another end - in host with specified
        device name.
        """
        if_name = vif['devname']
        netdev = self.get_prl_dev(driver, sdk_ve, vif['address'])
        if not netdev:
            netdev = self.create_prl_dev(driver, sdk_ve, vif)
        prl_name = self.get_prl_name(sdk_ve, netdev)
        # NOTE(review): checks existence of the neutron devname but brings
        # up the Parallels-side name -- looks intentional (the host end only
        # needs an 'up' when not yet renamed), but verify.
        if not linux_net.device_exists(if_name):
            utils.execute('ip', 'link', 'set', prl_name,
                          'up', run_as_root=True)
        return netdev

    def configure_ip(self, sdk_ve, netdev, vif):
        """Configure IP parameters inside VE
        """
        sdk_ve.begin_edit().wait()
        if CONF.pcs_use_dhcp:
            netdev.set_configure_with_dhcp(1)
        else:
            if len(vif['network']['subnets']) != 1:
                raise NotImplementedError(
                    "Only one subnet per vif is supported.")
            subnet = vif['network']['subnets'][0]

            # Disable DHCP
            # Fixed: this branch previously passed 1 (enabling DHCP),
            # contradicting the static configuration below.
            netdev.set_configure_with_dhcp(0)

            # Setup IP addresses
            iplist = prlsdkapi_proxy.sdk.StringList()
            for ip in subnet['ips']:
                cidr = netaddr.IPNetwork(subnet['cidr'])
                if ip['type'] != 'fixed':
                    raise NotImplementedError("Only fixed IPs are supported.")
                iplist.add_item("%s/%s" % (ip['address'], cidr.prefixlen))
            netdev.set_net_addresses(iplist)

            # Setup gateway
            if subnet['gateway']:
                gw = subnet['gateway']
                if gw['type'] != 'gateway':
                    raise NotImplementedError(
                        "Only 'gateway' type gateways are supported.")
                netdev.set_default_gateway(gw['address'])
        netdev.set_auto_apply(1)
        sdk_ve.commit().wait()
class VifOvsHybrid(BaseVif):
    """OVS plugging through an intermediate linux bridge.

    The VE adapter is joined to a per-port linux bridge (where neutron's
    firewall rules can apply), which is connected to the OVS integration
    bridge via a veth pair.
    """

    def setup_dev(self, driver, instance, sdk_ve, vif):
        # Pre-create the adapter while the VE is stopped; plug() wires it up.
        netdev = self.create_prl_dev(driver, sdk_ve, vif)

    def plug(self, driver, instance, sdk_ve, vif):
        iface_id = self.get_ovs_interfaceid(vif)
        if_name = vif['devname']
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])

        # Create the per-port linux bridge on first plug (no forwarding
        # delay, no STP).
        if not linux_net.device_exists(br_name):
            utils.execute('brctl', 'addbr', br_name, run_as_root=True)
            utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
            utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)

        netdev = self.setup_prl_dev(driver, sdk_ve, vif)
        prl_name = self.get_prl_name(sdk_ve, netdev)
        # From here on operate on the host-side Parallels device name.
        if_name = prl_name

        # First plug: create the veth pair, put v1 into the linux bridge
        # and v2 into the OVS integration bridge.
        if not linux_net.device_exists(v2_name):
            linux_net._create_veth_pair(v1_name, v2_name)
            utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
            utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
            pcs_create_ovs_vif_port(self.get_bridge_name(vif), v2_name,
                                    iface_id, prl_name, vif['address'],
                                    instance['uuid'])

        if if_name not in get_bridge_ifaces(br_name):
            utils.execute('brctl', 'addif', br_name, if_name, run_as_root=True)

        self.configure_ip(sdk_ve, netdev, vif)

        if sdk_ve.get_vm_type() == pc.PVT_VM and \
                if_name not in get_bridge_ifaces(br_name):
            # FIXME: dispatcher removes interface from bridge after
            # changing configuration
            utils.execute('brctl', 'addif', br_name, if_name, run_as_root=True)

    def unplug(self, driver, instance, sdk_ve, vif):
        iface_id = self.get_ovs_interfaceid(vif)
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])

        netdev = self.get_prl_dev(driver, sdk_ve, vif['address'])
        if not netdev:
            # Adapter already gone; nothing to tear down.
            return
        prl_name = self.get_prl_name(sdk_ve, netdev)

        # Remove the OVS side first, then take down and delete the linux
        # bridge (the veth pair goes away with the bridge/port removal).
        linux_net.delete_ovs_vif_port(self.get_bridge_name(vif), v2_name)
        utils.execute('ip', 'link', 'set', br_name, 'down', run_as_root=True)
        utils.execute('brctl', 'delbr', br_name, run_as_root=True)
class VifOvsEthernet(BaseVif):
    """Plug the VE adapter straight into the OVS bridge (no linux bridge)."""

    def setup_dev(self, driver, instance, sdk_ve, vif):
        # Pre-create the adapter while the VE is stopped.
        netdev = self.create_prl_dev(driver, sdk_ve, vif)

    def plug(self, driver, instance, sdk_ve, vif):
        iface_id = self.get_ovs_interfaceid(vif)
        if_name = vif['devname']
        netdev = self.setup_prl_dev(driver, sdk_ve, vif)
        prl_name = self.get_prl_name(sdk_ve, netdev)
        # Add the host-side device directly as an OVS port.
        linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
                                      prl_name, iface_id, vif['address'],
                                      instance['uuid'])
        self.configure_ip(sdk_ve, netdev, vif)

    def unplug(self, driver, instance, sdk_ve, vif):
        netdev = self.get_prl_dev(driver, sdk_ve, vif['address'])
        if not netdev:
            # Adapter already gone; nothing to tear down.
            return
        prl_name = self.get_prl_name(sdk_ve, netdev)
        linux_net.delete_ovs_vif_port(self.get_bridge_name(vif), prl_name)
|
import numpy
import cPickle as pkl
import gzip
def fopen(filename, mode='r'):
    """Open *filename*, transparently using gzip for '.gz' files."""
    opener = gzip.open if filename.endswith('.gz') else open
    return opener(filename, mode)
class TextIterator:
    """Simple Bitext iterator.

    Streams aligned (source, target, source_synset, target_synset, label)
    batches from five parallel text files.  Lines are buffered ``k`` at a
    time and sorted by target length so a batch holds similarly sized
    sentences.
    """

    def __init__(self, source, target, label, source_synset, target_synset,
                 all_dict, syn_dict,
                 batch_size=128,
                 maxlen=100,
                 n_words_source=-1,
                 n_words_target=-1):
        # Parallel corpora: one sentence per line, kept in lockstep.
        self.source = fopen(source, 'r')
        self.target = fopen(target, 'r')
        self.source_synset = fopen(source_synset, 'r')
        self.target_synset = fopen(target_synset, 'r')
        self.label = fopen(label, 'r')
        # Word -> id vocabulary shared by source and target sides.
        with open(all_dict, 'rb') as f:
            self.all_dict = pkl.load(f)
        self.label_dict = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        # Synset -> id vocabulary.
        with open(syn_dict, 'rb') as f:
            self.syn_dict = pkl.load(f)

        self.batch_size = batch_size
        self.maxlen = maxlen
        self.n_words_source = n_words_source
        self.n_words_target = n_words_target

        # Read-ahead buffers, refilled k lines at a time.
        self.source_buffer = []
        self.target_buffer = []
        self.source_synset_buffer = []
        self.target_synset_buffer = []
        self.label_buffer = []
        self.k = batch_size * 20

        self.end_of_data = False
        # Pairs dropped because token/synset lengths disagreed (see next()).
        self.countBlank = 0

    def __iter__(self):
        return self

    def reset(self):
        """Rewind all five input files to the beginning."""
        self.source.seek(0)
        self.target.seek(0)
        self.source_synset.seek(0)
        self.target_synset.seek(0)
        self.label.seek(0)

    def next(self):
        """Return the next batch as (source, source_synset, target,
        target_synset, label) lists of id sequences; raise StopIteration
        when the data is exhausted.
        """
        if self.end_of_data:
            self.end_of_data = False
            self.reset()
            raise StopIteration

        source = []
        target = []
        source_synset = []
        target_synset = []
        label = []

        # fill buffer, if it's empty
        assert len(self.source_buffer) == len(self.target_buffer), 'Buffer size mismatch!'

        if len(self.source_buffer) == 0:
            # 'range' instead of Python-2-only 'xrange'.
            for k_ in range(self.k):
                # Stop refilling as soon as any of the five files runs out.
                ss = self.source.readline()
                if ss == "":
                    break
                tt = self.target.readline()
                if tt == "":
                    break
                ss_synset = self.source_synset.readline()
                if ss_synset == "":
                    break
                tt_synset = self.target_synset.readline()
                if tt_synset == "":
                    break
                ll = self.label.readline()
                if ll == "":
                    break

                self.source_buffer.append(ss.strip().split())
                self.target_buffer.append(tt.strip().split())
                self.source_synset_buffer.append(ss_synset.strip().split())
                self.target_synset_buffer.append(tt_synset.strip().split())
                self.label_buffer.append(ll.strip().split())

            # Sort all buffers by target sentence length; pop() below
            # consumes from the long end first.
            tlen = numpy.array([len(t) for t in self.target_buffer])
            tidx = tlen.argsort()

            _sbuf = [self.source_buffer[i] for i in tidx]
            _tbuf = [self.target_buffer[i] for i in tidx]
            _sbuf_synset = [self.source_synset_buffer[i] for i in tidx]
            _tbuf_synset = [self.target_synset_buffer[i] for i in tidx]
            _lbuf = [self.label_buffer[i] for i in tidx]

            self.source_buffer = _sbuf
            self.target_buffer = _tbuf
            self.source_synset_buffer = _sbuf_synset
            self.target_synset_buffer = _tbuf_synset
            self.label_buffer = _lbuf

        if (len(self.source_buffer) == 0 or len(self.target_buffer) == 0
                or len(self.source_synset_buffer) == 0
                or len(self.target_synset_buffer) == 0
                or len(self.label_buffer) == 0):
            self.end_of_data = False
            self.reset()
            raise StopIteration

        try:
            # actual work here
            while True:
                # read from source buffer and map to word index
                try:
                    ss = self.source_buffer.pop()
                except IndexError:
                    break
                # Unknown words map to id 1 (UNK).
                ss = [self.all_dict[w] if w in self.all_dict else 1
                      for w in ss]
                if self.n_words_source > 0:
                    # Clip ids beyond the vocabulary limit to 3.
                    ss = [w if w < self.n_words_source else 3 for w in ss]

                ss_syn = self.source_synset_buffer.pop()
                ss_syn = [self.syn_dict[w] if w in self.syn_dict else 3
                          for w in ss_syn]

                # read from target buffer and map to word index
                tt = self.target_buffer.pop()
                tt = [self.all_dict[w] if w in self.all_dict else 1
                      for w in tt]
                if self.n_words_target > 0:
                    tt = [w if w < self.n_words_target else 3 for w in tt]

                tt_syn = self.target_synset_buffer.pop()
                tt_syn = [self.syn_dict[w] if w in self.syn_dict else 3
                          for w in tt_syn]

                # get label
                ll = self.label_dict[self.label_buffer.pop()[0]]

                # NOTE(review): pairs are skipped only when BOTH sides
                # exceed maxlen ('and'); confirm 'or' was not intended.
                if len(ss) > self.maxlen and len(tt) > self.maxlen:
                    continue
                # Drop pairs whose synset annotation is out of sync with
                # the tokens; keep a count for diagnostics.
                if len(ss) != len(ss_syn) or len(tt) != len(tt_syn):
                    self.countBlank += 1
                    continue

                source.append(ss)
                source_synset.append(ss_syn)
                target.append(tt)
                target_synset.append(tt_syn)
                label.append(ll)

                if len(source) >= self.batch_size or \
                        len(target) >= self.batch_size or \
                        len(label) >= self.batch_size:
                    break
        except IOError:
            self.end_of_data = True

        # Fixed: the third condition used to re-test len(target) instead
        # of len(label).
        if len(source) <= 0 or len(target) <= 0 or len(label) <= 0:
            self.end_of_data = False
            self.reset()
            raise StopIteration

        return source, source_synset, target, target_synset, label

    # Python 3 iterator protocol alias (backward compatible addition).
    __next__ = next
|
def quicksort(xs):
    """Return a sorted copy of *xs* (first element used as pivot)."""
    if not xs:
        return []
    pivot, rest = xs[0], xs[1:]
    smaller = [v for v in rest if v <= pivot]
    larger = [v for v in rest if v > pivot]
    return quicksort(smaller) + [pivot] + quicksort(larger)
# Module-level demo: sort a small sample list.
xs = [1, 3, 2, 4, 5, 2]
sorted_xs = quicksort(xs)
|
import numpy as np
import scipy.linalg
import sklearn.metrics
def kernel(ker, X1, X2, gamma):
    """Compute the kernel matrix for samples stored column-wise in X1/X2.

    ker: falsy or 'primal' returns X1 unchanged; 'linear' and 'rbf' use
    sklearn pairwise kernels; anything else yields None.
    """
    if not ker or ker == 'primal':
        return X1
    a1 = np.asarray(X1).T
    a2 = np.asarray(X2).T if X2 is not None else None
    if ker == 'linear':
        if a2 is not None:
            return sklearn.metrics.pairwise.linear_kernel(a1, a2)
        return sklearn.metrics.pairwise.linear_kernel(a1)
    if ker == 'rbf':
        if a2 is not None:
            return sklearn.metrics.pairwise.rbf_kernel(a1, a2, gamma)
        return sklearn.metrics.pairwise.rbf_kernel(a1, None, gamma)
    # Unknown kernel names fall through to None, as before.
    return None
class TCA:
    """Transfer Component Analysis: learn a shared subspace for source and
    target features via a generalized eigenproblem on the MMD matrix."""
    def __init__(self, kernel_type = 'primal', dim = 30, lamb = 1, gamma = 1):
        '''
        Init func
        :param kernel_type: kernel, values: 'primal' | 'linear' | 'rbf'
        :param dim: dimension after transfer
        :param lamb: lambda value in equation
        :param gamma: kernel bandwidth for rbf kernel
        '''
        self.kernel_type = kernel_type
        self.dim = dim
        self.lamb = lamb
        self.gamma = gamma

    def fit(self, Xs, Xt):
        '''
        Transform Xs and Xt
        :param Xs: ns * n_feature, source feature
        :param Xt: nt * n_feature, target feature
        :return: Xs_new and Xt_new after TCA
        '''
        # Stack source and target samples as columns and normalize each
        # column (sample) to unit length -- note this mutates X in place.
        X = np.hstack((Xs.T, Xt.T))
        X /= np.linalg.norm(X, axis=0)
        m, n = X.shape
        ns, nt = len(Xs), len(Xt)
        # MMD matrix M = e e^T with e = [1/ns ...; -1/nt ...], normalized.
        e = np.vstack((1 / ns * np.ones((ns, 1)), -1 / nt * np.ones((nt, 1))))
        M = e * e.T
        M = M / np.linalg.norm(M, 'fro')
        # Centering matrix.
        H = np.eye(n) - 1 / n * np.ones((n, n))
        K = kernel(self.kernel_type, X, None, gamma=self.gamma)
        # 'primal' keeps the raw features, so the regularizer is m x m;
        # kernelized variants work on the n x n Gram matrix.
        n_eye = m if self.kernel_type == 'primal' else n
        a, b = np.linalg.multi_dot([K, M, K.T]) + self.lamb * np.eye(n_eye), np.linalg.multi_dot([K, H, K.T])
        # Generalized eigenproblem a V = b V diag(w).
        w, V = scipy.linalg.eig(a, b)
        # NOTE(review): w is complex; argsort on a complex array orders by
        # real part first -- presumably "smallest eigenvalues", verify.
        ind = np.argsort(w)
        A = V[:, ind[:self.dim]]
        # Project and renormalize the embedded samples.
        Z = np.dot(A.T, K)
        Z /= np.linalg.norm(Z, axis=0)
        Xs_new, Xt_new = Z[:, :ns].T, Z[:, ns:].T
        return Xs_new, Xt_new
if __name__ == "__main__":
Xs = np.random.rand(5, 4)
Xt = np.random.rand(5, 4)
tca = TCA(kernel_type = 'linear', dim = 30, lamb = 1, gamma = 1)
Xs_new, Xt_new = tca.fit(Xs, Xt) |
import asyncio
from subprocess import call
import os
import dotenv
import aiohttp
import requests
import youtube_dl as ytdl
from apiclient.discovery import build
__BASE_URL__ = "https://www.googleapis.com/youtube/v3"
class BaseYoutubeAPI:
    """Base Youtube API Client.

    Handles users credentials API key.
    """

    def __init__(self, dev_key=""):
        # An explicitly supplied key wins; otherwise fall back to the
        # environment (.env file supported via python-dotenv).
        self.DEVELOPER_KEY = dev_key if dev_key != "" else self.get_credential_key()

    @staticmethod
    def get_credential_key():
        """Get credentials api key from os environment.

        Set environment variable named 'YOUTUBE_API' and put the
        credentials api key there.
        """
        dotenv.load_dotenv()
        return os.environ.get("YOUTUBE_API")
class YoutubeVideo:
    """Represents youtube's videos attributes."""

    def __init__(self, title="", url="", thumbnails=None, duration="", description=""):
        """Store the video metadata.

        thumbnails defaults to a fresh empty dict per instance; the
        previous mutable default ({}) was shared across all instances.
        """
        self.title = title
        self.url = url
        self.thumbnails = thumbnails if thumbnails is not None else {}
        self.duration = duration
        self.desc = description

    def __str__(self):
        # "<title> -- <url>\n<description>\n"
        object_to_string = "{} -- {}\n{}\n".format(self.title, self.url, self.desc)
        return object_to_string
class YoutubeService(BaseYoutubeAPI):
    """Youtube's service client (synchronous, googleapiclient-based)."""

    def __init__(self, dev_key=""):
        super(YoutubeService, self).__init__(dev_key=dev_key)
        self.YOUTUBE_API_SERVICE_NAME = "youtube"
        self.YOUTUBE_API_VERSION = "v3"
        # Build the Google API client using the key resolved by the base
        # class (argument or environment).
        self.youtube = build(
            self.YOUTUBE_API_SERVICE_NAME,
            self.YOUTUBE_API_VERSION,
            developerKey=self.DEVELOPER_KEY,
        )

    def search(self, keyword, max_results=7):
        """Search videos/playlist/channel by keywords.

        Returns a list of YoutubeVideo; non-video hits are skipped.
        """
        search_response = (
            self.youtube.search()
            .list(q=keyword, part="id,snippet", maxResults=max_results)
            .execute()
        )
        list_of_videos = []
        for search_result in search_response.get("items", []):
            if (
                search_result["id"]["kind"] == "youtube#video"
            ):  # #video #playlist #channel
                url = "http://www.youtube.com/watch?v=" + search_result["id"]["videoId"]
                title = search_result["snippet"]["title"]
                thumbnails = search_result["snippet"][
                    "thumbnails"
                ]  # high/medium/default
                desc = search_result["snippet"]["description"]
                # Duration is not in the search response; fetch
                # contentDetails per video with a direct REST call.
                url_detail = "{}/videos?id={}&part=contentDetails&key={}".format(
                    __BASE_URL__, search_result["id"]["videoId"], self.DEVELOPER_KEY
                )
                response = requests.get(url_detail)
                response = response.json()
                vid = YoutubeVideo(
                    title=title,
                    url=url,
                    thumbnails=thumbnails,
                    duration=response["items"][0]["contentDetails"]["duration"],
                    description=desc,
                )
                list_of_videos.append(vid)
        return list_of_videos

    def download(self, video_url="", threads=2):
        """Downloads video from given video url.

        Shells out to youtube-dl with aria2c as external downloader.
        """
        # Probe available formats without downloading anything.
        meta = ytdl.YoutubeDL({}).extract_info(video_url, download=False)
        quality = ""
        # NOTE(review): if both 720p and 1080p formats exist, the last one
        # seen wins -- confirm this preference is intended.
        for fmt in meta["formats"]:
            if fmt["format_note"] == "720p":
                quality = "136"
            elif fmt["format_note"] == "1080p":
                quality = "137"
        try:
            if quality != "":
                # itags: 136=720p video, 137=1080p video, 171=vorbis audio.
                call(
                    [
                        "youtube-dl",
                        "-f " + quality + "+171",
                        video_url,
                        "--external-downloader",
                        "aria2c",
                        "--external-downloader-args",
                        "-x" + str(threads),
                    ]
                )
            else:
                # No preferred quality detected; let youtube-dl pick.
                call(
                    [
                        "youtube-dl",
                        video_url,
                        "--external-downloader",
                        "aria2c",
                        "--external-downloader-args",
                        "-x" + str(threads),
                    ]
                )
        except Exception as e:
            print("failed to download {}".format(video_url))
            print(e)
class AioYoutubeService(BaseYoutubeAPI):
    """Asynchronous youtube service client"""

    @staticmethod
    async def create_session():
        """Create a fresh aiohttp session.

        Fixed: ``aiohttp.ClientSession`` is not awaitable, so the previous
        ``return await aiohttp.ClientSession()`` raised TypeError.  Callers
        still ``await create_session()`` exactly as before.
        """
        return aiohttp.ClientSession()

    def __init__(self, dev_key=""):
        super(AioYoutubeService, self).__init__(dev_key=dev_key)

    async def search(self, q="", part="snippet", raw=False):
        """Search video by keywords & parts
        url: GET {BASE_URL}/search/?q=q&part=part
        params:
        q -> stands for query, search key. default: empty string
        part -> snippet, contentDetails, player, statistics, status. default: snippet
        raw -> returns json type object, raw from the api response. default: False
        returns a list of YoutubeVideo Object
        """
        url = "{}/search/?q={}&part={}&key={}&maxResults=7".format(
            __BASE_URL__, q, part, self.DEVELOPER_KEY
        )
        async with aiohttp.ClientSession() as session:
            response = await session.get(url)
            search_results = await response.json()
            # Raw passthrough when the caller wants untouched JSON or a
            # non-snippet part (no snippet fields to build videos from).
            if raw or part != "snippet":
                return search_results
            videos = []
            for item in search_results["items"]:
                if item["id"]["kind"] == "youtube#video":
                    # Duration requires a per-video contentDetails call.
                    url_detail = "{}/videos?id={}&part=contentDetails&key={}".format(
                        __BASE_URL__, item["id"]["videoId"], self.DEVELOPER_KEY
                    )
                    response = await session.get(url_detail)
                    vid_details = await response.json()
                    video_url = (
                        "http://www.youtube.com/watch?v=" + item["id"]["videoId"]
                    )
                    ytvid = YoutubeVideo(
                        title=item["snippet"]["title"],
                        url=video_url,
                        thumbnails=item["snippet"]["thumbnails"],
                        duration=vid_details["items"][0]["contentDetails"]["duration"],
                        description=item["snippet"]["description"],
                    )
                    videos.append(ytvid)
            return videos
# TESTS
if __name__ == "__main__":

    async def main():
        ays = AioYoutubeService()
        response = await ays.search(q="lorem ipsum", part="snippet")
        print(response)  # raw true
        for video in response:
            print(str(video))

    # asyncio.run replaces the deprecated get_event_loop() /
    # run_until_complete() pattern (Python 3.7+; get_event_loop warns
    # since 3.10 when no loop is running).
    asyncio.run(main())
|
# Copyright 2015-2018 The NATS Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse, sys
import tornado.ioloop
import tornado.gen
import time
from nats.io.client import Client as NATS
def show_usage():
    """Print command-line usage for nats-pub."""
    usage_lines = [
        "nats-pub SUBJECT [-d DATA] [-s SERVER]",
        "",
        "Example:",
        "",
        "nats-pub hello -d world -s nats://127.0.0.1:4222 -s nats://127.0.0.1:4223",
    ]
    for line in usage_lines:
        print(line)
def show_usage_and_die():
    """Print usage and exit with a non-zero status."""
    show_usage()
    sys.exit(1)
@tornado.gen.coroutine
def main():
    """Parse CLI arguments, publish one message to NATS and flush."""
    parser = argparse.ArgumentParser()

    # e.g. nats-pub hello -d "world" -s nats://127.0.0.1:4222 -s nats://127.0.0.1:4223
    parser.add_argument('subject', default='hello', nargs='?')
    parser.add_argument('-d', '--data', default="hello world")
    parser.add_argument('-s', '--servers', default=[], action='append')
    args = parser.parse_args()

    nc = NATS()
    try:
        servers = args.servers
        if len(args.servers) < 1:
            # No -s given: fall back to the default local server.
            servers = ["nats://127.0.0.1:4222"]
        opts = {"servers": servers}
        yield nc.connect(**opts)
        yield nc.publish(args.subject, args.data)
        yield nc.flush()
        print("Published to '{0}'".format(args.subject))
    # Fixed: 'except Exception, e' is Python-2-only syntax; 'as' works on
    # Python 2.6+ and 3.
    except Exception as e:
        print(e)
        show_usage_and_die()
if __name__ == '__main__':
    # Run the publish coroutine to completion on Tornado's IO loop.
    tornado.ioloop.IOLoop.instance().run_sync(main)
|
"""Delete a kubeflow instance."""
import fire
import json
import logging
import retrying
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from kubeflow.testing import util
@retrying.retry(stop_max_delay=10*60*1000, wait_exponential_max=60*1000,
                wait_exponential_multiplier=1000)
def delete_deployment(dm, project, name):
    """Delete a Deployment Manager deployment and wait for completion.

    Retries with exponential backoff for up to 10 minutes; the ValueError
    raised on a 409 below deliberately triggers those retries.
    """
    deployments_client = dm.deployments()

    try:
        op = deployments_client.delete(project=project, deployment=name).execute()
    except errors.HttpError as e:
        # Errors without a body can't be classified; let them propagate.
        if not e.content:
            raise
        error_content = json.loads(e.content)
        message = error_content.get('error', {}).get('message', "")
        logging.info("delete deployment error %s", message)
        code = error_content.get('error', {}).get('code', 0)
        if code == 404:  # pylint: disable=no-else-return
            # Deployment already absent; treat as success.
            logging.info("Project %s doesn't have deployment %s", project, name)
            return
        elif code == 409:
            # Another operation holds the deployment; raising re-enters
            # the retry decorator above.
            logging.info("Conflicting operation in progress")
            raise ValueError("Can't delete deployment confliction operation in "
                             "progress")
        raise

    # Deployment Manager operations are global, hence no zone.
    zone = None
    op = util.wait_for_gcp_operation(dm.operations(), project, zone, op["name"])
    logging.info("Final op: %s", op)
class KFDeleter:
    """Deletes the Deployment Manager deployments backing a Kubeflow install."""

    def delete_kf(self, project, name):
        """Delete a KF instance with the specified name in the specified project."""
        # TODO(jlewi): This is a bit of a hack due to the fact that kfctl
        # doesn't properly handle deletion just given the name of a kubeflow
        # deployment. Once that's fixed we should just use that.
        util.maybe_activate_service_account()
        credentials = GoogleCredentials.get_application_default()
        dm = discovery.build("deploymentmanager", "v2", credentials=credentials)
        # The main deployment first, then its companion storage deployment.
        for suffix in ("", "-storage"):
            dm_name = name + suffix
            logging.info("Deleting project %s deployment %s", project, dm_name)
            delete_deployment(dm, project, dm_name)
        # TODO(jlewi): Cleanup other resources like certificates and backends
# TODO(jlewi): Cleanup other resources like certificates and backends
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
fire.Fire(KFDeleter)
|
import ccobra
import numpy as np
class RandomModel(ccobra.CCobraModel):
    """Baseline CCOBRA model answering verification tasks at random."""

    def __init__(self, name='Random'):
        supported_domains = ['syllogistic-belief']
        supported_response_types = ['verify']
        super(RandomModel, self).__init__(
            name, supported_domains, supported_response_types)

    def predict(self, item, **kwargs):
        """Answer True/False uniformly at random."""
        answers = [True, False]
        return np.random.choice(answers)

    def predict_rating(self, item, **kwargs):
        """Return a random rating in 1..6."""
        rating = np.random.randint(1, 7)
        return int(rating)
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS
from django.db.models import BooleanField
__all__ = [
'VariableHelpTextBooleanField',
]
class VariableHelpTextBooleanField(BooleanField):
    """Fixes an issue with help_text depending on a variable.

    See https://github.com/Aladom/django-mailing/issues/2 for details.
    """
    # FIXME DEPRECATED: remove when squashing migrations
    def deconstruct(self):
        # Drop help_text from the deconstructed kwargs so migrations do not
        # churn whenever the (variable) help text changes.
        name, path, args, kwargs = super().deconstruct()
        kwargs.pop('help_text', None)
        return name, path, args, kwargs
|
# -*- coding: UTF-8 -*-
# in: ProteinDBs with and without decoys, number of samples for simulation and Inspect
# out: FDR-matrix (and several result files)
# Attention: For using Inspect, this script must be called out of the Inspect-directory! The
# "cd /home/franziska/bin/Inspect" command is called be the main routine of the script
# procedure:
# 1. sample sequences from Protein-DB (for faster simulation)
# 2. call the simulator with these sequences
# 3. convert spectra to mgf-format (input format Inspect and Biceps)
# 4. sample spectra from simulated ones
# 5. run Inspect
# 6. evaluate results from Inspect to calculate an FDR-matrix
import re
import random
from Bio import SeqIO
import os
import sys
import optparse
import time
import platform
from inspectparser import parseInspect
# 1. sampling from DB (def)
def samplProt(inpath, n):
    """
    Sampling a given number of protein sequences from a given fasta-file
    inpath: fasta-file, protein DB which will be sampled from
    n: number of samples (0 or > DB size means: use the whole DB)
    Returns the path of the file holding the sampled sequences.
    """
    protIDs = [] # IDs of all proteins from db
    print 'Reading protein IDs from '+inpath+' ...'
    for record in SeqIO.parse(open(inpath, "rU"), "fasta") :
        # Add ID of this record to protIDs list
        protIDs.append(record.id)
    nprot = len(protIDs)
    print "Found %i proteins" % nprot
    if n == 0 or n > nprot:
        # Nothing to sample; reuse the input DB unchanged.
        print "Take the complete list of %i proteins" %nprot
        return inpath
    else:
        print 'Choosing %i proteins ...' %n
        rprot = random.sample(protIDs,n)
        # NOTE(review): assumes either a 2-char ('.gz'-like) or 5-char
        # ('.fasta') extension; other extensions mangle the name -- verify.
        if inpath[-3] == '.': outpath = inpath[:-3] + "_sample" + str(n) +".fasta"
        else: outpath = inpath[:-6] + "_sample" + str(n) +".fasta"
        print 'Writing selected proteins to '+outpath+" ..."
        output_handle = open(outpath, "w")
        # Second pass over the DB: copy only the selected records.
        for record in SeqIO.parse(open(inpath, "rU"), "fasta") :
            if record.id in rprot: # for every selected protein
                SeqIO.write(record, output_handle, "fasta")
        output_handle.close()
        print "done"
        return outpath
# 2. use OpenMS simulator (def)
def run_MSSim(DBfasta, outmzML, ini="/data/NG4/anke/MSSim/mscconf_sebio02.ini", param=""):
command = "MSSimulator -in {inp} -out {out} -ini {ini} {param}".format(inp=DBfasta, out=outmzML, ini=ini, param=param)
print "Executing:",command
sys.stdout.flush()
os.system(command)
return 1
# 3. convert mzML to mgf format (def)
def convertSpectra(mzMLfile, param=""):
mgf = mzMLfile[:-5]+".mgf"
command = "FileConverter -in {infile} -in_type mzML -out {outfile} -out_type mgf {param}".format(infile=mzMLfile, outfile=mgf, param=param)
print "Executing: ",command
sys.stdout.flush()
os.system(command)
return mgf
# 4. sampling from simulated spectra (def)
from readMGF import sampleMGF as samplSpecs
'''def samplSpecs(inpath, n):
"""
Sampling a given number of spectra from a given mgf-file
inpath: mgf-file which will be sampled from
n: number of samples
"""
infile = open(inpath,"r")
specs = [] # names of all spectra
print 'Reading specTitles ...'
for line in infile:
if re.search("TITLE",line):
specs.append(line.rstrip())
if n == 0 or n > len(specs):
print "Take the complete list of %i spectra" %len(specs)
return inpath
else:
print 'Choosing %i spectra ...' %n
rspecs = random.sample(specs,n)
infile.seek(0)
outpath = inpath[:-4] + "_sample" + str(n) +".mgf"
print 'Writing selected specs to '+outpath+" ..."
outfile = open(outpath,"w")
for line in infile:
if line.rstrip() in rspecs: # for ervery selected title write spectrum to new file
outfile.write("BEGIN IONS\n")
outfile.write(line)
for line2 in infile:
if re.search("BEGIN IONS",line2): break
outfile.write(line2)
outfile.close()
print "done"
return outpath
'''
def prepDB(fastaDB, path="/home/franziska/bin/Inspect/"):
""" 5.a prepare decoyDB input for Inspect (def)
The function will use the decoyDB to create
Args:
fastaDB: decoy fasta file
path: where inspect is put
Returns:
This creates two files which will be used by
InsPecT: myDB.trie and myDB.index. Once these files are created,
they can be reused for later InsPecT runs.
"""
if not os.path.exists(os.path.join(path,"PrepDB.py")):
print "InsPecT files not found, please correct path!"
return 0
# PrepDB.py from inSpecT source code:
# Convert a protein database into concatenated format.
# Processes FASTA format.
command = "python {path} FASTA {db}".format(path=os.path.join(path, 'PrepDB.py'), db=fastaDB)
print "Executing: ",command
# sys.stdout.flush()
# os.system(command)
return 1
# 5. match spectra against database with Inspect (def)
def run_inspect(configfile, outputfile, path="", param=""):
    """Run the InsPecT search engine with the given config and output file."""
    # Windows builds ship inspect.exe; POSIX builds just 'inspect'.
    if platform.system() == 'Windows':
        inspect_path = os.path.join(path, 'inspect.exe')
    else:
        inspect_path = os.path.join(path, 'inspect')
    # With a path, also pass -r so InsPecT finds its resource directory.
    if len(path) > 0:
        command = inspect_path + " -i {input} -o {output} -r {path} {param}".format(input=configfile, output=outputfile, path=path, param=param)
    else:
        command = inspect_path + " -i {input} -o {output} {param}".format(input=configfile, output=outputfile, param=param)
    print "Executing: ",command , "\n"
    sys.stdout.flush()
    os.system(command)
    return 1
# 1.-6. go all steps
def calculateSimilarityMatrix(DBs, db_path="/data/NG4/anke/proteome/", nProt=100, nSpec=1000,
                              sim_out_path="/data/NG4/anke/MSSim/sampled_data/", MSSim_ini="/data/NG4/anke/MSSim/mscconf_sebio02.ini",
                              inspect_config="/data/NG4/anke/Inspect/config_Inspect_MSSim.txt", inspect_dir="/home/franziska/bin/Inspect/"):
    """Run the full sample -> simulate -> convert -> search pipeline.

    For each DB i, spectra are simulated from sampled proteins and matched
    with InsPecT against each decoy DB j; simMat[i][j] holds the score
    from parseInspect.  Returns (simMat, normSimMat) where each row of
    normSimMat is divided by its diagonal (self-match) entry.
    """
    rng = range(len(DBs))
    simMat = [ [0 for i in rng] for j in rng ] # initializing output
    configfile = inspect_config[:-4]+"_py.txt" # Inspect configuration file (final version)
    for i in rng:
        # 1. sampling from DB (run)
        prot_path = samplProt(db_path+DBs[i]+".fasta",nProt)
        # 2. use OpenMS simulator with sampled proteins (run)
        out_path = sim_out_path+DBs[i]+"_sampl"+str(nProt)+"MSSim.mzML"
        run_MSSim(prot_path, out_path, ini=MSSim_ini, param="-threads 4")
        # 3. convert mzML to mgf format
        sampl_path = convertSpectra(out_path)
        # 4. sampling from simulated spectra
        spec_path = samplSpecs(sampl_path,nSpec)
        # (*) with runInspect_user_config.runInspect_config for all DBs
        # runInspect_config(spectra=spec_path, DBs=DBs, spec_path="", db_path=db_path, inspect_dir=inspect_dir, conf=configfile, user_mods="")
        for j in rng:
            # 5. calling InSpecT (*)
            db_j = db_path+DBs[j]+"_decoy.trie"
            # 5.a create trie if necessary
            if not os.path.exists(db_j):
                prepDB(db_path+DBs[j]+"_decoy.fasta",path=inspect_dir)
            inspect_out = spec_path[:-4] +"_"+DBs[j]+"_InspectOut.txt" # (!!)
            # prepare configfile # <-- call runInspect_user_config.runInspect_config instead!! (start at step 5, keep only the j-loop with inspect_out)
            # Rewrite the spectra and DB lines of the template config so
            # this run searches spec_path against db_j.
            conf_in = open(inspect_config,'r')
            conf_out = open(configfile,'w')
            for line in conf_in:
                if re.match("spectra,",line):
                    conf_out.write("spectra,"+spec_path+"\n")
                elif re.match("DB,",line):
                    conf_out.write("DB,"+db_j+"\n")
                else:
                    conf_out.write(line)
            conf_in.close()
            conf_out.close()
            run_inspect(configfile, inspect_out, path=inspect_dir)# from 5. remove all but (!!) up to here
            # 6. evaluate results from Inspect to calculate an FDR-matrix
            simMat[i][j] = parseInspect(inspect_out, silent=False)[1]
    # Row-normalize by the self-match score.
    # NOTE(review): a zero diagonal entry raises ZeroDivisionError here --
    # confirm self-matches are always nonzero.
    normSimMat = [ [0 for i in rng] for j in rng ]
    for k in rng:
        for l in rng:
            normSimMat[k][l] = simMat[k][l] / simMat[k][k]
    return simMat, normSimMat
if __name__=="__main__":
usage = """%prog DB_LIST -d DB_DIR -p NPROT -n NSPEC -i INIFILE
Sample a given number of proteins from DB for simulation of spectra,
sample a given number of spectra from simulation for running inspect
and calculate FDR from Inspect output.
"""
t0 = time.time()
print time.strftime("%Y/%m/%d, %H:%M:%S: Starting whole procedure and overall time measurement", time.localtime(t0))
sys.stdout.flush()
# configure the parser
optparser = optparse.OptionParser(usage=usage)
optparser.add_option('-d', '--dbdir', type='string', dest='db_dir', default="/data/NG4/anke/proteome/", help='directory of proteinDBs. [default: %default]')
optparser.add_option('-p', '--proteinnumber', type='int', dest='nprot', default=100, help='number of samples from proteinDB. [default: %default]')
optparser.add_option('-n', '--numberspectra', type='int', dest='nspec', default=1000, help='number of samples from spectra. [default: %default]')
optparser.add_option('-c', '--configfile', type='string', dest='MSSim_ini', default="/data/NG4/anke/MSSim/mscconf_sebio02.ini", help='configuration file for MSSImulator. [default: %default]')
optparser.add_option('-i', '--inspectdir', type='string', dest='insp_dir', default="/home/franziska/bin/Inspect/", help="path to 'inspect.exe'. [default: %default]")
optparser.add_option('-o', '--outfile', type='string', dest='out', default="/data/NG4/anke/Inspect/collectOutput.txt", help='name of file resulting matrix is written to. [default: %default]')
# parse options and arguments
options, args = optparser.parse_args()
if len(args) == 1:
db_list = args[0].split(',')
sim_out_path = "/data/NG4/anke/MSSim/sampled_data/"
nProt = options.nprot # nr. of sampled proteins (from DB)
nSpec = options.nspec # nr. of sampled spectra (from simulation)
config_path = "/data/NG4/anke/Inspect/config_Inspect_MSSim.txt" # Inspect configuration file (initial version)
M, M_n = calculateSimilarityMatrix(DBs = db_list, db_path = options.db_dir, nProt=nProt, nSpec=nSpec, sim_out_path=sim_out_path, MSSim_ini=options.MSSim_ini, inspect_config=config_path, inspect_dir=options.insp_dir)#print db_list
else:
optparser.print_help()
sys.exit(1)
outfile = open(options.out,"a")
outfile.write("normalized similarity matrix derived from InSpect with %i sampled spectra "%nSpec)
outfile.write("(from simulation with %i sampled proteins): \n"%nProt)
print "normalized: \n"
for line in M_n:
print line
outfile.write(str(line)+"\n")
outfile.write("\n\n\n")
outfile.close()
print time.strftime("%Y/%m/%d, %H:%M:%S: all done\n", time.localtime(time.time()))
print "took %.3f seconds overall (that's %.3f minutes)"%(time.time() - t0, (time.time() - t0)/60)
print "----------------------------------------------------------------------------------------------------\n\n\n"
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2019 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
import math
print ("Example: demonstration of using friction models")
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
# chrono.SetChronoDataPath('../../../../data/')
# Helper class to define a cylindrical shape
class MyObstacle:
    """Cylindrical obstacle described by a radius and a base-center position."""

    def __init__(self, r, pos):
        self.radius = r
        self.center = pos

    def GetVisualization(self):
        """Build and return a colored ChCylinderShape spanning 1.1 units above the center."""
        shape = chrono.ChCylinderShape()
        shape.GetCylinderGeometry().rad = self.radius
        shape.GetCylinderGeometry().p1 = self.center + chrono.ChVectorD(0, 0, 0)
        shape.GetCylinderGeometry().p2 = self.center + chrono.ChVectorD(0, 1.1, 0)
        shape.SetColor(chrono.ChColor(0.6, 0.3, 0.0))
        return shape
# Custom collision detection callback class
class MyCustomCollisionDetection(chrono.CustomCollisionCallback):
    """Analytically adds ball-vs-cylinder contacts each step.

    The obstacle cylinder has no physical body of its own, so this callback
    detects ball/cylinder overlap in the horizontal plane and injects the
    resulting contact into the contact container manually.
    """
    def __init__(self, ball, ground,
                 ball_mat, obst_mat,
                 ball_radius, obstacle):
        super().__init__()
        self.m_ball = ball
        self.m_ground = ground
        self.m_ball_mat = ball_mat
        self.m_obst_mat = obst_mat
        self.m_ball_radius = ball_radius
        # Cache the obstacle's geometry; the obstacle object itself is not kept.
        self.m_obst_radius = obstacle.radius
        self.m_obst_center = obstacle.center
    def OnCustomCollision(self, sys):
        # super().OnCustomCollision(sys)
        r_sum = self.m_ball_radius + self.m_obst_radius
        # Get current ball position and project on horizontal plane.
        b_pos = self.m_ball.GetPos()
        b_center = chrono.ChVectorD(b_pos.x, 0.0, b_pos.z)
        # Check collision with obstacle (working in the horizontal plane).
        o_center = chrono.ChVectorD(self.m_obst_center.x, 0.0, self.m_obst_center.z)
        delta = o_center - b_center
        # Get the squared euclidean norm
        dist2 = delta.Length2()
        if dist2 >= r_sum * r_sum:
            return
        # Find collision points on the ball and obstacle and the contact normal.
        # NOTE(review): if the centers coincide exactly, dist == 0 and the
        # division below would fail; presumably the ball can never reach the
        # cylinder axis — confirm.
        dist = math.sqrt(dist2)
        normal = delta / dist
        pt_ball = b_center + normal * self.m_ball_radius
        pt_obst = o_center - normal * self.m_obst_radius
        # Populate the collision info object (express all vectors in 3D).
        # We pass null pointers to collision shapes.
        contact = chrono.ChCollisionInfo()
        contact.modelA = self.m_ball.GetCollisionModel()
        # The obstacle has no body of its own, so the ground body's model
        # stands in as the second collision model.
        contact.modelB = self.m_ground.GetCollisionModel()
        contact.shapeA = None
        contact.shapeB = None
        contact.vN = chrono.ChVectorD(normal.x, 0.0, normal.z)
        contact.vpA = chrono.ChVectorD(pt_ball.x, b_pos.y, pt_ball.z)
        contact.vpB = chrono.ChVectorD(pt_obst.x, b_pos.y, pt_obst.z)
        # Negative value == penetration depth.
        contact.distance = dist - r_sum
        sys.GetContactContainer().AddContact(contact, self.m_ball_mat, self.m_obst_mat)
# ---------------------------------------------------------------------
#
#  Create the simulation sys and add items
#
# Change use_NSC to specify different contact method
use_NSC = 0
ball_radius = 0.5
obst_radius = 2.0
obst_center = chrono.ChVectorD(2.9, 0, 2.9)
obstacle = MyObstacle(obst_radius, obst_center)
# Create the sys and the various contact materials.
# Each branch also selects an integration step and a render skip suited to
# that contact method (NSC tolerates a larger step than SMC).
if use_NSC:
    sys = chrono.ChSystemNSC()
    g_mat = chrono.ChMaterialSurfaceNSC()
    g_mat.SetRestitution(0.9)
    g_mat.SetFriction(0.4)
    b_mat = chrono.ChMaterialSurfaceNSC()
    b_mat.SetRestitution(0.9)
    b_mat.SetFriction(0.5)
    o_mat = chrono.ChMaterialSurfaceNSC()
    o_mat.SetRestitution(0.9)
    o_mat.SetFriction(0.4)
    ground_mat = g_mat
    ball_mat = b_mat
    obst_mat = o_mat
    time_step = 1e-3
    frame_skip = 10
else:  # use SMC contact method
    sys = chrono.ChSystemSMC()
    g_mat = chrono.ChMaterialSurfaceSMC()
    g_mat.SetRestitution(0.9)
    g_mat.SetFriction(0.4)
    b_mat = chrono.ChMaterialSurfaceSMC()
    b_mat.SetRestitution(0.9)
    b_mat.SetFriction(0.5)
    o_mat = chrono.ChMaterialSurfaceSMC()
    o_mat.SetRestitution(0.9)
    o_mat.SetFriction(0.4)
    ground_mat = g_mat
    ball_mat = b_mat
    obst_mat = o_mat
    time_step = 1e-4
    frame_skip = 100
sys.Set_G_acc(chrono.ChVectorD(0, -9.8, 0))
# Create the ground body with a plate and side walls (both collision and visualization).
# AddBox takes half-dimensions, so the collision boxes below match the visual
# shapes (whose SetLengths takes full lengths) at the same positions.
ground = chrono.ChBody()
sys.AddBody(ground)
ground.SetCollide(True)
ground.SetBodyFixed(True)
ground.GetCollisionModel().ClearModel()
ground.GetCollisionModel().AddBox(ground_mat, 5.0, 1.0, 5.0, chrono.ChVectorD(0, -1, 0))
ground.GetCollisionModel().AddBox(ground_mat, 0.1, 1.0, 5.1, chrono.ChVectorD(-5, 0, 0))
ground.GetCollisionModel().AddBox(ground_mat, 0.1, 1.0, 5.1, chrono.ChVectorD( 5, 0, 0))
ground.GetCollisionModel().AddBox(ground_mat, 5.1, 1.0, 0.1, chrono.ChVectorD(0, 0, -5))
# BUGFIX: this wall was centered at (0, 1, 5), floating one unit above its
# visual shape and the three sibling walls; center it at y=0 like the others.
ground.GetCollisionModel().AddBox(ground_mat, 5.1, 1.0, 0.1, chrono.ChVectorD(0, 0, 5))
ground.GetCollisionModel().BuildModel()
ground_vis_mat = chrono.ChVisualMaterial()
ground_vis_mat.SetKdTexture(chrono.GetChronoDataFile("textures/blue.png"))
vshape_1 = chrono.ChBoxShape()
vshape_1.GetBoxGeometry().SetLengths(chrono.ChVectorD(10, 2, 10))
vshape_1.SetMaterial(0, ground_vis_mat)
ground.AddVisualShape(vshape_1, chrono.ChFrameD(chrono.ChVectorD(0, -1, 0)))
vshape_2 = chrono.ChBoxShape()
vshape_2.GetBoxGeometry().SetLengths(chrono.ChVectorD(0.2, 2, 10.2))
vshape_2.SetMaterial(0, ground_vis_mat)
ground.AddVisualShape(vshape_2, chrono.ChFrameD(chrono.ChVectorD(-5, 0, 0)))
vshape_3 = chrono.ChBoxShape()
vshape_3.GetBoxGeometry().SetLengths(chrono.ChVectorD(0.2, 2, 10.2))
vshape_3.SetMaterial(0, ground_vis_mat)
ground.AddVisualShape(vshape_3, chrono.ChFrameD(chrono.ChVectorD(5, 0, 0)))
vshape_4 = chrono.ChBoxShape()
vshape_4.GetBoxGeometry().SetLengths(chrono.ChVectorD(10.2, 2, 0.2))
vshape_4.SetMaterial(0, ground_vis_mat)
ground.AddVisualShape(vshape_4, chrono.ChFrameD(chrono.ChVectorD(0, 0, -5)))
vshape_5 = chrono.ChBoxShape()
vshape_5.GetBoxGeometry().SetLengths(chrono.ChVectorD(10.2, 2, 0.2))
vshape_5.SetMaterial(0, ground_vis_mat)
ground.AddVisualShape(vshape_5, chrono.ChFrameD(chrono.ChVectorD(0, 0, 5)))
# Add obstacle visualization
ground.AddVisualShape(obstacle.GetVisualization())
# Create the falling ball
ball = chrono.ChBody()
sys.AddBody(ball)
ball.SetMass(10)
comp = 4 * ball_radius * ball_radius
ball.SetInertiaXX(chrono.ChVectorD(comp, comp, comp))
ball.SetPos(chrono.ChVectorD(-3, 1.2 * ball_radius, -3))
ball.SetPos_dt(chrono.ChVectorD(5, 0, 5))
ball.SetCollide(True)
ball.GetCollisionModel().ClearModel()
ball.GetCollisionModel().AddSphere(ball_mat, ball_radius)
ball.GetCollisionModel().BuildModel()
vshape_s = chrono.ChSphereShape()
vshape_s.GetSphereGeometry().rad = ball_radius
vshape_s.GetSphereGeometry().Pos = ball.GetPos()
vshape_s.SetTexture(chrono.GetChronoDataFile("textures/bluewhite.png"))
ball.AddVisualShape(vshape_s)
# Create a custom collision detection callback object and register it with the sys
my_collision = MyCustomCollisionDetection(ball, ground, ball_mat, obst_mat, ball_radius, obstacle)
sys.RegisterCustomCollisionCallback(my_collision)
# ---------------------------------------------------------------------
#
#  Create an Irrlicht application to visualize the sys
#
vis = chronoirr.ChVisualSystemIrrlicht()
sys.SetVisualSystem(vis)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('Custom contact demo')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVectorD(8, 8, -6))
vis.AddTypicalLights()
# ---------------------------------------------------------------------
#
#  Run the simulation
#
frame = 0
while vis.Run():
    # BUGFIX: the loop previously hard-coded 100 and 1e-4, silently ignoring
    # the per-contact-method frame_skip/time_step configured above.
    if frame % frame_skip == 0:
        vis.BeginScene()
        vis.DrawAll()
        vis.EndScene()
    sys.DoStepDynamics(time_step)
    frame += 1
|
# traveling salesman algorithm implementation in jython
# This also prints the index of the points of the shortest route.
# To make a plot of the route, write the points at these indexes
# to a file and plot them in your favorite tool.
import sys
import os
import time
import csv
import java.io.FileReader as FileReader
import java.io.File as File
import java.lang.String as String
import java.lang.StringBuffer as StringBuffer
import java.lang.Boolean as Boolean
import java.util.Random as Random
import dist.DiscreteDependencyTree as DiscreteDependencyTree
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import dist.Distribution as Distribution
import dist.DiscretePermutationDistribution as DiscretePermutationDistribution
import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor
import opt.EvaluationFunction as EvaluationFunction
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.HillClimbingProblem as HillClimbingProblem
import opt.NeighborFunction as NeighborFunction
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.example.FourPeaksEvaluationFunction as FourPeaksEvaluationFunction
import opt.ga.CrossoverFunction as CrossoverFunction
import opt.ga.SingleCrossOver as SingleCrossOver
import opt.ga.DiscreteChangeOneMutation as DiscreteChangeOneMutation
import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem
import opt.ga.GeneticAlgorithmProblem as GeneticAlgorithmProblem
import opt.ga.MutationFunction as MutationFunction
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.ga.UniformCrossOver as UniformCrossOver
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.prob.MIMIC as MIMIC
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer
import opt.example.TravelingSalesmanEvaluationFunction as TravelingSalesmanEvaluationFunction
import opt.example.TravelingSalesmanRouteEvaluationFunction as TravelingSalesmanRouteEvaluationFunction
import opt.SwapNeighbor as SwapNeighbor
import opt.ga.SwapMutation as SwapMutation
import opt.example.TravelingSalesmanCrossOver as TravelingSalesmanCrossOver
import opt.example.TravelingSalesmanSortEvaluationFunction as TravelingSalesmanSortEvaluationFunction
import shared.Instance as Instance
import util.ABAGAILArrays as ABAGAILArrays
from array import array
# Problem parameters
#=======================
# set N value. This is the number of points
N = 50
random = Random()
# Each point is an (x, y) pair drawn uniformly from [0, 1) via java.util.Random.
points = [[0 for x in xrange(2)] for x in xrange(N)]
for i in range(0, len(points)):
    points[i][0] = random.nextDouble()
    points[i][1] = random.nextDouble()
# Algorithm hyperparameters
#============================
# Iteration budgets are log-spaced (base 10, quarter-decade steps) for the
# fitness-vs-iterations curves; MIMIC uses a linear schedule.
maxiters_rhc= [int(10**(0.25*i)) for i in range(6,26)]
maxiters_sa= [int(10**(0.25*i)) for i in range(6,26)]
maxiters_ga = [int(10**(0.25*i)) for i in range(4,21)]
maxiters_mimic = [10*i for i in range(1,21)]
SA_start_temp = 1E11
SA_temp_decay = 0.999
GA_popsize = 50
GA_toMate = 25
GA_mutationPercent = 0.5
GA_toMutate = int(GA_mutationPercent*GA_toMate)
MIMIC_samples = 400
MIMIC_toKeep = 40
#========================
# ABAGAIL problem wiring: permutation encoding with swap-based neighbors,
# swap mutation and TSP-specific crossover.
ef = TravelingSalesmanRouteEvaluationFunction(points)
odd = DiscretePermutationDistribution(N)
nf = SwapNeighbor()
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
"""
#=======================
# Random Hiil Climbing
#=======================
print "Starting Random Hill Climbing Seacrh..."
rhc = RandomizedHillClimbing(hcp)
rhc_iters = []
rhc_fitness = []
rhc_time = []
for i in maxiters_rhc:
fit = FixedIterationTrainer(rhc, i)
t1=time.time()
error=fit.train()
t2=time.time()
fitness = ef.value(rhc.getOptimal())
time_ms=round(1000*(t2-t1),2)
rhc_fitness.append(fitness)
rhc_time.append(time_ms)
rhc_iters.append(i)
#print "RHC: " + str(fitness)
print "RHC inverse of distance using "+ str(i)+" fixed iterations: " + str(fitness)
print "Time taken for RHC using fixed iterations: "+str(time_ms)+" milliseconds"
print "Finished Random Hill Climbing Seacrh."
print "="*100
"""
"""
#=======================
# Simulated Annealing
#=======================
print "Starting Simulated Annealing Seacrh..."
sa = SimulatedAnnealing(SA_start_temp, SA_temp_decay, hcp)
sa_iters = []
sa_fitness = []
sa_time = []
for i in maxiters_sa:
fit = FixedIterationTrainer(sa, i)
t1=time.time()
fit.train()
t2=time.time()
fitness = ef.value(sa.getOptimal())
time_ms=round(1000*(t2-t1),2)
sa_fitness.append(fitness)
sa_time.append(time_ms)
sa_iters.append(i)
print "SA inverse of distance using "+ str(i)+" fixed iterations: " + str(fitness)
print "Time taken for SA using fixed iterations: "+str(time_ms)+" milliseconds"
print "Finished Simulated Annealing Seacrh."
print "="*100
"""
#"""
#=======================
# Genetic Algorithm
#=======================
print "Starting Genetic Algorithm Seacrh..."
ga = StandardGeneticAlgorithm(GA_popsize, GA_toMate, GA_toMutate, gap)
ga_iters = []
ga_fitness = []
ga_time = []
for i in maxiters_ga:
fit = FixedIterationTrainer(ga, i)
t1=time.time()
fit.train()
t2=time.time()
fitness = ef.value(ga.getOptimal())
time_ms=round(1000*(t2-t1),2)
ga_fitness.append(fitness)
ga_time.append(time_ms)
ga_iters.append(i)
print "GA inverse of distance using "+ str(i)+" fixed iterations: " + str(fitness)
print "Time taken for GA using fixed iterations: "+str(time_ms)+" milliseconds"
print "Finished Genetic Algorithm Seacrh."
print "="*100
#"""
"""
#=======================
# MIMIC
#=======================
# for mimic we use a sort encoding
ef = TravelingSalesmanSortEvaluationFunction(points);
fill = [N] * N
ranges = array('i', fill)
odd = DiscreteUniformDistribution(ranges);
df = DiscreteDependencyTree(.1, ranges);
pop = GenericProbabilisticOptimizationProblem(ef, odd, df);
print "Starting MIMIC Seacrh..."
mimic = MIMIC(MIMIC_samples, MIMIC_toKeep, pop)
mimic_iters = []
mimic_fitness = []
mimic_time = []
for i in maxiters_mimic:
fit = FixedIterationTrainer(mimic, i)
t1=time.time()
fit.train()
t2=time.time()
fitness = ef.value(mimic.getOptimal())
time_ms=round(1000*(t2-t1),2)
mimic_fitness.append(fitness)
mimic_time.append(time_ms)
mimic_iters.append(i)
print "MIMIC inverse of distance using "+ str(i)+" fixed iterations: " + str(fitness)
print "Time taken for MIMIC using fixed iterations: "+str(time_ms)+" milliseconds"
print "Finished MIMIC Seacrh."
print "="*100
"""
"""
# Writing RHC performance to a CSV
spamWriter = csv.writer(open('tsp_rhc.csv', 'w'), delimiter=' ',quotechar='|')
spamWriter.writerow(rhc_iters)
spamWriter.writerow(rhc_fitness)
spamWriter.writerow(rhc_time)
"""
"""
# Writing SA performance to a CSV
spamWriter = csv.writer(open('tsp_sa.csv', 'w'), delimiter=' ',quotechar='|')
spamWriter.writerow(sa_iters)
spamWriter.writerow(sa_fitness)
spamWriter.writerow(sa_time)
"""
#"""
# Writing GA performance to a CSV
spamWriter = csv.writer(open('tsp_ga.csv', 'w'), delimiter=' ',quotechar='|')
spamWriter.writerow(ga_iters)
spamWriter.writerow(ga_fitness)
spamWriter.writerow(ga_time)
#"""
"""
# Writing MIMIC performance to a CSV
spamWriter = csv.writer(open('tsp_mimic.csv', 'w'), delimiter=' ',quotechar='|')
spamWriter.writerow(mimic_iters)
spamWriter.writerow(mimic_fitness)
spamWriter.writerow(mimic_time)
"""
|
import os
import unittest
from biolinkml.generators.pythongen import PythonGenerator
from tests.test_utils.environment import env
from tests.utils.generatortestcase import GeneratorTestCase
from tests.utils.filters import metadata_filter
from tests.utils.python_comparator import compare_python
class IfAbsentTestCase(GeneratorTestCase):
    """Tests of `ifabsent` handling in the Python generator.

    Each test selects a sample model via the ``model_name`` class attribute,
    then delegates to the shared GeneratorTestCase machinery to generate
    Python code and compare it against the stored expectation.
    """
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    env = env
    # NOTE(review): tests assign these on the *class*, not the instance;
    # presumably GeneratorTestCase reads them as class attributes — confirm.
    model_name: str = None
    output_name: str = None
    def do_test(self):
        """ Test the metadata options"""
        self.single_file_generator('py', PythonGenerator, comparator=compare_python)
    def test_good_ifabsent(self):
        """ Test isabsent with no default_prefix """
        IfAbsentTestCase.model_name = "ifabsents"
        self.do_test()
    def test_good_ifabsent2(self):
        """ Test isabsents with default_prefix specified """
        IfAbsentTestCase.model_name = "ifabsents2"
        self.do_test()
    def test_good_ifabsent3(self):
        """ Test isabsent with no default_prefix, but prefix specified that matches the module id """
        IfAbsentTestCase.model_name = "ifabsents3"
        self.do_test()
    def test_bad_ifabsent(self):
        # The error model is expected to make the generator raise ValueError.
        IfAbsentTestCase.model_name = "ifabsents_error"
        with self.assertRaises(ValueError):
            self.single_file_generator('py', PythonGenerator, filtr=metadata_filter)
    def test_ifabsent_uri(self):
        IfAbsentTestCase.model_name = "ifabsent_uri"
        self.do_test()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from dataclasses import dataclass
from itertools import chain
from typing import List, Optional, Iterable
def is_junk_line(line: str):
    """Return True for lines that carry no useful tournament data.

    Junk lines are blank/whitespace-only lines and the site's boilerplate
    ("Кубок Европы" headers and "На главную страницу" navigation links).
    """
    return (
        line.strip() == ""
        or line == "\n"
        or "Кубок Европы" in line
        or line.startswith("На главную страницу")
    )
def flatten(list_of_lists: Iterable) -> List:
    """Concatenate the sub-iterables of *list_of_lists* into one flat list."""
    return [item for sub in list_of_lists for item in sub]
@dataclass(frozen=True)
class Team:
    """Immutable identity of a team for a given season year."""
    id: int
    name: str
    city: str
    year: int
@dataclass(frozen=True)
class Player:
    """Immutable record of a player and the Team they belong to."""
    id: int
    team: Team
    first_name: str
    middle_name: str
    last_name: str
@dataclass(frozen=True)
class TeamQuestions:
    """Per-question results for a team (by team id) in a given year."""
    team_id: int
    year: int
    # NOTE(review): frozen but holds a mutable list — hashing instances would fail.
    questions: List[int]
@dataclass(frozen=True)
class TeamNameQuestions:
    """Per-question results keyed by team name/city/country instead of id."""
    name: str
    city: str
    country: str
    year: int
    questions: List[int]
@dataclass(frozen=False)
class SIPlayer:
    """One player's result in an SI game; score fields are filled in later."""
    team: str
    city: str
    first_name: str
    last_name: str
    points: Optional[int] = None
    shootout: Optional[int] = None
    place: Optional[float] = None
@dataclass(frozen=True)
class SIGame:
    """A single SI game: its stage/game names and participating players."""
    stage_name: str
    game_name: str
    players: List[SIPlayer]
@dataclass(frozen=True)
class BRGame:
    """A head-to-head BR game between two teams, with optional shootout scores."""
    stage_name: str
    team_one: str
    team_two: str
    team_one_points: int
    team_two_points: int
    team_one_shootout_points: Optional[int] = None
    team_two_shootout_points: Optional[int] = None
@dataclass(frozen=False)
class BRGroupTeamResult:
    """A team's accumulated standing within a BR group stage.

    Mutable: counters are incremented as individual games are processed.
    """
    stage_name: str
    group_name: str
    team_name: str
    wins: int = 0
    losses: int = 0
    draws: int = 0
    plus: int = 0
    minus: int = 0
    points: int = 0
    place: float = 0
@dataclass(frozen=True)
class EQGameTeamResult:
    """A single team's result (place, optional scores) in one EQ game."""
    stage_name: str
    game_name: str
    team_name: str
    place: float
    points: Optional[int] = None
    shootout: Optional[int] = None
@dataclass(frozen=False)
class EQGame:
    """An EQ game: its stage/game names and the per-team results."""
    stage_name: str
    game_name: str
    teams: List[EQGameTeamResult]
@dataclass(frozen=False)
class SIWrittenPlayer:
    """A player's result in a written SI round: total, place, per-question points."""
    name: str
    sum: int
    place: float
    points: List[int]
|
import os
import json
import socket
import psutil
from uuid import uuid4
from time import time, sleep
from multiprocessing.connection import Listener, Client
from traceback import format_exc
from qaviton_handlers.try_functions import try_to
# from qaviton_package_manager.conf import ignore_list
from qaviton_package_manager.utils.functions import find_free_port
from qaviton_git.git_wrapper import get_root
from qaviton_processes import python_code_async
class ServerDownError(ConnectionAbortedError):
    """Raised when the cache server described by the info file is unreachable."""
    ...
class Cache:
    """Cross-process in-memory cache served over multiprocessing sockets.

    One process runs server() (spawned via create_server()); it keeps the
    cached key/value pairs in memory and answers GET/POST/DELETE requests.
    The JSON file at ``self.file`` stores only how to reach the server
    (address, auth key, pid, creation time) — never the cached values.
    Errors on the server side are appended to ``self.errors``.
    """
    # Fresh random auth key per process; also used as the client reply token.
    authkey = bytes(str(uuid4()), 'utf-8')
    root = get_root()
    # Connection-info file and error log live at the git repo root.
    file = root + os.sep + '.qaviton_package_manager_cache'
    errors = root + os.sep + '.qaviton_package_manager_cache.errors'
    request_timeout = 30  # seconds to wait for the info file / a server reply
    class method:
        # Supported request verbs (HTTP-style names).
        get = 'GET'
        delete = 'DELETE'
        post = 'POST'
    def kill_server(self):
        """Kill the server process recorded in the info file, then remove the file."""
        pid = self.get_file_content()['pid']
        # Best-effort kill: the process may already be gone.
        try_to(psutil.Process(pid).kill)
        self.remove_file()
    def get_file_content(self):
        """Load and return the server connection info (JSON dict) from disk."""
        with open(self.file) as f:
            content = json.load(f)
        return content
    def is_file(self):
        """Return True if the server info file exists."""
        return os.path.exists(self.file)
    def remove_file(self):
        """Delete the server info file if present."""
        if self.is_file():
            os.remove(self.file)
    def wait_for_file(self, timeout):
        """Poll until the info file appears; raise TimeoutError after *timeout* seconds."""
        t = time()
        while not self.is_file():
            if time()-t > timeout:
                raise TimeoutError("timed out while waiting for server details")
            sleep(0.1)
    def server_is_alive(self):
        """Probe the server with an empty GET; True iff it responds."""
        if not self.is_file():
            return False
        try:
            self.get()
            return True
        except ServerDownError:
            return False
    def log_server_error(self):
        """Append the current traceback to the error log file."""
        if not os.path.exists(self.errors):
            open(self.errors, 'w').close()
        with open(self.errors, 'a') as f:
            f.write('\n\n' + format_exc())
    def server(self, cache_timeout, **kwargs):
        """Run the cache server loop; *kwargs* is the initial cached data.

        -1 disables the accept timeout.  NOTE(review): cache_timeout is used
        both as a (clamped) socket timeout and compared directly against
        time() for the loop exit, i.e. treated as an absolute epoch deadline
        there — confirm callers pass a deadline, not a duration.
        """
        try:
            def send_response(response: dict):
                # Reply on a fresh connection back to the client's listener,
                # authenticated with the token the client sent us.
                with Client(client_address, authkey=token) as conn:
                    try:
                        conn.send(response)
                    except:
                        conn.send({'error': format_exc()})
            timeout = cache_timeout
            port = find_free_port()
            server_address = ('localhost', port)
            with Listener(server_address, authkey=self.authkey) as listener:
                if timeout != -1:
                    listener._listener._socket.settimeout(timeout if timeout < 60*60*24 else 60*60*24)  # avoid OverflowError: timeout doesn't fit into C timeval
                pid = os.getpid()
                p = psutil.Process(pid)
                # Publish how to reach (and identify) this server process.
                with open(self.file, 'w') as f:
                    json.dump({
                        'key': self.authkey.decode('utf-8'),
                        'address': list(server_address),
                        'timeout': timeout,
                        'pid': pid,
                        'name': p.name(),
                        'created': p.create_time()
                    }, f, indent=2)
                del pid
                del p
                while True:
                    try:
                        conn = listener.accept()
                    except socket.timeout:
                        break
                    try:
                        data: dict = conn.recv()
                        client_address = tuple(data['address'])
                        token = data['token'].encode('utf-8')
                        if data['method'] == self.method.get:
                            # Return only the requested keys that exist.
                            send_response({key: kwargs[key] for key in data['kwargs'] if key in kwargs})
                        elif data['method'] == self.method.post:
                            kwargs.update(data['kwargs'])
                            send_response({})
                        elif data['method'] == self.method.delete:
                            send_response({})
                            # An empty DELETE shuts the server down.
                            if len(data['kwargs']) == 0:
                                break
                            for key in data['kwargs']:
                                if key in kwargs:
                                    del kwargs[key]
                        else:
                            send_response({'error': 'unsupported method'})
                    except:
                        self.log_server_error()
                    finally:
                        conn.close()
                    if time() > timeout + 5 and timeout != -1:
                        break
        except:
            self.log_server_error()
    def create_server(self, cache_timeout, **kwargs):
        """Spawn the server in a separate python process; return the process handle."""
        self.remove_file()
        p = python_code_async(
            'from qaviton_package_manager.utils.cache_cred import Cache',
            'cache = Cache()',
            f'cache.server({cache_timeout}, **{kwargs})'
        )
        # Wait until the child has written its connection info.
        self.wait_for_file(timeout=10)
        return p
    def request(self, method, **kwargs)->dict:
        """Send one request to the server and return its response dict.

        Verifies the recorded pid is alive, still listening on the advertised
        port, and is the same process (name + creation time) before trusting
        it; raises ServerDownError otherwise, ConnectionError on a server-side
        error response.
        """
        self.wait_for_file(self.request_timeout)
        d = self.get_file_content()
        server_key = d['key']
        server_address = tuple(d['address'])
        server_timeout = d['timeout']
        server_pid = d['pid']
        server_name = d['name']
        server_created = d['created']
        # NOTE(review): like server(), this treats the stored timeout as an
        # epoch deadline (time() > server_timeout) — confirm.
        if (time() > server_timeout or server_timeout == -1) and psutil.pid_exists(server_pid):
            server_process = psutil.Process(server_pid)
            connections = [c for c in server_process.connections() if c.status == psutil.CONN_LISTEN]
            for c in connections:
                # check process is listening to port
                # (c[3] is the psutil connection's laddr: (address, port))
                if server_address[1] == c[3][1]\
                        and server_process.name() == server_name\
                        and server_process.create_time() == server_created:
                    client_address = ('localhost', find_free_port())
                    with Client(server_address, authkey=server_key.encode('utf-8')) as conn:
                        conn.send({
                            'token': self.authkey.decode('utf-8'),
                            'address': list(client_address),
                            'method': method,
                            'kwargs': kwargs
                        })
                    # Receive the reply on our own short-lived listener.
                    with Listener(client_address, authkey=self.authkey) as listener:
                        listener._listener._socket.settimeout(self.request_timeout)
                        conn = listener.accept()
                        data: dict = conn.recv()
                        if 'error' in data:
                            raise ConnectionError(data['error'])
                        return data
        raise ServerDownError("the cache server is down")
    # Convenience wrappers mapping *args/**kwargs onto the request verbs.
    def get(self, *args) -> dict: return self.request(self.method.get, **{key: True for key in args})
    def post(self, **kwargs) -> dict: return self.request(self.method.post, **kwargs)
    def delete(self, *args) -> dict: return self.request(self.method.delete, **{key: True for key in args})
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.objects import external_event as external_event_obj
from nova.tests.unit.objects import test_objects
class _TestInstanceExternalEventObject(object):
    """Shared tests for InstanceExternalEvent (mixed into local/remote cases below)."""
    def test_make_key(self):
        # Name and tag are joined with a hyphen.
        key = external_event_obj.InstanceExternalEvent.make_key('foo', 'bar')
        self.assertEqual('foo-bar', key)
    def test_make_key_no_tag(self):
        # Without a tag, the key is just the event name.
        key = external_event_obj.InstanceExternalEvent.make_key('foo')
        self.assertEqual('foo', key)
    def test_key(self):
        event = external_event_obj.InstanceExternalEvent(
            name='network-changed',
            tag='bar')
        # The key property must delegate to make_key with the event's fields.
        with mock.patch.object(event, 'make_key') as make_key:
            make_key.return_value = 'key'
            self.assertEqual('key', event.key)
            make_key.assert_called_once_with('network-changed', 'bar')
    def test_event_names(self):
        # Every registered event name is accepted; an unknown name raises.
        for event in external_event_obj.EVENT_NAMES:
            external_event_obj.InstanceExternalEvent(name=event, tag='bar')
        self.assertRaises(ValueError,
                          external_event_obj.InstanceExternalEvent,
                          name='foo', tag='bar')
# Runs the shared tests against the local object implementation.
class TestInstanceExternalEventObject(test_objects._LocalTest,
                                      _TestInstanceExternalEventObject):
    pass
# Runs the shared tests against the remote (RPC-backed) object implementation.
class TestRemoteInstanceExternalEventObject(test_objects._RemoteTest,
                                            _TestInstanceExternalEventObject):
    pass
|
import logging
from os import getenv
# Module-level logger shared by the pass-through helpers below.  The level
# comes from the BS_LOG_LEVEL environment variable (a level name such as
# "DEBUG"/"INFO", default "INFO") so deployments can tune verbosity without
# code changes.  The previous `LOGGER = None` / `if LOGGER is None:` dance was
# dead code: the condition was always true at import time.
LOGGER = logging.getLogger("bs.log")
LOGGER.setLevel(getenv("BS_LOG_LEVEL", "INFO"))
def log(*args, **kwargs):
    """Pass-through to the global LOGGER object's ``log`` method."""
    LOGGER.log(*args, **kwargs)
def info(*args, **kwargs):
    """Pass-through to the global LOGGER object's ``info`` method."""
    LOGGER.info(*args, **kwargs)
def critical(*args, **kwargs):
    """Pass-through to the global LOGGER object's ``critical`` method."""
    LOGGER.critical(*args, **kwargs)
def exception(*args, **kwargs):
    """Pass-through to the global LOGGER object's ``exception`` method."""
    LOGGER.exception(*args, **kwargs)
def warning(*args, **kwargs):
    """Pass-through to the global LOGGER object's ``warning`` method."""
    LOGGER.warning(*args, **kwargs)
def debug(*args, **kwargs):
    """Pass-through to the global LOGGER object's ``debug`` method."""
    LOGGER.debug(*args, **kwargs)
|
# coding=utf-8
from mongoengine import *
class OnlineCount(Document):
    """MongoDB document recording a snapshot of player counts."""
    # Number of recharging (paying) players; verbose_name is "充值人数".
    charge_count = IntField(verbose_name=u"充值人数")
    # Number of players currently online; verbose_name is "在线人数".
    online_count = IntField(verbose_name=u"在线人数")
    # Time this snapshot was last updated; verbose_name is "更新时间".
    update_time = DateTimeField(verbose_name=u"更新时间")
|
'''
for testing Python snippets
'''
from datetime import datetime
# Take a single timestamp so all components agree with one another.
# Calling datetime.now() once per field (as before) could straddle a rollover
# (e.g. second 59 -> 0) and yield a mutually inconsistent set of values.
_now = datetime.now()
currentSecond = _now.second
currentMinute = _now.minute
currentHour = _now.hour
currentDay = _now.day
currentMonth = _now.month
currentYear = _now.year
# Prints 'hello' as a side effect (first tuple element), evaluates to 'world'.
f = lambda : (print('hello'),'world')[1]
print(f())
import subprocess
import secrets
import getpass
import os
import requests
import urllib.parse
import time
from google.colab import files, drive, auth
from google.cloud import storage
import glob
def connect(LOG_DIR = '/log/fit'):
    """Set up SSH + TensorBoard access to this Colab VM via ngrok tunnels.

    Installs/configures sshd with a random root password, downloads ngrok,
    prompts for the user's ngrok authtoken, opens the tunnels, starts
    TensorBoard on *LOG_DIR*, and writes the connection details to
    /content/ngrok-ssh/ngrok-tunnel-info.txt (readable via info()).
    """
    print('It may take a few seconds for processing. Please wait.')
    # Random root password for the SSH login written to the info file below.
    root_password = secrets.token_urlsafe()
    subprocess.call('apt-get update -qq', shell=True)
    subprocess.call('apt-get install -qq -o=Dpkg::Use-Pty=0 openssh-server pwgen > /dev/null', shell=True)
    subprocess.call(f'echo root:{root_password} | chpasswd', shell=True)
    subprocess.call('mkdir -p /var/run/sshd', shell=True)
    subprocess.call('echo "PermitRootLogin yes" >> /etc/ssh/sshd_config', shell=True)
    subprocess.call('echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config', shell=True)
    get_ipython().system_raw('/usr/sbin/sshd -D &')
    subprocess.call('mkdir -p /content/ngrok-ssh', shell=True)
    os.chdir('/content/ngrok-ssh')
    subprocess.call('wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip -O ngrok-stable-linux-amd64.zip', shell=True)
    subprocess.call('unzip -u ngrok-stable-linux-amd64.zip', shell=True)
    subprocess.call('cp /content/ngrok-ssh/ngrok /ngrok', shell=True)
    subprocess.call('chmod +x /ngrok', shell=True)
    print("Copy&paste your authtoken from https://dashboard.ngrok.com/auth")
    authtoken = getpass.getpass()
    get_ipython().system_raw(f'/ngrok authtoken {authtoken} &')
    _create_tunnels()
    get_ipython().system_raw(f'tensorboard --logdir {LOG_DIR} --host 0.0.0.0 --port 6006 &')
    time.sleep(3) # synchronize.
    with open('/content/ngrok-ssh/ngrok-tunnel-info.txt', 'w') as f:
        # Public host/port of the ssh tunnel, e.g. 0.tcp.ngrok.io:12345.
        url, port = urllib.parse.urlparse(_get_ngrok_url('ssh')).netloc.split(':')
        # f.write('Run the command below on local machines to SSH into the Colab instance:\n')
        f.write(f'ssh -p {port} root@{url}\n')
        f.write('Password:\n')
        f.write(f'{root_password}\n')
        if 'COLAB_TPU_ADDR' in os.environ:
            # On a TPU runtime, include the snippet needed to attach to the TPU.
            tpu_address = 'grpc://' + os.environ['COLAB_TPU_ADDR']
            f.write(f"""Copy and paste the commands below to the beginning of your TPU program:
import tensorflow as tf
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='{tpu_address}')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)""")
        url_tensorboard = _get_ngrok_url('tensorboard')
        # f.write(f'To view tensorboard, visit {url_tensorboard}')
        f.write(f'Tensorboard: {url_tensorboard}')
        # f.write('after running the following two commands on the Colab notebook:\n')
        # f.write(f'  %load_ext tensorboard')
        # f.write(f'  %tensorboard --logdir {LOG_DIR}')
        # f.write('Run kill() to close all the tunnels.\n')
# print('SSH connection is successfully established. Run info() for connection configuration.')
def info():
    """Print the SSH/TensorBoard connection details saved by connect()."""
    with open('/content/ngrok-ssh/ngrok-tunnel-info.txt', 'r') as fh:
        entries = fh.readlines()
    for entry in entries:
        print(entry)
def kill():
    """Kill every running ngrok process, closing all of its tunnels."""
    os.system("kill $(ps aux | grep ngrok | awk '{print $2}')")
    print('Done.')
def _create_tunnels():
    """Write per-tunnel ngrok YAML configs and start ngrok with them.

    One config file per tunnel (ssh, tensorboard, http8080, tcp8080, and tpu
    when running on a TPU runtime); a small shell script then launches ngrok
    with all configs, and the relevant tunnels are started in the background.
    """
    with open('/content/ngrok-ssh/ssh.yml', 'w') as f:
        f.write('tunnels:\n')
        f.write(' ssh:\n')
        f.write(' proto: tcp\n')
        f.write(' addr: 22')
    with open('/content/ngrok-ssh/tensorboard.yml', 'w') as f:
        f.write('tunnels:\n')
        f.write(' tensorboard:\n')
        f.write(' proto: http\n')
        f.write(' addr: 6006\n')
        f.write(' inspect: false\n')
        f.write(' bind_tls: true')
    with open('/content/ngrok-ssh/http8080.yml', 'w') as f:
        f.write('tunnels:\n')
        f.write(' http8080:\n')
        f.write(' proto: http\n')
        f.write(' addr: 8080\n')
        f.write(' inspect: false\n')
        f.write(' bind_tls: true')
    with open('/content/ngrok-ssh/tcp8080.yml', 'w') as f:
        f.write('tunnels:\n')
        f.write(' tcp8080:\n')
        f.write(' proto: tcp\n')
        f.write(' addr: 8080')
    if 'COLAB_TPU_ADDR' in os.environ:
        # Forward the TPU gRPC endpoint too when a TPU is attached.
        with open('/content/ngrok-ssh/tpu.yml', 'w') as f:
            COLAB_TPU_ADDR = os.environ['COLAB_TPU_ADDR']
            f.write('tunnels:\n')
            f.write(' tpu:\n')
            f.write(' proto: tcp\n')
            f.write(f' addr: {COLAB_TPU_ADDR}')
    with open('/content/ngrok-ssh/run_ngrok.sh', 'w') as f:
        f.write('#!/bin/sh\n')
        f.write('set -x\n')
        if 'COLAB_TPU_ADDR' in os.environ:
            f.write('/ngrok start --config ~/.ngrok2/ngrok.yml --config /content/ngrok-ssh/ssh.yml --log=stdout --config /content/ngrok-ssh/tensorboard.yml --config /content/ngrok-ssh/http8080.yml --config /content/ngrok-ssh/tcp8080.yml --config /content/ngrok-ssh/tpu.yml "$@"')
        else:
            f.write('/ngrok start --config ~/.ngrok2/ngrok.yml --config /content/ngrok-ssh/ssh.yml --log=stdout --config /content/ngrok-ssh/tensorboard.yml --config /content/ngrok-ssh/http8080.yml --config /content/ngrok-ssh/tcp8080.yml "$@"')
    if 'COLAB_TPU_ADDR' in os.environ:
        get_ipython().system_raw('bash /content/ngrok-ssh/run_ngrok.sh ssh tensorboard tcp8080 tpu &')
    else:
        get_ipython().system_raw('bash /content/ngrok-ssh/run_ngrok.sh ssh tensorboard tcp8080 &')
def _get_ngrok_info():
    """Query the local ngrok agent API and return its tunnel inventory as a dict."""
    response = requests.get('http://localhost:4040/api/tunnels')
    return response.json()
def _get_ngrok_tunnels():
    """Yield (name, tunnel_dict) pairs for every active ngrok tunnel."""
    for tunnel in _get_ngrok_info()['tunnels']:
        yield tunnel['name'], tunnel
def _get_ngrok_tunnel(name):
    """Return the tunnel dict whose name matches *name*, or None if absent."""
    return next(
        (tunnel for tunnel_name, tunnel in _get_ngrok_tunnels() if tunnel_name == name),
        None,
    )
def _get_ngrok_url(name, local=False):
    """Return the tunnel's local forwarding address (local=True) or its public URL."""
    tunnel = _get_ngrok_tunnel(name)
    return tunnel['config']['addr'] if local else tunnel['public_url']
def kaggle(data='tabular-playground-series-mar-2021', output='/kaggle/input'):
    """Download and unpack a Kaggle competition dataset into *output*.

    Installs the kaggle CLI, prompts for an upload of kaggle.json (the API
    token) on first use, downloads competition *data*, unzips it, and mimics
    the Kaggle notebook layout by chdir-ing into /kaggle/working.
    """
    subprocess.call('sudo apt -q update', shell=True)
    subprocess.call('sudo apt -q install unar nano less p7zip', shell=True)
    subprocess.call('pip install -q --upgrade --force-reinstall --no-deps kaggle kaggle-cli', shell=True)
    subprocess.call('mkdir -p /root/.kaggle', shell=True)
    os.chdir('/root/.kaggle')
    if 'kaggle.json' not in os.listdir('/root/.kaggle'):
        print('Upload your kaggle API token')
        files.upload()
    subprocess.call('chmod 600 /root/.kaggle/kaggle.json', shell=True)
    subprocess.call(f'mkdir -p {output}', shell=True)
    os.chdir(f'{output}')
    subprocess.call(f'kaggle competitions download -c {data}', shell=True)
    subprocess.call(f'7z x {data}.zip -o{output}', shell=True)
    print(f'\nUnzipped {data}.zip to {output}.')
    subprocess.call('mkdir -p /kaggle/working', shell=True)
    os.chdir('/kaggle/working')
def google_drive(dir='/gdrive'):
    """Mount the user's Google Drive at *dir* (triggers the OAuth flow)."""
    print('\nGoogle Drive authentication starts...')
    drive.mount(dir)
def GCSconnect(key_file=None):
    """Authenticate against Google Cloud Storage.

    With *key_file*, a service-account JSON is uploaded to /root/.kaggle and
    exported via GOOGLE_APPLICATION_CREDENTIALS; otherwise the interactive
    Colab user-authentication flow is used.
    """
    if not key_file:
        print('\nGCS authentication starts...')
        auth.authenticate_user()
        return
    if not os.path.exists('/root/.kaggle/'):
        os.makedirs('/root/.kaggle/')
    print('Upload your Google Storage API token')
    os.chdir('/root/.kaggle/')
    files.upload()
    subprocess.call(f'chmod 600 /root/.kaggle/{key_file}', shell=True)
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = f'/root/.kaggle/{key_file}'
    subprocess.call('echo $GOOGLE_APPLICATION_CREDENTIALS', shell=True)
def _create_bucket(project, bucket_name):
    """Create *bucket_name* in the US location under *project*."""
    client = storage.Client(project=project)
    new_bucket = client.bucket(bucket_name)
    new_bucket.create(location='US')
    print(f'bucket {new_bucket.name} created.')
def _list_blobs(project, bucket_name):
    """Print the name of every blob in *bucket_name*, or a notice if empty."""
    client = storage.Client(project=project)
    names = [blob.name for blob in client.list_blobs(bucket_name)]
    if names:
        print('\n'.join(names))
    else:
        print('empty bucket!')
def create_bucket(project, bucket_name):
    """Best-effort wrapper around _create_bucket that reports failures."""
    try:
        _create_bucket(project, bucket_name)
    except Exception as exc:
        print(f"create_bucket('{bucket_name}') fails. Code:", exc)
def list_blobs(project, bucket_name):
    """Best-effort wrapper around _list_blobs that reports failures."""
    try:
        _list_blobs(project, bucket_name)
    except Exception as exc:
        print(f"list_blobs('{bucket_name}') fails. Code:", exc)
def upload_to_gcs(project, bucket_name, destination_blob, source_directory):
    """Upload a single local file to a GCS bucket.

    Removed a stale commented-out multi-file implementation that no longer
    matched the signature and was misleading.

    :param project: GCP project name.
    :param bucket_name: target bucket.
    :param destination_blob: blob path inside the bucket; do NOT use a
        leading slash.
    :param source_directory: local path of the file to upload.

    usage:
        upload_to_gcs("strategic-howl-123", "gcs-station-16", 'temp8/a.pkl', '/a.pkl')
    """
    storage_client = storage.Client(project=project)
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob(destination_blob)
    blob.upload_from_filename(source_directory)
def download_to_colab(project, bucket_name, destination_directory, remote_blob_path='', local_file_name=''):
    """Download file(s) from a Google Cloud Storage bucket into Colab.

    If both *remote_blob_path* and *local_file_name* are given, only that
    single blob is downloaded into *destination_directory*; otherwise every
    blob in the bucket is mirrored under *destination_directory* with its
    folder structure preserved.

    NOTE(review): the mirror-everything branch changes the process working
    directory and never restores it — confirm callers tolerate that.

    usage:
        project = "strategic-howl-123456522"
        bucket_name = "gcs-station-168"
        >>> download_to_colab(project, bucket_name, '/temp8')
        >>> download_to_colab(project, bucket_name, destination_directory = '/temp9/fun', remote_blob_path='tps-apr-2021-label/data_fare_age.pkl', local_file_name='data_fare_age.pkl')
    """
    storage_client = storage.Client(project=project)
    os.makedirs(destination_directory, exist_ok = True)
    if local_file_name and remote_blob_path:
        # Single-file mode: fetch exactly one blob into the destination.
        bucket = storage_client.bucket(bucket_name)
        blob = bucket.blob(remote_blob_path)
        blob.download_to_filename(os.path.join(destination_directory, local_file_name))
        print('download finished.')
    else:
        from pathlib import Path
        os.chdir(destination_directory)
        blobs = storage_client.list_blobs(bucket_name)
        count = 1
        for blob in blobs:
            if blob.name.endswith("/"): continue  # skip "directory" placeholder objects
            file_split = blob.name.split("/")
            directory = "/".join(file_split[0:-1])
            Path(directory).mkdir(parents=True, exist_ok=True)  # recreate the blob's folder hierarchy locally
            blob.download_to_filename(blob.name)
            des = os.path.join(destination_directory, directory)
            if count==1: print(f"Destination: {des}")
            print(f'{count}. {blob.name.split("/")[-1]:>50s}')
            count += 1
|
from __future__ import absolute_import, division, print_function
from .version import __version__
from .evaluate import *
|
from pypylon import pylon
class Camera():
    """Thin wrapper around a Basler camera (pypylon) that returns BGR frames."""

    def __init__(self):
        # Grab from the first camera the transport layer finds; keep only the
        # most recent frame so GetImage never returns stale data.
        self.camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
        self.camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        # Convert the raw sensor format to 8-bit packed BGR (OpenCV layout).
        self.converter = pylon.ImageFormatConverter()
        self.converter.OutputPixelFormat = pylon.PixelType_BGR8packed
        self.converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    def __del__(self):
        # Best effort: __init__ may have raised before self.camera existed.
        camera = getattr(self, 'camera', None)
        if camera is not None:
            camera.StopGrabbing()

    def GetImage(self):
        """Block until a frame is grabbed successfully; return it as an array.

        Raises on a 5 s retrieve timeout (TimeoutHandling_ThrowException).
        """
        grabResult = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
        while not grabResult.GrabSucceeded():
            # BUGFIX: release the failed result before fetching the next one;
            # previously failed grabs were never released, leaking buffers
            # back in the driver pool.
            grabResult.Release()
            grabResult = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
        image = self.converter.Convert(grabResult)
        img = image.GetArray()
        grabResult.Release()
        return img
"""
Check that doc/source/reference/general_utility_functions.rst documents
all exceptions and warnings in pandas/errors/__init__.py.
This is meant to be run as a pre-commit hook - to run it manually, you can do:
pre-commit run pandas-errors-documented --all-files
"""
from __future__ import annotations
import argparse
import ast
import pathlib
import sys
from typing import Sequence
API_PATH = pathlib.Path("doc/source/reference/general_utility_functions.rst").resolve()
def get_defined_errors(content: str) -> set[str]:
    """Collect the names of all classes defined in *content* together with
    every name it imports via ``from ... import ...`` statements."""
    found: set[str] = set()
    for node in ast.walk(ast.parse(content)):
        if isinstance(node, ast.ClassDef):
            found.add(node.name)
        elif isinstance(node, ast.ImportFrom):
            found.update(alias.name for alias in node.names)
    return found
def main(argv: Sequence[str] | None = None) -> None:
    """Exit 0 if every error/warning defined in the given file is documented
    in the API rst page; otherwise report the missing names and exit 1."""
    parser = argparse.ArgumentParser()
    parser.add_argument("path")
    args = parser.parse_args(argv)
    with open(args.path, encoding="utf-8") as f:
        file_errors = get_defined_errors(f.read())
    # Read the docs page with an explicit encoding, consistent with above.
    with open(API_PATH, encoding="utf-8") as f:
        doc_errors = {
            line.split(".")[1].strip() for line in f.readlines() if "errors" in line
        }
    missing = file_errors.difference(doc_errors)
    if missing:
        # BUGFIX: message typo — "The follow" -> "The following".
        sys.stdout.write(
            f"The following exceptions and/or warnings are not documented "
            f"in {API_PATH}: {missing}"
        )
        sys.exit(1)
    sys.exit(0)
if __name__ == "__main__":
main()
|
import io
from setuptools import find_packages
from setuptools import setup
with io.open('README.md', 'rt', encoding='utf8') as f:
readme = f.read()
setup(
name='cranserver',
version='1.0.0',
url='http://github.com/UptakeOpenSource/cran-server',
license='BSD 3-Clause',
maintainer='Troy de Freitas',
maintainer_email='troy.defretas@uptake.com',
description='An application for serving CRAN packages in a cloud environment.',
long_description=readme,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'boto3',
'botocore',
'flask',
'python-debian'
],
extras_require={
'test': [
'pytest',
'coverage',
'requests'
]
}
)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__doctest_requires__ = {'*': ['matplotlib']}
""" This module contains dictionaries that can be used to set a
matplotlib plotting style.
It is mostly here to allow a consistent plotting style in tutorials,
but can be used to prepare any matplotlib figure.
Using a matplotlib version > 1.4 you can do::
>>> import matplotlib.pyplot as plt
>>> from astropy.visualization import astropy_mpl_style
>>> plt.style.use(astropy_mpl_style)
for older versions of matplotlib the following works::
>>> import matplotlib as mpl
>>> from astropy.visualization import astropy_mpl_style
>>> mpl.rcParams.update(astropy_mpl_style)
This applies the astropy style on top of your existing matplotlib
default parameters. If you want an exactly reproducible plot (again, this
is useful if you are writing teaching material and you want the plot
to come out exactly the same, independent of the users configuration for
example), you should reset the matplotlib settings to the library defaults
*before* applying the astropy style, e.g.::
>>> import matplotlib as mpl
>>> from astropy.visualization import astropy_mpl_style
>>> mpl.rcdefaults()
>>> mpl.rcParams.update(astropy_mpl_style)
"""
astropy_mpl_style_1 = {
# Lines
'lines.linewidth': 1.7,
'lines.antialiased': True,
# Patches
'patch.linewidth': 1.0,
'patch.facecolor': '#348ABD',
'patch.edgecolor': '#CCCCCC',
'patch.antialiased': True,
# images
'image.cmap': 'gist_heat',
'image.origin': 'upper',
# Font
'font.size': 12.0,
# Axes
'axes.facecolor': '#FFFFFF',
'axes.edgecolor': '#AAAAAA',
'axes.linewidth': 1.0,
'axes.grid': True,
'axes.titlesize': 'x-large',
'axes.labelsize': 'large',
'axes.labelcolor': 'k',
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD', # blue
'#7A68A6', # purple
'#A60628', # red
'#467821', # green
'#CF4457', # pink
'#188487', # turquoise
'#E24A33'], # orange
# Ticks
'xtick.major.size': 0,
'xtick.minor.size': 0,
'xtick.major.pad': 6,
'xtick.minor.pad': 6,
'xtick.color': '#565656',
'xtick.direction': 'in',
'ytick.major.size': 0,
'ytick.minor.size': 0,
'ytick.major.pad': 6,
'ytick.minor.pad': 6,
'ytick.color': '#565656',
'ytick.direction': 'in',
# Legend
'legend.fancybox': True,
'legend.loc': 'best',
# Figure
'figure.figsize': [8, 6],
'figure.facecolor': '1.0',
'figure.edgecolor': '0.50',
'figure.subplot.hspace': 0.5,
# Other
'savefig.dpi': 72,
}
'''
Version 1 astropy plotting style for matplotlib.
This style improves some settings over the matplotlib default.
'''
astropy_mpl_style = astropy_mpl_style_1
'''
Most recent version of the astropy plotting style for matplotlib.
This style improves some settings over the matplotlib default.
'''
astropy_mpl_docs_style = astropy_mpl_style_1.copy()
'''
The style used in the astropy documentation.
'''
astropy_mpl_docs_style['axes.color_cycle'] = [
'#E24A33', # orange
'#348ABD', # blue
'#467821', # green
'#A60628', # red
'#7A68A6', # purple
'#CF4457', # pink
'#188487' # turquoise
]
astropy_mpl_docs_style['axes.grid'] = False
|
from sklearn.naive_bayes import GaussianNB
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from eye_detector.train.models.decorator import ProbModelDecorator
def bayes(x, y, shape):
    """Build a Gaussian naive-Bayes classifier wrapped for probability output.

    The x/y/shape arguments are accepted for factory-interface uniformity
    and are not used here.
    """
    model = GaussianNB()
    return ProbModelDecorator(model)
def gauss_rbf(x, y, shape):
    """Build a Gaussian-process classifier with a scaled RBF kernel,
    wrapped for probability output. x/y/shape are unused (factory API)."""
    kernel = 1.0 * RBF(1.0)
    return ProbModelDecorator(GaussianProcessClassifier(kernel))
|
import numpy as np
from vunet.train.config import config
import logging
import gc
from pathlib import Path
logger = logging.getLogger("tensorflow")
def get_max_complex(data, keys=("vocals", "mixture", "accompaniment")):
    """Return the complex value with the largest magnitude across *keys*.

    Sometimes the overall maximum is not in the mixture, hence the scan over
    all sources.

    BUGFIX: the default was a mutable list (shared across calls); a tuple
    default is equivalent for callers and removes the pitfall.
    """
    maxima = [complex_max(data[k]) for k in keys]
    pos = np.argmax([np.abs(m) for m in maxima])
    return np.array(maxima)[pos]
def visualization(spec, features, mask=False):
    """Plot *features* above *spec*; with *mask*, overlay the non-zero
    features semi-transparently on the spectrogram."""
    import matplotlib.pyplot as plt
    fig, (top_ax, bottom_ax) = plt.subplots(2, 1)
    top_ax.imshow(features, aspect="auto", origin="lower")
    if mask:
        # Hide zero entries so only active regions are drawn in the overlay.
        features = np.ma.masked_where(features == 0, features)
    bottom_ax = plt.subplot(212, sharex=top_ax)
    bottom_ax.imshow(spec, aspect="auto", origin="lower")
    if mask and np.max(features.data) > 0:
        bottom_ax.imshow(
            features,
            origin="lower",
            interpolation="none",
            alpha=0.9,
            cmap="Blues",
            aspect="auto",
        )
    plt.show()
    return
def complex_max(d):
    """Return the element of *d* with the largest absolute value."""
    flat_pos = np.argmax(np.abs(d), axis=None)
    return d[np.unravel_index(flat_pos, d.shape)]
def complex_min(d):
    """Return the element of *d* with the smallest absolute value."""
    flat_pos = np.argmin(np.abs(d), axis=None)
    return d[np.unravel_index(flat_pos, d.shape)]
def normlize_complex(data, c_max=1):
    """Normalize *data* magnitudes into the 0-1 range, then rescale by the
    ratio of the data's own maximum to the reference maximum *c_max*
    (no rescaling when c_max == 1)."""
    factor = np.divide(complex_max(data), c_max) if c_max != 1 else 1
    lo = complex_min(data)
    hi = complex_max(data)
    normalized = np.divide(data - lo, hi - lo)
    return np.multiply(normalized, factor)  # scale back to the original range
def split_overlapped(data):
    """Divide a frame range between the sources active in it.

    Rows whose first column is positive are considered active; when several
    are active, the column axis is split into equal consecutive slots
    assigned to the active rows in order of increasing first-column value
    (each row is marked from its slot start to the end of the axis, matching
    the historical behaviour). With at most one active row, *data* is
    returned unchanged (same object).

    BUGFIX: ``astype(np.int)`` — the ``np.int`` alias was removed in
    NumPy 1.24 and raises AttributeError; use the builtin ``int``.
    """
    output = np.zeros(data.shape)
    active = [i for i in np.argsort(data[:, 0])[::-1] if data[:, 0][i] > 0][::-1]
    if len(active) > 1:
        slot = np.round(data.shape[1] / len(active)).astype(int)
        for i, row in enumerate(active):
            output[row, i * slot : i * slot + slot] = 1
            output[row, i * slot :] = 1
    else:
        output = data
    return output
def as_categorical(data):
    """Convert a multi-source activation matrix into per-segment labels.

    The time axis (columns) is cut at every point where the set of active
    rows changes — detected via the column-wise diff of the binarized
    matrix — and each segment is passed through ``split_overlapped`` so
    overlapping sources share it. Returns a float32 array with a trailing
    channel axis added.
    """
    data_binary = data.__copy__()
    data_binary[data > 0] = 1
    # Columns where activation changes; prepend 1 so column 0 is a boundary.
    init = np.sum(np.diff(data_binary, axis=1), axis=0)
    init = np.hstack((1, init))
    ndx = np.where(init != 0)[0]
    for i in range(len(ndx)):
        # Each segment runs from this boundary to the next (or to the end).
        if i + 1 < len(ndx):
            e = ndx[i + 1]
        else:
            e = len(init)
        data_binary[:, ndx[i] : e] = split_overlapped(data[:, ndx[i] : e])
    data_binary[data_binary > 0] = 1
    return np.expand_dims(data_binary, axis=-1).astype(np.float32)
def binarize(data):
    """Clamp positive entries of *data* to 1 (mutates *data* in place) and
    return it with a trailing channel axis as float32."""
    data[data > 0] = 1
    return np.expand_dims(data, axis=-1).astype(np.float32)
def load_a_file(v, i, end, condition):
    """Load one .npz feature file and return ``(track_name, data_dict)``.

    The dict holds normalized complex spectrograms for mixture/vocals/
    accompaniment, the raw per-source extrema, the conditioning matrix
    (binary + categorical channels) and the ncc array.
    """
    name = v.split("/")[-2]
    print("Loading the file %s %i out of %i" % (name, i, end))
    raw = np.load(v)
    # Raw magnitude extrema per source, kept to undo normalization later.
    norm = {
        key: [complex_max(raw[key]), complex_min(raw[key])]
        for key in ("vocals", "accompaniment", "mixture")
    }
    c_max = get_max_complex(raw)
    data = {
        "normalization": norm,
        "mix": normlize_complex(raw["mixture"], c_max),
        "vocals": normlize_complex(raw["vocals"], c_max),
        "acc": normlize_complex(raw["accompaniment"], c_max),
        "cond": np.concatenate(
            [binarize(raw[condition]), as_categorical(raw[condition])], axis=-1
        ),
        "ncc": raw["ncc"],
    }
    return (name, data)
def load_data(files):
    """Load every feature file into memory (in parallel) so the generator
    has direct access to the data."""
    from joblib import Parallel, delayed
    pairs = Parallel(n_jobs=16, verbose=5)(
        delayed(load_a_file)(v=v, i=i, end=len(files), condition=config.CONDITION)
        for i, v in enumerate(files)
    )
    data = dict(pairs)
    _ = gc.collect()
    # Tear down the loky worker pool so its processes do not linger.
    from joblib.externals.loky import get_reusable_executor
    get_reusable_executor().shutdown(wait=True)
    return data
def get_data(ids=None):
    """Find all '*features.npz' files under the configured base path —
    optionally restricted to track ids in *ids* — and load them."""
    found = [str(p) for p in Path(config.PATH_BASE).rglob("*features.npz")]
    if ids:
        found = [f for f in found if f.split("/")[-2] in ids]
    return load_data(found)
|
import ctypes
from ctypes import (
c_bool, c_char, c_char_p, c_double, c_float, c_int,
c_int8, c_int16, c_int32, c_int64,
c_size_t,
c_uint8, c_uint16, c_uint32, c_uint64,
c_void_p)
import enum
import glob
import os
import sys
from . import library
c_char_p_p = ctypes.POINTER(c_char_p)
c_double_p = ctypes.POINTER(c_double)
c_float_p = ctypes.POINTER(c_float)
c_int_p = ctypes.POINTER(c_int)
c_int8_p = ctypes.POINTER(c_int8)
c_int16_p = ctypes.POINTER(c_int16)
c_int32_p = ctypes.POINTER(c_int32)
c_int64_p = ctypes.POINTER(c_int64)
c_uint8_p = ctypes.POINTER(c_uint8)
c_uint16_p = ctypes.POINTER(c_uint16)
c_uint32_p = ctypes.POINTER(c_uint32)
c_uint64_p = ctypes.POINTER(c_uint64)
############
# matrix.h #
############
# https://fr.mathworks.com/help/matlab/apiref/mxarray.html
class mxArray(ctypes.Structure): pass
mxArray_p = ctypes.POINTER(mxArray)
# https://www.mathworks.com/help/matlab/apiref/mxclassid.html
mxClassID = c_int
class ClassID(enum.IntEnum):
    # Mirrors MATLAB's mxClassID enumeration: the runtime type tag stored
    # in an mxArray (see the mxClassID page of the MATLAB C API docs).
    UNKNOWN = 0
    CELL = 1
    STRUCT = 2
    LOGICAL = 3
    CHAR = 4
    VOID = 5
    DOUBLE = 6
    SINGLE = 7
    INT8 = 8
    UINT8 = 9
    INT16 = 10
    UINT16 = 11
    INT32 = 12
    UINT32 = 13
    INT64 = 14
    UINT64 = 15
    FUNCTION = 16
# https://www.mathworks.com/help/matlab/apiref/mxchar.html
mxChar = c_char
mxChar_p = ctypes.POINTER(mxChar)
class mxComplexDouble(ctypes.Structure):
_fields_ = [("real", c_double), ("imag", c_double)]
mxComplexDouble_p = ctypes.POINTER(mxComplexDouble)
class mxComplexSingle(ctypes.Structure):
_fields_ = [("real", c_float), ("imag", c_float)]
mxComplexSingle_p = ctypes.POINTER(mxComplexSingle)
class mxComplexInt8(ctypes.Structure):
_fields_ = [("real", c_int8), ("imag", c_int8)]
mxComplexInt8_p = ctypes.POINTER(mxComplexInt8)
class mxComplexInt16(ctypes.Structure):
_fields_ = [("real", c_int16), ("imag", c_int16)]
mxComplexInt16_p = ctypes.POINTER(mxComplexInt16)
class mxComplexInt32(ctypes.Structure):
_fields_ = [("real", c_int32), ("imag", c_int32)]
mxComplexInt32_p = ctypes.POINTER(mxComplexInt32)
class mxComplexInt64(ctypes.Structure):
_fields_ = [("real", c_int64), ("imag", c_int64)]
mxComplexInt64_p = ctypes.POINTER(mxComplexInt64)
class mxComplexUint8(ctypes.Structure):
_fields_ = [("real", c_uint8), ("imag", c_uint8)]
mxComplexUint8_p = ctypes.POINTER(mxComplexUint8)
class mxComplexUint16(ctypes.Structure):
_fields_ = [("real", c_uint16), ("imag", c_uint16)]
mxComplexUint16_p = ctypes.POINTER(mxComplexUint16)
class mxComplexUint32(ctypes.Structure):
_fields_ = [("real", c_uint32), ("imag", c_uint32)]
mxComplexUint32_p = ctypes.POINTER(mxComplexUint32)
class mxComplexUint64(ctypes.Structure):
_fields_ = [("real", c_uint64), ("imag", c_uint64)]
mxComplexUint64_p = ctypes.POINTER(mxComplexUint64)
# https://www.mathworks.com/help/matlab/apiref/mxcomplexity.html
mxComplexity = c_int
class Complexity(enum.IntEnum):
REAL = 0
COMPLEX = 1
# https://www.mathworks.com/help/matlab/apiref/mwindex.html
mwIndex = c_size_t
mwIndex_p = ctypes.POINTER(mwIndex)
# https://www.mathworks.com/help/matlab/apiref/mxlogical.html
mxLogical = c_bool
mxLogical_p = ctypes.POINTER(mxLogical)
# https://www.mathworks.com/help/matlab/apiref/mwsize.html
mwSize = c_size_t
mwSize_p = ctypes.POINTER(mwSize)
# https://www.mathworks.com/help/matlab/cc-mx-matrix-library.html
api = {
######################
# mxArray Attributes #
######################
"mxIsNumeric": [[mxArray_p], c_bool],
"mxIsComplex": [[mxArray_p], c_bool],
"mxGetNumberOfDimensions": [[mxArray_p], mwSize],
"mxGetElementSize": [[mxArray_p], c_size_t], # WARNING: Behavior changed in R2018a
"mxGetDimensions": [[mxArray_p], mwSize_p],
"mxSetDimensions":
[[mxArray_p, mwSize_p, mwSize], c_int, library.fail_on_non_zero],
"mxGetNumberOfElements": [[mxArray_p], c_size_t],
"mxCalcSingleSubscript": [[mxArray_p, mwSize, mwIndex_p], mwIndex],
"mxGetM": [[mxArray_p], c_size_t],
"mxSetM": [[mxArray_p, mwSize], None],
"mxGetN": [[mxArray_p], c_size_t],
"mxSetN": [[mxArray_p, mwSize], None],
"mxIsEmpty": [[mxArray_p], c_bool],
"mxIsFromGlobalWS": [[mxArray_p], c_bool],
########################################
# Create, Query, and Access Data Types #
########################################
# Numeric types
"mxCreateDoubleMatrix":
[[mwSize, mwSize, mxComplexity], mxArray_p, library.fail_on_zero],
"mxCreateDoubleScalar": [[c_double], mxArray_p, library.fail_on_zero],
"mxCreateNumericMatrix":
[[mwSize, mwSize, mxClassID, mxComplexity], mxArray_p, library.fail_on_zero],
"mxCreateNumericArray":
[[mwSize, mwSize_p, mxClassID, mxComplexity], mxArray_p, library.fail_on_zero],
"mxCreateUninitNumericMatrix":
[[mwSize, mwSize, mxClassID, mxComplexity], mxArray_p, library.fail_on_zero],
"mxCreateUninitNumericArray":
[[mwSize, mwSize_p, mxClassID, mxComplexity], mxArray_p, library.fail_on_zero],
# Noncomplex Float
"mxIsScalar": [[mxArray_p], c_bool],
"mxGetScalar": [[mxArray_p], c_double],
"mxIsDouble": [[mxArray_p], c_bool],
"mxGetDoubles": [[mxArray_p], c_double_p, library.fail_on_zero],
"mxSetDoubles": [[mxArray_p, c_double_p], c_int, library.fail_on_zero],
"mxGetSingles": [[mxArray_p], c_float_p, library.fail_on_zero],
"mxSetSingles": [[mxArray_p, c_float_p], c_int, library.fail_on_zero],
"mxGetPr": [[mxArray_p], c_double_p, library.fail_on_zero], # WARNING: Behavior changed in R2018a
"mxSetPr": [[mxArray_p, c_double_p], None], # WARNING: Behavior changed in R2018a
# Noncomplex Integer
"mxIsInt8": [[mxArray_p], c_bool],
"mxGetInt8s": [[mxArray_p], c_int8_p, library.fail_on_zero],
"mxSetInt8s": [[mxArray_p, c_int8_p], c_int, library.fail_on_zero],
"mxIsUint8": [[mxArray_p], c_bool],
"mxGetUint8s": [[mxArray_p], c_uint8_p, library.fail_on_zero],
"mxSetUint8s": [[mxArray_p, c_uint8_p], c_int, library.fail_on_zero],
"mxIsInt16": [[mxArray_p], c_bool],
"mxGetInt16s": [[mxArray_p], c_int16_p, library.fail_on_zero],
"mxSetInt16s": [[mxArray_p, c_int16_p], c_int, library.fail_on_zero],
"mxIsUint16": [[mxArray_p], c_bool],
"mxGetUint16s": [[mxArray_p], c_uint16_p, library.fail_on_zero],
"mxSetUint16s": [[mxArray_p, c_uint16_p], c_int, library.fail_on_zero],
"mxIsInt32": [[mxArray_p], c_bool],
"mxGetInt32s": [[mxArray_p], c_int32_p, library.fail_on_zero],
"mxSetInt32s": [[mxArray_p, c_int32_p], c_int, library.fail_on_zero],
"mxIsUint32": [[mxArray_p], c_bool],
"mxGetUint32s": [[mxArray_p], c_uint32_p, library.fail_on_zero],
"mxSetUint32s": [[mxArray_p, c_uint32_p], c_int, library.fail_on_zero],
"mxIsInt64": [[mxArray_p], c_bool],
"mxGetInt64s": [[mxArray_p], c_int64_p, library.fail_on_zero],
"mxSetInt64s": [[mxArray_p, c_int64_p], c_int, library.fail_on_zero],
"mxIsUint64": [[mxArray_p], c_bool],
"mxGetUint64s": [[mxArray_p], c_uint64_p, library.fail_on_zero],
"mxSetUint64s": [[mxArray_p, c_uint64_p], c_int, library.fail_on_zero],
# Complex Float
"mxGetComplexDoubles":
[[mxArray_p], mxComplexDouble_p, library.fail_on_zero],
"mxSetComplexDoubles":
[[mxArray_p, mxComplexDouble_p], c_int, library.fail_on_zero],
"mxGetComplexSingles":
[[mxArray_p], mxComplexSingle_p, library.fail_on_zero],
"mxSetComplexSingles":
[[mxArray_p, mxComplexSingle_p], c_int, library.fail_on_zero],
"mxGetImagData": [[mxArray_p], c_void_p, library.fail_on_zero], # WARNING: Behavior changed in R2018a
"mxSetImagData": [[mxArray_p, c_void_p], None], # WARNING: Behavior changed in R2018a
"mxGetPi": [[mxArray_p], c_double_p, library.fail_on_zero], # WARNING: Behavior changed in R2018a
"mxSetPi": [[mxArray_p, c_double_p], None], # WARNING: Behavior changed in R2018a
# Complex Integer
"mxGetComplexInt8s": [[mxArray_p], mxComplexInt8_p, library.fail_on_zero],
"mxSetComplexInt8s":
[[mxArray_p, mxComplexInt8_p], c_int, library.fail_on_zero],
"mxGetComplexUint8s": [[mxArray_p], mxComplexUint8_p, library.fail_on_zero],
"mxSetComplexUint8s":
[[mxArray_p, mxComplexUint8_p], c_int, library.fail_on_zero],
"mxGetComplexInt16s": [[mxArray_p], mxComplexInt16_p, library.fail_on_zero],
"mxSetComplexInt16s":
[[mxArray_p, mxComplexInt16_p], c_int, library.fail_on_zero],
"mxGetComplexUint16s": [[mxArray_p], mxComplexUint16_p, library.fail_on_zero],
"mxSetComplexUint16s":
[[mxArray_p, mxComplexUint16_p], c_int, library.fail_on_zero],
"mxGetComplexInt32s": [[mxArray_p], mxComplexInt32_p, library.fail_on_zero],
"mxSetComplexInt32s":
[[mxArray_p, mxComplexInt32_p], c_int, library.fail_on_zero],
"mxGetComplexUint32s": [[mxArray_p], mxComplexUint32_p, library.fail_on_zero],
"mxSetComplexUint32s":
[[mxArray_p, mxComplexUint32_p], c_int, library.fail_on_zero],
"mxGetComplexInt64s": [[mxArray_p], mxComplexInt64_p, library.fail_on_zero],
"mxSetComplexInt64s":
[[mxArray_p, mxComplexInt64_p], c_int, library.fail_on_zero],
"mxGetComplexUint64s": [[mxArray_p], mxComplexUint64_p, library.fail_on_zero],
"mxSetComplexUint64s":
[[mxArray_p, mxComplexUint64_p], c_int, library.fail_on_zero],
# Sparse
"mxCreateSparse":
[[mwSize, mwSize, mwSize, mxComplexity], mxArray_p, library.fail_on_zero],
"mxCreateSparseLogicalMatrix":
[[mwSize, mwSize, mwSize], mxArray_p, library.fail_on_zero],
"mxIsSparse": [[mxArray_p], c_bool],
"mxGetNzmax": [[mxArray_p], mwSize],
"mxSetNzmax": [[mxArray_p, mwSize], None],
"mxGetIr": [[mxArray_p], mwIndex_p, library.fail_on_zero],
"mxSetIr": [[mxArray_p, mwIndex], None],
"mxGetJc": [[mxArray_p], mwIndex_p, library.fail_on_zero],
"mxSetJc": [[mxArray_p, mwIndex], None],
# Nonnumeric Types
"mxGetData": [[mxArray_p], c_void_p, library.fail_on_zero], # WARNING: Behavior changed in R2018a
"mxSetData": [[mxArray_p, c_void_p], None], # WARNING: Behavior changed in R2018a
# Character
"mxCreateString": [[c_char_p], mxArray_p, library.fail_on_zero],
"mxCreateCharMatrixFromStrings":
[[mwSize, c_char_p], mxArray_p, library.fail_on_zero],
"mxCreateCharArray": [[mwSize, mwSize_p], mxArray_p, library.fail_on_zero],
"mxIsChar": [[mxArray_p], c_bool],
"mxGetChars": [[mxArray_p], mxChar_p, library.fail_on_zero],
# Logical
"mxIsLogical": [[mxArray_p], c_bool],
"mxIsLogicalScalar": [[mxArray_p], c_bool],
"mxIsLogicalScalarTrue": [[mxArray_p], c_bool],
"mxCreateLogicalArray": [[mwSize, mwSize_p], mxArray_p, library.fail_on_zero],
"mxCreateLogicalMatrix": [[mwSize, mwSize], mxArray_p, library.fail_on_zero],
"mxCreateLogicalScalar": [[mxLogical], mxArray_p, library.fail_on_zero],
"mxGetLogicals": [[mxArray_p], mxLogical_p],
# Object
"mxIsClass": [[mxArray_p, c_char_p], c_bool],
"mxGetClassID": [[mxArray_p], mxClassID],
"mxGetClassName": [[mxArray_p], c_char_p],
"mxSetClassName": [[mxArray_p, c_char_p], c_int, library.fail_on_non_zero],
"mxGetProperty":
[[mxArray_p, mwIndex, c_char_p], mxArray_p, library.fail_on_zero],
"mxSetProperty": [[mxArray_p, mwIndex, c_char_p, mxArray_p], None],
# Structure
"mxCreateStructMatrix":
[[mwSize, mwSize, c_int, c_char_p_p], mxArray_p, library.fail_on_zero],
"mxCreateStructArray":
[[mwSize, mwSize_p, c_int, c_char_p_p], mxArray_p, library.fail_on_zero],
"mxIsStruct": [[mxArray_p], c_bool],
"mxGetField":
[[mxArray_p, mwIndex, c_char_p], mxArray_p, library.fail_on_zero],
"mxSetField": [[mxArray_p, mwIndex, c_char_p, mxArray_p], None],
"mxGetNumberOfFields": [[mxArray_p], c_int],
"mxGetFieldNameByNumber":
[[mxArray_p, c_int], c_char_p, library.fail_on_zero],
"mxGetFieldNumber":
[[mxArray_p, c_char_p], c_int, library.fail_on_minus_one],
"mxGetFieldByNumber": [[mxArray_p, mwIndex, c_int], mxArray_p, library.fail_on_zero],
"mxSetFieldByNumber": [[mxArray_p, mwIndex, c_int, mxArray_p], None],
"mxAddField": [[mxArray_p, c_char_p], c_int, library.fail_on_minus_one],
"mxRemoveField": [[mxArray_p, c_int], None],
# Cell
"mxCreateCellMatrix": [[mwSize, mwSize], mxArray_p, library.fail_on_zero],
"mxCreateCellArray": [[mwSize, mwSize_p], mxArray_p, library.fail_on_zero],
"mxIsCell": [[mxArray_p], c_bool],
"mxGetCell": [[mxArray_p, mwIndex], mxArray_p, library.fail_on_zero],
"mxSetCell": [[mxArray_p, mwIndex, mxArray_p], None],
################################
# Delete and Duplicate mxArray #
################################
"mxDestroyArray": [[mxArray_p], None],
"mxDuplicateArray": [[mxArray_p], mxArray_p, library.fail_on_zero],
###################
# Convert mxArray #
###################
# Numeric
# mxMakeArrayComplex, mxMakeArrayReal: >= R2018a
# Character
"mxArrayToString": [[mxArray_p], c_char_p, library.fail_on_zero],
"mxArrayToUTF8String": [[mxArray_p], c_char_p, library.fail_on_zero], # >= R2015a
"mxGetString":
[[mxArray_p, c_char_p, mwSize], c_int, library.fail_on_non_zero],
##########################
# Data Memory Management #
##########################
"mxCalloc": [[mwSize, mwSize], c_void_p, library.fail_on_zero],
"mxMalloc": [[mwSize], c_void_p, library.fail_on_zero],
"mxRealloc": [[c_void_p, mwSize], c_void_p, library.fail_on_zero],
"mxFree": [[c_void_p], None],
###########
# Asserts #
###########
# "mxAssert": [[c_int, c_char_p], None],
# "mxAssertS": [[c_int, c_char_p], None],
#############
# Constants #
#############
"mxIsInf": [[c_double], c_bool],
"mxIsFinite": [[c_double], c_bool],
"mxIsNaN": [[c_double], c_bool],
}
from meg import matlab_root
# Locate the MATLAB C matrix library for this platform: Linux (glnxa64)
# first, then macOS (maci64).
# BUGFIX: glob.glob(...)[0] raises IndexError when nothing matches —
# StopIteration would only come from next() on an iterator — so the macOS
# fallback branch was previously unreachable.
try:
    path = glob.glob(os.path.join(matlab_root, "bin", "glnxa64", "libmx.*"))[0]
except IndexError:
    path = glob.glob(os.path.join(matlab_root, "bin", "maci64", "libmx.*"))[0]
lib = ctypes.CDLL(path)
library.set_api(lib, api, sys.modules[__name__], ["_730", "_800"])
|
from __future__ import absolute_import
from require.require import *
|
from setuptools import setup, find_packages
install_requires = [
'opencv-python >= 4.1.0',
'Pillow >= 6.0.0',
'numpy >=1.16.4'
]
setup(
name='imbo',
version='0.1.1',
python_requires='>=2.7',
packages=find_packages(),
include_package_data=True,
author='Nitin Rai',
author_email='mneonizer@gmail.com',
description='A tool to plot pretty bounding boxes around objects.',
long_description='See https://github.com/imneonizer/imbo for complete user guide.',
url='https://github.com/imneonizer/imbo',
install_requires=install_requires,
license='BSD',
)
|
# -*- coding:utf-8 -*-
__author__ = 'Ren Kang'
__date__ = '2018/3/27 13:32'
import uuid
import hashlib
from django.db.models.signals import pre_save, post_delete, post_save
from django.dispatch import receiver, Signal
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.sites.models import Site
from django.contrib.auth.models import Group
from django.contrib.auth import get_user_model
from django.conf import settings
from django.utils.html import mark_safe, format_html
from django.template import loader
from haystack.signals import BaseSignalProcessor
from guardian.shortcuts import assign_perm
from blog.models import Post
from blog import enums
from comment.models import Comment
from comment.signals import post_comment, post_like
# BUGFIX: Signal(providing_args=[...]) was deprecated in Django 3.0 and
# removed in Django 4.0 (it raises TypeError there). The argument was purely
# documentational, so dropping it preserves behaviour.
# Both signals send kwarg: instance (the Post changing visibility).
change_post_public = Signal()
change_post_private = Signal()
User = get_user_model()
class PostSignalProcessor(BaseSignalProcessor):
    """
    Allows for observing when post update status or post deletes fire & automatically updates the
    search engine appropriately.
    Index a post when it becomes public; remove it from the index when it is
    deleted or becomes private.
    """
    def setup(self):
        # Naive (listen to all post change status).
        change_post_public.connect(self.handle_save)
        post_delete.connect(self.handle_delete)
        change_post_private.connect(self.handle_delete)
        # Efficient would be going through all backends & collecting all models
        # being used, then hooking up signals only for those.
    def teardown(self):
        # Naive (listen to all post change status).
        change_post_public.disconnect(self.handle_save)
        post_delete.disconnect(self.handle_delete)
        change_post_private.disconnect(self.handle_delete)
        # Efficient would be going through all backends & collecting all models
        # being used, then disconnecting signals only for those.
def gen_default_excerpt(post, length=223):
    """Default the post excerpt to the first *length* characters of the body.

    BUGFIX: the original tested ``post.excerpt is ''`` — identity comparison
    with a string literal relies on interning (and is a SyntaxWarning on
    CPython >= 3.8). A truthiness test is correct and also covers None.
    """
    if not post.excerpt:
        post.excerpt = post.content[:length]
def gen_default_post_sn(post, created):
    """Assign a fresh UUID1-based serial number to a newly created post.

    :param post: the Post instance being saved
    :param created: True when the instance was just inserted
    :return: the (possibly updated) post
    """
    if created:
        post.post_sn = str(uuid.uuid1())
    return post
def gen_default_cover_url(post, created):
    """On creation, default cover_url to the uploaded cover file's URL."""
    if created and not post.cover_url:
        post.cover_url = post.cover.url
    return post
def gen_default_object_id(post, created):
    """On creation, derive url_object_id as the MD5 hex digest of the post's
    absolute URL (only when no id was supplied)."""
    if created and post.url_object_id is None:
        url_bytes = post.get_absolute_url().encode('utf-8')
        post.url_object_id = hashlib.md5(url_bytes).hexdigest()
    return post
def assign_post_perms(post, created):
    """
    Create (or fetch) the 'readers' group, grant it read access to this
    post, and grant the author change/delete permissions on it.
    """
    if created:
        readers, _ = Group.objects.get_or_create(name='readers')
        if _:
            assign_perm('blog.view_post', readers)  # model-level: readers may view any post
            assign_perm('blog.add_post', readers)
        try:
            assign_perm("view_post", readers, post)  # object-level: readers may view this specific post
            assign_perm('blog.change_post', post.author, post)  # the author may edit...
            assign_perm('blog.delete_post', post.author, post)  # ...and delete their own post
        except:
            pass
    return post
def user_as_reader(user, created):
    """Enrol a newly created, non-anonymous user in the 'readers' group,
    granting the group its post permissions the first time it is created."""
    if not (created and not user.is_anonymous):
        return user
    readers, was_created = Group.objects.get_or_create(name='readers')
    if was_created:
        try:
            assign_perm('blog.view_post', readers)
            assign_perm('blog.add_post', readers)
        except:
            pass
    user.groups.add(readers)
    return user
@receiver(pre_save, sender=Post)
def on_post_pre_save(sender, **kwargs):
    """
    Pre-save hook: keep the search index in sync with the post's status.
    Emits change_post_public when an unindexed post turns public and
    change_post_private when an indexed post turns private, toggling the
    hasbe_indexed flag accordingly; finally fills in a default excerpt.
    :param sender: the Post model class
    :param kwargs: signal kwargs; 'instance' is the post being saved
    :return:
    """
    post = kwargs['instance']
    if not post.hasbe_indexed:
        if post.status == enums.POST_STATUS_PUBLIC:
            change_post_public.send(sender=post.__class__, instance=post)
            post.hasbe_indexed = True
    if post.hasbe_indexed and post.status == enums.POST_STATUS_PRIVATE:
        change_post_private.send(sender=post.__class__, instance=post)
        post.hasbe_indexed = False
    gen_default_excerpt(post)
@receiver(post_save, sender=Post)
def on_post_post_save(sender, **kwargs):
    """Post-save hook: on creation, fill in serial number, object id and
    cover URL defaults, assign permissions, then persist the changes."""
    post, created = kwargs['instance'], kwargs['created']
    if not created:
        return
    # Same order as before: sn, object id, cover url, permissions.
    for initializer in (gen_default_post_sn, gen_default_object_id,
                        gen_default_cover_url, assign_post_perms):
        post = initializer(post, created)
    post.save()
@receiver(post_save, sender=User)
def on_user_post_save(sender, **kwargs):
    """Post-save hook for users: enrol newly created users as readers."""
    user_as_reader(kwargs['instance'], kwargs['created'])
@receiver(post_comment, sender=Comment)
def handler_post_comment(sender, comment_obj, content_type, object_id, request, **kwargs):
    """Notify and e-mail the relevant author when a new comment arrives.

    A comment targeting a Post notifies the post's author; a comment
    targeting another Comment (a reply) notifies that comment's author.
    Lookups that fail with DoesNotExist are silently ignored.
    """
    post_ct = ContentType.objects.get_for_model(Post)
    comment_ct = ContentType.objects.get_for_model(Comment)
    email_post_comment_template = 'blog/email/email_post_comment.html'
    email_comment_reply_template = 'blog/email/email_comment_reply.html'
    current_site = get_current_site(request)
    site_name = current_site.name
    domain = current_site.domain
    protocol = 'https' if request.is_secure() else 'http'
    if content_type == post_ct:
        # Comment on a blog post.
        try:
            post = Post.objects.get(pk=object_id)
        except Post.DoesNotExist as e:
            pass
        else:
            context = {
                'post': post,
                'site_name': site_name,
                'comment_object': comment_obj,
                'protocol': protocol,
                'domain': domain
            }
            # Send the post's author a notification and an e-mail.
            subject = '{} 博文 {} {}://{}{} 收到 {} 的评论'.format(site_name, post.title, protocol, domain,
                                                           post.get_absolute_url(), comment_obj.author)
            noti_text = '{} 博文 {} {}://{}{} 收到 {} 的评论 {}'.format(site_name, post.title, protocol, domain,
                                                                post.get_absolute_url(), comment_obj.author,
                                                                mark_safe(comment_obj.content))
            message = loader.render_to_string(email_post_comment_template, context)
            post.author.notify_user(noti_text)
            post.author.email_user(subject, message, html_msg=message)
    elif content_type == comment_ct:
        # Reply to an existing comment.
        try:
            # BUGFIX: the lookup must live inside the try block. Previously
            # the try body was just 'pass' and the .get() ran in the else
            # clause, so Comment.DoesNotExist could never be caught.
            comment = Comment.objects.get(pk=object_id)
        except Comment.DoesNotExist as e:
            pass
        else:
            context = {
                'comment': comment,
                'site_name': site_name,
                'comment_object': comment_obj,
                'protocol': protocol,
                'domain': domain,
            }
            subject = '你的评论收到{}的回复'.format(comment_obj.author)
            noti_text = '你的评论{}收到{}的回复 {}'.format(comment.content, comment_obj.author, comment_obj.content)
            message = loader.render_to_string(email_comment_reply_template, context)
            comment.author.notify_user(noti_text)
            comment.author.email_user(subject, message, html_msg=message)
import elementary
import evas
import os
class stationWindow(elementary.Box):
    """Station-management screen for eAndora (python2 / python-efl elementary).

    Lists the user's Pandora stations and offers play / rename / delete /
    create actions, either through a button row or a per-item context menu.
    """
    def __init__( self, parent ):
        # Vertical box: station list on top, horizontal button row below.
        elementary.Box.__init__(self, parent.mainWindow)
        self.ourPlayer = parent.ourPlayer
        self.win = parent.mainWindow
        # ``rent`` is the parent controller (spawns sub-windows, owns the
        # naviframe ``nf``).
        self.rent = parent
        self.lst = lst = elementary.List(self.win)
        lst.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
        lst.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
        # Both double-click and long-press open the per-station context menu.
        lst.callback_clicked_double_add(self.station_popup)
        lst.callback_longpressed_add(self.station_popup)
        stations = self.ourPlayer.getStations()
        for station in stations:
            lst.item_append(str(station['stationName']))
        lst.show()
        chng = elementary.Button(self.win)
        chng.text_set("Change Station")
        chng.callback_unpressed_add(self.change_station)
        chng.show()
        sep = elementary.Separator(self.win)
        sep.show()
        crt = elementary.Button(self.win)
        crt.text = "Create Station"
        crt.callback_unpressed_add(lambda x: parent.spawn_create())
        crt.show()
        ex = elementary.Button(self.win)
        ex.text_set("Back")
        ex.callback_unpressed_add(lambda x: parent.nf.item_pop())
        ex.show()
        bbox = elementary.Box(self.win)
        bbox.horizontal = True
        bbox.pack_end(chng)
        bbox.pack_end(sep)
        bbox.pack_end(crt)
        # NOTE(review): the same Separator object is packed twice; an Evas
        # object has a single position, so presumably only the second pack
        # takes effect -- confirm and create a second separator if both gaps
        # are wanted.
        bbox.pack_end(sep)
        bbox.pack_end(ex)
        bbox.show()
        self.pack_end(lst)
        self.pack_end(bbox)
    def popup_message(self, message, title, callback=False):
        """Show a one-button popup; ``callback`` (if given) runs on OK."""
        popup = elementary.Popup(self.win)
        popup.text = message
        popup.part_text_set("title,text", title)
        bt = elementary.Button(self.win)
        bt.text = "OK"
        if callback:
            bt.callback_clicked_add(callback, popup)
        # Hide the popup regardless of whether a callback was attached.
        bt.callback_clicked_add(lambda x: popup.hide())
        popup.part_content_set("button1", bt)
        popup.show()
    def station_popup(self, lst, item):
        """Open the Play/Rename/Delete context menu at the pointer position."""
        print lst
        print item.text
        cp = elementary.Ctxpopup(self.win)
        cp.item_append("Play", None, self.change_station)
        cp.item_append("Rename", None, self.station_rename)
        cp.item_append("Delete", None, self.station_delete)
        # Place the popup where the pointer currently is.
        pos = self.win.evas.pointer_canvas_xy_get()
        cp.pos = pos
        cp.show()
    def station_rename(self, lst, i):
        """Context-menu action: open the rename dialog for the selection."""
        cp = i.widget_get()
        cp.dismiss()
        item = self.lst.selected_item_get()
        self.rent.spawn_rename(item.text)
    def station_delete(self, lst, i):
        """Context-menu action: confirm, then delete the selected station."""
        cp = i.widget_get()
        cp.dismiss()
        item = self.lst.selected_item_get()
        print item.text
        popup = elementary.Popup(self.win)
        popup.text = item.text
        popup.part_text_set("title,text", "Really Delete?")
        bt = elementary.Button(self.win)
        bt.text = "Cancel"
        bt.callback_clicked_add(lambda x: popup.hide())
        ys = elementary.Button(self.win)
        ys.text = "Yes"
        # ``item.text`` is passed through to really_delete as ``name``.
        ys.callback_clicked_add(self.really_delete, item.text)
        ys.callback_clicked_add(lambda x: popup.hide())
        popup.part_content_set("button1", bt)
        popup.part_content_set("button2", ys)
        popup.show()
    def really_delete(self, pop, name):
        """Delete the station called ``name`` and leave this screen."""
        station = self.ourPlayer.getStationFromName(name)
        print station
        self.ourPlayer.deleteStation(station)
        self.rent.nf.item_pop()
    def change_station(self, bt=False, i=False):
        """Switch playback to the selected station and persist the choice.

        Invoked either from the button row (``bt``) or from the context menu
        (``i``); in the latter case the Ctxpopup is dismissed first.
        """
        if i:
            cp = i.widget_get()
            cp.dismiss()
        item = self.lst.selected_item_get()
        #self.rent.spawn_player()
        if item:
            self.ourPlayer.setStation(self.ourPlayer.getStationFromName(item.text))
            # Remember the chosen station across runs in
            # ~/.config/eAndora/stationinfo (recreated from scratch).
            home = os.path.expanduser("~")
            if not os.path.exists("%s/.config/eAndora"%home):
                os.makedirs("%s/.config/eAndora"%home)
            if os.path.exists("%s/.config/eAndora/stationinfo"%home):
                os.remove('%s/.config/eAndora/stationinfo'%home)
            f = open('%s/.config/eAndora/stationinfo'%home, 'w')
            f.write('%s\n'%item.text)
            f.close()
            # Restart playback with the new station's queue.
            self.ourPlayer.pauseSong()
            self.ourPlayer.clearSongs()
            self.ourPlayer.addSongs()
            self.ourPlayer.gui.refreshInterface(True)
            self.rent.nf.item_pop()
|
from sys import exit
import argparse
from . import configurator
def main():
    """Entry point: parse command-line options and drive the configurator."""
    cfg = configurator.Configurator()

    parser = argparse.ArgumentParser(description="Configure Xfce4")
    parser.add_argument('theme', help='theme name', choices=cfg.theme_names, nargs='?')
    parser.add_argument('-l', '--themes', help='list theme names', action='store_true')
    parser.add_argument('-c', '--current', help='display current theme', action='store_true')
    parser.add_argument('-V', '--version', action='version', version=cfg.version())
    args = parser.parse_args()

    if args.themes:
        print("\n".join(cfg.theme_names))
        exit(0)
    if args.theme:
        cfg.set_theme(args.theme)
        cfg.save_config()
        exit(0)
    # NOTE(review): ``--current`` is parsed but never consulted; execution
    # falls through to displaying the config either way -- confirm intent.
    cfg.display_config()
|
from dataclasses import dataclass
from math import isclose
@dataclass(frozen=True)
class Point:
    """An immutable point in the Cartesian plane."""

    x: float
    y: float


@dataclass(frozen=True)
class Triangle:
    """An immutable triangle defined by its three corner points."""

    a: Point
    b: Point
    c: Point

    def get_area(self) -> float:
        """Computes the area of the triangle abc."""
        # Shoelace formula: half the absolute value of twice the signed area.
        ax, ay = self.a.x, self.a.y
        bx, by = self.b.x, self.b.y
        cx, cy = self.c.x, self.c.y
        twice_signed = ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)
        return abs(twice_signed) / 2
def is_point_in_triangle(
    a: Point, b: Point, c: Point, p: Point, method: str
) -> bool:
    """Tests whether point p is contained in the triangle formed by a, b, and c.

    ``method`` selects the containment algorithm; only ``"area"`` is
    implemented, and any other value raises NotImplementedError.
    """
    dispatch = {"area": is_point_in_triangle_area}
    try:
        checker = dispatch[method]
    except KeyError:
        raise NotImplementedError(f"Method: {method} is not supported.") from None
    return checker(a, b, c, p)
def is_point_in_triangle_area(a: Point, b: Point, c: Point, p: Point) -> bool:
    """Tests whether point p is contained in the triangle formed by a, b, and c.

    The point lies inside (or on the boundary of) abc exactly when the areas
    of the three sub-triangles abp, apc, and pbc sum to the area of abc.
    """
    whole = Triangle(a, b, c).get_area()
    parts = sum(
        tri.get_area()
        for tri in (Triangle(a, b, p), Triangle(a, p, c), Triangle(p, b, c))
    )
    return isclose(whole, parts)
|
#!/usr/bin/env python3
import os
import argparse
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.options.pipeline_options import PipelineOptions
from file2csv.Converter import Converter
def run_pipeline(fixedfile: str, specfile: str, csvfile: str) -> bool:
    """Convert a fixed-width input file to CSV with an Apache Beam pipeline.

    :param fixedfile: path of the fixed-width input file
    :param specfile: path of the column-specification file for the Converter
    :param csvfile: base path of the CSV output (written in 3 shards)
    :return: True once the pipeline has finished
    """
    converter = Converter(specfile=specfile)
    # Load pipeline options from the script's arguments
    options = PipelineOptions()
    # Create a pipeline and run it after leaving the 'with' block
    with beam.Pipeline(options=options) as p:
        # Wrap in parentheses to avoid Python indentation issues
        (p
            # Load data from input file
            | 'Read Lines' >> beam.io.ReadFromText(fixedfile)
            # Convert each line to a (success_flag, csv_line) pair
            | 'Convert to CSV' >> beam.Map(lambda line: converter.encode(line))
            # Drop records whose conversion failed (flag is falsy)
            | 'Filter out False' >> beam.Filter(lambda pair: pair[0])
            # Keep just the converted line
            | 'Extract for CSV' >> beam.Map(lambda pair: pair[1])
            # Write to CSV
            | 'Write to CSV File' >> beam.io.WriteToText(csvfile, num_shards=3)
        )
    # BUGFIX: the function is annotated ``-> bool`` but previously fell off
    # the end returning None; report success explicitly.
    return True
if __name__ == "__main__":
    # Command-line front-end: spec + fixed-width input -> CSV output.
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--spec", help="specification file", required=True)
    parser.add_argument("-f", "--fixedfile", help="Fixed Width file", required=True)
    parser.add_argument("-o", "--outfile", help="Output file", required=True)
    args = parser.parse_args()
    if not os.path.isfile(args.fixedfile):
        print(f"input file {args.fixedfile} is not found")
        # BUGFIX: bare ``exit`` without parentheses is a no-op expression, so
        # the pipeline previously ran anyway; abort with a non-zero status.
        raise SystemExit(1)
    run_pipeline(args.fixedfile, args.spec, args.outfile)
|
# NOTE(review): ``foo`` is not defined anywhere in this file -- presumably
# injected by surrounding tooling or a test fixture; confirm before reuse.
@foo.bar
class C:
    """Empty class; any behaviour comes entirely from the ``foo.bar`` decoration."""
    pass
|
"""This is the installation toolset for this project."""
from datetime import datetime, timezone
from pathlib import Path
from subprocess import Popen, PIPE

from setuptools import setup, find_packages
# Long description for the package metadata comes straight from the README.
# Specify the encoding explicitly so the build does not depend on the
# platform's default locale.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()
def _build_time():
ctime = datetime.utcnow()
return ctime.strftime('%Y%m%d.%H%M%S')
def _revision():
cwd = str(Path(__file__).parent.absolute())
with Popen('git rev-parse --short HEAD', shell=True, stdout=PIPE, cwd=cwd) as proc:
outb, errb = proc.communicate()
result = outb.decode('utf8').strip()
return result or 'nogit'
setup(
    name='osdc',
    # Version combines the UTC build timestamp and the short git hash,
    # e.g. "20200101.120000-abc1234".
    version=f'{_build_time()}-{_revision()}',
    description='The OpenSky Data Collector',
    long_description=long_description,
    # Ship every package except the test tree.
    packages=find_packages(exclude=('tests',)),
    install_requires=[
        # Backport only installed on Python < 3.7.
        'dataclasses;python_version<"3.7"',
        'conf',
        'opensky_api',
        'influxdb',
        'requests',
        'apscheduler',
    ],
    tests_require=[
        'wheel',
        'setuptools',
        'pylint',
        'coverage',
        'pytest',
        'mock',
    ],
    entry_points={
        # The ``osdc`` console command dispatches to osdc.__main__:main.
        'console_scripts': [
            'osdc = osdc.__main__:main'
        ]
    },
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'License :: ALL RIGHTS RESERVED.',
        'Operating System :: POSIX :: Linux',
    ],
)
|
import torch
import numpy as np
def compute_cmvn_epoch(opt, train_loader, enhance_model, feat_model):
    """Estimate CMVN statistics of the enhancement output and persist them.

    Runs both models in eval mode without gradients, computes the cepstral
    mean/variance normalisation statistics from the first batch that yields
    them, saves the result to ``<opt.exp_path>/enhance_cmvn.npy`` and returns
    it as a ``torch.FloatTensor``.  Both models are restored to train mode
    and gradient tracking is re-enabled before returning.
    """
    import os  # BUGFIX: ``os.path.join`` below raised NameError without this

    enhance_model.eval()
    feat_model.eval()
    torch.set_grad_enabled(False)
    enhance_cmvn_file = os.path.join(opt.exp_path, 'enhance_cmvn.npy')
    enhance_cmvn = None
    for data in train_loader:
        (utt_ids, spk_ids, clean_inputs, clean_log_inputs, mix_inputs,
         mix_log_inputs, cos_angles, targets, input_sizes, target_sizes) = data
        enhance_out = enhance_model(mix_inputs, mix_log_inputs, input_sizes)
        enhance_cmvn = feat_model.compute_cmvn(enhance_out, input_sizes)
        if enhance_cmvn is not None:
            # Stop after the first batch that produced usable statistics.
            np.save(enhance_cmvn_file, enhance_cmvn)
            print('save enhance_cmvn to {}'.format(enhance_cmvn_file))
            break
    # NOTE(review): if the loader is empty or never yields statistics this is
    # still None and FloatTensor(None) raises -- presumably upstream
    # guarantees at least one usable batch; confirm.
    enhance_cmvn = torch.FloatTensor(enhance_cmvn)
    enhance_model.train()
    feat_model.train()
    torch.set_grad_enabled(True)
    return enhance_cmvn
"""
__S1ThenClauseComplete_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: gehan
Modified: Wed Oct 16 11:56:05 2013
__________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from LHS import *
from MT_pre__SoftwareComposition import *
from MT_pre__System import *
from MT_pre__ComponentPrototype import *
from MT_pre__directLink_T import *
from MT_pre__CompositionType import *
from graph_MT_pre__CompositionType import *
from graph_MT_pre__System import *
from graph_MT_pre__ComponentPrototype import *
from graph_LHS import *
from graph_MT_pre__directLink_T import *
from graph_MT_pre__SoftwareComposition import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def S1ThenClauseComplete_MDL(self, rootNode, MT_pre__GM2AUTOSAR_MMRootNode=None, MoTifRuleRootNode=None):
    """Rebuild the auto-generated AToM3 'S1ThenClauseComplete' rule model.

    Machine-generated by AToM3 (see the module header: do not modify by
    hand).  Populates ``rootNode`` with the LHS pattern of a MoTif rule:
    System -> SoftwareComposition -> CompositionType -> ComponentPrototype
    nodes joined by directLink_T associations, then draws the connections.
    """
    # --- Generating attributes code for ASG MT_pre__GM2AUTOSAR_MM ---
    if( MT_pre__GM2AUTOSAR_MMRootNode ):
        # author
        MT_pre__GM2AUTOSAR_MMRootNode.author.setValue('Annonymous')
        # description
        MT_pre__GM2AUTOSAR_MMRootNode.description.setValue('\n')
        MT_pre__GM2AUTOSAR_MMRootNode.description.setHeight(15)
        # name
        MT_pre__GM2AUTOSAR_MMRootNode.name.setValue('')
        MT_pre__GM2AUTOSAR_MMRootNode.name.setNone()
    # --- ASG attributes over ---

    # --- Generating attributes code for ASG MoTifRule ---
    if( MoTifRuleRootNode ):
        # author
        MoTifRuleRootNode.author.setValue('Annonymous')
        # description
        MoTifRuleRootNode.description.setValue('\n')
        MoTifRuleRootNode.description.setHeight(15)
        # name
        MoTifRuleRootNode.name.setValue('S1ThenClauseComplete')
    # --- ASG attributes over ---

    self.obj4524=LHS(self)
    self.obj4524.isGraphObjectVisual = True

    if(hasattr(self.obj4524, '_setHierarchicalLink')):
      self.obj4524._setHierarchicalLink(False)

    # constraint
    self.obj4524.constraint.setValue('if PreNode(\'1\')[\'cardinality\']==\'+\' and PreNode(\'2\')[\'cardinality\']==\'+\' and PreNode(\'3\')[\'cardinality\']==\'+\' and PreNode(\'4\')[\'cardinality\']==\'+\' and PreNode(\'5\')[\'associationType\']==\'softwareComposition\' and PreNode(\'6\')[\'associationType\']==\'softwareComposition\' and PreNode(\'7\')[\'associationType\']==\'component\':\n return True\nreturn False\n')
    self.obj4524.constraint.setHeight(15)

    self.obj4524.graphClass_= graph_LHS
    if self.genGraphics:
       new_obj = graph_LHS(60.0,40.0,self.obj4524)
       new_obj.DrawObject(self.UMLmodel)
       self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
       new_obj.layConstraints = dict() # Graphical Layout Constraints
       new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj4524.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj4524)
    self.globalAndLocalPostcondition(self.obj4524, rootNode)
    self.obj4524.postAction( rootNode.CREATE )

    self.obj4525=MT_pre__SoftwareComposition(self)
    self.obj4525.isGraphObjectVisual = True

    if(hasattr(self.obj4525, '_setHierarchicalLink')):
      self.obj4525._setHierarchicalLink(False)

    # MT_pivotOut__
    self.obj4525.MT_pivotOut__.setValue('')
    self.obj4525.MT_pivotOut__.setNone()

    # MT_subtypeMatching__
    self.obj4525.MT_subtypeMatching__.setValue(('True', 0))
    self.obj4525.MT_subtypeMatching__.config = 0

    # MT_pre__classtype
    self.obj4525.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4525.MT_pre__classtype.setHeight(15)

    # MT_pivotIn__
    self.obj4525.MT_pivotIn__.setValue('')
    self.obj4525.MT_pivotIn__.setNone()

    # MT_label__
    self.obj4525.MT_label__.setValue('2')

    # MT_pre__cardinality
    self.obj4525.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4525.MT_pre__cardinality.setHeight(15)

    # MT_pre__name
    self.obj4525.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4525.MT_pre__name.setHeight(15)

    self.obj4525.graphClass_= graph_MT_pre__SoftwareComposition
    if self.genGraphics:
       new_obj = graph_MT_pre__SoftwareComposition(256.0,135.0,self.obj4525)
       new_obj.DrawObject(self.UMLmodel)
       self.UMLmodel.addtag_withtag("MT_pre__SoftwareComposition", new_obj.tag)
       new_obj.layConstraints = dict() # Graphical Layout Constraints
       new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj4525.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj4525)
    self.globalAndLocalPostcondition(self.obj4525, rootNode)
    self.obj4525.postAction( rootNode.CREATE )

    self.obj4526=MT_pre__System(self)
    self.obj4526.isGraphObjectVisual = True

    if(hasattr(self.obj4526, '_setHierarchicalLink')):
      self.obj4526._setHierarchicalLink(False)

    # MT_pivotOut__
    self.obj4526.MT_pivotOut__.setValue('')
    self.obj4526.MT_pivotOut__.setNone()

    # MT_subtypeMatching__
    self.obj4526.MT_subtypeMatching__.setValue(('True', 0))
    self.obj4526.MT_subtypeMatching__.config = 0

    # MT_pre__classtype
    self.obj4526.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4526.MT_pre__classtype.setHeight(15)

    # MT_pivotIn__
    self.obj4526.MT_pivotIn__.setValue('element1')

    # MT_label__
    self.obj4526.MT_label__.setValue('1')

    # MT_pre__cardinality
    self.obj4526.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4526.MT_pre__cardinality.setHeight(15)

    # MT_pre__name
    self.obj4526.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4526.MT_pre__name.setHeight(15)

    self.obj4526.graphClass_= graph_MT_pre__System
    if self.genGraphics:
       new_obj = graph_MT_pre__System(73.0,53.0,self.obj4526)
       new_obj.DrawObject(self.UMLmodel)
       self.UMLmodel.addtag_withtag("MT_pre__System", new_obj.tag)
       new_obj.layConstraints = dict() # Graphical Layout Constraints
       new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj4526.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj4526)
    self.globalAndLocalPostcondition(self.obj4526, rootNode)
    self.obj4526.postAction( rootNode.CREATE )

    self.obj4527=MT_pre__ComponentPrototype(self)
    self.obj4527.isGraphObjectVisual = True

    if(hasattr(self.obj4527, '_setHierarchicalLink')):
      self.obj4527._setHierarchicalLink(False)

    # MT_pivotOut__
    self.obj4527.MT_pivotOut__.setValue('')
    self.obj4527.MT_pivotOut__.setNone()

    # MT_subtypeMatching__
    self.obj4527.MT_subtypeMatching__.setValue(('True', 0))
    self.obj4527.MT_subtypeMatching__.config = 0

    # MT_pre__classtype
    self.obj4527.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4527.MT_pre__classtype.setHeight(15)

    # MT_pivotIn__
    self.obj4527.MT_pivotIn__.setValue('element5')

    # MT_label__
    self.obj4527.MT_label__.setValue('4')

    # MT_pre__cardinality
    self.obj4527.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4527.MT_pre__cardinality.setHeight(15)

    # MT_pre__name
    self.obj4527.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4527.MT_pre__name.setHeight(15)

    self.obj4527.graphClass_= graph_MT_pre__ComponentPrototype
    if self.genGraphics:
       new_obj = graph_MT_pre__ComponentPrototype(80.0,300.0,self.obj4527)
       new_obj.DrawObject(self.UMLmodel)
       self.UMLmodel.addtag_withtag("MT_pre__ComponentPrototype", new_obj.tag)
       new_obj.layConstraints = dict() # Graphical Layout Constraints
       new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj4527.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj4527)
    self.globalAndLocalPostcondition(self.obj4527, rootNode)
    self.obj4527.postAction( rootNode.CREATE )

    self.obj4528=MT_pre__directLink_T(self)
    self.obj4528.isGraphObjectVisual = True

    if(hasattr(self.obj4528, '_setHierarchicalLink')):
      self.obj4528._setHierarchicalLink(False)

    # MT_label__
    self.obj4528.MT_label__.setValue('5')

    # MT_pivotOut__
    self.obj4528.MT_pivotOut__.setValue('')
    self.obj4528.MT_pivotOut__.setNone()

    # MT_subtypeMatching__
    self.obj4528.MT_subtypeMatching__.setValue(('True', 0))
    self.obj4528.MT_subtypeMatching__.config = 0

    # MT_pivotIn__
    self.obj4528.MT_pivotIn__.setValue('')
    self.obj4528.MT_pivotIn__.setNone()

    # MT_pre__associationType
    self.obj4528.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4528.MT_pre__associationType.setHeight(15)

    self.obj4528.graphClass_= graph_MT_pre__directLink_T
    if self.genGraphics:
       new_obj = graph_MT_pre__directLink_T(339.5,167.0,self.obj4528)
       new_obj.DrawObject(self.UMLmodel)
       self.UMLmodel.addtag_withtag("MT_pre__directLink_T", new_obj.tag)
       new_obj.layConstraints = dict() # Graphical Layout Constraints
    else: new_obj = None
    self.obj4528.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj4528)
    self.globalAndLocalPostcondition(self.obj4528, rootNode)
    self.obj4528.postAction( rootNode.CREATE )

    self.obj4529=MT_pre__directLink_T(self)
    self.obj4529.isGraphObjectVisual = True

    if(hasattr(self.obj4529, '_setHierarchicalLink')):
      self.obj4529._setHierarchicalLink(False)

    # MT_label__
    self.obj4529.MT_label__.setValue('6')

    # MT_pivotOut__
    self.obj4529.MT_pivotOut__.setValue('')
    self.obj4529.MT_pivotOut__.setNone()

    # MT_subtypeMatching__
    self.obj4529.MT_subtypeMatching__.setValue(('True', 0))
    self.obj4529.MT_subtypeMatching__.config = 0

    # MT_pivotIn__
    self.obj4529.MT_pivotIn__.setValue('')
    self.obj4529.MT_pivotIn__.setNone()

    # MT_pre__associationType
    self.obj4529.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4529.MT_pre__associationType.setHeight(15)

    self.obj4529.graphClass_= graph_MT_pre__directLink_T
    if self.genGraphics:
       new_obj = graph_MT_pre__directLink_T(443.0,270.5,self.obj4529)
       new_obj.DrawObject(self.UMLmodel)
       self.UMLmodel.addtag_withtag("MT_pre__directLink_T", new_obj.tag)
       new_obj.layConstraints = dict() # Graphical Layout Constraints
    else: new_obj = None
    self.obj4529.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj4529)
    self.globalAndLocalPostcondition(self.obj4529, rootNode)
    self.obj4529.postAction( rootNode.CREATE )

    self.obj4530=MT_pre__directLink_T(self)
    self.obj4530.isGraphObjectVisual = True

    if(hasattr(self.obj4530, '_setHierarchicalLink')):
      self.obj4530._setHierarchicalLink(False)

    # MT_label__
    self.obj4530.MT_label__.setValue('7')

    # MT_pivotOut__
    self.obj4530.MT_pivotOut__.setValue('')
    self.obj4530.MT_pivotOut__.setNone()

    # MT_subtypeMatching__
    self.obj4530.MT_subtypeMatching__.setValue(('True', 0))
    self.obj4530.MT_subtypeMatching__.config = 0

    # MT_pivotIn__
    self.obj4530.MT_pivotIn__.setValue('')
    self.obj4530.MT_pivotIn__.setNone()

    # MT_pre__associationType
    self.obj4530.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4530.MT_pre__associationType.setHeight(15)

    self.obj4530.graphClass_= graph_MT_pre__directLink_T
    if self.genGraphics:
       new_obj = graph_MT_pre__directLink_T(354.0,353.0,self.obj4530)
       new_obj.DrawObject(self.UMLmodel)
       self.UMLmodel.addtag_withtag("MT_pre__directLink_T", new_obj.tag)
       new_obj.layConstraints = dict() # Graphical Layout Constraints
    else: new_obj = None
    self.obj4530.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj4530)
    self.globalAndLocalPostcondition(self.obj4530, rootNode)
    self.obj4530.postAction( rootNode.CREATE )

    self.obj4531=MT_pre__CompositionType(self)
    self.obj4531.isGraphObjectVisual = True

    if(hasattr(self.obj4531, '_setHierarchicalLink')):
      self.obj4531._setHierarchicalLink(False)

    # MT_pivotOut__
    self.obj4531.MT_pivotOut__.setValue('')
    self.obj4531.MT_pivotOut__.setNone()

    # MT_subtypeMatching__
    self.obj4531.MT_subtypeMatching__.setValue(('True', 0))
    self.obj4531.MT_subtypeMatching__.config = 0

    # MT_pre__classtype
    self.obj4531.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4531.MT_pre__classtype.setHeight(15)

    # MT_pivotIn__
    self.obj4531.MT_pivotIn__.setValue('')
    self.obj4531.MT_pivotIn__.setNone()

    # MT_label__
    self.obj4531.MT_label__.setValue('3')

    # MT_pre__cardinality
    self.obj4531.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4531.MT_pre__cardinality.setHeight(15)

    # MT_pre__name
    self.obj4531.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
    self.obj4531.MT_pre__name.setHeight(15)

    self.obj4531.graphClass_= graph_MT_pre__CompositionType
    if self.genGraphics:
       new_obj = graph_MT_pre__CompositionType(280.0,260.0,self.obj4531)
       new_obj.DrawObject(self.UMLmodel)
       self.UMLmodel.addtag_withtag("MT_pre__CompositionType", new_obj.tag)
       new_obj.layConstraints = dict() # Graphical Layout Constraints
       new_obj.layConstraints['scale'] = [1.0, 1.0]
    else: new_obj = None
    self.obj4531.graphObject_ = new_obj

    # Add node to the root: rootNode
    rootNode.addNode(self.obj4531)
    self.globalAndLocalPostcondition(self.obj4531, rootNode)
    self.obj4531.postAction( rootNode.CREATE )

    # Connections for obj4524 (graphObject_: Obj12) of type LHS
    self.drawConnections(
)
    # Connections for obj4525 (graphObject_: Obj13) of type MT_pre__SoftwareComposition
    self.drawConnections(
(self.obj4525,self.obj4529,[438.0, 208.0, 443.0, 270.5],"true", 2) )
    # Connections for obj4526 (graphObject_: Obj14) of type MT_pre__System
    self.drawConnections(
(self.obj4526,self.obj4528,[241.0, 126.0, 339.5, 167.0],"true", 2) )
    # Connections for obj4527 (graphObject_: Obj15) of type MT_pre__ComponentPrototype
    self.drawConnections(
)
    # Connections for obj4528 (graphObject_: Obj16) of type MT_pre__directLink_T
    self.drawConnections(
(self.obj4528,self.obj4525,[339.5, 167.0, 438.0, 208.0],"true", 2) )
    # Connections for obj4529 (graphObject_: Obj17) of type MT_pre__directLink_T
    self.drawConnections(
(self.obj4529,self.obj4531,[443.0, 270.5, 448.0, 333.0],"true", 2) )
    # Connections for obj4530 (graphObject_: Obj18) of type MT_pre__directLink_T
    self.drawConnections(
(self.obj4530,self.obj4527,[354.0, 353.0, 260.0, 373.0],"true", 2) )
    # Connections for obj4531 (graphObject_: Obj19) of type MT_pre__CompositionType
    self.drawConnections(
(self.obj4531,self.obj4530,[448.0, 333.0, 354.0, 353.0],"true", 2) )
# Registration hooks consumed by the AToM3 model loader.
newfunction = S1ThenClauseComplete_MDL
# Meta-models this model depends on.
loadedMMName = ['MT_pre__GM2AUTOSAR_MM_META', 'MoTifRule_META']
# AToM3 file-format version that produced this file.
atom3version = '0.3'
|
'''
Created on 1.12.2016
@author: Darren
'''
'''
Given a binary tree, return all root-to-leaf paths.
For example, given the following binary tree:
1
/ \
2 3
\
5
All root-to-leaf paths are:
["1->2->5", "1->3"]
Credits: Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
'''
# Definition for a binary tree node.
class TreeNode(object):
    """A single binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        """Create a leaf node carrying value *x*; both child links start empty."""
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    def binaryTreePaths(self, root):
        """
        Return every root-to-leaf path as a "a->b->c" string.

        :type root: TreeNode
        :rtype: List[str]
        """
        if not root:
            return []
        paths = []
        # Depth-first traversal with an explicit stack; each entry pairs a
        # node with the "v1->v2->..." prefix accumulated on the way down.
        stack = [(root, str(root.val))]
        while stack:
            node, prefix = stack.pop()
            if not node.left and not node.right:
                paths.append(prefix)
                continue
            # Push right before left so the left subtree is visited first,
            # matching the output order of a recursive pre-order walk.
            if node.right:
                stack.append((node.right, prefix + "->" + str(node.right.val)))
            if node.left:
                stack.append((node.left, prefix + "->" + str(node.left.val)))
        return paths
import logging
import os
import tempfile
import pytest
from quetz.config import Config, ConfigEntry, ConfigSection, configure_logger
from quetz.dao import Dao
from quetz.errors import ConfigError
@pytest.fixture
def config_extra():
    # Extra TOML appended to the base config by the test harness; populates
    # the [users] section consumed by the tests below.
    return r"""[users]
admins=["bartosz"]
default_role = "member"
create_default_channel = true
"""
def test_config_without_file_path_set(config_str):
    """Config() must fail without QUETZ_CONFIG_FILE but accept an explicit path."""
    # the env variable should not be defined for this test to work
    assert not os.environ.get("QUETZ_CONFIG_FILE")
    # we need to check whether Config was not initialised before
    assert not Config._instances
    with pytest.raises(ValueError, match="Environment"):
        Config()
    # check if it works with path even if QUETZ_CONFIG_FILE is
    # not defined
    with tempfile.NamedTemporaryFile("w", delete=False) as fid:
        fid.write(config_str)
        fid.flush()
        # delete=False so Config can reopen the file by name here.
        config = Config(fid.name)
    assert config.configured_section("users")
def test_config_users(config):
    # Values come from the `config_extra` fixture's [users] section above.
    assert config.users_default_role == "member"
    assert config.users_create_default_channel
    assert config.users_admins == ["bartosz"]
    # Entries absent from the file are falsy.
    assert not config.users_maintainers
    assert not config.users_members
@pytest.mark.parametrize(
    "config_extra", ["[users]\nadmins=[]", "[users]\nmaintainers=[]"]
)
def test_config_empty_users_section(dao: Dao, user, config):
    # With only empty lists, the section is present but every entry is falsy.
    assert config.configured_section("users")
    assert not config.users_admins
    assert not config.users_maintainers
    assert not config.users_members
    assert not config.users_default_role
    assert not config.users_create_default_channel
def test_config_is_singleton(config):
    """Config instances are cached; clearing the cache yields a fresh one."""
    c = Config()
    assert c is config
    # Drop the cache to force a new instance on the next construction.
    Config._instances = {}
    c_new = Config()
    assert c_new is not config
    # NOTE(review): Config("config.toml") returning the path-less instance
    # implies both resolve to the same cache key — confirm in quetz.config.
    c_file = Config("config.toml")
    assert c_file is c_new
def test_config_with_path(config_dir, config_base):
    """Each distinct config path gets its own cached Config instance."""
    one_path = os.path.join(config_dir, "one_config.toml")
    other_path = os.path.join(config_dir, "other_config.toml")
    with open(one_path, 'w') as fid:
        fid.write("\n".join([config_base, "[users]\nadmins=['one']"]))
    with open(other_path, 'w') as fid:
        fid.write("\n".join([config_base, "[users]\nadmins=['other']"]))
    # Start from a clean cache so earlier tests don't leak instances.
    Config._instances = {}
    c_one = Config(one_path)
    assert c_one.configured_section("users")
    assert c_one.users_admins == ["one"]
    c_other = Config(other_path)
    assert c_other.configured_section("users")
    assert c_other.users_admins == ["other"]
    # Same path again -> same cached instance.
    c_new = Config(one_path)
    assert c_new is c_one
def test_config_extend_require(config):
    # Registering a section whose required entry (no default) is missing
    # from the config file must raise ConfigError.
    with pytest.raises(ConfigError):
        config.register(
            [
                ConfigSection(
                    "other_plugin",
                    [
                        ConfigEntry("some_config_value", str),
                    ],
                )
            ]
        )
    # remove last entry again
    config._config_map.pop()
@pytest.mark.parametrize(
    "config_extra", ["[extra_plugin]\nsome=\"testvalue\"\nconfig=\"othervalue\"\n"]
)
def test_config_extend(config):
    # Registered entries become `<section>_<entry>` attributes; entries
    # missing from the file fall back to their declared defaults.
    config.register(
        [
            ConfigSection(
                "extra_plugin",
                [
                    ConfigEntry("some", str),
                    ConfigEntry("config", str),
                    ConfigEntry("has_default", str, "iamdefault"),
                ],
            )
        ]
    )
    assert config.extra_plugin_some == 'testvalue'
    assert config.extra_plugin_config == 'othervalue'
    assert config.extra_plugin_has_default == 'iamdefault'
    # Undo the registration so later tests see a pristine config map.
    config._config_map.pop()
def test_configure_logger(capsys):
    "configure_logger should be idempotent"
    configure_logger()
    logger = logging.getLogger("quetz")
    logger.error("my test")
    captured = capsys.readouterr()
    assert "[quetz]" in captured.err
    assert "ERROR" in captured.err
    assert "my test" in captured.err
    # Exactly one line: only one handler attached after the first call.
    assert len(captured.err.splitlines()) == 1
    captured = capsys.readouterr()
    assert not captured.err
    # A second configure_logger() must not attach another handler.
    configure_logger()
    logger.info("second")
    captured = capsys.readouterr()
    assert "[quetz]" in captured.err
    assert "INFO" in captured.err
    assert "second" in captured.err
    # No duplicated output proves the handler was not re-registered.
    assert captured.err.count("second") == 1
    assert "my test" not in captured.err
    assert len(captured.err.splitlines()) == 1
|
import glob
import os
from more_itertools import ichunked
from PIL import Image
import torch
from torchvision import transforms
import tqdm
from tartangan.utils.fs import maybe_makedirs
from tartangan.trainers.utils import set_device_from_args
from .base import GOutputApp
class InfoGANEncodeImage(GOutputApp):
    """Encode images using an InfoGAN discriminator."""
    app_name = "InfoGAN image encoder"

    @torch.no_grad()
    def run(self):
        """Encode all target images in batches and persist ids + codes.

        With ``--recon`` each batch's codes are also fed through the
        generator and the reconstruction saved as a PNG.
        """
        self.setup()
        ids = []
        codes = []
        filename_iter = ichunked(self.gen_filenames(), self.args.batch_size)
        for batch_i, in_filenames in tqdm.tqdm(enumerate(filename_iter)):
            # load up images from a batch of filenames
            batch_imgs = []
            batch_filenames = []
            for in_filename in in_filenames:
                try:
                    target_img = Image.open(in_filename).convert('RGB')
                except IOError:
                    print(f'Error opening {in_filename}')
                    continue
                target_img = self.transform_input_image(target_img)
                batch_imgs.append(target_img)
                batch_filenames.append(os.path.basename(in_filename))
            # Fix: if every file in this chunk failed to open,
            # torch.stack([]) would raise — skip the empty batch instead.
            if not batch_imgs:
                continue
            _, p_code = self.d(torch.stack(batch_imgs).to(self.args.device))
            batch_ids = [os.path.splitext(f)[0] for f in batch_filenames]
            ids += batch_ids
            codes.append(p_code.cpu())
            if self.args.recon:
                recon = self.g(p_code)
                self.save_image(
                    recon, f'{self.args.output_prefix}_{batch_i}.png'
                )
        codes = [c.numpy() for c in codes]
        self.save_codes(ids, codes)

    def save_codes(self, ids, codes):
        """Flatten per-batch code arrays and write an (id, features) pickle."""
        import pandas as pd
        # One row per image: split each batch array into its members.
        codes = [c[i] for c in codes for i in range(len(c))]
        df = pd.DataFrame(dict(id=ids, features=codes))
        print(df.columns)
        print(df.head(100))
        df.to_pickle(f'{self.args.output_prefix}_codes.pkl')

    def gen_filenames(self):
        """
        Yields filenames from the CLI args which are either explicit
        filenames or glob expressions.
        """
        for name in self.args.target_images:
            if os.path.isfile(name):
                yield name
            else:
                for filename in glob.iglob(name):
                    yield filename

    def setup(self):
        """Load G and D in eval mode and build the input-image transform."""
        set_device_from_args(self.args)
        self.load_generator(target=False)
        self.g = self.g.eval()
        self.load_disciminator()
        self.d = self.d.eval()
        img_size = self.g.max_size
        self.transform_input_image = transforms.Compose([
            transforms.Resize(img_size, interpolation=Image.LANCZOS),
            transforms.RandomCrop((img_size, img_size)),
            transforms.ToTensor(),
            lambda x: x * 2 - 1  # rescale [0, 1] -> [-1, 1]
        ])
        if os.path.dirname(self.args.output_prefix):
            maybe_makedirs(os.path.dirname(self.args.output_prefix))

    @classmethod
    def add_args_to_parser(cls, p):
        """Register this app's CLI arguments on top of the base app's."""
        super().add_args_to_parser(p)
        p.add_argument('target_images', nargs='+',
                       help='Filenames of images to be encoded')
        p.add_argument('--recon', action='store_true',
                       help='Visualize the encoding provided by D')
        p.add_argument('--batch-size', default=32, type=int)
if __name__ == '__main__':
    # CLI entry point: build the app from command-line args and run it.
    app = InfoGANEncodeImage.create_from_cli()
    app.run()
|
"""
Import any enabled modules as specified in the settings.py
file for the project.
"""
import imp
import sys
from django.conf import settings
def import_module(name, globals=None, locals=None, fromlist=None):
    """Import module ``name`` from the lifeflow package's ``mods`` directory.

    The signature mirrors the builtin ``__import__`` hook; ``globals``,
    ``locals`` and ``fromlist`` are accepted but unused.
    """
    # Fast path: see if the module has already been imported.
    try:
        return sys.modules[name]
    except KeyError:
        pass
    # If any of the following calls raises an exception,
    # there's a problem we can't handle -- let the caller handle it.
    # imp.find_module returns (file, pathname, description); for a package
    # the file handle is None, so only path and description are kept.
    _, pathname, description = imp.find_module("lifeflow")
    path = u"%s/mods/%s" % (pathname, name)
    # NOTE(review): `description` describes the *lifeflow* package, not the
    # target module; this relies on the mod also being a package directory
    # loadable via imp.load_module — confirm before changing.
    return imp.load_module(name, None, path, description)
# Import every module listed in settings.LIFEFLOW_MODS at import time.
for mod in getattr(settings, u"LIFEFLOW_MODS", ()):
    import_module(mod)
|
from django.shortcuts import redirect
from django.http import HttpResponse
from django.template import loader
from http.server import HTTPStatus
from .User import User
import iotweb.views.urls_and_messages as UM
import requests
import json
def tokens(request, shdw_id):
    """
    GET request: renders the token page
    POST request: revokes a specific token
    """
    user = User.get_instance()
    if not request.POST:
        # GET branch: fetch and render all tokens of this shadow.
        template = loader.get_template('../templates/shadow_tokens.html')
        url = UM.DB_URL+'getShadowTokens/{}/'.format(shdw_id)
        headers = {'Authorization': 'Token {}'.format(user.user_token)}
        req = requests.get(url=url, headers=headers)
        if req.status_code == 200:
            tkn_list = json.loads(req.text)['tokens']
            context = {'tokens': [], 'email': user.user_email}
            if tkn_list:
                # Each entry is itself a JSON string; decode it and derive a
                # display status from the "revoked" flag.
                for tkn in tkn_list:
                    json_object = json.loads(tkn)
                    if json_object["revoked"]:
                        json_object['status'] = "REVOKED"
                    else:
                        json_object['status'] = "VALID"
                    context['tokens'].append(json_object)
            context['shadow'] = shdw_id
            return HttpResponse(template.render(context, request))
        else:
            # Error page; on 401 point the user back to login to refresh.
            template = loader.get_template('../templates/error_page.html')
            context = {'code_error': req.status_code,
                       'message': req.text,
                       'error_name': HTTPStatus(req.status_code).phrase,
                       'back': '/profile/'
                       }
            if req.status_code == 401:
                context['message'] = context['message'] + UM.REFRESH_TOKEN
                context['back'] = '/login/'
            return HttpResponse(template.render(context, request))
    else:  # it's a post (to revoke a token)
        url = UM.DB_URL + 'revokeToken/'
        token = request.POST['token']
        headers = {'Authorization': 'Token {}'.format(token)}
        # NOTE(review): revocation is performed with a GET request — confirm
        # this matches the backend API's contract.
        req = requests.get(url=url, headers=headers)  # HERE THE TOKEN IS REVOKED
        if req.status_code == 200:
            return redirect('/viewDevices/{}/'.format(shdw_id))
        else:
            template = loader.get_template('../templates/error_page.html')
            context = {'code_error': req.status_code,
                       'message': req.text,
                       'error_name': HTTPStatus(req.status_code).phrase,
                       'back': '/login/'
                       }
            if req.status_code == 401:
                context['message'] = context['message'] + UM.REFRESH_TOKEN
                context['back'] = '/login/'
            return HttpResponse(template.render(context, request))
def new_token(request, shdw_id):
    '''generates a new token and refresh the page'''
    user = User.get_instance()
    url = UM.DB_URL+'generateToken/'
    headers = {'Authorization': 'Token {}'.format(user.user_token)}
    data = {'shadow_id': shdw_id, 'type': 'DEVICE'}
    req = requests.post(url=url, data=data, headers=headers)  # HERE THE TOKEN IS CREATED
    if req.status_code == 200:
        # Token created; now attach it to the shadow record.
        tkn_id = json.loads(req.text)['token']
        url_update_shadow = UM.DB_URL+'updateShadow/{}/'.format(shdw_id)
        data_update = {'token': tkn_id}
        req_update = requests.post(url=url_update_shadow, data=data_update, headers=headers)  # HERE WE UPDATE THE SHADOW
        if req_update.status_code == 200:
            return redirect('/viewTokens/{}/'.format(shdw_id))
        else:
            # Shadow update failed; show the error, offering a way back.
            template = loader.get_template('../templates/error_page.html')
            context = {'code_error': req_update.status_code,
                       'message': req_update.text,
                       'error_name': HTTPStatus(req_update.status_code).phrase,
                       'back': '/viewTokens/{}/'.format(shdw_id)
                       }
            if req_update.status_code == 401:
                context['message'] = context['message'] + UM.REFRESH_TOKEN
                context['back'] = '/login/'
            return HttpResponse(template.render(context, request))
    else:
        # Token creation itself failed.
        template = loader.get_template('../templates/error_page.html')
        context = {'code_error': req.status_code,
                   'message': req.text,
                   'error_name': HTTPStatus(req.status_code).phrase,
                   'back': '/viewDevices/{}/'.format(shdw_id)
                   }
        if req.status_code == 401:
            context['message'] = context['message'] + UM.REFRESH_TOKEN
            context['back'] = '/login/'
        return HttpResponse(template.render(context, request))
|
# -*- coding: utf-8 -*-
"""
This class manages the fetching of stock data and notifications.
"""
import pync
import os, sys, atexit, signal, socket
import time
from stock_config.nasdaq_stock_config import NasdaqStockConfig
class StockNotifier:
    """Daemon that polls stock quotes and shows desktop notifications.

    NOTE(review): this class is Python 2 only (``except OSError, e``,
    ``file(...)``, ``print`` statements); it does not parse under Python 3.
    """
    PIDFILE = "/tmp/stocknotifier.pid"
    LOCALHOST = '127.0.0.1'
    PORT = 8888
    # NOTE(review): stock_symbols=[] is a shared mutable default — all
    # instances constructed without the argument share one list.
    def __init__(self, pidfile = PIDFILE, stock_symbols=[], stdin='/dev/null',
                 stdout='/dev/null', stderr='/dev/null'):
        self.stock_symbols = stock_symbols
        self.pidfile = pidfile
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        # Non-blocking control socket: commands ("TEST"/"ADD"/"REMOVE") are
        # received in run(); accept() raises socket.error when idle.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((StockNotifier.LOCALHOST, StockNotifier.PORT))
        self.sock.setblocking(0)
        self.sock.listen(1)
        # Maps the first word of an incoming message to its handler.
        self.message_to_action_map = {
            "TEST" : self.test_message_action,
            "ADD": self.add_stock_action,
            "REMOVE": self.remove_stock_action
        }
    def test_message_action(self, data):
        """
        Tests communication between process and handler
        """
        pync.notify(data[1])
    def add_stock_action(self, data):
        """
        Action which adds a stock symbol to monitor
        """
        self._add_stock_symbol(data[1])
    def remove_stock_action(self, data):
        """
        Action which removes a stock symbol from the monitor
        """
        self._remove_stock_symbol(data[1])
    def _show_notification_message(self, stock):
        """
        Displays notification message to the screen.
        """
        _title = "%s Stock Notification" % (stock.get_stock_symbol().upper())
        _message = "Your stock value is %s. The stock has %s by %s." % \
            (stock.get_stock_value(), stock.get_stock_margin(), stock.get_stock_value_difference())
        pync.notify(_message, title = _title)
    def _notify(self):
        """
        Fetches stock data and displays notifications based on configuration.
        """
        for stock_symbol in self.stock_symbols:
            stock = NasdaqStockConfig.get_stock_data(stock_symbol)
            self._show_notification_message(stock)
    def _add_stock_symbol(self, stock_symbol):
        """
        Adds stock symbol to the list of monitored stocks
        """
        if stock_symbol not in self.stock_symbols:
            self.stock_symbols.append(stock_symbol)
    def _remove_stock_symbol(self, stock_symbol):
        """
        Removes stock symbol from the list of monitored stocks
        """
        if stock_symbol in self.stock_symbols:
            self.stock_symbols.remove(stock_symbol)
    def _daemonize(self):
        """
        Creates a daemon process.
        """
        # Classic UNIX double-fork: the first fork returns control to the
        # shell, setsid() detaches from the controlling terminal.
        try:
            pid = os.fork()
            if pid > 0:
                # exit the first parent
                sys.exit(0)
        except OSError, e:
            print("#1 Fork failed: %d (%s)\n" %(e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit the first parent
                sys.exit(0)
        except OSError, e:
            print("#2 Fork failed: %d (%s)\n" %(e.errno, e.strerror))
            sys.exit(1)
        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = file(self.stdin, 'r')
        so = file(self.stdout, 'a+')
        se = file(self.stderr, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        #write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        file(self.pidfile,'w+').write("%s\n" % pid)
    def delpid(self):
        """
        Removes the pid file created for the process.
        """
        os.remove(self.pidfile)
    def start(self):
        """
        Starts the daemon
        """
        # An existing pidfile means another instance is (probably) running.
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            print(message % self.pidfile)
            sys.exit(1)
        self._daemonize()
        self.run()
    def run(self):
        """
        Method which is run as a daemon
        """
        # Poll loop: drain one control command if pending (the socket is
        # non-blocking, so accept() raises socket.error when idle), then
        # emit notifications and sleep.
        while True:
            try:
                conn, addr = self.sock.accept()
                message = conn.recv(1000)
                data = message.split(" ")
                if message:
                    self.message_to_action_map[data[0]](data)
            except socket.error:
                pass
            self._notify()
            time.sleep(15)
    def stop(self):
        """
        Method to stop the daemon process.
        """
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return
        # Keep signalling until the process is gone; "No such process"
        # means it exited, so clean up the pidfile.
        try:
            while 1:
                os.kill(pid, signal.SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)
    def restart(self):
        """
        Restarts the daemon process
        """
        self.stop()
        self.start()
    def get_pid(self):
        """
        Returns the process id of the process.
        """
        # NOTE(review): despite the name, this returns the pidfile *path*,
        # not the numeric pid — confirm callers expect the path.
        return self.pidfile
|
import os
import weakref
import numpy as np
from yt.data_objects.index_subobjects.unstructured_mesh import SemiStructuredMesh
from yt.data_objects.static_output import Dataset
from yt.funcs import setdefaultattr
from yt.geometry.unstructured_mesh_handler import UnstructuredIndex
from yt.utilities.file_handler import HDF5FileHandler
from yt.utilities.on_demand_imports import _h5py as h5py
from .fields import MoabFieldInfo, PyneFieldInfo
class MoabHex8Mesh(SemiStructuredMesh):
    # Hex8 elements have 8 vertices per cell; connectivity indices in the
    # .h5m file start at 1, hence the offset.
    _connectivity_length = 8
    _index_offset = 1
class MoabHex8Hierarchy(UnstructuredIndex):
    """Index for a MOAB .h5m file containing a single Hex8 mesh."""
    def __init__(self, ds, dataset_type="h5m"):
        self.dataset = weakref.proxy(ds)
        self.dataset_type = dataset_type
        self.index_filename = self.dataset.parameter_filename
        self.directory = os.path.dirname(self.index_filename)
        # Keep the HDF5 handle open only while the base class builds the
        # index (it calls the _initialize/_detect hooks below).
        self._fhandle = h5py.File(self.index_filename, mode="r")
        UnstructuredIndex.__init__(self, ds, dataset_type)
        self._fhandle.close()
    def _initialize_mesh(self):
        # Read connectivity and node coordinates wholesale into memory.
        con = self._fhandle["/tstt/elements/Hex8/connectivity"][:]
        con = np.asarray(con, dtype="int64")
        coords = self._fhandle["/tstt/nodes/coordinates"][:]
        coords = np.asarray(coords, dtype="float64")
        self.meshes = [MoabHex8Mesh(0, self.index_filename, con, coords, self)]
    def _detect_output_fields(self):
        # One ("moab", name) field per tag attached to the Hex8 elements.
        self.field_list = [
            ("moab", f) for f in self._fhandle["/tstt/elements/Hex8/tags"].keys()
        ]
    def _count_grids(self):
        # The whole file is exposed as a single mesh.
        self.num_grids = 1
class MoabHex8Dataset(Dataset):
    """yt dataset frontend for MOAB .h5m files with Hex8 meshes."""
    _index_class = MoabHex8Hierarchy
    _field_info_class = MoabFieldInfo
    periodicity = (False, False, False)
    def __init__(
        self,
        filename,
        dataset_type="moab_hex8",
        storage_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        self.fluid_types += ("moab",)
        Dataset.__init__(
            self,
            filename,
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
        )
        self.storage_filename = storage_filename
        self.filename = filename
        self._handle = HDF5FileHandler(filename)
    def _set_code_unit_attributes(self):
        # Almost everything is regarded as dimensionless in MOAB, so these will
        # not be used very much or at all.
        setdefaultattr(self, "length_unit", self.quan(1.0, "cm"))
        setdefaultattr(self, "time_unit", self.quan(1.0, "s"))
        setdefaultattr(self, "mass_unit", self.quan(1.0, "g"))
    def _parse_parameter_file(self):
        self._handle = h5py.File(self.parameter_filename, mode="r")
        coords = self._handle["/tstt/nodes/coordinates"]
        # NOTE(review): uses the first/last node rows as the domain corners,
        # which assumes node ordering spans the bounding box — confirm
        # against the MOAB file layout.
        self.domain_left_edge = coords[0]
        self.domain_right_edge = coords[-1]
        self.domain_dimensions = self.domain_right_edge - self.domain_left_edge
        self.refine_by = 2
        self.dimensionality = len(self.domain_dimensions)
        self.current_time = 0.0
        self.unique_identifier = self.parameter_filename
        self.cosmological_simulation = False
        self.num_ghost_zones = 0
        # Non-cosmological defaults expected by the yt Dataset interface.
        self.current_redshift = 0.0
        self.omega_lambda = 0.0
        self.omega_matter = 0.0
        self.hubble_constant = 0.0
        # NOTE(review): reassigns cosmological_simulation (False above, 0
        # here) — both falsy, so behavior is unchanged.
        self.cosmological_simulation = 0
    @classmethod
    def _is_valid(cls, filename, *args, **kwargs):
        # Claim any file with the MOAB HDF5 extension.
        return filename.endswith(".h5m")
    def __repr__(self):
        return self.basename.rsplit(".", 1)[0]
class PyneHex8Mesh(SemiStructuredMesh):
    # PyNE meshes use 0-based connectivity, unlike the raw .h5m files.
    _connectivity_length = 8
    _index_offset = 0
class PyneMeshHex8Hierarchy(UnstructuredIndex):
    """Index built from an in-memory PyNE mesh object (no file I/O)."""
    def __init__(self, ds, dataset_type="moab_hex8_pyne"):
        self.dataset = weakref.proxy(ds)
        self.dataset_type = dataset_type
        self.index_filename = self.dataset.parameter_filename
        self.directory = os.getcwd()
        self.pyne_mesh = ds.pyne_mesh
        super().__init__(ds, dataset_type)
    def _initialize_mesh(self):
        from pymoab import types
        # Flat coordinate array -> (n_vertices, 3).
        ents = list(self.pyne_mesh.structured_iterate_vertex())
        coords = self.pyne_mesh.mesh.get_coords(ents).astype("float64")
        coords = coords.reshape(len(coords) // 3, 3)
        hexes = self.pyne_mesh.mesh.get_entities_by_type(0, types.MBHEX)
        # Gather the 8 vertex handles adjacent to each hex element.
        vind = []
        for h in hexes:
            vind.append(
                self.pyne_mesh.mesh.get_adjacencies(
                    h, 0, create_if_missing=True, op_type=types.UNION
                )
            )
        vind = np.asarray(vind, dtype=np.int64)
        vind = vind.reshape(len(vind) // 8, 8)
        self.meshes = [PyneHex8Mesh(0, self.index_filename, vind, coords, self)]
    def _detect_output_fields(self):
        # One ("pyne", name) field per tag on the PyNE mesh.
        self.field_list = [("pyne", f) for f in self.pyne_mesh.tags.keys()]
    def _count_grids(self):
        # The whole mesh is exposed as a single grid.
        self.num_grids = 1
class PyneMoabHex8Dataset(Dataset):
    """yt dataset frontend wrapping an in-memory PyNE Hex8 mesh."""
    _index_class = PyneMeshHex8Hierarchy
    _fieldinfo_fallback = MoabFieldInfo
    _field_info_class = PyneFieldInfo
    periodicity = (False, False, False)
    def __init__(
        self,
        pyne_mesh,
        dataset_type="moab_hex8_pyne",
        storage_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        self.fluid_types += ("pyne",)
        # Synthetic filename: there is no backing file for a PyNE mesh.
        filename = "pyne_mesh_" + str(id(pyne_mesh))
        self.pyne_mesh = pyne_mesh
        Dataset.__init__(
            self,
            str(filename),
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
        )
        self.storage_filename = storage_filename
        self.filename = filename
    def _set_code_unit_attributes(self):
        # Almost everything is regarded as dimensionless in MOAB, so these will
        # not be used very much or at all.
        setdefaultattr(self, "length_unit", self.quan(1.0, "cm"))
        setdefaultattr(self, "time_unit", self.quan(1.0, "s"))
        setdefaultattr(self, "mass_unit", self.quan(1.0, "g"))
    def _parse_parameter_file(self):
        ents = list(self.pyne_mesh.structured_iterate_vertex())
        coords = self.pyne_mesh.mesh.get_coords(ents)
        # NOTE(review): takes the first/last vertex (x, y, z) triplets from
        # the flat coords array as the domain corners — assumes vertex
        # ordering spans the bounding box; confirm.
        self.domain_left_edge = coords[0:3]
        self.domain_right_edge = coords[-3:]
        self.domain_dimensions = self.domain_right_edge - self.domain_left_edge
        self.refine_by = 2
        self.dimensionality = len(self.domain_dimensions)
        self.current_time = 0.0
        self.unique_identifier = self.parameter_filename
        self.cosmological_simulation = False
        self.num_ghost_zones = 0
        # Non-cosmological defaults expected by the yt Dataset interface.
        self.current_redshift = 0.0
        self.omega_lambda = 0.0
        self.omega_matter = 0.0
        self.hubble_constant = 0.0
        self.cosmological_simulation = 0
    @classmethod
    def _is_valid(cls, filename, *args, **kwargs):
        # Never auto-detected from a filename; constructed explicitly.
        return False
    def __repr__(self):
        return self.basename.rsplit(".", 1)[0]
|
from neuron import h
class Cell(object):
    """ Interface class to build different types of cells. """
    def __init__(self):
        """ Object initialization. """
        # Neuron cell - to be used as the source of a h.NetCon
        self.cell = None
    def connect_to_target(self, target, weight: float = 0, delay: float = 1, **kwargs) -> h.NetCon:
        """ Connect the current cell to a target cell and return the netCon object.
        Keyword arguments:
        target -- the target object to which we want to connect
        weight -- the weight of the connection (default 0)
        delay -- communication time delay in ms (default 1)
        """
        # NOTE(review): extra **kwargs are accepted but ignored here —
        # presumably a hook for subclasses; confirm.
        nc = h.NetCon(self.cell, target)
        nc.delay = delay
        nc.weight[0] = weight
        return nc
    def is_artificial(self) -> bool:
        """ Return a flag to check whether the cell is an integrate-and-fire or artificial cell.
        By default the flag is set to True.
        """
        return True
|
# flake8: noqa
from .version import __version__
# Legacy Django AppConfig hook (pre-3.2 style) pointing at the app config.
default_app_config = 'querybuilder.apps.QueryBuilderConfig'
|
# -*- coding: utf-8 -*-
"""Main module."""
import os
import typing
from pathlib import Path
from urllib.parse import urljoin
import pyperclip
from wand.color import Color
from wand.image import Image
# Anything this module accepts as a filesystem path.
PathLike = typing.Union[str, os.PathLike]
def convert(
    img: Image,
    size: int,
    filename: PathLike,
    background: str = "white",
    gravity: str = "center",
    alpha: bool = False,
    colors: int = 256,
    interpolate: str = "nearest",
) -> None:
    """Scale *img* to cover a size x size square, crop, quantize and save.

    The ``^`` geometry makes transform() fill the square; crop() then trims
    to exactly size x size using *gravity*. ``background`` is accepted for
    API symmetry but not used in this function.
    """
    geometry = "%dx%d^" % (size, size)
    img.interpolate_method = interpolate
    img.alpha_channel = alpha
    img.transform(resize=geometry)
    img.crop(width=size, height=size, gravity=gravity)
    # Positional wand args: number_colors, colorspace, treedepth, dither,
    # measure_error.
    img.quantize(colors, None, 0, False, False)
    img.save(filename=filename)
# Output filenames generated below; their order must match the fifteen %s
# slots of the `usage` template.
favicons = [
    "apple-icon-57x57.png",
    "apple-icon-60x60.png",
    "apple-icon-72x72.png",
    "apple-icon-76x76.png",
    "apple-icon-114x114.png",
    "apple-icon-120x120.png",
    "apple-icon-144x144.png",
    "apple-icon-152x152.png",
    "apple-icon-180x180.png",
    "android-icon-192x192.png",
    "favicon-32x32.png",
    "favicon-96x96.png",
    "favicon-16x16.png",
    "ms-icon-144x144.png",
    "favicon.ico",
]
# HTML <head> snippet printed (and copied to the clipboard) after
# generation, formatted with the favicon URLs in list order.
usage = """<link rel="apple-touch-icon" sizes="57x57" href="%s">
<link rel="apple-touch-icon" sizes="60x60" href="%s">
<link rel="apple-touch-icon" sizes="72x72" href="%s">
<link rel="apple-touch-icon" sizes="76x76" href="%s">
<link rel="apple-touch-icon" sizes="114x114" href="%s">
<link rel="apple-touch-icon" sizes="120x120" href="%s">
<link rel="apple-touch-icon" sizes="144x144" href="%s">
<link rel="apple-touch-icon" sizes="152x152" href="%s">
<link rel="apple-touch-icon" sizes="180x180" href="%s">
<link rel="icon" type="image/png" sizes="192x192" href="%s">
<link rel="icon" type="image/png" sizes="32x32" href="%s">
<link rel="icon" type="image/png" sizes="96x96" href="%s">
<link rel="icon" type="image/png" sizes="16x16" href="%s">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="%s">
<link rel="icon" href="%s">"""
def generate(
    image: Image, _output: PathLike, base: str = "/", normalize_urls: bool = True
) -> None:
    """Generate the full favicon set from *image* into directory *_output*.

    Writes a multi-resolution favicon.ico plus apple/android/ms/plain PNG
    icons, then prints (and tries to copy to the clipboard) the HTML <head>
    snippet referencing them under *base*. Note: *image* is consumed via a
    ``with`` block and is closed when this function returns.

    Raises:
        ValueError: if *_output* exists and is not a directory.
    """
    output = Path(_output)
    if output.exists() and not output.is_dir():
        raise ValueError("%s does exist and is not a directory" % output)
    # Fix: previously both URL tuples were built and the first discarded
    # whenever normalize_urls was true; build only the one we need.
    if normalize_urls:
        favicon_urls = tuple(urljoin(base + "/", favicon) for favicon in favicons)
    else:
        favicon_urls = tuple(base + favicon for favicon in favicons)
    # make sure the output dir exists
    output.mkdir(parents=True, exist_ok=True)
    # The .ico bundles several resolutions in one file.
    ico_sizes = [32, 16, 32, 48, 64]
    ico = None
    with image as img:
        for size in ico_sizes:
            print("Processing favicon.ico (%dx%d)" % (size, size))
            with img.clone() as i:
                i.interpolate_method = "nearest"
                i.alpha_channel = False
                # i.resize(size, size, filter="point")
                i.transform(resize="%dx%d^" % (size, size))
                i.crop(width=size, height=size, gravity="center")
                i.quantize(256, None, 0, False, False)
                # First frame becomes the base image; the rest are appended
                # to its frame sequence.
                if ico is None:
                    ico = i.clone()
                else:
                    ico.sequence.append(i)
        if ico is not None:
            ico.save(filename=output / "favicon.ico")
        for size in [57, 60, 72, 76, 114, 120, 144, 152, 180]:
            print("Processing apple icon (%dx%d)" % (size, size))
            convert(img.clone(), size, output / ("apple-icon-%dx%d.png" % (size, size)))
        for size in [36, 48, 72, 96, 114, 192]:
            print("Processing android icon (%dx%d)" % (size, size))
            convert(
                img.clone(), size, output / ("android-icon-%dx%d.png" % (size, size))
            )
        for size in [16, 32, 96]:
            print("Processing favicon (%dx%d)" % (size, size))
            convert(img.clone(), size, output / ("favicon-%dx%d.png" % (size, size)))
        for size in [70, 144, 150, 310]:
            print("Processing ms icon (%dx%d)" % (size, size))
            convert(img.clone(), size, output / ("ms-icon-%dx%d.png" % (size, size)))
    # print usage
    templated = usage % favicon_urls
    print("")
    print("To use the favicons, paste this into the <head> of your website.")
    # Clipboard support is best-effort: headless systems have no clipboard.
    try:
        pyperclip.copy(templated)
        print("We already copied it into your clipboard!")
    except pyperclip.PyperclipException:
        pass
    print("")
    print(templated)
def generate_from_image(
    image: PathLike, output: PathLike, base: str = "/", normalize_urls: bool = True
) -> None:
    """Generate the favicon set from the image file at *image*."""
    source = Image(filename=Path(image), background=Color("white"))
    generate(source, output, base=base, normalize_urls=normalize_urls)
def generate_from_color(
    color: str, output: PathLike, base: str = "/", normalize_urls: bool = True
) -> None:
    """Generate the favicon set from a solid 1000x1000 block of *color*."""
    swatch = Image(width=1000, height=1000, background=Color(color))
    generate(swatch, output, base=base, normalize_urls=normalize_urls)
|
from unittest import TestCase
# toolbox imports
from dltb.thirdparty.keras.datasource import KerasDatasource
class TestKeras(TestCase):
    """Smoke tests for the Keras-backed MNIST datasource wrapper."""
    def setUp(self):
        self.datasource = KerasDatasource('mnist', 'test')
    def test_prepare(self):
        # prepare/unprepare must toggle the `prepared` flag symmetrically.
        self.datasource.prepare()
        self.assertTrue(self.datasource.prepared)
        self.datasource.unprepare()
        self.assertFalse(self.datasource.prepared)
    def test_len(self):
        # The MNIST test split contains 10,000 images.
        self.datasource.prepare()
        self.assertEqual(len(self.datasource), 10000)
|
from .compiler import StaticDigestCompiler
from .digest import StaticDigest
# Public API of the package.
__all__ = ['StaticDigest', 'StaticDigestCompiler']
|
from . import augmenters
# Package version (bumped on release).
__version__ = '0.1.17'
|
import setuptools
import sys
import os
import io
import re
###############################################################################
# Package metadata used by setuptools.setup() below.
name = 'veracode-python'
description = 'Python wrapper for the Veracode XML APIs'
install_requires = [
    'lxml>=4.4.1',
    'python-dateutil>=2.8.1',
    'requests>=2.22.0',
    'strconv>=0.4.2',
    'xmltodict>=0.12.0',
    'Click>=7.0',
    'tabulate>=0.8.6',
]
keywords = []
entry_points = {'console_scripts': [
    'veracode=veracode.utils.cli:main'
]}
python_requires='>= 3.5'
author = 'Chuck Orde'
author_email = 'chuckorde@gmail.com'
repo_user = 'chuckorde'
license = 'BSD-3-Clause'
classifiers=[
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
]
repo_name = name
repo_url = 'https://github.com/{}'
# Template for a tagged release's source tarball on GitHub.
# Fix: was '{}/archive/v{}/.tar.gz', which inserts a stray '/' and points
# at a non-existent path instead of the '<tag>.tar.gz' archive file.
download_url = '{}/archive/v{}.tar.gz'
release_api_url = 'https://api.github.com/repos/{}/{}/releases?access_token={}'
###############################################################################
# >>> python setup.py -q version
# 0.1.0
# >>> python setup.py -q version -i patch
# 0.1.1
# >>> python setup.py -q version -i minor
# 0.2.0
# -q turns off 'running version' output
class Version(setuptools.Command):
    """``python setup.py version [-i major|minor|patch]``.

    Prints the package version parsed from the package ``__init__.py``;
    with ``-i`` it first bumps the requested semantic-version component
    in place.
    """
    version_re = r"__version__ = '(.*?)'"
    user_options = [ ('increment=', 'i', None), ]

    def initialize_options(self):
        self.increment = None

    def finalize_options(self):
        pass

    def run(self):
        if self.increment:
            self._increment_semantic_version()
        print(self.get())

    def _increment_semantic_version(self):
        """Bump major/minor/patch and rewrite __init__.py in place."""
        current_version = self.get()
        (major, minor, patch) = [int(n) for n in current_version.split('.')]
        release = self.increment.upper()
        assert(release in ['MAJOR', 'MINOR', 'PATCH'])
        # Lower-order components reset when a higher-order one is bumped.
        if release == 'PATCH': patch = patch + 1
        if release == 'MINOR': minor = minor + 1; patch = 0
        if release == 'MAJOR': major = major + 1; minor = 0; patch = 0
        new_version = '{}.{}.{}'.format( major, minor, patch)
        with io.open(self._find_init_file(), 'r', encoding='utf8') as f:
            init_file = f.read()
        with io.open(self._find_init_file(), 'w', encoding='utf8') as f:
            f.write(init_file.replace(current_version, new_version))

    def _find_init_file(self):
        """Return the path of the first __init__.py found under cwd."""
        for walk in os.walk('.'):
            (path, directory, module) = walk
            if '__init__.py' in module:
                return os.path.join(path, '__init__.py')

    @classmethod
    def get(cls):
        """Return the current version string parsed from __init__.py.

        Fix: this classmethod was written with a ``self`` parameter and
        called ``self._find_init_file(self)``; it now uses ``cls``
        consistently. Callers still invoke it as ``Version.get()``.
        """
        with io.open(cls._find_init_file(cls), 'r', encoding='utf8') as f:
            version = re.search(cls.version_re, f.read()).group(1)
        return version
class Github(setuptools.Command):
    """``python setup.py github -r <API TOKEN>``: create a GitHub release."""
    user_options = [ ('create-release=', 'r', None), ]
    def initialize_options(self):
        self.create_release = None
    def finalize_options(self):
        pass
    def run(self):
        if not self.create_release:
            raise Exception('You must specifiy --release=<API TOKEN>')
        import requests
        version = Version.get()
        # Release payload: tag and title follow the v<semver> convention.
        json = {
            "tag_name": "v{}".format(version),
            "target_commitish": "master",
            "name": "v{}".format(version),
            "body": "Release v{}".format(version),
            "draft": False,
            "prerelease": False
        }
        # NOTE(review): passing `access_token` as a query parameter is
        # deprecated by GitHub; an Authorization header is the supported way.
        api_url = release_api_url.format(
            repo_user, repo_name, self.create_release)
        res = requests.post(api_url, json=json)
        print (res.status_code, res.json())
# Resolve release metadata at import time so setup() below can use it.
version = Version.get()
url = '{}/{}'.format(repo_url.format(repo_user), repo_name)
# Fill the module-level download_url template with this release's values.
download_url = download_url.format(url, version)
with io.open('README.md', 'r') as f:
    long_description = f.read()
setuptools.setup(
    name = name,
    packages = setuptools.find_packages(),
    version = version,
    license = license,
    description = description,
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    author = author,
    author_email = author_email,
    url = url,
    download_url = download_url,
    keywords = keywords,
    install_requires = install_requires,
    classifiers = classifiers,
    python_requires = python_requires,
    cmdclass = { 'version': Version, 'github': Github },
    entry_points = entry_points,
)
|
from streamz import Stream
import json
import csv
def preprocess_stream(preprocs, stream):
    """Attach the configured preprocessing steps to *stream* and return it."""
    for step in preprocs:
        name = step["func"]
        params = step.get("parameters", {})
        # Only the "lower" step is implemented today; unknown names are
        # silently ignored (params is reserved for future steps).
        if name == "lower":
            stream = stream.map(str.lower)
    return stream
def parse_mock_json_source(source):
    """Decode the inline JSON payload and repeat it ``repeat`` times."""
    params = source["parameters"]
    record = json.loads(params["raw"])
    # Same decoded object repeated; paired with a fresh, unwired stream.
    return [record] * params["repeat"], Stream()
def parse_mock_csv_source(source):
    """Build a DictReader over inline CSV rows plus a preprocessed stream."""
    params = source["parameters"]
    rows = csv.DictReader(params["raw"], fieldnames=params["fields"])
    return rows, preprocess_stream(source["preprocess"], Stream())
def parse_json_source(source):
    """Open the configured JSON-lines file as the data iterable.

    The open file handle is returned directly; the caller iterates it
    (and is responsible for it afterwards).
    """
    filename = source["parameters"]["filename"]
    return open(filename, "r"), Stream()
def parse_sources(sources):
    """Map each source config to its (data, stream) pair via its "type".

    Raises KeyError for unknown source types.
    """
    TYPE_MAP = {
        "MockJsonSource": parse_mock_json_source,
        "MockCsvSource": parse_mock_csv_source,
        "JsonSource": parse_json_source,
    }
    return [TYPE_MAP[src["type"]](src) for src in sources]
def norm_to_ecs(row, params):
    """Placeholder: normalize *row* to ECS form. Currently a no-op."""
    return row
def lookup_geo_from_ip(row, params):
    """Placeholder: enrich *row* with geo data from its IP. Currently a no-op."""
    return row
def tag(row, params):
    """Placeholder: attach tags to *row*. Currently a no-op."""
    return row
def parse_dataflow(df):
    """Build the processing pipeline described by the *df* config list.

    Returns (head, tail): the stream sources connect to, and the stream
    sinks attach to. Unknown function names are skipped.
    """
    FUNC_MAP = {
        "norm_to_ecs": norm_to_ecs,
        "lookup_geo_from_ip": lookup_geo_from_ip,
        "tag": tag,
    }
    head = Stream()
    tail = head
    for flow in df:
        handler = FUNC_MAP.get(flow["func"])
        if handler is not None:
            # Each step maps the handler with its (possibly empty) params.
            tail = tail.map(handler, flow.get("parameters", {}))
    return head, tail
def parse_debug_console(df, params):
    """Terminate *df* by printing every element to the console."""
    return df.sink(print)
def parse_file_direct(df, params):
    """Terminate *df* by appending each element to the configured text file."""
    return df.sink_to_textfile(params["filename"])
def parse_sinks(sinks, stream):
    """Attach every configured sink to *stream*; return the sink streams.

    BUG FIX: the original rebound the ``stream`` variable on each
    iteration, so the second and later sinks were attached to the previous
    sink's output instead of fanning out from the shared input stream.

    Raises KeyError for unknown sink types.
    """
    SINK_MAP = {
        "DebugConsole" : parse_debug_console,
        "FileDirect" : parse_file_direct
    }
    streams = []
    for sink in sinks:
        # Always attach to the original input stream, never to a sink.
        sink_stream = SINK_MAP[sink["type"]](stream, sink["parameters"])
        streams.append(sink_stream)
    return streams
# --- Script entry: load the example dataflow config and run it. ---
with open("dataflows/example.json") as fd:
    dataflow = json.load(fd)
# Each source is a (record-iterable, input-stream) pair.
sources = parse_sources(dataflow["sources"])
# ds is the head of the processing pipeline, df its tail.
ds, df = parse_dataflow(dataflow["dataflow"])
for d, s in sources:
    s.connect(ds)
# NOTE(review): parse_sinks already attaches each sink to df; connecting
# df to the sink streams again below looks redundant -- verify intent.
sinks = parse_sinks(dataflow["sinks"], df)
for sink in sinks:
    df.connect(sink)
# Render the pipeline graph, then push every record through as a string.
df.visualize("df.png")
for d, s in sources:
    for r in d:
        s.emit(str(r))
|
import sqlite3
# Open (or create) the local bookstore database.
conexao =sqlite3.connect('livraria.db')
cur=conexao.cursor()
# Parameterized INSERT -- the values are bound below, so there is no SQL
# injection risk from the user input.
sql="insert into tb_cliente(cpf,nome,idade) values(?,?,?)"
# NOTE(review): the input() arguments ('423', 'Ana', 23) look like intended
# example *values*, but input() uses its argument as the console *prompt*;
# the stored values are whatever the user types. Verify intent.
cpf1=input('423')
nome1 = input('Ana')
idade1 = input(23)
cur.execute(sql,(cpf1,nome1,idade1))
conexao.commit()
print("one record added successfully")
cur.close()
conexao.close()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
    """Add the Keyword and Tasting models and relax Brewer.brewery.

    Auto-generated Django migration. The ``b'...'`` choice labels stem
    from Python 2-era generation (``unicode_literals`` is in effect).
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('crowd_brew', '0001_initial'),
    ]
    operations = [
        # Keyword: a categorized descriptive word attached to a Brew.
        migrations.CreateModel(
            name='Keyword',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Category defaults to 5 ("Overall").
                ('category', models.IntegerField(default=5, choices=[(1, b'Appearance'), (2, b'Smell'), (3, b'Taste'), (4, b'Mouthfeel'), (5, b'Overall')])),
                ('key', models.CharField(max_length=128)),
                # NOTE(review): field is named "tasting" but points to Brew.
                ('tasting', models.ForeignKey(related_name='keywords', to='crowd_brew.Brew')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Tasting: a user's 0.0-5.0 rating of a Brew across five aspects.
        migrations.CreateModel(
            name='Tasting',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('appearance', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(5.0)])),
                ('smell', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(5.0)])),
                ('taste', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(5.0)])),
                ('mouthfeel', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(5.0)])),
                ('overall', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(5.0)])),
                ('brew', models.ForeignKey(related_name='tastings', to='crowd_brew.Brew')),
                ('user', models.ForeignKey(related_name='tastings', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Allow a Brewer to exist without a Brewery.
        migrations.AlterField(
            model_name='brewer',
            name='brewery',
            field=models.ForeignKey(related_name='brewers', blank=True, to='crowd_brew.Brewery', null=True),
            preserve_default=True,
        ),
    ]
|
from unittest import TestCase
from uuid import uuid4
from zeroos.core0.client import Client
from loguru import logger
import configparser
import redis
import base64
import json
import redis
logger.add("tf_gateway.log", format="{time} {level} {message}", level="INFO")
class BaseTest(TestCase):
    """Base class for tf_gateway tests: config loading, container
    deploy/teardown, and redis/coredns record helpers.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Load test settings; optionxform=str keeps option names case-sensitive.
        config = configparser.ConfigParser()
        config.optionxform = str
        config.read("config.ini")
        self.jwt = config["main"]["jwt"]
        # "deploy"/"destroy" are compared against the literal string "True".
        self.deploy = config["main"]["deploy"]
        self.destroy = config["main"]["destroy"]
        self.coredns_node_ip = config["tf_gateway"]["node_ip"]
        self.coredns_redis_port = config["tf_gateway"]["redis_port"]
    @classmethod
    def setUpClass(cls):
        # NOTE(review): instantiating the TestCase just to read config is
        # unusual -- confirm cls() works without a test-method name here.
        self = cls()
        if self.deploy == "True":
            # deploy tf gateway container
            cl = Client(host=self.coredns_node_ip, password=self.jwt)
            cls.tf_gateway_id = cl.container.create(
                name="test_tf_gateway",
                root_url="https://hub.grid.tf/tf-autobuilder/threefoldtech-tf_gateway-tf-gateway-master.flist",
                # NOTE(review): the id value " None" contains a leading
                # space and is a string -- verify this is intended.
                nics=[{"type": "default", "name": "defaultnic", "id": " None"}],
                port={"53|udp": 53, "443": 443, self.coredns_redis_port: 6379},
            ).get()
    @classmethod
    def tearDownClass(cls):
        self = cls()
        if self.destroy == "True":
            # Terminate the container deployed in setUpClass.
            cl = Client(host=self.coredns_node_ip, password=self.jwt)
            cl.container.terminate(self.tf_gateway_id)
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def log(self, msg):
        # Route test messages through the module-level loguru logger.
        logger.info(msg)
    def random_string(self):
        # 10-character random identifier derived from a UUID4.
        return str(uuid4())[:10]
    def backup_file(self, path):
        # Return the full current contents of *path* for later restore.
        with open(path, "r") as f:
            backup = f.read()
        return backup
    # NOTE(review): method name "wirte_file" is a typo for "write_file";
    # kept as-is because callers may depend on it.
    def wirte_file(self, path, content, rw="w+"):
        with open(path, rw) as f:
            f.write(content)
    def delete_redis_record(self, name):
        # Connects to redis on localhost with default settings.
        r = redis.Redis()
        r.delete(name)
    def delete_tcp_record(self, name):
        # Accepts either a bare service name or a full tcprouter key.
        if not name.startswith("/tcprouter/service/"):
            name = "/tcprouter/service/{}".format(name)
        self.delete_redis_record(name)
    def delete_all_tcp_records(self):
        r = redis.Redis()
        keys = r.keys()
        for key in keys:
            try:
                key = key.decode()
            except Exception:
                # Skip keys that are not valid UTF-8.
                continue
            if key.startswith("/tcprouter/service"):
                self.delete_redis_record(key)
    def delete_coredns_record(self, name):
        # coredns records live in a redis hash keyed by the zone name.
        r = redis.Redis()
        r.hdel("bots.grid.tf.", name)
    def delete_all_coredns_records(self):
        # Drops the whole zone hash at once.
        self.delete_redis_record("bots.grid.tf.")
|
import torchvision.models as models
from torchvision import transforms
from PIL import Image
import torch
import json
import os
# mobilenet = models.mobilenet_v2()
# torch.save(mobilenet, 'mobilenet.pt')
def predict_pytorch(model_fname, img):
    """Run MobileNetV2 inference on *img*; print and return the result JSON.

    Loads weights from *model_fname*, preprocesses *img* (a PIL image),
    decodes the top ImageNet class, prints the JSON result (as before) and
    now also returns it so callers can use the prediction programmatically
    (previously the function returned None).
    """
    model = load_model(model_fname)
    batch_t = preprocess_image(img)
    out = model(batch_t)
    # Labels file is resolved relative to the current working directory.
    cwd = os.getcwd()
    labels_path = os.path.join(cwd, "../prediction/imagenet_classes.txt")
    preds = decode_predictions(out, labels_path)
    preds_json = json.dumps(preds)
    print(preds_json)
    return preds_json
def load_model(model_fname):
    """Build a MobileNetV2, load its weights from *model_fname*, set eval mode."""
    net = models.mobilenet_v2()
    net.load_state_dict(torch.load(model_fname))
    # Inference mode: disables dropout/batch-norm training behavior.
    net.eval()
    return net
def preprocess_image(img):
    """Resize, crop and normalize *img*; return a single-image batch tensor."""
    # Standard ImageNet preprocessing: 256 resize, 224 center crop,
    # tensor conversion, then per-channel mean/std normalization.
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
    ])
    tensor = pipeline(img)
    # Add the leading batch dimension expected by the model.
    return tensor.unsqueeze(0)
def decode_predictions(out, labels_path):
    """Return {"label", "confidence"} for the top class in logits *out*.

    *labels_path* is a text file with one class label per line; confidence
    is the softmax probability of the top class, as a percentage string.
    """
    with open(labels_path) as f:
        labels = [line.strip() for line in f]
    # Index of the highest-scoring class and per-class percentages.
    _, index = torch.max(out, 1)
    percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100
    best = index[0]
    return {
        "label": str(labels[best]),
        "confidence": str(percentage[best].item()),
    }
|
#! /usr/bin/env python3
#from numba import jit
from random import choice
# by bar (horizontal):
# a b a c
# or (vertical)
# a b c d
# a e c g
# or (both)
# a b a c
# a d a e
# by segment (horizontal):
# a b a b
# or (vertical)
# a b c d
# a b e f
# or (both)
# a b c a b
# a b d a b
# by phrase:
# a b c d
# a b c d
# by segment 2:
# a a b a b
# a b c a b
# a b a a b
# a b c a b
# a b a b b
# a b c a b
# a b a b c
# by segment 2 (horizontal)
# a a b a b
# a b a b c
# a b c a b
# [[3, 3], [3, 4]]
# [[3, 3], [3, 4]]
# [[3, 3], [3, 1, 1, 2]]
# (a a) (a b) => vary seg0, repeat seg00, vary seg01
# (a a) (a c c d) => repeat seg0, vary seg1, repeat seg10, split seg11
# Candidate segment-index patterns for a phrase. Each inner list maps the
# positions of a phrase to (possibly reused) segment ids: entries 0-3 are
# one-segment phrases, 4-7 two-segment, 8-11 three-segment. random_phrase
# slices this table ([4:] or [:8]) based on the phrase count.
phrases_db = [
    [0],
    [0],
    [0],
    [0],
    [0, 0],
    [0, 1],
    [0, 0],
    [0, 1],
    [0, 0, 0],
    [0, 0, 1],
    [0, 1, 1],
    [0, 1, 2],
    #[0, 1, 1, 2],
    #[0, 0, 0, 1],
    #[0, 0, 1, 2],
    #[0, 1, 0, 1],
    #[0, 1, 0, 2],
    #[0, 1, 2, 2],
]
#@jit
class Phrase:
    """A phrase: an ordered list of segment ids plus their distinct values.

    Code elsewhere reads ``uniq[-1]`` as the largest segment id, so the
    distinct ids must be sorted.
    """

    def __init__(self, segments):
        self.segments = segments
        # BUG FIX: sorted() makes uniq deterministic with the largest id
        # last; list(set(...)) only happened to be ordered for small ints
        # in CPython (set iteration order is unspecified).
        self.uniq = sorted(set(segments))
def random_phrase (nphrase):
    """Pick a random segment pattern from phrases_db and wrap it in a Phrase.

    With two or fewer phrases, only the multi-segment patterns (index 4
    onward) are used; otherwise only the first eight patterns are.
    """
    if nphrase <= 2:
        pattern = choice (phrases_db[4:])
    else:
        pattern = choice (phrases_db[:8])
    return Phrase (pattern)
def apply_phrase (section):
    """Expand a section into phrases, segments, bars and their mappings.

    NOTE(review): this function cannot run as-is in this module:
    - ``apply_segment`` and ``random_bar`` are not defined in this file;
    - ``nbar`` is used before it is ever assigned (first assignment is
      inside the mappings2 loop below);
    - ``nsegment`` is used before assignment (first assigned inside the
      mappings1 loop);
    - ``shuffle`` is never imported (only ``choice`` is).
    Confirm against the module these helpers were meant to come from.
    """
    # Number of distinct phrases; relies on uniq[-1] being the max id.
    nphrase = section.uniq[-1] + 1
    #print ("nphrase=%s" % nphrase, end="\n")
    #phrases = []
    #for phrase_no in range (0, nphrase):
    # phrases[phrase_no] = random_phrase ()
    phrases = [random_phrase (nphrase) for _ in range (0, nphrase)]
    #print ("phrases=%s" % phrases, end="\n")
    # Total number of segment slots across the section's phrase sequence.
    tot = 0
    for phrase_no in section.phrases:
        phrase = phrases[phrase_no]
        tot = tot + len (phrase.segments)
    segments = apply_segment (section, nphrase, phrases, tot)
    # Total number of bar slots across all segments of all phrases.
    tot = 0
    for phrase_no in section.phrases:
        phrase = phrases[phrase_no]
        for segment_no in phrase.segments:
            segment = segments[segment_no]
            tot = tot + len (segment.bars)
    #print ("tot=%s" % tot, end="\n")
    #bars = []
    #for bar_no in range (0, nbar):
    # bars[bar_no] = random_bar ()
    #bars = [random_bar (len (section.phrases) * nbar) for _ in range (0, nbar)]
    bars = [random_bar (tot) for _ in range (0, nbar)]
    #print ("bars=%s" % bars, end="\n")
    # mappings1: per phrase, shuffled global segment index for each local id.
    mappings1 = []
    segno = 0
    seg_ndx = list (range (0, nsegment))
    shuffle (seg_ndx)
    #print ("seg_ndx=%s" % seg_ndx, end="\n")
    for phrase_no in range (0, len (phrases)):
        phrase = phrases[phrase_no]
        nsegment = phrase.uniq[-1] + 1
        temp = []
        for segment_no in range (0, nsegment):
            temp = temp + [seg_ndx[segno]]
            segno = segno + 1
        mappings1 = mappings1 + [temp]
    # mappings2: per segment, shuffled global bar index for each local id.
    mappings2 = []
    barno = 0
    bar_ndx = list (range (0, nbar))
    shuffle (bar_ndx)
    #print ("bar_ndx=%s" % bar_ndx, end="\n")
    for seg_no in range (0, len (segments)):
        segment = segments[seg_no]
        nbar = segment.uniq[-1] + 1
        temp = []
        for bar_no in range (0, nbar):
            temp = temp + [bar_ndx[barno]]
            barno = barno + 1
        mappings2 = mappings2 + [temp]
    return phrases, segments, bars, mappings1, mappings2
|
import altair as alt
import pandas as pd
def macro_plot(df, goal_df, plot_width=200):
    """Render stacked macro-nutrient bars with red goal ticks.

    Parameters
    ----------
    df : pandas.DataFrame
        Actual intake; must have 'cat' and 'value' columns.
    goal_df : pandas.DataFrame
        Goal values; same 'cat'/'value' layout.
    plot_width : int
        Width in pixels of each bar chart (default 200).

    Returns
    -------
    alt.VConcatChart
        One bar+tick row per category: calories, protein, carb, fat.

    The original had four copy-pasted chart/tick blocks; they are collapsed
    into one helper with identical encodings.
    """
    def _row(cat, title):
        # Bar of the actual value; the X axis lists both actual and goal
        # values so the numbers are readable without tick marks.
        bar = alt.Chart(df[df['cat'] == cat]).mark_bar().encode(
            alt.Y('cat', title=title,
                  axis=alt.Axis(labels=False, ticks=False,
                                titleAngle=0, titlePadding=40)),
            alt.X('value', title='',
                  axis=alt.Axis(
                      values=df.loc[df['cat'] == cat]['value'].to_list()
                      + goal_df.loc[goal_df['cat'] == cat]['value'].to_list(),
                      ticks=False)),
        ).properties(width=plot_width, height=30)
        # Red tick marking the goal value on the same row.
        tick = alt.Chart(goal_df[goal_df['cat'] == cat]).mark_tick(
            color='red',
            thickness=2,
            size=40 * 0.9,  # controls width of the tick.
        ).encode(alt.X('value'), alt.Y('cat'))
        return bar + tick

    rows = [_row(cat, title) for cat, title in
            (('calories', 'Calories'), ('protein', 'Protein'),
             ('carb', 'Carb'), ('fat', 'Fat'))]
    return alt.vconcat(*rows)
|
from sendbee_api.query_params import QueryParams
class ChatbotActivity(QueryParams):
    """Parameters for turning the chatbot on or off for a conversation"""
    # Each attribute is a (query-param name, human-readable description)
    # pair consumed by the QueryParams base class.
    conversation_id = 'conversation_id', 'Conversation UUID'
    active = 'active', 'Chatbot activity'
class ChatbotActivityStatus(QueryParams):
    """Parameters for getting chatbot status for a conversation"""
    # (query-param name, human-readable description) pair, as in
    # ChatbotActivity.
    conversation_id = 'conversation_id', 'Conversation UUID'
|
from utils.myqrcode import Qrcode
if __name__ == '__main__':
    # icon_path = 'icon.jpeg'
    # icon_img = Qrcode.convert_local_icon_to_img(icon_path=icon_path)
    # Fetch the icon image from a web URL (a WeChat avatar here) instead
    # of the commented-out local-file path above.
    icon_url = 'http://thirdwx.qlogo.cn/mmopen/vi_32/3IySpcoGTPQu2WbNUGFhq2icLicJpMS1uFdfDWFdE0lv9cjPGeEBors5E6fXqo0ictQlRws1ZwpUTVQS6yDeVsDeQ/132'
    icon_img = Qrcode.convert_web_icon_to_img(icon_url=icon_url)
    # Text/URL to encode, and where to write the resulting image.
    text = 'http://www.baidu.com'
    save_path = 'out.png'
    # Render the QR code with the icon added on top (presumably overlaid
    # at the center -- behavior lives in utils.myqrcode.Qrcode).
    Qrcode.add_icon_on_qrcode(qrcode_text=text, icon_img=icon_img, save_path=save_path)
|
import pandas as pd
import numpy as np
import copy
import sympy as sp
from sympy import sympify
def get_pivotzeile(copy_tableau, pivot_spalte, anzahl_zeilen):
    """Return the index of the pivot row (minimum-ratio rule).

    Divides each constraint row's quantity (column 2) by its coefficient
    in the pivot column, ignoring the two header rows (0, 1) and the two
    bottom rows (zj and cj-zj); non-positive coefficients are excluded.
    """
    # Work on a copy so the caller's tableau is not modified.
    copy_tableau = copy.deepcopy(copy_tableau)
    # Resource-consumption coefficients of the pivot column (constraint rows only).
    pivot_spalte_values = copy_tableau.iloc[copy_tableau.index.difference([0, 1, (anzahl_zeilen-1), (anzahl_zeilen-2)]), pivot_spalte]
    # Quantities of the constraints.
    quantity = copy_tableau.iloc[copy_tableau.index.difference([0, 1, (anzahl_zeilen-1), (anzahl_zeilen-2)]), 2]
    # Prevent division by zero or by negative coefficients.
    pivot_spalte_values.mask(pivot_spalte_values <= 0 , np.nan, inplace = True)
    # Helper series of ratios used to determine the pivot row.
    copy_tableau = quantity / pivot_spalte_values
    # Return the row id with the smallest ratio (NaN entries skipped).
    return copy_tableau.astype(float).idxmin(skipna=True)
def get_pivotspalte(copy_tableau, infinite):
    """Return the column index of the pivot column (largest cj-zj entry)."""
    # Work on a copy so the caller's tableau is not modified.
    copy_tableau = copy.deepcopy(copy_tableau)
    # Loop over all columns.
    for column in copy_tableau:
        # Only columns holding resource-consumption coefficients are considered.
        if column != 0 and column != 1 and column != 2:
            # To find the largest cj-zj entry, symbolic M terms must be
            # replaced by a large finite stand-in for infinity.
            if isinstance(copy_tableau.iloc[-1,column], sp.Basic): # filter cells containing M
                copy_tableau.iloc[-1,column] = copy_tableau.iloc[-1,column].subs(infinite, 9999)
            copy_tableau.iloc[-1,column] = int(copy_tableau.iloc[-1,column])
    # Determine the column id holding the largest value.
    pivot_spalte = copy_tableau.iloc[-1,3:].astype(float).idxmax(axis=0)
    return pivot_spalte
#-----------------------------------------------------------------------------
def update_simplex_tableau(copy_tableau, pivot_zeile, pivot_spalte, anzahl_zeilen):
    """Perform one pivot step on the tableau and return it."""
    # Scale the pivot row so the pivot element becomes 1.
    copy_tableau.iloc[pivot_zeile, 2:] = (copy_tableau.iloc[pivot_zeile, 2:] / copy_tableau.iloc[pivot_zeile,pivot_spalte])
    # Swap the entering basic variable in for the leaving one.
    copy_tableau = update_pivotzeile(copy_tableau, pivot_zeile, pivot_spalte)
    # Update the remaining constraint quantities and coefficients.
    copy_tableau = update_basis_variables(copy_tableau, pivot_zeile, pivot_spalte, anzahl_zeilen)
    return copy_tableau
def update_pivotzeile(copy_tableau, alte_basis_var, neue_basis_var):
    """Swap the entering variable into the row of the leaving basic variable.

    Copies the objective coefficient (header row 0) and the variable name
    (header row 1) of the entering column into columns 0 and 1 of the
    leaving row.
    """
    for header_row in (0, 1):
        copy_tableau.iloc[alte_basis_var, header_row] = \
            copy_tableau.iloc[header_row, neue_basis_var]
    return copy_tableau
def update_basis_variables(copy_tableau, pivot_zeile, pivot_spalte, anzahl_zeilen):
    """Zero the pivot column in every non-pivot constraint row."""
    for index in copy_tableau.index:
        # For each row of the unchanged basic variables (skipping the two
        # header and two bottom rows), subtract the scaled pivot row so the
        # pivot-column entry becomes 0.
        if index != pivot_zeile and index != 0 and index != 1 and index != anzahl_zeilen-1 and index != anzahl_zeilen-2:
            copy_tableau.iloc[index, copy_tableau.columns.difference([0, 1], sort=False)] = copy_tableau.iloc[index, copy_tableau.columns.difference([0,1], sort=False)] - ((copy_tableau.iloc[pivot_zeile, copy_tableau.columns.difference([0, 1], sort=False)] * copy_tableau.iloc[index, pivot_spalte]))
    return copy_tableau
#----------------------------------------------------------------------------
def get_cj_zj(copy_tableau):
    """Recompute the zj (row -2) and cj-zj (row -1) rows in place."""
    #print(anzahl_zeilen)
    anzahl_zeilen = len(copy_tableau.index)
    # Compute the zj row.
    for column in range(0, len(copy_tableau.columns)):
        if column != 0 and column != 1:
            # cj of the basic variables (column 0, constraint rows only) ...
            cj_basisvar = copy_tableau.iloc[copy_tableau.index.difference([0,1, anzahl_zeilen-1, anzahl_zeilen-2], sort=False ), 0]
            # ... times their coefficients in the current column.
            restr_var = copy_tableau.iloc[copy_tableau.index.difference([0,1, anzahl_zeilen-1, anzahl_zeilen-2], sort=False ), column]
            temp = cj_basisvar * restr_var
            copy_tableau.iloc[-2, column] = temp.sum()
    # Compute the cj-zj row (objective row minus zj row).
    copy_tableau.iloc[-1, copy_tableau.columns.difference([0, 1, 2], sort=False )] = copy_tableau.iloc[0, copy_tableau.columns.difference([0 ,1 ,2], sort=False )] - copy_tableau.iloc[-2, copy_tableau.columns.difference([0, 1,2], sort=False )]
    return copy_tableau
#Berechne maximalen cj-zj Wert
# Compute the maximum cj-zj value.
def get_max_cj_zj(copy_tableau, infinite):
    """Return the largest cj-zj entry, with M replaced by a large number."""
    # Work on a copy so the caller's tableau is not modified.
    copy_tableau = copy.deepcopy(copy_tableau)
    for column in copy_tableau:
        if column != 0 and column != 1 and column != 2:
            # Replace symbolic M terms so the row compares numerically.
            if isinstance(copy_tableau.iloc[-1,column], sp.Expr):
                copy_tableau.iloc[-1,column] = copy_tableau.iloc[-1,column].subs(infinite, 9999)
                copy_tableau.iloc[-1,column] = int(copy_tableau.iloc[-1,column])
    max_value = copy_tableau.iloc[-1,3:].astype(float).max(axis=0)
    return max_value
#Prüfe auf Ausführbarkeit
def check_infeasibility(last_tableau, liste_meldungen, finished):
#Wenn in der finalen Lösungsmenge ein M ist, ist auch eine künstliche Variable in der Lösung
#prüfe ob M vorhanden ist und ob eine Lösung gefunden wurde
if isinstance(last_tableau.iloc[-2,2], sp.Basic) and finished:
liste_meldungen.append("Spezialfall: Unausführbarkeit (Infeasibility) -> Falls ein optimales Tableau eine künstliche Variable enthält, ist das Problem unlösbar („infeasible“).")
#Prüfe auf unbeschraenkten Lösungsraum
def check_unbeschraenkter_loesungsraum(check, liste_meldungen):
    """Detect an unbounded solution space.

    *check* is the pivot-row result; NaN means no admissible pivot row was
    found. In that case a message is appended to *liste_meldungen* and
    True is returned, otherwise False.
    """
    if not np.isnan(check):
        return False
    liste_meldungen.append("Spezialfall: Unbeschränkter Lösungsraum -> keine zulässige Pivotzeile => Lösungsraum unbeschränkt.")
    return True
def simplex_algorithm(tableau, counter_limit, infinite):
    """Run Big-M simplex iterations on *tableau* until optimal or aborted.

    Returns (list of tableaus per iteration, list of message lists per
    tableau, list of [pivot row, pivot column] pairs). *infinite* is the
    sympy symbol standing in for M; *counter_limit* caps the iterations.
    """
    anzahl_zeilen = len(tableau.index)
    counter = 0 # iteration count; used to abort at counter_limit
    ende = False # whether the simplex reached an optimal result
    Meldungen = [] # list of special-case message lists
    list_pivot_elements = []
    list_tableaus = [copy.deepcopy(tableau.fillna(''))] # keep the initial tableau
    # Iterate as long as cj-zj still has a positive entry.
    while get_max_cj_zj(tableau, infinite) > 0 :
        Meldungen.append([]) # message list (refers to the previous tableau)
        Pivotspalte = get_pivotspalte(tableau, infinite)
        Pivotzeile = get_pivotzeile(tableau, Pivotspalte, anzahl_zeilen)
        list_pivot_elements.append([Pivotzeile, Pivotspalte])
        if check_unbeschraenkter_loesungsraum(Pivotzeile, Meldungen[counter]):
            # Abort if the solution space is unbounded.
            break
        update_simplex_tableau(tableau, Pivotzeile, Pivotspalte, anzahl_zeilen)
        tableau = get_cj_zj(tableau)
        tableau = tableau.fillna('') # blank out all unused cells
        list_tableaus.append(copy.deepcopy(tableau)) # store the new tableau
        counter += 1
        if counter == counter_limit:
            break
    if get_max_cj_zj(tableau, infinite) <= 0:
        # A (locally optimal) result was found.
        ende = True
    # Messages for the final tableau.
    Meldungen.append([])
    list_pivot_elements.append([None,None])
    # Check solvability of the final tableau.
    check_infeasibility(list_tableaus[-1], Meldungen[-1], ende )
    return list_tableaus, Meldungen, list_pivot_elements
|
"""Environment for training automated vehicles in a mixed-autonomy setting."""
import numpy as np
from gym.spaces import Box
from copy import deepcopy
from hbaselines.envs.mixed_autonomy.envs import AVEnv
from hbaselines.envs.mixed_autonomy.envs.av import CLOSED_ENV_PARAMS
from hbaselines.envs.mixed_autonomy.envs.av import OPEN_ENV_PARAMS
from hbaselines.envs.mixed_autonomy.envs.utils import get_rl_accel
from hbaselines.envs.mixed_autonomy.envs.utils import update_rl_veh
class AVMultiAgentEnv(AVEnv):
    """Multi-agent variants of AVEnv."""
    @property
    def action_space(self):
        """See class definition."""
        # With the follower-stopper controller the action is a desired
        # speed in [0, 15]; otherwise a normalized value in [-1, 1] that is
        # rescaled to an acceleration in _apply_rl_actions.
        if self.env_params.additional_params["use_follower_stopper"]:
            return Box(
                low=0,
                high=15,
                shape=(1,),
                dtype=np.float32)
        else:
            return Box(
                low=-1,
                high=1,
                shape=(1,),
                dtype=np.float32)
    @property
    def observation_space(self):
        """See class definition."""
        # Three values per stored frame, stacked over _obs_frames frames.
        return Box(
            low=-float('inf'),
            high=float('inf'),
            shape=(3 * self._obs_frames,),
            dtype=np.float32)
    def step(self, rl_actions):
        """See parent class."""
        obs, reward, done, info = super(AVMultiAgentEnv, self).step(rl_actions)
        # Replace done mask with dictionary.
        done = {key: done for key in obs.keys()}
        return obs, reward, done, info
    def _apply_rl_actions(self, rl_actions):
        """See class definition."""
        if self.env_params.additional_params["use_follower_stopper"]:
            # Feed each agent's desired speed through the shared
            # follower-stopper controller to get an acceleration.
            for veh_id in rl_actions.keys():
                self._av_controller.veh_id = veh_id
                self._av_controller.v_des = rl_actions[veh_id][0]
                acceleration = self._av_controller.get_action(self)
                # Apply the action via the simulator.
                self.k.vehicle.apply_acceleration(veh_id, acceleration)
        else:
            rl_ids = list(rl_actions.keys())
            # Rescale the normalized [-1, 1] actions to accelerations.
            acceleration = get_rl_accel(
                accel=[deepcopy(rl_actions[veh_id][0]) for veh_id in rl_ids],
                vel=self.k.vehicle.get_speed(rl_ids),
                max_accel=self.env_params.additional_params["max_accel"],
                dt=self.sim_step,
            )
            # Run the action through the controller, to include failsafe
            # actions.
            for i, veh_id in enumerate(rl_ids):
                acceleration[i] = self.k.vehicle.get_acc_controller(
                    veh_id).get_action(self, acceleration=acceleration[i])
            # Apply the action via the simulator.
            self.k.vehicle.apply_acceleration(
                acc=acceleration, veh_ids=list(rl_actions.keys()))
    def compute_reward(self, rl_actions, **kwargs):
        """See class definition."""
        # In case no vehicles were available in the current step, pass an empty
        # reward dict.
        if rl_actions is None:
            return {}
        rl_ids = list(rl_actions.keys())
        num_vehicles = self.k.vehicle.num_vehicles
        vel = np.array(self.k.vehicle.get_speed(self.k.vehicle.get_ids()))
        if any(vel < -100) or kwargs["fail"] or num_vehicles == 0:
            # Return a reward of 0 case of collisions or an empty network.
            reward = {key: 0 for key in rl_ids}
        else:
            c1 = 0.005  # reward scale for the speeds
            c2 = 0.100  # reward scale for the accelerations
            # Penalize deviation from the equilibrium speed and large
            # accelerations, per agent.
            reward = {
                key: (- c1 * (self.k.vehicle.get_speed(key) - self._v_eq) ** 2
                      - c2 * self.k.vehicle.get_accel(key) ** 2)
                for key in rl_ids
            }
        return reward
    def get_state(self):
        """See class definition."""
        # Update the storage of observations for individual vehicles.
        self._update_obs_history()
        # Initialize a set on empty observations
        obs = {key: None for key in self.rl_ids()}
        for i, veh_id in enumerate(self.rl_ids()):
            # Concatenate the past n samples for a given time delta in the
            # output observations.
            obs_t = np.concatenate(self._obs_history[veh_id][::-self._skip])
            # Zero-pad up to the fixed observation length when fewer than
            # _obs_frames samples are available yet.
            obs_vehicle = np.array([0. for _ in range(3 * self._obs_frames)])
            obs_vehicle[:len(obs_t)] = obs_t
            obs[veh_id] = obs_vehicle
        return obs
class AVClosedMultiAgentEnv(AVMultiAgentEnv):
    """Closed network variant of AVMultiAgentEnv."""

    def __init__(self, env_params, sim_params, network, simulator='traci'):
        """See parent class.

        Raises
        ------
        KeyError
            If any key from CLOSED_ENV_PARAMS is missing from
            ``env_params.additional_params``.
        """
        # Iterate the dict directly (idiomatic; ".keys()" is redundant).
        for p in CLOSED_ENV_PARAMS:
            if p not in env_params.additional_params:
                raise KeyError('Env parameter "{}" not supplied'.format(p))
        super(AVClosedMultiAgentEnv, self).__init__(
            env_params=env_params,
            sim_params=sim_params,
            network=network,
            simulator=simulator,
        )
class AVOpenMultiAgentEnv(AVMultiAgentEnv):
    """Open network variant of AVMultiAgentEnv."""
    def __init__(self, env_params, sim_params, network, simulator='traci'):
        """See parent class."""
        # Validate that all required open-network parameters are present.
        for p in OPEN_ENV_PARAMS.keys():
            if p not in env_params.additional_params:
                raise KeyError('Env parameter "{}" not supplied'.format(p))
        # "warmup_path" and "inflows" are mutually exclusive settings.
        assert not (env_params.additional_params["warmup_path"] is not None
                    and env_params.additional_params["inflows"] is not None), \
            "Cannot assign a value to both \"warmup_path\" and \"inflows\""
        super(AVOpenMultiAgentEnv, self).__init__(
            env_params=env_params,
            sim_params=sim_params,
            network=network,
            simulator=simulator,
        )
    def step(self, rl_actions):
        """See parent class."""
        obs, reward, done, info = super(AVOpenMultiAgentEnv, self).step(
            rl_actions)
        # Set the done mask for cars the exited the control range to True.
        for key in obs.keys():
            if self.k.vehicle.get_x_by_id(key) > self._control_range[1]:
                done[key] = True
        return obs, reward, done, info
    def rl_ids(self):
        """See parent class."""
        # Only the vehicles currently inside the control range are agents.
        return self.rl_veh
    def additional_command(self):
        """See definition in AVOpenEnv."""
        super(AVOpenMultiAgentEnv, self).additional_command()
        # Update the RL lists.
        self.rl_queue, self.rl_veh, self.removed_veh = update_rl_veh(
            self,
            rl_queue=self.rl_queue,
            rl_veh=self.rl_veh,
            removed_veh=self.removed_veh,
            control_range=self._control_range,
            num_rl=self.num_rl,
            # Sorted by position so the furthest-along vehicles come first.
            rl_ids=reversed(sorted(
                self.k.vehicle.get_rl_ids(), key=self.k.vehicle.get_x_by_id)),
        )
        # Specify actions for the uncontrolled RL vehicles based on human-
        # driven dynamics.
        for veh_id in list(
                set(self.k.vehicle.get_rl_ids()) - set(self.rl_veh)):
            self._rl_controller.veh_id = veh_id
            acceleration = self._rl_controller.get_action(self)
            self.k.vehicle.apply_acceleration(veh_id, acceleration)
|
from regression_tests import *
class Test(Test):
    """Fileinfo regression test: a VMProtect-packed sample, JSON output.

    NOTE(review): the class shadows the ``Test`` base brought in by the
    star import from regression_tests -- confirm this matches the
    framework's convention.
    """
    settings = TestSettings(
        tool='fileinfo',
        input='VMProtect-3.4_demo.ex',
        args='--json'
    )
    def test_correctly_analyzes_input_file(self):
        # The tool must succeed and report VMProtect 2.04+ as the packer.
        self.assertTrue(self.fileinfo.succeeded)
        self.assertEqual(self.fileinfo.output['tools'][0]['name'], 'VMProtect')
        self.assertEqual(self.fileinfo.output['tools'][0]['version'], '2.04+')
|
from logging import handlers
class BufferingSMTPHandler(handlers.SMTPHandler):
    """
    BufferingSMTPHandler works like SMTPHandler log handler except that
    it buffers log messages until buffer size reaches or exceeds the specified
    capacity at which point it will then send everything that was buffered up
    until that point in one email message. Contrast this with SMTPHandler
    which sends one email per log message received.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None,
                 secure=None, capacity=1024):
        # Delegate SMTP configuration to the stdlib SMTPHandler; only the
        # buffer and its capacity are added here.
        handlers.SMTPHandler.__init__(self, mailhost, fromaddr,
                                      toaddrs, subject,
                                      credentials, secure)
        self.capacity = capacity
        self.buffer = []
    def emit(self, record):
        # Buffer the record; send everything once capacity is reached.
        try:
            self.buffer.append(record)
            if len(self.buffer) >= self.capacity:
                self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Mirrors the stdlib handler convention of routing all other
            # errors through handleError.
            self.handleError(record)
    def flush(self):
        # buffer on termination may be empty if capacity is an exact multiple
        # of lines that were logged--thus we need to check for empty buffer
        if not self.buffer:
            return
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            # Concatenate all buffered records into one message body.
            msg = ""
            for record in self.buffer:
                msg = msg + self.format(record) + "\r\n"
            # Subject is taken from the first buffered record.
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                self.fromaddr,
                ",".join(self.toaddrs),
                self.getSubject(self.buffer[0]),
                formatdate(), msg)
            if self.username:
                # Optional STARTTLS upgrade before authenticating.
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            # NOTE(review): quit() is not in a finally block, so the SMTP
            # connection leaks if sendmail raises; and on failure the
            # buffer is kept, so the same records may be re-sent later.
            smtp.quit()
            self.buffer = []
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(self.buffer[0])
|
"""
Write knitting pattern data to a bitmap file.
---
Based on an example by Anders Holden Deleuran
https://discourse.mcneel.com/t/write-a-bitmap-pixel-by-pixel-in-python/81116/2
Inputs:
Write: Set to True to save the bitmap file.
{item, bool}
PixelData: The knitting pattern pixel data to write to the bitmap file.
Use the MakePatternData component to obtain the pixel data
for a KnitNetwork.
{item, PixelData}
Path: The file path to a .bmp file where the knitting pattern should
be written.
{item, str}
Remarks:
Author: Max Eschenbach
License: MIT License
Version: 200813
"""
# PYTHON STANDARD LIBRARY IMPORTS
from __future__ import division
from os import path
# GHPYTHON SDK IMPORTS
from ghpythonlib.componentbase import executingcomponent as component
import Grasshopper, GhPython
import System
import Rhino
import rhinoscriptsyntax as rs
# GHENV COMPONENT SETTINGS
ghenv.Component.Name = "WriteKnittingPatternToBitmap"
ghenv.Component.NickName ="WKPTB"
ghenv.Component.Category = "Cockatoo"
ghenv.Component.SubCategory = "09 Pattern Data"
class WriteKnittingPatternToBitmap(component):
    """Grasshopper component: write PixelData to a .bmp file when Write is True."""
    def RunScript(self, Write, PixelData, Path):
        if Write and PixelData and Path:
            # reverse the data so that the start is at the bottom of the image
            # NOTE(review): reverse() mutates the caller's list in place --
            # running the component twice flips the image back.
            PixelData.reverse()
            # Get number of columns and rows in csv data
            columns = max([len(row) for row in PixelData])
            rows = len(PixelData)
            # initialize empty bitmap
            bitmap = System.Drawing.Bitmap(columns, rows)
            # add pixels
            for i in range(columns):
                for j in range(rows):
                    try:
                        col = PixelData[j][i]
                    except IndexError:
                        # Rows shorter than the widest row are padded gray.
                        col = System.Drawing.Color.Gray
                    # set pixel
                    bitmap.SetPixel(i, j, col)
            # save to file
            bitmap.Save(path.normpath(Path.strip("\n\r")),
                        System.Drawing.Imaging.ImageFormat.Bmp)
        else:
            # Warn about whichever required inputs are missing.
            if not PixelData:
                rml = self.RuntimeMessageLevel.Warning
                self.AddRuntimeMessage(rml, "No PixelData input!")
            if not Path:
                rml = self.RuntimeMessageLevel.Warning
                self.AddRuntimeMessage(rml, "No Path input!")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\source\gui_simple_designer.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) UI definition for the main window.

    NOTE(review): regenerate from gui_simple_designer.ui rather than
    hand-editing -- per the file header, manual changes are lost when
    pyuic5 is re-run.
    """
    def setupUi(self, MainWindow):
        # Builds: central widget -> grid -> vertical layout with a button
        # row (load / player combo / run) above a monospace output field.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(930, 537)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.buttonLoad = QtWidgets.QPushButton(self.centralwidget)
        self.buttonLoad.setObjectName("buttonLoad")
        self.horizontalLayout.addWidget(self.buttonLoad)
        self.cbSelectPlayer = QtWidgets.QComboBox(self.centralwidget)
        self.cbSelectPlayer.setObjectName("cbSelectPlayer")
        self.cbSelectPlayer.addItem("")
        self.horizontalLayout.addWidget(self.cbSelectPlayer)
        self.buttonRun = QtWidgets.QPushButton(self.centralwidget)
        self.buttonRun.setObjectName("buttonRun")
        self.horizontalLayout.addWidget(self.buttonRun)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.outputField = QtWidgets.QPlainTextEdit(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.outputField.sizePolicy().hasHeightForWidth())
        self.outputField.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setFamily("Courier New")
        font.setPointSize(12)
        self.outputField.setFont(font)
        self.outputField.setObjectName("outputField")
        self.verticalLayout.addWidget(self.outputField)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 930, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Assigns all user-visible strings through Qt's translation layer.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.buttonLoad.setText(_translate("MainWindow", "Load replay"))
        self.cbSelectPlayer.setItemText(0, _translate("MainWindow", "<-- load replay first"))
        self.buttonRun.setText(_translate("MainWindow", "Extract build order for selected player"))
        self.outputField.setPlainText(_translate("MainWindow", "1. Load replay\n"
"2. select player\n"
"3. Extracted Build order will show here"))
if __name__ == "__main__":
    # Manual smoke test: show the bare window with no application logic wired.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
# Generated by Django 3.2.8 on 2021-10-06 17:34
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Ingredient.measurement to Ingredient.unit (data-preserving)."""
    dependencies = [
        ('recipe_app', '0003_review'),
    ]
    operations = [
        migrations.RenameField(
            model_name='ingredient',
            old_name='measurement',
            new_name='unit',
        ),
    ]
|
from glob import glob
from json import dump, load
from pathlib import Path
from random import choice
from re import search
from click import get_app_dir
from inflect import engine
from rich import print
from rich.markdown import Markdown
from rich.panel import Panel
from rich.prompt import Prompt
p = engine()
def display_name(string: str) -> str:
    """Turn a snake_case word-type key into a human-readable label."""
    special_cases = {
        'verb_ing': 'Verb ending in -ing',
        'verb_ed': 'Verb ending in -ed',
    }
    if string in special_cases:
        return special_cases[string]
    # Generic case: underscores become spaces, then Title Case.
    return string.replace('_', ' ').title()
def get_setting(setting: str):
    """Read one value from the Angry Libs settings file.

    Raises KeyError if the setting has never been stored.
    """
    with settings_file().open() as fp:
        stored = load(fp)
    return stored[setting]
def prompt_for_word(type_: str) -> str:
    """Prompts the user for a word of the given type.

    A trailing ``_AN`` in ``type_`` means the answer is returned with an
    indefinite article ("a"/"an") prepended; ``plural_noun`` asks for a
    noun and pluralizes the answer.  When words.json has entries for the
    base type, a random one is offered as the prompt default.
    """
    if type_ == 'plural_noun':
        if 'noun' in words_dict().keys():
            random_word = choice(words_dict()['noun'])
        else:
            random_word = None
        return p.plural(Prompt.ask(display_name('noun'), default=random_word))
    # BUG FIX: the previous code used type_.strip('_AN'), which strips any
    # run of the characters '_', 'A', 'N' from *both ends* rather than
    # removing the literal '_AN' suffix. Remove the suffix explicitly.
    base_type = type_[:-3] if type_.endswith('_AN') else type_
    if base_type in words_dict().keys():
        random_word = choice(words_dict()[base_type])
    else:
        random_word = None
    user_input = Prompt.ask(display_name(base_type), default=random_word)
    if type_.endswith('_AN'):
        return p.a(user_input)
    return user_input
def set_setting(setting: str, value):
    """Persist a single setting to the Angry Libs settings file."""
    store_path = settings_file()
    with store_path.open() as fp:
        stored = load(fp)
    stored[setting] = value
    with store_path.open('w') as fp:
        dump(stored, fp)
def settings_file() -> Path:
    """Return the path to the Angry Libs settings file.

    Creates the parent directories and an empty JSON object on first use.
    """
    settings_path = Path(get_app_dir(app_name='Angry Libs')) / 'settings.json'
    if not settings_path.is_file():
        settings_path.parent.mkdir(parents=True, exist_ok=True)
        settings_path.write_text('{}')
    return settings_path
def show_directions():
    """Shows the user directions for the program"""
    # Rendered as Markdown inside a Rich panel; the literal's layout is
    # part of the displayed text, so it must be kept exactly as written.
    print(
        Panel(
            Markdown(
                '''**Here's how to get started:**
* Pick a story from the random choices given
* Fill in the blanks! Some word types will give you a random default value, which you can use by just pressing Enter!
* Read your story and laugh!'''
            ),
            title='[bold green]WELCOME TO ANGRY LIBS!',
        )
    )
def story_list():
    """Return the file paths of all bundled .story.txt files."""
    stories_dir = Path(__file__).parent / 'stories'
    return glob(str(stories_dir / '*.story.txt'))
def story_name_from_path(path: str) -> str:
    """Gets the display name of a story from its file path.

    Raises ValueError when *path* does not look like a story file
    (previously this crashed with AttributeError on ``None.groups()``).
    """
    match = search(r'stories[\\/]([a-z\d_]+)\.story\.txt', str(path))
    if match is None:
        raise ValueError(f'not a story file path: {path!r}')
    return display_name(match.group(1))
def words_dict() -> dict:
    """Load and return the contents of the bundled words.json file."""
    words_path = Path(__file__).parent / 'words.json'
    with open(words_path) as fp:
        return load(fp)
|
#!/usr/bin/python3
from http.server import BaseHTTPRequestHandler, HTTPServer
from http import cookies
import json
# In-memory user table, keyed by the OIDC "sub" claim.
users = {}
def addUsers(l):
    """Register each user dict in the table and assign a numeric ID.

    IDs are 1-based: each equals the table size right after insertion.
    """
    for entry in l:
        users[entry.get("sub")] = entry
        # Add numeric user ID.
        entry["id"] = len(users)
# Seed the table with demo identities; addUsers() also assigns each a
# numeric "id" field.
addUsers([
    {
        "sub": "arthur",
        "name": "Arthur Dent",
        "email": "arthur@earth.local",
    },
    {
        "sub": "trillian",
        "name": "Trillian",
        "email": "trillian@galaxy.local",
    },
    {
        "sub": "ford",
        "name": "Ford Prefect",
        "email": "ford@betelgeuse.local",
    }
])
class Handler(BaseHTTPRequestHandler):
    """Answers POST requests with the user record matching the session
    cookie ("minioidc-simple"), or 401/403 on missing/unknown identity."""

    def do_POST(self):
        jar = cookies.SimpleCookie()
        jar.load(self.headers.get("Cookie", ""))
        session = jar.get("minioidc-simple", None)
        if session is None:
            # No session cookie at all -> unauthenticated.
            self.send_response(401)
            self.end_headers()
            return
        user = users.get(session.value, None)
        if user is None:
            # Cookie present but subject unknown -> forbidden.
            self.send_response(403)
            self.end_headers()
            return
        body = json.dumps(user, indent=4, sort_keys=True).encode()
        self.send_response(202)
        self.send_header('Content-Type', 'application/json')
        self.end_headers()
        self.wfile.write(body)
        return
def run(server_class=HTTPServer, handler_class=Handler, port=8080):
    """Serve requests on 127.0.0.1:<port> forever (blocks until killed)."""
    server = server_class(('127.0.0.1', port), handler_class)
    print('Starting HTTP server ...')
    server.serve_forever()
if __name__ == '__main__':
    import sys
    # A single optional CLI argument overrides the default port.
    if len(sys.argv) == 2:
        run(port=int(sys.argv[1]))
    else:
        run()
|
import os
import re
import subprocess
import sys
from setuptools import setup
def get_version(package: str) -> str:
    """Return the package's ``__version__`` string, or "0.0.0" if the
    package's __init__.py does not declare one."""
    init_py = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), package, "__init__.py"
    )
    with open(init_py, "r") as fp:
        match = re.search(r'__version__ = ["\'](.+)["\']', fp.read())
    return match.group(1) if match else "0.0.0"
def get_packages(package):
    """Return root package and all sub-packages (dirs with __init__.py)."""
    found = []
    for dirpath, _dirnames, _filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, "__init__.py")):
            found.append(dirpath)
    return found
# Placeholder: no long description is shipped with this package.
long_description = ""
def check():
    """Run the linters and type checker; abort on the first failure.

    Raises subprocess.CalledProcessError when a tool exits non-zero.
    """
    def run(command):
        # The previous `assert returncode == 0` is silently stripped when
        # Python runs with -O; check=True always enforces the exit code.
        subprocess.run(command.split(), check=True)
        print(f"'{command}' --- OK")
    run("pycodestyle mkapi")
    run("pyflakes mkapi")
    run("mypy mkapi")
    run("pycodestyle tests")
    run("pyflakes tests")
def publish():
    """Lint, build, upload to PyPI, tag the release, then exit."""
    check()
    for command in ("python setup.py sdist bdist_wheel", "twine upload dist/*"):
        subprocess.run(command.split())
    version = get_version("mkapi")
    subprocess.run(["git", "tag", "-a", f"{version}", "-m", f"'Version {version}'"])
    subprocess.run(["git", "push", "origin", "--tags"])
    sys.exit(0)
# Ad-hoc command dispatch: `python setup.py publish` / `... check` run the
# helpers above instead of the normal setuptools commands.
if sys.argv[-1] == "publish":
    publish()
if sys.argv[-1] == "check":
    check()
setup(
    name="mkapi-fix",
    version=get_version("mkapi"),
    description="An Auto API Documentation tool.",
    long_description=long_description,
    url="https://github.com/pwwang/mkapi",
    author="pwwang",
    author_email="pwwang@pwwang.com",
    license="MIT",
    packages=get_packages("mkapi") + ["mkapi/templates", "mkapi/theme"], # FIXME
    include_package_data=True,
    install_requires=["markdown", "jinja2"],
    python_requires=">=3.7",
    entry_points={
        "console_scripts": ["mkapi = mkapi.main:cli"],
        "mkdocs.plugins": ["mkapi = mkapi.plugins.mkdocs:MkapiPlugin"],
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Documentation",
    ],
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from bakery.views import BuildableListView, BuildableDetailView
from .models import Show, Category
# Create your views here.
class HomePageView(BuildableListView):
    """Static home page listing the categories flagged for it."""
    # `queryset` takes precedence; `model` mainly signals the default
    # template name.
    queryset = Category.objects.filter(display_on_home=True)
    model = Category # This also means the template name is category_list.html
class ShowListView(BuildableListView):
    """Static list page of every show."""
    queryset = Show.objects.all()
    model = Show
class ShowDetailView(BuildableDetailView):
    """Static detail page for a single Show, looked up by URL slug."""
    model = Show
    template_name = 'shows/show_detail.html'

    def get_object(self, queryset=None):
        """Return the Show for the captured slug.

        Falls back to the default DetailView lookup when no slug was
        captured.  BUG FIX: the previous code called the non-existent
        ``get_objects()`` on super(), raising AttributeError, and indexed
        ``self.kwargs['slug']`` directly, raising KeyError when the URL
        pattern captured no slug.
        """
        if self.kwargs.get('slug'):
            return Show.objects.get(slug=self.kwargs['slug'])
        return super(ShowDetailView, self).get_object(queryset)
from .test_smoothers import * |
"""OpenAPI core licenses models module"""
class License(object):
    """OpenAPI License object: a required name plus an optional URL and
    optional specification extensions (stored as a defensive copy)."""

    def __init__(self, name, url=None, extensions=None):
        self.name = name
        self.url = url
        # Replaces the fragile `x and y or z` idiom with an explicit
        # conditional; dict() also copies so callers cannot mutate us.
        self.extensions = dict(extensions) if extensions else {}
|
"""
Training and evaluating script for part segmentation with PartNet dataset
"""
import os
import sys
import time
import pprint
import psutil
import argparse
import subprocess
import numpy as np
import tensorflow as tf
from sklearn.neighbors import KDTree
FILE_DIR = os.path.abspath(__file__)
BASE_DIR = os.path.dirname(FILE_DIR)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from datasets import PartNetDataset
from models import PartSegModel
from utils.config import config, update_config
from utils.average_gradients import average_gradients
from utils.AdamWOptimizer import AdamWeightDecayOptimizer
from utils.logger import setup_logger
from utils.scheduler import StepScheduler
from utils.metrics import AverageMeter, partnet_metrics
from utils.ply import read_ply
def parse_option():
    """Parse CLI arguments, merge them into the global ``config`` and seed
    the RNGs.

    Returns the (args, config) pair.
    """
    parser = argparse.ArgumentParser("Training and evaluating PartNet")
    parser.add_argument('--cfg', help='yaml file', type=str)
    parser.add_argument('--gpus', type=int, default=0, nargs='+', help='gpus to use [default: 0]')
    parser.add_argument('--num_threads', type=int, default=4, help='num of threads to use')
    parser.add_argument('--batch_size', type=int, help='batch_size')
    parser.add_argument('--base_learning_rate', type=float, help='base learning rate for batch size 8')
    # IO
    parser.add_argument('--log_dir', default='log', help='log dir [default: log]')
    parser.add_argument('--load_path', help='path to a check point file for load')
    parser.add_argument('--print_freq', type=int, help='print frequency')
    parser.add_argument('--save_freq', type=int, help='save frequency')
    parser.add_argument('--val_freq', type=int, help='val frequency')
    # Misc
    parser.add_argument('--save_memory', action='store_true', help='use memory_saving_gradients')
    parser.add_argument("--rng-seed", type=int, default=0, help='manual seed')
    args, _ = parser.parse_known_args()
    # Update config
    update_config(args.cfg)
    ddir_name = args.cfg.split('.')[-2].split('/')[-1]
    config.log_dir = os.path.join(args.log_dir, 'partnet', f'{ddir_name}_{int(time.time())}')
    config.load_path = args.load_path
    # argparse yields a bare int when --gpus is left at its default.
    config.gpus = args.gpus if isinstance(args.gpus, list) else [args.gpus]
    config.num_gpus = len(config.gpus)
    if args.num_threads:
        config.num_threads = args.num_threads
    else:
        # Default: share the machine's CPU threads evenly across its GPUs.
        cpu_count = psutil.cpu_count()
        gpu_count = str(subprocess.check_output(["nvidia-smi", "-L"])).count('UUID')
        config.num_threads = config.num_gpus * cpu_count // gpu_count
    if args.batch_size:
        config.batch_size = args.batch_size
    if args.base_learning_rate:
        config.base_learning_rate = args.base_learning_rate
    if args.print_freq:
        config.print_freq = args.print_freq
    if args.save_freq:
        config.save_freq = args.save_freq
    if args.val_freq:
        # BUG FIX: this previously assigned to config.save_freq, so
        # --val_freq silently changed the checkpoint frequency and left
        # the validation frequency untouched.
        config.val_freq = args.val_freq
    # Set manual seed
    tf.set_random_seed(args.rng_seed)
    np.random.seed(args.rng_seed)
    # If args.save_memory is True, use gradient-checkpointing to save memory
    if args.save_memory:  # if save memory
        import utils.memory_saving_gradients
        tf.__dict__["gradients"] = utils.memory_saving_gradients.gradients_collection
    return args, config
def training(config):
    """Build the multi-GPU PartNet graph, train for config.max_epoch
    epochs with periodic validation/checkpointing, run a final (voted)
    evaluation and return the path of the last checkpoint.

    NOTE(review): relies on the module-level `logger` created in __main__.
    """
    with tf.Graph().as_default():
        # Get dataset
        logger.info('==> Preparing datasets...')
        dataset = PartNetDataset(config, config.num_threads)
        config.num_classes = dataset.num_classes
        config.num_parts = dataset.num_parts
        print("config.num_classes: {}".format(config.num_classes))
        print("config.num_parts: {}".format(config.num_parts))
        flat_inputs = dataset.flat_inputs
        train_init_op = dataset.train_init_op
        val_init_op = dataset.val_init_op
        test_init_op = dataset.test_init_op
        val_vote_init_op = dataset.val_vote_init_op
        test_vote_init_op = dataset.test_vote_init_op
        is_training_pl = tf.placeholder(tf.bool, shape=())
        # Set learning rate and optimizer.  The base LR is scaled linearly
        # with the effective batch size (batch_size * num_gpus) relative
        # to a reference batch size of 8.
        lr_scheduler = StepScheduler('learning_rate',
                                     config.base_learning_rate * config.batch_size * config.num_gpus / 8.0,
                                     config.decay_rate,
                                     config.decay_epoch, config.max_epoch)
        learning_rate = tf.get_variable('learning_rate', [],
                                        initializer=tf.constant_initializer(
                                            config.base_learning_rate * config.batch_size * config.num_gpus / 8.0),
                                        trainable=False)
        if config.optimizer == 'sgd':
            optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=config.momentum)
        elif config.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(learning_rate)
        elif config.optimizer == 'adamW':
            optimizer = AdamWeightDecayOptimizer(learning_rate=config.base_learning_rate / 8.0,
                                                 weight_decay_rate=config.weight_decay,
                                                 exclude_from_weight_decay=["bias"])
        else:
            raise NotImplementedError
        # -------------------------------------------
        # Get model and loss on multiple GPU devices
        # -------------------------------------------
        # Allocating variables on CPU first will greatly accelerate multi-gpu training.
        # Ref: https://github.com/kuza55/keras-extras/issues/21
        PartSegModel(flat_inputs[0], is_training_pl, config=config)
        tower_grads = []
        tower_logits_with_point_label = []
        tower_logits_all_shapes = []
        tower_labels = []
        total_loss_gpu = []
        total_segment_loss_gpu = []
        total_weight_loss_gpu = []
        tower_super_labels = []
        tower_object_inds = []
        tower_in_batches = []
        for i, igpu in enumerate(config.gpus):
            # reuse=True: every tower shares the variables created above.
            with tf.variable_scope(tf.get_variable_scope(), reuse=True):
                with tf.device('/gpu:%d' % (igpu)), tf.name_scope('gpu_%d' % (igpu)) as scope:
                    flat_inputs_i = flat_inputs[i]
                    model = PartSegModel(flat_inputs_i, is_training_pl, config=config)
                    logits_with_point_label = model.logits_with_point_label
                    logits_all_shapes = model.logits_all_shapes
                    labels = model.labels
                    model.get_loss()
                    losses = tf.get_collection('losses', scope)
                    weight_losses = tf.get_collection('weight_losses', scope)
                    segment_losses = tf.get_collection('segmentation_losses', scope)
                    total_loss = tf.add_n(losses, name='total_loss')
                    total_weight_loss = tf.add_n(weight_losses, name='total_weight_loss')
                    total_segment_loss = tf.add_n(segment_losses, name='total_segment_loss')
                    grad_var_list = tf.trainable_variables()
                    # adamW applies decoupled weight decay itself, so the
                    # weight-loss term is excluded from its gradients.
                    if config.optimizer == 'adamW':
                        grads = tf.gradients(total_segment_loss, grad_var_list)
                    else:
                        grads = tf.gradients(total_loss, grad_var_list)
                    grads = list(zip(grads, grad_var_list))
                    tower_grads.append(grads)
                    tower_logits_with_point_label.append(logits_with_point_label)
                    tower_logits_all_shapes.append(logits_all_shapes)
                    tower_labels.append(labels)
                    total_loss_gpu.append(total_loss)
                    total_segment_loss_gpu.append(total_segment_loss)
                    total_weight_loss_gpu.append(total_weight_loss)
                    super_labels = model.inputs['super_labels']
                    object_inds = model.inputs['object_inds']
                    in_batches = model.inputs['in_batches']
                    tower_super_labels.append(super_labels)
                    tower_object_inds.append(object_inds)
                    tower_in_batches.append(in_batches)
        # Average losses from multiple GPUs
        total_loss = tf.reduce_mean(total_loss_gpu)
        total_segment_loss = tf.reduce_mean(total_segment_loss_gpu)
        total_weight_loss = tf.reduce_mean(total_weight_loss_gpu)
        # Get training operator
        grads = average_gradients(tower_grads, grad_norm=config.grad_norm)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.apply_gradients(grads)
        # Add ops to save and restore all the variables.
        save_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='PartSegModel')
        saver = tf.train.Saver(save_vars)
        # Create a session
        tfconfig = tf.ConfigProto()
        tfconfig.gpu_options.allow_growth = True
        tfconfig.allow_soft_placement = True
        tfconfig.log_device_placement = False
        sess = tf.Session(config=tfconfig)
        # Initialize variables, resume if needed
        if config.load_path is not None:
            init = tf.global_variables_initializer()
            sess.run(init)
            saver.restore(sess, config.load_path)
            logger.info("Model loaded in file: %s" % config.load_path)
        else:
            # Init variables
            init = tf.global_variables_initializer()
            sess.run(init)
            logger.info("init global")
        # Printing model parameters
        all_params = [v for v in tf.trainable_variables() if 'weights' in v.name]
        logger.info("==> All params")
        for param in all_params:
            logger.info(str(param))
        all_params_size = tf.reduce_sum([tf.reduce_prod(v.shape) for v in all_params])
        all_params_size_np = sess.run(all_params_size)
        logger.info("==> Model have {} total Params".format(all_params_size_np))
        ops = {
            'train_init_op': train_init_op,
            'val_init_op': val_init_op,
            'val_vote_init_op': val_vote_init_op,
            'test_init_op': test_init_op,
            'test_vote_init_op': test_vote_init_op,
            'is_training_pl': is_training_pl,
            'tower_logits_with_point_label': tower_logits_with_point_label,
            'tower_logits_all_shapes': tower_logits_all_shapes,
            'tower_labels': tower_labels,
            'tower_super_labels': tower_super_labels,
            'tower_object_inds': tower_object_inds,
            'tower_in_batches': tower_in_batches,
            'loss': total_loss,
            'segment_loss': total_segment_loss,
            'weight_loss': total_weight_loss,
            'train_op': train_op,
            'learning_rate': learning_rate}
        for epoch in range(1, config.max_epoch + 1):
            lr = lr_scheduler.step()
            tic1 = time.time()
            train_one_epoch(sess, ops, epoch, lr)
            tic2 = time.time()
            logger.info("Epoch: {} total time: {:2f}s, learning rate: {:.5f}".format(epoch, tic2 - tic1, lr))
            if epoch % config.val_freq == 0:
                logger.info("==> Validating...")
                val_one_epoch(sess, ops, dataset, epoch, 'val')
                val_one_epoch(sess, ops, dataset, epoch, 'test')
            if epoch % config.save_freq == 0:
                save_path = saver.save(sess, os.path.join(config.log_dir, "model.ckpt"), global_step=epoch)
                logger.info("==> Model saved in file: {}".format(save_path))
        # Final full evaluation (plain and 10-vote) after the last epoch.
        epoch += 1
        val_one_epoch(sess, ops, dataset, epoch, 'val')
        val_one_epoch(sess, ops, dataset, epoch, 'test')
        val_vote_one_epoch(sess, ops, dataset, epoch, 'val', num_votes=10)
        val_vote_one_epoch(sess, ops, dataset, epoch, 'test', num_votes=10)
        save_path = saver.save(sess, os.path.join(config.log_dir, "model.ckpt"), global_step=epoch)
        logger.info("==> Model saved in file: {}".format(save_path))
        return save_path
def evaluating(config, save_path, GPUs=0):
    """Rebuild a single-GPU inference graph, restore the checkpoint at
    ``save_path`` and run plain + 10-vote evaluation on val and test.

    ``GPUs`` may be an int or a list; only the first entry is used.
    NOTE(review): relies on the module-level `logger` created in __main__.
    """
    logger.info("==> Start evaluating.........")
    if isinstance(GPUs, list):
        # BUG FIX: message typo "fisrt" -> "first".
        logger.warning("We use the first gpu for evaluating")
        GPUs = [GPUs[0]]
    elif isinstance(GPUs, int):
        GPUs = [GPUs]
    else:
        raise RuntimeError("Check GPUs for evaluate")
    config.num_gpus = 1
    with tf.Graph().as_default():
        logger.info('==> Preparing datasets...')
        dataset = PartNetDataset(config, config.num_threads)
        config.num_classes = dataset.num_classes
        config.num_parts = dataset.num_parts
        print("config.num_classes: {}".format(config.num_classes))
        print("config.num_parts: {}".format(config.num_parts))
        flat_inputs = dataset.flat_inputs
        val_init_op = dataset.val_init_op
        test_init_op = dataset.test_init_op
        val_vote_init_op = dataset.val_vote_init_op
        test_vote_init_op = dataset.test_vote_init_op
        is_training_pl = tf.placeholder(tf.bool, shape=())
        # Create variables on CPU first, then build the reused GPU tower.
        PartSegModel(flat_inputs[0], is_training_pl, config=config)
        tower_logits_with_point_label = []
        tower_logits_all_shapes = []
        tower_labels = []
        tower_super_labels = []
        tower_object_inds = []
        tower_in_batches = []
        for i, igpu in enumerate(GPUs):
            with tf.variable_scope(tf.get_variable_scope(), reuse=True):
                with tf.device('/gpu:%d' % (igpu)), tf.name_scope('gpu_%d' % (igpu)) as scope:
                    flat_inputs_i = flat_inputs[i]
                    model = PartSegModel(flat_inputs_i, is_training_pl, config=config)
                    logits_with_point_label = model.logits_with_point_label
                    logits_all_shapes = model.logits_all_shapes
                    labels = model.labels
                    tower_logits_with_point_label.append(logits_with_point_label)
                    tower_logits_all_shapes.append(logits_all_shapes)
                    tower_labels.append(labels)
                    super_labels = model.inputs['super_labels']
                    object_inds = model.inputs['object_inds']
                    in_batches = model.inputs['in_batches']
                    tower_super_labels.append(super_labels)
                    tower_object_inds.append(object_inds)
                    tower_in_batches.append(in_batches)
        # Add ops to save and restore all the variables.
        save_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='PartSegModel')
        saver = tf.train.Saver(save_vars)
        # Create a session
        tfconfig = tf.ConfigProto()
        tfconfig.gpu_options.allow_growth = True
        tfconfig.allow_soft_placement = True
        tfconfig.log_device_placement = False
        sess = tf.Session(config=tfconfig)
        ops = {'val_init_op': val_init_op,
               'test_init_op': test_init_op,
               'val_vote_init_op': val_vote_init_op,
               'test_vote_init_op': test_vote_init_op,
               'is_training_pl': is_training_pl,
               'tower_logits_with_point_label': tower_logits_with_point_label,
               'tower_logits_all_shapes': tower_logits_all_shapes,
               'tower_labels': tower_labels,
               'tower_super_labels': tower_super_labels,
               'tower_object_inds': tower_object_inds,
               'tower_in_batches': tower_in_batches}
        # Load the pretrained model
        init = tf.global_variables_initializer()
        sess.run(init)
        saver.restore(sess, save_path)
        logger.info("Model loaded in file: %s" % save_path)
        # Evaluating
        logger.info("==> Evaluating Last epoch")
        val_one_epoch(sess, ops, dataset, 'FINAL', 'val')
        val_one_epoch(sess, ops, dataset, 'FINAL', 'test')
        val_vote_one_epoch(sess, ops, dataset, 'FINAL', 'val', num_votes=10)
        val_vote_one_epoch(sess, ops, dataset, 'FINAL', 'test', num_votes=10)
        return
def train_one_epoch(sess, ops, epoch, lr):
    """
    One epoch training

    Drains the train iterator once, running the optimizer at fixed
    learning rate `lr` and logging running-average losses.
    NOTE(review): uses the module-level `config` and `logger` created in
    __main__.
    """
    is_training = True
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    weight_loss_meter = AverageMeter()
    seg_loss_meter = AverageMeter()
    sess.run(ops['train_init_op'])
    feed_dict = {ops['is_training_pl']: is_training,
                 ops['learning_rate']: lr}
    batch_idx = 0
    end = time.time()
    while True:
        try:
            _, loss, segment_loss, weight_loss = sess.run([ops['train_op'],
                                                           ops['loss'],
                                                           ops['segment_loss'],
                                                           ops['weight_loss']],
                                                          feed_dict=feed_dict)
            loss_meter.update(loss)
            seg_loss_meter.update(segment_loss)
            weight_loss_meter.update(weight_loss)
            batch_time.update(time.time() - end)
            end = time.time()
            if (batch_idx + 1) % config.print_freq == 0:
                logger.info(f'Train: [{epoch}][{batch_idx}] '
                            f'T {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                            f'loss {loss_meter.val:.3f} ({loss_meter.avg:.3f}) '
                            f'seg loss {seg_loss_meter.val:.3f} ({seg_loss_meter.avg:.3f}) '
                            f'weight loss {weight_loss_meter.val:.3f} ({weight_loss_meter.avg:.3f})')
            batch_idx += 1
        except tf.errors.OutOfRangeError:
            # Train iterator exhausted: the epoch is complete.
            break
def val_one_epoch(sess, ops, dataset, epoch, split):
    """
    One epoch validating

    Runs the whole `split` ('val' or 'test') set once, collects per-shape
    predictions and labels, and logs PartNet IoU metrics.
    NOTE(review): uses the module-level `logger` created in __main__.
    """
    is_training = False
    sess.run(ops[f'{split}_init_op'])
    feed_dict = {ops['is_training_pl']: is_training}
    preds = []
    targets = []
    objects = []
    obj_inds = []
    idx = 0
    while True:
        try:
            tower_logits_all_shapes, tower_labels, \
            tower_object_labels, tower_o_inds, tower_batches = sess.run([ops['tower_logits_all_shapes'],
                                                                         ops['tower_labels'],
                                                                         ops['tower_super_labels'],
                                                                         ops['tower_object_inds'],
                                                                         ops['tower_in_batches']],
                                                                        feed_dict=feed_dict)
            # Get predictions and labels per instance
            for logits_all_shapes, labels, object_labels, o_inds, batches in zip(tower_logits_all_shapes, tower_labels,
                                                                                 tower_object_labels, tower_o_inds,
                                                                                 tower_batches):
                max_ind = np.max(batches)
                for b_i, b in enumerate(batches):
                    # Eliminate shadow indices
                    b = b[b < max_ind - 0.5]
                    # Get prediction (only for the concerned parts)
                    obj = object_labels[b[0]]
                    pred = logits_all_shapes[obj][b]
                    # Stack all results
                    objects += [obj]
                    obj_inds += [o_inds[b_i]]
                    preds += [pred]
                    targets += [labels[b]]
            idx += 1
        except tf.errors.OutOfRangeError:
            # Iterator exhausted: the split has been fully consumed.
            break
    msIoU, mpIoU, mmsIoU, mmpIoU = partnet_metrics(dataset.num_classes, dataset.num_parts,
                                                   objects, preds, targets)
    logger.info(f'E{epoch} {split} * mmsIoU {mmsIoU:.3%} mmpIoU {mmpIoU:.3%}')
    logger.info(f'E{epoch} {split} * msIoU {msIoU}')
    logger.info(f'E{epoch} {split} * mpIoU {mpIoU}')
    return
def val_vote_one_epoch(sess, ops, dataset, epoch, split, num_votes=10):
    """
    One epoch voting validating

    Runs the `split` set `num_votes` times (the vote init ops re-sample
    augmentations), projects predictions back to the original point
    clouds and keeps an incremental mean over the votes, logging metrics
    after every vote.
    NOTE(review): `original_points` from prepare_testing_structure is
    unpacked but never used here.
    """
    is_training = False
    original_labels, original_points, \
    projection_inds, average_predictions = prepare_testing_structure(dataset, split)
    feed_dict = {ops['is_training_pl']: is_training}
    for v in range(num_votes):
        # First pass uses the plain split; later passes use the vote op.
        if v == 0:
            sess.run(ops[f'{split}_init_op'])
        else:
            sess.run(ops[f'{split}_vote_init_op'])
        all_predictions = []
        all_obj_inds = []
        all_objects = []
        while True:
            try:
                tower_logits_all_shapes, tower_labels, \
                tower_object_labels, tower_o_inds, tower_batches = sess.run([ops['tower_logits_all_shapes'],
                                                                             ops['tower_labels'],
                                                                             ops['tower_super_labels'],
                                                                             ops['tower_object_inds'],
                                                                             ops['tower_in_batches']],
                                                                            feed_dict=feed_dict)
                for logits_all_shapes, labels, object_labels, o_inds, batches in zip(tower_logits_all_shapes,
                                                                                     tower_labels,
                                                                                     tower_object_labels,
                                                                                     tower_o_inds,
                                                                                     tower_batches):
                    max_ind = np.max(batches)
                    for b_i, b in enumerate(batches):
                        # Eliminate shadow indices
                        b = b[b < max_ind - 0.5]
                        # Get prediction (only for the concerned parts)
                        obj = object_labels[b[0]]
                        pred = logits_all_shapes[obj][b]
                        # Stack all results
                        all_objects += [obj]
                        all_obj_inds += [o_inds[b_i]]
                        all_predictions += [pred]
            except tf.errors.OutOfRangeError:
                break
        true_num_test = len(all_predictions)
        num_test = dataset.num_test if split == 'test' else dataset.num_val
        vote_objects = [-1] * num_test
        # Project predictions on original point clouds
        for i, probs in enumerate(all_predictions):
            # Interpolate prediction from current positions to original points
            obj_i = all_obj_inds[i]
            proj_predictions = probs[projection_inds[obj_i]]
            vote_objects[obj_i] = all_objects[i]
            # Average prediction across votes (incremental mean:
            # avg += (new - avg) / (v + 1))
            average_predictions[obj_i] = average_predictions[obj_i] + \
                                         (proj_predictions - average_predictions[obj_i]) / (v + 1)
        if true_num_test != num_test:
            logger.warning("{} using {}/{} data, "
                           "this may be caused by multi-gpu testing".format(split, true_num_test, num_test))
            vote_preds = average_predictions[:true_num_test]
            vote_targets = original_labels[:true_num_test]
        else:
            vote_preds = average_predictions
            vote_targets = original_labels
        msIoU, mpIoU, mmsIoU, mmpIoU = partnet_metrics(dataset.num_classes, dataset.num_parts,
                                                       vote_objects, vote_preds, vote_targets)
        logger.info(f'E{epoch} V{v} {split} * mmsIoU {mmsIoU:.3%} mmpIoU {mmpIoU:.3%}')
        logger.info(f'E{epoch} V{v} {split} * msIoU {msIoU}')
        logger.info(f'E{epoch} V{v} {split} * mpIoU {mpIoU}')
    return
def prepare_testing_structure(dataset, split):
    """Load the original point clouds of `split` and precompute, for each
    cloud, the nearest-subsampled-point index of every original point
    (used to project network predictions back onto the full clouds).

    Returns (original_labels, original_points, projection_inds,
    average_predictions); average_predictions starts as (1, 1) zero
    arrays that broadcast to full shape on the first vote update.
    """
    logger.info('==> Preparing test structures')
    t1 = time.time()
    # Collect original test file names
    original_path = os.path.join(dataset.path, f'{split}_ply')
    test_names = [f[:-4] for f in os.listdir(original_path) if f[-4:] == '.ply']
    test_names = np.sort(test_names)
    original_labels = []
    original_points = []
    projection_inds = []
    for i, cloud_name in enumerate(test_names):
        # Read data in ply file
        data = read_ply(os.path.join(original_path, cloud_name + '.ply'))
        points = np.vstack((data['x'], data['y'], data['z'])).T
        original_labels += [data['label']]
        original_points += [points]
        # Create tree structure to compute neighbors
        if split == 'val':
            tree = KDTree(dataset.input_points['validation'][i])
        else:
            tree = KDTree(dataset.input_points['test'][i])
        projection_inds += [np.squeeze(tree.query(points, return_distance=False))]
    # Initiate result containers
    average_predictions = [np.zeros((1, 1), dtype=np.float32) for _ in test_names]
    t2 = time.time()
    print('Done in {:.1f} s\n'.format(t2 - t1))
    return original_labels, original_points, projection_inds, average_predictions
if __name__ == "__main__":
    # Parse CLI/config, set up logging, then train and evaluate on the
    # first configured GPU.
    _, config = parse_option()
    os.makedirs(config.log_dir, exist_ok=True)
    logger = setup_logger(output=config.log_dir, name="partnet")
    logger.info(pprint.pformat(config))
    save_path = training(config)
    evaluating(config, save_path, config.gpus[0])
|
import subprocess
import logging
from ruxit.api.base_plugin import BasePlugin
class SSHConnections(BasePlugin):
    """Dynatrace OneAgent plugin reporting the number of SSH connections."""

    def query(self, **kwargs):
        """Periodic agent entry point: count ssh sockets via `ss` and push
        the absolute `ssh_connections` metric."""
        if self.config['debug']:
            self.logger.setLevel(logging.DEBUG)
            self.logger.debug("SSH Connections - Starting Query... \n")
        # Counts every `ss` output line mentioning "ssh".
        # NOTE(review): the case-insensitive grep may also match unrelated
        # lines (e.g. other service names containing "ssh") -- verify the
        # count on the target host.
        output = subprocess.check_output("ss | grep -i ssh | wc -l", shell=True).strip()
        if self.config['debug']:
            debugOutput = subprocess.check_output("ss | grep -i ssh | awk '{print OFS FS $2 $5 $6}'", shell=True).strip()
            self.logger.debug("SSH Connections - ss output: " + str(debugOutput) + "\n")
        ssh_connects = int(output.decode("utf-8"))
        self.results_builder.absolute(key='ssh_connections', value=ssh_connects)
from collections import defaultdict
class Graph:
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self,u,v):
self.graph[u].append(v)
def DFSUtil(self,v,visited):
visited[v]= True
print v,
for i in self.graph[v]:
if visited[i] == False:
self.DFSUtil(i, visited)
def DFS(self,v):
visited = [False]*(len(self.graph))
self.DFSUtil(v,visited)
# Interactive driver (Python 2): read edge count, the edges, then a start
# node, and print the DFS order.
g = Graph()
# NOTE(review): Python 2 input() evaluates the typed expression;
# raw_input() returns the raw string.
edges = input("input the number of edges : ")
print "enter nodes with zero based indexing : "
for i in range(edges):
    a, b = map(int, raw_input().split())
    g.addEdge(a, b)
check = input("DFS check from node : ")
g.DFS(check)
from io import BytesIO
from .virtualizedproc import signature
class MixGrabOutput(object):
    """Mixin that captures a sandboxed subprocess's stdout/stderr writes
    into an in-memory buffer, up to a configurable byte limit.

    Designed for cooperative multiple inheritance: writes to other file
    descriptors are delegated to the next class in the MRO.
    """

    def __init__(self, *args, **kwds):
        self._write_buffer = BytesIO()
        # Pop before super() so the extra kwarg doesn't reach the next
        # __init__ in the MRO; default cap is ~1 MB of captured output.
        self._write_buffer_limit = kwds.pop('write_buffer_limit', 1000000)
        super(MixGrabOutput, self).__init__(*args, **kwds)

    @signature("write(ipi)i")
    def s_write(self, fd, p_buf, count):
        """Writes to stdout or stderr are copied to an internal buffer."""
        if fd != 1 and fd != 2:
            # Not stdout/stderr: defer to the next handler in the MRO.
            return super(MixGrabOutput, self).s_write(fd, p_buf, count)
        # NOTE(review): self.sandio is presumably provided by a sibling
        # class in the cooperating hierarchy -- confirm against
        # virtualizedproc.
        data = self.sandio.read_buffer(p_buf, count)
        if self._write_buffer.tell() + len(data) > self._write_buffer_limit:
            raise Exception("subprocess is writing too much data on "
                            "stdout/stderr")
        self._write_buffer.write(data)
        return count

    def get_all_output(self):
        """Return everything captured so far as bytes."""
        return self._write_buffer.getvalue()
|
import plotly.offline as py
from facebook_data_analysis.tools.helpers import resolve_path
def make_map(coordinates_df):
    """Render connexion locations on a world map and save it as HTML.

    Expects "lon", "lat", "city" and "country" columns in coordinates_df;
    the output file is written to Output/ip_locations_map.html without
    opening a browser.
    """
    marker_style = dict(
        size=8,
        opacity=0.8,
        reversescale=True,
        autocolorscale=False,
        symbol="circle",
        line=dict(width=1, color="rgba(102, 102, 102)"),
    )
    trace = dict(
        type="scattergeo",
        lon=coordinates_df["lon"],
        lat=coordinates_df["lat"],
        text=coordinates_df["city"] + ", " + coordinates_df["country"],
        mode="markers",
        marker=marker_style,
    )
    geo_style = dict(
        scope="world",
        projection=dict(type="winkel tripel"),
        showcountries=True,
        showland=True,
        landcolor="rgb(250, 250, 250)",
        subunitcolor="rgb(217, 217, 217)",
        countrycolor="rgb(217, 217, 217)",
    )
    layout = dict(
        title="Connexion locations based on IP addresses",
        geo=geo_style,
    )
    figure = dict(data=[trace], layout=layout)
    py.plot(
        figure,
        filename=resolve_path("Output", "ip_locations_map.html"),
        auto_open=False,
    )
|
import unittest
from django_no_sql.db import aggregates
from django_no_sql.db.database import Database
from django_no_sql.db.managers import Manager
class TestManager(unittest.TestCase):
    """Integration tests for the Manager query and aggregation API."""

    def setUp(self):
        # Fresh database handle for every test method.
        self.db = Database(import_name=__file__)
        self.db.load_database()

    def test_get(self):
        """A plain keyword lookup returns a list holding the match."""
        results = self.db.manager.get(name='Kendall')
        self.assertIsInstance(results, list)
        first = results[0]
        self.assertEqual(first['name'], 'Kendall')

    def test_complex_get(self):
        """A double-underscore lookup traverses nested dictionaries."""
        results = self.db.manager.get(location__state='Arizona')
        self.assertIsInstance(results, list)
        first = results[0]
        self.assertEqual(first['location']['state'], 'Arizona')

    def test_complex_get_exact(self):
        """The explicit __exact modifier behaves like plain equality."""
        first = self.db.manager.get(location__state__exact='Arizona')[0]
        self.assertEqual(first['location']['state'], 'Arizona')

    def test_complex_get_contains(self):
        """__contains matches on a substring of the nested value."""
        first = self.db.manager.get(location__state__contains='Cali')[0]
        self.assertEqual(first['location']['state'], 'California')

    def test_complex_get_gt(self):
        # FIXME: should return only one record
        first = self.db.manager.get(location__state__contains='Cali')[0]
        self.assertEqual(first['location']['state'], 'California')

    def test_filter(self):
        """filter() returns every record matching the keyword."""
        results = self.db.manager.filter(surname='Jenner')
        self.assertEqual(len(results), 2)
        for item in results:
            self.assertEqual(item['surname'], 'Jenner')

    def test_filter_eq(self):
        results = self.db.manager.filter(age=24)
        self.assertEqual(results[0]['age'], 24)

    def test_filter_gt(self):
        results = self.db.manager.filter(age__gt=23)
        self.assertGreater(results[0]['age'], 23)

    def test_filter_aggregate_sum(self):
        outcome = self.db.manager.all().aggregate(aggregates.Sum('age'))
        self.assertIn('age__sum', outcome)
        self.assertEqual(outcome['age__sum'], 92)

    def test_filter_aggregate_min(self):
        outcome = self.db.manager.all().aggregate(aggregates.Min('age'))
        self.assertIn('age__min', outcome)
        self.assertEqual(outcome['age__min'], 22)

    def test_filter_aggregate_max(self):
        outcome = self.db.manager.all().aggregate(aggregates.Max('height'))
        self.assertIn('height__max', outcome)
        self.assertEqual(outcome['height__max'], 178)

    def test_filter_last(self):
        """last() yields the final record of the collection."""
        expected = {
            "name": "Kylie",
            "surname": "Jenner",
            "age": 22,
            "height": 168,
            "location": {
                "country": "USA",
                "state": "California",
                "city": "Los Angeles"
            }
        }
        self.assertEqual(expected, self.db.manager.last())

    def test_filter_first(self):
        """first() yields the opening record of the collection."""
        expected = {
            "name": "Kendall",
            "surname": "Jenner",
            "age": 24,
            "height": 178,
            "location": {
                "country": "USA",
                "state": "California",
                "city": "Los Angeles"
            }
        }
        self.assertEqual(expected, self.db.manager.first())

    def test_filter_annotate(self):
        # Not implemented yet.
        pass

    def test_filter_values_list(self):
        # Not implemented yet.
        pass
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTests([TestManager])
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PubMed search for Gene Ontology vs for Cognitive Ontology
# Here, I visualize the number of PubMed search results for Gene Ontology vs for Cognitive Ontology, in order to illustrate the success of the gene ontology and the lack of discussion about a cognitive ontology.
#
# The exact search terms were:
# - **Gene ontology**: '"gene* ontolog*" NOT brain'
# - **Cognitive ontology**: '("cognitive ontolog*" OR "functional ontolog*") AND (brain OR neuro*) NOT gene'
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# load files: yearly result counts exported via PubMed's
# "Results by Year" download (line 1 of each CSV holds the search query)
cog_file = "PubMed_Timeline_Results_by_Year__cognitive_ontology.csv"
gen_file = "PubMed_Timeline_Results_by_Year__gene_ontology.csv"
# header=1 skips the "Search query: ..." line; index both frames by year
cog_data = pd.read_csv(cog_file, header=1, index_col='Year')
gen_data = pd.read_csv(gen_file, header=1, index_col='Year')
# rename the generic 'Count' column so the merged frame is self-describing
cog_data.rename(columns={'Count': '\"Cognitive ontology\"'}, inplace=True)
gen_data.rename(columns={'Count': '\"Gene ontology\"'}, inplace=True)
# merge the two dataframes (outer join keeps years present in only one file)
data = gen_data.merge(right=cog_data, how='outer',
                      left_index=True, right_index=True)
# drop the year 2021 (presumably incomplete at search time — confirm)
data.drop(index=2021, inplace=True)
# get the search strings from the CSVs' first line, for labels/captions
cog_search_str = pd.read_csv(cog_file, header=0).columns[0].replace("Search query: ", "")
gen_search_str = pd.read_csv(gen_file, header=0).columns[0].replace("Search query: ", "")
# replace nan's with 0's
# data.fillna(0, inplace=True)
# bare expression: displays the merged table when run as a notebook cell
data
# +
plt.rcParams.update({'font.size': 15})
fig, ax = plt.subplots(figsize=(10,5))
years = list(data.index)
xticks = np.arange(2000, 2021, 2).astype(int)
ax.scatter(years, list(data['"Gene ontology"']), color="silver",
           label="Gene ontology") #\n\" %s \""%gen_search_str)
cog = list(data['"Cognitive ontology"'])
cog_color="black"
ax.scatter(years, cog, label="Cognitive ontology", color=cog_color) #\n\" %s \""%cog_search_str, color='orange')
# annotate each cognitive-ontology point with its count, offset 80 upward
for i, txt in enumerate(cog):
    ax.annotate('%0.f'%txt, (years[i], cog[i]+80), color=cog_color)
ax.set_ylabel('Number of PubMed search results')
ax.set_xlabel('Year')
plt.xticks(xticks)
l = ax.legend(title='', framealpha=0)
# fig_description = ("Figure 1. bla " +
#                    "PubMed search string for Gene ontology:\n%s\n\n" %gen_search_str +
#                    "PubMed search string for Cognitive ontology:\n %s" %cog_search_str)
# plt.title(fig_description, y=-0.9, x=-.1, loc='left')
plt.tight_layout()
# -
|
from data_loader import DataLoader
import tensorflow as tf
import argparse
from utils import get_callbacks_list, get_model_from_id, get_classes, preprocess_image, write_class_on_img
import numpy as np
import cv2
import os
# Fixed input resolution expected by the loaded classifier.
INPUT_SHAPE = (220, 220, 3)


def main(arguments):
    """Classify a video frame by frame and show predictions in a window.

    Loads the Keras model from ``arguments.weights_path``, derives the
    class names from the training-set folder layout, then runs inference
    on every frame of ``arguments.video`` and displays the annotated
    frame until the video ends.
    """
    cap = cv2.VideoCapture(arguments.video)
    try:
        model: tf.keras.Model = tf.keras.models.load_model(arguments.weights_path)
        model.compile(optimizer='adam', loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
                      metrics=['accuracy'])
        classes = get_classes(os.path.join(arguments.data_path, "training_set"))
        success, img = cap.read()
        while success:
            img_pp = preprocess_image(img, INPUT_SHAPE)
            # Inference (single-frame batch)
            x = model.predict(np.expand_dims(img_pp, 0), batch_size=1)
            # Post-process image: write the top-1 class name onto the frame
            img_out = write_class_on_img(img_pp, classes[int(np.argmax(np.array(x)))])
            cv2.imshow("EfficientNet Prediction", img_out)
            cv2.waitKey(10)
            # Read next frame
            success, img = cap.read()
    finally:
        # Release the capture device and close the preview window even if
        # model loading or inference raises (fixes a resource leak).
        cap.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Command-line entry point: build the CLI and hand parsed args to main().
    cli = argparse.ArgumentParser()
    cli.add_argument("--video", type=str, default="resources/basket.mp4")
    cli.add_argument("--data_path", type=str, default="resources/UCF")
    cli.add_argument("--weights_path", type=str, required=True)
    cli.add_argument("--efficientnet_id", type=int, choices=list(range(8)),
                     help="Id of the desired EfficientNetB<id> model", default=0)
    main(cli.parse_args())
|
from __future__ import absolute_import
import json
import numpy as np
from ..app import app
from . import test_utils
from ...plotting import (reset_output, output_server, push, curdoc, figure)
from ...session import TestSession
from ...models.sources import ServerDataSource
from ...models.ranges import Range1d
from ...models.renderers import GlyphRenderer
from ...transforms import ar_downsample as ar
class TestAr(test_utils.FlaskClientTestCase):
    """Round-trip tests for the server-side /render downsampling endpoint.

    Each test builds a plot backed by a ServerDataSource, pushes the
    document to the test server, POSTs a plot_state payload (screen and
    data ranges) to /render, and checks the downsampled data returned.
    """

    def test_ar(self):
        """Abstract-rendering heatmap: POST /render returns a 200+-px image."""
        #move to setUp
        reset_output()
        sess = TestSession(client=app.test_client())
        output_server('ar', session=sess)
        # wierd - server source has no params now besides the blaze expression
        # we don't pass a data_url right now because right now we assume the
        # server is the bokeh server, however that can be handled later
        orig_source = ServerDataSource(expr={'op': 'Field', 'args': [':leaf', 'gauss']})
        #make template plot
        p = figure(x_range=Range1d(start=0, end=0), y_range=Range1d(start=0, end=0))
        plot = p.square('oneA', 'oneB', color='#FF00FF', source=orig_source)
        #replace that plot with an abstract rendering one
        arplot = ar.heatmap(
            plot,
            spread=3,
            transform=None,
            title="Server-rendered, uncorrected")
        # set explicit value for ranges, or else they are set at 0
        # until the javascript auto-sets it
        arplot.x_range = Range1d(start=-2.0, end=2.0)
        arplot.y_range = Range1d(start=-2.0, end=2.0)
        glyph = arplot.select({'type' : GlyphRenderer})[0].glyph
        #extract the original data source because it was replaced?!
        source = arplot.select({'type' : ServerDataSource})[0]
        #our docs don't have screen ranges, because we make those on the fly in javascript
        #so we make fake ones!
        screen_x_range = Range1d(start=0, end=200)
        screen_y_range = Range1d(start=0, end=200)
        #this dumping to json thing is terrible
        plot_state = {'screen_x' : curdoc().dump(screen_x_range)[0]['attributes'],
                      'screen_y' : curdoc().dump(screen_y_range)[0]['attributes'],
                      'data_x' : curdoc().dump(arplot.x_range)[0]['attributes'],
                      'data_y' : curdoc().dump(arplot.y_range)[0]['attributes']}
        #save data to server
        #hack - because recent changes broke AR
        push()
        data = {'plot_state' : plot_state}
        url = "/render/%s/%s/%s" % (curdoc().docid, source._id, glyph._id)
        result = self.client.post(
            url,
            data=json.dumps(data),
            headers={'content-type' : 'application/json'}
        )
        assert result.status_code == 200
        data = json.loads(result.data.decode('utf-8'))
        image = np.array(data['data']['image'][0])
        #I guess it's data dependent so the shape changes....
        assert image.shape[0] >200
        assert image.shape[1] >200

    def test_line1d_downsample(self):
        """line1d minmax downsampling: 2 points per screen pixel (200 -> 400)."""
        reset_output()
        sess = TestSession(client=app.test_client())
        output_server('ar', session=sess)
        source = ServerDataSource(expr={'op': 'Field', 'args': [':leaf', 'aapl']})
        # minmax keeps both extremes of each pixel-wide bin along x.
        source.transform = dict(direction='x',
                                resample='line1d',
                                method='minmax')
        # hacky - we have to specify range, otherwise code doesn't know how to serialize
        # data ranges
        p = figure(x_range=Range1d(start=0, end=0), y_range=Range1d(start=0, end=0))
        plot = p.line('date', 'close',
                      x_axis_type = "datetime",
                      color='#A6CEE3', tools="pan,wheel_zoom,box_zoom,reset,previewsave",
                      source=source,
                      legend='AAPL')
        push()
        screen_x_range = Range1d(start=0, end=200)
        screen_y_range = Range1d(start=0, end=200)
        plot_state = {'screen_x' : curdoc().dump(screen_x_range)[0]['attributes'],
                      'screen_y' : curdoc().dump(screen_y_range)[0]['attributes'],
                      'data_x' : curdoc().dump(plot.x_range)[0]['attributes'],
                      'data_y' : curdoc().dump(plot.y_range)[0]['attributes']}
        # auto_bounds lets the server derive data bounds from the data itself.
        data = {'plot_state' : plot_state, 'auto_bounds' : 'True'}
        glyph = plot.select({'type' : GlyphRenderer})[0].glyph
        url = "/render/%s/%s/%s" % (curdoc().docid, source._id, glyph._id)
        result = self.client.post(
            url,
            data=json.dumps(data),
            headers={'content-type' : 'application/json'}
        )
        assert result.status_code == 200
        data = json.loads(result.data.decode('utf-8'))
        #2 x plot size (200)
        assert len(data['data']['close']) == 400

    def test_heatmap_downsample(self):
        """heatmap downsampling of an ndarray: result matches screen size."""
        reset_output()
        sess = TestSession(client=app.test_client())
        output_server('ar', session=sess)
        source = ServerDataSource(expr={'op': 'Field', 'args': [':leaf', 'array']})
        source.transform = dict(resample='heatmap',
                                global_x_range=[0, 10],
                                global_y_range=[0, 10],
                                global_offset_x=0,
                                global_offset_y=0,
                                type="ndarray",
                                )
        # hacky - we have to specify range, otherwise code doesn't know how to serialize
        # data ranges
        p = figure(x_range=Range1d(start=0, end=10), y_range=Range1d(start=0, end=10))
        plot = p.image(image="image",
                       x='x',
                       y='y',
                       dw='dw',
                       dh='dh',
                       source=source,
                       )
        push()
        screen_x_range = Range1d(start=0, end=200)
        screen_y_range = Range1d(start=0, end=200)
        plot_state = {'screen_x' : curdoc().dump(screen_x_range)[0]['attributes'],
                      'screen_y' : curdoc().dump(screen_y_range)[0]['attributes'],
                      'data_x' : curdoc().dump(plot.x_range)[0]['attributes'],
                      'data_y' : curdoc().dump(plot.y_range)[0]['attributes']}
        data = {'plot_state' : plot_state}
        glyph = plot.select({'type' : GlyphRenderer})[0].glyph
        url = "/render/%s/%s/%s" % (curdoc().docid, source._id, glyph._id)
        result = self.client.post(
            url,
            data=json.dumps(data),
            headers={'content-type' : 'application/json'}
        )
        assert result.status_code == 200
        data = json.loads(result.data.decode('utf-8'))
        #2 x plot size (200)
        assert np.array(data['data']['image'][0]).shape == (200,200)
|
__version__ = "3.8.8"
import pathlib # isort:skip
root = pathlib.Path(__file__).parent # isort:skip
APP_NAME = "SPFY"
import os # isort:skip
import kick # isort:skip
kick.start(APP_NAME.lower()) # isort:skip
from kick import config, logger # isort:skip
Unsplash = None
if config.unsplash.auth.client_id:
from unsplash import Auth, Api
auth = Auth(**config.unsplash.auth)
Unsplash = Api(auth)
from .client import SpotifyClient
from .constants import *
from .exceptions import *
from .result import SpotifyResult
from .wrapper import Spotify
|
import functools
import time
from django.db import connection, reset_queries
def query_debugger(func):
    """Decorator that reports how many SQL queries *func* executed.

    Resets Django's per-connection query log before calling *func*, then
    prints the function name, the number of queries issued during the
    call and the wall-clock duration. The wrapped function's return
    value is passed through unchanged. Django only records queries when
    DEBUG is enabled, so this is a development-time tool.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        reset_queries()
        # Should be 0 right after reset_queries(); kept as a baseline anyway.
        number_of_start_queries = len(connection.queries)
        start = time.perf_counter()
        result = func(*args, **kwargs)
        end = time.perf_counter()
        number_of_end_queries = len(connection.queries)
        # Plain string literals: the originals were f-strings with no
        # placeholders (ruff F541); output is byte-identical.
        print("-------------------------------------------------------------------")
        print(f"Function : {func.__name__}")
        print(f"Number of Queries : {number_of_end_queries-number_of_start_queries}")
        print(f"Finished in : {(end - start):.2f}s")
        print("-------------------------------------------------------------------")
        return result
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.