content stringlengths 5 1.05M |
|---|
import sys
import json
import logging
import blosc
import boto3
import hashlib
import numpy as np
from PIL import Image
from spdb.c_lib.ndtype import CUBOIDSIZE
from spdb.c_lib import ndlib
from bossutils.multidimensional import XYZ, Buffer
from bossutils.multidimensional import range as xyz_range
# Module-level logging setup: emit DEBUG records to stderr via a StreamHandler.
# NOTE(review): this `handler` name is later shadowed by the lambda entry point
# `def handler(args, context)`; the StreamHandler is already attached to `log`
# by then, so logging still works, but the rebinding is worth confirming.
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(handler)
# Map of BOSS channel datatype names to the numpy dtypes used for cuboid buffers.
np_types = {
    'uint64': np.uint64,
    'uint16': np.uint16,
    'uint8': np.uint8,
}
#### Helper functions and classes ####
def HashedKey(*args, version=None):
    """Build a BOSS object key.

    Joins the given non-None key elements with '&' and prefixes the result
    with the MD5 hexdigest of the joined string (so keys hash-distribute
    evenly). Common usage passes collection_id, experiment_id, channel_id,
    resolution, time_sample, and the cube's Morton ID.

    Keyword Args:
        version: optional object version, appended last and NOT part of
            the hashed value.
    """
    base = '&'.join(str(part) for part in args if part is not None)
    hashed = '{}&{}'.format(hashlib.md5(base.encode()).hexdigest(), base)
    if version is None:
        return hashed
    return '{}&{}'.format(hashed, version)
class S3Bucket(object):
    """Wrapper for calls to S3.

    Wraps boto3 calls to upload and download cuboid data from one bucket.
    """

    def __init__(self, bucket):
        self.bucket = bucket
        self.s3 = boto3.client('s3')

    def _check_error(self, resp, action):
        """Raise if the boto3 response carries a non-200 HTTP status."""
        if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
            raise Exception("Error {} cuboid to/from S3".format(action))

    def get(self, key):
        """Download and return the raw bytes stored under `key`.

        Raises:
            Exception: "No Such Key" if the object could not be fetched.
        """
        try:
            resp = self.s3.get_object(Key=key, Bucket=self.bucket)
        except Exception as ex:
            # FIX: narrowed from a bare `except:` (which also caught
            # KeyboardInterrupt/SystemExit) and chained so the original
            # boto3 error is not silently discarded.
            raise Exception("No Such Key") from ex
        self._check_error(resp, "reading")
        return resp['Body'].read()

    def put(self, key, data):
        """Upload `data` under `key` and verify the response status."""
        resp = self.s3.put_object(Key=key, Body=data, Bucket=self.bucket)
        self._check_error(resp, "writing")
class S3IndexKey(dict):
    """Key/item dict for the DynamoDB S3 index table.

    Args:
        obj_key: HashedKey of the stored data
        version: object version number
        job_hash: optional ingest job hash
        job_range: optional ingest job range string
    """

    def __init__(self, obj_key, version=0, job_hash=None, job_range=None):
        entries = {
            'object-key': {'S': obj_key},
            'version-node': {'N': str(version)},
        }
        if job_hash is not None:
            entries['ingest-job-hash'] = {'S': str(job_hash)}
        if job_range is not None:
            entries['ingest-job-range'] = {'S': job_range}
        super().__init__(entries)
class IdIndexKey(dict):
    """Key dict for the DynamoDB ID index table.

    Args:
        chan_key: key for the resource channel
        version: object version number
    """

    def __init__(self, chan_key, version=0):
        super().__init__({
            'channel-id-key': {'S': chan_key},
            'version': {'N': str(version)},
        })
class DynamoDBTable(object):
    """Wrapper for calls to DynamoDB.

    Wraps boto3 calls to create and update DynamoDB entries.
    Supports updates for both the S3 index and the ID index tables.
    """

    def __init__(self, table):
        self.table = table
        self.ddb = boto3.client('dynamodb')

    def _check_error(self, resp, action):
        """Raise if the boto3 response carries a non-200 HTTP status."""
        if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
            raise Exception("Error {} index information to/from/in DynamoDB".format(action))

    def put(self, item):
        """Store `item`, raising a generic error on any failure."""
        try:
            self.ddb.put_item(TableName=self.table,
                              Item=item,
                              ReturnConsumedCapacity='NONE',
                              ReturnItemCollectionMetrics='NONE')
        except Exception as ex:
            # FIX: narrowed from a bare `except:` and chained so the boto3
            # error (throttling, validation, ...) remains visible for debugging.
            raise Exception("Error adding item to DynamoDB Table") from ex

    def update_ids(self, key, ids):
        """Add the given numeric id strings to the item's 'id-set' attribute."""
        resp = self.ddb.update_item(TableName=self.table,
                                    Key=key,
                                    UpdateExpression='ADD #idset :ids',
                                    ExpressionAttributeNames={'#idset': 'id-set'},
                                    ExpressionAttributeValues={':ids': {'NS': ids}},
                                    ReturnConsumedCapacity='NONE')
        self._check_error(resp, 'updating')

    def update_id(self, key, obj_key):
        """Add `obj_key` to the item's 'cuboid-set' string-set attribute."""
        resp = self.ddb.update_item(TableName=self.table,
                                    Key=key,
                                    UpdateExpression='ADD #cuboidset :objkey',
                                    ExpressionAttributeNames={'#cuboidset': 'cuboid-set'},
                                    ExpressionAttributeValues={':objkey': {'SS': [obj_key]}},
                                    ReturnConsumedCapacity='NONE')
        self._check_error(resp, 'updating')

    def exists(self, key):
        """Return True if an item with the given key exists (consistent read)."""
        resp = self.ddb.get_item(TableName=self.table,
                                 Key=key,
                                 ConsistentRead=True,
                                 ReturnConsumedCapacity='NONE')
        return 'Item' in resp
#### Main lambda logic ####
def downsample_volume(args, target, step, dim, use_iso_key, index_annotations):
    """Downsample a volume into a single cube.

    Download `step` cubes from S3, downsample them into a single cube, upload
    to S3 and update the S3 index for the new cube.

    Args:
        args {
            collection_id (int)
            experiment_id (int)
            channel_id (int)
            annotation_channel (bool)
            data_type (str) 'uint8' | 'uint16' | 'uint64'
            s3_bucket (URL)
            s3_index (URL)
            id_index (URL)
            resolution (int) The resolution to downsample. Creates resolution + 1
            type (str) 'isotropic' | 'anisotropic'
            iso_resolution (int) if resolution >= iso_resolution && type == 'anisotropic' downsample both
        }
        target (XYZ) : Corner of volume to downsample
        step (XYZ) : Extent of the volume to downsample
        dim (XYZ) : Dimensions of a single cube
        use_iso_key (boolean) : If the BOSS keys should include an 'ISO=' flag
        index_annotations (boolean) : If annotation ids of the new cube should
            be written to the id index table
    """
    log.debug("Downsampling {}".format(target))
    # Hard coded values
    version = 0
    t = 0
    dim_t = 1  # NOTE(review): unused in this function; kept for parity with callers
    iso = 'ISO' if use_iso_key else None
    # If anisotropic and resolution is when neariso is reached, the first
    # isotropic downsample needs to use the anisotropic data. Future isotropic
    # downsamples will use the previous isotropic data.
    parent_iso = None if args['resolution'] == args['iso_resolution'] else iso
    col_id = args['collection_id']
    exp_id = args['experiment_id']
    chan_id = args['channel_id']
    data_type = args['data_type']
    annotation_chan = args['annotation_channel']
    resolution = args['resolution']
    s3 = S3Bucket(args['s3_bucket'])
    s3_index = DynamoDBTable(args['s3_index'])
    id_index = DynamoDBTable(args['id_index'])
    # Download all of the cubes that will be downsamples
    volume = Buffer.zeros(dim * step, dtype=np_types[data_type], order='C')
    volume.dim = dim
    volume.cubes = step
    volume_empty = True  # abort if the volume doesn't exist in S3
    for offset in xyz_range(step):
        cube = target + offset
        try:
            obj_key = HashedKey(parent_iso, col_id, exp_id, chan_id, resolution, t, cube.morton, version=version)
            data = s3.get(obj_key)
            data = blosc.decompress(data)
            # DP ???: Check to see if the buffer is all zeros?
            data = Buffer.frombuffer(data, dtype=np_types[data_type])
            # Reshape the flat decompressed bytes into a single cube's extent.
            data.resize(dim)
            #log.debug("Downloaded cube {}".format(cube))
            volume[offset * dim: (offset + 1) * dim] = data
            volume_empty = False
        except Exception as e:  # TODO: Create custom exception for S3 download
            #log.exception("Problem downloading cubes {}".format(cube))
            #log.debug("No cube at {}".format(cube))
            # Eat the error, we don't care if the cube doesn't exist
            # If the cube doesn't exist blank data will be used for downsampling
            # If all the cubes don't exist, then the downsample is finished
            pass
    if volume_empty:
        log.debug("Completely empty volume, not downsampling")
        return
    # Create downsampled cube
    new_dim = XYZ(*CUBOIDSIZE[resolution + 1])
    cube = Buffer.zeros(new_dim, dtype=np_types[data_type], order='C')
    cube.dim = new_dim
    cube.cubes = XYZ(1, 1, 1)
    downsample_cube(volume, cube, annotation_chan)
    target = target / step  # scale down the output
    # Save new cube in S3
    obj_key = HashedKey(iso, col_id, exp_id, chan_id, resolution + 1, t, target.morton, version=version)
    compressed = blosc.compress(cube, typesize=(np.dtype(cube.dtype).itemsize))
    s3.put(obj_key, compressed)
    # Update indicies
    # Same key scheme, but without the version
    obj_key = HashedKey(iso, col_id, exp_id, chan_id, resolution + 1, t, target.morton)
    # Create S3 Index if it doesn't exist
    idx_key = S3IndexKey(obj_key, version)
    if not s3_index.exists(idx_key):
        ingest_job = 0  # Valid to be 0, as posting a cutout uses 0
        idx_key = S3IndexKey(obj_key,
                             version,
                             col_id,
                             '{}&{}&{}&{}'.format(exp_id, chan_id, resolution + 1, ingest_job))
        s3_index.put(idx_key)
    if annotation_chan and index_annotations:
        ids = ndlib.unique(cube)
        # Convert IDs to strings and drop any IDs that equal zero
        ids = [str(id) for id in ids if id != 0]
        if len(ids) > 0:
            # Record the new cube's ids in the S3 index, and the cube's key
            # under each id in the id index.
            idx_key = S3IndexKey(obj_key, version)
            s3_index.update_ids(idx_key, ids)
            for id in ids:
                idx_key = HashedKey(iso, col_id, exp_id, chan_id, resolution + 1, id)
                chan_key = IdIndexKey(idx_key, version)
                id_index.update_id(chan_key, obj_key)
def downsample_cube(volume, cube, is_annotation):
    """Downsample the given Buffer into the target Buffer.

    Note: Both volume and cube have the following attributes
        dim (XYZ) : The dimensions of the cubes contained in the Buffer
        cubes (XYZ) : The number of cubes of size dim contained in the Buffer
        dim * cubes == Buffer.shape

    Args:
        volume (Buffer) : Raw numpy array of input cube data
        cube (Buffer) : Raw numpy array for output data
        is_annotation (boolean) : If the downsample should be an annotation downsample

    Raises:
        Exception: if the image data is not uint8 or uint16.
    """
    if is_annotation:
        # Use a C implementation to downsample each value
        ndlib.addAnnotationData_ctype(volume, cube, volume.cubes.zyx, volume.dim.zyx)
    else:
        if volume.dtype == np.uint8:
            image_type = 'L'
        elif volume.dtype == np.uint16:
            image_type = 'I;16'
        else:
            raise Exception("Unsupported type for image downsampling '{}'".format(volume.dtype))
        for z in range(cube.dim.z):
            # DP NOTE: For isotropic downsample this skips Z slices, instead of trying to merge them
            # FIX: renamed `slice` -> `zslice` so the builtin slice() is not shadowed.
            zslice = volume[z * volume.cubes.z, :, :]
            image = Image.frombuffer(image_type,
                                     (volume.shape.x, volume.shape.y),
                                     zslice.flatten(),
                                     'raw',
                                     image_type,
                                     0, 1)
            cube[z, :, :] = Buffer.asarray(image.resize((cube.shape.x, cube.shape.y), Image.BILINEAR))
def handler(args, context):
    """Lambda entry point: convert JSON arguments into internal types.

    The 'target', 'step', and 'dim' entries arrive as JSON lists and are
    converted to XYZ tuples before dispatching to downsample_volume.
    """
    for key in ('target', 'step', 'dim'):
        args[key] = XYZ(*args[key])
    downsample_volume(args['args'],
                      args['target'],
                      args['step'],
                      args['dim'],
                      args['use_iso_flag'],
                      args['index_annotations'])
## Entry point for multiLambda ##
log.debug("sys.argv[1]: " + sys.argv[1])
args = json.loads(sys.argv[1])
handler(args, None)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import time
from ava.util.clock import Clock
import pytest
@pytest.fixture
def clock():
    """Provide a fresh Clock instance to each test that requests it."""
    return Clock()
class TestClock(object):
def test_milliseconds_round_up(self):
t = time.time()
pt = round((t + 0.0005), 3)
assert pt >= t
def test_clock_should_increment_monotonically(self, clock):
prev = clock.timestamp()
for _ in xrange(2000):
cur = clock.tick()
# print('%r' % cur)
assert cur > prev
prev = cur
def test_create_clock_from_timestamp(self):
c = Clock(94132454961709074)
assert c.l == 1436347274196
assert c.c == 18
def test_clock_seconds_should_be_bounded(self):
c = Clock().seconds()
t = time.time()
assert (c - t) < 0.002
def test_clock_tick_with_timestamp(self, monkeypatch):
def mytime():
return 1436345964.484081
monkeypatch.setattr(time, 'time', mytime)
# print(time.time())
c = Clock(94132454961709074)
assert c.l == 1436347274196
assert c.c == 18
c.update(94132454961709075)
assert c.c == 20 # tick() increments c by 1 as well
if __name__ == '__main__':
    # Manual smoke test: print 100 ticks alongside wall-clock time,
    # one every 100 ms.
    clock = Clock()
    for i in range(100):
        clock.tick()
        print("%r, %r" % (clock.timestamp(), time.time()))
        time.sleep(0.1)
|
"""Simple audio examples"""
from taped import LiveWf
from know.util import pairwise, source_slices
from functools import partial
# Factory for a live audio stream with sensible defaults;
# callers can override any of these keyword arguments.
mk_audio_stream = partial(
    LiveWf,
    input_device_index=None,  # if None, will try to guess the device index
    sr=44100,                 # sample rate (Hz)
    sample_width=2,           # bytes per sample
    chk_size=4096,
    stream_buffer_size_s=60,  # seconds of audio kept in the ring buffer
)
def get_some_audio_chunks(
    intervals_size=2048 * 21, n_chks=3, **audio_stream_kwargs
):
    """Grab n_chks consecutive chunks of intervals_size samples of live audio.

    Args:
        intervals_size: number of samples per chunk.
        n_chks: number of chunks to read.
        **audio_stream_kwargs: overrides forwarded to mk_audio_stream.

    Returns:
        A list of audio chunks sliced from the live stream.
    """
    # FIX: was `n_chks * n_chks`, which (9 with defaults) is smaller than a
    # single interval, so pairwise(range(...)) produced no intervals at all.
    total_size = n_chks * intervals_size
    intervals = pairwise(range(0, total_size + 1, intervals_size))
    _mk_audio_stream = partial(mk_audio_stream, **audio_stream_kwargs)
    return list(source_slices(_mk_audio_stream, intervals=intervals))
|
from django import template
from django.contrib.sites.models import Site
from django.conf import settings
from dinette.models import Ftopics, SiteConfig, NavLink
register = template.Library()
class BaseDinetteNode(template.Node):
    """Base node implementing the `{% tag as varname %}` parsing pattern."""

    @classmethod
    def handle_token(cls, parser, token):
        bits = token.contents.split()
        if len(bits) != 3:
            # No `as varname` clause: use the node's default variable name.
            return cls()
        if bits[1] != "as":
            raise template.TemplateSyntaxError("Second argument in %r must be 'as'" % bits[0])
        return cls(as_varname=bits[2])
class GetAnnouncementNode(BaseDinetteNode):
    """Put the latest announcement topic into the context, if any exists."""

    def __init__(self, as_varname='announcement'):
        self.as_varname = as_varname

    def render(self, context):
        # FIX: the original returned None (not a string) when there was no
        # announcement, because `return ''` was only reached from inside the
        # `if` branch; Node.render must always return a string.
        try:
            ancount = Ftopics.objects.filter(announcement_flag=True).count()
            if ancount > 0:
                announcement = Ftopics.objects.filter(announcement_flag=True).latest()
                context[self.as_varname] = announcement
        except Ftopics.DoesNotExist:
            pass
        return ''
@register.tag
def get_announcement(parser, token):
    """`{% get_announcement [as varname] %}` template tag."""
    return GetAnnouncementNode.handle_token(parser, token)
class GetNavLinksNode(BaseDinetteNode):
    """Put the full list of forum-wide navigation links into the context."""

    def __init__(self, as_varname='nav_links'):
        self.as_varname = as_varname

    def render(self, context):
        links = NavLink.objects.all()
        context[self.as_varname] = links
        return ''
@register.tag
def get_forumwide_links(parser, token):
    """`{% get_forumwide_links [as varname] %}` template tag."""
    return GetNavLinksNode.handle_token(parser, token)
@register.simple_tag
def get_site_name():
    """Return the configured forum name, or '' if not configured."""
    try:
        return SiteConfig.objects.get(id=1).name
    except SiteConfig.DoesNotExist:
        return ''
@register.simple_tag
def get_site_tag_line():
    """Return the configured forum tag line, or '' if not configured."""
    try:
        return SiteConfig.objects.get(id=1).tag_line
    except SiteConfig.DoesNotExist:
        return ''
@register.simple_tag
def get_main_site_name():
    """Return the current django.contrib.sites Site name, or '' on failure."""
    try:
        name = Site.objects.get_current().name
        return name
    except Exception:
        # FIX: narrowed from a bare `except:` which would also swallow
        # KeyboardInterrupt/SystemExit.
        return ''
@register.simple_tag
def get_main_site_domain():
    """Return the current django.contrib.sites Site domain, or '' on failure."""
    try:
        domain = Site.objects.get_current().domain
        return domain
    except Exception:
        # FIX: narrowed from a bare `except:` which would also swallow
        # KeyboardInterrupt/SystemExit.
        return ''
@register.simple_tag
def get_login_url():
    """Expose settings.LOGIN_URL to templates."""
    return settings.LOGIN_URL
@register.simple_tag
def get_logout_url():
    """Expose settings.LOGOUT_URL to templates."""
    return settings.LOGOUT_URL
|
import sys
import os
from scipy import stats
import tenseal as ts
import numpy as np
from scipy.stats import chi2
sys.path.append(os.path.abspath('../../tensealstat'))
from tensealstat.tools_context import ToolsContext as tc
from tensealstat.algebra.algebra_numpy import AlgebraNumpy
from tensealstat.algebra.algebra_tenseal import AlgebraTenseal
from tensealstat.statistic.chi2_variance import Chi2Variance
# Worked example: chi-squared test of the variance of a single sample,
# computed homomorphically with TenSEAL and cross-checked with plain numpy.
variance_h0 = 0.01  # variance under the null hypothesis
statistic_generator = Chi2Variance(variance_h0)
'''
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
'''
# 1 done by the key holder
context = tc.get_context_default()
algebra_tenseal = AlgebraTenseal(context)
# 2 done by the data holders
# NIST/SEMATECH e-handbook example data (100 observations).
sample_0 = [ 1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000 ]
sample_0.extend([ 0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988 ])
sample_0.extend([ 0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996 ])
sample_0.extend([ 1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996 ])
sample_0.extend([ 0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996 ])
sample_0.extend([ 1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996 ])
sample_0.extend([ 0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002 ])
sample_0.extend([ 0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006 ])
sample_0.extend([ 1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991 ])
sample_0.extend([ 0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997 ])
list_sample = [sample_0]
list_sample_encrypted = [algebra_tenseal.encrypt_vector(sample) for sample in list_sample]
# 3 done by the aggregator (works only on encrypted data)
statistic_encrypted = statistic_generator.encrypt_statistic(algebra_tenseal, list_sample_encrypted)
# 4 done by the key holder (decrypts and evaluates the p-value)
chi2_statistic, degrees_of_freedom_0 = statistic_generator.decrypt_statistic(algebra_tenseal, statistic_encrypted)
p_value = chi2.cdf(chi2_statistic, degrees_of_freedom_0)
# Assert.IsTrue(chi2_statistic ~= 0.3903)
# Assert.IsTrue(p_value ~= 0.0)
print('via tensealstat')
print('chi2_statistic: ' + str(chi2_statistic))
print('p_value: ' + str(p_value))
# Reference run: same statistic on plaintext data via the numpy backend,
# to sanity-check the homomorphic result above.
algebra_numpy = AlgebraNumpy()
statistic_encrypted = statistic_generator.encrypt_statistic(algebra_numpy, list_sample)
chi2_statistic, degrees_of_freedom_0 = statistic_generator.decrypt_statistic(algebra_numpy, statistic_encrypted)
p_value = chi2.cdf(chi2_statistic, degrees_of_freedom_0)
# Assert.IsTrue(chi2_statistic ~= 0.3903)
# Assert.IsTrue(p_value ~= 0.0)
print('via tensealstat')
print('chi2_statistic: ' + str(chi2_statistic))
print('p_value: ' + str(p_value))
"""
Classes to define hierarchical configuration options which support inheriting
from other options, default values, overriding options, etc.
"""
from abc import ABC, abstractmethod
from copy import deepcopy
from dataclasses import dataclass
import os
from pathlib import Path
from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple, Union
from flit_core.config import ConfigError
# A configuration path is a tuple of key components, e.g. ('tool', 'name').
ConfPath = Tuple[str, ...]
# A leaf config value: a string, list of strings, or a nested raw dict (or unset).
ConfValue = Optional[Union[str, List[str], Dict[str, Any]]]
# A raw (unparsed) configuration mapping.
Conf = Dict[str, Any]
def pth(s: str) -> ConfPath:
    """Parse a '/'-separated string into a configuration path tuple."""
    return tuple(s.split('/')) if s else ()
def pth2str(p: ConfPath) -> str:
    """Render a configuration path tuple as a '/'-separated string."""
    return '/'.join(p)
def joinpth(p1: ConfPath, p2: ConfPath) -> ConfPath:
    """Concatenate two paths, resolving leading '^' components of p2 like '..'."""
    while p2 and p2[0] == '^' and p1:
        p1, p2 = p1[:-1], p2[1:]
    return p1 + p2
def hasparent(path: ConfPath) -> bool:
    """Return True if path is non-empty, i.e. it has a parent path."""
    return len(path) >= 1
def parent(path: ConfPath) -> ConfPath:
    """Return path without its last component; raise if path is empty."""
    if hasparent(path):
        return path[:-1]
    raise RuntimeError(f"Path {pth2str(path)} does not have a parent")
def basename(path: ConfPath) -> str:
    """Return the last component of the path."""
    return path[-1]
class ConfigNode:
    """A node in the configuration value tree.

    Each node carries an optional leaf `value` and an optional dict of named
    children `sub`. A node whose `sub` is None is a pure leaf.
    """

    def __init__(self,
                 value: ConfValue = None,
                 sub: Optional[Dict[str, 'ConfigNode']] = None) -> None:
        # FIX: the `sub` parameter annotation lacked Optional even though its
        # default is None (PEP 484).
        self.value: ConfValue = value
        self.sub: Optional[Dict[str, 'ConfigNode']] = sub

    @classmethod
    def from_dict(cls, d: dict):
        """Recursively build a ConfigNode tree from a plain nested dict."""
        node = cls()
        node.sub = {}
        for k, v in d.items():
            if isinstance(v, dict):
                node.sub[k] = cls.from_dict(v)
            else:
                node.sub[k] = cls(value=v)
        return node

    def to_dict(self):
        """Inverse of from_dict: collapse the tree back into plain values."""
        if self.sub is None:
            return self.value
        return {k: v.to_dict() for k, v in self.sub.items()}

    def iter_dfs(self, path: ConfPath = ()):
        """Yield (path, value) pairs for this node and all descendants (DFS)."""
        yield path, self.value
        if self.sub is not None:
            for name, sub in self.sub.items():
                yield from sub.iter_dfs(path + (name, ))

    def __getitem__(self, key):
        """Index by a single component (str) or by a full path (tuple)."""
        if isinstance(key, str):
            if self.sub is None:
                raise KeyError()
            return self.sub[key]
        elif isinstance(key, tuple):
            if len(key) == 0:
                return self
            elif self.sub is None:
                raise KeyError()
            else:
                return self.sub[key[0]][key[1:]]
        else:
            raise TypeError(key)

    def get(self, key: ConfPath, default=None):
        """Like dict.get, but for paths."""
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, path: ConfPath, default: Any):
        """Insert `default` at `path` if absent and return the node at `path`.

        The parent of `path` must already exist.
        """
        tgt = self[parent(path)]
        if tgt.sub is None:
            tgt.sub = {}
        return tgt.sub.setdefault(basename(path), default)

    def contains(self, path: ConfPath):
        """Return True if the path resolves to an existing node."""
        try:
            self[path]
            return True
        except KeyError:
            return False
@dataclass
class DefaultValueWrapper:
    # Wraps a resolved default so that "a default of None" can be
    # distinguished from "no default available" (plain None).
    value: ConfValue
class DefaultValue(ABC):
    """Strategy interface that produces a default value for a config option."""

    @abstractmethod
    def get_default(self, rootopts: 'ConfigOption', opt: 'ConfigOption',
                    cfg: ConfigNode, cfgpath: ConfPath,
                    optpath: ConfPath) -> Optional[DefaultValueWrapper]:
        """Return the wrapped default value, or None if there is none."""
        ...

    @abstractmethod
    def get_name(self) -> str:
        """Return a human-readable name of the default (for documentation)."""
        ...
class DefaultValueValue(DefaultValue):
    """A fixed, literal default value."""

    def __init__(self, value: ConfValue) -> None:
        self.value: ConfValue = value

    def get_default(self, rootopts: 'ConfigOption', opt: 'ConfigOption',
                    cfg: ConfigNode, cfgpath: ConfPath,
                    optpath: ConfPath) -> Optional[DefaultValueWrapper]:
        # Always available, regardless of the rest of the configuration.
        return DefaultValueWrapper(self.value)

    def get_name(self):
        return repr(self.value)
class NoDefaultValue(DefaultValue):
    """Marker: the option has no default value at all."""

    def get_default(self, rootopts: 'ConfigOption', opt: 'ConfigOption',
                    cfg: ConfigNode, cfgpath: ConfPath,
                    optpath: ConfPath) -> Optional[DefaultValueWrapper]:
        return None

    def get_name(self):
        return 'none'
class MissingDefaultError(ConfigError):
    """Raised when a required option has no value in the configuration."""
    pass
class RequiredValue(DefaultValue):
    """Marker: the user must provide a value; asking for a default is an error."""

    def get_default(self, rootopts: 'ConfigOption', opt: 'ConfigOption',
                    cfg: ConfigNode, cfgpath: ConfPath,
                    optpath: ConfPath) -> Optional[DefaultValueWrapper]:
        raise MissingDefaultError(f'{pth2str(cfgpath)} requires a value')

    def get_name(self):
        return 'required'
class RefDefaultValue(DefaultValue):
    """Default that resolves to the value of another option.

    The referenced path is absolute, or (when relative=True) relative to the
    current option, with '^' components acting like '..'.
    """

    def __init__(self, path: ConfPath, relative: bool = False) -> None:
        super().__init__()
        self.path: ConfPath = path
        self.relative = relative

    def get_default(self, rootopts: 'ConfigOption', opt: 'ConfigOption',
                    cfg: ConfigNode, cfgpath: ConfPath,
                    optpath: ConfPath) -> Optional[DefaultValueWrapper]:
        abscfgpath = absoptpath = self.path
        if self.relative:
            # Resolve relative to the parent of the current option/config path.
            absoptpath = joinpth(optpath, ('^', ) + absoptpath)
            abscfgpath = joinpth(cfgpath, ('^', ) + abscfgpath)
        opt = rootopts.get(absoptpath)
        if opt is None:
            raise ValueError("DefaultValue: reference to nonexisting option "
                             f"{pth2str(absoptpath)}")
        # Delegate to the referenced option, which also fills in its default.
        return opt.update_default(rootopts, cfg, abscfgpath, absoptpath)

    def get_name(self) -> str:
        r = pth2str(self.path).replace('^', '..')
        if not self.relative:
            r = '/' + r
        return r
class ConfigOption:
    """A node in the tree of supported configuration options.

    Mirrors the ConfigNode value tree: each ConfigOption describes one key
    that may appear in the configuration, its default value, and optionally
    another option path it inherits values from.
    """
    # Subclasses may set this to accept keys not declared as sub-options.
    allow_unknown_keys = False

    def __init__(self,
                 name: str,
                 description: str = '',
                 example: str = '',
                 default: DefaultValue = NoDefaultValue(),
                 inherit_from: Optional[ConfPath] = None,
                 create_if_inheritance_target_exists: bool = False) -> None:
        self.name = name
        self.description = description
        self.example = example
        self.sub: Dict[str, 'ConfigOption'] = {}
        self.default: DefaultValue = default
        self.inherit_from: Optional[ConfPath] = inherit_from
        self.create_if_inheritance_target_exists = create_if_inheritance_target_exists

    def get_typename(self):
        """Name of this option's value type for documentation (None = untyped)."""
        return None

    def insert(self, opt: 'ConfigOption'):
        """Add a sub-option (its name must not already exist) and return it."""
        assert opt.name not in self.sub
        self.sub[opt.name] = opt
        return self.sub[opt.name]

    def insert_multiple(self, opts: Iterable['ConfigOption']):
        """Add several sub-options."""
        for opt in opts:
            self.insert(opt)

    def iter_opt_paths(self) -> Iterator[ConfPath]:
        """DFS of the option tree."""
        for name, subopt in self.sub.items():
            yield (name, )
            for p in subopt.iter_opt_paths():
                yield (name, ) + p

    def iter_leaf_opt_paths(self) -> Iterator[ConfPath]:
        """DFS of the option tree."""
        if not self.sub:
            yield ()
        else:
            for name, subopt in self.sub.items():
                for p in subopt.iter_leaf_opt_paths():
                    yield (name, ) + p

    def iter_dfs(self, path: ConfPath = ()):
        """Yield (path, option) for this option and all descendants (DFS)."""
        yield path, self
        for name, sub in self.sub.items():
            for y in sub.iter_dfs(path + (name, )):
                yield y

    def __getitem__(self, key) -> 'ConfigOption':
        """Index by a single component (str) or by a full path (tuple)."""
        if isinstance(key, str):
            return self.sub[key]
        elif isinstance(key, tuple):
            if len(key) == 0:
                return self
            else:
                return self.sub[key[0]][key[1:]]
        else:
            raise TypeError(key)

    def get(self, key: ConfPath, default=None):
        """Like dict.get, but for paths."""
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, path: ConfPath, default: Any):
        """Insert `default` at `path` if absent and return the option there."""
        tgt = self[parent(path)]
        if tgt.sub is None:
            tgt.sub = {}
        return tgt.sub.setdefault(basename(path), default)

    def contains(self, path: ConfPath):
        """Return True if the path resolves to an existing option."""
        try:
            self[path]
            return True
        except KeyError:
            return False

    def inherit(self, rootopts: 'ConfigOption', cfg: ConfigNode,
                selfpth: ConfPath):
        """Copy the inherited option's config into this option's config,
        then recurse into sub-options."""
        superpth = self.inherit_from
        if superpth is not None:
            # If the super option is not set, there's nothing to inherit
            if (supercfg := cfg.get(superpth)) is None:
                return
            # If this option is not set, but the super option is,
            # create our own config as well, including all of its parents,
            # but only if create_if_inheritance_target_exists is set on those
            # options
            selfcfg = self.create_parent_config_for_inheritance(
                rootopts, cfg, selfpth)
            if selfcfg is None:
                return
            # Find the option we inherit from and make sure it exists
            if (superopt := rootopts.get(superpth)) is None:
                raise ValueError(f'{pth2str(superpth)} is not a valid option')
            # Create a copy of the config of our super-option and override it
            # with our own config
            supercfg = deepcopy(supercfg)
            superopt.explicit_override(self, supercfg, superpth, selfcfg,
                                       selfpth)
            selfcfg.sub = supercfg.sub
        if self.sub:
            for name, sub in self.sub.items():
                sub.inherit(rootopts, cfg, selfpth + (name, ))

    @staticmethod
    def create_parent_config_for_inheritance(rootopts: 'ConfigOption',
                                             cfg: ConfigNode,
                                             selfpth: ConfPath):
        """
        Loop over all parent options of selfpth in rootopts and default-
        initialize their configuration in cfg to empty ConfigNodes if the
        option's create_if_inheritance_target_exists is set to True.
        Returns cfg[selfpth] or None if parents were not created because of
        create_if_inheritance_target_exists.
        """
        selfcfg = None
        p: ConfPath = ()
        opt = rootopts
        create_paths: List[ConfPath] = []
        for s in selfpth:
            p += s,
            opt = opt[s]
            if (selfcfg := cfg.get(p)) is None:
                if not opt.create_if_inheritance_target_exists:
                    return None
                create_paths.append(p)
        # Only create the missing nodes once the whole chain was approved.
        for p in create_paths:
            selfcfg = cfg.setdefault(p, ConfigNode(sub={}))
        return selfcfg

    def explicit_override(self, rootopts: 'ConfigOption', selfcfg: ConfigNode,
                          selfpth: ConfPath, overridecfg: ConfigNode,
                          overridepath: ConfPath):
        """Recursively apply overridecfg on top of selfcfg."""
        # The default ConfigOption simply overrides all of its sub-options, but
        # this function is overridden by specific subclasses.
        if overridecfg.sub is None:
            return  # No sub-options, so nothing to override
        for name, subopt in self.sub.items():
            assert isinstance(selfcfg, ConfigNode)
            assert isinstance(overridecfg, ConfigNode)
            if name not in overridecfg.sub:
                continue
            subselfcfg = selfcfg.setdefault((name, ), ConfigNode())
            subpath = selfpth + (name, )
            suboverridepath = overridepath + (name, )
            suboverridecfg = overridecfg.sub[name]
            subopt.explicit_override(rootopts, subselfcfg, subpath,
                                     suboverridecfg, suboverridepath)
        if self.inherit_from is not None:
            # Also let the inherited-from option apply its override logic.
            superopt = rootopts[self.inherit_from]
            superopt.explicit_override(rootopts, selfcfg, selfpth, overridecfg,
                                       overridepath)

    def override(self, rootopts: 'ConfigOption', cfg: ConfigNode,
                 selfpath: ConfPath):
        """Override other options with this option if appropriate. This is a
        no-op in most cases and only does something in OverrideConfigOption."""
        assert cfg.contains(selfpath)

    def verify_impl(self, rootopts: 'ConfigOption', cfg: ConfigNode,
                    cfgpath: ConfPath):
        """Check the config below cfgpath against this option's sub-options."""
        assert cfg.contains(cfgpath)
        selfcfg = cfg[cfgpath]
        # Check if there are any unknown options in the config
        if not self.allow_unknown_keys:
            if (unkwn := set(selfcfg.sub or ()) - set(self.sub or ())):
                raise ConfigError(f'Unkown options in {pth2str(cfgpath)}: ' +
                                  ', '.join(unkwn))
        # Recursively verify the sub-options
        if selfcfg.sub:
            for name, sub in selfcfg.sub.items():
                if name in self.sub:
                    self.sub[name].verify(rootopts, cfg, cfgpath + (name, ))

    def verify(self, rootopts: 'ConfigOption', cfg: ConfigNode,
               cfgpath: ConfPath):
        """Verify this option's config; inheriting options verify against
        the inherited-from option's definition instead."""
        if self.inherit_from is None:
            return self.verify_impl(rootopts, cfg, cfgpath)
        else:
            return rootopts[self.inherit_from].verify_impl(
                rootopts, cfg, cfgpath)

    def override_all(self, cfg: ConfigNode):
        """Run override() for every option present in cfg."""
        # This is just me being lazy, we probably don't need to iterate over
        # all nodes ...
        for p, opt in self.iter_dfs():
            if cfg.contains(p):
                opt.override(self, cfg, p)

    def verify_all(self, cfg: ConfigNode):
        """Verify the entire configuration tree against this root option."""
        self.verify(self, cfg, ())

    def inherit_all(self, cfg: ConfigNode):
        """Apply inheritance for the entire configuration tree."""
        self.inherit(self, cfg, ())

    def update_default(
            self,
            rootopts: 'ConfigOption',
            cfg: ConfigNode,
            cfgpath: ConfPath,
            selfpath: Optional[ConfPath] = None
    ) -> Optional[DefaultValueWrapper]:
        """Fill in this option's default value at cfgpath (if its parent is
        present) and return the configured value or the applied default."""
        if selfpath is None:
            selfpath = cfgpath
        result = None
        # If the entire path exists in cfg, simply return that value
        if (cfgval := cfg.get(cfgpath)) is not None:
            result = cfgval
        # If the path is not yet in cfg
        else:
            assert self is rootopts[selfpath]
            # Find the default value for this option
            default = self.default.get_default(rootopts, self, cfg, cfgpath,
                                               selfpath)
            # If the parent is set in the config, set this value as well
            if default is not None and cfg.contains(parent(cfgpath)):
                cfgval = cfg.setdefault(cfgpath, ConfigNode())
                cfgval.value = default.value
                if cfgval.value is not None:
                    self.verify(rootopts, cfg, cfgpath)
                result = default
        if self.inherit_from is not None:
            # Also fill in the defaults of everything the inherited-from
            # option provides (except options that themselves inherit).
            targetopt = rootopts.get(self.inherit_from)
            if targetopt is None:
                raise ValueError(f"Inheritance {pth2str(selfpath)} targets "
                                 f"nonexisting option "
                                 f"{pth2str(self.inherit_from)}")
            for p, opt in targetopt.iter_dfs():
                if opt.inherit_from is not None:
                    # TODO: this might be too restrictive, but we need to break
                    # the recursion somehow ...
                    continue
                optpth = joinpth(self.inherit_from, p)
                newcfgpth = joinpth(cfgpath, p)
                opt.update_default(rootopts, cfg, newcfgpth, optpth)
        return result

    def update_default_all(self, cfg: ConfigNode):
        """Fill in defaults for every option whose parent exists in cfg."""
        for p, opt in self.iter_dfs():
            if hasparent(p) and cfg.contains(parent(p)):
                opt.update_default(self, cfg, p)
class UncheckedConfigOption(ConfigOption):
    """ConfigOption that accepts arbitrary unknown sub-keys without error."""
    allow_unknown_keys = True
class StrConfigOption(ConfigOption):
    """A config option whose value is a plain string."""

    def get_typename(self):
        return 'string'

    def explicit_override(self, opts: 'ConfigOption', selfcfg: ConfigNode,
                          selfpth: ConfPath, overridecfg: ConfigNode,
                          overridepath: ConfPath):
        # Strings are replaced wholesale by the overriding value.
        assert not self.sub
        assert not selfcfg.sub
        assert not overridecfg.sub
        selfcfg.value = deepcopy(overridecfg.value)

    def verify(self, rootopts: 'ConfigOption', cfg: ConfigNode,
               cfgpath: ConfPath):
        """Check that the configured value is a string, not a table."""
        if cfg[cfgpath].sub:
            raise ConfigError(f'Type of {pth2str(cfgpath)} should be '
                              f'{str}, not {dict}')
        elif not isinstance(cfg[cfgpath].value, str):
            raise ConfigError(f'Type of {pth2str(cfgpath)} should be '
                              f'{str}, not {type(cfg[cfgpath].value)}')
class PathConfigOption(StrConfigOption):
    """A string option holding a relative path, optionally checked on disk.

    Args:
        must_exist: if True (and base_path is set), the path must exist.
        expected_contents: names that must exist inside the path.
        base_path: directory the configured path is resolved against.
    """

    def __init__(self,
                 name: str,
                 description: str = '',
                 example: str = '',
                 default: DefaultValue = NoDefaultValue(),
                 must_exist: bool = True,
                 expected_contents: Optional[List[str]] = None,
                 base_path: Optional[Path] = None):
        # FIX: `expected_contents` used a mutable default argument ([]),
        # which is shared between all instances; use None as the sentinel.
        # Passing an explicit list still behaves exactly as before.
        super().__init__(name, description, example, default)
        self.must_exist = must_exist
        self.expected_contents = [] if expected_contents is None else expected_contents
        self.base_path = base_path

    def get_typename(self):
        return 'path'

    def check_path(self, cfg: ConfigNode, cfgpath):
        """Normalize the configured path and verify it on disk if required."""
        # Normalize in place so later consumers see the cleaned-up path.
        path = cfg[cfgpath].value = os.path.normpath(cfg[cfgpath].value)
        if os.path.isabs(path):
            raise ConfigError(f'{pth2str(cfgpath)} must be a relative path')
        if self.base_path is not None:
            abspath = self.base_path / path
            if self.must_exist and not os.path.exists(abspath):
                raise ConfigError(f'{pth2str(cfgpath)}: {str(abspath)} '
                                  f'does not exist')
            missing = [
                sub for sub in self.expected_contents
                if not os.path.exists(os.path.join(abspath, sub))
            ]
            if missing:
                missingstr = '", "'.join(missing)
                raise ConfigError(f'{pth2str(cfgpath)} does not contain '
                                  f'required files or folders "{missingstr}"')

    def verify(self, rootopts: 'ConfigOption', cfg: ConfigNode,
               cfgpath: ConfPath):
        super().verify(rootopts, cfg, cfgpath)
        self.check_path(cfg, cfgpath)
class ListOfStrConfigOption(ConfigOption):
    """A config option whose value is a list of strings.

    Overriding appends the override's list to the existing one rather than
    replacing it. If convert_str_to_singleton is set, a bare string value is
    accepted and wrapped into a one-element list during verification.
    """

    def __init__(self,
                 name: str,
                 description: str = '',
                 example: str = '',
                 default: DefaultValue = NoDefaultValue(),
                 inherit_from: Optional[ConfPath] = None,
                 create_if_inheritance_target_exists: bool = False,
                 convert_str_to_singleton=False) -> None:
        super().__init__(name, description, example, default, inherit_from,
                         create_if_inheritance_target_exists)
        self.convert_str_to_singleton = convert_str_to_singleton

    def get_typename(self):
        return 'list'

    def explicit_override(self, opts: 'ConfigOption', selfcfg: ConfigNode,
                          selfpth: ConfPath, overridecfg: ConfigNode,
                          overridepath: ConfPath):
        # Lists are merged (self += override), not replaced.
        assert not self.sub
        assert not selfcfg.sub
        assert not overridecfg.sub
        if overridecfg.value is not None:
            if selfcfg.value is None:
                selfcfg.value = []
            assert isinstance(selfcfg.value, list)
            assert isinstance(overridecfg.value, list)
            selfcfg.value += deepcopy(overridecfg.value)

    def verify(self, rootopts: 'ConfigOption', cfg: ConfigNode,
               cfgpath: ConfPath):
        """Check that the configured value is a list of strings."""
        if cfg[cfgpath].sub:
            raise ConfigError(f'Type of {pth2str(cfgpath)} should be '
                              f'{list}, not {dict}')
        elif not isinstance(cfg[cfgpath].value, list):
            if self.convert_str_to_singleton and \
                    isinstance(cfg[cfgpath].value, str):
                cfg[cfgpath].value = [cfg[cfgpath].value]
            else:
                raise ConfigError(f'Type of {pth2str(cfgpath)} should be '
                                  f'{list}, not {type(cfg[cfgpath].value)}')
        elif not all(isinstance(el, str) for el in cfg[cfgpath].value):
            raise ConfigError(f'Type of elements in {pth2str(cfgpath)} should '
                              f'be {str}')
class DictOfStrConfigOption(ConfigOption):
    """A config option whose value is a str->str mapping.

    The mapping is stored as ConfigNode children (`sub`), not as a raw dict
    in `value`; verification converts a raw dict value into sub-nodes first.
    """

    def get_typename(self):
        return 'dict'

    def explicit_override(self, opts: 'ConfigOption', selfcfg: ConfigNode,
                          selfpth: ConfPath, overridecfg: ConfigNode,
                          overridepath: ConfPath):
        # Dicts are merged key-by-key (override wins on conflicts).
        assert not self.sub
        assert not selfcfg.value
        assert not overridecfg.value
        if overridecfg.sub is not None:
            if selfcfg.sub is None:
                selfcfg.sub = {}
            assert isinstance(selfcfg.sub, dict)
            assert isinstance(overridecfg.sub, dict)
            selfcfg.sub.update(deepcopy(overridecfg.sub))

    def verify(self, rootopts: 'ConfigOption', cfg: ConfigNode,
               cfgpath: ConfPath):
        """Check that the configured value is a dict of str -> str."""
        # A raw dict in `value` is first converted into sub-nodes.
        if cfg[cfgpath].value is not None:
            if isinstance(cfg[cfgpath].value, dict):
                newcfg = ConfigNode.from_dict(cfg[cfgpath].value)
                cfg[cfgpath].value = newcfg.value
                cfg[cfgpath].sub = newcfg.sub
            else:
                raise ConfigError(f'Type of {pth2str(cfgpath)} should be '
                                  f'{dict}, not {type(cfg[cfgpath].value)}')
        valdict = cfg[cfgpath].sub
        if not isinstance(valdict, dict):
            raise ConfigError(f'Type of {pth2str(cfgpath)} should be '
                              f'{dict}, not {type(valdict)}')
        elif not all(isinstance(el, str) for el in valdict.keys()):
            raise ConfigError(f'Type of keys in {pth2str(cfgpath)} should '
                              f'be {str}')
        elif not all(isinstance(el.value, str) for el in valdict.values()):
            raise ConfigError(f'Type of values in {pth2str(cfgpath)} should '
                              f'be {str}')
class OverrideConfigOption(ConfigOption):
    """An option whose sole purpose is to override the option at
    ``targetpath`` with its own (verified) value during the override phase.
    """

    def __init__(
            self,
            name: str,
            description: str,
            targetpath: ConfPath,
            default: DefaultValue = NoDefaultValue()) -> None:
        super().__init__(name, description, '', default)
        self.targetpath = targetpath

    def verify(self, rootopts: 'ConfigOption', cfg: ConfigNode,
               cfgpath: ConfPath):
        # Verify our value against the *target* option's rules, since that
        # is where the value will eventually land.
        rootopts[self.targetpath].verify(rootopts, cfg, cfgpath)

    def inherit(self, rootopts: 'ConfigOption', cfg: ConfigNode,
                selfpth: ConfPath):
        # Override options do not take part in inheritance.
        pass

    def override(self, rootopts: ConfigOption, cfg: ConfigNode,
                 cfgpath: ConfPath):
        # Nothing to do when this option was not set at all.
        if (selfcfg := cfg.get(cfgpath, None)) is None:
            return
        elif selfcfg.value is None and selfcfg.sub is None:
            return
        super().override(rootopts, cfg, cfgpath)
        curropt = rootopts[self.targetpath]
        # Make sure the target path exists in the config tree before writing.
        self.create_parent_config(cfg, self.targetpath)
        currcfg = cfg[self.targetpath]
        overridecfg = cfg[cfgpath]
        # Override the config at those paths by our own config
        curropt.explicit_override(rootopts, currcfg, self.targetpath,
                                  overridecfg, cfgpath)

    @staticmethod
    def create_parent_config(cfg: ConfigNode, path: ConfPath):
        # Walk the path, creating empty intermediate nodes as needed.
        parentcfg = cfg
        for s in path:
            assert parentcfg.sub is not None
            parentcfg = parentcfg.sub.setdefault(s, ConfigNode(sub={}))
|
class ZaloAppInfo:
    """Container for Zalo application credentials."""

    def __init__(self, app_id, secret_key):
        # Credentials issued by the Zalo developer console.
        self.app_id = app_id
        self.secret_key = secret_key
        # Left unset here; assigned later when a callback endpoint is known.
        self.callback_url = None
|
#!/usr/bin/env python2
from __future__ import print_function
from codecs import open
from setuptools import setup
setup(name="scrapy-moviesinconcert",
author="Wieland Hoffmann",
author_email="themineo@gmail.com",
packages=["scrapy_moviesinconcert"],
package_dir={"scrapy_moviesinconcert": "scrapy_moviesinconcert"},
download_url=["https://github.com/mineo/scrapy-moviesinconcert/tarball/master"],
url=["http://github.com/mineo/scrapy-moviesinconcert"],
license="MIT",
classifiers=["Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7"],
description="",
long_description=open("README.txt", encoding="utf-8").read(),
install_requires=["scrapy"],
setup_requires=["setuptools_scm"],
use_scm_version={"write_to": "scrapy_moviesinconcert/version.py"},
extras_require={
'docs': ['sphinx']
}
)
|
# Read n item prices and m coupons; each coupon row is (item index,
# discounted price).  For every coupon print the total price with that
# single item's price replaced by the discounted one.
n = int(input())
tokens = input().split()
prices = [int(token) for token in tokens]
m = int(input())
coupons = []
for _ in range(m):  # one coupon per line
    coupons.append([int(token) for token in input().split()])
total = sum(prices)
for i in range(m):
    # coupons[i][0] is 1-based, hence the -1.
    print(total - prices[coupons[i][0] - 1] + coupons[i][1])
#! /usr/bin/env python
#
# Copyright (C) 2013 Olivier Grisel <olivier.grisel@ensta.org>
description = """Cloud compute cluster configuration tool"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
DISTNAME = 'cardice'
DESCRIPTION = 'Cloud compute cluster configuration tool'
#LONG_DESCRIPTION = open('README.rst').read()
MAINTAINER = 'Olivier Grisel'
MAINTAINER_EMAIL = 'olivier.grisel@ensta.org'
URL = 'http://github.com/ogrisel/cardice'
LICENSE = 'MIT'
DOWNLOAD_URL = 'http://pypi.python.org/pypi/cardice'
import cardice
VERSION = cardice.__version__
try:
from setuptools import setup
extra_setuptools_args = dict(
zip_safe=False,
include_package_data=True,
)
except ImportError:
from distutils.core import setup
extra_setuptools_args = dict()
class CleanCommand(Clean):
    """Extended ``clean`` command: also removes build artifacts in the tree."""
    description = "Remove generated files in the source tree"

    def run(self):
        Clean.run(self)
        if os.path.exists('build'):
            shutil.rmtree('build')
        # Remove compiled extension and bytecode files left in the package.
        for dirpath, dirnames, filenames in os.walk('cardice'):
            for filename in filenames:
                if filename.endswith(('.so', '.pyd', '.dll', '.pyc')):
                    os.unlink(os.path.join(dirpath, filename))
def setup_package():
    """Invoke setup() with the cardice package metadata.

    Works with either setuptools or distutils: ``extra_setuptools_args``
    is populated at import time depending on which one was available.
    """
    setup(
        name=DISTNAME,
        maintainer=MAINTAINER,
        maintainer_email=MAINTAINER_EMAIL,
        description=DESCRIPTION,
        license=LICENSE,
        url=URL,
        version=VERSION,
        download_url=DOWNLOAD_URL,
        #long_description=LONG_DESCRIPTION,
        packages=['cardice', 'cardice.templates'],
        # Ship the YAML cluster templates alongside the code.
        package_data={'cardice.templates': ['*.yaml']},
        scripts=['scripts/cardice'],
        classifiers=[
            'Intended Audience :: Science/Research',
            'Intended Audience :: Developers',
            'License :: OSI Approved',
            'Programming Language :: Python',
            'Topic :: Software Development',
            'Topic :: Scientific/Engineering',
            'Operating System :: Microsoft :: Windows',
            'Operating System :: POSIX',
            'Operating System :: Unix',
            'Operating System :: MacOS',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.3',
        ],
        # Register the custom clean command defined above.
        cmdclass={'clean': CleanCommand},
        **extra_setuptools_args)


if __name__ == "__main__":
    setup_package()
|
from src.Transition import Transition
from src.Graph import Graph
def acceptation(graph, words):
    """Check which words are accepted by a deterministic automaton.

    Args:
        graph (Graph): the automaton (must expose gettransitions(),
            getFinalStates(), getAlphabet(), getInitialState()).
        words (list): list of words to check.

    Returns:
        dict: maps each word to True if it is accepted, False otherwise.

    Fixes over the previous version: the result dict was initialized but
    never populated nor returned (the docstring promised it); an empty
    word crashed on ``mot[-1]``.
    """
    nodes = graph.gettransitions()
    finalStates = graph.getFinalStates()
    alphabet = graph.getAlphabet()
    initialState = graph.getInitialState()
    result = {}
    for word in words:
        result[word] = False
    for uInput in words:
        mot = uInput
        print(mot)
        if not mot:
            # Empty word: accepted iff the initial state is final.
            result[uInput] = initialState in finalStates
            continue
        # The last letter is duplicated so that the final-state check runs
        # on an extra iteration after the whole word has been consumed.
        # NOTE(review): if the reached state has no outgoing transition on
        # that repeated letter, the check is skipped entirely and the word
        # is accepted by fall-through -- confirm this is intended.
        mot = mot + mot[-1]
        currentTransition = initialState
        error = False
        counter = 0
        for letter in mot:
            # Reject immediately on a letter outside the alphabet.
            if letter in alphabet:
                # Find the (deterministic) transition for the current state.
                for node in nodes:
                    if node.mFrom == currentTransition and node.mValue == letter:
                        print("Read {} from {} to {}".format(
                            node.mValue, node.mFrom, node.mGoto))
                        # Extra (duplicated) letter: verify final state.
                        if (counter == len(mot) - 1):
                            if (currentTransition in finalStates):
                                break
                            else:
                                print(
                                    "Error last letter not in final state")
                                error = True
                                break
                        # Advance to the next state.
                        currentTransition = node.mGoto
                        break
            else:
                print("Error letter not in alphabet")
                error = True
                break
            counter += 1
        if (error):
            print("Not valid!")
        else:
            print("String accepted")
        result[uInput] = not error
    return result
|
import logging
from discord.ext.commands import Bot, Cog, Context, command, BucketType
from discord.ext import commands
import asyncio
from butt_library import valid_user_or_bot, vacuum_enabled_in_guild, can_speak_in_channel
import mojang
log = logging.getLogger('bot.' + __name__)
class MojangCog(Cog):
    """Discord cog exposing a ``mojang`` command that reports the status of
    Mojang's services in this channel."""

    def __init__(self, bot: Bot):
        self.bot = bot

    @command()
    @commands.cooldown(1, 10, BucketType.guild)
    @valid_user_or_bot()
    @can_speak_in_channel()
    # TODO: need "minecraft" setting check
    async def mojang(self, ctx: Context):
        # status[0]: services that are down; status[1]: degraded services.
        status = mojang.Mojang.mojang_status()
        message = []
        if status[0]:
            message.append(
                "aw fuck, looks like %s %s broke (lol)" % (", ".join(status[0]), "are" if len(status[0]) > 1 else "is"))
        if status[1]:
            if status[0]:
                message.append("also %s could be having problems" % ", ".join(status[1]))
            else:
                # Fix: use ", " as the separator to match the sibling
                # messages (was ",".join, running service names together).
                message.append("looks like %s could be having problems" % ", ".join(status[1]))
        if not status[0] and not status[1]:
            message.append("praise notch, it works")
        # Fake a short typing delay before each line for comedic pacing.
        for t in message:
            async with ctx.typing():
                await asyncio.sleep(3)
                await ctx.send(t)
|
#
# lmbspecialops - a collection of tensorflow ops
# Copyright (C) 2017 Albert Ludwigs University of Freiburg, Pattern Recognition and Image Processing, Computer Vision Group
# Author(s): Lukas Voegtle <voegtlel@tf.uni-freiburg.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import tensorflow as tf
import numpy as np
import sys
sys.path.insert(0, '../python')
import lmbspecialops as ops
class EncodeDecodeWebpTest(tf.test.TestCase):
    """Round-trip tests for the encode_webp / decode_webp ops."""

    def _test_save_load(self, data, settings):
        """Encode ``data`` to WebP with ``settings`` and decode it back;
        check only that the shape survives the round trip."""
        with self.test_session(use_gpu=False, force_gpu=False):
            input_data = tf.constant(data)
            encoded_webp = ops.encode_webp(input_data, **settings)
            decoded_webp = ops.decode_webp(encoded_webp)
            self.assertEqual(input_data.eval().shape, decoded_webp.eval().shape)
            if 'lossless' in settings and settings['lossless'] == True:
                pass
                # Actually this is supposed to succeed, but it doesn't. Seems to be a bug in the encoder(?)
                #self.assertAllClose(input_data.eval(), decoded_webp.eval(), atol=5.0)

    @staticmethod
    def _test_data():
        # RGB (3-channel) and RGBA (4-channel) images of various sizes.
        return [
            (np.random.rand(1, 1, 3) * 255).astype(np.uint8),
            (np.random.rand(1, 1, 4) * 255).astype(np.uint8),
            (np.random.rand(3, 3, 3) * 255).astype(np.uint8),
            (np.random.rand(3, 3, 4) * 255).astype(np.uint8),
            (np.random.rand(128, 128, 3) * 255).astype(np.uint8),
            (np.random.rand(32, 32, 4) * 255).astype(np.uint8),
        ]

    @staticmethod
    def _test_settings():
        # One lossless and one lossy encoder configuration.
        return [
            {'lossless': True, 'preset_quality': 100, 'alpha_quality': 100},
            {'preset_quality': 90},
        ]

    def test_save_load(self):
        # Cross product of all test images with all encoder settings.
        for t in self._test_data():
            for s in self._test_settings():
                self._test_save_load(t, s)


if __name__ == '__main__':
    tf.test.main()
|
#!/usr/bin/python3
from docopt import docopt
import importlib
import os
import sys
import lib.init as init
from lib.services import color, git
__version__ = "0.9.4"  # did you change this in docs/conf.py as well?
# Non-master builds advertise their branch and commit in the version string.
if git.get_branch() != "master":
    __version__+= "-"+git.get_branch()+"-"+git.get_commit()
# __doc__ doubles as the docopt usage specification below.
__doc__ = """Usage: brownie <command> [<args>...] [options <args>]
Commands:
  init       Initialize a new brownie project
  console    Load the console
  run        Run a script in the /scripts folder
  test       Run test scripts in the /tests folder
  coverage   Evaluate test coverage
Options:
  -h --help  Display this message
  --update   Update to the latest version of brownie
  --stable   Use stable build
  --dev      Use nightly build
Type 'brownie <command> --help' for specific options and more information about
each command."""
print("Brownie v{} - Python development framework for Ethereum\n".format(__version__))
# --stable / --dev switch the git branch and then force an --update.
if '--stable' in sys.argv and git.get_branch() != "master":
    git.checkout('master')
    print("Switching to {0[value]}stable{0} brownie build".format(color))
    sys.argv.append('--update')
elif '--dev' in sys.argv and git.get_branch() != "develop":
    git.checkout("develop")
    print("Switching to {0[value]}nightly{0} brownie build - may be buggy!".format(color))
    sys.argv.append('--update')
if '--update' in sys.argv:
    print("Checking for updates...")
    if git.pull():
        print("Brownie has been updated to the latest version!")
    else:
        print("Your version of Brownie is already up to date.")
    sys.exit()
# Temporarily strip everything from the first '-' flag onward so docopt only
# sees '<command> <args>...'; the flags are re-appended afterwards for the
# sub-command to consume.
if len(sys.argv)>1 and sys.argv[1][0] != "-":
    try:
        idx = next(sys.argv.index(i) for i in sys.argv if i[0]=="-")
        opts = sys.argv[idx:]
        sys.argv = sys.argv[:idx]
    except StopIteration:
        opts = []
    args = docopt(__doc__)
    sys.argv += opts
# Valid commands are discovered from the .py modules in the lib/ folder.
lib_folder = __file__[:__file__.rfind('/')]+"/lib"
cmd_list = [i[:-3] for i in os.listdir(lib_folder) if i[-3:]==".py"]
if args['<command>'] not in cmd_list:
    sys.exit("Invalid command. Try 'brownie --help' for available commands.")
# Every command except 'init' requires an initialized project folder.
if args['<command>'] != "init":
    if not init.check_for_project():
        sys.exit(
            "ERROR: Brownie environment has not been initiated for this folder."
            "\nType 'brownie init' to create the file structure."
        )
    init.create_build_folders()
# Dispatch to lib/<command>.py; pretty-print any traceback via color.
try:
    importlib.import_module("lib."+args['<command>']).main()
except Exception:
    print(color.format_tb(sys.exc_info()))
"""Convert segmentation from RGB encoding to 32 bit TIFF"""
import click
import numpy as np
from dtoolbioimage.segment import Segmentation3D
def convert_segmentation(input_fpath, output_fpath, output_encoding):
    """Load a 3D segmentation and re-save it with the requested encoding."""
    loaded = Segmentation3D.from_file(input_fpath)
    loaded.save(output_fpath, encoding=output_encoding)
@click.command()
@click.argument('input_fpath')
@click.argument('output_fpath')
def main(input_fpath, output_fpath):
    # CLI entry point; always converts to the '32bit' encoding.
    # (No docstring on purpose: click would surface it as --help text.)
    convert_segmentation(input_fpath, output_fpath, '32bit')


if __name__ == "__main__":
    main()
|
import numpy as np
import datetime
import torch
from dateutil import tz
import logging
import os
import random
from nltk.tokenize import WhitespaceTokenizer
from allennlp.modules.elmo import batch_to_ids
from transformers import BertTokenizer
import json
from pathlib import Path
from collections import OrderedDict
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    """Fraction of rows in ``preds`` whose argmax matches ``labels``."""
    predicted = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    return np.sum(predicted == gold) / len(gold)
def format_time(elapsed):
    """Render a duration given in seconds as an hh:mm:ss string."""
    # Round to whole seconds first, then let timedelta do the formatting.
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
def get_device():
    """Return the torch device to use: CUDA when available, else CPU."""
    if not torch.cuda.is_available():
        logging.info("No GPU available, using the CPU instead.")
        return torch.device("cpu")
    logging.info(f"There are {torch.cuda.device_count()} GPU(s) available.")
    logging.info(f"We will use the GPU: {torch.cuda.get_device_name(0)}")
    return torch.device("cuda")
def current_utc_time():
    """Return the current time as a timezone-aware datetime in UTC.

    Uses the stdlib ``datetime.timezone.utc`` instead of the previous
    third-party ``dateutil.tz.tzutc()``; the result is equivalent (an
    aware datetime with a zero UTC offset).
    """
    return datetime.datetime.now(datetime.timezone.utc)
def read_json(fname):
    """Load a JSON file, preserving key order via OrderedDict."""
    path = Path(fname)
    with path.open('rt') as handle:
        return json.load(handle, object_hook=OrderedDict)
# https://github.com/ylhsieh/pytorch-elmo-classification
class Corpus(object):
    """Holds labelled sentences split into train/test sets and tokenized
    with a whitespace tokenizer.

    ``self.train`` / ``self.test`` are lists of (label, tokens) pairs.
    """

    def __init__(
        self,
        sentences,
        labels,
        lowercase=False,       # lowercase sentences before tokenizing
        test_size=0.05,        # fraction of data held out for testing
        max_len=-1,            # crop sentences longer than this (-1: no crop)
        label_dict=None,       # optional mapping from raw label to class id
        shuffle=True,          # shuffle before splitting train/test
    ):
        self.shuffle = shuffle
        self.lowercase = lowercase
        self.all_sentences = sentences
        self.all_labels = labels
        self.max_len = max_len
        self.label_dict = label_dict
        self.test_size = test_size
        self.test_sentences = []
        self.test_labels = []
        self.train_sentences = []
        self.train_labels = []
        self.split_dataset()
        self.tokenizer = WhitespaceTokenizer()
        self.train = self.tokenize(self.train_sentences, self.train_labels)
        self.test = self.tokenize(self.test_sentences, self.test_labels)

    def split_dataset(self):
        """Split all data into test (first ``test_size`` fraction) and train,
        optionally shuffling sentence/label pairs in lockstep first."""
        if self.shuffle:
            data = list(zip(self.all_sentences, self.all_labels))
            random.shuffle(data)
            self.all_sentences, self.all_labels = zip(*data)
        ind = int(self.test_size * len(self.all_sentences))
        self.test_sentences = self.all_sentences[:ind]
        self.test_labels = self.all_labels[:ind]
        self.train_sentences = self.all_sentences[ind:]
        self.train_labels = self.all_labels[ind:]

    def tokenize(self, sentences, labels):
        """Whitespace-tokenize sentences (cropping to ``max_len`` tokens and
        mapping labels through ``label_dict`` when configured); return a
        list of (label, tokens) pairs."""
        processed_sentences = []
        processed_labels = []
        cropped = 0
        for (sent, label) in zip(sentences, labels):
            if self.lowercase:
                sent = sent.lower().strip()
            else:
                sent = sent.strip()
            sent = self.tokenizer.tokenize(sent)
            if self.max_len > 0:
                if len(sent) > self.max_len:
                    cropped += 1
                    sent = sent[: self.max_len]
            if self.label_dict:
                label = self.label_dict[label]
            processed_sentences.append(sent)
            processed_labels.append(label)
        print(f"Number of sentences cropped: {cropped}")
        return list(zip(processed_labels, processed_sentences))
def batchify(data, bsz, shuffle=False, gpu=False):
    """Yield (batch, batch_tags) pairs from (tag, sentence) data.

    Args:
        data: list of (tag, tokenized_sentence) pairs, as produced by
            ``Corpus.tokenize``.
        bsz: batch size; the final batch may be smaller.
        shuffle: shuffle ``data`` in place before batching.
        gpu: unused; kept for backward compatibility with existing callers.

    Yields:
        (batch, batch_tags): ELMo character ids for the sentences in the
        batch, and a long tensor of the corresponding tags.

    Note: a large slab of dead commented-out padding/sorting code was
    removed; ``batch_to_ids`` already pads each batch internally.
    """
    if shuffle:
        random.shuffle(data)
    tags, sents = zip(*data)
    # Ceiling division so a partial final batch is still emitted.
    nbatch = (len(sents) + bsz - 1) // bsz
    for i in range(nbatch):
        batch = sents[i * bsz : (i + 1) * bsz]
        batch_tags = tags[i * bsz : (i + 1) * bsz]
        batch = batch_to_ids(batch)
        batch_tags = torch.tensor(batch_tags).long()
        yield batch, batch_tags
|
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.generics import get_object_or_404
from .models import Article, Author
from .serializers import ArticleSerializer, AuthorSerializer
class ArticleView(APIView):
    """CRUD endpoints for articles (list, create, update, delete)."""

    def get(self, request):
        """List all articles."""
        articles = Article.objects.all()
        serializer = ArticleSerializer(articles, many=True)
        return Response({"articles": serializer.data})

    def post(self, request):
        """Create an article from the 'article' key of the request body.

        ``raise_exception=True`` means invalid data raises a DRF
        ValidationError (400) before the save is reached.
        """
        article = request.data.get('article')
        serializer = ArticleSerializer(data=article)
        if serializer.is_valid(raise_exception=True):
            article_saved = serializer.save()
        return Response({"success": "Article '{}' created successfully".format(article_saved.title)})

    def put(self, request, pk):
        """Partially update article ``pk`` (404 if it does not exist)."""
        saved_article = get_object_or_404(Article.objects.all(), pk=pk)
        data = request.data.get('article')
        # partial=True: only the supplied fields are updated.
        serializer = ArticleSerializer(instance=saved_article, data=data, partial=True)
        if serializer.is_valid(raise_exception=True):
            article_saved = serializer.save()
        return Response({
            "success": "Article '{}' updated successfully".format(article_saved.title)
        })

    def delete(self, request, pk):
        """Delete article ``pk``; respond with 204 No Content."""
        article = get_object_or_404(Article.objects.all(), pk=pk)
        article.delete()
        return Response({
            "message": "Article with id `{}` has been deleted.".format(pk)
        }, status=204)
class AuthorView(APIView):
    """Read-only listing endpoint for authors."""

    def get(self, request):
        queryset = Author.objects.all()
        payload = AuthorSerializer(queryset, many=True).data
        return Response({"authors": payload})
|
#: W191
if False:
print # indented with 1 tab
#:
#: W191
y = x == 2 \
or x == 3
#: E101 W191 W504
if (
x == (
3
) or
y == 4):
pass
#: E101 W191
if x == 2 \
or y > 1 \
or x == 3:
pass
#: E101 W191
if x == 2 \
or y > 1 \
or x == 3:
pass
#:
#: E101 W191 W504
if (foo == bar and
baz == frop):
pass
#: E101 W191 W504
if (
foo == bar and
baz == frop
):
pass
#:
#: E101 E101 W191 W191
if start[1] > end_col and not (
over_indent == 4 and indent_next):
return(0, "E121 continuation line over-"
"indented for visual indent")
#:
#: E101 W191
def long_function_name(
var_one, var_two, var_three,
var_four):
print(var_one)
#: E101 W191 W504
if ((row < 0 or self.moduleCount <= row or
col < 0 or self.moduleCount <= col)):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
#: E101 E101 E101 E101 W191 W191 W191 W191 W191 W191
if bar:
return(
start, 'E121 lines starting with a '
'closing bracket should be indented '
"to match that of the opening "
"bracket's line"
)
#
#: E101 W191 W504
# you want vertical alignment, so use a parens
if ((foo.bar("baz") and
foo.bar("frop")
)):
print "yes"
#: E101 W191 W504
# also ok, but starting to look like LISP
if ((foo.bar("baz") and
foo.bar("frop"))):
print "yes"
#: E101 W191 W504
if (a == 2 or
b == "abc def ghi"
"jkl mno"):
return True
#: E101 W191 W504
if (a == 2 or
b == """abc def ghi
jkl mno"""):
return True
#: W191:2:1 W191:3:1 E101:3:2
if length > options.max_line_length:
return options.max_line_length, \
"E501 line too long (%d characters)" % length
#
#: E101 W191 W191 W504
if os.path.exists(os.path.join(path, PEP8_BIN)):
cmd = ([os.path.join(path, PEP8_BIN)] +
self._pep8_options(targetfile))
#: W191
'''
multiline string with tab in it'''
#: E101 W191
'''multiline string
with tabs
and spaces
'''
#: Okay
'''sometimes, you just need to go nuts in a multiline string
and allow all sorts of crap
like mixed tabs and spaces
or trailing whitespace
or long long long long long long long long long long long long long long long long long lines
''' # nopep8
#: Okay
'''this one
will get no warning
even though the noqa comment is not immediately after the string
''' + foo # noqa
#
#: E101 W191
if foo is None and bar is "frop" and \
blah == 'yeah':
blah = 'yeahnah'
#
#: W191 W191 W191
if True:
foo(
1,
2)
#: W191 W191 W191 W191 W191
def test_keys(self):
"""areas.json - All regions are accounted for."""
expected = set([
u'Norrbotten',
u'V\xe4sterbotten',
])
#: W191
x = [
'abc'
]
#:
|
"""
- Takes fixed points data from fixed point dictionary file
- Computes matrix L and eigenvalues for the fixpoint, located, corresponding to a wave with wave numbers (k1,k2).
- Creates a subfolder and save the result
INPUT:
- args: k1, k2, delta0, tol
- fixpoint_dict.pkl -> dictionary of fixed points
"""
import sys
import logging
import os
import pickle
import numpy as np
import scipy.linalg as lin
import carpet
from sim_physics import solve_cycle, nx, ny, N, get_mtwist
k1,k2, delta0, tol = int(sys.argv[1]), int(sys.argv[2]), float(sys.argv[3]), float(sys.argv[4])
dirname = os.path.dirname(__file__) # sys.argv[3]
def dump_object(obj, filename):
    """Pickle ``obj`` into ``filename`` (resolved relative to this script)."""
    filename = os.path.join(dirname, filename)
    print(filename)
    with open(filename, 'wb') as out_file:
        pickle.dump(obj, out_file, pickle.HIGHEST_PROTOCOL)
def load_object(filename):
    """Unpickle and return the object stored in ``filename``
    (resolved relative to this script's folder)."""
    full_path = os.path.join(dirname, filename)
    with open(full_path, 'rb') as in_file:
        return pickle.load(in_file)
# Load fixpoints
filename = 'fixpoint_dict.pkl' #'../fixpoint_dict_nx=6_ny=6_tol=1.000E-08.pkl' #
fixpoint_dict = load_object(filename)
def get_fixpoint(k1,k2):
    # Look up the fixed point for wave numbers (k1, k2) in the loaded
    # dictionary and return it as a fresh numpy array (copy).
    return np.array(fixpoint_dict[k1, k2])
fixpoint = get_fixpoint(k1,k2)
## Eigenvalues functions
"""
2019-07-31: choose any set of N-1 perturbations with zero mean phase
2019-08-27: eigenvalues of L-I -> eigenvalues of ln(L) [logarithms of eigenvalues of L]
"""
def calc_sort_log_evals_evecs(L):
    '''
    Compute the logarithms of the eigenvalues of L, sorted ascending,
    together with the correspondingly reordered eigenvectors.
    :return: logarithm of eigenvalues, eigenvectors (as columns)
    '''
    eigenvalues, eigenvectors = lin.eig(L)
    log_evals = np.log(eigenvalues)
    # Reorder both spectra consistently by the sorted (complex) log-values.
    order = log_evals.argsort()
    return log_evals[order], eigenvectors[:, order]
def get_L3_general(k1, k2, Delta0s, tol):
    """
    Build the linearization matrix L around the fixed point for (k1, k2).

    Input: N-1 perturbation vectors with zero mean phase
    N-th perturbation will be added automatically:
    Since we have already tested that it's neutral - just set last Delta1 = Delta0 = (1,1,1...1)
    """
    Delta1s = []  # list of deviation from the fixpoint after one cycle
    fixpoint = get_fixpoint(k1, k2)
    for Delta0 in Delta0s:  # index of single node that was initially perturbed
        # Initial condition
        phi0 = fixpoint + Delta0
        assert abs(carpet.get_mean_phase(Delta0)) < tol
        # Solve
        solution = solve_cycle(phi0, tol)
        phi1 = solution.y.T[-1]
        # Fill matrix row: subtract 2*pi because one full cycle advances
        # every phase by 2*pi.
        Delta1 = (phi1 - fixpoint - 2 * np.pi)
        Delta1s.append(Delta1)
    ## Out of Poincare-plane perturbation - make it stay neutral
    D0 = np.array([Delta0 for Delta0 in Delta0s] + [np.ones([N])])  # Truncated vectors as rows of a matrix
    D1 = np.array([Delta1 for Delta1 in Delta1s] + [np.ones([N])])  # Truncated vectors as rows of a matrix
    ## Get L: solve D0 @ X = D1 for the linear map in row form.
    L = lin.solve(D0, D1).transpose()  # Transpose - to work with vectors as columns again
    return L
def get_L_single(k1, k2, delta0, tol):
    """
    N-1 single-node perturbations (+ change other nodes to preserve mean phase)
    - Normalized by L_infinity norm
    """
    def single_node_perturbation(node_index, amplitude):
        # +1 at the perturbed node, -1/(N-1) everywhere else => zero mean.
        vec = np.full(N, -1 / (N - 1))
        vec[node_index] = 1
        return vec * amplitude

    basis = [single_node_perturbation(ix, delta0) for ix in range(N - 1)]
    return get_L3_general(k1, k2, basis, tol)
def get_mtwist_trig_basis(delta0=1, phi0=0):
    '''
    ad hoc solution for 2D
    Go through all possible cosine,sine of mtwists, keep only the ones which are orthogonal to each other
    2019-08-28: phi0 - phase shift all mtwists
                Added this one from 2D: can shift by angle
                Checked: equivalent to the old one when phi0=0
    '''
    def zero_vec(vec, eps=10 ** -8):
        # True if the vector is (numerically) zero; norm scaled by 1/sqrt(N).
        return lin.norm(vec) * N ** (-1 / 2) < eps

    def orthogonal(vec, basis, eps=10 ** -8):
        '''
        If vector is coaligned with a vector from the set - return True, else False
        '''
        for b in basis:
            if abs(vec @ b.conj()) > eps * lin.norm(vec) * lin.norm(b):
                return False
        return True

    basis = []
    # Collect cos/sin of every non-trivial m-twist, skipping vectors that are
    # zero or not orthogonal to the ones already kept.
    for k1 in range(nx):
        for k2 in range(ny):
            if k1 == 0 and k2 == 0:
                continue
            cos_mtwist = np.cos(get_mtwist(k1, k2) + phi0)
            sin_mtwist = np.sin(get_mtwist(k1, k2) + phi0)
            if not zero_vec(cos_mtwist) and orthogonal(cos_mtwist, basis):
                basis.append(cos_mtwist)
            if not zero_vec(sin_mtwist) and orthogonal(sin_mtwist, basis):
                basis.append(sin_mtwist)
    Delta0s = [delta0 * b for b in basis]
    # The construction must yield exactly N-1 independent perturbations.
    assert len(Delta0s) == N - 1
    return Delta0s
def get_L_mtwist(k1, k2, delta0, tol):
    """
    N-1 perturbation - cosines and sines of m-twists
    Nth - orthogonal to the Poincare plane; construct L s.t. this is a neutral perturbation
    """
    basis = get_mtwist_trig_basis(delta0)
    return get_L3_general(k1, k2, basis, tol)
def calc_evals_evecs_mtwist(k1, k2, delta0, tol):
    '''
    Spectrum of the m-twist linearization.
    evecs: eigenvectors as columns!
    '''
    return calc_sort_log_evals_evecs(get_L_mtwist(k1, k2, delta0, tol))
def fill_evals_evecs_dict_mtwist(k1, k2, delta0, tol, evals_dict, evecs_dict):
    """Compute the m-twist spectrum for (k1, k2) and store it in the
    supplied dictionaries, keyed by the wave-number pair."""
    evals, evecs = calc_evals_evecs_mtwist(k1, k2, delta0, tol)
    key = (k1, k2)
    evals_dict[key] = evals
    evecs_dict[key] = evecs
    logging.info("Finished: k1={} k2={}".format(k1, k2))
###
L_mtwist = get_L_mtwist(k1, k2, delta0, tol)
output_folder = 'out/linear_delta0={:.3E}_tol={:.3E}/'.format(delta0, tol)
os.makedirs(output_folder, exist_ok=True)
filename = output_folder + "/L_mtwist_k1={}_k2={}.npy".format(k1,k2)
np.save(filename, L_mtwist)
# L_log_lin = L_mtwist - sp.eye(N)
# evals, evecs = calc_sort_evals_evecs(L_log_lin)
|
# Advent of Code 2020 day 1: print the product of each pair of entries
# that sums to 2020 (each qualifying pair is printed twice, once per order).
with open('../input.txt', 'rt') as handle:
    entries = set(map(int, handle.readlines()))
for value in entries:
    complement = 2020 - value
    if complement in entries:
        print(value * complement)
# Problem 2
# (10/10 points)
# Assume s is a string of lower case characters.
# Write a program that prints the number of times the string 'bob' occurs in s.
# For example, if s = 'azcbobobegghakl', then your program should print: Number of times bob occurs is: 2
count = 0
# Slide a 3-character window over s; overlapping matches are counted.
for i in range(len(s)-2):
    if s[i:i+3] == 'bob':
        count += 1
# Fix: the message previously said "Number of vowels", but the loop counts
# occurrences of 'bob' (the problem statement requires the message below).
print('Number of times bob occurs is: %d' % count)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-06 22:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds company/contact detail fields to Report.

    Each new column gets a '' default so existing rows stay valid;
    ``preserve_default=False`` removes that default after the migration.
    """

    dependencies = [
        ('catalog', '0004_auto_20171106_1747'),
    ]

    operations = [
        migrations.AddField(
            model_name='report',
            name='company_country',
            field=models.CharField(default='', max_length=40),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='report',
            name='company_industry',
            field=models.CharField(default='', max_length=40),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='report',
            name='company_location',
            field=models.CharField(default='', max_length=40),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='report',
            name='company_name',
            field=models.CharField(default='', max_length=40),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='report',
            name='company_phone',
            field=models.CharField(default='', max_length=15),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='report',
            name='company_sector',
            field=models.CharField(default='', max_length=40),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='report',
            name='current_projects',
            field=models.TextField(default='', help_text='Enter a list of the current projects'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='report',
            name='info',
            field=models.TextField(default='', help_text='Enter information about the business plan and/or project'),
            preserve_default=False,
        ),
    ]
|
from IPython import embed
from engine import *
class CLI():
    """Human player interface: reads moves for the engine from stdin."""

    def __init__(self):
        self.query = "\nLast move: {}\nYour move: "

    def gen_move(self, engine, color):
        """Prompt until the user enters a legal move.

        '' passes, 'debug' drops into an IPython shell; anything else is
        parsed as a move and checked for legality before being returned.
        """
        last = engine.last_move
        last = "" if not last else engine.string_from_move(last)
        while True:
            print(engine)
            string = input(self.query.format(last))
            if string == 'debug':
                embed()
            elif string == '':
                return PASS
            try:
                move = engine.move_from_string(string)
                assert engine.legal(move, color)
                return move
            # Fix: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit and made the prompt
            # impossible to interrupt with Ctrl-C.
            except Exception:
                print("Illegal move! Try again.")
|
"""
Classes that represent the various value types that can appear in the configuration and problem definition files.
Each grammar type can both parse a string containing a value of a given type, and create the string representation of a given value.
"""
import pyparsing as pp
import io
import inspect
from pyparsing import Word, Suppress
import itertools
import numpy as np
from collections import namedtuple
from collections.abc import Hashable
from .misc import OrderedDict, cached_property, cache
ppc = pp.pyparsing_common
from .grammar import generate_grammar, separator as separator_grammar, \
delimitedList, line_end, optional_quote,\
replace_whitechars, White
from ase.units import Rydberg
import copy
import datetime
from typing import Union, Any, Callable
context = generate_grammar()
context.__enter__()
#it ensures that the generated grammar will have the correct whitespaces
class BaseType:
""" Base class for definition of configuration option types
A type without value (e.g. Separator) are just syntactical
elements in the potentials file, that do not carry an information.
Such elements do not yields (name, value) pair during parsing the file.
Do not confuse this with BaseType.missing_value functionality.
Missing_value is just the opposite: missing_value can be ommited in the file
(or even the absence of the name in the file carry the information, that
the Flag is False), but the name-value tuple of such Type is present
in the parse result. On the other hand, has_value = False is in the file, but
not in the result.
"""
has_value = True
""" Default value for BaseValueDefinition.name_in_grammar.
Some types (e.g. Tables) commonly have no name (are identified
by its position in the potential file.
"""
name_in_grammar = True
""" Default value for the given type. It can be overriden in the constructor (or just by setting
the instantiated object attribute) """
default_value = None
""" Deafault type for creating numpy arrays (e.g. by Table) is object - to be redefined
in the descendatns
"""
numpy_type = object
    def __init__(self, prefix:Union[str,None]=None, postfix:Union[str,None]=None,
                 format:str='', default_value:Any=None,
                 condition:Union[Callable[[Any], Union[bool,str]],None]=None,
                 after_convert:Union[Callable[[Any], Any],None]=None):
        """
        Create the object.

        Parameters
        ----------
        prefix
          The string, that will be printed before the value

        postfix
          The string, that will be printed after the value

        format
          The (python) format string, that will be used for printing the value.
          The format is passed as format argument to ``str.format`` routine.

        default_value
          The default value of the options of this type. ``None`` means no default value.

        condition
          Function, that check the validity of the value. It should return ``True`` for a valid
          value, and ``False`` or string for invalid. The string is interpreted as an error message
          that explains the invalidity of the value.

        after_convert
          Function, that - if it is given - is applied to the (entered or parsed) value. The function
          is applied on the result of the
          :meth:`convert<ase2sprkkr.common.grammar_types.BaseType.convert>` method
        """
        self.prefix = prefix
        """ The string, that will be printed before the value """
        self.postfix = postfix
        """ The string, that will be printed after the value """
        self.format = format
        """ The (python) format string, that will be used for printing the value.
        The format is passed as format argument to ``str.format`` routine. """
        self.condition = condition
        # Wrap the class-level convert so after_convert post-processes every
        # converted value; the lambda deliberately calls the *class* method
        # to avoid infinite recursion through the instance attribute.
        if after_convert is not None:
           self.convert = lambda v: \
               after_convert(self, self.__class__.convert(self, v))

        """ Some subclasses has default_value defined via read-only property. """
        # Run the default through convert so it is stored in canonical form.
        if default_value is not None:
           self.default_value = self.convert(default_value)
    def __str__(self):
        # Human-readable name of the type: by default, the class name.
        return self.__class__.__name__

    @cache
    def grammar(self, param_name=False):
        """ Return a pyparsing grammar for the type """
        # NOTE(review): @cache on a method keys on (self, param_name) and keeps
        # the instances alive for the lifetime of the cache -- confirm intended.
        grammar = self._grammar
        if not isinstance(grammar, pp.ParserElement):
            # _grammar may be either a ready ParserElement or a factory callable
            grammar = grammar(param_name)
        if self.prefix or self.postfix:
            with generate_grammar():
                if self.prefix:
                    grammar = pp.Literal(self.prefix).suppress().setName(self.prefix) + grammar
                if self.postfix:
                    grammar += pp.Literal(self.postfix).suppress().setName(self.postfix)
        # give subclasses a chance to alter the final (prefixed) grammar
        grammar = self.transform_grammar(grammar, param_name)
        if self.has_value:
            def validate(s, loc, x):
                # Re-raise validation errors as parse exceptions, so pyparsing
                # can report the offending location.
                try:
                    out = self.validate(x[0], parse_check=True, param_name=param_name)
                except ValueError as e:
                    raise pp.ParseException(s, loc, str(e) + '\nValidating of the parsed value failed') from e
                return x
            grammar.addParseAction(validate)
        grammar.grammar_type = self
        return grammar

    def parse(self, str, whole_string=True):
        # Parse the given string and return the (single) resulting value.
        # (The parameter name shadows the builtin ``str``; kept for
        # backward compatibility of keyword calls.)
        return self.grammar().parseString(str, whole_string)[0]

    async def parse_from_stream(self, stream, up_to, start=None, whole_string=True):
        """ Read from an (asyncio-style) stream up to the given delimiter,
        decode it as utf8 and parse the result.

        Parameters
        ----------
        stream
          Object with an async ``readuntil(separator)`` method (e.g. asyncio StreamReader).
        up_to
          The (bytes) delimiter; it is stripped from the parsed text.
        start
          Optional prefix prepended to the decoded text before parsing.
        whole_string
          Passed to :meth:`parse` -- require the whole text to match.
        """
        result = await stream.readuntil(up_to)
        result = result[:-len(up_to)].decode('utf8')
        if start:
            result = start + result
        return self.parse(result, whole_string)
def grammar_name(self):
""" Human readable expression of the grammar. By default,
this is what is set by grammar.setName, however, sometimes
is desirable to set even shorter string """
return str(self.grammar)
    def transform_grammar(self, grammar, param_name=False):
        """ The chance for the resulting class to alter the resulting prefixed grammar """
        return grammar

    def missing_value(self):
        """ Is the configuration value a flag? I.e. can the ``=<value>`` part be omitted
        in the configuration?

        Return
        ------
        can_be_omitted : bool
          Is an omission of the value possible, e.g. the option is given as Flag (only by name of the option)
        default_value
          The value used if the value is omitted
        do_not_output_value
          The value, with which the variable should not be outputed at all (e.g. False for a flag)
        """
        return False, None, None

    def validate(self, value, param_name='<Unknown>', parse_check=False):
        """ Validate either the pyparsing result or a user given value.

        Raises ValueError (via _valueError) if the value is invalid,
        returns True otherwise.

        Parameters
        ---------
        value : mixed
          Value to be validated
        param_name : str or callable
          Parameter name to be used in a possibly thrown exception (Optional).
          If it is callable, it should be a function that returns the param_name.
        parse_check : bool
          Passed to _validate; True when validating a freshly parsed value.
        """
        try:
            # _validate returns True for a valid value, or an error message
            err = self._validate(value, parse_check)
        except ValueError as err:
            self._valueError(value, err, param_name)
        if err is not True:
            self._valueError(value, err, param_name)
        if self.condition:
            # additional, per-instance user-supplied check
            err = self.condition(value)
            if err is not True:
                self._valueError(value, err, param_name)
        return True

    def _validate(self, value, parse_check=False):
        """ Return True if the value is valid, an error message otherwise. """
        return True

    def _valueError(self, value, error_message=False, param_name=False):
        # Build and raise a uniformly formatted ValueError.
        if callable(param_name):
            param_name = param_name()
        if param_name:
            param = f'for paramater {param_name} of type {self}'
        else:
            param = f'for type {self}'
        if error_message is False:
            error_message = 'invalid value'
        if isinstance(error_message, Exception):
            # chain the original exception as the cause
            raise ValueError("Value '{}' {} is not valid: {}".format(value, param, error_message)) from error_message
        else:
            raise ValueError("Value '{}' {} is not valid: {}".format(value, param, error_message))
def read(self, token, parameterName='<Unknown>'):
""" Transform pyparsing token to a validated value """
self.validate(val)
return val
    def convert(self, value):
        """ Convert a value from user to a "canonical form" """
        return value

    def _string(self, val):
        # Bare string form of the value, without prefix/postfix/format.
        return val

    def string(self, val):
        # Full output form. Note the order: prefix/postfix are concatenated
        # first, then the format spec is applied to the decorated string.
        val = self._string(val)
        if self.prefix:
            val = self.prefix + str(val)
        if self.postfix:
            val = str(val) + self.postfix
        if self.format:
            val = "{:{}}".format(val, self.format)
        return str(val)

    def write(self, f, val):
        # Write the formatted value to a file-like object.
        f.write(self.string(val))

    def print(self, val):
        # Print the formatted value to stdout.
        print(self.string(val))

    def copy(self):
        # Shallow copy is sufficient: grammar types hold only simple attributes.
        return copy.copy(self)

    def enrich(self, option):
        """ Some types can add properties to the options that have
        the type, e.g. see Sequence.enrich, which adds the ability to
        access the items of the sequence using [] """
        pass

    def __repr__(self):
        return "<{}>".format(self.__class__.__name__)
class Unsigned(BaseType):
    """ Unsigned integer (zero is possible) """

    _grammar = replace_whitechars(ppc.integer).setParseAction(lambda x:int(x[0]))

    numpy_type = int

    def _validate(self, value, parse_check=False):
        """ Accept non-negative integers only. """
        # Accept numpy integers as well, consistently with Integer._validate.
        if not isinstance(value, (int, np.int64)): return "Integer value required"
        # BUG FIX: zero is explicitly allowed (see the class docstring), so the
        # original message "Positive value required" was misleading.
        return value >= 0 or "Non-negative value required"

    def grammar_name(self):
        return '<+int>'

Unsigned.I = Unsigned()
class Integer(BaseType):
    """ Signed integer value. """

    # optionally signed integer token, converted to a python int
    _grammar = replace_whitechars(ppc.signed_integer).setParseAction(lambda x:int(x[0]))

    numpy_type = int

    def _validate(self, value, parse_check=False):
        """ Accept python and numpy integers. """
        if isinstance(value, (int, np.int64)):
            return True
        return f'Integer value required ({value.__class__} was given)'

    def grammar_name(self):
        return '<int>'

Integer.I = Integer()
class Bool(BaseType):
    """ A bool type, whose value is represented by a single letter (T or F). """

    # a case-insensitive single letter; parses to True iff it is 'T'
    _grammar = (pp.CaselessKeyword('T') | pp.CaselessKeyword('F')).setParseAction( lambda x: x[0] == 'T' )

    numpy_type = bool

    def _validate(self, value, parse_check=False):
        if isinstance(value, bool):
            return True
        return "Bool value rquired"  # (message kept verbatim)

    def grammar_name(self):
        return '<T|F>'

    def _string(self, val):
        # render as the single-letter form used in the files
        if val:
            return 'T'
        return 'F'

Bool.I = Bool()
class Real(BaseType):
    """ A real (floating point) value. """

    # any float-like number token, converted to a python float
    _grammar = replace_whitechars(ppc.fnumber).setParseAction(lambda x: float(x[0]))

    numpy_type = float

    def _validate(self, value, parse_check=False):
        if isinstance(value, float):
            return True
        return "Float value required"

    def grammar_name(self):
        return '<float>'

Real.I = Real()
class Date(BaseType):
    """ A date value of the form 'DD.MM.YYYY' """

    # BUG FIX: the dots in the original pattern were not escaped, so ANY
    # character was accepted as the separator (e.g. '01x02x2000' parsed
    # as a date). ``\.`` matches a literal dot only.
    _grammar = pp.Regex(r'(?P<d>\d{2})\.(?P<m>\d{2})\.(?P<y>\d{4})').setParseAction(lambda x: datetime.date(int(x['y']), int(x['m']), int(x['d'])))

    def _validate(self, value, parse_check=False):
        """ Accept datetime.date instances only. """
        return isinstance(value, datetime.date) or "Date (datetime.date) value required"

    def grammar_name(self):
        return '<dd.mm.yyyy>'

    def _string(self, val):
        # render back in the DD.MM.YYYY form accepted by the grammar
        return val.strftime("%d.%m.%Y")

Date.I = Date()
class BaseRealWithUnits(BaseType):
    """ The base class for float value, which can have units append.
    The value is converted automatically to the base units.
    """

    grammar_cache = {}
    """ A grammar for units is cached """

    def _grammar_units(self, units):
        # NOTE(review): the cache is keyed by id(units); if a units dict is
        # garbage collected, its id can be reused by another dict. Safe for
        # the module-level unit maps (Energy.units), but worth confirming for
        # dynamically created ones.
        i = id(units)
        if not i in self.grammar_cache:
            # one alternative per unit name; an absent unit (key None) matches
            # Empty; each alternative parses to its conversion factor
            units = pp.Or(
                (pp.Empty() if v is None else pp.CaselessKeyword(v))
                .setParseAction(lambda x,*args, u=u: u) for v,u in units.items()
            )
            # a float followed by the (optional) unit; multiply to base units
            out = Real.I.grammar() + pp.Or(units)
            out.setParseAction(lambda x: x[0]*x[1])
            self.grammar_cache[i] = out
            return out
        return self.grammar_cache[i]

    def _grammar(self, param_name):
        return self._grammar_units(self.units)

    def _validate(self, value, parse_check=False):
        # the stored value is always in base units, i.e. a plain float
        return isinstance(value, float) or "Float value required"

    def grammar_name(self):
        return '<float>[{}]'.format("|".join(('' if i is None else i for i in self.units)))

    numpy_type = float
class RealWithUnits(BaseRealWithUnits):
    """ A float value with user-defined units """

    def __init__(self, *args, units, **kwargs):
        # units: mapping { unit name (or None for no unit) : conversion factor }
        self.units = units
        super().__init__(*args, **kwargs)
class Energy(BaseRealWithUnits):
    """ The grammar type for energy. The default units are Rydberg, one can specify eV. """

    # conversion factors to the base unit (Rydberg); None = unit omitted
    units = {
        'Ry' : 1.,
        'eV' : 1. / Rydberg,
        None : 1.,
    }
    """ The allowed units and their conversion factors """

Energy.I = Energy()
class BaseString(BaseType):
    """ Common base class of the string grammar types. """

    def _validate(self, value, parse_check=False):
        """ A value is valid if it is a str and (for user-given values)
        if it is fully matched by the type's grammar. """
        if not isinstance(value, str):
            return "String value required"
        if parse_check:
            # a parsed value already matched the grammar, nothing more to check
            return True
        try:
            self._grammar.parseString(value, True)
        except pp.ParseException as e:
            return f"Forbidden character '{e.line[e.col]}' in the string"
        return True
class String(BaseString):
    """ Just a string (without whitespaces and few special chars) """

    # a run of printable characters, with the structural chars excluded
    _grammar = Word(pp.printables,excludeChars=",;{}").setParseAction(lambda x:x[0])

    def grammar_name(self):
        return '<str>'

String.I = String()
class QString(BaseString):
    """ Either a quoted string, or just a word (without whitespaces or special chars) """

    # BUG FIX: the original combined the alternatives with the python ``or``
    # operator, which simply returns its first (truthy) operand -- the
    # QuotedString alternative was never part of the grammar. ``|`` builds a
    # pyparsing MatchFirst. The quoted form is tried first, because a bare
    # Word would otherwise consume the opening quote of a quoted value.
    _grammar = (pp.QuotedString("'") | pp.Word(pp.printables, excludeChars=",;{}")).setParseAction(lambda x:x[0])

    def grammar_name(self):
        return "'<str>'"

# BUG FIX: the singleton was instantiated from String instead of QString
QString.I = QString()
class LineString(BaseString):
    """ A string, that takes all up to the end of the line """

    # consume everything up to the line end (or the end of the input)
    _grammar = pp.SkipTo(pp.LineEnd() | pp.StringEnd())

    def grammar_name(self):
        return "'<str....>\n'"

LineString.I = LineString()
class Keyword(BaseType):
    """
    A value, that can take values from the predefined set of strings.
    """

    def __init__(self, *keywords, **kwargs):
        """
        keywords: the allowed values; matching is case-insensitive,
        the canonical (stored) form is upper case.
        """
        super().__init__(**kwargs)
        # keywords are canonicalized to upper case, both here and when parsed
        self.keywords = [ i.upper() for i in keywords ]
        with generate_grammar():
            self._grammar = optional_quote + pp.MatchFirst((pp.CaselessKeyword(i) for i in self.keywords)).setParseAction(lambda x: x[0].upper()) + optional_quote

    def _validate(self, value, parse_check=False):
        return value in self.keywords or "Required one of [" + "|".join(self.keywords) + "]"

    def grammar_name(self):
        return '|'.join(('"'+i+'"' for i in self.keywords ))

    def __str__(self):
        return self.grammar_name()

    def convert(self, value):
        # accept any case from the user, store the canonical upper-case form
        return value.upper()
def DefKeyword(default, *others, **kwargs):
    """
    A value, that can take values from the predefined set of strings, the first one is the default value.
    """
    # All the keywords, with the default one first; it also becomes default_value.
    keywords = (default,) + others
    return Keyword(*keywords, default_value=default, **kwargs)
class Flag(BaseType):
    """
    A boolean value, which is True, if a name of the value appears in the input file.
    """

    def grammar_name(self):
        # a flag has no value part to display
        return None

    def str(self):
        # NOTE(review): this looks like it was meant to override __str__
        # (the base class defines __str__); as written it is a plain method
        # named ``str`` -- confirm before renaming.
        return "(Flag)"

    def missing_value(self):
        # (can_be_omitted, value_if_omitted, value_that_suppresses_output)
        return (True, True, False)

    def _validate(self, value, parse_check=False):
        return value is True or value is False or value is None or "This is Flag with no value, please set to True to be present or to False/None to not"

    # the mere presence of the option name parses as True
    _grammar = pp.Empty().setParseAction(lambda x: True)

Flag.I = Flag()
# Alternative (numpy) scalar types mapped to their canonical python types.
normalize_type_map = {
    np.int64 : int,
    np.float64: float,
    np.bool_: bool
}
""" Mapping of alternative types to the 'canonical ones'. """

def normalize_type(type):
    """ Return the 'canonical type' for a given type.

    I.e. it maps numpy internal types to standard python ones;
    any other type is returned unchanged.

    doctest:
    >>> normalize_type(np.int64)
    <class 'int'>
    """
    if type in normalize_type_map:
        return normalize_type_map[type]
    return type
type_from_type_map = OrderedDict([
(float, Real.I),
(int , Integer.I),
(bool, Bool.I),
(str , String.I)]
)
""" The standard grammar_types for python types.
The value type can be given by a standard python type, this map maps the
python type for the appropriate grammar_type class.
"""
def format_for_type(format, type):
    """
    Returns the format appropriate to the given type.

    Parameters
    ----------
    format: str or dict
      If it is str, just return it.
      Dict should has the form { type : format_for_the_type } + { None : default_format }
    """
    # a plain (non-dict) format applies to every type
    if not isinstance(format, dict):
        return format
    # per-type format with a mandatory default under the None key
    return format[type] if type in format else format[None]
def type_from_type(type, format='', format_all=False):
    """ Guess and return the grammar element (BaseType class descendant) from a python type. E.g. int => Integer.

    The given format can be optionally set to the returned grammar element.

    Parameters
    ----------
    type: A python type or BaseType
      A type to be converted to a grammar type (BaseType class descendant)
    format: str or dict
      The format to be applied to the resulting class. If dict is given, see 'format_for_type'
      for the way how the format is determined
    format_all: boolean
      If False (default), the format is not applied, if instance of BaseType is given as
      the type parameter. Otherwise, a copy of the input type with the applied format is returned
    """
    # Hashable guard: e.g. list default values can not be used as dict keys.
    # NOTE(review): normalize_type is applied only AFTER the membership test,
    # so numpy scalar types (np.int64, ...) bypass the map -- confirm intended.
    if isinstance(type, Hashable) and type in type_from_type_map:
        type = normalize_type(type)
        format = format_for_type(format, type)
        type = type_from_type_map[type]
        if format:
            # copy before mutating: the map holds shared singleton instances
            type = type.copy()
            type.format = format
        return type
    elif format_all:
        type = type.copy()
        type.format = format_for_type(format, normalize_type(type.numpy_type))
    return type
class Array(BaseType):
    """ A (numpy) array of values of one type """

    # grammar delimiter between the items, and its counterpart for output
    delimiter=White(' \t').suppress()
    delimiter_str = ' '

    def __init__(self, type, default_value=None,
                 length=None, max_length=None, min_length=None,
                 as_list=False,
                 **kwargs):
        """
        Parameters
        ----------
        type
          The grammar type (or python type) of the items. A list/array of
          example values may be given instead; it then also serves as the
          default value.
        default_value
          Default value for the option (mutually exclusive with giving the
          values in ``type``).
        length
          Shortcut setting both min_length and max_length.
        max_length, min_length
          Limits for the number of the items.
        as_list
          Return a python list instead of a numpy array; if a callable is
          given, it is applied to the parsed list (and used as the required
          container type during validation).
        """
        if isinstance(type, (list, np.ndarray)):
            if default_value is not None:
                # BUG FIX: the original raised an undefined name ``ValueException``
                raise ValueError("It is not possible for an Array to provide default_value both in 'default_value' and in 'type' argument")
            default_value = type
            type = type[0].__class__
        self.type = type_from_type(type)
        self.as_list = as_list
        super().__init__(default_value=default_value, **kwargs)
        self.min_length = min_length or length
        self.max_length = max_length or length
        with generate_grammar():
            grammar = self.type.grammar()
            grammar = delimitedList(grammar, self.delimiter)
            if self.as_list:
                if callable(self.as_list):
                    grammar = grammar.setParseAction(lambda x: self.as_list(x.asList()))
                else:
                    grammar = grammar.setParseAction(lambda x: [x.asList()])
            else:
                grammar.setParseAction(lambda x: self.convert(x.asList()))
            grammar.setName(self.grammar_name())
            self._grammar = grammar

    def __str__(self):
        return "Array({})".format(str(self.type))

    def grammar_name(self):
        gn = self.type.grammar_name()
        # fixed-length arrays are rendered as e.g. 3*<int>
        if self.min_length is not None and self.min_length == self.max_length:
            return f'{self.min_length}*{gn}'
        return f'{gn}{self.delimiter_str}{gn}{self.delimiter_str}...'

    def _string(self, val):
        # join the item strings with the output form of the delimiter
        it = iter(val)
        i = next(it)
        out = self.type.string(i)
        for i in it:
            out += self.delimiter_str
            out += self.type.string(i)
        return out

    def _validate(self, value, parse_check=False):
        # the required container type depends on the as_list mode
        if callable(self.as_list):
            cls = self.as_list
        elif self.as_list:
            cls = list
        else:
            cls = np.ndarray
        if not isinstance(value, cls):
            return f'{cls} type required, {value.__class__} is given'
        # each item has to pass the item-type validation
        for i,v in enumerate(value):
            try:
                self.type.validate(v, parse_check=False)
            except ValueError as e:
                raise ValueError("Value {} in the set is incorrect: {}".format(i, str(e))) from e
        if self.min_length is not None and len(value) < self.min_length:
            return f"The array should be at least {self.min_length} items long, it has {len(value)} items"
        # BUG FIX: the upper bound was compared against min_length, so
        # max_length was never actually enforced when the two differed.
        if self.max_length is not None and len(value) > self.max_length:
            return f"The array can not have more than {self.max_length} items, it has {len(value)} items"
        return True

    def convert(self, value):
        if self.as_list:
            if callable(self.as_list):
                return value if isinstance(value, self.as_list) else self.as_list(value)
            return list(value) if isinstance(value, tuple) else value
        if not isinstance(value, np.ndarray):
            if self.type.numpy_type == object:
                # force a 1D object array even from iterable items, see
                # https://stackoverflow.com/questions/60939396/forcing-a-creation-of-1d-numpy-array-from-a-list-array-of-possibly-iterable-obje
                out = np.empty(len(value), object)
                out[:] = value
                return out
            else:
                return np.atleast_1d(value)
        return value
class SetOf(Array):
    """ Set of values of the same type. E.g. {1,2,3} """

    # items may be separated by a comma, a semicolon or whitespace
    delimiter = pp.Suppress(pp.Literal(',') | pp.Literal(';') | White(' \t')).setName('[,; ]')
    delimiter_str = ','

    def __init__(self, type, **kwargs):
        # the values are surrounded by curly braces by default
        kwargs.setdefault('prefix', '{')
        kwargs.setdefault('postfix', '}')
        super().__init__(type, **kwargs)

    def transform_grammar(self, grammar, param_name=False):
        # a single bare value (without braces) is accepted as a one-item set
        return grammar | self.type.grammar(param_name).copy().addParseAction(lambda x: np.atleast_1d(x.asList()))

    def __str__(self):
        return "SetOf({})".format(str(self.type))
type_from_set_map = OrderedDict([
(float, SetOf(float)),
(int , SetOf(int)),
])
""" Map the python type of a collection member to a grammar type of the collection.
Only canonical types are expected, see :meth:`ase2sprkkr.common.grammar_types.normalize_type`
"""
def type_from_value(value):
    """ Guess the grammar type from a python value.

    ..doctest::

    >>> type_from_value(2)
    <Integer>
    >>> type_from_value(2.0)
    <Real>
    """
    if isinstance(value, (list, np.ndarray)):
        # collections map to SetOf types; an empty one defaults to Integer.I
        return type_from_set_map[normalize_type(value[0].__class__)] if len(value) else Integer.I
    if isinstance(value, str):
        # prefer a plain string; fall back to the quoted form if the value
        # contains characters the plain grammar forbids
        try:
            String._grammar.parseString(value, True)
            return String.I
        except Exception:
            return QString.I
    type = type_from_type(value.__class__)
    if type is value.__class__:
        # BUG FIX: the message was missing the f-prefix, so the literal text
        # "{value}" was printed instead of the offending value.
        raise ValueError(f'Cannot determine grammar type from value {value}')
    return type.__class__(default_value = value)
def type_from_default_value(value, format='', format_all=False):
    """ Guess the grammar type from a value, that will become the default value of the grammar type.

    It has to create a new object instance, as it has to set the default
    value property of the returned object. An (output) format can be applied to the
    resulting grammar type.

    Grammar types passed as types are left as is, unless format_all flag is set.
    """
    # already a grammar type or a class: delegate to type_from_type
    if inspect.isclass(value) or isinstance(value, BaseType):
        return type_from_type(value, format=format, format_all=format_all)
    ptype = normalize_type(value.__class__)
    gtype = type_from_type(value.__class__).__class__
    # a fresh instance is required, since default_value is set on it
    return gtype(default_value = value, format=format_for_type(format, ptype))
class BaseMixed(BaseType):
    """
    A variant type - it can hold "anything".
    """

    # NOTE(review): the attribute actually consulted by _grammar is ``types``
    # (defined in the descendants); this singular ``type`` attribute is never
    # read here -- possibly a typo; confirm before removing.
    type = None
    """ The types, that the value can hold. To be redefined in the descendants. """

    string_type = None
    """ Type of string grammar_type to be used. To be redefined in the descendants. """

    @classmethod
    def _grammar(cls, param_name=False):
        # try the concrete types in order; the first match wins
        return pp.MatchFirst((
            i.grammar(param_name) for i in cls.types
        ))

    def get_type(self, value):
        """ Return the type of the value """
        return self.string_type if isinstance(value, str) else type_from_value(value)

    def _validate(self, value, parse_check=False):
        # delegate the validation to the concrete grammar type of the value
        type = self.get_type(value)
        if type is value:
            return 'Can not determine the type of value {}'.format(value)
        return type.validate(value, parse_check)

    def grammar_name(self):
        return '<mixed>'
class Mixed(BaseMixed):
    """ A variant value to be used in input files (in unknown - custom - options) """

    string_type = QString.I
    """ Input files use quoted strings. """

    # the concrete types tried by the parser, in order of priority
    types = [
        Energy.I,
        Real.I,
        Integer.I,
        type_from_set_map[int],
        type_from_set_map[float],
        QString.I,
        Flag.I,
    ]

    def missing_value(self):
        # a Mixed option may appear without a value (then it acts as a Flag)
        return True, True, False

Mixed.I = Mixed()
class PotMixed(BaseMixed):
    """ A variant value to be used in potential files (in unknown - custom - options) """

    string_type = LineString.I
    """ Potential files use line strings. """

    # the concrete types tried by the parser, in order of priority
    types = [
        Energy.I,
        Real.I,
        Integer.I,
        Bool.I,
        type_from_set_map[int],
        type_from_set_map[float],
        LineString.I,
    ]

    def _string(self, val):
        # booleans are rendered in the T/F form; Bool._string does not use
        # self, so the unbound call with a PotMixed instance is safe
        if isinstance(val, bool):
            return Bool._string(self, val)
        else:
            return super()._string(val)

PotMixed.I = PotMixed()
class Separator(BaseType):
    """ Special class for ``****`` separator inside a section """

    # the separator parses to no value at all
    _grammar = separator_grammar.copy().setParseAction(lambda x: [None])
    has_value = False

    def _validate(self, value, parse_check=False):
        # a separator never carries a value
        return 'Can not set a value to a separator'

    # NOTE(review): the hook used elsewhere is ``grammar_name`` (no
    # underscore); this variant is probably never called -- confirm.
    def _grammar_name(self):
        return '****...****\n'

    def _string(self, val=None):
        # a full-width line of asterisks
        return '*'*79

Separator.I = Separator()
class Sequence(BaseType):
    """ A sequence of values of given types """

    def __init__(self, *types, format='', format_all=False, allowed_values=None,
                 default_values=False, names=None, **kwargs):
        """
        types: the grammar types (or default values) of the items
        format, format_all: output format, passed to type_from_default_value
        allowed_values: optional whitelist of whole-sequence values
        default_values: if True, missing items fall back to their defaults
        names: optional item names; enables namedtuple results and [] access
        """
        super().__init__(**kwargs)
        if names:
            # names may be given either as a dict name->index or as a list
            self.names = names if isinstance(names, dict) else {name:i for i,name in enumerate(names)}
            self.value_type = namedtuple("_".join(names), names)
            self.value_constructor = self.value_type
        else:
            self.names = None
            self.value_type = tuple
            self.value_constructor = lambda *x: tuple(x)
        if isinstance(format, (str, dict)):
            # a single format specification applies to all the items
            format = itertools.repeat(format)
        self.types = [ type_from_default_value(i, dfs, format_all=format_all) for i,dfs in zip(types, format) ]
        if allowed_values and not isinstance(allowed_values, set):
            allowed_values = set(allowed_values)
        self.allowed_values=allowed_values
        self.default_values=default_values

    def _grammar(self, param_name = False):
        def grm(type):
            g = type.grammar(param_name)
            if self.default_values and type.default_value is not None:
                # a missing item falls back to its default value
                g = g | pp.Empty().setParseAction(lambda x: type.default_value)
            return g
        grammars = [grm(i) for i in self.types]
        grammar = pp.And(grammars).setParseAction(lambda x: self.value_constructor(*x))
        if self.allowed_values is not None:
            grammar.addConditionEx(lambda x: x[0] in self.allowed_values, lambda x: f'{x[0]} is not in the list of allowed values')
        return grammar

    def _validate(self, value, parse_check=False):
        if not isinstance(value, (self.value_type)) or len(value) != len(self.types):
            return f'A tuple of {len(self.types)} values is required'
        # item validation raises on error; the return value is not used
        for i,j in zip(self.types, value):
            out = i.validate(j, parse_check=parse_check)
        return True

    def convert(self, value):
        # NOTE(review): the first branch returns for every non-value_type
        # input, which makes the try/except below unreachable -- looks like
        # dead code; confirm before simplifying.
        if not isinstance(value, self.value_type):
            return self.value_constructor(*value)
        try:
            return self.value_constructor(*value)
        except TypeError:
            pass
        return value

    def grammar_name(self):
        return " ".join( (f'{j.grammar_name()}' for j in self.types) )

    def _string(self, val):
        # each item is prefixed by a single space
        out = []
        for i,v in zip(self.types, val):
            out.append(' ')
            out.append(i.string(v))
        return ''.join(out)

    def enrich(self, option):
        """ Add [] access (and, if names are given, named attribute access)
        to the option holding a value of this type. """
        class cls(option.__class__):
            # NOTE(review): the first parameter is named ``sulf`` (sic); the
            # body uses the enclosing ``self`` (the Sequence) via the closure,
            # so the option instance passed here is intentionally(?) unused.
            def _get_index(sulf, name):
                if self.names and isinstance(name, str):
                    return self.names[name]
                return name
            def __getitem__(self, key):
                key = self._get_index(key)
                return self()[key]
            def __setitem__(self, key, value):
                key = self._get_index(key)
                v = list(self())
                v[key] = value
                self.set(v)
        if self.names:
            for n,i in self.names.items():
                # the extra lambda binds the current i for the property closures
                (lambda i: setattr(cls, n, property(
                    lambda self: self[i],
                    lambda self, v: self.__setitem__(i, v)
                )))(i)
        option.__class__ = cls
class Table(BaseType):
    """
    Table, optionally with named columns, e.g.

    ::text

      IQ IREFQ IMQ NOQ ITOQ CONC
      1 1 1 1 1 1.000
      2 2 2 1 2 1.000
    """

    name_in_grammar = False

    def __init__(self, columns=None,
                 header=None, free_header=False,
                 format = {float: '>21.17', None: '>16'}, format_all=True,
                 numbering=None, numbering_label=None, numbering_format=True,
                 prefix=None, postfix=None, length=None,
                 row_condition=None,
                 default_values=False,
                 named_result = False, **kwargs):
        """
        columns: dict of column name -> type (alternatively given via **kwargs)
        header: column header line; defaults to the column names
        free_header: accept an arbitrary header line (or one validated by a callable)
        numbering: add a leading row-number column (True, a label string, or a type)
        length: required number of rows
        named_result: return a structured (record) numpy array
        """
        # NOTE(review): the prefix/postfix arguments are accepted but not
        # forwarded -- super() is always called with None -- confirm intended.
        super().__init__(prefix=None, postfix=None)
        if columns is None:
            # the columns can alternatively be given as keyword arguments
            columns = kwargs
        if isinstance(columns, dict):
            self.names = list(columns.keys())
            columns = columns.values()
        else:
            self.names = None
        if header is None:
            header = self.names
        # one row of the table is a Sequence of the column types
        self.sequence = Sequence( *columns, format=format, format_all=format_all, condition = row_condition, default_values=default_values )
        self.header = header
        self.free_header = free_header
        # a string numbering argument is a shortcut for numbering=True + label
        if numbering.__class__ is str:
            numbering_label=numbering
            numbering=True
        self.numbering = Unsigned.I if numbering is True else numbering
        if self.numbering and numbering_format and not (numbering_format is True and self.numbering.format):
            if numbering_format is True:
                numbering_format = '<4'
            # copy before mutating: Unsigned.I is a shared singleton
            self.numbering = self.numbering.copy()
            self.numbering.format = numbering_format
        self.numbering_label = numbering_label
        self.named_result = named_result
        self.length = length

    def _grammar(self, param_name=False):
        line = self.sequence.grammar(param_name)
        if self.numbering:
            line = self.numbering.grammar() + line # + pp.And._ErrorStop()
        # one row per line
        grammar = delimitedList(line, line_end)
        if self.names:
            if self.free_header:
                # accept (and discard) an arbitrary header line
                fh = pp.SkipTo(line_end) + line_end
                if callable(self.free_header):
                    fh.addConditionEx(lambda x: self.free_header(x[0]),
                                      lambda x: f"This is not an allowed header for table {param_name}: {x[0]}" )
                grammar = pp.Suppress(fh) + grammar
            else:
                def names():
                    for n in self.names:
                        if ' ' in n:
                            """ multiple column headers for one column are allowed
                                -- see Occupation section"""
                            yield from map(pp.CaselessKeyword, n.split(' '))
                        else:
                            yield pp.CaselessKeyword(n)
                grammar = pp.Suppress(pp.And(list(names())) + pp.lineEnd) + grammar
        if self.numbering_label:
            grammar = pp.CaselessKeyword(self.numbering_label).suppress() + grammar
        def ensure_numbering(s, loc, x):
            # the tokens interleave as [number, row, number, row, ...]
            numbers = x[::2]
            datas = x[1::2]
            if not numbers == [*range(1, len(numbers)+1)]:
                raise pp.ParseException(s, loc, 'First column should contain row numbering')
            return datas
        if self.numbering is not None:
            grammar.addParseAction(ensure_numbering)
        grammar.addParseActionEx( lambda x: np.array(x.asList(), self.numpy_type), "Cannot retype to numpy array")
        return grammar

    def _string(self, data):
        out = []
        if self.header:
            def gen():
                # NOTE(review): ``names`` is computed but never used below --
                # possibly a leftover; confirm.
                names = ((i[1] if isinstance(i, tuple) else i) for i in self.names)
                # yields name, format pairs consumed by fstr.format below
                for n,t in zip(self.names, self.sequence.types):
                    yield n
                    yield t.format
            fstr = (" {:{}}"*len(self.names))
            if self.numbering:
                fstr = self.numbering.string(self.numbering_label or '') + fstr
            else:
                # drop the leading space when there is no numbering column
                fstr = fstr[1:]
            out.append(fstr.format(*gen()))
            newline = True
        else:
            newline = False
        line = 1
        for i in data:
            if newline:
                out.append('\n')
            newline = True
            if self.numbering is not None:
                out.append(self.numbering.string(line))
                line+=1
            out.append(self.sequence.string(i))
        return ''.join(out)

    def _validate(self, value, parse_check=False):
        if not isinstance(value, np.ndarray):
            return f"Numpy array as a value required {value.__class__} given"
        dtype = self.numpy_type
        # a structured (record) dtype means a 1D array of records
        dim = 1 if isinstance(dtype, list) else 2
        if len(value.shape) != dim:
            return f"The array should have dimension={dim}, it has dimension {len(value.shape)}"
        if value.dtype != self.numpy_type:
            return f"The data type of the value should be {dtype}, it is {value.dtype}"
        if dim==2 and value.shape[1] != len(self.sequence.types):
            return f"The array is required to have {len(self.sequence.types)} columns, it has {value.shape[1]}"
        if self.length is not None and self.length != value.shape[0]:
            # NOTE(review): the message reports shape[1]; the row count is
            # shape[0] -- confirm and fix the message.
            return f"The array is required to have {self.length} rows, it has {value.shape[1]}"
        return True

    def convert(self, value):
        return np.asarray(value, dtype = self.numpy_type)

    @cached_property
    def numpy_type(self):
        # If all the columns share one dtype (and no named result is asked
        # for), a plain 2D array is used; otherwise a structured dtype.
        types = self.sequence.types
        nr = self.names and self.named_result
        if not nr:
            dtype = types[0].numpy_type
            for t in types[1:]:
                if t.numpy_type != dtype:
                    nr = True
                    break
            else:
                return dtype
        names = self.names or itertools.repeat('')
        return list(zip(names, (i.numpy_type for i in types)))

    def number_of_collumns(self):
        # (sic: method name kept for backward compatibility)
        return len(self.sequence.types)

    def zero_data(self, length):
        """ Return array of zeros with the given number of rows and
        with the dtype of the table
        """
        dtype = self.numpy_type
        if isinstance(dtype, list):
            # structured dtype: 1D array of records
            return np.zeros(length, dtype)
        else:
            return np.zeros((length, self.number_of_collumns()), dtype)

    def grammar_name(self):
        if self.names:
            data = " ".join( (f'{i}:{j.grammar_name()}' for i,j in zip(self.names, self.sequence.types) ) )
        else:
            data = self.sequence.grammar_name()
        return f"<TABLE of {data}>"
# Ready-to-use singleton instances of the grammar types, for convenience.
integer = Integer.I
""" A standard signed integer grammar type instance """
unsigned = Unsigned.I
""" A standard unsigned integer grammar type instance """
boolean = Bool.I
""" A standard bool grammar type instance (for potential files) """
flag = Flag.I
""" A standard bool grammar type instance (for input files) """
real = Real.I
""" A standard real grammar type instance """
string = String.I
""" A standard string grammar type instance """
qstring = QString.I
""" A standard quoted string grammar type instance (for input files) """
line_string = LineString.I
""" A standard line string grammar type instance (for potential files) """
mixed = Mixed.I
""" A standard variant grammar type instance (for input files) """
pot_mixed = PotMixed.I
""" A standard variant grammar type instance (for potential files) """
separator = Separator.I
""" A standard separator line grammar type instance (for potential files) """
energy = Energy.I
""" A standard energy float value type instance (for potential files) """

# Leave the grammar-generation context entered earlier in the module and
# drop the reference so it does not leak into the module namespace.
context.__exit__(None, None, None)
del context
|
#!/usr/bin/env python3
import requests
import sys
from random_rekt import random_rekt
import time
from decouple import config
from promote import promote
from demote import demote
from notes import notes, save, del_note, get
from time_convert import time_convert
from pin_message import pin_msg, unpin_msg
from userManagement import banUser, unbanUser, warnUser, noOfWarns, removeWarn, muteUser, unmuteUser
import os
# Telegram Bot API long-polling loop: fetch updates, dispatch commands.
botapi_url = 'https://api.telegram.org/bot'
token = config('token')
path = os.path.dirname(__file__)
endpoint = botapi_url + token
offset = 0
method = 'getUpdates'
request = endpoint + '/' + method

while(True):
    try:
        query = {'offset': offset}
        response = requests.get(request, params=query)
        # FIX: renamed from ``json`` so the variable does not shadow the
        # well-known stdlib module name.
        payload = response.json()
        if(payload['result']):
            result = payload['result']
            for update in result:
                if 'callback_query' in update:
                    # Inline-button press: re-resolve the note and edit
                    # (or delete) the message it was attached to.
                    callback_query = update['callback_query']
                    message = callback_query['message']
                    message_id = message['message_id']
                    chat_id = message['chat']['id']
                    data = callback_query['data']
                    data = data[:-1]
                    inp = ['/get ']
                    inp.append(data)
                    reply_text = get(chat_id,endpoint,inp,token,path)
                    if reply_text == '':
                        method_resp = 'deleteMessage'
                        query_resp = {'chat_id' : chat_id, 'message_id' : message_id}
                        requests.get(endpoint + '/' + method_resp, params=query_resp)
                    else:
                        method_resp = 'editMessageText'
                        query_resp = {'chat_id' : chat_id, 'message_id' : message_id, 'text' : reply_text}
                        requests.get(endpoint + '/' + method_resp, params=query_resp)
                if 'message' in update:
                    message = update['message']
                    if 'new_chat_participant' in message:
                        # Greet new members (or introduce the bot itself).
                        newguy = message['new_chat_participant']
                        getMe = requests.get(endpoint + '/' + 'getMe').json()
                        chat_id = message['chat']['id']
                        if newguy['id'] == getMe['result']['id']:
                            reply_text = 'Thankyou for adding me in this group. Press /help to get a list of commands.'
                        else:
                            reply_text = "Hi! "
                            if 'username' in newguy:
                                reply_text += "@" + newguy['username']
                            else:
                                reply_text += newguy['first_name']
                            reply_text += ", How are you? You are welcome to this group."
                        method_resp = 'sendMessage'
                        query_resp = {'chat_id' : chat_id, 'text' : reply_text}
                        requests.get(endpoint + '/' + method_resp, params=query_resp)
                    if 'left_chat_participant' in message:
                        # Announce members who left the chat.
                        thatguy = message['left_chat_participant']
                        chat_id = message['chat']['id']
                        reply_text = "User "
                        if 'username' in thatguy:
                            reply_text += "@" + thatguy['username']
                        else:
                            reply_text += thatguy['first_name']
                        reply_text += ", left the chat"
                        method_resp = 'sendMessage'
                        query_resp = {'chat_id' : chat_id, 'text' : reply_text}
                        requests.get(endpoint + '/' + method_resp, params=query_resp)
                    elif 'text' in message:
                        # Command dispatch on the first word of the message.
                        text = message['text']
                        spl = text.split(' ')
                        chat_id = message['chat']['id']
                        command = spl[0]
                        reply_text = ''
                        if(text[:1] == '#'):
                            # '#name' is a shortcut for '/get name'
                            temp = text.split('#')[1]
                            temp = temp.split(' ')[0]
                            inp = ['/get']
                            inp.append(temp)
                            chat_id = message['chat']['id']
                            reply_text = get(chat_id,endpoint,inp,token,path)
                        if(command == '/start'):
                            reply_text = 'Hello I am @cruzex_bot. Send /help to get a list of commands.'
                        elif(command[:5] == '/help'):
                            file_name = path + '/help'
                            # FIX: use a context manager -- the original
                            # opened the help file without ever closing it.
                            with open(file_name) as f:
                                reply_text += f.read()
                        elif(command == '/rekt'):
                            reply_text = random_rekt(spl,message)
                        elif(command == '/save'):
                            reply_text = save(message,endpoint,spl,token,path)
                        elif(command == '/get'):
                            reply_text = get(chat_id,endpoint,spl,token, path)
                        elif(command == '/notes'):
                            reply_text = notes(message['chat']['id'], path, endpoint)
                        elif(command == '/delete'):
                            reply_text = del_note(spl, message['chat']['id'],message,endpoint,path)
                        elif(command == '/convert'):
                            reply_text = time_convert(message,spl)
                        elif(message['chat']['type'] != 'private'):
                            # Group-administration commands are not available
                            # in private chats.
                            if(command == '/promote'):
                                reply_text = promote(message,endpoint)
                            elif(command == '/demote'):
                                reply_text = demote(message, endpoint)
                            elif(command == '/pin'):
                                reply_text = pin_msg(message, spl, endpoint)
                            elif(command == '/unpin'):
                                reply_text = unpin_msg(message, endpoint)
                            elif(command == '/ban'):
                                reply_text = banUser(message,endpoint)
                            elif(command == '/unban'):
                                reply_text = unbanUser(message,endpoint)
                            elif(command == '/kick'):
                                reply_text = unbanUser(message,endpoint,True)
                            elif(command == '/warn'):
                                reply_text = warnUser(message, endpoint, path)
                            elif(command == '/warns'):
                                reply_text = noOfWarns(message,path,endpoint)
                            elif(command == '/removewarn'):
                                reply_text = removeWarn(message, path)
                            elif(command == '/mute'):
                                reply_text = muteUser(message, endpoint, spl)
                            elif(command == '/unmute'):
                                reply_text = unmuteUser(message, endpoint)
                        method_resp = 'sendMessage'
                        query_resp = {'chat_id' : chat_id, 'text' : reply_text}
                        requests.get(endpoint + '/' + method_resp, params=query_resp)
                # acknowledge the update so it is not delivered again
                offset = int(update['update_id']) + 1
    except ValueError:
        print(time.ctime(), ": Broken response: ", response)
        time.sleep(60)
    except KeyboardInterrupt:
        print(time.ctime(), ": Ctrl-C pressed - exiting")
        sys.exit(1)
    except Exception:
        # FIX: narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt raised elsewhere are not silently swallowed.
        print(time.ctime(), ": Unexpected error", sys.exc_info()[0])
        time.sleep(90)
from prkng.database import db
class Analytics(object):
    """Read-only usage statistics computed directly from the database.

    Every query buckets timestamps in US/Eastern. The "week" series skip the
    current (partial) day via OFFSET 1; the "year" series use month buckets.
    """

    @staticmethod
    def get_user_data():
        """Return new-user registration counts: today, last 6 days, last 6 months."""
        today = db.engine.execute("""
            SELECT count(id)
            FROM users
            WHERE (created AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern' >= (NOW() AT TIME ZONE 'US/Eastern')::date
                AND (created AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern' <= (NOW() AT TIME ZONE 'US/Eastern' + INTERVAL '1 DAY')::date
        """).first()[0]
        week = db.engine.execute("""
            SELECT
                a.date, count(u.id)
            FROM (
                SELECT
                    to_char(date_trunc('day', ((NOW() AT TIME ZONE 'US/Eastern')::date - (offs * INTERVAL '1 DAY'))), 'YYYY-MM-DD"T"HH24:MI:SS"-0400"') AS date
                FROM generate_series(0, 365, 1) offs
            ) a
            LEFT OUTER JOIN users u
                ON (a.date = to_char(date_trunc('day', (u.created AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern'), 'YYYY-MM-DD"T"HH24:MI:SS"-0400"'))
            GROUP BY a.date
            ORDER BY a.date DESC
            OFFSET 1 LIMIT 6
        """)
        year = db.engine.execute("""
            SELECT
                to_char(a.date, 'Mon'), count(u.id)
            FROM (
                SELECT
                    date_trunc('month', ((NOW() AT TIME ZONE 'US/Eastern')::date - (offs * INTERVAL '1 MONTH'))) AS date
                FROM generate_series(0, 12, 1) offs
            ) a
            LEFT OUTER JOIN users u
                ON (to_char(a.date, 'Mon') = to_char(date_trunc('month', (u.created AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern'), 'Mon'))
            GROUP BY a.date
            ORDER BY a.date DESC
            LIMIT 6
        """)
        return {"day": today, "week": [{key: value for key, value in row.items()} for row in week],
            "year": [{key: value for key, value in row.items()} for row in year]}

    @staticmethod
    def get_active_user_chk_data():
        """Return distinct users with at least one checkin: today, weekly, monthly."""
        today = db.engine.execute("""
            SELECT count(DISTINCT c.user_id)
            FROM checkins c
            WHERE (c.checkin_time AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern' >= (NOW() AT TIME ZONE 'US/Eastern')::date
                AND (c.checkin_time AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern' <= (NOW() AT TIME ZONE 'US/Eastern' + INTERVAL '1 DAY')::date
        """).first()[0]
        week = db.engine.execute("""
            SELECT
                a.date, count(DISTINCT c.user_id)
            FROM (
                SELECT
                    to_char(date_trunc('day', ((NOW() AT TIME ZONE 'US/Eastern')::date - (offs * INTERVAL '1 DAY'))), 'YYYY-MM-DD"T"HH24:MI:SS"-0400"') AS date
                FROM generate_series(0, 365, 1) offs
            ) a
            LEFT OUTER JOIN checkins c
                ON (a.date = to_char(date_trunc('day', (c.checkin_time AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern'), 'YYYY-MM-DD"T"HH24:MI:SS"-0400"'))
            GROUP BY a.date
            ORDER BY a.date DESC
            OFFSET 1 LIMIT 6
        """)
        year = db.engine.execute("""
            SELECT
                to_char(a.date, 'Mon'), count(DISTINCT c.user_id)
            FROM (
                SELECT
                    date_trunc('month', ((NOW() AT TIME ZONE 'US/Eastern')::date - (offs * INTERVAL '1 MONTH'))) AS date
                FROM generate_series(0, 12, 1) offs
            ) a
            LEFT OUTER JOIN checkins c
                ON (to_char(a.date, 'Mon') = to_char(date_trunc('month', (c.checkin_time AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern'), 'Mon'))
            GROUP BY a.date
            ORDER BY a.date DESC
            LIMIT 6
        """)
        return {"day": today, "week": [{key: value for key, value in row.items()} for row in week],
            "year": [{key: value for key, value in row.items()} for row in year]}

    @staticmethod
    def get_active_user_data():
        """Return distinct users seen via `last_hello`: today, weekly, monthly."""
        today = db.engine.execute("""
            SELECT count(DISTINCT u.id)
            FROM users u
            WHERE (u.last_hello AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern' >= (NOW() AT TIME ZONE 'US/Eastern')::date
                AND (u.last_hello AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern' <= (NOW() AT TIME ZONE 'US/Eastern' + INTERVAL '1 DAY')::date
        """).first()[0]
        week = db.engine.execute("""
            SELECT
                a.date, count(DISTINCT u.id)
            FROM (
                SELECT
                    to_char(date_trunc('day', ((NOW() AT TIME ZONE 'US/Eastern')::date - (offs * INTERVAL '1 DAY'))), 'YYYY-MM-DD"T"HH24:MI:SS"-0400"') AS date
                FROM generate_series(0, 365, 1) offs
            ) a
            LEFT OUTER JOIN users u
                ON (a.date = to_char(date_trunc('day', (u.last_hello AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern'), 'YYYY-MM-DD"T"HH24:MI:SS"-0400"'))
            GROUP BY a.date
            ORDER BY a.date DESC
            OFFSET 1 LIMIT 6
        """)
        year = db.engine.execute("""
            SELECT
                to_char(a.date, 'Mon'), count(DISTINCT u.id)
            FROM (
                SELECT
                    date_trunc('month', ((NOW() AT TIME ZONE 'US/Eastern')::date - (offs * INTERVAL '1 MONTH'))) AS date
                FROM generate_series(0, 12, 1) offs
            ) a
            LEFT OUTER JOIN users u
                ON (to_char(a.date, 'Mon') = to_char(date_trunc('month', (u.last_hello AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern'), 'Mon'))
            GROUP BY a.date
            ORDER BY a.date DESC
            LIMIT 6
        """)
        return {"day": today, "week": [{key: value for key, value in row.items()} for row in week],
            "year": [{key: value for key, value in row.items()} for row in year]}

    @staticmethod
    def get_checkin_data():
        """Return total checkin counts: today, last 6 days, last 6 months."""
        today = db.engine.execute("""
            SELECT count(id)
            FROM checkins
            WHERE (checkin_time AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern' >= (NOW() AT TIME ZONE 'US/Eastern')::date
                AND (checkin_time AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern' <= (NOW() AT TIME ZONE 'US/Eastern' + INTERVAL '1 DAY')::date
        """).first()[0]
        week = db.engine.execute("""
            SELECT
                a.date, count(c.id)
            FROM (
                SELECT
                    to_char(date_trunc('day', ((NOW() AT TIME ZONE 'US/Eastern')::date - (offs * INTERVAL '1 DAY'))), 'YYYY-MM-DD"T"HH24:MI:SS"-0400"') AS date
                FROM generate_series(0, 365, 1) offs
            ) a
            LEFT OUTER JOIN checkins c
                ON (a.date = to_char(date_trunc('day', (c.checkin_time AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern'), 'YYYY-MM-DD"T"HH24:MI:SS"-0400"'))
            GROUP BY a.date
            ORDER BY a.date DESC
            OFFSET 1 LIMIT 6
        """)
        year = db.engine.execute("""
            SELECT
                to_char(a.date, 'Mon'), count(c.id)
            FROM (
                SELECT
                    date_trunc('month', ((NOW() AT TIME ZONE 'US/Eastern')::date - (offs * INTERVAL '1 MONTH'))) AS date
                FROM generate_series(0, 12, 1) offs
            ) a
            LEFT OUTER JOIN checkins c
                ON (to_char(a.date, 'Mon') = to_char(date_trunc('month', (c.checkin_time AT TIME ZONE 'UTC') AT TIME ZONE 'US/Eastern'), 'Mon'))
            GROUP BY a.date
            ORDER BY a.date DESC
            LIMIT 6
        """)
        return {"day": today, "week": [{key: value for key, value in row.items()} for row in week],
            "year": [{key: value for key, value in row.items()} for row in year]}

    @staticmethod
    def get_map_usage(hours=24):
        """Return map positions (lat/long/count) recorded in the last *hours* hours."""
        # Bug fix: `hours` is interpolated into the SQL text, so coerce it to
        # an int first to rule out SQL injection through this parameter.
        hours = int(hours)
        res = db.engine.execute("""
            SELECT
                ST_X(ST_Transform(centerpoint, 4326)) AS long,
                ST_Y(ST_Transform(centerpoint, 4326)) AS lat,
                count
            FROM analytics_pos
            WHERE created >= (NOW() - ({} * INTERVAL '1 HOUR'))
        """.format(hours))
        return [{key: value for key, value in row.items()} for row in res]
|
# Build a random 3x3 integer matrix, print it, then report the row with the
# largest sum and that sum (original exercise statement was in Portuguese).
from random import randint

# randint is called row by row, column by column, exactly as before
mat = [[randint(1, 10) for _ in range(3)] for _ in range(3)]

for row in mat:
    print(row)

# locate the first row with the maximal sum
pos = 0
soma_maior = sum(mat[0])
for idx in range(1, 3):
    row_total = sum(mat[idx])
    if row_total > soma_maior:
        soma_maior = row_total
        pos = idx

print("***Maior linha***")
print(mat[pos])
print("*** A soma da maior linha***")
print(soma_maior)
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Support functions for connecting with libreoffice Calc."""
# standard imports
import numpy as np
from pathlib import Path
import os
import inspect
import psutil
import signal
import subprocess
import time
import warnings
# import uno
from unotools import Socket, connect
from unotools.component.calc import Calc
from unotools.unohelper import convert_path_to_url
# calcObject (xlsx)
#ok
def connect(file=None, port=8100, counter_max=5000):
    """Open libreoffice and enable conection with Calc.
    Args:
        file (str or pathlib.Path, optional): Filepath. If None, a
            new Calc instance will be opened.
        port (int, optional): port for connection.
        counter_max (int, optional): Max number of tentatives to establish a
            connection.
    Returns:
        Calc object.
    Examples:
        Open new instance of Calc:
        >>> calcObject = calc.connect()
        Adds one sheet ('Sheet2') at position 1:
        >>> calcObject.insert_sheets_new_by_name('Sheet2', 1)
        Add multiple sheets ('Sheet3' and 'Sheet4) at position 2:
        >>> calcObject.insert_multisheets_new_by_name(['Sheet3', 'Sheet4'], 2)
        >>> # Get number of sheets
        >>> print(calcObject.get_sheets_count())
        4
        >>> # Remove sheets
        >>> calcObject.remove_sheets_by_name('Sheet3')
        >>> # get sheet data
        >>> sheet1 = calcObject.get_sheet_by_name('Sheet1')
        >>> sheet2 = calcObject.get_sheet_by_index(0)
    """
    # Bug fix: this function shadows the module-level
    # `from unotools import connect`, so the original call
    # `connect(Socket(...))` below recursed into *this* function instead of
    # opening the socket. Import the library function under an alias.
    from unotools import connect as _uno_connect

    # launch libreoffice listening on the requested socket port
    libreoffice = subprocess.Popen([f"soffice --nodefault --accept='socket,host=localhost,port={port};urp;'"], shell=True, close_fds=True)
    # poll until the socket accepts a connection (soffice takes a while to boot)
    connected = False
    counter = 0
    while not connected:
        time.sleep(0.5)
        try:
            context = _uno_connect(Socket('localhost', f'{port}'))
            connected = True
        except Exception:
            # narrow from a bare `except:` so Ctrl-C still interrupts the loop
            counter += 1
            if counter == counter_max:
                raise ConnectionError('Cannot establish connection, maybe try increasing counter_max value.')
    if file is None:
        return Calc(context)
    # open an existing document
    file = Path(file)
    return Calc(context, convert_path_to_url(str(file)))
#ok
def closeCalc(calcObject):
    """Close the Calc document held by *calcObject*.

    Args:
        calcObject (Calc object): Object created by connect2calc().
    """
    calcObject.close(True)
#ok
#ok
def saveCalc(calcObject, filepath=None):
    """Save the document behind *calcObject* as an ODS file.

    Note:
        If `filepath` has no suffix, '.ods' is appended.

    Args:
        calcObject (Calc object): Object created by :py:func:`calcmanip.connect2Calc`.
        filepath (string or pathlib.Path, optional): filepath to save file.
    """
    # resolve the target: explicit argument > document's own location > default
    if filepath is not None:
        target = Path(filepath)
    elif calcObject.Location == '':
        target = Path('./Untitled.ods')
        warnings.warn('Saving at ./Untitled.ods')
    else:
        target = Path(calcObject.Location)
    # append the default extension when none was given
    if target.suffix == '':
        target = target.parent / (str(target.name) + '.ods')
    calcObject.store_as_url(convert_path_to_url(str(target)), 'FilterName')
# calcObject manipulation
#ok
def get_sheets_name(calcObject):
    """Return the workbook's sheet names as a tuple."""
    sheets = calcObject.Sheets
    return sheets.ElementNames
def set_col_width(sheetObject, col, width):
    """Set the 'Width' property of column *col* on the given sheet."""
    column = sheetObject.getColumns()[col]
    column.setPropertyValue('Width', width)
def get_col_width(sheetObject, col):
    """Return the 'Width' property of column *col*."""
    return sheetObject.getColumns()[col].Width
def set_row_height(sheetObject, row, height):
    """Set the 'Height' property of row *row* on the given sheet."""
    target_row = sheetObject.getRows()[row]
    target_row.setPropertyValue('Height', height)
def get_row_height(sheetObject, row):
    """Return the 'Height' property of row *row*."""
    return sheetObject.getRows()[row].Height
def float_hook(value):
    """Object hook for get_cell_value: convert the raw cell string to float."""
    return float(value)
def get_cell_value(sheetObject, row, col, type='formula', object_hook=None):
    """Read one cell.

    type: 'formula' returns the cell's formula text, 'data' its display string.
    Any other value warns and falls back to 'data'.
    object_hook: optional callable applied to the raw value before returning.
    """
    cell = sheetObject.get_cell_by_position(col, row)
    if type == 'formula':
        value = cell.getFormula()
    else:
        if type != 'data':
            warnings.warn(f"type = {type} is not a valid option. Using type = 'data'.")
        value = cell.getString()
    return object_hook(value) if object_hook is not None else value
def set_cell_value(sheetObject, row, col, value, type='formula'):
    """Write one cell, as a formula ('formula') or a literal string ('data').

    Unknown *type* values warn and fall back to 'data'.
    """
    cell = sheetObject.get_cell_by_position(col, row)
    if type == 'formula':
        cell.setFormula(value)
        return
    if type != 'data':
        warnings.warn(f"type = {type} is not a valid option. Using type = 'data'.")
    cell.setString(value)
def copy_cell(sheet2copyFrom, sheet2pasteAt, row, col, type='formula',
                Font=1, ConditionalFormat=False, Border=False, resize=None,
                row2pasteAt=None, col2pasteAt=None, additional=None):
    """Copy a single cell (value, and optionally formatting) between sheets.

    type='string', 'formula', None (None skips the value entirely)
    Font (int 0-5): how many groups of font/format properties to copy; the
    groups, in order, are:
    0 = ['FormatID', 'CharWeight', 'CharHeight', 'CharColor', 'CellBackColor'],
    1 = ['IsTextWrapped', 'HoriJustify', 'HoriJustifyMethod', 'VertJustify', 'VertJustifyMethod'],
    2 = [ 'CharFontName', 'CharFont', 'CellStyle'],
    3 = ['CharUnderline', 'CharCrossedOut', 'CharEmphasis', 'CharEscapement', 'CharContoured'],
    4 = ['CharPosture', 'CharPostureComplex', 'CharRelief', 'CharShadowed', 'CharStrikeout', 'CharUnderlineColor', 'CharUnderlineHasColor',]
    resize = None, 'r', 'c', 'rc' or 'cr' -- also copy row height / col width.
    row2pasteAt/col2pasteAt default to the source row/col.
    This function do not copy ALL the properties of a cell, because it is very
    time consuming. Instead, it copys only the most used properties. If you
    need to include additional properties, have a look at
    ``sheetObject.get_cell_by_position(0, 0)._show_attributes()`` and find the
    desired propertie. Then, include it in ``additional``.
    """
    # clamp Font to the valid 0-5 range
    Font = int(Font)
    if Font > 5:
        Font = 5
    elif Font <0:
        Font = 0
    # default destination = same coordinates as the source
    if row2pasteAt is None:
        row2pasteAt = row
    if col2pasteAt is None:
        col2pasteAt = col
    # cell value
    if type is not None:
        set_cell_value(sheet2pasteAt, row=row2pasteAt, col=col2pasteAt, value=get_cell_value(sheet2copyFrom, row, col, type=type), type=type)
    # font name
    font_property_list_parsed = [['FormatID', 'CharWeight', 'CharHeight', 'CharColor', 'CellBackColor'],
                                ['IsTextWrapped', 'HoriJustify', 'HoriJustifyMethod', 'VertJustify', 'VertJustifyMethod'],
                                [ 'CharFontName', 'CharFont', 'CellStyle'],
                                ['CharUnderline', 'CharCrossedOut', 'CharEmphasis', 'CharEscapement', 'CharContoured'],
                                ['CharPosture', 'CharPostureComplex', 'CharRelief', 'CharShadowed', 'CharStrikeout', 'CharUnderlineColor', 'CharUnderlineHasColor',]
                                ]
    # flatten the first `Font` groups into one property list
    font_property_list = [item for sublist in font_property_list_parsed[0:Font] for item in sublist]
    for property in font_property_list:
        sheet2pasteAt.get_cell_by_position(col2pasteAt, row2pasteAt).setPropertyValue(property, getattr(sheet2copyFrom.get_cell_by_position(col, row), property))
    # conditional formating
    if ConditionalFormat:
        font_property_list = ['ConditionalFormat']
        for property in font_property_list:
            sheet2pasteAt.get_cell_by_position(col2pasteAt, row2pasteAt).setPropertyValue(property, getattr(sheet2copyFrom.get_cell_by_position(col, row), property))
    # border
    if Border:
        border_property_list = ['TableBorder', 'TableBorder2']#, 'LeftBorder', 'LeftBorder2', 'RightBorder', 'RightBorder2', 'TopBorder', 'TopBorder2', 'BottomBorder', 'BottomBorder2']
        for property in border_property_list:
            sheet2pasteAt.get_cell_by_position(col2pasteAt, row2pasteAt).setPropertyValue(property, getattr(sheet2copyFrom.get_cell_by_position(col, row), property))
    # additional (caller-supplied property names)
    if additional is not None:
        for property in additional:
            sheet2pasteAt.get_cell_by_position(col2pasteAt, row2pasteAt).setPropertyValue(property, getattr(sheet2copyFrom.get_cell_by_position(col, row), property))
    # col and row width
    if resize is not None:
        if resize == 'r':
            set_row_height(sheet2pasteAt, row2pasteAt, get_row_height(sheet2copyFrom, row))
        elif resize == 'c':
            set_col_width(sheet2pasteAt, col2pasteAt, get_col_width(sheet2copyFrom, col))
        elif resize == 'cr' or resize == 'rc':
            set_row_height(sheet2pasteAt, row2pasteAt, get_row_height(sheet2copyFrom, row))
            set_col_width(sheet2pasteAt, col2pasteAt, get_col_width(sheet2copyFrom, col))
        else:
            warnings.warn(f"resize = {resize} is not a valid option. Using resize = None.")
# ok
def get_cells_value(sheetObject, row_init, col_init, row_final=None, col_final=None, type='data'):
    """Read a rectangular cell range as nested lists.

    type: 'formula' or 'data' (unknown values warn and fall back to 'data').
    When row_final/col_final are None the sheet's visible extent is used.
    A single-column range collapses each row to a scalar; a single-row range
    returns the row itself instead of a one-element nested list.
    """
    if row_final is None:
        row_final = len(sheetObject.getRowDescriptions()) + sheetObject.queryVisibleCells().Count - 1
    if col_final is None:
        col_final = len(sheetObject.getColumnDescriptions()) + sheetObject.queryVisibleCells().Count - 1
    sheet_data = sheetObject.get_cell_range_by_position(col_init, row_init, col_final, row_final)
    if type == 'formula':
        sheet_data = list(sheet_data.getFormulaArray())
    elif type == 'data':
        sheet_data = list(sheet_data.getDataArray())
    else:
        warnings.warn(f"type = {type} is not a valid option. Using type = 'data'.")
        sheet_data = list(sheet_data.getDataArray())
    # transform each row tuple into a list; for a single-column range collapse
    # each row to its only value. (Bug fix: the original performed this
    # collapse twice, in two duplicated if-blocks inside the loop.)
    for row_number, row_data in enumerate(sheet_data):
        sheet_data[row_number] = list(row_data)
        if col_init == col_final:
            sheet_data[row_number] = row_data[0]
    # single-row range: return the row, not a nested list
    if row_init == row_final:
        sheet_data = sheet_data[0]
    return sheet_data
def set_cells_value(sheetObject, row_init, col_init, data, type='formula'):
    """Write a block of rows starting at (row_init, col_init).

    type = 'formula', 'data' or 'data_number':
      * 'formula' sets formulas; plain numbers also work, but dates/times lose
        their formatting (use copy_cells to copy formatting afterwards).
      * 'data' writes strings; works for dates, times and numbers, but
        formulas are stored as literal text and will not evaluate.
      * 'data_number' writes numeric values and works for numbers ONLY.
    Unknown values warn and fall back to 'data'.
    """
    # NOTE(review): every branch calls set_columns_*(row_init, row + col_init,
    # row_data) -- the column offset is added to the *row* index, which looks
    # transposed. Confirm against the unotools Calc API before changing.
    if type == 'formula':
        for row, row_data in enumerate(data):
            sheetObject.set_columns_formula(row_init, row+col_init, row_data)
    elif type == 'data':
        for row, row_data in enumerate(data):
            sheetObject.set_columns_str(row_init, row+col_init, row_data)
    elif type == 'data_number':
        for row, row_data in enumerate(data):
            sheetObject.set_columns_value(row_init, row+col_init, row_data)
    else:
        warnings.warn(f"type = {type} is not a valid option. Using type = 'data'.")
        for row, row_data in enumerate(data):
            sheetObject.set_columns_str(row_init, row+col_init, row_data)
def copy_cells(sheet2copyFrom, sheet2pasteAt, row_init, col_init, row_final, col_final, type='formula',
                Font=0, ConditionalFormat=False, Border=False, resize=None,
                row2pasteAt=None, col2pasteAt=None, additional=None):
    """Copy the cell range rows [row_init, row_final) x cols [col_init, col_final).

    type='data', 'formula', 'none'
    When no formatting is requested the whole range is moved with one bulk
    read/write; otherwise each cell is copied individually via copy_cell.
    resize: None, 'r', 'c', 'rc'/'cr' -- also copy row heights / column widths.
    """
    if row2pasteAt is None:
        row2pasteAt = row_init
    if col2pasteAt is None:
        col2pasteAt = col_init
    # Bug fix: the original tested `additional is not False`, which is True
    # for the default None, so the fast bulk branch below was unreachable.
    if Font > 0 or ConditionalFormat is not False or Border is not False or additional is not None:
        for row_relative, row in enumerate(range(row_init, row_final)):
            for col_relative, col in enumerate(range(col_init, col_final)):
                copy_cell(sheet2copyFrom, sheet2pasteAt, row, col, type=type,
                          Font=Font, ConditionalFormat=ConditionalFormat, Border=Border, resize=None,
                          row2pasteAt=row2pasteAt+row_relative, col2pasteAt=col2pasteAt+col_relative, additional=additional)
    else:
        data = get_cells_value(sheet2copyFrom, row_init, col_init, row_final, col_final, type=type)
        # Bug fix: the original passed `row2pasteAt` where the destination
        # sheet object belongs.
        set_cells_value(sheet2pasteAt, row2pasteAt, col2pasteAt, data, type=type)
    # col and row width
    if resize is not None:
        if resize in ('r', 'c', 'cr', 'rc'):
            if 'r' in resize:
                for row_relative, row in enumerate(range(row_init, row_final)):
                    set_row_height(sheet2pasteAt, row2pasteAt+row_relative, get_row_height(sheet2copyFrom, row))
            if 'c' in resize:
                for col_relative, col in enumerate(range(col_init, col_final)):
                    set_col_width(sheet2pasteAt, col2pasteAt+col_relative, get_col_width(sheet2copyFrom, col))
        else:
            warnings.warn(f"resize = {resize} is not a valid option ('r', 'c', 'rc', 'None'). Using resize = None.")
def copy_sheet(sheet2copy, sheet2paste, type='formula',
                Font=0, ConditionalFormat=False, Border=False, resize=None, additional=None):
    """Copy the used area of *sheet2copy* into *sheet2paste*.

    Bug fix: `additional` is now forwarded to copy_cells; the original
    hard-coded additional=None, silently discarding the caller's value.
    """
    last_col = len(sheet2copy.getColumnDescriptions()) + sheet2copy.queryVisibleCells().Count - 1
    last_row = len(sheet2copy.getRowDescriptions()) + sheet2copy.queryVisibleCells().Count - 1
    copy_cells(sheet2copy, sheet2paste, 0, 0, last_row, last_col, type=type, Font=Font,
               ConditionalFormat=ConditionalFormat, Border=Border, resize=resize, additional=additional)
def get_cell_value_from_sheets(sheetObject_list, row, col, type='data'):
    """Read the same (row, col) cell from every sheet in *sheetObject_list*.

    Returns the values as a list, in the order the sheets were given.
    """
    return [get_cell_value(sheet, row, col, type) for sheet in sheetObject_list]
|
# coding:utf-8
#import导包部分
import requests
import base64
import re
import rsa
import urllib
import json
import binascii
import time
'''
#INFO信息说明
1, 在提交POST请求之前, 需要GET 获取两个参数。
地址是:http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.3.18)
得到的数据中有 "servertime" 和 "nonce" 的值, 是随机的,其他值貌似没什么用。
2, 通过httpfox/chrome源码分析 观察POST 的数据, 参数较复杂,其中 “su" 是加密后的username, "sp"是加密后的password。"servertime" 和 ”nonce" 是上一步得到的。其他参数是不变的。
username 经过了BASE64 计算: username = base64.encodestring( urllib.quote(username) )[:-1];
password 经过了三次SHA1 加密, 且其中加入了 servertime 和 nonce 的值来干扰。
即: 两次SHA1加密后, 将结果加上 servertime 和 nonce 的值, 再SHA1 算一次。
'''
# user / password: fill in your own registered Sina account credentials.
# NOTE(review): this script is Python 2 code (print statements, urllib.quote,
# str regexes over resp.content) -- it will not run under Python 3 as-is.
username = ''
password = ''
session = requests.Session()
# login URLs: prelogin fetches the crypto parameters, url_login receives the POST
url_prelogin = 'http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=&rsakt=mod&client=ssologin.js(v1.4.5)&_=1364875106625'
url_login = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.5)'
# GET servertime, nonce, pubkey and rsakv -- the per-session values needed to
# encrypt the credentials for the login POST
resp = session.get(url_prelogin)
# the prelogin response is JSONP: extract the JSON inside the parentheses
json_data = re.search('\((.*)\)', resp.content).group(1)
data = json.loads(json_data)
servertime = data['servertime']
nonce = data['nonce']
pubkey = data['pubkey']
rsakv = data['rsakv']
# step 1: "su" is the URL-quoted username, base64-encoded
su = base64.b64encode(urllib.quote(username))
# step 2: "sp" is the password RSA-encrypted together with servertime and
# nonce (which act as a salt), hex-encoded
rsaPublickey = int(pubkey, 16)
key = rsa.PublicKey(rsaPublickey, 65537)
message = str(servertime) + '\t' + str(nonce) + '\n' + str(password)
sp = binascii.b2a_hex(rsa.encrypt(message, key))
# step 3: submit the login POST with the encrypted credentials
postdata = {
            'entry': 'weibo',
            'gateway': '1',
            'from': '',
            'savestate': '7',
            'userticket': '1',
            'ssosimplelogin': '1',
            'vsnf': '1',
            'vsnval': '',
            'su': su,
            'service': 'miniblog',
            'servertime': servertime,
            'nonce': nonce,
            'pwencode': 'rsa2',
            'sp': sp,
            'encoding': 'UTF-8',
            'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
            'returntype': 'META',
            'rsakv' : rsakv,
            }
resp = session.post(url_login, data=postdata)
# the response redirects via a JS replace(...) -- follow it manually
login_url = re.findall('replace\("(.*)"\)', resp.content)
# print login_url
resp = session.get(login_url[0])
# print resp.content
# pull the logged-in account's uid and open its profile page
uid = re.findall('"uniqueid":"(\d+)",', resp.content)[0]
url = "http://weibo.com/u/" + uid
resp = session.get(url)
# print resp.content
# text: demo of logging in and posting with Python; first grab the endpoints
# def decode_content(content):  # decode the returned text
#     result = re.findall('<script>STK && STK.pageletM && STK.pageletM.view\((.*?)\)<\/script>', content)
#     for i in result:
#         r = i.encode("utf-8").decode('unicode_escape').encode("utf-8")
#         print r.replace("\/", "/")
# # def add_new(content, resp):  # earlier signature for posting a new status
def add_new():  # create and publish a new (hard-coded) weibo status
    """Post a fixed test status using the module-level logged-in session."""
    addurl = "http://weibo.com/aj/mblog/add?_wv=5&__" + str(time.time())
    content = "测试下使用python写的发文"
    data = {'text':content, 'rank':'0', 'location':'home', 'module':'stissue', '_t':'0'}
    headers = {}
    # reuse the login cookies captured on the module-level `resp`
    headers['set-cookie'] = resp.headers['set-cookie']
    headers['Referer'] = 'http://weibo.com/u/' + uid + '?topnav=1&wvr=5'
    respon = session.post(addurl, data, headers=headers)
    print respon.status_code
# Post the test status. Bug fix: add_new() takes no parameters, but the
# original call passed two arguments (left over from an earlier revision that
# accepted (content, resp)) and therefore raised TypeError at runtime.
add_new()
# mid:3599892905366755
# d_expanded:off
# expanded_status:
# _t:0
# __rnd:1373792334798
# add_new()
# mid=3600011008917103
def forward(mid, content): # repost (forward) another user's weibo status
    """Repost the Weibo status *mid* with *content* as the repost reason."""
    forwardurl = "http://weibo.com/aj/mblog/forward?_wv=5&__" + str(time.time())
    # NOTE(review): the status id is sent under the form key 'uid' -- looks
    # odd, but matches the site's form; confirm before renaming.
    data = {'uid':mid, 'style-type':2, 'reason':content, 'rank':0, 'location':'mblog', '_t':0}
    headers = {}
    # reuse the login cookies captured on the module-level `resp`
    headers['set-cookie'] = resp.headers['set-cookie']
    headers['Referer'] = 'http://weibo.com/u/3118088481?from=profile&wvr=5&loc=tabprofile'
    respon = session.post(forwardurl, data, headers=headers)
    print respon.status_code
# smoke test: repost a known status id
forward(3600011008917103, "转发")
def followed(dstuid,oid):  # follow another user's weibo account
    """Follow account *dstuid*; *oid* is the current user's uid (used in Referer)."""
    followedurl = "http://weibo.com/aj/f/followed?_wv=5&__rnd=%s"% int(time.time())
    data = {'uid':dstuid, 'rank':0, 'location':'mblog', '_t':0,'f':0,
            'oid':oid,
            'nogroup':'false',
            'challenge_uids':'',
            'check_challenge_value':'',
            'location':'home',
            'refer_sort':'interest',
            'refer_flag':'friend_bridge',
            'loc':1,
            }
    headers = {}
    # reuse the login cookies captured on the module-level `resp`
    headers['set-cookie'] = resp.headers['set-cookie']
    headers['Referer'] = 'http://weibo.com/u/'+oid+'?topnav=1&wv=5'
    respon = session.post(followedurl, data, headers=headers)
    print respon.status_code
# smoke test: follow a fixed account from the logged-in uid
followed('2898801847',uid)
|
from os import getenv
import logging
from flask import Flask, request
from glados import (
Glados,
GladosBot,
GladosRequest,
RouteType,
SlackVerification,
GladosRouteNotFoundError,
read_config,
)
from test_plugin.test_plugin import TestPlugin
from example import FLASK_HOST, FLASK_PORT
import json
# Load the GLADOS YAML config, then build the Flask app and the GLADOS
# request dispatcher from it.
glados_config_file = "glados_standalone/glados.yaml"
config = read_config(glados_config_file)
app = Flask(__name__)
server_config = config.config.server
glados = Glados(config.config_file)
app.secret_key = server_config.secret_key
def extract_slack_info(r: request):
    """Pull the raw body and Slack signature headers out of request *r*.

    Returns a SlackVerification, or None when reading the request fails
    (in which case the request probably did not come from Slack).
    """
    try:
        raw_body = r.get_data(as_text=True)
        ts = r.headers.get("X-Slack-Request-Timestamp")
        sig = r.headers.get("X-Slack-Signature")
        return SlackVerification(raw_body, ts, sig)
    except Exception as exc:
        logging.error(exc)
        return None
@app.route("/SendMessage/<bot>/<route>", methods=["POST"])
def send_message_route(bot, route):
    """Dispatch a JSON SendMessage request for *bot* through the GLADOS router."""
    glados_request = GladosRequest(
        RouteType.SendMessage, route, bot_name=bot, json=request.get_json()
    )
    return glados.request(glados_request)
@app.route("/Events/<bot>", methods=["POST"])
def event_subscriptions(bot):
    """Handle Slack Events API callbacks for *bot*.

    Answers Slack's `url_verification` handshake directly with the
    challenge; every other event is wrapped in a GladosRequest. A KeyError
    from the dispatcher yields an empty response.
    """
    body = request.json
    event_type = body.get("type", "")
    if event_type == "url_verification":
        return body.get("challenge")
    # Build GladosRequest
    r = GladosRequest(
        RouteType.Events,
        slack_verify=extract_slack_info(request),
        bot_name=bot,
        json=request.get_json(),
    )
    try:
        return glados.request(r)
    except KeyError:
        return ""
@app.route("/Slash/<bot>/<route>", methods=["POST"])
def slash_command(bot, route):
    """Handle a Slack slash-command POST and dispatch it to GLADOS."""
    verification = extract_slack_info(request)
    payload = request.form.to_dict()
    glados_request = GladosRequest(
        RouteType.Slash, route, verification, bot_name=bot, json=payload
    )
    return glados.request(glados_request)
@app.route("/Interaction/<bot>", methods=["POST"])
def interaction(bot):
    """Handle a Slack interactive-component POST for *bot*.

    The interaction payload arrives as a JSON string in the form field
    "payload"; unknown routes answer "not found".
    """
    verification = extract_slack_info(request)
    payload = json.loads(request.form.to_dict().get("payload"))
    glados_request = GladosRequest(
        RouteType.Interaction, slack_verify=verification, bot_name=bot, json=payload
    )
    try:
        return glados.request(glados_request)
    except GladosRouteNotFoundError as err:
        logging.error(err)
        return "not found"
@app.route("/Menu", methods=["POST"])
def external_menu():
    """Serve a Slack external-menu request via the GLADOS Menu route."""
    verification = extract_slack_info(request)
    payload = json.loads(request.form.to_dict().get("payload"))
    glados_request = GladosRequest(RouteType.Menu, slack_verify=verification, json=payload)
    return glados.request(glados_request)
def start():
    """Initialize GLADOS by (re)reading its configuration."""
    glados.read_config()
def run():
    """Configure GLADOS, then start the Flask server on the configured host/port."""
    start()
    app.run(server_config.host, server_config.port, debug=server_config.debug)
# Script entry point: run the standalone GLADOS Flask server.
if __name__ == "__main__":
    run()
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 https://aws.amazon.com/apache-2-0/
import boto3
import sys
import time
import os
import json
# Resolve the agent config file: prefer $AGENT_CONFIG_FILE, fall back to the
# default path baked into the host image.
try:
    agent_config_file = os.environ['AGENT_CONFIG_FILE']
except KeyError:
    agent_config_file = "/etc/agent/Agent_config.tfvars.json"
try:
    with open(agent_config_file, 'r') as file:
        agent_config_data = json.loads(file.read())
except OSError:
    # This path is expected to be executed in Lambdas which don't have config
    # files
    agent_config_data = {"error_log_group": os.environ["ERROR_LOG_GROUP"],
                         "error_logging_stream": os.environ["ERROR_LOGGING_STREAM"],
                         "region": os.environ.get('REGION', None)}
try:
    cw = boto3.client('logs', agent_config_data.get('region', None))
except Exception:
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. log() degrades to a no-op when cw is None.
    cw = None
# DEBUGGING
def log(message,
        log_group_name=agent_config_data["error_log_group"],
        log_stream_name=agent_config_data["error_logging_stream"]):
    """Best-effort: push *message* as a single event to CloudWatch Logs.

    Defaults come from the agent config loaded at import time. Silently
    returns when no CloudWatch client could be built; any API failure is
    reported to stderr instead of raising.
    """
    # print("ERROR-PRINT: {}".format(message))
    if cw is None:
        return
    try:
        # retrieve the stream's current sequence token (required by
        # put_log_events on an existing stream)
        response = cw.describe_log_streams(logGroupName=log_group_name)
        seq = None
        logEvents = [{
            'timestamp': int(time.time() * 1000),
            'message': message
        }
        ]
        for lg in response.get('logStreams'):
            if lg.get('logStreamName') == log_stream_name:
                seq = lg.get('uploadSequenceToken')
        # a stream with no prior uploads has no sequence token
        if seq is None:
            response = cw.put_log_events(
                logGroupName=log_group_name,
                logStreamName=log_stream_name,
                logEvents=logEvents
            )
        else:
            response = cw.put_log_events(
                logGroupName=log_group_name,
                logStreamName=log_stream_name,
                logEvents=logEvents,
                sequenceToken=seq
            )
    except Exception as e:
        print("Cannot log errors because {}".format(e), file=sys.stderr)
|
import time
class Timer:
    """Context manager that accumulates elapsed times in `duration`.

    Each `with` use appends one elapsed-seconds float, so a single Timer
    instance can time repeated sections.
    """

    def __init__(self):
        self._start = None  # perf_counter value captured at the latest __enter__
        self.duration = []  # elapsed seconds, one entry per completed block

    def __enter__(self):
        # Bug fix: use the monotonic perf_counter instead of time.time();
        # wall-clock adjustments (NTP, DST) could make time.time() deltas
        # negative or wildly wrong.
        self._start = time.perf_counter()
        return self

    def __exit__(self, *args, **kwargs):
        self.duration.append(time.perf_counter() - self._start)
|
from typing import Any
class Pagination:
    """One page of results plus its paging metadata."""

    def __init__(self, items: list[Any], page: int, per_page: int, total: int):
        self.items = items        # rows on this page
        self.page = page          # 1-based page number
        self.per_page = per_page  # requested page size
        self.total = total        # total rows across all pages

    def __repr__(self) -> str:
        shown = len(self.items)
        return f'<Pagination {self.page}:{self.per_page} {shown}/{self.total}>'
class PaginationParams:
    """Requested page number and page size for a paginated query."""

    def __init__(self, page: int, per_page: int):
        self.page = page
        self.per_page = per_page

    def __repr__(self):
        return f'<Pagination {self.page}:{self.per_page}>'
class OrderByParams:
    """Sort column and direction for a query."""

    def __init__(self, column: str, ascending: bool):
        self.column = column
        self.ascending = ascending

    def __repr__(self):
        direction = 'asc' if self.ascending else 'desc'
        return f'<OrderBy {self.column}:{direction}>'
|
import re
import os
def Test(fPath):
    """Parse the username and comment out of the last line of a danmaku log.

    Reads the file at *fPath*, prints the last line, then extracts and prints
    the `Uname` and `Comment` fields via regex. Raises AttributeError when the
    last line does not match the pattern (same as the original behaviour).
    """
    # Bug fix: use a context manager so the file is closed even if parsing
    # raises (the original leaked the handle on any exception), and run the
    # regex search once instead of twice.
    with open(fPath, 'r', encoding='utf-8') as fp:
        lines = fp.readlines()
    lastLine = lines[-1]
    print(lastLine)
    test = lastLine
    pattern = r'Uname":"([\w\u4E00-\u9FA5\-_]{4,16}[^"])", "Comment":"([\w\u4E00-\u9FA5\-_#]{1,16})"'
    print("输入:"+test)
    match = re.search(pattern, test)
    username = match.group(1)
    rawComment = match.group(2)
    print("username " +username +"\nrawCommnet "+rawComment)
from rest_framework.response import Response
def success(self, message, data, total):
    """Build a success response payload.

    NOTE(review): inside this function the bare name `success` resolves to
    this very function object, which then becomes a dict key, and
    `self.__message` etc. are plain attribute lookups (no name mangling
    outside a class body). This looks like a method lifted out of a class;
    the intended keys were probably the strings 'success', 'message',
    'data', 'total' -- confirm before fixing.
    """
    return {
        success: True,
        self.__message: message,
        self.__data: data,
        self.__total: total
    }
def not_found(self, message, data):
    """Build a not-found response payload.

    NOTE(review): `success` here resolves to the sibling `success` function
    object used as a dict key, and `data` is accepted but never used (an
    empty list is returned instead) -- likely extracted from a class;
    confirm the intended string keys before fixing.
    """
    return {
        success: False,
        self.__message: message,
        self.__data: []
    }
def already_exists(self, message, data):
    """Build an already-exists response payload.

    NOTE(review): same issues as not_found() -- `success` is the sibling
    function object used as a key, and `data` is ignored in favour of an
    empty list; confirm the intended keys before fixing.
    """
    return {
        success: False,
        self.__message: message,
        self.__data: []
    }
def exception_has_occurred(self, message):
    """Return the generic (Portuguese) server-error message.

    The incoming *message* is currently discarded; the TODO below plans to
    persist it to an error-log table.
    """
    # TODO: store the incoming `message` in a table for server-error logging.
    message = 'Ocorreu um erro no servidor, contate o administrador.'
    return message
import pytest
import torch
from easydict import EasyDict
from ding.policy.r2d3 import R2D3Policy
from ding.utils.data import offline_data_save_type
from tensorboardX import SummaryWriter
from ding.model.wrapper.model_wrappers import ArgmaxSampleWrapper, HiddenStateWrapper, EpsGreedySampleWrapper
import os
from typing import List
from collections import namedtuple
# Synthetic environment dimensions used by the fixtures below.
obs_space = 5
action_space = 4
# R2D3 policy configuration for the smoke test.
# NOTE(review): both `burnin_step=2` and `burning_step=5` are present; one of
# the two looks like a typo/leftover -- confirm which key the policy reads.
cfg = dict(
    cuda=True,
    on_policy=False,
    priority=True,
    priority_IS_weight=True,
    model=dict(
        obs_shape=obs_space,
        action_shape=action_space,
        encoder_hidden_size_list=[128, 128, 512],
    ),
    discount_factor=0.99,
    burnin_step=2,
    nstep=5,
    learn_unroll_len=20,
    burning_step=5,
    learn=dict(
        value_rescale=True,
        update_per_collect=8,
        batch_size=64,
        learning_rate=0.0005,
        target_update_theta=0.001,
        lambda1=1.0,  # n-step return
        lambda2=1.0,  # supervised loss
        lambda3=1e-5,  # L2 it's very important to set Adam optimizer optim_type='adamw'.
        lambda_one_step_td=1,  # 1-step return
        margin_function=0.8,  # margin function in JE, here we implement this as a constant
        per_train_iter_k=0,
        ignore_done=False,
    ),
    collect=dict(
        n_sample=32,
        traj_len_inf=True,
        env_num=8,
        pho=1 / 4,
    ),
    eval=dict(env_num=8, ),
    other=dict(
        eps=dict(
            type='exp',
            start=0.95,
            end=0.1,
            decay=100000,
        ),
        replay_buffer=dict(
            replay_buffer_size=int(1e4),
            alpha=0.6,
            beta=0.4,
        ),
    ),
)
# wrap for attribute-style access (cfg.burnin_step etc.)
cfg = EasyDict(cfg)
def get_batch(size=8):
    """Build a dummy collect batch: index -> zero observation tensor."""
    return {idx: torch.zeros(obs_space) for idx in range(size)}
def get_transition(size=20):
    """Build *size* synthetic transitions shaped like r2d3 training samples."""
    import numpy as np
    transitions = []
    for step in range(size):
        transitions.append({
            'obs': torch.zeros(obs_space),
            'action': torch.tensor(np.array([int(step % action_space)])),
            'done': False,
            'prev_state': [torch.randn(1, 1, 512) for _ in range(2)],
            'reward': torch.Tensor([1.]),
            'IS': 1.,
            'is_expert': bool(step % 2),
        })
    return transitions
@pytest.mark.parametrize('cfg', [cfg])
@pytest.mark.unittest
def test_r2d3(cfg):
    """Smoke-test R2D3Policy: init/reset/state-dict round trips plus collect,
    eval and learn forward passes on synthetic data."""
    policy = R2D3Policy(cfg, enable_field=['collect', 'eval'])
    # learn mode: model wrappers, reset and state-dict round trip
    policy._init_learn()
    assert type(policy._learn_model) == ArgmaxSampleWrapper
    assert type(policy._target_model) == HiddenStateWrapper
    policy._reset_learn()
    policy._reset_learn([0])
    state = policy._state_dict_learn()
    policy._load_state_dict_learn(state)
    # collect mode
    policy._init_collect()
    assert type(policy._collect_model) == EpsGreedySampleWrapper
    policy._reset_collect()
    policy._reset_collect([0])
    # eval mode
    policy._init_eval()
    assert type(policy._eval_model) == ArgmaxSampleWrapper
    policy._reset_eval()
    policy._reset_eval([0])
    assert policy.default_model()[0] == 'drqn'
    # monitored variables are all string names
    var = policy._monitor_vars_learn()
    assert type(var) == list
    assert sum([type(s) == str for s in var]) == len(var)
    # collect forward pass on a synthetic batch
    batch = get_batch(8)
    out = policy._forward_collect(batch, eps=0.1)
    assert len(set(out[0].keys()).intersection({'logit', 'prev_state', 'action'})) == 3
    assert list(out[0]['logit'].shape) == [action_space]
    # transition processing
    timestep = namedtuple('timestep', ['reward', 'done'])
    ts = timestep(
        1.,
        0.,
    )
    ts = policy._process_transition(batch[0], out[0], ts)
    assert len(set(ts.keys()).intersection({'prev_state', 'action', 'reward', 'done', 'obs'})) == 5
    # train-sample slicing: one sample per sequence (plus a partial tail)
    ts = get_transition(64 * policy._sequence_len)
    sample = policy._get_train_sample(ts)
    n_traj = len(ts) // policy._sequence_len
    assert len(sample) == n_traj + 1 if len(ts) % policy._sequence_len != 0 else n_traj
    # eval forward pass
    out = policy._forward_eval(batch)
    assert len(set(out[0].keys()).intersection({'logit', 'action'})) == 2
    assert list(out[0]['logit'].shape) == [action_space]
    # drop burn-in IS weights, then run learn forward with and without rescale
    for i in range(len(sample)):
        sample[i]['IS'] = sample[i]['IS'][cfg.burnin_step:]
    out = policy._forward_learn(sample)
    policy._value_rescale = False
    out = policy._forward_learn(sample)
|
import time
import serial
def getBeacons(ser: serial.Serial):
    """Run an HM-10-style 'S' discovery scan and parse the advertised beacons.

    Blocks until the module terminates the listing with 'OK+DISCE'.
    Returns a list of dicts with factory_id/uuid/identifier/mac/rssi keys.
    """
    beacons = []
    ser.write(b'S')  # trigger the beacon discovery command
    res = b''
    # Accumulate the serial reply until the end-of-discovery marker arrives.
    # NOTE(review): inWaiting() is the pre-pyserial-3 spelling of in_waiting;
    # still works but deprecated — confirm the pinned pyserial version.
    while True:
        res += ser.read(ser.inWaiting())
        time.sleep(0.01)
        if res.endswith(b'OK+DISCE\r\n'):
            break
    for line in res.split(b'\r\n'):
        if line == b'OK+DISCS':
            continue  # scan-start marker, no payload
        elif line == b'OK+DISCE':
            break  # scan-end marker
        else:
            # Expected payload shape: OK+DISC:<factory>:<uuid>:<id>:<mac>:<rssi>
            temp = line.decode('ascii').split(':')
            if temp[0] != 'OK+DISC':
                continue  # skip anything that is not a beacon record
            factory_id = temp[1]
            uuid = temp[2]
            identifier = temp[3]
            mac = temp[4]
            rssi = int(temp[5])
            beacons.append({
                'factory_id': factory_id,
                'uuid': uuid,
                'identifier': identifier,
                'mac': mac,
                'rssi': rssi
            })
    return beacons
def getDistance(power: int, rssi: int, n: int):
    """Estimate distance from RSSI using the log-distance path-loss model.

    `power` is the calibrated RSSI at 1 m, `n` the path-loss exponent.
    """
    exponent = (power - rssi) / (10 * n)
    return 10 ** exponent
def getBeaconId(factory_id: str, uuid: str, identifier: str, mac: str):
    """Join the four beacon fields into a colon-separated identifier string."""
    return ':'.join((factory_id, uuid, identifier, mac))
def feedback(ser: serial.Serial):
    """Send the single-byte feedback command 'F' to the device."""
    ser.write(b'F')
"""
Testing Script
testing from commandline:
put:
curl http://localhost:5000/CsvReader/path1 -d "csvFilePath=C:/Users/Nicho/Desktop/Projects/CsvPlotterAndGrapher/csvTest.csv" -X PUT
get:
curl http://localhost:5000/CsvReader/path1
"""
from flask import Flask, request
from requests import get,put
def read_csv():
    """PUT the CSV file path to the CsvReader endpoint, then GET and print it back."""
    API_ENDPOINT = 'http://localhost:5000/CsvReader/path1'
    CSV_FILE_PATH = 'C:/Users/Nicho/Desktop/Projects/CsvPlotterAndGrapher/csvTest.csv'
    put(API_ENDPOINT, data={'csvFilePath': CSV_FILE_PATH})
    print(get('http://localhost:5000/CsvReader/path1').json())
def histogram_api():
    """Configure the histogram plot endpoint with a column name, then fetch it."""
    API_ENDPOINT = 'http://localhost:5000/Plotter/ColumnToHistogram/plot1'
    put(API_ENDPOINT, data={'columnName':"sepal length"})
    get(API_ENDPOINT)
# Exercise both endpoints whenever this script is run (requires a live server).
read_csv()
histogram_api()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#.--. .-. ... .... -. - ... .-.-.- .. -.
import click
from flask.ext.script import Manager
from shady import create_app, app
# NOTE(review): `flask.ext.*` imports were removed in Flask 1.0 and
# Flask-Script is unmaintained — confirm the pinned Flask version before upgrading.
manager = Manager(app)
@manager.command
def runserver():
    """Create the app and run Flask's built-in server using config values."""
    create_app()
    app.run(host = app.config['SERVE_HOST'],
            port = app.config['SERVE_PORT'],
            threaded = app.config['THREADED'])
if __name__ == "__main__":
    manager.run()
|
from enum import Enum, Flag, IntEnum, IntFlag
class MyEnum(Enum):
    """Plain Enum whose values are descriptive strings."""
    a = "letter a"
    b = "letter b"
class MyStrEnum(str, Enum):
    """str-mixin Enum: members are also `str` instances."""
    a = "letter a"
    b = "letter b"
class MyIntEnum(IntEnum):
    """IntEnum: members compare equal to plain ints."""
    a = 1
    b = 2
class MyFlag(Flag):
    """Flag enum: members support bitwise combination (a | b)."""
    a = 1
    b = 2
class MyIntFlag(IntFlag):
    """IntFlag enum: bitwise-combinable and int-comparable."""
    a = 1
    b = 2
|
import pickle
from nose.tools import eq_
from .. import revision
from ....datasources import revision_oriented
from ....dependencies import solve
# Pre-built datasources/features reused by the tests below; the *_ds variants
# are raw datasources, the others are the derived count features.
h_headings_ds = revision.datasources.heading_titles_matching(r"h")
h_headings = revision.heading_titles_matching(r"h")
lvl_2_headings_ds = revision.datasources.headings_by_level(2)
lvl_2_headings = revision.headings_by_level(2)
enwiki_wikilinks_ds = revision.datasources.wikilink_titles_matching(r"^:?en:")
enwiki_wikilinks = revision.wikilink_titles_matching(r"^:?en:")
wikimedia_external_links_ds = \
    revision.datasources.external_link_urls_matching(r".*wikimedia.*")
wikimedia_external_links = \
    revision.external_link_urls_matching(r".*wikimedia.*")
cite_templates_ds = revision.datasources.template_names_matching(r"^cite")
cite_templates = revision.template_names_matching(r"^cite")
# Shortcut to the revision-text datasource used as the cache key in every test.
r_text = revision_oriented.revision.text
def test_content():
    """Content datasource strips wiki markup; content_chars counts characters
    and survives a pickle round trip."""
    text = "This is some text.\n== A heading! ==\n{{Foo}} the [[bar]]!"
    cache = {r_text: text}
    eq_(solve(revision.datasources.content, cache=cache),
        "This is some text.\n A heading! \n the bar!")
    eq_(solve(revision.content_chars, cache=cache), 41)
    eq_(pickle.loads(pickle.dumps(revision.content_chars)),
        revision.content_chars)
def test_headings():
    """heading_titles/headings features extract, count and filter headings."""
    text = ("This is some text.\n"
            "== Heading! ==\n"
            "{{Foo}} the [[bar]]!\n"
            "=== Another heading! ===")
    cache = {r_text: text}
    eq_(solve(revision.datasources.heading_titles, cache=cache),
        ["Heading!", "Another heading!"])
    eq_(solve(revision.headings, cache=cache), 2)
    eq_(solve(h_headings, cache=cache), 1)
    eq_(solve(lvl_2_headings, cache=cache), 1)
    eq_(pickle.loads(pickle.dumps(revision.headings)), revision.headings)
    eq_(pickle.loads(pickle.dumps(h_headings)), h_headings)
    eq_(pickle.loads(pickle.dumps(lvl_2_headings)), lvl_2_headings)
def test_wikilinks():
    """wikilink_titles datasource plus counters, incl. the enwiki filter."""
    text = ("This is [[:en:some|text]].\n"
            "== Heading! ==\n"
            "{{Foo}} the [[bar]]!\n"
            "=== Another heading! ===")
    cache = {r_text: text}
    eq_(solve(revision.datasources.wikilink_titles, cache=cache),
        [":en:some", "bar"])
    eq_(solve(revision.wikilinks, cache=cache), 2)
    eq_(solve(enwiki_wikilinks, cache=cache), 1)
    eq_(pickle.loads(pickle.dumps(revision.wikilinks)), revision.wikilinks)
    eq_(pickle.loads(pickle.dumps(enwiki_wikilinks)), enwiki_wikilinks)
def test_external_links():
    """external_link_urls datasource plus counters, incl. the wikimedia filter."""
    text = ("This is [https://wikis.com].\n"
            "== Heading! ==\n"
            "{{Foo}} the [//meta.wikimedia.org foobar]!\n"
            "=== Another heading! ===")
    cache = {r_text: text}
    eq_(solve(revision.datasources.external_link_urls, cache=cache),
        ["https://wikis.com", "//meta.wikimedia.org"])
    eq_(solve(revision.external_links, cache=cache), 2)
    eq_(solve(wikimedia_external_links, cache=cache), 1)
    eq_(pickle.loads(pickle.dumps(revision.external_links)),
        revision.external_links)
    eq_(pickle.loads(pickle.dumps(wikimedia_external_links)),
        wikimedia_external_links)
def test_tags():
    """tag_names datasource plus tag counters, incl. the <ref> counter."""
    text = ("This is [https://wikis.com].\n"
            "== Heading! ==\n"
            "<ref>Foo</ref> the <span>foobar</span>!\n"
            "=== Another heading! ===")
    cache = {r_text: text}
    eq_(solve(revision.datasources.tag_names, cache=cache),
        ["ref", "span"])
    eq_(solve(revision.tags, cache=cache), 2)
    eq_(solve(revision.ref_tags, cache=cache), 1)
    eq_(pickle.loads(pickle.dumps(revision.tags)), revision.tags)
    eq_(pickle.loads(pickle.dumps(revision.ref_tags)), revision.ref_tags)
def test_templates():
    """template_names datasource plus counters, incl. the cite-template filter."""
    text = ("This is [https://wikis.com].\n"
            "== Heading! ==\n"
            "<ref>{{Cite thing}}</ref> the {{citation needed}}\n"
            "=== Another {{heading|foo}}! ===")
    cache = {r_text: text}
    eq_(solve(revision.datasources.template_names, cache=cache),
        ["Cite thing", "citation needed", "heading"])
    eq_(solve(revision.templates, cache=cache), 3)
    eq_(solve(cite_templates, cache=cache), 1)
    eq_(pickle.loads(pickle.dumps(revision.templates)), revision.templates)
    eq_(pickle.loads(pickle.dumps(cite_templates)), cite_templates)
|
import os
def check_dir():
    """Report the process's current working directory."""
    return os.path.abspath(os.curdir)
|
import pytest
from carsus.util.selected import element, element_range, \
selected_atoms, parse_selected_atoms, species_entry, \
parse_selected_species
@pytest.mark.parametrize("test_input, exp_atomic_number", [
    ("H", 1), ("Zn", 30), ("Uuo", 118), ("h", 1),
])
def test_element(test_input, exp_atomic_number):
    """Element symbols parse to atomic numbers regardless of case."""
    assert element.parseString(test_input)[0] == exp_atomic_number
@pytest.mark.parametrize("test_input, exp_atomic_numbers", [
    ("H-Li", [1, 2, 3]),
    ("H-Zn", list(range(1, 31))),
    ("si-s", [14, 15, 16]),
])
def test_element_range(test_input, exp_atomic_numbers):
    """Dash-separated element ranges expand to the inclusive number list."""
    assert element_range.parseString(test_input).asList() == exp_atomic_numbers
@pytest.mark.parametrize("test_input, exp_atomic_numbers", [
    ("H", [1]),
    ("H-Zn", list(range(1, 31))),
    ("h, si-s", [1, 14, 15, 16]),
    ("he, h-li", [1, 2, 3]),
])
def test_selected_atoms(test_input, exp_atomic_numbers):
    """Comma-separated atoms and ranges merge into one atomic-number list."""
    assert selected_atoms.parseString(test_input).asList() == exp_atomic_numbers
@pytest.mark.parametrize("selected_atoms, expected_list", [
    ("H", [1]),
    ("H-Li", [1, 2, 3]),
    ("H, Be-B", [1, 4, 5]),
    ("h, be-b", [1, 4, 5]),
    (" h , be - b ", [1, 4, 5]),
])
def test_parse_selected_atoms(selected_atoms, expected_list):
    """parse_selected_atoms tolerates case differences and stray whitespace."""
    assert parse_selected_atoms(selected_atoms) == expected_list
@pytest.mark.parametrize("invalid_selected_atoms", [
    "Foo", "H-Foo", "H, Al-Foo",
])
def test_parse_selected_atoms_raises_invalid(invalid_selected_atoms):
    """Unknown element symbols must raise ValueError."""
    with pytest.raises(ValueError):
        parse_selected_atoms(invalid_selected_atoms)
@pytest.mark.parametrize("test_input, expected_list", [
    ("H 0", [(1, 0)]),
    ("H, Li 0", [(1, 0), (3, 0)]),
    ("H-Li 0-1", [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1)]),
    ("H-C 0, 4", [(1, 0), (2, 0), (3, 0), (4, 0),
                  (5, 0), (5, 4), (6, 0), (6, 4)]),
    ("H-Li", [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]),
])
def test_parse_species_entry(test_input, expected_list):
    """Species entries expand to (atomic_number, ion_charge) pairs; charges
    impossible for an element are dropped (e.g. H with charge 4)."""
    assert species_entry.parseString(test_input).asList() == expected_list
@pytest.mark.parametrize("test_species, expected_list", [
    ("H 0; li 0", [(1, 0), (3, 0)]),
    ("H, Li 0", [(1, 0), (3, 0)]),
    ("li 0; h", [(1, 0), (3, 0)]),
    ("h 0; h-li 0", [(1, 0), (2, 0), (3, 0)]),
    ("H-Li 0-1", [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1)]),
    ("H-C 0, 4", [(1, 0), (2, 0), (3, 0), (4, 0),
                  (5, 0), (5, 4), (6, 0), (6, 4)]),
    ("H-Li", [(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]),
    ("fe 12; ni-zn 23-25", [(26, 12), (28, 23), (28, 24), (28, 25),
                            (29, 23), (29, 24), (29, 25),
                            (30, 23), (30, 24), (30, 25)]),
])
def test_parse_selected_species(test_species, expected_list):
    """Semicolon-separated species entries merge into one sorted pair list."""
    assert parse_selected_species(test_species) == expected_list
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class Router(pulumi.CustomResource):
    """
    Manages a V2 router resource within OpenStack.
    """

    # NOTE(review): this file is tfgen-generated; prefer regenerating with a
    # Python-3-aware bridge. Hand-fixed here because `basestring` does not
    # exist on Python 3 and made every string-property check raise NameError.
    #
    # (python_name, expected_type, provider output key) for each constructor
    # property. Truthy values of the wrong type are rejected; falsy values
    # (None, '', [], {}) pass through unchecked, matching the generated code.
    _PROPS = [
        ('admin_state_up', bool, 'adminStateUp'),
        ('availability_zone_hints', list, 'availabilityZoneHints'),
        ('distributed', bool, 'distributed'),
        ('enable_snat', bool, 'enableSnat'),
        ('external_fixed_ips', list, 'externalFixedIps'),
        ('external_gateway', str, 'externalGateway'),
        ('external_network_id', str, 'externalNetworkId'),
        ('name', str, 'name'),
        ('region', str, 'region'),
        ('tenant_id', str, 'tenantId'),
        ('value_specs', dict, 'valueSpecs'),
        ('vendor_options', dict, 'vendorOptions'),
    ]

    def __init__(__self__, __name__, __opts__=None, admin_state_up=None, availability_zone_hints=None, distributed=None, enable_snat=None, external_fixed_ips=None, external_gateway=None, external_network_id=None, name=None, region=None, tenant_id=None, value_specs=None, vendor_options=None):
        """Create a Router resource with the given unique name, props, and options.

        :raises TypeError: if the resource name/options or any truthy property
            has the wrong type.
        """
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        # BUG FIX: was isinstance(__name__, basestring) -> NameError on Py3.
        if not isinstance(__name__, str):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        values = dict(
            admin_state_up=admin_state_up,
            availability_zone_hints=availability_zone_hints,
            distributed=distributed,
            enable_snat=enable_snat,
            external_fixed_ips=external_fixed_ips,
            external_gateway=external_gateway,
            external_network_id=external_network_id,
            name=name,
            region=region,
            tenant_id=tenant_id,
            value_specs=value_specs,
            vendor_options=vendor_options,
        )
        __props__ = dict()
        # Validate, mirror onto the instance, and forward to the provider.
        for py_name, expected_type, out_key in Router._PROPS:
            value = values[py_name]
            if value and not isinstance(value, expected_type):
                raise TypeError('Expected property {} to be a {}'.format(
                    py_name, expected_type.__name__))
            setattr(__self__, py_name, value)
            __props__[out_key] = value
        super(Router, __self__).__init__(
            'openstack:networking/router:Router',
            __name__,
            __props__,
            __opts__)

    def set_outputs(self, outs):
        """Copy any provider outputs present in `outs` back onto this resource."""
        for py_name, _expected_type, out_key in Router._PROPS:
            if out_key in outs:
                setattr(self, py_name, outs[out_key])
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2004-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""Matplotlib computationally modest image class."""
__authors__ = ["V.A. Sole", "T. Vincent"]
__license__ = "MIT"
__date__ = "16/02/2016"
import numpy
from matplotlib import cbook
from matplotlib.image import AxesImage
class ModestImage(AxesImage):
    """Computationally modest image class.

    Customization of https://github.com/ChrisBeaumont/ModestImage to allow
    extent support.

    ModestImage is an extension of the Matplotlib AxesImage class
    better suited for the interactive display of larger images. Before
    drawing, ModestImage resamples the data array based on the screen
    resolution and view window. This has very little affect on the
    appearance of the image, but can substantially cut down on
    computation since calculations of unresolved or clipped pixels
    are skipped.

    The interface of ModestImage is the same as AxesImage. However, it
    does not currently support setting the 'extent' property. There
    may also be weird coordinate warping operations for images that
    I'm not aware of. Don't expect those to work either.
    """

    def __init__(self, *args, **kwargs):
        self._full_res = None            # full-resolution data array
        self._sx, self._sy = None, None  # current x/y subsampling strides
        self._bounds = (None, None, None, None)  # (x0, x1, y0, y1) of cached crop
        self._origExtent = None          # extent of the *whole* image
        super(ModestImage, self).__init__(*args, **kwargs)
        if 'extent' in kwargs and kwargs['extent'] is not None:
            self.set_extent(kwargs['extent'])

    def set_extent(self, extent):
        super(ModestImage, self).set_extent(extent)
        # Remember the first extent ever set: it describes the full image.
        if self._origExtent is None:
            self._origExtent = self.get_extent()

    def get_image_extent(self):
        """Returns the extent of the whole image.

        get_extent returns the extent of the drawn area and not of the full
        image.

        :return: Bounds of the image (x0, x1, y0, y1).
        :rtype: Tuple of 4 floats.
        """
        if self._origExtent is not None:
            return self._origExtent
        else:
            return self.get_extent()

    def set_data(self, A):
        """
        Set the image array

        ACCEPTS: numpy/PIL Image A
        """
        self._full_res = A
        self._A = A
        # BUG FIX: `numpy.float` was removed in NumPy 1.24; it was always an
        # alias for the builtin `float`, so use that directly.
        if (self._A.dtype != numpy.uint8 and
                not numpy.can_cast(self._A.dtype, float)):
            raise TypeError("Image data can not convert to float")
        if (self._A.ndim not in (2, 3) or
                (self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
            raise TypeError("Invalid dimensions for image data")
        # Invalidate every cached rendering product.
        self._imcache = None
        self._rgbacache = None
        self._oldxslice = None
        self._oldyslice = None
        self._sx, self._sy = None, None

    def get_array(self):
        """Override to return the full-resolution array"""
        return self._full_res

    def _scale_to_res(self):
        """Change self._A and _extent to render an image whose
        resolution is matched to the eventual rendering."""
        # extent has to be set BEFORE set_data
        if self._origExtent is None:
            if self.origin == "upper":
                self._origExtent = (0, self._full_res.shape[1],
                                    self._full_res.shape[0], 0)
            else:
                self._origExtent = (0, self._full_res.shape[1],
                                    0, self._full_res.shape[0])
        if self.origin == "upper":
            origXMin, origXMax, origYMax, origYMin = self._origExtent[0:4]
        else:
            origXMin, origXMax, origYMin, origYMax = self._origExtent[0:4]
        ax = self.axes
        # Axes size in display pixels.
        ext = ax.transAxes.transform([1, 1]) - ax.transAxes.transform([0, 0])
        xlim, ylim = ax.get_xlim(), ax.get_ylim()
        # Clip the view limits to the full-image extent.
        xlim = max(xlim[0], origXMin), min(xlim[1], origXMax)
        if ylim[0] > ylim[1]:
            # Inverted y axis (typical when origin == "upper").
            ylim = max(ylim[1], origYMin), min(ylim[0], origYMax)
        else:
            ylim = max(ylim[0], origYMin), min(ylim[1], origYMax)
        dx, dy = xlim[1] - xlim[0], ylim[1] - ylim[0]
        # Crop with a 5-pixel margin around the visible window.
        y0 = max(0, ylim[0] - 5)
        y1 = min(self._full_res.shape[0], ylim[1] + 5)
        x0 = max(0, xlim[0] - 5)
        x1 = min(self._full_res.shape[1], xlim[1] + 5)
        y0, y1, x0, x1 = [int(a) for a in [y0, y1, x0, x1]]
        # Subsampling stride: about one data pixel per screen pixel, but never
        # coarser than 1/5th of the cropped region.
        sy = int(max(1, min((y1 - y0) / 5., numpy.ceil(dy / ext[1]))))
        sx = int(max(1, min((x1 - x0) / 5., numpy.ceil(dx / ext[0]))))
        # have we already calculated what we need?
        if (self._sx is not None) and (self._sy is not None):
            if (sx >= self._sx and sy >= self._sy and
                    x0 >= self._bounds[0] and x1 <= self._bounds[1] and
                    y0 >= self._bounds[2] and y1 <= self._bounds[3]):
                return
        self._A = self._full_res[y0:y1:sy, x0:x1:sx]
        self._A = cbook.safe_masked_invalid(self._A)
        x1 = x0 + self._A.shape[1] * sx
        y1 = y0 + self._A.shape[0] * sy
        if self.origin == "upper":
            self.set_extent([x0, x1, y1, y0])
        else:
            self.set_extent([x0, x1, y0, y1])
        self._sx = sx
        self._sy = sy
        self._bounds = (x0, x1, y0, y1)
        self.changed()

    def draw(self, renderer, *args, **kwargs):
        self._scale_to_res()
        super(ModestImage, self).draw(renderer, *args, **kwargs)
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
from scipy.ndimage import rotate
from scipy.stats import bernoulli
import cv2,os
import scipy.misc
import cv2
#PATH = './data_extra'
PATH = './data/'  # root folder of the recorded simulator dataset
data_csv = '/driving_log.csv'  # appended to PATH when reading the log
DRIVING_LOG_FILE = ''#'./data_extra/driving_log.csv'
IMG_PATH = ''#'./data_extra/'
STEERING_COEFFICIENT = 0.229  # NOTE(review): unused in this chunk — confirm its consumer
# read the csv file
def read_csv():
    """Load the driving log and return (X, Y).

    X holds the center/left/right image-path columns, Y the steering column.
    `names=None` lets pandas take column names from the file's header row.
    """
    training_data = pd.read_csv(PATH + data_csv, names=None)
    print(training_data.shape)
    # Removed a no-op statement (`training_data[['center','left','right']]`
    # whose result was discarded) and stale commented-out code.
    X = training_data[['center', 'left', 'right']]
    Y = training_data['steering']
    return X, Y
def datashuffle(X, Y):
    """Return X and Y shuffled in unison (rows stay aligned)."""
    from sklearn.utils import shuffle
    return shuffle(X, Y)
def trainval_split(X, Y):
    """Split into train/validation sets (80/20, fixed seed for reproducibility).

    Returns X_train, X_val, Y_train, Y_val — the native order of
    sklearn's train_test_split. Stale commented-out code removed.
    """
    from sklearn.model_selection import train_test_split
    return train_test_split(X, Y, test_size=0.2, random_state=42)
def random_flip(img, label):
    """With probability 0.5, mirror the image horizontally and negate the label."""
    if np.random.choice([0, 1]) == 1:
        return cv2.flip(img, 1), -label
    return img, label
def random_brightness(img, label):
    """With probability 0.5, scale the HSV value channel by a factor in [0.4, 1.0)."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    if np.random.randint(2) == 0:
        factor = 0.2 + np.random.uniform(0.2, 0.6)
        hsv[:, :, 2] = hsv[:, :, 2] * factor
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB), label
def random_view(index, X_left, X_right, X_center, Y):
    """Pick one of the three camera views for `index` with a steering correction.

    Left view adds +0.25 to the steering label, right view subtracts 0.25,
    center leaves it unchanged.
    """
    base = './data/'
    choice = np.random.choice([0, 1, 2])
    steering = Y[index]
    if choice == 0:
        frames, steering = X_left, steering + 0.25
    elif choice == 2:
        frames, steering = X_right, steering - 0.25
    else:
        frames = X_center
    image = plt.imread(base + '/' + frames[index].strip())
    return image, steering
def generate_train(X_center, X_left, X_right, Y):
    """Draw one augmented training example.

    Picks a random frame index, randomly chooses the left/right/center camera
    (with the matching steering correction), then applies random brightness
    and random horizontal flip. Stale commented-out code removed.
    """
    index = np.random.randint(0, len(Y))
    img, y = random_view(index, X_left, X_right, X_center, Y)
    img, y = random_brightness(img, y)
    img, y = random_flip(img, y)
    return img, y
def generate_train_batch(center, left, right, steering, batch_size):
    """Yield endless (images, steering) batches built via generate_train.

    The two buffers are allocated once and refilled in place each iteration.
    """
    images = np.zeros((batch_size, 160, 320, 3))
    angles = np.zeros(batch_size)
    while True:
        for slot in range(batch_size):
            images[slot], angles[slot] = generate_train(center, left, right, steering)
        yield images, angles
|
import os
from yozuch.generators.template import TemplateEntry, TemplateGenerator
class PageGenerator(TemplateGenerator):
    """Generator that emits one template entry per configured site page."""

    def __init__(self, url_template, name):
        super().__init__(url_template, name, None)

    def generate(self, context):
        """Yield a TemplateEntry for each filename in the site's 'pages' list."""
        for filename in context.site.get('pages', []):
            # Removed unused `ext` local; only the stem feeds the URL template.
            stem = os.path.splitext(filename)[0]
            url = self.url_template.format(name=stem, filename=filename)
            yield TemplateEntry(url, '!pages/' + filename, 'pages/{}'.format(filename))
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Long description and license text are read from the repo files at build time.
with open('README.rst') as f:
    readme = f.read()
with open('LICENSE') as f:
    license = f.read()
setup(
    name='name_extractor',
    version='0.1.0',
    description='Statistical name extractor for HTML.',
    long_description=readme,
    author='João Mateus de Freitas Veneroso',
    author_email='jmfveneroso@gmail.com',
    url='https://github.com/jmfveneroso/name_extractor',
    license=license,
    packages=find_packages(exclude=('tests', 'dataset', 'conditional_dataset', 'tex', 'deprecated'))
)
|
import itertools
import logging
import re
from logrec.dataprep.model.containers import ProcessableTokenContainer
from logrec.dataprep.model.word import ParseableToken
from logrec.dataprep.preprocessors import java
from logrec.dataprep.model.chars import NewLine, MultilineCommentEnd, MultilineCommentStart, \
OneLineCommentStart, Quote, Backslash, Tab
from logrec.dataprep.model.placeholders import placeholders
from logrec.dataprep.util import create_regex_from_token_list
logger = logging.getLogger(__name__)
def from_file(lines):
    """Convert raw file lines into a flat [ParseableToken, NewLine, ...] list,
    stripping each line's trailing newline if present."""
    result = []
    for line in lines:
        stripped = line[:-1] if len(line) > 0 and line[-1] == '\n' else line
        result.append(ParseableToken(stripped))
        result.append(NewLine())
    return result
def from_string(str):
    """Split a space-delimited string into ParseableTokens."""
    return [ParseableToken(chunk) for chunk in str.split(" ")]
def from_list(lst):
    """Wrap every element of `lst` in a ParseableToken."""
    return [ParseableToken(item) for item in lst]
############### Multitoken list level ###########
def replace_4whitespaces_with_tabs(token_list, context):
    """Replace every run of four spaces inside parseable tokens with a Tab token.

    Container tokens are processed recursively; all other tokens pass through
    unchanged.
    """
    result = []
    for token in token_list:
        if isinstance(token, ParseableToken):
            # Capturing group keeps the 4-space runs in the split output.
            for piece in re.split("( {4})", str(token)):
                result.append(Tab() if piece == " " * 4 else ParseableToken(piece))
        elif isinstance(token, ProcessableTokenContainer):
            for subtoken in token.get_subtokens():
                # BUG FIX: the original recursed with a bare subtoken and no
                # `context` argument, which raised TypeError at runtime; wrap
                # the subtoken in a list and forward the context.
                result.extend(replace_4whitespaces_with_tabs([subtoken], context))
        else:
            result.append(token)
    return result
def to_token_list(tokens):
    """Serialize tokens to an escaped, space-joined line ending with the 'ect' placeholder."""
    joined = " ".join(str(token) for token in tokens)
    # repr(...)[1:-1] escapes special characters and drops the surrounding quotes.
    return repr(joined)[1:-1] + f" {placeholders['ect']}\n"
def to_human_readable(tokens, context):
    """Join token strings with single spaces and terminate with a newline."""
    return " ".join(str(token) for token in tokens) + "\n"
def spl(token_list, multiline_comments_tokens, two_char_delimiters, one_char_delimiters):
    """Split every token into keywords/identifiers and return a flat list."""
    comments_re = create_regex_from_token_list(multiline_comments_tokens)
    two_char_re = create_regex_from_token_list(two_char_delimiters)
    one_char_re = create_regex_from_token_list(one_char_delimiters)
    flattened = []
    for token in token_list:
        flattened.extend(split_to_key_words_and_identifiers(
            token, comments_re, two_char_re, one_char_re,
            java.delimiters_to_drop_verbose))
    return flattened
def spl_verbose(token_list, context):
    """Split tokens like spl(), but keep verbose tokens such as tabs,
    newlines and brackets."""
    return spl(
        token_list,
        java.multiline_comments_tokens,
        java.two_character_tokens + java.two_char_verbose,
        java.one_character_tokens + java.one_char_verbose,
    )
characters = set(java.multiline_comments_tokens + java.two_character_tokens + java.two_char_verbose + java.one_character_tokens + java.one_char_verbose)
def split_to_key_words_and_identifiers(token, multiline_comments_regex,
                                       two_char_regex, one_char_regex, to_drop):
    """Split one token into keywords, delimiters and identifiers.

    Splitting is layered: first on multiline-comment markers, then on
    two-character delimiters, then on one-character delimiters, finally
    dropping anything matching `to_drop`. Recognised delimiter strings are
    converted to their dedicated char-class objects (Tab, Quote, ...);
    everything else becomes a ParseableToken. Containers are processed
    recursively; any other token is returned untouched, wrapped in a list
    so callers can always flatten the result.
    """
    if isinstance(token, ParseableToken):
        raw_result = []
        result = []
        comment_tokens_separated = re.split(multiline_comments_regex, str(token))
        for st in comment_tokens_separated:
            if re.fullmatch(multiline_comments_regex, st):
                raw_result.append(st)
            else:
                # NOTE: `st` is deliberately reused as the loop variable of
                # each nested split pass below.
                two_char_tokens_separated = re.split(two_char_regex, st)
                for st in two_char_tokens_separated:
                    if re.fullmatch(two_char_regex, st):
                        raw_result.append(st)
                    else:
                        one_char_token_separated = re.split(one_char_regex, st)
                        # Drop empty strings and the `to_drop` delimiters.
                        raw_result.extend(list(filter(None, itertools.chain.from_iterable(
                            [re.split(to_drop, st) for st in one_char_token_separated]
                        ))))
        for raw_str in raw_result:
            if not raw_str in characters:
                result.append(ParseableToken(raw_str))
            elif raw_str == "/*":
                result.append(MultilineCommentStart())
            elif raw_str == "*/":
                result.append(MultilineCommentEnd())
            elif raw_str == "//":
                result.append(OneLineCommentStart())
            elif raw_str == "\"":
                result.append(Quote())
            elif raw_str == "\\":
                result.append(Backslash())
            elif raw_str == "\t":
                result.append(Tab())
            else:
                # Known delimiter without a dedicated class: keep the raw string.
                result.append(raw_str)
        return result
    elif isinstance(token, ProcessableTokenContainer):
        res = []
        for subtoken in token.get_subtokens():
            res.extend(split_to_key_words_and_identifiers(subtoken, multiline_comments_regex, two_char_regex, one_char_regex, to_drop))
        return res
    else:
        return [token]
"""ResNeXt architecture.
Based on:
https://arxiv.org/abs/1611.05431
Original implementation:
https://github.com/facebookresearch/ResNeXt
This model is based on torchvision's ResNet model:
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
# Public API of this module.
__all__ = ['ResNeXt', 'resnext50_32x4d', 'resnext101_32x4d',
           'resnext101_64x4d', 'resnext152_32x4d']
# Pretrained-weight URLs for model_zoo; left empty (no weights published here).
model_urls = {
    'resnext50_32x4d': '',
    'resnext101_32x4d': '',
    'resnext101_64x4d': '',
    'resnext152_32x4d': '',
}
def group_conv3x3(in_channels, out_channels, groups, stride=1):
    """Grouped 3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=groups,
        bias=False,
    )
def conv1x1(in_channels, out_channels, stride=1):
    """Pointwise (1x1) convolution without bias."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class Bottleneck(nn.Module):
    """ResNeXt bottleneck block: 1x1 reduce -> grouped 3x3 -> 1x1 expand,
    with an identity (or downsampled) skip connection."""
    expansion = 4
    def __init__(self, inplanes, planes, cardinality, base_width, stride=1,
                 downsample=None):
        super(Bottleneck, self).__init__()
        # Width of the grouped middle conv scales with cardinality and base_width.
        width = cardinality * int(planes * base_width / 64)
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = group_conv3x3(width, width, cardinality, stride)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
    def forward(self, x):
        """conv path plus (possibly downsampled) shortcut, final ReLU."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNeXt(nn.Module):
    """ResNeXt (https://arxiv.org/abs/1611.05431), torchvision-ResNet style.

    Args:
        block: residual block class (``Bottleneck``-style constructor).
        layers: number of blocks per stage, e.g. [3, 4, 6, 3] for ResNeXt-50.
        cardinality: number of groups in each bottleneck's 3x3 conv.
        base_width: per-group width used to size bottleneck channels.
        num_classes: output size of the final linear classifier.
        zero_init_residual: if True, zero-init each block's last BN weight.
    """
    def __init__(self, block, layers, cardinality, base_width,
                 num_classes=1000, zero_init_residual=False):
        super(ResNeXt, self).__init__()
        self.base_width = base_width
        self.cardinality = cardinality
        self.inplanes = 64  # running channel count, updated by _make_layer
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 3-5 halve the spatial resolution.
        self.conv2 = self._make_layer(block, 64, layers[0])
        self.conv3 = self._make_layer(block, 128, layers[1], stride=2)
        self.conv4 = self._make_layer(block, 256, layers[2], stride=2)
        self.conv5 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Standard He init for convs; BN weights/biases to 1/0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual blocks.

        The first block may stride and/or widen the identity path through a
        1x1 conv + BN ``downsample`` module; ``self.inplanes`` is advanced
        to the stage's output width.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, self.cardinality,
                            self.base_width, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.cardinality,
                                self.base_width))
        return nn.Sequential(*layers)
    def forward(self, x):
        """(N, 3, H, W) input -> (N, num_classes) logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten pooled features
        x = self.fc(x)
        return x
def resnext50_32x4d(pretrained=False, **kwargs):
    """Build a ResNeXt-50 (32x4d) model.

    Args:
        pretrained (bool): If True, load ImageNet weights from ``model_urls``.
    """
    net = ResNeXt(Bottleneck, [3, 4, 6, 3], 32, 4, **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnext50_32x4d'])
        net.load_state_dict(state)
    return net
def resnext101_32x4d(pretrained=False, **kwargs):
    """Build a ResNeXt-101 (32x4d) model.

    Args:
        pretrained (bool): If True, load ImageNet weights from ``model_urls``.
    """
    net = ResNeXt(Bottleneck, [3, 4, 23, 3], 32, 4, **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnext101_32x4d'])
        net.load_state_dict(state)
    return net
def resnext101_64x4d(pretrained=False, **kwargs):
    """Build a ResNeXt-101 (64x4d) model.

    Args:
        pretrained (bool): If True, load ImageNet weights from ``model_urls``.
    """
    net = ResNeXt(Bottleneck, [3, 4, 23, 3], 64, 4, **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnext101_64x4d'])
        net.load_state_dict(state)
    return net
def resnext152_32x4d(pretrained=False, **kwargs):
    """Build a ResNeXt-152 (32x4d) model.

    Args:
        pretrained (bool): If True, load ImageNet weights from ``model_urls``.
    """
    net = ResNeXt(Bottleneck, [3, 8, 36, 3], 32, 4, **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnext152_32x4d'])
        net.load_state_dict(state)
    return net
|
import idc
import idaapi
# Comes from a personal project.
# The code can likely be found on the internet under my name.
all_breakpoint = {}
class BreakPoint(object):
    """Object wrapper around an IDA debugger breakpoint.

    Creating an instance installs the breakpoint at ``addr``, sets its
    condition language to Python, and wires the condition so the debugger
    calls ``self.trigger()`` when hit. Instances self-register into the
    module-level ``all_breakpoint`` dict keyed by address.
    """
    BP_TYPE = idc.BPT_DEFAULT  # software breakpoint by default
    BP_SIZE = 0
    def __init__(self, addr, size=None):
        """Install a breakpoint at ``addr``.

        Raises:
            ValueError: if a breakpoint already exists at ``addr`` or if
                IDA refuses to create one.
        """
        if size is None:
            size = self.BP_SIZE
        self.addr = addr
        if idc.CheckBpt(self.addr) != idc.BPTCK_NONE:
            raise ValueError("There is already a breakpoint at {0}".format(hex(self.addr)))
        if not idc.AddBptEx(addr, size, self.BP_TYPE):
            raise ValueError("Failed to create breakpoint at {0}".format(hex(self.addr)))
        self._set_elang("Python")
        # NOTE(review): the condition string assumes this module is importable
        # as `breakpoint` inside IDA's Python evaluator -- confirm module name.
        self._set_condition("return breakpoint.all_breakpoint[{0}].trigger()".format(self.addr))
        all_breakpoint[self.addr] = self
    def _set_condition(self, cond):
        # Attach the Python condition expression evaluated on each hit.
        return idc.SetBptCnd(self.addr, cond)
    def _set_elang(self, elang):
        # Switch the breakpoint's condition evaluator (e.g. "Python").
        bpt = idaapi.bpt_t()
        if not idaapi.get_bpt(self.addr, bpt):
            return False
        bpt.elang = elang
        return idaapi.update_bpt(bpt)
    def delete(self):
        """Remove the breakpoint from IDA and from the registry."""
        if all_breakpoint[self.addr] is self:
            del all_breakpoint[self.addr]
        idc.DelBpt(self.addr)
    @property
    def condition(self):
        # Current condition expression as stored by IDA.
        return idc.GetBptAttr(self.addr, idc.BPTATTR_COND)
    @property
    def elang(self):
        # Condition evaluator language, or False if the bpt cannot be read.
        bpt = idaapi.bpt_t()
        if not idaapi.get_bpt(self.addr, bpt):
            return False
        return bpt.elang
    @property
    def state(self):
        # Raw idc.BPTCK_* state for this address.
        return idc.CheckBpt(self.addr)
    @property
    def is_enabled(self):
        return self.state in [idc.BPTCK_YES, idc.BPTCK_ACT]
    def enable(self):
        """Enable the breakpoint; raises ValueError if already enabled."""
        if self.is_enabled:
            raise ValueError("{0} already enabled".format(self))
        return idc.EnableBpt(self.addr, True)
    def disable(self):
        """Disable the breakpoint; raises ValueError if not enabled."""
        if not self.is_enabled:
            raise ValueError("{0} is not enabled".format(self))
        return idc.EnableBpt(self.addr, False)
    def trigger(self):
        """Hook called from the breakpoint condition; True means 'stop here'."""
        return True
class HardwareExecBreakPoint(BreakPoint):
    """Hardware execution breakpoint (1-byte, idc.BPT_EXEC) variant."""
    BP_TYPE = idc.BPT_EXEC
    BP_SIZE = 1
def wait_for_breakpoint():
    """Resume the debuggee until one of our breakpoints (or a timeout) hits.

    Returns:
        idc.DBG_TIMEOUT on timeout, otherwise the registered BreakPoint
        instance for the event address. Events at unregistered addresses
        are reported and skipped.
    """
    while True:
        x = idc.GetDebuggerEvent(idc.WFNE_SUSP | idc.WFNE_CONT, -1)
        if x == idc.DBG_TIMEOUT:
            return x
        bp_addr = idc.GetEventEa()
        if bp_addr not in all_breakpoint:
            # Fixed: use print as a function -- the original Py2-only
            # `print "..."` statement is a SyntaxError under IDAPython/Py3.
            print("Ignore event at {0}".format(hex(bp_addr)))
            continue
        return all_breakpoint[bp_addr]
def remove_all_breakpoint():
    """Delete every breakpoint known to IDA and clear the local registry.

    Fixed: the original deleted by index while iterating ``range(GetBptQty())``;
    breakpoint indices shift as entries are removed, so every other breakpoint
    was skipped. Collect all addresses first, then delete them.
    """
    addrs = [idc.GetBptEA(i) for i in range(idc.GetBptQty())]
    for addr in addrs:
        idc.DelBpt(addr)
    all_breakpoint.clear()
|
import berserk
class Bots(berserk.clients.Bots):
    """berserk Bots client extended with challenge declining."""

    def decline_challenge(self, challenge_id, reason="generic"):
        """Decline an incoming challenge.
        :param str challenge_id: ID of a challenge
        :param str reason: reason for declining
        :return: success
        :rtype: bool
        """
        response = self._r.post(
            f"api/challenge/{challenge_id}/decline",
            json={"reason": reason},
        )
        return response["ok"]
class Client(berserk.Client):
    """berserk Client whose ``bots`` endpoint is the extended Bots subclass."""
    def __init__(self, session=None, base_url=None, pgn_as_default=False):
        super().__init__(session, base_url, pgn_as_default)
        # Swap in the subclass that adds decline_challenge().
        self.bots = Bots(session, base_url)
|
from typing import Union
from torch import Tensor
from torch.nn import Module
from torch.nn import functional as F
class Interpolate(Module):
    """Module wrapper around ``torch.nn.functional.interpolate``.

    Every constructor argument is stored and forwarded verbatim to
    ``F.interpolate`` on each call.
    """

    def __init__(
        self,
        size: Union[int, tuple] = None,
        scale_factor: Union[float, tuple] = None,
        mode: str = "nearest",
        align_corners: bool = None,
        recompute_scale_factor: bool = False,
    ):
        super().__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners
        self.recompute_scale_factor = recompute_scale_factor

    def forward(self, x: Tensor) -> Tensor:
        """Resample ``x`` according to the stored interpolation settings."""
        return F.interpolate(
            x,
            self.size,
            self.scale_factor,
            self.mode,
            self.align_corners,
            self.recompute_scale_factor,
        )
class Downsample(Interpolate):
    """Interpolate specialized to shrinking: scale_factor must be <= 1."""

    def __init__(
        self,
        scale_factor: Union[float, tuple],
        mode: str = "bilinear",
    ):
        assert scale_factor <= 1.0
        super().__init__(scale_factor=scale_factor, mode=mode)
class Upsample(Interpolate):
    """Interpolate specialized to enlarging: scale_factor must be >= 1."""

    def __init__(
        self,
        scale_factor: Union[float, tuple],
        mode: str = "bilinear",
    ):
        assert scale_factor >= 1.0
        super().__init__(scale_factor=scale_factor, mode=mode)
|
import numpy as np
from game_state_base import BaseGameState
class AccumulateGameState(BaseGameState):
    """Toy game whose scalar state accumulates (action - 2) each step.

    The observation is a single-element array, hence environment_shape = [1].
    """
    environment_shape = [1]
    def _reset(self):
        # Start every episode with an accumulator of zero.
        self.state = 0
    def _score(self):
        # Score is the current accumulated value.
        return self.state
    def _terminal(self):
        # NOTE(review): this sets an episode length instead of returning a
        # boolean; presumably BaseGameState terminates via time_steps -- confirm.
        self.time_steps = 100
    def _apply_action(self, action):
        # Nonzero (truthy) actions shift the accumulator by (action - 2);
        # action 0 is a no-op. Returns the new observation.
        if action:
            self.state += action - 2
        return np.array([self.state])
|
import unittest
from rcd_str import capitalize_word_sequences
class TestCapita(unittest.TestCase):
    """capitalize_word_sequences capitalizes each hyphen/space-separated word."""
    def test(self):
        hyphenated = capitalize_word_sequences('усть-каменогорск')
        self.assertEqual(hyphenated, 'Усть-Каменогорск')
        spaced = capitalize_word_sequences('нижний тагил')
        self.assertEqual(spaced, 'Нижний Тагил')
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import fixtures
import six
class CreateFileWithContent(fixtures.Fixture):
    """Create a temporary file with the given content.
    Creates a file using a predictable name, to be used by tests for
    code that need a filename to load data or otherwise interact with
    the real filesystem.
    .. warning::
        It is the responsibility of the caller to ensure that the file
        is removed.
    Users of this fixture may also want to use
    :class:`fixtures.NestedTempfile` to set the temporary directory
    somewhere safe and to ensure the files are cleaned up.
    .. py:attribute:: path
       The canonical name of the file created.
    :param filename: Base file name or full literal path to the file
        to be created.
    :param contents: The data to write to the file. Unicode data will
        be encoded before being written.
    :param ext: An extension to add to filename.
    :param encoding: An encoding to use for unicode data (ignored for
        byte strings).
    """
    def __init__(self, filename, contents, ext='.conf', encoding='utf-8'):
        self._filename = filename
        self._contents = contents
        self._ext = ext
        self._encoding = encoding
    def setUp(self):
        """Create the file and write the (possibly encoded) contents."""
        super(CreateFileWithContent, self).setUp()
        contents = self._contents
        if isinstance(contents, six.text_type):
            # Unicode text must be encoded before os.write().
            contents = contents.encode(self._encoding)
        if not os.path.isabs(self._filename):
            # Relative name: use it as a mkstemp() prefix in the temp dir.
            (fd, self.path) = tempfile.mkstemp(prefix=self._filename,
                                               suffix=self._ext)
        else:
            # Absolute name: create exactly filename + ext.
            self.path = self._filename + self._ext
            fd = os.open(self.path, os.O_CREAT | os.O_WRONLY)
        try:
            os.write(fd, contents)
        finally:
            # Always release the descriptor; the file itself is NOT removed
            # here (see the class docstring warning).
            os.close(fd)
|
import random as r

# Vowel pool used to build random "words".
s = ["a", "e", "i", "o", "u"]
li = []
for _ in range(r.randint(1, 20)):
    # Each word is 1-10 randomly chosen vowels.
    word = ""
    for _ in range(r.randint(1, 10)):
        word += r.choice(s)
    li.append(word)
print(len(li))
for word in li:
    print(word)
# print(30 | 28)
import du
from du._test_utils import equal
def test_memoize():
    """foo's body must run exactly once; subsequent calls hit the cache."""
    calls = [0]
    @du.memoize
    def foo():
        calls[0] += 1
    equal(calls[0], 0)
    foo()
    equal(calls[0], 1)
    foo()
    equal(calls[0], 1)
def test_attr_dict():
    """AttrDict exposes its keys as readable and writable attributes."""
    d = du.AttrDict(foo=3, bar=2)
    equal(d.foo, 3)
    d.bar = 4
    equal(d.bar, 4)
|
import unittest
import subprocess
import requests
import time
import json
from glockr.config import PORT
from glockr.client import GClient
BASE_URL = 'http://127.0.0.1:{}'.format(PORT)
class TestClient(unittest.TestCase):
    """End-to-end tests against a real glockr server subprocess.

    Tests are order-dependent: the test_1_/test_2_/test_3_ prefixes rely on
    unittest's alphabetical method ordering.
    """
    _server_process = None
    @staticmethod
    def get_current_dict() -> dict:
        """Fetch the server's current resource table via GET /res."""
        target_url = BASE_URL + '/res'
        resp = requests.get(target_url)
        resp_dict = json.loads(resp.text)
        return resp_dict
    @classmethod
    def setUpClass(cls):
        super(TestClient, cls).setUpClass()
        # NOTE(review): with shell=True, _server_process is the shell, so
        # kill() in tearDownClass may leave the actual server alive -- confirm.
        cls._server_process = subprocess.Popen('python -m glockr.server start', shell=True)
        time.sleep(3)  # crude wait for the server to come up
    def test_1_add(self):
        # Register 25 names (popped from 29 downward), five per label 0..4.
        name_list = list(range(0, 30))
        for label in range(5):
            for _ in range(5):
                GClient.add(str(name_list.pop()), str(label))
    def test_2_show_all(self):
        # All 25 registered names must be visible.
        current_dict = GClient.show_all()
        assert len(current_dict) == 25
    def test_3_acquire_name(self):
        # check 15-19
        for each_name in range(15, 20):
            each_name = str(each_name)
            GClient.acquire('name', each_name)
            # try to acquire it again
            resp = GClient.acquire('name', each_name)
            assert not resp['result']
            # try to acquire by label
            resp = GClient.acquire('label', str(2))
            assert not resp['result']
            # release
            resp = GClient.release('name', each_name)
            assert resp['result']
            # check its status
            resp = GClient.show_all()
            assert resp[each_name]['status'] == 'FREE'
    @classmethod
    def tearDownClass(cls):
        super(TestClient, cls).tearDownClass()
        cls._server_process.kill()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""
迁移学习的网络结构
"""
import os
import torch
import torch.nn as nn
from torchvision import datasets, models, transforms
import torch.nn.functional as F
# SoftPool is optional: it is only referenced by the (commented-out) pooling
# experiments in resnet18 below, so a missing install just prints a hint.
try:
    import softpool_cuda
    from SoftPool import soft_pool2d, SoftPool2d
except ImportError:
    print('Please install SoftPool first: https://github.com/alexandrosstergiou/SoftPool')
def Model(model_name, nc, pretrained):
    """Look up a builder function in this module by name and construct it.

    Args:
        model_name: name of one of the builder functions defined below.
        nc: number of output classes.
        pretrained: whether to load ImageNet-pretrained weights.

    Raises:
        ValueError: if ``model_name`` does not name a callable in this module.
    """
    # Replaced eval() with an explicit globals() lookup: identical behavior
    # for valid builder names, but arbitrary expressions can no longer run.
    builder = globals().get(model_name)
    if builder is None or not callable(builder):
        raise ValueError("Unknown model name: {!r}".format(model_name))
    return builder(nc, pretrained)
def shufflenet_v2_x0_5(nc, pretrained):
    """ShuffleNetV2 x0.5 with its final FC layer resized to ``nc`` classes."""
    net = models.shufflenet_v2_x0_5(pretrained=pretrained)
    net.fc = nn.Linear(net.fc.in_features, nc)
    return net
def shufflenet_v2_x1_0(nc, pretrained):
    """ShuffleNetV2 x1.0 with its final FC layer resized to ``nc`` classes."""
    net = models.shufflenet_v2_x1_0(pretrained=pretrained)
    net.fc = nn.Linear(net.fc.in_features, nc)
    return net
def shufflenet_v2_x1_5(nc, pretrained):
    """ShuffleNetV2 x1.5 with its final FC layer resized to ``nc`` classes."""
    net = models.shufflenet_v2_x1_5(pretrained=pretrained)
    net.fc = nn.Linear(net.fc.in_features, nc)
    return net
def shufflenet_v2_x2_0(nc, pretrained):
    """ShuffleNetV2 x2.0 with its final FC layer resized to ``nc`` classes."""
    net = models.shufflenet_v2_x2_0(pretrained=pretrained)
    net.fc = nn.Linear(net.fc.in_features, nc)
    return net
def squeezenet1_1(nc, pretrained):
    """SqueezeNet 1.1 with its final 1x1 classifier conv resized to ``nc``."""
    net = models.squeezenet1_1(pretrained=pretrained)
    net.classifier[1] = nn.Conv2d(512, nc, kernel_size=1)
    return net
def mobilenet_v2(nc, pretrained):
    """MobileNetV2 with a fresh Dropout + Linear head for ``nc`` classes.

    When ``pretrained`` is True, ImageNet weights are loaded for the backbone.
    """
    net = models.mobilenet_v2(pretrained=pretrained)
    net.classifier = nn.Sequential(
        nn.Dropout(0.2),
        nn.Linear(net.last_channel, nc),
    )
    return net
def inception_v3(nc, pretrained):
    """Inception v3 with its final FC layer resized to ``nc`` classes."""
    net = models.inception_v3(pretrained=pretrained)
    net.fc = nn.Linear(net.fc.in_features, nc)
    return net
def getVggNet(nc):
    """VGG-11 (no pretrained weights) with a frozen backbone and a single
    Linear classifier mapping the 25088 flattened features to ``nc`` classes."""
    net = models.vgg11(pretrained=False)
    # Freeze every backbone parameter; only the new classifier will train.
    for p in net.parameters():
        p.requires_grad = False
    net.classifier = nn.Sequential(nn.Linear(25088, nc))
    return net
def resnet18(nc, pretrained):
    """ResNet-18 with its final FC layer resized to ``nc`` classes.

    SoftPool experiment notes (kept from the original, translated):
    the stem's nn.MaxPool2d(kernel_size=3, stride=2, padding=1) can be
    swapped for SoftPool2d(kernel_size=(2,2), stride=(2,2)); likewise
    nn.MaxPool2d(kernel_size=3, stride=2) for SoftPool2d(kernel_size=3, stride=2).
    """
    net = models.resnet18(pretrained=pretrained)
    net.fc = nn.Linear(net.fc.in_features, nc)
    return net
def resnet152(nc, pretrained):
    """ResNet-152 with its final FC layer resized to ``nc`` classes."""
    net = models.resnet152(pretrained=pretrained)
    net.fc = nn.Linear(net.fc.in_features, nc)
    return net
def getmnasnet1_0(nc):
    """MNASNet 1.0 (always ImageNet-pretrained) with a Linear head for ``nc``."""
    net = models.mnasnet1_0(pretrained=True)
    net.classifier = nn.Sequential(
        nn.Linear(1280, nc),
    )
    return net
if __name__ == '__main__':
    # Smoke test: build an inception_v3 head for 2 classes and export the
    # graph to ONNX so it can be inspected in Netron.
    model = inception_v3(2,False)
    dummy_input = torch.randn(1, 3, 640, 640)
    onnx_path = os.path.join("netron_model.onnx")
    torch.onnx.export(model, dummy_input, onnx_path)
# Notes
# This module not really designed for general purpose use. I wrote this as a study mechanism for
# decomposition algorithms. The codes are not well tested and may be naive.
#
# lapack working notes at http://www.netlib.org/lapack/lawns/
import pyJvsip as pv
def eye(t,n):
    """
    Usage: I=eye(t,n)
    Create and return an n x n identity matrix of pyJvsip type t
    (t must be a matrix type string, e.g. 'mview_d' or 'cmview_d').
    """
    return pv.create(t,n,n).identity
def sign(a_in): # see LAPACK Working Notes 148 for definition of sign
    """
    Function sign(alpha) returns the sign of scalar (real or complex) alpha.
    Real alpha -> +1.0 or -1.0 (zero maps to +1.0).
    Complex alpha -> alpha/|alpha| (unit-magnitude phase).
    Ints are promoted to float; any other type prints an error and returns None.
    """
    if type(a_in) is int:
        a=float(a_in)
    else:
        a=a_in
    if type(a) is float or type(a) is complex:
        # |a|; works for floats too since float has .real/.imag attributes.
        t=pv.vsip_hypot_d(a.real,a.imag)
        if t == 0.0:
            return 1.0
        elif a.imag==0.0:
            if a.real < 0.0:
                return -1.0
            else:
                return 1.0
        else:
            return a/t
    else:
        print('sign function only works on scalars')
        return
# householder routines
def houseVector(x):
    """
    v = houseVector(x)
    returns a normalized householder vector 'v' such that
    the householder projection matrix 'H' is:
    H = I - 2 v v*
    The LAPACK-style sign choice pushes v[0] away from zero to avoid
    cancellation. Prints an error and returns None for non-vector input.
    """
    if 'vview' not in x.type:
        print('Function houseVector only works on vector views')
        return
    v=x.copy
    v[0] += (sign(x[0]) * x.norm2)  # shift first entry by +/-||x||
    n = v.norm2
    if n == 0.0:
        v[0] = 1.0  # x was the zero vector: use the trivial reflector
    else:
        v /= n
    return v
def house(v): # create and return househoulder rotation matrix for householder
              # vector v; works for any valid househoulder vector
    """
    Usage:
       H=house(v)
    Create and return a householder projector matrix H = I - (2/v*v) v v*
    given input householder vector v.
    """
    # Map vector view types to the corresponding matrix view types for eye().
    t={'vview_f':'mview_f','vview_d':'mview_d','cvview_f':'cmview_f','cvview_d':'cmview_d'}
    return(eye(t[v.type],v.length) - v.outer(2.0/v.jdot(v),v))
def houseProd(v,A):
    """
    Usage:
       houseProd(v,A)
    using a householder vector V with a matrix of the proper size return HA
    Note A is modified in-place; but there are create/destroy penalties with this function
    Note a convenience reference to A is returned
    """
    beta = 2.0/v.jdot(v)
    # NOTE(review): .conj appears to conjugate v in place; conjugate, form
    # w = conj(v^T A), then restore v. Statement order matters here -- confirm.
    v.conj;w=v.prod(A).conj;v.conj
    A -= v.outer(beta,w)
    return A
def prodHouse(A,v):
    """
    Usage:
       prodHouse(A,v)
    using a householder vector V with a matrix of the proper size return AH
    Note A is modified in-place; but there are create/destroy penalties with this function
    Note a convenience reference to A is returned
    """
    beta = 2.0/v.jdot(v)
    w=A.prod(v)          # w = A v
    A-=w.outer(beta,v)   # A = A - beta w v*
    return A
#Givens
def givensCoef(x1_in,x2_in):
    """ Code adapted from Algorithm 1 of LAPACK working Notes lawn148
    Returns (c,s,r) such that the rotation [[c, s], [-conj(s), c]] applied
    to the pair (x1, x2) yields (r, 0). Ints are promoted to float;
    unsupported types print an error and return None.
    """
    if type(x1_in) is int:
        x1=float(x1_in)
    else:
        x1 = x1_in
    if type(x2_in) is int:
        x2=float(x2_in)
    else:
        x2 = x2_in
    if type(x1) is float and type(x2) is float:
        t=pv.vsip_hypot_d(x1,x2)
        if x2 == 0.0:
            return (1.0,0.0,x1)
        elif x1 == 0.0:
            return (0.0,sign(x2),t)
        else: # return (c,s,r)
            sn=sign(x1)
            return(pv.vsip_mag_d(x1)/t,sn*x2/t,sn*t)
    elif type(x1) is complex or type(x2) is complex:
        mx1=pv.vsip_hypot_d(x1.real,x1.imag)
        mx2=pv.vsip_hypot_d(x2.real,x2.imag)
        if mx2 == 0.0:
            return(1.0,0.0,x1)
        elif mx1 == 0.0:
            return(0,sign(x2.conjugate()),mx2)
        else:
            t=pv.vsip_hypot_d(mx1,mx2)
            c=mx1/t
            sn=sign(x1)
            s=(sn * x2.conjugate())/t
            r=sn * t
            return(c,s,r)
    else:
        print('Type <:'+repr(type(x1)) + ':> or <:'+ \
              repr(type(x2))+':> not recognized by givensCoef')
        return
def givens(t,i,j,c,s,size):
    """
    Return an extended givens matrix.
    An extended givens matrix is an identity matrix of size 'size'
    with elements at (i,i) and (j,j) replaced with c,
    the element at (i,j) replaced with s,
    and the element at (j,i) replaced with -conjugate(s)
    Usage:
       G=givens(t,i,j,c,s,size)
    Where:
       t = type
       i,j are index values for placement of
       c,s which are obtained (probably) from function givensCoef.
       size is an integer
    """
    G=eye(t,size)
    G[i,i]=c;G[j,j]=c;G[i,j]=s;G[j,i]=-s.conjugate()
    return G
def gProd(i,j,c,s,A):
    """
    Done in-place (A is modified)
    Usage:
        gProd(i,j,c,s,A)
    where:
        A is a matrix of size (m,n)
        i,j,c,s are equivalent to a givens matrix G = givens(A.type,i,j,c,s,m)
    does:
        A = G A
    returns:
        reference to A as a convenience
    """
    # Copy both rows first so the updates don't read already-written data.
    a1=A.rowview(i).copy
    a2=A.rowview(j).copy
    A.rowview(i)[:]= c * a1 + s * a2
    A.rowview(j)[:]= c * a2 - s.conjugate() * a1
    return A
def prodG(A,i,j,c,s):
    """
    Done in-place (A is modified)
    Usage:
        prodG(A,i,j,c,s)
    where:
        A is a matrix of size (m,n)
        i,j,c,s are equivalent to a givens matrix GH = givens(A.type,i,j,c,s,m).herm
    does:
        A = A GH
    returns:
        reference to A as a convenience
    """
    # Copy both columns first so the updates don't read already-written data.
    a_i=A.colview(i).copy
    a_j=A.colview(j).copy
    A.colview(i)[:]= c * a_i + s.conjugate() * a_j
    A.colview(j)[:]= c * a_j - s * a_i
    return A
def gtProd(i,j,c,s,A):
    """
    Done in-place (A is modified)
    Usage:
        gtProd(i,j,c,s,A)
    where:
        A is a matrix of size (m,n)
        i,j,c,s are equivalent to a givens matrix G_TH = givens(A.type,i,j,c,s,m).transview.herm
    does:
        A = G_TH A
    returns:
        reference to A as a convenience
    """
    # Copy both rows first so the updates don't read already-written data.
    a_i=A.rowview(i).copy
    a_j=A.rowview(j).copy
    A.rowview(i)[:]= c * a_i + s.conjugate() * a_j
    A.rowview(j)[:]= c * a_j - s * a_i
    return A
def prodGT(A,i,j,c,s):
    """
    Done in-place (A is modified)
    Usage:
        prodGT(A,i,j,c,s)
    where:
        A is a matrix of size (m,n)
        i,j,c,s are equivalent to a givens matrix G_T = givens(A.type,i,j,c,s,m).transview
    does:
        A = A G_T
    returns:
        reference to A as a convenience
    """
    # Copy both columns first so the updates don't read already-written data.
    a1 = A.colview(i).copy
    a2 = A.colview(j).copy
    A.colview(i)[:] = c * a1 + s * a2
    A.colview(j)[:] = c * a2 -s.conjugate() * a1
    return A
# QR decomposition
def QRD_inPlace(A):
    """
    The function QRD_inPlace(A) is done in-place on matrix A. If you want to retain A make a copy first.
    Usage:
        QRD_inPlace(A)
    Note that the decomposition represented is A=QR.
    Matrix R is stored in the upper triangular portion of A.
    Householder vectors are stored in the lower sub-triangular portion of A.
    Householder vectors are normalized so that v[0] is 1.0;
    """
    m=A.collength
    n=A.rowlength
    if m < n:
        # NOTE(review): only warns -- execution continues and will likely
        # fail below; an early return may have been intended.
        print('The input matrix must have collength >= rowlength.')
        print('For matrices where rowlength > collength work with the transpose.')
    for i in range(n-1):
        # Reflect column i below the diagonal, store R in place, then keep
        # the essential part of the householder vector below the diagonal.
        x=A[i:,i:].colview(0)
        v=houseVector(x)
        v /= v[0]
        A[i:,i:]=house(v).prod(A[i:,i:])
        x[1:]=v[1:]
    if m > n: #do last column if matrix not square
        i=n-1
        x=A[i:,i:].colview(0)
        v=houseVector(x)
        v /= v[0]
        A[i:,i:]=house(v).prod(A[i:,i:])
        x[1:]=v[1:]
def fullQProd(Q,B):
    """
    Usage:
        U=fullQProd(Q,B)
    where Q is a matrix of size M,N where M >= N which was produced by QRD_inPlace(Q)
          B is a matrix of size M,P
          U is the matrix produced by the matrix product Q B where Q is the full Q matrix from a QR decomposition.
    """
    m=Q.collength
    # NOTE(review): n is taken from B.rowlength; presumably the caller's
    # B has rowlength equal to Q's (P == N) -- confirm against Q.rowlength.
    n=B.rowlength
    U=B.copy
    if m > n: #extract last column if matrix is not square
        i=n-1
        v=Q[i:,i:].colview(0).copy
        v[0]=1  # restore the implicit leading 1 of the stored reflector
        houseProd(v,U[i:,i:])
    # Apply the remaining reflectors in reverse order.
    for i in range(n-2,-1,-1):
        v=Q[i:,i:].colview(0).copy
        v[0]=1
        houseProd(v,U[i:,i:])
    return U
def QmatExtract(B):
    """
    If B is a matrix which has been operated on by QRD_inPlace then
    QmatExtract(B) will return the full Q matrix of the QR decomposition.
    Q is accumulated by applying the stored reflectors to an identity.
    """
    m=B.collength
    n=B.rowlength
    Q=eye(B.type,m)
    if m > n: #extract last column if matrix is not square
        i=n-1
        v=B[i:,i:].colview(0).copy
        v[0]=1  # restore the implicit leading 1 of the stored reflector
        houseProd(v,Q[i:,i:])
    # Apply the remaining reflectors in reverse order.
    for i in range(n-2,-1,-1):
        v=B[i:,i:].colview(0).copy
        v[0]=1
        houseProd(v,Q[i:,i:])
    return Q
def RmatExtract(B):
    """
    If B is a matrix which has been operated on by QRD_inPlace then
    RmatExtract(B) returns a new matrix with the (full) R from the QR decomposition.
    All subdiagonals (which hold the stored householder vectors) are zeroed.
    """
    R=B.copy
    m=B.collength
    for i in range(1,m):
        R.diagview(-i).fill(0.0)
    return R
def houseQR(A):
    """
    Done out of place
    Usage:
      Q,R=houseQR(A)
    where:
      A is of size M, N; M >= N; A = Q R
      Q is unitary
      R is upper triangular
    """
    R=A.copy
    m=A.collength
    n=A.rowlength
    if m < n:
        # NOTE(review): only warns -- execution continues; an early
        # return may have been intended.
        print('The input matrix must have collength >= rowlength.')
        print('for matrices where rowlength > collength work with the transpose')
    for i in range(n-1):
        # Reflect column i; temporarily store the essential reflector below
        # the diagonal so QmatExtract can accumulate Q afterwards.
        x=R[i:,i:].colview(0)
        v=houseVector(x)
        v /= v[0]
        houseProd(v,R[i:,i:])
        x[1:]=v[1:]
    if m > n: #do last column if matrix not square
        i=n-1
        x=R[i:,i:].colview(0)
        v=houseVector(x)
        v /= v[0]
        houseProd(v,R[i:,i:])
        x[1:]=v[1:]
    #accumulate Q
    Q = QmatExtract(R)
    #zero entries of R
    for i in range(1,m):
        R.diagview(-i).fill(0.0)
    return (Q,R)
def bidiag(A): # m >= n
    """
    B=bidiag(A)
    returns, out of place, the bidiagonal decomposition of A.
    The esential househoulder vectors are stored in the zeroed entries of B.
    Left reflectors zero each column below the diagonal; right reflectors
    zero each row right of the superdiagonal.
    """
    B=A.copy
    m=B.collength
    n=B.rowlength
    if m < n:
        # NOTE(review): only warns -- execution continues; an early
        # return may have been intended.
        print('The input matrix must have collength >= rowlength.')
        print('for matrices where rowlength > collength work with the transpose')
    for i in range(n-1):
        x=B[i:,i:].colview(0)
        v=houseVector(x)
        v /= v[0]
        houseProd(v,B[i:,i:])
        x[1:]=v[1:]
        if i < n-2:
            # Zero row i to the right of the superdiagonal.
            j=i+1
            x = B[i:,j:].rowview(0)
            #v=houseVector(x.conj);x.conj
            v=houseVector(x).conj
            v /= v[0]
            prodHouse(B[i:,j:],v)#=B[i:,j:].prod(house(v))
            x[1:]=v[1:]
    if m > n: #do last column if matrix not square
        i=n-1
        x=B[i:,i:].colview(0)
        v=houseVector(x)
        v /= v[0]
        houseProd(v,B[i:,i:])
        x[1:]=v[1:]
    return B
def bidiagExtract(B):
    """
    B=bidiagExtract(B0)
    Returns, out of place, a matrix with the bidiagonal entries.
    Input matrix is one produced by B0=bidiag(A)
    Only the main diagonal and first superdiagonal are copied; everything
    else (the stored householder vectors) is zeroed.
    """
    B0=B.empty.fill(0.0)
    B0.diagview(0)[:] = B.diagview(0)
    B0.diagview(1)[:] = B.diagview(1)
    return B0
def UmatExtract(B):
    """
    U=UmatExtract(B0)
    returns, out of place, the U matrix of the bidiagonal
    decomposition A=UBV^H given the result of bidiag routine
    B0=bidiag(A)
    """
    m=B.collength
    n=B.rowlength
    U=eye(B.type,m)
    if m > n: #extract last column if matrix is not square
        i=n-1
        v=B[i:,i:].colview(0).copy
        v[0]=1  # restore the implicit leading 1 of the stored reflector
        houseProd(v,U[i:,i:])
    # Apply the remaining left reflectors in reverse order.
    for i in range(n-2,-1,-1):
        v=B[i:,i:].colview(0).copy
        v[0]=1
        houseProd(v,U[i:,i:])
    return U
def VHmatExtract(B):
    """
    VH=VHmatExtract(B0)
    returns, out of place, the hermtian V matrix of the bidiagonal
    decomposition A=UBV^H given the result of bidiag routine
    B0=bidiag(A)
    """
    m=B.collength
    n=B.rowlength
    V=eye(B.type,n)
    # Right reflectors exist only for rows 0..n-3 (the last two rows need
    # none); apply them in reverse order, offset by one column.
    for i in range(n-3,-1,-1):
        j=i+1
        v=B[i:,j:].rowview(0).copy
        v[0]=1
        prodHouse(V[j:,j:],v)
    return V
def givensCoef(x1_in,x2_in):
    """ Code adapted from Algorithm 1 of LAPACK working Notes lawn148
    NOTE(review): duplicate, byte-identical redefinition of givensCoef
    above; this later definition shadows the earlier one at import time.
    """
    if type(x1_in) is int:
        x1=float(x1_in)
    else:
        x1 = x1_in
    if type(x2_in) is int:
        x2=float(x2_in)
    else:
        x2 = x2_in
    if type(x1) is float and type(x2) is float:
        t=pv.vsip_hypot_d(x1,x2)
        if x2 == 0.0:
            return (1.0,0.0,x1)
        elif x1 == 0.0:
            return (0.0,sign(x2),t)
        else: # return (c,s,r)
            sn=sign(x1)
            return(pv.vsip_mag_d(x1)/t,sn*x2/t,sn*t)
    elif type(x1) is complex or type(x2) is complex:
        mx1=pv.vsip_hypot_d(x1.real,x1.imag)
        mx2=pv.vsip_hypot_d(x2.real,x2.imag)
        if mx2 == 0.0:
            return(1.0,0.0,x1)
        elif mx1 == 0.0:
            return(0,sign(x2.conjugate()),mx2)
        else:
            t=pv.vsip_hypot_d(mx1,mx2)
            c=mx1/t
            sn=sign(x1)
            s=(sn * x2.conjugate())/t
            r=sn * t
            return(c,s,r)
    else:
        print('Type <:'+repr(type(x1)) + ':> or <:'+ \
              repr(type(x2))+':> not recognized by givensCoef')
        return
def givensExtract(t,i,j,c,s,size):
    """
    Usage:
       G=givensExtract(t,i,j,c,s,size)
       t = type
       i,j are index values for placement of
       c,s which are obtained (probably) from function givensCoef.
       size is an integer
    NOTE(review): identical in body to ``givens`` defined above, under a
    different name.
    """
    G=eye(t,size)
    G[i,i]=c;G[j,j]=c;G[i,j]=s;G[j,i]=-s.conjugate()
    return G
def gProd(i,j,c,s,A):
    """In-place A = G A for Givens parameters (i,j,c,s); returns A.
    NOTE(review): duplicate redefinition; shadows the documented gProd above.
    """
    a1=A.rowview(i).copy
    a2=A.rowview(j).copy
    A.rowview(i)[:]= c * a1 + s * a2
    A.rowview(j)[:]= c * a2 - s.conjugate() * a1
    return A
def prodG(A,i,j,c,s):
    """In-place A = A G^H for Givens parameters (i,j,c,s); returns A.
    NOTE(review): duplicate redefinition; shadows the documented prodG above.
    """
    a_i=A.colview(i).copy
    a_j=A.colview(j).copy
    A.colview(i)[:]= c * a_i + s.conjugate() * a_j
    A.colview(j)[:]= c * a_j - s * a_i
    return A
def gtProd(i,j,c,s,A):
    """In-place A = G^TH A for Givens parameters (i,j,c,s); returns A.
    NOTE(review): duplicate redefinition; shadows the documented gtProd above.
    """
    a_i=A.rowview(i).copy
    a_j=A.rowview(j).copy
    A.rowview(i)[:]= c * a_i + s.conjugate() * a_j
    A.rowview(j)[:]= c * a_j - s * a_i
    return A
def prodGT(A,i,j,c,s):
    """In-place A = A G^T for Givens parameters (i,j,c,s); returns A.
    NOTE(review): duplicate redefinition; shadows the documented prodGT above.
    """
    a1 = A.colview(i).copy
    a2 = A.colview(j).copy
    A.colview(i)[:] = c * a1 + s * a2
    A.colview(j)[:] = c * a2 -s.conjugate() * a1
    return A
def givensQR(A):
    """
    Usage: Q,R = givensQR(A)
    QR decomposition via Givens rotations (A is not modified).
    Returns (Q, R): Q unitary (M x M), R upper triangular (M x N),
    accumulated by rotating out each subdiagonal entry column by column.
    """
    M = A.collength
    N = A.rowlength
    R = A.copy
    Q = eye(A.type,M)
    for i in range(N):
        B=R[i:,i:]   # trailing submatrix; rotations act on its first column
        r=B[0,0]
        for j in range(1,B.collength):
            c,s,r=givensCoef(r,B[j,0])
            prodG(Q,i,j+i,c,s)   # accumulate rotation into Q
            gProd(0,j,c,s,B)     # annihilate B[j,0]
    return (Q,R)
def givensBidiag(A):
    """
    Usage: U,B,VH = givensBidiag(A)
    Bidiagonalization via Givens rotations (A is not modified).
    Returns (U, B, VH) with B upper bidiagonal so that A = U B VH.
    Left rotations zero each column below the diagonal; right rotations
    zero each row beyond the superdiagonal.
    """
    M = A.collength
    N = A.rowlength
    B = A.copy
    U = eye(A.type,M)
    VH = eye(A.type,N)
    for i in range(N-1):
        TC=B[i:,i:]      # trailing submatrix for the column sweep
        if i < N-2:
            TR=B[i:,i+1:]  # trailing submatrix for the row sweep
        r=TC[0,0]
        for j in range(1,TC.collength):
            c,s,r=givensCoef(r,TC[j,0])
            prodG(U,i,j+i,c,s)
            gProd(0,j,c,s,TC)
        if i < N-2:
            r=TR[0,0]
            k=i+1
            for j in range(1,TR.rowlength):
                c,s,r=givensCoef(r,TR[0,j])
                gtProd(k,j+k,c,s,VH)
                prodGT(TR,0,j,c,s)
    if M > N:
        # Clean out the final column for tall matrices.
        i=N-1
        TC=B[i:,i:]
        r=TC[0,0]
        for j in range(1,TC.collength):
            c,s,r=givensCoef(r,TC[j,0])
            prodG(U,i,j+i,c,s)
            gProd(0,j,c,s,TC)
    return (U,B,VH)
def svdZeroCheckAndSet(e,b0,b1):
    """
    Usage:
        svdZeroCheckAndSet(eps,d,f)
    Where:
        eps0 is a small number we consider to be (close to) zero
        d is a vector view representing the main diagonal of an upper bidiagonal matrix
        f is a vector view representing the superdiagonal in an upper bidiagonal matrix.
    In the svd algorithm this checks the superdiagonal for small numbers which
    may be set to zero. If found, set to zero.
    """
    # Per-entry threshold: eps times the magnitudes of the two neighboring
    # diagonal entries.
    s=e * (b0[0:b1.length].mag + b0[1:].mag)
    indx_bool = b1.mag.llt(s)
    if indx_bool.anytrue: #check super diagonal
        b1.indxFill(indx_bool.indexbool,0.0)
def svdCorners(b1):
    """
    Functionality
       i,j = svdCorners(v)
    where
       v is a real vector of type float or double
       i,j are indices.
       i,j; as returned
       v[i:j-1] will be vector with no zero elements
       v[j-1:] will be a vector with all zero elements
    Note v is the first super-diagonal of a bidiagonal matrix.
    The corresponding main diagonal, d, will be d[i:j]
    """
    v_bool=b1.leq(0.0)  # boolean mask: 1 where entry <= 0 (zeroed)
    # Scan from the tail past the trailing zeros.
    j=v_bool.length-1
    while j >= 0 and v_bool[j] == 1:
        j -= 1
    if j == -1:
        return(0,0) #all of b1 is zero
    i=j #index of non-zero
    j+=1 #index of zero
    # Extend the non-zero run leftwards.
    while i >= 0 and v_bool[i] == 0:
        i -= 1
    return(i+1,j+1)
def diagPhaseToZero(L,B):
    """
    To phase shift the main diagonal entries of a matrix B so entries
    are real (imaginary zero) use this routine.
    The inverse phase is pushed into column i of the left update matrix L
    and row i of B so the overall product is unchanged.
    """
    d = B.diagview(0)
    for i in range(d.length):
        ps=d[i] #phase shift
        if ps.imag != 0.0: #ignore if already real
            m = pv.vsip_hypot_d(ps.real,ps.imag)  # magnitude of the entry
            ps /= m                               # unit phase factor
            L.colview(i)[:] *= ps
            B.rowview(i)[:] *= ps # if B is strictly diagonal don't need this step
            d[i] = m
def biDiagPhaseToZero(L,d,f,R,eps0):
    """
    For a Bidiagonal matrix B This routine uses subview vectors
    `d=B.diagview(0)`
    and
    `f=B.diagview(1)`
    and phase shifts vectors d and f so that B has zero complex part.
    Matrices L and R are update matrices.
    eps0 is a small real number used to check for zero. If an element meets a zero
    check then that element is set to zero.
    """
    # Pass 1: make the main diagonal real; push phases into L's columns and
    # into the neighboring superdiagonal entry.
    for i in range(d.length):
        ps=d[i]
        if ps.imag == 0.0:
            m = ps.real
            if m < 0.0:
                ps=-1.0
            else:
                ps= 1.0
            m = abs(m)
        else:
            m=pv.vsip_hypot_d(ps.real,ps.imag)
            ps /= m  # unit phase factor
        if m > eps0:
            L.colview(i)[:] *= ps
            d[i] = m
            if i < f.length:
                f[i] *= ps.conjugate()
        else:
            d[i] = 0.0  # treat tiny magnitudes as exact zeros
    svdZeroCheckAndSet(eps0,d,f)
    # Pass 2: make the superdiagonal real; push phases into L and R and
    # propagate into the next superdiagonal entry.
    for i in range(f.length-1):
        j=i+1
        ps = f[i]
        if ps.imag == 0.0:
            m = ps.real
            if m < 0.0:
                ps=-1.0
            else:
                ps= 1.0
            m = abs(m)
        else:
            m=pv.vsip_hypot_d(ps.real,ps.imag)
            ps /= m
        L.colview(j)[:] *= ps.conjugate()
        R.rowview(j)[:] *= ps
        f[i] = m;
        f[j] *= ps
    # Final superdiagonal entry handled outside the loop (no successor).
    j=f.length
    i=j-1
    ps=f[i]
    if ps.imag == 0.0:
        m = ps.real
        if m < 0.0:
            ps=-1.0
        else:
            ps= 1.0
        m = abs(m)
    else:
        m=pv.vsip_hypot_d(ps.real,ps.imag)
        ps /= m
    f[i]=m
    L.colview(j)[:] *= ps.conjugate()
    R.rowview(j)[:] *= ps
def zeroRow(L,d,f):
    """
    To use this we assume a matrix B that is bi-diagonalized.
    Note i,j = svdCorners(B) => i, j=n+1
    Let d0 be B.diagview(0); f0 be B.diagview(1)
    d is a subview of the main diagonal
    f is a subview of the first superdiagonal (diagonal(1)) and has no zeros.
    if f = f0[i:n] then d = d0[i:n+1]
    L is a subview of the left update matrix we call L0 here.
    for the indices shown above
    L = L0[:,i:n+1]
    If d contains a zero entry, and the zero entry is not at the end of d,
    then zeroRow is used to zero out the corresponding superdiagonal entry
    in the row. Vector d may contain more than one zero. We zero out the zero
    with the largest index (we designate k). So d[k] = d0[i+k] is the zero
    of interest.
    Note if d[k] is the last entry then the corresponding superdiagonal entry
    in the row is already zero. Use zeroCol to zero out the column.
    Usage:
        zeroRow(L[:,k:],d[k+1:],f[k:])
    """
    if 'cvview' in d.type or 'cvview' in f.type:
        print('zeroRow only works for real vectors')
        return
    if d.length == 1:
        # Single entry: one rotation eliminates f[0] directly.
        c,s,r=givensCoef(d[0],f[0])
        f[0]=0.0;d[0]=r
    else:
        # Chase the introduced bulge t down the row with successive rotations,
        # accumulating each into L.
        c,s,r=givensCoef(d[0],f[0])
        f[0]=0;d[0]=r
        t= - f[1] * s; f[1] *= c
        prodG(L,1,0,c,s)
        for i in range(1,d.length-1):
            c,s,r=givensCoef(d[i],t)
            prodG(L,i+1,0,c,s)
            d[i]=r; t=-f[i+1] * s; f[i+1] *= c
        c,s,r=givensCoef(d[d.length-1],t)
        d[d.length-1] = r
        prodG(L,d.length,0,c,s)
def zeroCol(d,f,R):
    """
    To use this we assume a matrix B that is bi-diagonalized.
    Note i,j = svdCorners(B) => i, j=n+1
    Let d0 be B.diagview(0); f0 be B.diagview(1)
    d is a subview of the main diagonal
    f is a subview of the first superdiagonal (diagonal(1)) and has no zeros.
    if f = f0[i:n] then d = d0[i:n+1]
    R is a subview of the right update matrix we call R0 here.
    for the indices shown above
    R = R0[i:n+1,:]
    We assume matrix B has all zeros on row n.
    Usage:
        zeroCol(d,f,R)
    """
    if 'cvview' in d.type or 'cvview' in f.type:
        print('zeroCol only works for real vectors')
        return
    if f.length == 1:
        # Single superdiagonal entry: one rotation removes it.
        c,s,r=givensCoef(d[0],f[0])
        d[0]=r; f[0]=0.0
        gtProd(0,1,c,s,R)
    elif f.length == 2:
        # Two entries: eliminate the lower one, chase the bulge to the top.
        c,s,r=givensCoef(d[1],f[1])
        d[1]=r; f[1]=0;
        t= - f[0] * s; f[0] *= c
        gtProd(1,2,c,s,R)
        c,s,r=givensCoef(d[0],t)
        d[0]=r;
        gtProd(0,2,c,s,R)
    else:
        # General case: start at the bottom and chase the bulge t upward,
        # accumulating each rotation into R.
        i=f.length-1; j=i-1; k=i
        c,s,r=givensCoef(d[i],f[i])
        f[i]=0; d[i]=r; t=-f[j]*s; f[j]*=c;
        gtProd(i,k+1,c,s,R)
        while i > 1:
            i = j; j = i-1
            c,s,r=givensCoef(d[i],t)
            d[i]=r; t= - f[j] * s; f[j] *= c
            gtProd(i,k+1,c,s,R)
        c,s,r=givensCoef(d[0],t)
        d[0] = r
        gtProd(0,k+1,c,s,R)
def svdMu(d2,f1,d3,f2):
    """
    Compute the shift `mu` for an implicit QR SVD step from the trailing
    2x2 block of the bidiagonal matrix (d2, d3 diagonal; f1, f2 superdiagonal).

    Complex is removed from bidiagonal so for this algorithm we expect real numbers.

    Returns the eigenvalue of the (scaled) trailing 2x2 Gram matrix that is
    closest to the bottom-corner element cl.
    """
    # cu ~ d2^2 + f1^2 and cl ~ d3^2 + f2^2, each computed by factoring out
    # the larger of the two squares to reduce overflow/underflow risk.
    td=d2*d2; tf=f1*f1
    if td == 0.0:
        cu = tf
    elif (td < tf):
        cu=tf * (1.+td/tf)
    else:
        cu=td * (1.+tf/td);
    td=d3*d3; tf=f2*f2
    if td == 0.0:
        cl = tf
    elif (td < tf):
        cl=tf * (1.+td/tf)
    else:
        cl=td * (1.+tf/td);
    cd = d2 * f2
    # Eigenvalues of the 2x2 via trace T and (scaled) determinant D.
    T = (cu + cl)
    D = (cu * cl - cd * cd)/(T*T)
    # Guard: rounding can push the discriminant slightly negative.
    if 4.*D > 1.0:
        root = 0.0
    else:
        root = T * pv.vsip_sqrt_d(1.0 - 4. * D)  # pv: presumably the pyJvsip binding -- confirm
    lambda1 = (T + root)/(2.); lambda2 = (T - root)/(2.)
    # Pick the root nearer the trailing corner value cl (Wilkinson-style choice).
    if abs(lambda1 - cl) < abs(lambda2 - cl):
        mu = lambda1
    else:
        mu = lambda2
    return mu
def svdStep(L,d,f,R):
    """
    Perform one implicit-shift QR sweep on the bidiagonal pair (d, f),
    updating d and f in place and accumulating the left rotations into L
    (prodG) and the right rotations into R (gtProd).

    Args:
        L: left update matrix subview.
        d: main diagonal, real vector view (modified in place).
        f: first superdiagonal, real vector view (modified in place).
        R: right update matrix subview.
    """
    if 'cvview' in d.type or 'cvview' in f.type:
        print('Input vector views must be of type real; Fail for svdStep')
        return
    n=d.length
    #initial step: shift mu comes from the trailing 2x2 (svdMu); degenerate
    #sizes pass zeros in place of the missing entries.
    if n >= 3:
        mu = svdMu(d[n-2],f[n-3],d[n-1],f[n-2])
    elif n == 2:
        mu = svdMu(d[0],0.0,d[1],f[0])
    else:
        mu = svdMu(d[0],0.0,0.0,0.0)
    # First rotation is chosen against (d[0]^2 - mu, d[0]*f[0]).
    x1=d[0]; x1 *= x1; x1 -= mu
    x2 = d[0] * f[0]
    c,s,r=givensCoef(x1,x2)
    t=d[0] * c + s * f[0]; f[0] *= c; f[0] -= s * d[0]; d[0] = t;
    t=s * d[1]; d[1] *= c;
    gtProd(0,1,c,s,R)
    # Chase the introduced off-band element t down the matrix, alternating
    # left (prodG) and right (gtProd) rotations.
    for i in range(n-2):
        j=i+1; k=i+2
        #step
        c,s,r = givensCoef(d[i],t)
        d[i]=r;
        t=c * d[j] - s * f[i]; f[i] *=c ;f[i]+=s*d[j];d[j]=t
        t=s * f[j]; f[j] *= c;
        prodG(L,i,j,c,s)
        #step
        c,s,r=givensCoef(f[i],t)
        f[i]=r
        t=c * d[j] + s * f[j]; f[j] *= c; f[j] -= s * d[j]; d[j] = t
        t=s * d[k]; d[k] *= c;
        gtProd(j,k,c,s,R)
    #final step: absorb the last off-band element.
    i=n-2; j=n-1
    c,s,r = givensCoef(d[i],t)
    d[i]=r;
    t= c * d[j] - s * f[i];
    f[i] *= c; f[i] += s * d[j];
    d[j]=t
    prodG(L,i,j,c,s)
def zeroFind(d, eps0):
    """Scan `d` from the end for an entry <= eps0; zero it and return its
    1-based position. Returns 0 when every entry exceeds eps0 (nothing to
    deflate)."""
    idx = d.length
    val = d[idx - 1]
    while val > eps0:
        if idx == 1:
            # Reached the front without finding a negligible entry.
            return 0
        idx -= 1
        val = d[idx - 1]
    # Flush the negligible entry to an exact zero before deflation.
    d[idx - 1] = 0.0
    return idx
def svd(A):
    """
    The bidiag routine is used in the svd and bidiag is defined out of place,
    so svd is also out of place. The bidiag routine can be done in-place with
    a simple change, so the svd can also be done in-place.
    Usage:
        U,S,VH = svd(A)
    A is a matrix with column length >= row length
    where U is a unitary matrix of size A.columnlength
          S is a real vector of size A.rowlength containing the singular values of A
          Note: S is considered here to be a diagonal matrix
          VH is a unitary matrix of size A.rowlength
    Note:
        A = U S VH = U.prod(S.mmul(VH.ROW))
    """
    def svdBidiagonal(A):
        # Reduce A to bidiagonal form, extract the left/right update matrices,
        # and derive the negligibility threshold eps0 from the Frobenius norm.
        if 'mview_f' not in A.type and 'mview_d' not in A.type:
            print('Input must be a matrix of type float for function svd.')
            return
        if A.rowlength > A.collength:
            print('For svd function input matrix A of size (M,N) must have N >= M')
            return(0,0,0,0,0)
        if 'mview_d' in A.type:
            eps0 = A.normFro/A.rowlength * 1.0E16
        else:
            eps0 = A.normFro/A.rowlength * 1.0E8
        if eps0 == 0.0:
            print('Input matrix appears to be zero')
            return(0,0,0,0,0)
        else:
            eps0 = 1.0/eps0
        B=bidiag(A)
        L=UmatExtract(B)
        R=VHmatExtract(B)
        biDiagPhaseToZero(L,B.diagview(0),B.diagview(1),R,eps0)
        # Complex input: the phase has been rotated out, keep real parts only.
        if 'cmview' in B.type:
            d0=B.diagview(0).realview.copy
            f0=B.diagview(1).realview.copy
        else:
            d0=B.diagview(0).copy
            f0=B.diagview(1).copy
        return (L,d0,f0,R,eps0)
    def svdIteration(L0,d0,f0,R0,eps0):
        # Repeat QR sweeps / deflation until the superdiagonal is fully zero
        # (svdCorners returns j == 0) or the iteration budget is exhausted.
        cntr=0
        maxcntr=5*d0.length
        while cntr < maxcntr:
            # Fixed: removed a leftover debug print of (d0.length, f0.length)
            # that was emitted on every iteration.
            biDiagPhaseToZero(L0,d0,f0,R0,eps0)
            cntr += 1
            i,j=svdCorners(f0)
            if j == 0:
                break
            # Work on the still-unreduced window [i:j].
            d=d0[i:j]
            f=f0[i:j-1]
            L=L0[:,i:j]
            R=R0[i:j,:]
            n=f.length
            k=zeroFind(d,eps0)
            if k >0:
                # A negligible diagonal entry was zeroed at position k:
                # deflate with zeroCol (zero at the end) or zeroRow (interior).
                k -= 1;
                if d[n] == 0.0:
                    zeroCol(d,f,R)
                else:
                    zeroRow(L[:,k:],d[k+1:],f[k:])
            else:
                svdStep(L,d,f,R)
    def svdSort(L,d,R):
        # Order singular values descending and permute L's columns and R's
        # rows to match.
        indx=d.sort('BYVALUE','DESCENDING')
        if 'cmview' in R.type:
            R.realview.permute(indx,'ROW')
            R.imagview.permute(indx,'ROW')
            L[:,0:d.length].realview.permute(indx,'COL')
            L[:,0:d.length].imagview.permute(indx,'COL')
        else:
            R.permute(indx,'ROW')
            L[:,0:d.length].permute(indx,'COL')
    U,S,f0,VH,eps0 = svdBidiagonal(A)
    svdIteration(U,S,f0,VH,eps0)
    svdSort(U,S,VH)
    return(U,S,VH)
|
# -*- coding: utf-8 -*-
import os
import sys
from PySide2.QtWidgets import QApplication
from nodedge.logger import highLightLoggingSetup, setupLogging
from nodedge.mdi_window import MdiWindow
from nodedge.utils import dumpException
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) # noqa: E402
os.environ["QT_API"] = "pyside"
def main():
    """Launch the Nodedge MDI application and open the calculator example."""
    application: QApplication = QApplication(sys.argv)

    setupLogging()
    highLightLoggingSetup()

    mainWindow = MdiWindow()
    mainWindow.show()
    mainWindow.openFile(
        f"{os.path.dirname(__file__)}/../examples/calculator/calculator.json"
    )

    try:
        sys.exit(application.exec_())
    except Exception as e:
        dumpException(e)
if __name__ == "__main__":
    main()
|
# Extract radar traffic count data from KITS database and publish
# new records to City of Austin Open Data Portal.
# Attributes:
# socrata_resource (str): Description
import hashlib
import pdb
import arrow
import knackpy
import _setpath
from config.secrets import *
import argutil
import datautil
import emailutil
import jobutil
import logutil
import kitsutil
import socratautil
# define config variables
socrata_resource = "i626-g7ub"
def my_round(x, base=15):
    """Round ``x`` to the nearest multiple of ``base``.

    Args:
        x: Value to round (any numeric type).
        base (int, optional): Rounding granularity. Defaults to 15.

    Returns:
        int: The multiple of ``base`` closest to ``x``.
    """
    # https://stackoverflow.com/questions/2272149/round-to-5-or-other-number-in-python
    n_multiples = round(float(x) / base)
    return int(base * n_multiples)
def get_timebin(minute, hour):
    """
    Round an arbitrary minute/hour to the nearest 15 minutes. We expect
    radar count timestamps to come in at 15 minute increments (this is a
    device configuration), however sometimes they are off by +/- a minute
    or two.

    Args:
        minute (int)
        hour (int)

    Returns:
        str: 15-minute time bin in "HH:mm" format.
    """
    minute = my_round(minute)

    # Rounding e.g. :53 up yields 60: roll over to the next hour,
    # wrapping 23:xx to 00:00.
    if minute == 60:
        minute = 0
        hour = hour + 1 if hour != 23 else 0

    # Fixed: removed a dead, pre-zfill "timebin" variable that was computed
    # and never used.
    return "{}:{}".format(str(hour).zfill(2), str(minute).zfill(2))
def get_direction(lane):
    """Extract the travel-direction code embedded in a lane/detector name.

    Args:
        lane (str): Lane name, e.g. ``"NB_1"``.

    Returns:
        str or None: "SB", "NB", "EB" or "WB" (first match in that order),
        or None when no direction token is present.
    """
    for direction in ("SB", "NB", "EB", "WB"):
        if direction in lane:
            return direction
    return None
def cli_args():
    """Parse the command-line arguments for this publisher script.

    Returns:
        argparse.Namespace: Parsed arguments (currently only ``--replace``).
    """
    parser = argutil.get_parser(
        "radar_count_pub.py",
        "Publish radar count data from KITS DB to City of Austin Open Data Portal.",
        "--replace",
    )
    return parser.parse_args()
def main():
    """Publish new radar traffic count records from KITS to Socrata.

    Compares the newest Socrata record with the newest KITS record and, when
    KITS is ahead (or ``--replace`` was passed), extracts the new rows,
    derives date/time/direction fields, stamps each row with a stable hash
    id, and upserts the payload to the open data portal.

    Returns:
        int: Number of records sent to Socrata (0 when nothing is new).
    """
    args = cli_args()
    replace = args.replace

    # get most recent traffic count record from socrata
    socrata_data = socratautil.Soda(
        resource=socrata_resource, soql={"$order": "curdatetime desc", "$limit": 1}
    )

    socrata_data = socrata_data.data

    kits_query_recent = """
        SELECT TOP (1) DETID as det_id
        ,CURDATETIME as dettime
        ,DETNAME as lane
        ,VOLUME as vol
        ,SPEED as spd
        FROM [KITS].[SYSDETHISTORYRM]
        ORDER BY CURDATETIME DESC
        """

    kits_data_recent = kitsutil.data_as_dict(KITS_CREDENTIALS, kits_query_recent)

    for record in kits_data_recent:
        # KITS stores naive local (US/Central) datetimes; convert to unix time
        new_date = arrow.get(record["dettime"], "US/Central")
        record["dettime"] = new_date.timestamp

    if replace:
        # full re-publish: fetch every row
        kits_query = """
            SELECT i.DETID as detid
            ,i.CURDATETIME as curdatetime
            ,i.VOLUME as volume
            ,i.SPEED as speed
            ,i.INTNAME as intname
            ,i.OCCUPANCY as occupancy
            ,e.INTID as int_id
            ,e.DETSN as detname
            FROM [KITS].[SYSDETHISTORYRM] i
            LEFT OUTER JOIN [KITS].[DETECTORSRM] e
            ON i.[DETID] = e.[DETID]
            ORDER BY CURDATETIME DESC
            """

    # send new data if the socrata data is behind KITS data
    # the kits data timestamp is a real unix timestamp (no timezone adjustment needed)
    elif (
        arrow.get(socrata_data[0]["curdatetime"]).timestamp
        < kits_data_recent[0]["dettime"]
    ):
        # create query for counts since most recent socrata data
        # query start time must be in local US/Central time (KITSDB is naive!)
        strtime = (
            arrow.get(socrata_data[0]["curdatetime"])
            .to("US/Central")
            .format("YYYY-MM-DD HH:mm:ss")
        )

        # INTID is KITS_ID in data tracker / socrata
        # it uniquely identifies the radar device/location
        # detname and the lane and should be queried from the DETECTORSRM
        # table note that the values in the detname field in SYSDETHISTORYRM
        # are not current and appear to be updated only the first time the
        # detector is configured in KITS
        kits_query = """
            SELECT i.DETID as detid
            ,i.CURDATETIME as curdatetime
            ,i.VOLUME as volume
            ,i.SPEED as speed
            ,i.INTNAME as intname
            ,i.OCCUPANCY as occupancy
            ,e.INTID as int_id
            ,e.DETSN as detname
            FROM [KITS].[SYSDETHISTORYRM] i
            LEFT OUTER JOIN [KITS].[DETECTORSRM] e
            ON i.[DETID] = e.[DETID]
            WHERE (i.[CURDATETIME] >= '{}')
            ORDER BY CURDATETIME DESC
            """.format(
            strtime
        )

    else:
        # No new data
        return 0

    kits_data = kitsutil.data_as_dict(KITS_CREDENTIALS, kits_query)

    print("Processing date/time fields")

    for row in kits_data:
        # Fixed: removed a duplicate assignment of row["day"].
        row["month"] = row["curdatetime"].month
        row["day"] = row["curdatetime"].day
        row["year"] = row["curdatetime"].year
        row["hour"] = row["curdatetime"].hour
        row["minute"] = row["curdatetime"].minute
        row["day_of_week"] = row["curdatetime"].weekday()

        # weekday() is 0 to 6 starting on Monday;
        # shift to 0 to 6 starting on Sunday
        if row["day_of_week"] == 6:
            row["day_of_week"] = 0
        else:
            row["day_of_week"] = row["day_of_week"] + 1

        row["timebin"] = get_timebin(row["minute"], row["hour"])
        row["direction"] = get_direction(row["detname"].upper())

    kits_data = datautil.replace_timezone(kits_data, "curdatetime")
    kits_data = datautil.iso_to_unix(kits_data, ["curdatetime"])
    kits_data = datautil.stringify_key_values(kits_data)

    # row_id is a stable md5 of the fields that uniquely identify a count
    # record; Socrata uses it as the upsert key.
    hash_fields = ["detid", "curdatetime", "detname"]

    for row in kits_data:
        hasher = hashlib.md5()
        in_str = "".join([str(row[q]) for q in hash_fields])
        hasher.update(in_str.encode("utf-8"))
        row["row_id"] = hasher.hexdigest()

    # re-stringify so the newly added fields are strings as well
    kits_data = datautil.stringify_key_values(kits_data)

    socrata_payload = datautil.lower_case_keys(kits_data)

    status_upsert_response = socratautil.Soda(
        auth=SOCRATA_CREDENTIALS,
        records=socrata_payload,
        resource=socrata_resource,
        location_field=None,
    )

    return len(socrata_payload)
if __name__ == "__main__":
main()
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class MuseScoreIE(InfoExtractor):
    """Extractor for musescore.com score pages; audio is served as mp3."""
    _VALID_URL = r'(?:https?://)(?:www\.)?musescore\.com/(?:user/\d+|[^/]+)(?:/scores)?/(?P<id>[^#&?]+)'
    _TESTS = [{
        'url': 'https://musescore.com/user/73797/scores/142975',
        'info_dict': {
            'id': '142975',
            'ext': 'mp3',
            'title': 'WA Mozart Marche Turque (Turkish March fingered)',
            'description': 'md5:7ede08230e4eaabd67a4a98bb54d07be',
            'thumbnail': r're:(?:https?://)(?:www\.)?musescore\.com/.*\.png[^$]+',
            'uploader': 'PapyPiano',
            'creator': 'Wolfgang Amadeus Mozart',
        }
    }, {
        'url': 'https://musescore.com/user/36164500/scores/6837638',
        'info_dict': {
            'id': '6837638',
            'ext': 'mp3',
            'title': 'Sweet Child O\' Mine – Guns N\' Roses sweet child',
            'description': 'md5:4dca71191c14abc312a0a4192492eace',
            'thumbnail': r're:(?:https?://)(?:www\.)?musescore\.com/.*\.png[^$]+',
            'uploader': 'roxbelviolin',
            'creator': 'Guns N´Roses Arr. Roxbel Violin',
        }
    }, {
        'url': 'https://musescore.com/classicman/fur-elise',
        'info_dict': {
            'id': '33816',
            'ext': 'mp3',
            'title': 'Für Elise – Beethoven',
            'description': 'md5:49515a3556d5ecaf9fa4b2514064ac34',
            'thumbnail': r're:(?:https?://)(?:www\.)?musescore\.com/.*\.png[^$]+',
            'uploader': 'ClassicMan',
            'creator': 'Ludwig van Beethoven (1770–1827)',
        }
    }, {
        'url': 'https://musescore.com/minh_cuteee/scores/6555384',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        webpage = self._download_webpage(url, None)
        # Vanity URLs redirect; the canonical og:url carries the numeric id.
        url = self._og_search_url(webpage) or url
        # Fixed: renamed local `id` -> `video_id` (shadowed the builtin).
        video_id = self._match_id(url)
        # NOTE(review): the authorization header is a hard-coded token taken
        # from the site's web player -- confirm it is still accepted.
        mp3_url = self._download_json(f'https://musescore.com/api/jmuse?id={video_id}&index=0&type=mp3&v2=1', video_id,
            headers={'authorization': '63794e5461e4cfa046edfbdddfccc1ac16daffd2'})['info']['url']
        formats = [{
            'url': mp3_url,
            'ext': 'mp3',
            'vcodec': 'none',
        }]
        return {
            'id': video_id,
            'formats': formats,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader': self._html_search_meta('musescore:author', webpage, 'uploader'),
            'creator': self._html_search_meta('musescore:composer', webpage, 'composer'),
        }
|
import random

# Banner (user-facing text is intentionally kept in Portuguese).
print('____________________________________')
print('| |')
print('| Bem vindo ao Jogo de Adivinhação |')
print('|__________________________________|\n')

numero_secreto = random.randrange(1,101)  # secret integer in [1, 100]
total_de_tentativas = 0
pontos = 1000

# Difficulty level determines the number of allowed guesses.
print('Escolha o nivel da dificuldade:')
print('(1)Fácil (2)Médio (3)Dificil')
nivel = int(input("Defina o nivel!"))
if (nivel == 1):
    total_de_tentativas = 20
elif(nivel == 2):
    total_de_tentativas = 10
else:
    total_de_tentativas = 5

for rodada in range (1, total_de_tentativas + 1):
    print("Tentativa {} de {}".format(rodada, total_de_tentativas))
    chute_str = input("Digite um número entre 1 e 100: ")
    print("Você digitou " , chute_str)
    chute = int(chute_str)
    # Out-of-range guesses consume the attempt but cost no points.
    if (chute < 1 or chute >100 ):
        print('Você deve digitar um numero entre 1 e 100!')
        continue
    acertou = chute == numero_secreto
    maior = chute > numero_secreto
    menor = chute < numero_secreto
    if(acertou):
        print("Acertô miseravi! Você fez {} pontos!".format(pontos))
        break
    else:
        # Penalty is proportional to how far off the guess was.
        pontos_perdidos = abs(numero_secreto - chute)
        pontos = pontos - pontos_perdidos
        if(maior):
            print('O seu número foi maior que o número secreto.')
            if(rodada == total_de_tentativas):
                print("O número secreto era {} e você fez {} pontos!".format(numero_secreto, pontos))
        elif(menor):
            print("O seu chute foi menor do que o número secreto!")
            if(rodada == total_de_tentativas):
                print('O numero secreto era {}. Você conseguiu {} pontos!'.format(numero_secreto, pontos))
print("Fim do jogo !") |
from __future__ import print_function
'''
Arabic Handwritten Digit Recognition Using DNN
Akm Ashiquzzaman
Fall 2016
13101002@uap-bd.edu
zamanashiq3@gmail.com
The Simple Convnet model classifing
'''
#Numpy and Scipy Import
import numpy as np
np.random.seed(1337)
'''
In 1000 epoch
Test score: 0.196029246261
Test accuracy: 0.974
'''
#OS CV and PIL for preprossing
from os import listdir
from PIL import Image as img
from PIL import ImageOps as ops
from os.path import isfile, join
import cv2 as cv
hog = cv.HOGDescriptor('hog-properties.xml')
#Main data parser from BMP files
def dataProcess(dirname):
    """Parse every .bmp file under `dirname` into the two model inputs.

    Returns:
        X1: float32 array of inverted grayscale images (one per file).
        X2: array of HOG feature vectors (computed via the module-level
            `hog` descriptor on the OpenCV grayscale read).
        Y: int label array built as index % 10 -- assumes the sorted file
           names cycle through digits 0..9 (TODO confirm naming convention).
    """
    Names = []
    for filename in listdir(dirname):
        if(filename.endswith('.bmp')):
            Names.append(dirname+'/'+filename)
    # Sort so labels (index % 10) line up with the on-disk digit ordering.
    Names.sort()
    X1 = np.array([np.array(ops.invert(img.open(name).convert('L'))) for name in Names]).astype('float32')
    X2 = np.array([np.array(hog.compute(cv.imread(name,0))) for name in Names ])
    num = len(Names)
    Y = np.array([(x%10) for x in range(0,num)]).astype('int')
    return X1 , X2, Y
#Now main spllitiing
def load_data(dirname):
    """Split the parsed dataset: first 2000 samples for training, rest for test."""
    imgs, hogs, labels = dataProcess(dirname)
    split = 2000
    train = (imgs[0:split, :, :], hogs[0:split, :], labels[0:split])
    test = (imgs[split:, :, :], hogs[split:, :], labels[split:])
    return train, test
#Now it's just a function call to load and test data.
# Load the raw image/HOG/label splits from the BMP directory.
(X1_train,X2_train,y_train), (X1_test,X2_test,y_test) = load_data('ArabicBMP')

# Normalize pixel data into the 0~1 range.
X1_train /= 255
X1_test /= 255

X1_train = X1_train.astype('float32')
X1_test = X1_test.astype('float32')

# Flatten the HOG features to 2-D (samples x features) as float32.
X2_train=X2_train.reshape(X2_train.shape[0],X2_train.shape[1]).astype('float32')
X2_test=X2_test.reshape(X2_test.shape[0],X2_test.shape[1]).astype('float32')

# Keras imports (done here, after data prep).
from keras.utils import np_utils
from keras import backend as K

nb_classes = 10

# input image dimensions
img_rows, img_cols = 32, 32

# Code was developed against TensorFlow but trained with Theano, so handle
# both dim orderings: channels-first ('th') vs channels-last.
if K.image_dim_ordering() == 'th':
    X1_train = X1_train.reshape(X1_train.shape[0], 1, img_rows, img_cols)
    X1_test = X1_test.reshape(X1_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X1_train = X1_train.reshape(X1_train.shape[0], img_rows, img_cols, 1)
    X1_test = X1_test.reshape(X1_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# convert class vectors to binary class matrices (one-hot)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

Y_train = Y_train.astype('float32')
Y_test = Y_test.astype('float32')

print('Training Tensor Label:'+str(Y_train.shape))
print('Testing Tensor Label:'+str(Y_test.shape))
print('Training Tensor 1:'+str(X1_train.shape))
print('Testing Tensor 1:'+str(X1_test.shape))
print('Training Tensor 2:'+str(X2_train.shape))
print('Testing Tensor 2:'+str(X2_test.shape))

# Persist all prepared tensors for the training script.
np.savez_compressed('hybrid_model-data.npz',Y_train,Y_test,X1_train,X1_test,X2_train,X2_test)
|
# "3D View" area -> "Mesh Edit" mode -> Tool Shelf -> "Shape Key Tools" panel
import os, re, sys, bpy, time, bmesh, mathutils
from . import common
"""
class VIEW3D_PT_tools_mesh_shapekey(bpy.types.Panel):
bl_label = "シェイプキーツール"
bl_idname = 'VIEW3D_PT_tools_mesh_shapekey'
bl_region_type = 'TOOLS'
bl_space_type = 'VIEW_3D'
bl_category = 'Tools'
bl_context = 'mesh_edit'
def draw(self, context):
pass
"""
|
import time
import requests
from importlib import reload
from _config import *
def get_safe(url, headers=None, cookies=None, error=0, rtype='text'):
    """GET *url* with requests, retrying up to three times on exceptions.

    Args:
        url: Target URL.
        headers: Optional dict of HTTP headers.
        cookies: Optional dict of cookies.
        error: Current retry depth (internal; callers leave it at 0).
        rtype: 'text' to return response.text, 'content' for response.content.

    Returns:
        The response body (str or bytes per `rtype`), or '' on failure or a
        non-200 status.
    """
    try:
        response = requests.get(url, headers=headers, cookies=cookies)
    except Exception as e:
        if error > 2:
            return ''
        print(str(e), 'RELOADING REQUESTS MODULE')
        reload(requests)
        time.sleep(2)
        # Fixed: propagate rtype so a retried call honors the requested
        # return type (it was silently dropped before).
        return get_safe(url, headers, cookies, error + 1, rtype)
    else:
        if response.status_code == 200:
            if rtype == 'text':
                return response.text
            elif rtype == 'content':
                return response.content
        return ''
def get_headers_from_str(header_str, strip=True):
    """Parse a raw "Name: value" header blob (one header per line) into a dict.

    Args:
        header_str: Newline-separated header lines, e.g. pasted from a
            browser's developer tools.
        strip: When True, blank/whitespace-only lines are ignored.

    Returns:
        dict: Header name -> value ('' when the line carries no value).
    """
    lines = header_str.split('\n')
    if strip:
        lines = [line for line in lines if line.strip()]
    # Generalized: split on the FIRST ': ' only, so values that themselves
    # contain ': ' (e.g. dates) are preserved instead of raising.
    sss = [line.split(': ', 1) for line in lines]
    dic = {}
    for tup in sss:
        key = tup[0].replace(":", "")
        if len(tup) == 2:
            dic[key] = tup[1]
        else:
            dic[key] = ''
    return dic
def get_cookies_from_str(cookie_str):
    """Convert a 'name=value; name2=value2' cookie header string into a dict.

    Values containing '=' are kept intact; empty chunks are skipped.
    """
    jar = {}
    for chunk in cookie_str.split("; "):
        if not chunk.strip():
            continue
        name, _, value = chunk.partition("=")
        jar[name] = value
    return jar
|
import cv2
from matplotlib import pyplot as plt

# Disable OpenCL so feature detection is deterministic across devices.
cv2.ocl.setUseOpenCL(False)

# NOTE(review): training and query both load 'test.jpg' -- presumably a
# placeholder; confirm the intended query image path.
img1 = cv2.imread('test.jpg',0) # training image
blur1 = cv2.GaussianBlur(img1,(5,5),0)
ret1,th1 = cv2.threshold(blur1,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
img2 = cv2.imread('test.jpg',0) #query image
blur2 = cv2.GaussianBlur(img2,(5,5),0)
ret2,th2 = cv2.threshold(blur2,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

orb = cv2.ORB_create(nfeatures=500)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

# Detect keypoints and descriptors on the thresholded images.
kp1, des1 = orb.detectAndCompute(th1,None)
kp2, des2 = orb.detectAndCompute(th2,None)
print len(des1)
print len(des2)

#crossCheck cannot be performed with KNN Matcher
matches = bf.knnMatch(des1,des2,k=2)
print len(matches)

# Ratio test: keep matches clearly better than their runner-up (factor 0.9).
good = []
for m,n in matches:
    if m.distance < 0.9*n.distance:
        good.append([m])
print len(good)

img3 = cv2.drawMatchesKnn(th1,kp1,th2,kp2,good,None,flags=2)
plt.imshow(img3)
plt.show()
|
from __future__ import print_function, division
import argparse
import sys
import torch
from torch.utils.data import DataLoader
import torchvision
import pandas as pd
from utils import net_builder
from ssl_dataset import SSL_Dataset, construct_transforms, get_transform
from haparams import create_hparams
def main(args, hps, use_transform=False):
    """Evaluate saved checkpoints across four augmentation settings.

    For each setting in ('tr', 'base', 'noise', 'contr') and each of four
    seeds, loads a checkpoint, evaluates it on the clean eval set (optionally
    re-applying `local_transform` per image when `use_transform` is True),
    and prints a per-setting table of error rates with mean/variance.
    """
    # construct transforms
    # grab mean and std -- per-channel statistics (presumably CIFAR-10; the
    # checkpoints are under cifar10_40 -- confirm)
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    # overwrite batch size if needed
    batch_size = args.batch_size if args.batch_size else hps.train.eval_batch
    # settings
    settings = ['tr', 'base', 'noise', 'contr']
    # NOTE(review): hard-coded local checkpoint root; parameterize if reused.
    path = 'D:/KTH/Deep_learning_adv/Project/fully_trained/kth_project'
    # loop through settings
    full_data = {}
    for setting in settings:
        # Each setting toggles exactly one extra transform flag.
        args.translate = False
        args.noise = False
        args.contrast = False
        if setting == 'tr':
            args.translate = True
        if setting == 'noise':
            args.noise = True
        if setting == 'contr':
            args.contrast = True
        # horizontal flip + crop is on by default
        transform_list = construct_transforms(mean, std, args)
        local_transform = get_transform(mean, std, transform_list, train=True)
        # store accuracies:
        accs = {}
        # loop through seeds
        for i in range(4):
            if args.best:
                checkpoint_path = path + "/" + "saved_models_seed_" + str(
                    i) + "_" + setting + "/cifar10_40/" + "model_best.pth"
                print("Using best model")
            elif args.last:
                checkpoint_path = path + "/" + "saved_models_seed_" + str(
                    i) + "_" + setting + "/cifar10_40/" + "latest_model.pth"
                print("Using latest model")
            else:
                sys.exit("Model type not specified")
            checkpoint = torch.load(checkpoint_path)
            load_model = checkpoint['train_model'] if args.use_train_model else checkpoint['eval_model']
            resnet_builder = net_builder({'depth': hps.model.depth,
                                          'widen_factor': hps.model.widen_factor,
                                          'leaky_slope': hps.model.leaky_slope,
                                          'dropRate': hps.model.dropout})
            resnet = resnet_builder(num_classes=hps.data.num_classes)
            resnet.load_state_dict(load_model)
            if torch.cuda.is_available():
                device = torch.device('cuda')
            else:
                device = torch.device('cpu')
            resnet.to(device)
            resnet.eval()
            eval_dset = SSL_Dataset(name=hps.data.dataset, train=False,
                                    num_classes=hps.data.num_classes, data_dir=hps.data.data_dir, args=args).get_dset_clean()
            eval_loader = DataLoader(eval_dset, batch_size=batch_size, shuffle=False,
                                     num_workers=1, pin_memory=True)
            acc = 0.0
            with torch.no_grad():
                for images, target in eval_loader:
                    images = images.type(torch.FloatTensor).to(device)
                    # loop over batch, re-augmenting each image individually
                    if use_transform:
                        image = []
                        for j in range(images.shape[0]):
                            aug_image = torchvision.transforms.functional.to_pil_image(images[j].cpu())
                            aug_image = local_transform(aug_image)
                            image.append(aug_image)
                        images = torch.stack(image, dim=0).type(torch.FloatTensor).to(device)
                    logit = resnet(images)
                    acc += logit.cpu().max(1)[1].eq(target).sum().numpy()
            key = 'seed ' + str(i)
            # Stored value is the ERROR rate in percent, not accuracy.
            accs[key] = (1 - acc / len(eval_dset)) * 100
        # Population mean/variance over the 4 seeds.
        meann = 0
        for key in accs:
            meann += accs[key]
        meann = meann / 4
        var = 0
        for key in accs:
            var += (accs[key] - meann) ** 2
        var = var / 4
        accs['Mean and variance'] = str(meann) + "+-" + str(var)
        # NOTE(review): 'tr' is labeled Baseline (translate re-enabled) while
        # 'base' is "Removed Translate" -- looks intentional, confirm.
        if setting == 'tr':
            key = "Setting: Baseline"
        if setting == 'noise':
            key = "Setting: Noise"
        if setting == 'contr':
            key = "Setting: Contrast"
        if setting == 'base':
            key = "Setting: Removed Translate"
        full_data[key] = accs
    print()
    print()
    for key in full_data:
        print(key)
        acc = full_data[key]
        df = pd.DataFrame.from_dict(acc, orient='index')
        print(df)
        print("-------------------------------")
if __name__ == "__main__":
    # Command-line interface for the evaluation run.
    parser = argparse.ArgumentParser()
    # use the best model checkpoint for each setting
    parser.add_argument('--best', action='store_true', help='Use the best model for each setting')
    # use the latest model checkpoint for each setting
    parser.add_argument('--last', action='store_true', help='Use the latest model for each setting')
    # use this to switch between train/eval models stored in the checkpoint
    parser.add_argument('--use_train_model', action='store_true')
    # Experimental arguments controlling the test-time augmentations
    parser.add_argument('-tr', '--translate', action='store_true', help='Add translation transform during test')
    parser.add_argument('-n', '--noise', action='store_true', help='Add noise transform transform during test')
    parser.add_argument('-c', '--contrast', action='store_true', help='Add contrast transform to weak augment')
    # use this to overwrite the batch size from the config file
    parser.add_argument('--batch_size', type=int)
    # get the config
    parser.add_argument('-hp', '--hparams', type=str,
                        required=True, help='path to model parameters')
    args = parser.parse_args()
    hps = create_hparams(args.hparams)
    main(args, hps, use_transform=True)
|
from classifier import clf
from flask import Flask, render_template, jsonify, request, json
from hand_data import get_hand_position
from lib import Leap
import pickle
import random
import redis
app = Flask(__name__)
controller = Leap.Controller()
controller.set_policy(Leap.Controller.POLICY_BACKGROUND_FRAMES)
past_symbol = 'a'
prev_prediction = None
r = redis.StrictRedis(host='localhost', port=6379, db=0)
@app.route('/translate')
def translate():
    """Render the live translation UI."""
    return render_template('ui.html')
@app.route('/')
def tutorial():
    """Render the tutorial page (site landing page)."""
    return render_template('tutorial.html')
@app.route('/score', methods=['POST'])
def add_score():
    """Push a {user, score} JSON record onto the Redis 'scoreboard' list.

    Expects form fields 'user' and 'score'. Note: on success the 'error'
    key carries the lpush result (list length); on missing fields it is True.
    """
    data = request.form
    try:
        record = json.dumps({'user': data['user'], 'score': int(data['score'])})
        print record  # Python 2 print statement (module is py2)
        result = r.lpush('scoreboard', record)
        return jsonify(error=result)
    except KeyError:
        return jsonify(error=True)
@app.route('/scores', methods=['GET'])
def get_scores():
    """Return the top-10 scoreboard entries, highest score first."""
    raw_entries = r.lrange('scoreboard', 0, 100)
    scores = sorted((json.loads(entry) for entry in raw_entries),
                    key=lambda s: s['score'], reverse=True)
    return jsonify(scores=scores[:10])
@app.route('/current')
def current_symbol():
    """Poll the Leap controller and classify the current hand pose.

    Returns JSON {symbol, new}; `new` is True only when the symbol changed
    since the previous poll (tracked in module-level state).
    """
    global past_symbol
    global prev_prediction
    # Is there a hand?
    hand_pos = get_hand_position(controller)
    if not hand_pos:
        # No hand visible: blank symbol; flag `new` only on the transition.
        new = past_symbol != ' '
        past_symbol = ' '
        return jsonify(symbol=' ', new=new)
    features = [v for k, v in hand_pos.iteritems()]  # py2 dict iteration
    # Do we have a new symbol?
    prediction = ''.join(clf.predict(features))
    if prediction == prev_prediction:
        # Unchanged since the last poll.
        return jsonify(new=False, symbol=prediction)
    else:
        prev_prediction = prediction
        return jsonify(new=True, symbol=prediction)
@app.route('/splash')
def splash():
    """Render the splash page."""
    return render_template('splash.html')
@app.route('/scoreboard')
def scoreboard():
    """Placeholder endpoint: always reports a hard-coded score of 100."""
    return jsonify(user_score=100)
if __name__ == '__main__':
app.run(debug=True)
|
import pytoml as toml
import os
def load_env():
    """
    load environment variables from the environment. If empty, use default
    values from config.toml.example.

    :return: config dictionary of dictionaries.
    """
    with open('config.toml.example') as defaultconf:
        configdict = toml.load(defaultconf)

    # (env var, config section, config key) triples; a non-empty environment
    # value overrides the bundled default. Replaces seven copy-pasted
    # try/except blocks with one data-driven loop.
    overrides = (
        ('CONSUMER_KEY', 'twitter', 'consumer_key'),
        ('CONSUMER_SECRET', 'twitter', 'consumer_secret'),
        ('HOST', 'web', 'host'),
        ('PORT', 'web', 'port'),
        ('CONTACT', 'web', 'contact'),
        ('MBOX_USER', 'mail', 'mbox_user'),
        ('DB_PATH', 'database', 'db_path'),
    )
    for env_var, section, key in overrides:
        # Unset and empty env vars are treated the same (as before).
        value = os.environ.get(env_var, "")
        if value != "":
            configdict[section][key] = value

    return configdict
# read config in TOML format (https://github.com/toml-lang/toml#toml)
# Prefer a local config.toml; when absent, fall back to environment
# variables layered over the bundled defaults.
try:
    with open('config.toml') as configfile:
        config = toml.load(configfile)
except FileNotFoundError:
    config = load_env()

if __name__ == "__main__":
    # Debug aid: dump the effective configuration as key=value lines.
    for category in config:
        for key in config[category]:
            print(key + "=" + str(config[category][key]))
|
import matplotlib.pyplot as plt
def graph(l_Gs, l_Ds):
    """Plot generator and discriminator loss curves on a single axis."""
    fig = plt.figure(figsize=(10, 7))
    axis = fig.add_subplot(1, 1, 1)

    generator_epochs = range(len(l_Gs))
    discriminator_epochs = range(len(l_Ds))
    axis.plot(generator_epochs, l_Gs, label='Generator Loss')
    axis.plot(discriminator_epochs, l_Ds, label='Discriminator Loss')

    axis.set_xlabel('epoch')
    axis.set_ylabel('loss')
    axis.legend()
    axis.grid()
    fig.show()
''' Question 10
Write a program that accepts a sequence of whitespace separated words as input and prints the words after removing all duplicate words
and sorting them alphanumerically.
Suppose the following input is supplied to the program:
hello world and practice makes perfect and hello world again
Then, the output should be:
again and hello makes perfect practice world '''
# Split the input line on single spaces, remove duplicates via a set,
# then sort lexicographically and re-join with spaces.
input1 = input().split(" ")
result = list(set(input1))
result.sort()
print(" ".join(result))
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent computation.
The main interface of this module is Recurrent().
A recurrent computation describes an auto-regressive process, where outputs
of one time step are fed to the output of the next time step.
This module uses:
theta: the "weights" each RNN uses.
state0: the initial state of each RNN.
cell_fn: A python function describing RNN cell. It must has the following
signature:
cell_fn: (theta, state0, inputs) -> (state1, extras)
state1 is the next RNN state, extras are computed by cell_fn
and the library forwards extras to cell_fn's gradient function.
cell_grad: A python function describing the backprop gradient function
for the RNN cell. It must has the following signature:
cell_grad: (theta, state0, inputs, extras, dstate1) -> (
dtheta, dstate0, dinputs)
dstate1 is what the backprop algorithm provides representing
gradients of state1 w.r.t. the final loss.
In this module, we handle structures of tensors for theta, state0, inputs,
and extras. The structure is an arbitrarily nested python structure, such
as a dictionary of named tuples.
Because the computation is a left-to-right chain, a single in-place accumulator
can be used rather than a stack. Thus a special gradient was written to reduce
unnecessary memory usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.inplace_ops import alias_inplace_update
from tensorflow.python.util import nest
def _AssertIsCompatible(a, b):
"""Checks that `a` and `b` are nested structures of the same type."""
# TODO(drpng): implement.
del a
del b
def _Index(struct, index):
  """Returns a structure with `x[index]` for each tensor `x` in the structure.

  Args:
    struct: A structure of tensors.
    index: A scalar integer tensor. Performance is better if `index` is
      on the host memory.

  Returns:
    A structure of tensors congruent to `struct`.
    For each key in `ret`, `rets[key] = struct[key][index]`.
  """
  idx = ops.convert_to_tensor(index)
  idx.get_shape().assert_has_rank(0)

  def _GatherRow(tensor):
    return array_ops.gather(tensor, idx)

  return nest.map_structure(_GatherRow, struct)
def _Update(struct_acc, struct_x, t):
  """Updates t-th row in accumulators.

  Args:
    struct_acc: The accumulators. A structure of tensors.
    struct_x: The new values. A structure of tensors congruent to `struct_acc`.
    t: A scalar integer. Performance is better if `t` is on the device
      memory.

  Returns:
    A structure of tensors. Say, ret is a returned dictionary. Then, for
    each key, we have:
      ret[key] = struct_acc[key];
      ret[key][t, :] = struct_x[key]
  """
  # Accumulators to leave untouched; currently always empty (the escape
  # hatch exists for the b/62105730 workaround described below).
  to_skip_update = set()
  acc_lst = nest.flatten(struct_acc)
  x_lst = nest.flatten(struct_x)
  t = math_ops.cast(
      [t], dtypes.int32)  # tf.compat.v1.to_int32 casts on-device tensors.
  lst = []
  for acc, x in zip(acc_lst, x_lst):
    if acc in to_skip_update:
      # Until b/62105730 is fixed, we need to avoid inplace update for tensors
      # of rank 1. could reshape to handle it, but we don't really need the
      # values applied to these, so just skip their modification.
      lst += [acc]
    else:
      # Write row t in place; x gains a leading length-1 axis to match.
      lst += [alias_inplace_update(acc, t, array_ops.expand_dims(x, 0))]
  return nest.pack_sequence_as(struct_acc, lst)
def _SeqLenDim(struct):
  """Returns the 0-th dim size of tensors in a structure of tensors.

  This is the max sequence length according to the shape of the inputs.

  Args:
    struct: A structure of tensors. Every tensor's 0-th dim has the same size.

  Returns:
    A scalar tensor which is the size of 0-th dim of every tensors in struct.
  """
  tensors = nest.flatten(struct)
  assert tensors
  first = tensors[0]
  return array_ops.shape(first)[0]
def _Flatten(struct):
  """Flattens a structure of tensors into a flat list (see `nest.flatten`)."""
  return nest.flatten(struct)
def _Pack(elements, struct_template):
  """Packs the list of tensors according to the structure.

  In the event that `elements` should be a scalar, `struct_template` must
  contain exactly one non-trivial element (for instance, `[[], {'x':elt}]`).

  Args:
    elements: Elements to be packed. A list of tensor, or a single tensor.
    struct_template: The container structure in which to pack them.

  Returns:
    A python structure of the same type as `struct_template`, containing
    `elements` as its contained elements.
  """
  # A lone tensor must be boxed before nest can distribute it.
  flat = elements if nest.is_sequence(elements) else [elements]
  return nest.pack_sequence_as(struct_template, flat)
def _EmptyAcc(slen, struct_template):
  """Creates a set of accumulators for tensors in structure.

  Args:
    slen: The sequence length. A scalar tensor.
    struct_template: A structure of tensors.

  Returns:
    A structure congruent to `struct_template`. Say ret is a returned
    dictionary. Then, `ret.key`, a tensor, has the same dtype as
    `struct_template.key`. The tensor's shape has 1 more dimension
    than the tensor `struct_template.key`. The extra 0-th dimension is of size
    `slen`. E.g., if `slen=10` and `struct_template.key`'s shape is `[3, 5]`,
    then, `ret.key`'s shape is `[10, 3, 5]`.
  """

  def _MakeAccumulator(tensor):
    # Prepend the time dimension to the template tensor's dynamic shape.
    acc_shape = array_ops.concat([[slen], array_ops.shape(tensor)], axis=0)
    return inplace_ops.empty(acc_shape, tensor.dtype, init=True)

  return nest.map_structure(_MakeAccumulator, struct_template)
def _EmptyLike(struct):
  """Creates a set of empty initialized tensors.

  Args:
    struct: A structure of tensors.

  Returns:
    A struct of tensors. Each tensor has the same shape and dtype as
    its corresponding tensor in `struct`. And each tensor is initialized.
  """

  def _EmptyTensorLike(tensor):
    return inplace_ops.empty_like(tensor, init=True)

  return nest.map_structure(_EmptyTensorLike, struct)
def _Add(struct_x, struct_y):
  """Adds tensors in `struct_x` with respective tensors in `struct_y`.

  Args:
    struct_x: A struct of tensors.
    struct_y: A struct of tensors congruent to `struct_x`.

  Returns:
    A struct of tensors. Each element of the returned value
    equals `x + y`, with corresponding values in `struct_x` and `struct_y`.
  """
  sums = [
      math_ops.add(x, y)
      for x, y in zip(nest.flatten(struct_x), nest.flatten(struct_y))
  ]
  return nest.pack_sequence_as(struct_x, sums)
def _Dtypes(struct):
  """Returns the data types of all tensors in `struct` as a flat list."""
  return [x.dtype for x in nest.flatten(struct)]
def _ConvertNoneGradientToZeros(xs, dxs):
  """Sanitize dxs so that None becomes zeros appropriately.

  Args:
    xs: A list of tensors.
    dxs: A list of tensors. dxs[i] corresponds to xs[i]'s gradient.

  Returns:
    A structure same as `dxs` with `None` replaced by a zero tensor.
  """
  flat_xs = nest.flatten(xs)
  flat_dxs = nest.flatten(dxs)
  # If x does not get any backprop-ed gradient, propagate zeros of x's shape.
  sanitized = [
      array_ops.zeros_like(x) if dx is None else dx
      for x, dx in zip(flat_xs, flat_dxs)
  ]
  return nest.pack_sequence_as(dxs, sanitized)
# All structures are flattened for use internally. This is for simplicity
# and also to use the Defun construct.
# In the forward pass (inference), the computation is structured as follows.
# Forward: [gradient = _Recurrent.Grad]
# Flatten structures, create accumulators.
# for t = 0..max_input_length:
# Defun ForwardLoopBody:
# Defun Fwd: flatten/pack around cell_fn
# state1 = Fwd(inputs[t], state0)
# acc_state += [state1]
# Pack structures.
# During the backward pass (backpropping the gradient from the last time
# step to the first, through the structure), the computation is structured
# as follows.
# Grad:
# Flatten structures.
# Defun Backward:
#     Create accumulated derivatives: d_theta, d_inputs, d_acc_state.
# Regarding the note at the top of the file, there is only one accumulator
# for d_theta accumulated over the whole sequence.
# for t = max_input_length -1..0:
# Defun BackwardLoopBody:
# Retrieve acc_state[t] computed in the forward pass.
#       Defun Bak: flatten/pack around cell_fn_grad.
# d_state1 is d_state0 from previous step (ie next time).
# d_acc_state[dev_t] += d_state1
# d_theta_t, d_state0, d_inputs_t, = Bak()
# d_inputs[dev_t] += d_inputs
# d_theta += d_theta_t
# d_acc_state[t] += d_state1
# Pack structures and return.
class _Recurrent(object):
  """A helper class to construct a recurrent neural net.

  Builds, at construction time, a `Forward` TF Function (with a custom
  python gradient `Grad` that invokes `Backward`), so that `Compute()`
  only has to call the pre-built function.
  """

  def __init__(self,
               cell_fn,
               cell_grad,
               theta,
               state0,
               inputs,
               max_input_length,
               extras,
               use_tpu,
               aligned_end=False):
    """RNN helper class.

    Args:
      cell_fn: A python function, which computes:
        state1, extras = cell_fn(theta, state0, inputs[t, :])
      cell_grad: A python function which computes:
        dtheta, dstate0, dinputs[t, :] = cell_grad(
          theta, state0, inputs[t, :], extras, dstate1)
      theta: weights. A structure of tensors.
      state0: initial state. A structure of tensors.
      inputs: inputs. A structure of tensors.
      max_input_length: None, or the maximum effective length of the input
        over all batches. A scalar tensor.
      extras: A structure of tensors. The 2nd return value of every
        invocation of cell_fn is a structure of tensors with matching keys
        and shapes of this `extras`.
      use_tpu: A boolean indicating whether the computation is meant to
        run on a TPU.
      aligned_end: A boolean indicating whether the sequence is aligned at
        the end.
    """
    self._theta = theta
    self._state = state0
    self._inputs = inputs
    self._max_input_length = self._MaybeComputeMaxInputLength(
        inputs, max_input_length)
    self._cell_fn = cell_fn
    self._cell_grad = cell_grad
    self._extras = extras
    self._aligned_end = aligned_end

    # pylint: disable=unbalanced-tuple-unpacking

    # NOTE: TF Function (Fwd, Bak, ForwardLoopBody, BackwardLoopBody,
    # Forward and Backward defined below) simply takes a list of
    # Tensors and returns a list of Tensors. When we pass in a
    # structure (a list of structures of Tensors), we use _Flatten to
    # convert the structure into a list of tensor. Conversely, the
    # following code often uses _Pack to formulate a structure from a
    # list of tensors based on a "template".

    # Wraps cell_fn in a TF Function:
    #    state1 = cell_fn(theta, state0, inputs)
    fwd_sig = [self._theta, self._state, self._inputs]

    compiled = use_tpu
    noinline = not compiled
    # Loop counters live on-device as int32 on TPU, int64 elsewhere.
    dev_t_type = dtypes.int32 if use_tpu else dtypes.int64

    @function.Defun(*_Dtypes(fwd_sig))
    def Fwd(*args):
      (theta, state0, inputs) = _Pack(args, fwd_sig)
      state1, extras = self._cell_fn(theta, state0, inputs)
      # cell_fn must not capture tensors from outside its arguments, or the
      # Defun would silently close over them.
      assert not function.get_extra_args(), (
          'cell_fn is not pure with extra args: %s.' %
          (function.get_extra_args()))
      _AssertIsCompatible(state1, self._state)
      _AssertIsCompatible(extras, self._extras)
      return _Flatten([state1, extras])

    # Wraps cell_fn in a TF Function as a for-loop's body.
    #
    # The loop state is composed of:
    #  t: The loop variable. Timestep id.
    #  dev_t: The loop variable mirrored on the device.
    #  theta: the recurrent net's weights.
    #  state0: the previous recurrent state.
    #  inputs: inputs to the recurrent net. inputs[t, :] are for the timestep t.
    #  acc_state: Each timestep's computed new state is also stashed into
    #  acc_state.
    #  acc_extras: Each timestep's computed extras is stashed into acc_extras
    fwdloop_sig = [
        self._theta, self._state, self._inputs, self._state, self._extras
    ]

    @function.Defun(dtypes.int32, dev_t_type, *_Dtypes(fwdloop_sig))
    def ForwardLoopBody(*args):
      """The body of forward loop."""
      t, dev_t = args[0], args[1]
      (theta, state0, inputs, acc_state, acc_extras) = _Pack(
          args[2:], fwdloop_sig)
      inputs_t = _Index(inputs, t)  # external input at time step t.
      fwd = Fwd(*_Flatten([theta, state0, inputs_t]))
      state1, extras = _Pack(fwd, [self._state, self._extras])
      # Saves state1 and extras in their accumulators.
      acc_state = _Update(acc_state, state1, dev_t)
      acc_extras = _Update(acc_extras, extras, dev_t)
      return [math_ops.add(dev_t, 1)] + _Flatten(
          [theta, state1, inputs, acc_state, acc_extras])

    def Grad(op, *args):
      """The python grad function for the Forward function."""
      # NOTE: tf.gradient backprops None for int32/int64 while zeros
      # for float32/float64. For consistency, we always backprop
      # zeros.
      args = list(args)
      for i, dy in enumerate(args):
        if dy is None:
          args[i] = array_ops.zeros_like(op.outputs[i])
      # TODO(drpng): getting the extra state here?
      op_inputs = [x for x in op.inputs]
      op_struct = [
          self._theta, self._state, self._inputs, self._max_input_length,
          self._extras
      ]
      (theta, state0, inputs, max_input_length, _) = _Pack(op_inputs, op_struct)
      # acc_state and acc_extras are computed by the Forward pass and
      # needed by the Backward pass.
      acc_state, _, acc_extras = _Pack([x for x in op.outputs],
                                       [self._state, self._state, self._extras])

      # Forward computes acc_state, the final state and
      # acc_extras. tf.gradients gives us their gradients w.r.t. the
      # final loss. Because acc_extras are not exposed by Compute(),
      # it has no gradients w.r.t. the final loss (i.e., by
      # construction, it must be zeros).
      d_acc_state, d_state1, _ = _Pack(args,
                                       [self._state, self._state, self._extras])
      return Backward(*_Flatten([
          theta, state0, inputs, max_input_length, acc_state, acc_extras,
          d_acc_state, d_state1
      ]))

    # Forward calls ForwardLoopBody n times. Each time computes one
    # time step of the recurrent net.
    forward_sig = [
        self._theta, self._state, self._inputs, self._max_input_length,
        self._extras
    ]

    @function.Defun(
        *_Dtypes(forward_sig), python_grad_func=Grad, noinline=noinline)
    def Forward(*args):
      """Forward pass of the recurrent net."""
      theta, state0, inputs, max_input_length, extras = _Pack(args, forward_sig)

      slen_dim = _SeqLenDim(inputs)

      # Creates accumulators for state0 and extras.
      acc_state = _EmptyAcc(slen_dim, state0)
      acc_extras = _EmptyAcc(slen_dim, extras)

      # With aligned_end, the valid steps occupy the *last* max_input_length
      # rows, so the loop starts at slen_dim - max_input_length.
      t = slen_dim - max_input_length if self._aligned_end else 0
      dev_t = math_ops.cast(t, dtypes.int32) if use_tpu else math_ops.cast(
          t, dtypes.int64)
      run = functional_ops.For(
          start=t,
          limit=slen_dim if self._aligned_end else max_input_length,
          delta=1,
          inputs=[dev_t] + _Flatten(
              [theta, state0, inputs, acc_state, acc_extras]),
          body=ForwardLoopBody,
          rewrite_with_while=compiled)
      _, state1, _, acc_state, acc_extras = _Pack(
          run[1:],
          [self._theta, self._state, self._inputs, self._state, self._extras])

      return _Flatten([acc_state, state1, acc_extras])

    # The per-step backward computes:
    #    d_theta, d_state0, d_inputs = cell_grad(
    #        theta, state0, inputs, extras, d_state1)
    # where d_state1 is the backprop-ed gradient for state1, and
    # extras is the computed by the forward step to facilitate the
    # backward step.
    bak_sig = [
        self._theta, self._state, self._inputs, self._extras, self._state
    ]

    @function.Defun(*_Dtypes(bak_sig))
    def Bak(*args):
      """Backward step."""
      (theta, state0, inputs, extras, d_state1) = _Pack(args, bak_sig)
      (dtheta, dstate0, dinputs) = self._cell_grad(theta, state0, inputs,
                                                   extras, d_state1)
      assert not function.get_extra_args(), (
          'cell_grad is not pure with extra args: %s.' %
          (function.get_extra_args()))
      _AssertIsCompatible(dtheta, self._theta)
      _AssertIsCompatible(dstate0, self._state)
      _AssertIsCompatible(dinputs, self._inputs)
      return _Flatten(
          _ConvertNoneGradientToZeros([theta, state0, inputs],
                                      [dtheta, dstate0, dinputs]))

    # Define defuns used by a functional_ops.If in BackwardLoopBody.
    state_if_sig = [self._state, self._state]

    @function.Defun(*_Dtypes(state_if_sig))
    def ReturnOrigState0(*args):
      """Returns original state0 from inputs."""
      (_, orig_state0) = _Pack(args, state_if_sig)
      return nest.flatten(orig_state0)

    @function.Defun(*_Dtypes(state_if_sig))
    def ReturnAccState(*args):
      """Returns acc_state[t-1] from inputs."""
      (acc_state, _) = _Pack(args, state_if_sig)
      return nest.flatten(acc_state)

    # Wraps cell_grad gradient function in a TF Function as a
    # for-loop's body for the Backward pass.
    #
    # The loop state is composed of:
    #  t: The loop variable. Timestep id.
    #  state0: the initial state for the entire backward loop.
    #  dev_t: The loop variable mirrored on the device.
    #  theta: the recurrent net's weights.
    #  inputs: inputs to the recurrent net. inputs[t, :] are for the timestep t.
    #  acc_state: Each timestep's computed new state was stashed into
    #    acc_state by the Forward pass.
    #  acc_extras: Each timestep's computed extras was stashed into
    #    acc_extras by the Forward pass.
    #  d_theta: All timestep's gradient for theta is accumulated (added) into
    #    d_theta.
    #  d_state1: The backprop-ed gradient for the new stated computed by
    #    timestep t.
    #  d_inputs: d_inputs[t, :] is populated by the backward time step t.
    #  d_acc_state: The backprop-ed gradient for acc_state.
    bakloop_sig = [
        self._theta, self._state, self._inputs, self._state, self._extras,
        self._theta, self._state, self._inputs, self._state
    ]

    @function.Defun(dtypes.int32, dev_t_type, *_Dtypes(bakloop_sig))
    def BackwardLoopBody(*args):
      """Backward loop body function."""
      t, dev_t = args[0], args[1]
      (theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state1,
       d_inputs, d_acc_state) = _Pack(args[2:], bakloop_sig)

      # The input recurrent state for time step t is previous time step's
      # output, or the original state0 when on time step 0.
      state_from_acc = _Index(acc_state, math_ops.maximum(0, t - 1))
      state0 = functional_ops.If(
          math_ops.equal(t, array_ops.constant(0, dtypes.int32)),
          _Flatten([state_from_acc, orig_state0]), ReturnOrigState0,
          ReturnAccState)
      state0 = nest.pack_sequence_as(orig_state0, state0)

      # The external inputs for time step t.
      inputs_t = _Index(inputs, t)
      # The extras for time step t.
      extras_t = _Index(acc_extras, t)

      # Fold the gradient flowing into acc_state[t] into d_state1 before
      # running the per-step backward function.
      d_state1 = _Add(_Index(d_acc_state, t), d_state1)
      (d_theta_t, d_state0, d_inputs_t) = _Pack(
          Bak(*_Flatten([theta, state0, inputs_t, extras_t, d_state1])),
          [self._theta, self._state, self._inputs])
      d_theta = _Add(d_theta, d_theta_t)
      d_inputs = _Update(d_inputs, d_inputs_t, dev_t)
      return [math_ops.subtract(dev_t, 1)] + _Flatten([
          theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state0,
          d_inputs, d_acc_state
      ])

    # Backward calls BackwardLoopBody n times. Each time computes the backprop
    # for one time step of the recurrent net.
    backward_sig = [
        self._theta, self._state, self._inputs, self._max_input_length,
        self._state, self._extras, self._state, self._state
    ]

    @function.Defun(*_Dtypes(backward_sig), noinline=noinline)
    def Backward(*args):
      """Backward pass for the recurrent net."""
      # theta, state0, inputs are Forward's inputs.
      # acc_state is the accumulated 1st output of Forward.
      # acc_extras is the accumulated 2nd output of Forward.
      # d_acc_state is the gradient for acc_state.
      # d_state1 is the gradient for the final state computed by Forward.
      (theta, state0, inputs, max_input_length, acc_state, acc_extras,
       d_acc_state, d_state1) = _Pack(args, backward_sig)

      # Accumulators for gradients.
      d_theta = _EmptyLike(theta)
      d_inputs = _EmptyLike(inputs)

      slen_dim = _SeqLenDim(inputs)

      # Loop backwards. Note the loop's limit is open-ended, so goes through
      # t=0.
      t = slen_dim - 1 if self._aligned_end else max_input_length - 1
      dev_t = math_ops.cast(t, dtypes.int32) if use_tpu else math_ops.cast(
          t, dtypes.int64)
      limit = slen_dim - max_input_length - 1 if self._aligned_end else -1
      run = functional_ops.For(
          start=t,
          limit=limit,
          delta=-1,
          inputs=[dev_t] + _Flatten([
              theta, state0, inputs, acc_state, acc_extras, d_theta, d_state1,
              d_inputs, d_acc_state
          ]),
          body=BackwardLoopBody,
          rewrite_with_while=compiled)

      (theta, state0, inputs, acc_state, acc_extras, d_theta, d_state0,
       d_inputs, d_acc_state) = _Pack(run[1:], bakloop_sig)

      # max_input_length is an integer loop bound; it gets a zero gradient.
      d_max_input_length = array_ops.constant(0, dtype=max_input_length.dtype)
      return _Flatten(
          [d_theta, d_state0, d_inputs, d_max_input_length, acc_extras])

    self._forward = Forward

  def _MaybeComputeMaxInputLength(self, inputs, max_input_length):
    """Returns `max_input_length`, defaulting to the inputs' 0-th dim size."""
    if max_input_length is not None:
      return max_input_length
    return math_ops.reduce_max(array_ops.shape(nest.flatten(inputs)[0])[0])

  def Compute(self):
    """Runs the forward pass; returns (accumulated states, final state)."""
    # Forward also returns acc_extras, which Compute() deliberately drops
    # via the trailing [:2].
    return _Pack(
        self._forward(*_Flatten([
            self._theta, self._state, self._inputs, self._max_input_length,
            self._extras
        ])), [self._state, self._state, self._extras])[:2]
def _GetCellGrad(cell_fn, cell_grad):
  """Returns the gradient function for cell_fn.

  Args:
    cell_fn: The recurrent neural net's cell function.
    cell_grad: If not None, cell_fn's gradient function.

  Returns:
    Returns cell_grad if not None. Otherwise, assume cell_fn is a python
    function representing the recurrent neural net's cell function, i.e.,
      cell_fn: (theta, state0, inputs) -> (state1, extra)
    returns its default gradient python function, i.e.,
      cell_grad: (theta, state0, inputs, extras, dstate1) -> (
          dtheta, dstate0, dinputs)
  """
  if cell_grad:
    return cell_grad

  def CellGrad(theta, state0, inputs, extras, dstate1):
    """Default gradient function for cell_fn."""
    # NOTE: The default grad function recomputes the forward
    # function and does not take advantage of 'extras' returned by
    # the forward function.
    del extras
    state1, extras = cell_fn(theta, state0, inputs)
    grads = gradients_impl.gradients(
        ys=_Flatten([state1]),
        xs=_Flatten([theta, state0, inputs]),
        grad_ys=_Flatten([dstate1]))
    packed_grads = _Pack(grads, [theta, state0, inputs])
    return _ConvertNoneGradientToZeros([theta, state0, inputs], packed_grads)

  return CellGrad
def _IsSingleTimeStep(inputs, max_input_length):
  """Returns True only if the time dimension of inputs is 1."""
  if not isinstance(max_input_length, ops.Tensor):
    return max_input_length == 1
  # Dynamic length: fall back to the static shape; every tensor must have a
  # known 0-th dim equal to 1.
  return all(x.shape.dims is not None and x.shape[0].value == 1
             for x in nest.flatten(inputs))
def Recurrent(theta,
              state0,
              inputs,
              cell_fn,
              cell_grad=None,
              extras=None,
              max_input_length=None,
              use_tpu=False,
              aligned_end=False):
  """Compute a recurrent neural net.

  Roughly, Recurrent() computes the following:
    state = state0
    for t in inputs' sequence length:
      state = cell_fn(theta, state, inputs[t, :])
      accumulate_state[t, :] = state
    return accumulate_state, state

  theta, state, inputs are all structures of tensors.

  inputs[t, :] means taking a slice out from every tensor in the inputs.

  accumulate_state[t, :] = state means that we stash every tensor in
  'state' into a slice of the corresponding tensor in
  accumulate_state.

  cell_fn is a python callable computing (building up a TensorFlow
  graph) the recurrent neural network's one forward step. Two calls of
  cell_fn must describe two identical computations.

  By construction, Recurrent()'s backward computation does not access
  any intermediate values computed by cell_fn during forward
  computation. We may extend Recurrent() to support that by taking a
  customized backward function of cell_fn.

  Args:
    theta: weights. A structure of tensors.
    state0: initial state. A structure of tensors.
    inputs: inputs. A structure of tensors.
    cell_fn: A python function, which computes:
      state1, extras = cell_fn(theta, state0, inputs[t, :])
    cell_grad: A python function which computes:
      dtheta, dstate0, dinputs[t, :] = cell_grad(
        theta, state0, inputs[t, :], extras, dstate1)
    extras: A structure of tensors. The 2nd return value of every
      invocation of cell_fn is a structure of tensors with matching keys
      and shapes of this `extras`.
    max_input_length: maximum length of effective input. This is used to
      truncate the computation if the inputs have been allocated to a
      larger size. A scalar tensor.
    use_tpu: whether or not we are on TPU.
    aligned_end: A boolean indicating whether the sequence is aligned at
      the end.

  Returns:
    accumulate_state and the final state.
  """
  if cell_grad is None and _IsSingleTimeStep(inputs, max_input_length):
    # The seqlen length is statically known as 1. Hence, we just need to
    # call cell_fn once without putting it into a loop.
    inputs = nest.map_structure(lambda x: array_ops.squeeze(x, axis=0), inputs)
    state1, _ = cell_fn(theta, state0, inputs)
    acc_state = nest.map_structure(lambda x: array_ops.expand_dims(x, axis=0),
                                   state1)
    return acc_state, state1

  # If cell_grad is not given, derives the gradient function from
  # cell_fn.
  cell_grad = _GetCellGrad(cell_fn, cell_grad)

  if extras is None:
    # Derives 'extras' so that we can allocate extras' accumulator.
    # cell_fn is invoked once on step 0 purely to learn extras' structure.
    _, extras = cell_fn(theta, state0, _Index(inputs, 0))
    extras = nest.map_structure(array_ops.zeros_like, extras)
  else:
    # Sanity-check the caller-provided extras against what cell_fn produces.
    _, actual = cell_fn(theta, state0, _Index(inputs, 0))
    _AssertIsCompatible(extras, actual)

  return _Recurrent(
      cell_fn=cell_fn,
      cell_grad=cell_grad,
      theta=theta,
      state0=state0,
      inputs=inputs,
      max_input_length=max_input_length,
      extras=extras,
      use_tpu=use_tpu,
      aligned_end=aligned_end).Compute()
|
class Solution:
    def shortestCommonSupersequence(self, str1: str, str2: str) -> str:
        """Return the shortest string containing both str1 and str2 as
        subsequences.

        The original method body was missing. Implemented via the classic
        LCS dynamic program: build the LCS length table, then walk it
        backwards, emitting characters from both strings while emitting
        each shared (LCS) character only once. Runs in O(len(str1) *
        len(str2)) time and space.
        """
        m, n = len(str1), len(str2)
        # lcs[i][j] = length of the longest common subsequence of
        # str1[:i] and str2[:j].
        lcs = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if str1[i - 1] == str2[j - 1]:
                    lcs[i][j] = lcs[i - 1][j - 1] + 1
                else:
                    lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])

        # Reconstruct the supersequence by walking the table from the end.
        parts = []
        i, j = m, n
        while i > 0 and j > 0:
            if str1[i - 1] == str2[j - 1]:
                # Shared character: emit once for both strings.
                parts.append(str1[i - 1])
                i -= 1
                j -= 1
            elif lcs[i - 1][j] >= lcs[i][j - 1]:
                parts.append(str1[i - 1])
                i -= 1
            else:
                parts.append(str2[j - 1])
                j -= 1
        parts.reverse()
        # At most one of the prefixes below is non-empty.
        return str1[:i] + str2[:j] + "".join(parts)
import networkx as nx
import numpy as np
import scipy
import scipy.sparse.linalg
def is_symmetric(A, tol=1e-8):
    """Return True if the sparse matrix `A` is symmetric within tolerance.

    Computes the infinity norm of (A - A.T) and compares it to `tol`.

    Fixes: `scipy.Inf` was a deprecated alias removed in SciPy >= 1.12;
    use `np.inf` instead. Note also that `scipy.sparse.linalg` must be
    imported explicitly (bare `import scipy` does not load submodules).

    Args:
        A: a scipy sparse matrix.
        tol: maximum allowed infinity norm of the asymmetry.
    """
    return scipy.sparse.linalg.norm(A - A.T, np.inf) < tol
def get_tsp_features(G):
    """Generates a list of TSP based features for the given graph.

    Args:
        G (object): networkx graph object whose edges carry a 'cost'
            attribute.

    Returns:
        features (dict): a dictionary of the features in the given graph

    Fixes: removed a dead bare-expression statement
    (`normalised_nearest_neighbours` on its own line) and hoisted the
    duplicated upper-triangle extraction.
    """
    features = {}
    adj = nx.adjacency_matrix(G, weight='cost')
    shortest_paths = dict(nx.shortest_path_length(G, weight="cost"))
    ecc = nx.eccentricity(G, sp=shortest_paths)

    # Nearest-neighbour cost for every node, normalised to unit L2 norm.
    nearest_neighbours = np.asarray(
        [
            min([edge[2]['cost'] for edge in G.edges(node, data=True)])
            for node in G.nodes
        ]
    )
    normalised_nearest_neighbours = nearest_neighbours / np.sqrt(
        np.sum(nearest_neighbours ** 2)
    )

    # Fraction of distinct distances (rounded to one decimal place).
    cost_one_dp = [np.round(edge[2]['cost'], 1) for edge in G.edges(data=True)]

    features['tsp_nnd_var'] = np.var(normalised_nearest_neighbours)
    features['tsp_nnd_coefficient_var'] = 100 * (
        np.std(normalised_nearest_neighbours) / np.mean(normalised_nearest_neighbours)
    )
    features['tsp_radius'] = nx.algorithms.distance_measures.radius(G, e=ecc)
    features['tsp_mean'] = np.mean(adj)
    # NOTE(review): nx.to_numpy_matrix is deprecated (removed in networkx 3.0);
    # nx.to_numpy_array is the forward-compatible replacement.
    features['tsp_std'] = np.std(nx.to_numpy_matrix(G, weight='cost'))
    features['tsp_frac_distinct_dist_one_dp'] = len(set(cost_one_dp)) / len(cost_one_dp)
    features['tsp_clustering_coeff_variance'] = np.var(
        [item[1] for item in nx.clustering(G).items()]
    )
    features['tsp_symmetric'] = is_symmetric(adj)

    # Asymmetry features over the strict upper triangle of |A - A.T|.
    diff = abs(adj - adj.T)
    diff = diff.toarray()
    upper_triangle = diff[np.triu_indices(diff.shape[0], k=1)]
    features['tsp_asym_diff_matrix_sd'] = np.std(upper_triangle)
    features['tsp_asym_diff_matrix_mean'] = np.mean(upper_triangle)
    return features
|
# terrascript/data/pagerduty.py
import terrascript
# Data-source stubs for the Terraform PagerDuty provider. Each class only
# names the data source; all behaviour comes from terrascript.Data.
class pagerduty_escalation_policy(terrascript.Data):
    pass

class pagerduty_schedule(terrascript.Data):
    pass

class pagerduty_user(terrascript.Data):
    pass

class pagerduty_team(terrascript.Data):
    pass

class pagerduty_vendor(terrascript.Data):
    pass

class pagerduty_extension_schema(terrascript.Data):
    pass

class pagerduty_service(terrascript.Data):
    pass

class pagerduty_business_service(terrascript.Data):
    pass

class pagerduty_priority(terrascript.Data):
    pass

class pagerduty_ruleset(terrascript.Data):
    pass

# Public API of this generated module.
__all__ = [
    "pagerduty_escalation_policy",
    "pagerduty_schedule",
    "pagerduty_user",
    "pagerduty_team",
    "pagerduty_vendor",
    "pagerduty_extension_schema",
    "pagerduty_service",
    "pagerduty_business_service",
    "pagerduty_priority",
    "pagerduty_ruleset",
]
|
from HDMR import RBF_HDMR
from Objective_function import Func_3d
from collections import OrderedDict
import numpy as np
from pyGPGO.GPGO import GPGO
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.acquisition import Acquisition
from pyGPGO.covfunc import matern52
import warnings
# Build the 3-D objective function and its RBF-HDMR surrogate model.
f_objective = Func_3d.x1x2x3()
rbf_hdmr = RBF_HDMR.Rbf_Hdmr(f_objective)
# Bounds (value intervals) of the objective's variables.
x_round = f_objective.bounds
# Compute x0, f0. First selection strategy: pick them at random.
x0, f0 = rbf_hdmr.x0_fx0()
point_round, f_values, type_fx, xishu_arr = rbf_hdmr.func_DEMO()
# Check the accuracy of the first-order model.
is_True = rbf_hdmr.simulation_func()
print(is_True)
if is_True == False:
    # By convention each variable may take part in at most one pairwise
    # interaction, i.e. a variable's index appears at most once in
    # x_ij_index.
    x_ij_index, x_ij_point, x_ij_value, x_ij_xishu = rbf_hdmr.f_Two_index()
    # First determine which variables are independent of all others, then
    # collect the indices of the interacting variables into one array.
    x_inter = []
    for index in range(len(x_ij_index)):
        x_inter.append(list(x_ij_index[index]))
    x_inter = np.unique(x_inter)
print('*********************************函数模型搭建完毕************************************')
def func_first_order(type_f=None, xishu_f=None, point_f=None):
    """Minimise a first-order (single-variable) HDMR component function.

    Args:
        type_f: Component type tag produced by func_DEMO ('linear' or a
            non-linear tag).
        xishu_f: Coefficients of the component model.
        point_f: Sample points backing the component model.

    Returns:
        (x_min, f_min_i): the minimiser and the minimum value found.

    NOTE(review): bounds are always read from x_round[0]; this assumes
    every variable shares the same interval -- confirm against Func_3d.
    """
    if type_f == 'linear':
        # A linear 1-D component attains its minimum at an endpoint:
        # evaluate both ends and keep the smaller one.
        f_left = rbf_hdmr.func_1D_value(x_round[0][0], type=type_f, xishu=xishu_f,
                                        point_sample=point_f)
        f_right = rbf_hdmr.func_1D_value(x_round[0][1], type=type_f, xishu=xishu_f,
                                         point_sample=point_f)
        if f_left > f_right:
            f_min_i = f_right
            x_min = x_round[0][1]
        else:
            f_min_i = f_left
            # BUG FIX: was `x_round[1][0]` (another variable's bound); the
            # left endpoint evaluated above is x_round[0][0].
            x_min = x_round[0][0]
    else:
        # Non-linear 1-D component: find the minimum with Bayesian
        # optimisation by maximising the negated function.
        def f(x):
            return -(rbf_hdmr.func_1D_value(x, type=type_f, xishu=xishu_f, point_sample=point_f))
        sexp = matern52()
        gp = GaussianProcess(sexp)
        acq = Acquisition(mode='UCB')
        round_x = (x_round[0][0], x_round[0][1])
        param = {'x': ('cont', round_x)}
        gpgo = GPGO(gp, acq, f, param)
        gpgo.run(max_iter=5, nstart=1)
        res, f_min_i = gpgo.getResult()
        # NOTE(review): getResult() returns the best value of the negated
        # objective; f_min_i may need its sign flipped -- confirm.
        x_min = res['x']
    return x_min, f_min_i
def func_model(index_ij=None, init_x=None):
    '''Locally optimise the model restricted to the selected component terms.

    :param index_ij: indices of the selected component functions; indices
        below len(type_fx) denote first-order terms, the rest second-order.
    :param init_x: initial best solution for the local iteration.
    :return: the locally optimal solution (implementation unfinished; the
        reconstruction logic below is still commented out).
    '''
    # Current best function value.
    func_min = 0
    # Coordinates of the current best value.
    x_min = init_x
    first_order = []
    second_order = []
    # Partition the selected terms into first- and second-order groups.
    for k in index_ij:
        if k < len(type_fx):
            # First-order term.
            first_order.append(k)
        else:
            # Second-order term.
            second_order.append(k)
    # print(first_order)
    # print(second_order)
    '''
    #判断对应的坐标
    if len(first_order) == 0:
        # 只存在二位情况
        ij_index_0 = x_ij_index[second_order[0] - len(type_fx)]
        ij_xishu_0 = x_ij_xishu[second_order[0] - len(type_fx)]
        ij_point_0 = x_ij_point[second_order[0] - len(type_fx)]
        ij_index_1 = x_ij_index[second_order[1] - len(type_fx)]
        ij_xishu_1 = x_ij_xishu[second_order[1] - len(type_fx)]
        ij_point_1 = x_ij_point[second_order[1] - len(type_fx)]
        ij_index_2 = x_ij_index[second_order[2] - len(type_fx)]
        ij_xishu_2 = x_ij_xishu[second_order[2] - len(type_fx)]
        ij_point_2 = x_ij_point[second_order[2] - len(type_fx)]
        ij_index = np.unique([ij_index_0, ij_index_1, ij_index_2])
        print(ij_index)
    '''
func_model(index_ij=[5, 6, 14], init_x=x0)
|
def linear_search(the_list, target):
    """Return the index of the first occurrence of `target` in `the_list`.

    Prints the outcome as a side effect (kept from the original).

    Args:
        the_list: sequence to search.
        target: value to look for.

    Returns:
        The index of the first match, or -1 if `target` is absent.
    """
    # enumerate avoids the non-idiomatic range(len(...)) indexing.
    for index, value in enumerate(the_list):
        if value == target:
            print("Found at index", index)
            return index
    print("Target is not in the list")
    return -1
# Demo: exercise the search on a sample list (two hits, one miss).
my_list = [6 ,3, 4, 2, 5, 7]
linear_search(my_list, 5)
linear_search(my_list, 3)
linear_search(my_list, 8)
import numpy as np
from tqdm import tqdm
from mf.knnbased import MFKnn
from mf.centbased import MFCent
from mf.hyperplanebased import MFHyperPlaneErrors, MFHyperPlaneAcima
class MetaFeatures(object):
    """Computes meta-feature representations of a dataset.

    Each (metric, approach) pair in `groups` selects one meta-feature
    extractor ('knn' -> MFKnn, 'cent' -> MFCent); `transform` horizontally
    stacks the outputs of all fitted extractors.
    """

    def __init__(self, groups, k: int = -1):
        """
        Args:
            groups: iterable of (metric, approach) pairs.
            k: number of neighbours; required whenever a 'knn' approach
                is used.

        Raises:
            ValueError: if no group is given, or a 'knn' group is
                requested without a valid `k`.
        """
        self.groups = groups
        self.k = k
        if len(groups) == 0:
            # BUG FIX: the original `raise("...")` raised a TypeError
            # (str is not an exception); raise a proper ValueError.
            raise ValueError("Select at least one mf group")
        for (metric, approach) in groups:
            if approach == "knn" and self.k == -1:
                raise ValueError("Please define the k neighbor parameter")

    def fit(self, X, y):
        """Fit one estimator per configured group on (X, y)."""
        self.estimators = []
        for (metric, approach) in self.groups:
            if approach == 'knn':
                estimator = MFKnn(metric, self.k)
            elif approach == 'cent':
                estimator = MFCent(metric)
            # TODO: Remover apos refatorar codigo mf err
            elif approach == 'err':
                continue
            else:
                # BUG FIX: an unknown approach previously reused the prior
                # iteration's estimator (or raised NameError); fail loudly.
                raise ValueError("Unknown approach: %s" % approach)
            self.estimators.append(estimator.fit(X, y))
        return self

    def transform(self, X):
        """Return the horizontally stacked meta-features of all estimators.

        NaNs in the stacked matrix are replaced by zeros via
        np.nan_to_num, preserving the original behaviour.
        """
        X_list = [estimator.transform(X) for estimator in self.estimators]
        # BUG FIX: the original guarded stacking with `len(X_all) > 1`
        # (number of *rows*), silently dropping extra estimators for
        # single-row inputs; stack every estimator's output unconditionally.
        X_all = X_list[0]
        for extra in X_list[1:]:
            X_all = np.hstack((X_all, extra))
        if np.isnan(np.sum(X_all)):
            X_all = np.nan_to_num(X_all)
        return X_all
# Generated by Django 2.1.7 on 2020-02-19 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alters problem_statement.requirements to a
    TextField with max_length=2000."""

    dependencies = [
        ('roboPortal', '0003_auto_20200219_1730'),
    ]

    operations = [
        migrations.AlterField(
            model_name='problem_statement',
            name='requirements',
            field=models.TextField(max_length=2000),
        ),
    ]
|
# Copyright The IETF Trust 2016, All Rights Reserved
import six
import debug # pyflakes:ignore
from inspect import isclass
from django.conf.urls import url as django_url
from django.views.generic import View
def url(regex, view, kwargs=None, name=None):
    """Wrapper around django.conf.urls.url that auto-derives a url name.

    When `name` is not supplied, derives one from `view`: the dotted
    module path for a named callable, or the string itself for a string
    view. Always delegates to django's `url` at the end; the `branch`
    variable only classifies the case for the (commented-out) debug hooks.
    """
    if callable(view) and hasattr(view, '__name__'):
        view_name = "%s.%s" % (view.__module__, view.__name__)
    else:
        view_name = regex
    # Classify the view argument and derive a default name where possible.
    if name:
        branch = 'name'
    elif isinstance(view, (list, tuple)):
        # An include(...) tuple; left unnamed.
        branch = 'list'
    elif isinstance(view, six.string_types):
        branch = 'string'
        name = view
    elif callable(view) and hasattr(view, '__name__'):
        branch = 'callable'
        name = view_name
    elif isinstance(view, View):
        # NOTE(review): matches View *instances* only; class-based views
        # passed as classes fall through to the next test.
        branch = 'view'
    elif isclass(view) or hasattr(view, '__class__'):
        # hasattr(view, '__class__') holds for every object, so the else
        # branch below is effectively unreachable.
        branch = 'class'
    else:
        branch = 'notimpl'
        raise NotImplementedError("Auto-named url from view of type %s: %s" % (type(view), view))
    if branch == 'name':
        # List explicit url names with accompanying view dotted-path:
        #debug.say("%s %s" % (view_name, name))
        pass
    if name:
        branch = branch # silent pyflakes
        #debug.show('branch')
        #debug.show('name')
        pass
    return django_url(regex, view, kwargs=kwargs, name=name)
|
from typing import Any
import os.path
import pandas as pd
def read_num_profiles(filepath: str, column_length: int = 12) -> int:
    """Read the profile count from the first `column_length` characters of
    the file's first line, ignoring any embedded spaces.

    Args:
        filepath: path to the profiles index file.
        column_length: width of the fixed-format column holding the count.

    Returns:
        The number of profiles as an integer.
    """
    with open(filepath, "r") as handle:
        header = handle.readline()[:column_length]
    return int(header.replace(" ", ""))
def read_all_profile_attributes(
    logs_dir: str,
    num_profiles: int,
    profile_prefix: str = "profile",
    profile_suffix: str = ".data",
) -> pd.DataFrame:
    """Collect the attribute rows of profiles 1..num_profiles into one frame.

    Args:
        logs_dir: directory holding the profile files.
        num_profiles: number of profiles to read.
        profile_prefix: filename prefix, e.g. "profile" -> "profile3.data".
        profile_suffix: filename suffix.

    Returns:
        A DataFrame with one attribute row per profile, plus a "profile"
        column carrying the profile number. Empty if num_profiles < 1.
    """
    frames = []
    for i in range(1, num_profiles + 1):
        name = profile_prefix + str(i) + profile_suffix
        filepath = os.path.join(logs_dir, name)
        attr = read_profile_file(filepath, read_data=False).attributes
        attr["profile"] = i
        frames.append(attr)
    # PERF: a single concat avoids the quadratic copying of the original
    # per-iteration `pd.concat([attributes, attr])`.
    return pd.concat(frames) if frames else pd.DataFrame()
def read_profile_file(
    filepath: str,
    attributes_start_row: int = 1,
    data_start_row: int = 5,
    read_data: bool = True,
) -> "MESAProfile":
    """Parse a fixed-width MESA profile file.

    Args:
        filepath: path to the profile file.
        attributes_start_row: rows to skip before the attributes row.
        data_start_row: rows to skip before the data table.
        read_data: when False, only the attributes are parsed.

    Returns:
        A MESAProfile holding the attributes and (optionally) the data.
    """
    attributes = pd.read_fwf(filepath, skiprows=attributes_start_row, nrows=1)
    if read_data:
        data = pd.read_fwf(filepath, skiprows=data_start_row)
    else:
        data = None
    return MESAProfile(attributes, data)
class MESAProfile:
    """Container for one MESA profile: a one-row attributes frame plus the
    (optional) per-zone data table."""

    def __init__(self, attributes: pd.DataFrame, data: pd.DataFrame) -> None:
        # attributes: single-row DataFrame of header values.
        # data: per-zone DataFrame, or None when not read.
        self.attributes = attributes
        self.data = data

    def __str__(self) -> str:
        return str(self.attributes) + "\n" + str(self.data)

    def get_attribute(self, attr: str) -> Any:
        """Return the scalar value of attribute `attr`.

        Raises:
            KeyError: if `attr` is not a column of the attributes frame.
        """
        if self.has_attribute(attr):
            return self.attributes[attr].iloc[0]
        else:
            raise KeyError(
                'Attribute "'
                + attr
                + '" is not a valid attribute for this MESA profile.'
            )

    def has_attribute(self, attr: str) -> bool:
        """Return True if `attr` is a column of the attributes frame."""
        # Membership test on the Index directly; the original materialised
        # list(self.attributes.columns.values) on every call.
        return attr in self.attributes.columns
|
# Copyright 2014 Intel Corporation.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
import webtest
from apmec.api import extensions
from apmec.api.v1 import attributes
from apmec.tests.unit import test_api_v2
from apmec.tests.unit import test_extensions
from apmec.tests.unit import testlib_api
class ExtensionTestCase(testlib_api.WebTestCase):
    """Base test case wiring an API extension into a fully mocked plugin."""

    def _resotre_attr_map(self):
        # NOTE(review): the method name carries the original 'resotre' typo;
        # kept unchanged because subclasses may reference it by name.
        attributes.RESOURCE_ATTRIBUTE_MAP = self._saved_attr_map

    def _setUpExtension(self, plugin, service_type,
                        resource_attribute_map, extension_class,
                        resource_prefix, plural_mappings=None,
                        translate_resource_name=False,
                        allow_pagination=False, allow_sorting=False,
                        supported_extension_aliases=None,
                        ):
        """Configure a mocked `plugin` and register the extension under test.

        Args:
            plugin: dotted path of the plugin class to mock.
            service_type: advertised plugin type; also enables service_plugins.
            resource_attribute_map: attribute map contributed by the extension.
            extension_class: the extension under test.
            resource_prefix: URL prefix for the extension's resources.
            plural_mappings: optional singular->plural overrides.
            translate_resource_name: whether resource names are translated.
            allow_pagination: enable (native) pagination on the mock.
            allow_sorting: enable (native) sorting on the mock.
            supported_extension_aliases: aliases the mocked plugin claims.
        """
        self._resource_prefix = resource_prefix
        self._plural_mappings = plural_mappings or {}
        self._translate_resource_name = translate_resource_name

        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None

        # Save the global RESOURCE_ATTRIBUTE_MAP
        self._saved_attr_map = attributes.RESOURCE_ATTRIBUTE_MAP.copy()
        # Restore the global RESOURCE_ATTRIBUTE_MAP
        self.addCleanup(self._resotre_attr_map)

        # Create the default configurations
        self.config_parse()

        # just stubbing core plugin with plugin
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('core_plugin', plugin)
        if service_type:
            cfg.CONF.set_override('service_plugins', [plugin])

        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        instance = self.plugin.return_value
        if service_type:
            instance.get_plugin_type.return_value = service_type
        if supported_extension_aliases is not None:
            instance.supported_extension_aliases = supported_extension_aliases
        if allow_pagination:
            cfg.CONF.set_override('allow_pagination', True)
            # instance.__native_pagination_support = True
            # Name-mangled attribute must be set through its mangled form.
            native_pagination_attr_name = ("_%s__native_pagination_support"
                                           % instance.__class__.__name__)
            setattr(instance, native_pagination_attr_name, True)
        if allow_sorting:
            cfg.CONF.set_override('allow_sorting', True)
            # instance.__native_sorting_support = True
            native_sorting_attr_name = ("_%s__native_sorting_support"
                                        % instance.__class__.__name__)
            setattr(instance, native_sorting_attr_name, True)
class ExtensionTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
resource_attribute_map)
return extension_class.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
ext_mgr = ExtensionTestExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
def _test_entity_delete(self, entity):
"""Does the entity deletion based on naming convention."""
entity_id = uuidutils.generate_uuid()
path = self._resource_prefix + '/' if self._resource_prefix else ''
path += self._plural_mappings.get(entity, entity + 's')
if self._translate_resource_name:
path = path.replace('_', '-')
res = self.api.delete(
test_api_v2._get_path(path, id=entity_id, fmt=self.fmt))
delete_entity = getattr(self.plugin.return_value, "delete_" + entity)
delete_entity.assert_called_with(mock.ANY, entity_id)
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
|
import json
import logging
import platform
from pathlib import Path
import requests
from rich import print
log = logging.getLogger(__name__)
# Select the platform-specific terminal command implementations at import
# time; any OS other than Windows/Linux/macOS is unsupported.
if platform.system() == 'Windows':
    from .terminal_commands.windows import *
elif platform.system() == 'Linux' or platform.system() == 'Darwin':
    from .terminal_commands.posix import *
else:
    raise OSError("Unknown OS")
def bin_folder() -> str:
    """Return the user's ``<user home>/bin`` folder, creating it when absent.

    :return: ``bin`` location
    """
    target = Path.home() / "bin"
    if not target.is_dir():
        log.debug(f"bin directory does not exists. Creating one now. New path: {target!r}")
        target.mkdir()
    return str(target)
def get_latest_version_api(override_version: str = None) -> str:
    """
    Get the latest Hugo version from the GitHub releases API.

    :param override_version: An override version of Hugo. It is validated
        against the GitHub tags endpoint; the process exits with status 1
        when the tag does not exist.
    :return: version number (without the leading ``v``)
    """
    if override_version is not None:
        hugo_response = requests.get(f"https://api.github.com/repos/gohugoio/hugo/releases/tags/v{override_version}")
        if hugo_response.ok:
            return override_version
        # Bug fix: the payload was passed as a positional logging argument
        # with no format placeholder in the message, which raises a
        # formatting error inside the logging machinery. Use lazy %s.
        log.debug("Override version request error occurred: %s", hugo_response.content)
        print(f"\n[red bold]Hugo v{override_version} does not exists. See https://github.com/gohugoio/hugo/releases for more information.")
        # Equivalent to exit(1) but does not depend on the site module.
        raise SystemExit(1)
    hugo_response = requests.get("https://api.github.com/repos/gohugoio/hugo/releases/latest")
    # Strip the leading 'v' from the tag name (e.g. "v0.101.0" -> "0.101.0").
    return json.loads(hugo_response.content.decode('utf-8'))['tag_name'][1:]
|
import base64
import binascii
import datetime
import hashlib
import hmac
import json
import logging
import os
import time
import urllib
import urllib2

import webapp2
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from webapp2_extras import auth
from webapp2_extras import sessions
from webapp2_extras.auth import InvalidAuthIdError

from config import config
from controllers import base_controller
def authenticated(handler):
    """Decorator that checks if there's a user associated with the
    current session. Will fail if there's no session present.
    """
    def check_authentication(self, *args, **kwargs):
        # Run the wrapped handler only when a session user exists;
        # otherwise bounce the request to the login page.
        if self.auth.get_user_by_session():
            return handler(self, *args, **kwargs)
        self.redirect(self.uri_for('login'), abort=True)
    return check_authentication
def xsrf_protect(handlar):
    """Decorator to protect webapp2's get and post functions from XSRF.
    Decorating a function with @xsrf_protect will verify that a valid
    XSRF token has been submitted through the xsrf parameter.
    Both GET and POST parameters are accepted.
    If no token or an invalid token is received,
    the decorated function is not called and a 403 error will be issued.
    """
    def decorate(self, *args, **kwargs):
        csrf_handlar = CSRFHandlar()
        path = os.environ.get('PATH_INFO', '/')
        token = self.request.get('xsrf', None)
        # A missing token is rejected outright.
        if not token:
            self.send_error(403, 'Not authorised, bad request.')
            return
        # Tokens are bound to the session user, or to the anonymous
        # placeholder when no session exists.
        session_user = self.auth.get_user_by_session()
        user = session_user.get_id() if session_user else csrf_handlar.ANONYMOUS_USER
        if not csrf_handlar.validate_token(config.CSRF_SECRET_KEY,
                                           token, user, path):
            self.send_error(403, 'Not authorised, invalid request.')
            return
        return handlar(self, *args, **kwargs)
    return decorate
def admin(handlar):
    """Decorator to check if logged in user is admin. Returns 403 if not.

    Bug fix: the original function never returned ``decorate``, so any
    method decorated with ``@admin`` was silently replaced by ``None``.
    A missing session user is now also treated as "not an admin" instead
    of raising AttributeError on ``None``.
    """
    def decorate(self, *args, **kwargs):
        user = self.auth.get_user_by_session()
        if not user or not user.is_admin:
            self.send_error(403, 'Not authorised, user is not a admin.')
            return
        return handlar(self, *args, **kwargs)
    return decorate
class CSRFHandlar(base_controller.BaseHandler):
    """Generates and validates HMAC-based XSRF protection tokens.

    Token format (before base64): ``digest:when`` where *digest* is an HMAC
    over ``user_id:path:when``. Python 2 code: relies on ``long`` and on
    byte-string ``hmac.new``.
    """
    def __init__(self):
        # NOTE(review): does not call the base handler's __init__ and takes
        # no (request, response) arguments -- instances are constructed
        # directly in xsrf_protect(), so confirm this is intentional.
        # String used instead of user id when there is no user.
        # Not that it makes sense to protect unauthenticated
        # functionality from XSRF.
        self.ANONYMOUS_USER = 'anonymous'
        # DELIMITER character
        self.DELIMITER = ':'
        # 24 hours in seconds
        self.DEFAULT_TIMEOUT_SECS = 1*60*60*24
    def generate_token(self, key, user_id, path="", when=None):
        """Generates a URL-safe token for the given user, action, time tuple.
        Args:
            key: secret key to use.
            user_id: the user ID of the authenticated user.
            path: The path the token should be valid for.
            when: the time in seconds since the epoch at which the user was
                authorized for this action. If not set the current time is used.
        Returns:
            A string XSRF protection token.
        """
        when = when or int(time.time())
        # NOTE(review): hmac.new without an explicit digestmod defaults to
        # MD5 (Python 2) -- consider passing hashlib.sha256 explicitly.
        digester = hmac.new(str(key))
        digester.update(str(user_id))
        digester.update(self.DELIMITER)
        digester.update(str(path))
        digester.update(self.DELIMITER)
        digester.update(str(when))
        digest = digester.digest()
        # The raw digest plus the timestamp is base64-encoded so the
        # timestamp can be recovered during validation.
        token = base64.urlsafe_b64encode('%s%s%d' % (digest,
                                                     self.DELIMITER,
                                                     when))
        return token
    def validate_token(self, key, token, user_id, path="", current_time=None,
                       timeout=0):
        """Validates that the given token authorizes the user for the action.
        Tokens are invalid if the time of issue is too old or if the token
        does not match what generateToken outputs (i.e. the token was forged).
        Args:
            key: secret key to use.
            token: a string of the token generated by generateToken.
            user_id: the user ID of the authenticated user.
            path: The path the token was received on.
            current_time: Time at which the token was received (defaults to now)
            timeout: How long your tokens are valid in seconds before they time out
                (defaults to DEFAULT_TIMEOUT_SECS)
        Returns:
            A boolean - True if the user is authorized for the action, False
            otherwise.
        """
        if not token:
            return False
        if not timeout:
            timeout = self.DEFAULT_TIMEOUT_SECS
        try:
            decoded = base64.urlsafe_b64decode(str(token))
            # The timestamp is the last delimiter-separated field.
            # ``long`` is Python 2 only.
            token_time = long(decoded.split(self.DELIMITER)[-1])
        except (TypeError, ValueError):
            return False
        if current_time is None:
            current_time = time.time()
        # If the token is too old it's not valid.
        if current_time - token_time > timeout:
            return False
        # The given token should match the generated one with the same time.
        expected_token = self.generate_token(
            key, user_id, path=path, when=token_time)
        return self.const_time_compare(expected_token, token)
    @staticmethod
    def const_time_compare(a, b):
        """Compares the the given strings in constant time."""
        # NOTE(review): the stdlib hmac.compare_digest provides the same
        # guarantee and is preferred where available.
        if len(a) != len(b):
            return False
        equals = 0
        for x, y in zip(a, b):
            equals |= ord(x) ^ ord(y)
        return equals == 0
    def xsrf_token(self, path=None):
        """Generates an XSRF token for the given path.
        This function is mostly supposed to be used as a filter for a
        templating system, so that tokens can be conveniently generated
        directly in the template.
        Args:
            path: The path the token should be valid for. By default,
                the path of the current request.
        """
        user = self.ANONYMOUS_USER
        if not path:
            path = os.environ.get('PATH_INFO')
        if self.auth.get_user_by_session():
            user = self.auth.get_user_by_session().get_id()
        return self.generate_token(config.CSRF_SECRET_KEY, user, path)
class LoginServicesHandler(CSRFHandlar):
    """Login handler - handles all types of login.

    Supported flows: Facebook Account Kit (phone number) and email
    verification links.
    """
    def accountkit_login(self, code):
        """Initiates fb accountkit mobile no based login
        :params code:
            code to verify login request
        :returns:
            logged in mobile_no
        """
        app_id = config.CONFIG.fb_account_kit_app_id
        app_secret = config.CONFIG.fb_account_kit_app_secret
        me_endpoint_base_url = config.CONFIG.fb_account_kit_endpoint_url
        token_exchange_base_url = \
            config.CONFIG.fb_account_kit_token_exchange_url
        # Account Kit app access token format: "AA|<app_id>|<app_secret>".
        app_access_token = '|'.join(['AA', app_id, app_secret])
        params = {
            'grant_type': 'authorization_code',
            'code': code,
            'access_token': app_access_token
        }
        # Exchange the one-time login code for a user access token.
        token_exchange_url = token_exchange_base_url + '?' + \
            urllib.urlencode(params)
        token_exchange_req = urllib2.urlopen(token_exchange_url)
        token_exchange_result = json.loads(token_exchange_req.read()
                                           .decode('utf-8'))
        # Query the "me" endpoint for the account's phone number.
        me_endpoint_url = me_endpoint_base_url + '?access_token=' + \
            token_exchange_result['access_token']
        me_detail_request = urllib2.urlopen(me_endpoint_url)
        me_detail_response = json.loads(me_detail_request
                                        .read().decode('utf-8'))
        return me_detail_response['phone']
    def initiate_email_login(self, user):
        """Initiates email based login by sending verification mail
        :params user:
            user entity; must expose get_id() and .email
        """
        user_id = user.get_id()
        token = self.user_model.create_signup_token(user_id)
        verification_url = self.uri_for(
            'verification', type='signup', user_id=user_id,
            signup_token=token, _full=True)
        # Bug fix: datetime.time has no now(); datetime.datetime.now() is
        # the intended call. NOTE(review): this is the issue time, not an
        # expiry -- confirm whether a validity window should be added.
        valid_till = datetime.datetime.now()
        email_payload = {
            'verification_url': verification_url,
            'valid_till': valid_till
        }
        self.send_email(user.email, email_type='signup', payload=email_payload)
        return
    def verify_auth(self, authentication_token, authentication_type,
                    user_id=None):
        """varifies user based on request type and email.
        currently used for email login verification
        :params authentication_token:
            signup token
        :params authentication_type:
            verification type
        :params user_id:
            id of the user being verified. Added as an optional keyword for
            backward compatibility: the original body referenced an
            undefined ``user_id`` name and could only raise NameError.
        :returns:
            user if verified
        """
        user, ts = self.user_model.get_by_auth_token(
            int(user_id), authentication_token, authentication_type)
        if not user:
            logging.info(
                'Could not find any user with id "%s" signup token "%s"',
                user_id, authentication_token)
            raise Exception('error', 'Could not find any user')
        # store user data in the session
        self.auth.set_session(
            self.auth.store.user_to_dict(user), remember=True)
        # The signup token is single-use; remove it once consumed.
        self.user_model.delete_authentication_token(
            user.get_id(), authentication_token)
        if not user.verified:
            user.verified = True
            user.put()
        return user
|
"""Query interfaces and their information of a device."""
from typing import Dict, Callable, List, Any
import pytest
from nornir.core.filter import F
from nornir.core.task import MultiResult, Result
from nornir_napalm.plugins.tasks import napalm_get
from nuts.context import NornirNutsContext
from nuts.helpers.filters import filter_hosts
from nuts.helpers.result import AbstractHostResultExtractor
class InterfacesExtractor(AbstractHostResultExtractor):
    """Extracts the ``interfaces`` getter payload from a napalm_get result."""

    def single_transform(self, single_result: MultiResult) -> Dict[str, Dict[str, Any]]:
        extracted = self._simple_extract(single_result)
        return extracted["interfaces"]
class InterfacesContext(NornirNutsContext):
    """nuts context wiring the interface tests to the napalm ``interfaces`` getter."""

    def nuts_task(self) -> Callable[..., Result]:
        return napalm_get

    def nuts_arguments(self) -> Dict[str, List[str]]:
        getters: List[str] = ["interfaces"]
        return {"getters": getters}

    def nornir_filter(self) -> F:
        test_data = self.nuts_parameters["test_data"]
        return filter_hosts(test_data)

    def nuts_extractor(self) -> InterfacesExtractor:
        return InterfacesExtractor(self)


CONTEXT = InterfacesContext
class TestNapalmInterfaces:
    """Asserts per-interface facts reported by the napalm ``interfaces`` getter."""

    @pytest.mark.nuts("name,is_enabled")
    def test_is_enabled(self, single_result, name, is_enabled):
        interface = single_result.result[name]
        assert interface["is_enabled"] == is_enabled

    @pytest.mark.nuts("name,is_up")
    def test_is_up(self, single_result, name, is_up):
        interface = single_result.result[name]
        assert interface["is_up"] == is_up

    @pytest.mark.nuts("name,mac_address")
    def test_mac_address(self, single_result, name, mac_address):
        interface = single_result.result[name]
        assert interface["mac_address"] == mac_address

    @pytest.mark.nuts("name,mtu")
    def test_mtu(self, single_result, name, mtu):
        interface = single_result.result[name]
        assert interface["mtu"] == mtu

    @pytest.mark.nuts("name,speed")
    def test_speed(self, single_result, name, speed):
        interface = single_result.result[name]
        assert interface["speed"] == speed
|
# !/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets as ds
import matplotlib.colors
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import euclidean_distances
if __name__ == "__main__":
    # Demo: Affinity Propagation clustering on 4 Gaussian blobs, sweeping the
    # preference parameter and plotting the resulting clusterings.
    N = 400
    centers = [[1, 2], [-1, -1], [1, -1], [-1, 1]]
    data, y = ds.make_blobs(N, n_features=2, centers=centers, cluster_std=[0.5, 0.25, 0.7, 0.5], random_state=0)
    # AP similarities are negative squared euclidean distances; the median
    # similarity is the customary default preference.
    m = euclidean_distances(data, squared=True)
    preference = -np.median(m)
    print('Preference:', preference)
    matplotlib.rcParams['font.sans-serif'] = [u'SimHei']
    matplotlib.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(12, 9), facecolor='w')
    for i, mul in enumerate(np.linspace(1, 4, 9)):
        print(mul)
        p = mul * preference
        model = AffinityPropagation(affinity='euclidean', preference=p)
        af = model.fit(data)
        # NOTE(review): cluster_centers_indices_ is None when AP fails to
        # converge, in which case len() below would raise -- confirm whether
        # a guard is wanted here.
        center_indices = af.cluster_centers_indices_
        n_clusters = len(center_indices)
        # Bug fix: a leftover Python-2 "print a, b" conversion produced
        # "print('p = %.1f' % mul), p, ..." -- a no-op tuple expression that
        # only printed the first value. Print all fields in one call.
        print('p = %.1f' % mul, p, '聚类簇的个数为:', n_clusters)
        y_hat = af.labels_
        plt.subplot(3, 3, i + 1)
        plt.title(u'Preference:%.2f,簇个数:%d' % (p, n_clusters))
        # One distinct hex color per cluster, spread from red to blue.
        clrs = []
        for c in np.linspace(16711680, 255, n_clusters):
            clrs.append('#%06x' % int(c))
        for k, clr in enumerate(clrs):
            cur = (y_hat == k)
            plt.scatter(data[cur, 0], data[cur, 1], c=clr, edgecolors='none')
            # Draw a line from every member to its exemplar (cluster center).
            center = data[center_indices[k]]
            for x in data[cur]:
                plt.plot([x[0], center[0]], [x[1], center[1]], color=clr, zorder=1)
        plt.scatter(data[center_indices, 0], data[center_indices, 1], s=100, c=clrs, marker='*', edgecolors='k',
                    zorder=2)
        plt.grid(True)
    plt.tight_layout()
    plt.suptitle(u'AP聚类', fontsize=20)
    plt.subplots_adjust(top=0.92)
    plt.show()
|
def test_import():
    """Test the imports work"""
    # Smoke test: importing each public module must not raise.
    import poker_ai
    from poker_ai import ai, poker
    from poker_ai.ai import runner
    # NOTE: ``runner`` (and below ``player``/``state``) are rebound by later
    # imports from different packages -- harmless here since only
    # importability is being exercised.
    from poker_ai.terminal import runner
    from poker_ai.games.short_deck import player, state
    from poker_ai.poker import actions, card, dealer, deck, engine, player
    from poker_ai.poker import state, table, evaluation
    from poker_ai.poker.evaluation import eval_card, evaluator, lookup
|
from autoflow.workflow.components.data_process_base import AutoFlowDataProcessAlgorithm
__all__ = ["RepeatedEditedNearestNeighbours"]
class RepeatedEditedNearestNeighbours(AutoFlowDataProcessAlgorithm):
class__ = "RepeatedEditedNearestNeighbours"
module__ = "imblearn.under_sampling"
|
# Print a sample sentence converted to title case.
# (Variable renamed from "string" to avoid shadowing the stdlib module name.)
text = "hello world python"
print(text.title())
|
from .register import RegisterSerializer
from .login import LoginSerializer |
'''
This package is the base Python client for the Spreader distributed work system.
The client should be fairly lightweight, to allow easy portability.
Since all the work is done on the DB server, that should be fairly easy to accomplish.
'''
# Canonical package/distribution name used by packaging metadata.
NAME = 'pyspreader'
|
# -*- coding: utf-8 -*-
from setuptools import setup
# Packaging metadata for the locust_exporter distribution.
setup(
    name='locust_exporter',
    version='1.1.0',
    # NOTE(review): description/long_description appear copied from Locust
    # itself rather than describing this Prometheus exporter -- confirm.
    description="Website load testing framework",
    long_description="""Locust is a python utility for doing easy, distributed load testing of a web site""",
    classifiers=[
        "License :: OSI Approved :: Apache 2",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
    ],
    keywords='',
    author='Arthur Halet',
    author_email='',
    license='Apache 2.0',
    packages=['locust_exporter'],
    include_package_data=True,
    zip_safe=False,
    install_requires=["prometheus_client>=0.7.1", "requests>=2.9.1"],
    # Console entry point: "locust_exporter" runs locust_exporter.py's main().
    entry_points={
        'console_scripts': [
            'locust_exporter = locust_exporter.locust_exporter:main',
        ]
    },
)
|
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
    """Runs GLM on the hhp_107_01 dataset against a small H2O cloud.

    Python 2 test (uses print statements); relies on the shared h2o test
    harness for cloud lifecycle and error checking.
    """
    def tearDown(self):
        # Fail the test if the sandbox logs collected any errors.
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        localhost = h2o.decide_if_localhost()
        if (localhost):
            # maybe fails more reliably with just 2 jvms?
            h2o.build_cloud(2,java_heap_GB=5)
        else:
            h2o_hosts.build_cloud_with_hosts()
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_C_hhp_107_01(self):
        csvPathname = "hhp_107_01.data.gz"
        print "\n" + csvPathname
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put', timeoutSecs=15)
        ## h2b.browseTheCloud()
        # build up the parameter string in X
        # Column 106 is the response; x is left empty so GLM uses all columns.
        y = "106"
        x = ""
        # go right to the big X and iterate on that case
        ### for trial in range(2):
        for trial in range(2):
            print "\nTrial #", trial, "start"
            print "\nx:", x
            print "y:", y
            start = time.time()
            kwargs = {'y': y}
            glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=200, **kwargs)
            # Sanity-check GLM output, focusing on coefficient C58.
            h2o_glm.simpleCheckGLM(self, glm, 'C58', **kwargs)
            h2o.check_sandbox_for_errors()
            ### h2b.browseJsonHistoryAsUrlLastMatch("GLM")
            print "\nTrial #", trial
if __name__ == '__main__':
    # Delegate to the shared h2o test harness entry point.
    h2o.unit_main()
|
# Copyright 2019 Allan Galarza
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import OrderedDict
from tibiawikisql import schema
from tibiawikisql.models import abc
from tibiawikisql.models.creature import CreatureDrop
from tibiawikisql.models.npc import NpcBuyOffer, NpcSellOffer
from tibiawikisql.models.quest import QuestReward
from tibiawikisql.utils import clean_links, clean_question_mark, client_color_to_rgb, parse_boolean, parse_float, \
parse_integer, parse_sounds
# ItemAttribute names holding elemental resistance percentages; the trailing
# '%' is stripped when building Item.resistances.
ELEMENTAL_RESISTANCES = ['physical%', 'earth%', 'fire%', 'energy%', 'ice%', 'holy%', 'death%', 'drowning%']
# Attribute name -> look-text template for skill bonuses granted by an item.
SKILL_ATTRIBUTES_MAPPING = {
    "magic": "magic level {0}",
    "axe": "axe fighting {0}",
    "sword": "sword fighting {0}",
    "club": "club fighting {0}",
    "distance": "distance fighting {0}",
    "shielding": "shielding {0}",
    "fist": "fist fighting {0}",
}
class Item(abc.Row, abc.Parseable, table=schema.Item):
    """Represents an Item.
    Attributes
    ----------
    id: :class:`int`
        The id of the containing article.
    title: :class:`str`
        The title of the containing article.
    timestamp: :class:`int`
        The last time the containing article was edited.
    name: :class:`str`
        The in-game name of the item.
    plural: :class:`str`
        The plural of the name.
    article: :class:`str`
        The article that goes before the name when looking at the item.
    marketable: :class:`bool`
        Whether the item can be traded on the Market or not.
    stackable: :class:`bool`
        Whether the item can be stacked or not.
    pickupable: :class:`bool`
        Whether the item can be picked up or not.
    value_sell: :class:`int`
        The highest price an NPC will buy this item for.
    value_buy: :class:`int`
        The lowest price an NPC will sell this item for.
    weight: :class:`float`
        The item's weight in ounces.
    item_class: :class:`str`
        The item class the item belongs to.
    type: :class:`str`
        The item's type.
    type_secondary: :class:`str`
        The item's secondary type, if any.
    flavor_text: :class:`str`
        The extra text that is displayed when some items are looked at.
    light_color: :class:`int`, optional.
        The color of the light emitted by this item in RGB, if any.
    light_radius: :class:`int`
        The radius of the light emitted by this item, if any.
    client_id: :class:`int`
        The internal id of the item in the client.
    version: :class:`str`
        The client version where this item was first implemented.
    image: :class:`bytes`
        The item's image in bytes.
    attributes: list of :class:`ItemAttribute`
        The item's attributes.
    dropped_by: list of :class:`CreatureDrop`
        List of creatures that drop this item, with the chances.
    sold_by: list of :class:`NpcSellOffer`
        List of NPCs that sell this item.
    bought_by: list of :class:`NpcBuyOffer`
        List of NPCs that buy this item.
    awarded_in: list of :class:`QuestReward`
        List of quests that give this item as reward.
    sounds: list of :class:`ItemSound`.
        List of sounds made when using the item.
    """
    # Wiki infobox parameter -> (attribute name, parser) mapping.
    _map = {
        "article": ("article", str.strip),
        "actualname": ("name", str.strip),
        "plural": ("plural", clean_question_mark),
        "marketable": ("marketable", parse_boolean),
        "stackable": ("stackable", parse_boolean),
        "pickupable": ("pickupable", parse_boolean),
        "weight": ("weight", parse_float),
        "npcvalue": ("value_sell", parse_integer),
        "npcprice": ("value_buy", parse_integer),
        "flavortext": ("flavor_text", str.strip),
        "itemclass": ("item_class", str.strip),
        "primarytype": ("type", str.strip),
        "secondarytype": ("type_secondary", str.strip),
        "lightcolor": ("light_color", lambda x: client_color_to_rgb(parse_integer(x))),
        "lightradius": ("light_radius", parse_integer),
        "implemented": ("version", str.strip),
        "itemid": ("client_id", parse_integer)
    }
    _pattern = re.compile(r"Infobox[\s_]Item")
    __slots__ = (
        "article_id",
        "title",
        "timestamp",
        "name",
        "plural",
        "article",
        "marketable",
        "stackable",
        "pickupable",
        "value_sell",
        "value_buy",
        "weight",
        "item_class",
        "type",
        "type_secondary",
        "flavor_text",
        "light_color",
        "light_radius",
        "client_id",
        "version",
        "image",
        "attributes",
        "dropped_by",
        "sold_by",
        "bought_by",
        "awarded_in",
        "sounds",
    )
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    @property
    def attributes_dict(self):
        """:class:`dict`: A mapping of the attributes this item has."""
        if self.attributes:
            return {a.name: a.value for a in self.attributes}
        return dict()
    @property
    def resistances(self):
        """:class:`collections.OrderedDict`: A mapping of the elemental resistances of this item."""
        resistances = dict()
        attributes = self.attributes_dict
        for element in ELEMENTAL_RESISTANCES:
            value = attributes.get(element)
            if value is not None:
                # Strip the trailing '%' from the attribute name.
                resistances[element[:-1]] = int(value)
        # Highest resistance first.
        return OrderedDict(sorted(resistances.items(), key=lambda t: t[1], reverse=True))
    @property
    def look_text(self):
        """:class:`str`: The item's look text."""
        # Bug fix: the original appended the *boolean* result of
        # ``self.name[0] in [vowels]`` when ``article`` was falsy, which made
        # "".join() below raise TypeError. Fall back to a computed "a"/"an".
        article = self.article or ("an" if self.name[0] in "aeiou" else "a")
        look_text = ["You see ", article, f" {self.name}"]
        self._get_attributes_look_text(look_text)
        attributes = self.attributes_dict
        if "charges" in attributes:
            look_text.append(f" that has {attributes['charges']} charges left")
        if "duration" in attributes:
            look_text.append(" that is brand-new")
        look_text.append(".")
        self._get_requirements(look_text)
        if self.weight:
            look_text.append(f"\nIt weights {self.weight:.2f} oz.")
        if self.flavor_text:
            look_text.append("\n")
            look_text.append(self.flavor_text)
        return "".join(look_text)
    def _get_requirements(self, look_text):
        """Append the level/vocation requirement sentence to *look_text*."""
        attributes = self.attributes_dict
        separator = " and " if self.item_class != "Runes" else ", "
        vocation = "players"
        verb = "wielded properly" if self.item_class != "Runes" else "used"
        if "vocation" in attributes:
            vocation = separator.join(attributes["vocation"].split("+"))
        if "without" in vocation:
            vocation = "players without vocations"
        if "level" in attributes or vocation != "players":
            look_text.append(f" It can only be {verb} by {vocation}")
            if "level" in attributes:
                look_text.append(f" of level {attributes['level']}")
                if "magic_level" in attributes and attributes["magic_level"] != "0":
                    look_text.append(f" and magic level {attributes['magic_level']}")
                look_text.append(" or higher")
            look_text.append(".")
    def _get_attributes_look_text(self, look_text):
        """Append the parenthesized attribute summary (Atk, Def, protections...)."""
        attributes = self.attributes_dict
        attributes_rep = []
        self._parse_combat_attributes(attributes, attributes_rep)
        self._parse_skill_attributes(attributes, attributes_rep)
        if "regeneration" in attributes:
            attributes_rep.append(attributes["regeneration"])
        if self.resistances:
            resistances = []
            for element, value in self.resistances.items():
                resistances.append(f"{element} {value:+d}%")
            attributes_rep.append(f"protection {', '.join(resistances)}")
        if "volume" in attributes:
            attributes_rep.append(f"Vol:{attributes['volume']}")
        if attributes_rep:
            look_text.append(f" ({', '.join(attributes_rep)})")
    @staticmethod
    def _parse_combat_attributes(attributes, attributes_rep):
        """Append combat stats (range, attack, defense, armor) to *attributes_rep*."""
        if "range" in attributes:
            attributes_rep.append(f"Range: {attributes['range']}")
        if "attack+" in attributes:
            attributes_rep.append(f"Atk+{attributes['attack+']}")
        if "hit%+" in attributes:
            attributes_rep.append(f"Hit%+{attributes['hit%+']}")
        if "attack" in attributes:
            # Weapons may add elemental damage on top of physical attack.
            elements = ['fire_attack', 'earth_attack', 'ice_attack', 'energy_attack']
            attacks = dict()
            physical_attack = int(attributes["attack"])
            for element in elements:
                value = attributes.pop(element, None)
                if value:
                    attacks[element[:-7]] = int(value)
            attack = f"Atk:{physical_attack}"
            if attacks:
                attack += " physical + "
                attack += "+ ".join(f"{v} {e}" for e, v in attacks.items())
            attributes_rep.append(attack)
        if "defense" in attributes:
            defense = f"Def:{attributes['defense']}"
            if "defense_modifier" in attributes:
                defense += f" {attributes['defense_modifier']}"
            attributes_rep.append(defense)
        if "armor" in attributes:
            attributes_rep.append(f"Arm:{attributes['armor']}")
    @staticmethod
    def _parse_skill_attributes(attributes, attributes_rep):
        """Append skill bonuses (e.g. "sword fighting +1") to *attributes_rep*."""
        for attribute, template in SKILL_ATTRIBUTES_MAPPING.items():
            if attribute in attributes:
                attributes_rep.append(template.format(attributes[attribute]))
    @classmethod
    def from_article(cls, article):
        """Parse an *article* into an Item, collecting its attributes and sounds."""
        item = super().from_article(article)
        if item is None:
            return None
        item.attributes = []
        # Direct one-to-one attribute parameters.
        for name, attribute in ItemAttribute._map.items():
            if attribute in item._raw_attributes and item._raw_attributes[attribute]:
                item.attributes.append(ItemAttribute(item_id=item.article_id, name=name,
                                                     value=item._raw_attributes[attribute]))
        # Free-form "attrib" parameter, e.g. "sword fighting +1, regeneration".
        if "attrib" in item._raw_attributes:
            attribs = item._raw_attributes["attrib"].split(",")
            for attr in attribs:
                attr = attr.strip()
                m = re.search(r'([\s\w]+)\s([+\-\d]+)', attr)
                if m:
                    attribute = m.group(1).replace("fighting", "").replace("level", "").strip()
                    value = m.group(2)
                    item.attributes.append(ItemAttribute(item_id=item.article_id, name=attribute, value=value))
                if "regeneration" in attr:
                    item.attributes.append(ItemAttribute(item_id=item.article_id, name="regeneration",
                                                         value="faster regeneration"))
        # Elemental resistances, e.g. "fire +5%".
        if "resist" in item._raw_attributes:
            resistances = item._raw_attributes["resist"].split(",")
            for element in resistances:
                element = element.strip()
                m = re.search(r'([a-zA-Z0-9_ ]+) +(-?\+?\d+)%', element)
                if m:
                    attribute = m.group(1) + "%"
                    try:
                        value = int(m.group(2))
                    except ValueError:
                        value = 0
                    item.attributes.append(ItemAttribute(item_id=item.article_id, name=attribute, value=value))
        # Vocation requirement, normalized to a '+'-separated list.
        vocations = item._raw_attributes.get('vocrequired')
        if vocations and "none" not in vocations.lower():
            vocation = vocations.replace('and', '+').replace(',', '+').replace(' ', '')
            item.attributes.append(ItemAttribute(item_id=item.article_id, name="vocation", value=vocation))
        if "sounds" in item._raw_attributes:
            sounds = parse_sounds(item._raw_attributes["sounds"])
            if sounds:
                item.sounds = [ItemSound(item_id=item.article_id, content=sound) for sound in sounds]
        return item
    def insert(self, c):
        """Insert the item and its attributes/sounds using cursor *c*."""
        super().insert(c)
        for attribute in getattr(self, "attributes", []):
            attribute.insert(c)
        for attribute in getattr(self, "sounds", []):
            attribute.insert(c)
    @classmethod
    def get_by_field(cls, c, field, value, use_like=False):
        """Fetch an item by *field*, populating its related rows (drops, offers...)."""
        item: cls = super().get_by_field(c, field, value, use_like)
        if item is None:
            return None
        item.attributes = ItemAttribute.search(c, "item_id", item.article_id)
        item.dropped_by = CreatureDrop.search(c, "item_id", item.article_id, sort_by="chance", ascending=False)
        item.sold_by = NpcSellOffer.search(c, "item_id", item.article_id, sort_by="value", ascending=True)
        item.bought_by = NpcBuyOffer.search(c, "item_id", item.article_id, sort_by="value", ascending=False)
        item.awarded_in = QuestReward.search(c, "item_id", item.article_id)
        item.sounds = ItemSound.search(c, "item_id", item.article_id)
        return item
class Key(abc.Row, abc.Parseable, table=schema.ItemKey):
    """
    Represents a key item.
    Attributes
    ----------
    article_id: :class:`int`
        The id of the containing article.
    title: :class:`str`
        The title of the containing article.
    timestamp: :class:`int`
        The last time the containing article was edited.
    name: :class:`str`
        The name of the creature, as displayed in-game.
    number: :class:`int`
        The key's number.
    item_id: :class:`int`
        The article id of the item this key is based on.
    material: :class:`str`
        The key's material.
    location: :class:`str`
        The key's location.
    notes: :class:`str`
        Notes about the key.
    origin: :class:`str`
        Notes about the origin of the key.
    version: :class:`str`
        The client version where this creature was first implemented.
    """
    __slots__ = (
        "article_id",
        "title",
        "timestamp",
        "name",
        "number",
        "item_id",
        "material",
        "notes",
        "origin",
        "version",
        "location",
    )
    # Wiki infobox parameter -> (attribute name, parser) mapping.
    _map = {
        "aka": ("name", clean_links),
        "number": ("number", int),
        "primarytype": ("material", str.strip),
        "location": ("location", clean_links),
        "origin": ("origin", clean_links),
        "shortnotes": ("notes", clean_links),
        "implemented": ("version", str.strip),
    }
    _pattern = re.compile(r"Infobox[\s_]Key")
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def insert(self, c):
        # When item_id is already known, a plain insert suffices; otherwise
        # resolve it with a subquery matching the item titled
        # "<material> Key" (e.g. "Silver Key").
        # NOTE(review): if ``material`` is None the concatenation below
        # raises TypeError -- confirm material is always parsed.
        if getattr(self, "item_id", None):
            super().insert(c)
            return
        else:
            query = f"""INSERT INTO {self.table.__tablename__}(article_id, title, number, item_id, name, material,
                    location, origin, notes, version, timestamp)
                    VALUES(?, ?, ?, (SELECT article_id FROM item WHERE title = ?), ?, ?, ?, ?, ?, ?, ?)"""
            c.execute(query, (self.article_id, self.title, self.number, self.material + " Key", self.name,
                              self.material, self.location, self.origin, self.notes, self.version, self.timestamp))
class ItemAttribute(abc.Row, table=schema.ItemAttribute):
    """
    Represents a single attribute of an item.

    Attributes
    ----------
    item_id: :class:`int`
        The id of the item the attribute belongs to.
    name: :class:`str`
        The name of the attribute.
    value: :class:`str`
        The value of the attribute.
    """
    __slots__ = (
        "item_id",
        "name",
        "value",
    )
    # Maps infobox parameter names to database attribute names.
    _map = {
        "level": "levelrequired",
        "attack": "attack",
        "defense": "defense",
        "defense_modifier": "defensemod",
        "armor": "armor",
        "hands": "hands",
        "imbue_slots": "imbueslots",
        "imbuements": "imbuements",
        "attack+": "atk_mod",
        "hit%+": "hit_mod",
        "range": "range",
        "damage_type": "damagetype",
        "damage": "damage",
        "mana": "mana",
        "magic_level": "mlrequired",
        "words": "words",
        "critical_chance": "crithit_ch",
        "critical%": "critextra_dmg",
        "hpleech_chance": "hpleech_ch",
        "hpleech%": "hpleech_am",
        "manaleech_chance": "manaleech_ch",
        "manaleech%": "manaleech_am",
        "volume": "volume",
        "charges": "charges",
        "food_time": "regenseconds",
        "duration": "duration",
        "fire_attack": "fire_attack",
        "energy_attack": "energy_attack",
        "ice_attack": "ice_attack",
        "earth_attack": "earth_attack",
        "destructible": "destructible",
        "holds_liquid": "holdsliquid",
        "hangable": "hangable",
        "writable": "writable",
        "rewritable": "rewritable",
        "writable_chars": "writechars",
        "consumable": "consumable",
        "fansite": "fansite"
    }

    def insert(self, c):
        """Insert this attribute, stripping wiki links from its value."""
        self.table.insert(c, item_id=self.item_id, name=self.name,
                          value=clean_links(str(self.value)))
class ItemSound(abc.Row, table=schema.ItemSound):
    """
    Represents a sound made by an item.

    Attributes
    ----------
    item_id: :class:`int`
        The article id of the item that does this sound.
    content: :class:`str`
        The content of the sound.
    """
    __slots__ = (
        "item_id",
        "content",
    )

    def __repr__(self):
        """Return a debug representation listing the set, non-None slots."""
        pairs = ((slot, getattr(self, slot, None)) for slot in self.__slots__)
        shown = [f"{slot}={value!r}" for slot, value in pairs if value is not None]
        return f"{self.__class__.__name__}({','.join(shown)})"

    def insert(self, c):
        """Insert this sound into the database."""
        self.table.insert(c, item_id=self.item_id, content=self.content)
|
# -*- coding: utf-8 -*-
"""
邮件提醒功能
"""
import smtplib
from email.mime.text import MIMEText
from email.header import Header
class EmailNotification(object):
    """Send plain-text email notifications over SMTP.

    Sending is globally gated by the class-level ``enable`` flag, so callers
    can leave notification calls in place and toggle them at startup.
    """
    # SECURITY: credentials are hardcoded in source; move them to a config
    # file or environment variables before deploying.
    sender = 'hujb2015@163.com'
    password = 'IlovePython'
    smtpserver = 'smtp.163.com'
    enable = False  # notifications are off until set_enable(True) is called

    @staticmethod
    def set_enable(enable=False):
        """Globally enable (True) or disable (False) sending."""
        EmailNotification.enable = enable

    @staticmethod
    def is_enable():
        """Return whether sending is currently enabled."""
        return EmailNotification.enable

    @staticmethod
    def send_email(receiver, subject, words):
        """Send a UTF-8 plain-text message; silently a no-op when disabled.

        Any failure is caught and printed so that a notification error never
        crashes the caller.
        """
        if not EmailNotification.is_enable():
            return
        try:
            msg = MIMEText(words, 'plain', 'utf-8')  # 'utf-8' needed for non-ASCII bodies
            msg['Subject'] = Header(subject, 'utf-8')  # mail subject
            msg['from'] = EmailNotification.sender     # sender address
            msg['to'] = receiver                       # recipient address
            smtp = smtplib.SMTP()
            smtp.connect(EmailNotification.smtpserver)
            try:
                smtp.login(EmailNotification.sender, EmailNotification.password)
                # Passing the full message via as_string() avoids the 554 error
                # mentioned in the original code.
                smtp.sendmail(EmailNotification.sender, receiver, msg.as_string())
            finally:
                # Fix: always close the connection, even when login/sendmail
                # raises; the original leaked the socket on failure.
                smtp.quit()
            print("邮件发送成功!")
        except Exception as e:  # broad by design: notification failure must not propagate
            print(e)
if __name__ == '__main__':
    # Manual smoke test: enable notifications and send one message.
    # (Fix: removed a stray trailing '|' character that made this line a
    # syntax error.)
    en = EmailNotification()
    en.set_enable(True)
    en.send_email("hujb2000@163.com", '不是垃圾', '是我在测死')
# coding: utf-8
import re
from collections import Counter
LOGNAME = '../data/201603021414.log'
MOVEREX = re.compile('/move/([rps]+)/([rps]+)', re.I)
MOVES = "rps"
DRAW = 0
HWIN = 1
CWIN = -1
# TODO: implement this in JS as well
def switch_count(s, step=1):
    """Count position-weighted transitions between consecutive moves in *s*.

    Returns a pair of Counters ``(transitions, totals)`` where
    ``transitions[(a, b)]`` is the weighted number of times move *b* followed
    move *a* (later transitions weigh more, growing by *step* per character),
    and ``totals[a]`` is the unweighted number of times *a* was followed by
    any move.
    """
    transitions = Counter()
    totals = Counter()
    previous = None
    weight = 1.0
    for move in s:
        if previous:
            transitions[previous, move] += weight
            totals[previous] += 1
        previous = move
        weight += step
    return transitions, totals
def get_skew(cnt, h):
    """Pick the most favourable predicted move after human move *h*.

    Each candidate move m is scored as (weighted count of the human move m
    beats) minus (weighted count of the human move that beats m), using the
    transition counts in *cnt*. Returns ``(move, score / total)`` for a strict
    winner, or ``('r', 0)`` when there is no strict winner.
    """
    to_r = cnt[h, 'r']
    to_p = cnt[h, 'p']
    to_s = cnt[h, 's']
    total = to_r + to_p + to_s
    scores = {'r': to_s - to_p, 'p': to_r - to_s, 's': to_p - to_r}
    for move in 'rps':
        rivals = [scores[m] for m in 'rps' if m != move]
        if scores[move] > max(rivals):
            return move, scores[move] / total
    return 'r', 0
def score_switch(hs, cs, limit=3, skew=0.2, base=True):
    """Score how much the skew predictor would have beaten the real computer.

    Replays the game from round 8 on: whenever the previous human move has at
    least *limit* observed transitions and the predictor's skew exceeds
    *skew*, accumulate (actual outcome) - (outcome against the prediction).
    *base* is forwarded to switch_count as the weighting step.
    """
    gained = 0
    for pos in range(8, len(hs) - 1):
        counts, totals = switch_count(hs[:pos], base)
        last = hs[pos - 1]
        if totals[last] < limit:
            continue
        predicted, strength = get_skew(counts, last)
        if strength > skew:
            gained += decide(hs[pos], cs[pos]) - decide(hs[pos], predicted)
    return gained
def decide(h, c):
    """Score one round from the human's side: 1 win, 0 draw, -1 loss."""
    outcome = (MOVES.index(h) - MOVES.index(c)) % 3
    return -1 if outcome == 2 else outcome
def score(hs, cs):
    """Tally round outcomes (1/0/-1) over two aligned move strings."""
    return Counter(decide(h, c) for h, c in zip(hs, cs))
def get_all_positions(fname=LOGNAME):
    """Yield (human_moves, computer_moves) pairs parsed from the log file."""
    with open(fname) as logfile:
        for line in logfile:
            match = MOVEREX.search(line)
            if match:
                yield match.group(1).lower(), match.group(2).lower()
def get_matches(fname=LOGNAME):
    """Collect the unique maximal (human, computer) move pairs from the log.

    Positions are grouped by length; a position is dropped when some recorded
    position one move longer extends it (i.e. it is a strict prefix of a
    continuation), otherwise it is kept. The number of dropped prefixes is
    printed as a diagnostic.
    """
    matches = {}
    maxlen = 0
    for hs, cs in get_all_positions(fname):
        matches.setdefault(len(hs), set()).add((hs, cs))
        maxlen = max(maxlen, len(hs))
    dropped = 0
    uniqueset = set()
    # Fix: the original added `pos` to uniqueset once per successor that did
    # NOT extend it, so almost every prefix was kept (even ones that were
    # extended), and maximal-length positions -- which have no successors --
    # were never kept at all. A position is unique iff NO successor extends it.
    for i in range(1, maxlen + 1):
        nxt = matches.get(i + 1, ())
        for pos in matches.get(i, ()):
            if any(n[0].startswith(pos[0]) and n[1].startswith(pos[1]) for n in nxt):
                dropped += 1
            else:
                uniqueset.add(pos)
    print(dropped)
    return uniqueset
def evalparam(matches, skew, base, verbose=False):
hwin = cwin = 0
totg = 0
for hs, cs in matches:
if 'r' * 6 in hs or 's' * 6 in hs or 'p' * 6 in hs:
continue
s = score(hs, cs)
sc, tsc = switch_count(hs, base)
ss = score_switch(hs, cs, skew=skew, base=base)
totg += ss
if s[HWIN] == 19:
hwin += 1
if verbose:
print 'h', hs, cs, ss
if s[CWIN] == 19:
cwin += 1
if verbose:
print 'c', hs, cs, ss
return totg
if __name__ == '__main__':
    # Grid-search the (skew, base) parameter space and report the best scores.
    scores = []
    matches = get_matches()
    print len(matches)
    # skew threshold swept over [0, 0.95] in steps of 0.05
    for skew in range(20):
        skew = skew / 20.0
        # weighting step swept over [0, 1.9] in steps of 0.1
        for base in range(20):
            base = base / 10.0
            good = evalparam(matches, skew, base)
            print skew, base, good
            scores.append((good, skew, base))
    # highest total gain first
    scores.sort(reverse=True)
    print scores[:50]
|
import numpy as np
import subprocess, os, shutil
from astropy.io import fits, ascii as ap_ascii
from astropy.table import Table
def TelluricCorrectPrimary(path_list, probenum, molecfit_dir=''):
    """Telluric-correct a reduced frame using its primary standard star.

    Sums the flux of the primary ('P') fibres in the requested probe, derives
    a telluric transfer function with TelluricCorrect/molecfit, applies it to
    the whole frame (flux and variance) and writes the result to a
    ``*fcal.fits`` file.

    Parameters
    ----------
    path_list : sequence of str
        Paths to the reduced frames; element 1 is the red.fits frame used.
    probenum : int
        Probe number of the primary-standard bundle.
    molecfit_dir : str
        Directory containing the molecfit binaries.

    Returns
    -------
    list
        [path_list[0], path to the telluric-corrected fcal.fits file].
    """
    # Fix: the HDUList was previously never closed; the context manager
    # guarantees the file handle is released after writing.
    with fits.open(path_list[1]) as hdulist:
        fibre = hdulist['FIBRES_IFU']
        # Load in SS flux data: select primary-standard fibres of this probe
        ww = np.where((fibre.data['PROBENUM'] == probenum) & (fibre.data['TYPE'] == 'P'))
        flux_data_raw = np.sum(hdulist[0].data[ww, :].squeeze(), axis=0)
        sigma_flux = np.sqrt(np.sum(hdulist[1].data[ww, :].squeeze(), axis=0))
        # Might put in an interpolation over NaNs; for now just taking a straight copy
        flux_data = flux_data_raw.copy()
        # Reconstruct the wavelength axis from the FITS WCS keywords
        header = hdulist[0].header
        crval1 = header['CRVAL1']
        cdelt1 = header['CDELT1']
        naxis1 = header['NAXIS1']
        crpix1 = header['CRPIX1']
        wave_axis = crval1 + cdelt1 * (np.arange(naxis1) + 1 - crpix1)
        transfer_function, sigma_transfer, corrected_flux = TelluricCorrect(
            path_list[1], flux_data, sigma_flux, wave_axis,
            mf_bin_dir=molecfit_dir, primary=True)
        uncorrected_flux = hdulist[0].data.copy()
        hdulist[0].data *= transfer_function
        # Propagate variance: var_out = flux_out^2 * ((sig_T/T)^2 + var_in/flux_in^2)
        hdulist[1].data = hdulist[0].data**2 * ((sigma_transfer / transfer_function)**2 +
                                                hdulist[1].data / uncorrected_flux**2)
        path_telluric_corrected = path_list[1].replace('red.fits', 'fcal.fits')
        hdulist.writeto(path_telluric_corrected, overwrite=True)
    return [path_list[0], path_telluric_corrected]
def TelluricCorrect(fcal_fname, star_flux, star_flux_err, wave, mf_bin_dir = '',
    wrange_include='none', delete_files=True, quiet=True, primary=False):
    """Fit a telluric absorption model with ESO's molecfit and return the
    transfer function needed to correct the observed spectra.

    The standard-star spectrum is written to an ascii table, a molecfit
    parameter file is generated from the frame's FITS headers, and the
    external ``molecfit`` and ``calctrans`` binaries are run. The fitted
    transmission is read back from the ``*_tac.fits`` output.

    Parameters
    ----------
    fcal_fname : str
        Path of the fcal.fits frame (red.fits when ``primary`` is True).
    star_flux, star_flux_err : numpy.ndarray
        Standard-star flux and error spectra. NOTE: modified in place
        (zero flux -> NaN, zero error -> inf).
    wave : numpy.ndarray
        Air wavelengths in Angstroms; converted to microns internally.
    mf_bin_dir : str
        Directory holding the molecfit binaries.
    wrange_include : str
        Wavelength range(s) to include in the fit, or 'none'.
    delete_files : bool
        If True, delete all intermediary molecfit files when done.
    quiet : bool
        If True, suppress the molecfit binaries' stdout.
    primary : bool
        True when correcting a primary-standard (red.fits) observation.

    Returns
    -------
    tuple
        (transfer_function, sigma_transfer, model_flux).
    """
    # Using the model correction determined by the molecfit software for the secondary standard
    # star, correct all the fiber spectra in the fcal primary data extension.
    #_______________________________________ OUTPUT FILES _________________________________________#
    if not primary:
        obs_name = fcal_fname.replace('fcal.fits', '')  # Name of observation (i.e "06mar20040")
    else:
        obs_name = fcal_fname.replace('red.fits','')
    obs_name_root = os.path.basename(obs_name)
    gal_list_fname = obs_name + '/galaxy_list.txt'  # File containing list of fnames of gal spec
    gal_spec_dir = obs_name + '/spec_files/'        # directory to hold galaxy spectra table files
    param_fname = obs_name + '/param_file.par'      # parameter file (Molecfit Input file)
    star_spec_fname = obs_name + '/star.dat'        # ascii table containing star spectrum
    mf_output_dir = obs_name + '/molecfit_output/'  # molecfit output directory name
    mf_script_fname = obs_name + '/molecfit.sh'     # script name which calls molecfit
    #mf_bin_dir = '/Users/nscott/Reduction/molecfit/bin' # directory for molecfit binary files
    # Make sure the working directories exist.
    # exist_ok avoids the race between an isdir() test and makedirs().
    for directory in [gal_spec_dir, mf_output_dir]:
        os.makedirs(directory, exist_ok=True)
    #----------------------------------------------------------------------------------------------#
    #_________________________ EXTRACT SPECTRAL INFO FROM FCAL.FITS FILE __________________________#
    # Need to find the ID of the fiber at the centre of the secondary standard star bundle.
    with fits.open(fcal_fname) as hdu:
        # extract headers
        h0 = hdu['PRIMARY'].header
        h1 = hdu['FIBRES_IFU'].header
        # extract tables - not needed when getting flux from parent function
        #primary = hdu['PRIMARY'].data
        #variance = hdu['VARIANCE'].data
        #fibers_ifu = hdu['FIBRES_IFU'].data
        # extract name of secondary standard star
        if not primary:
            star_name = hdu['FLUX_CALIBRATION'].header['STDNAME']
        else:
            star_name = hdu[0].header['MNGRNAME']
        # Air wavelength in microns
        #wave = (h0['CRVAL1'] + h0['CDELT1'] * (np.arange(h0['NAXIS1']) - h0['CRPIX1']))*10**-4
        wave = wave*(10**-4)
        # identify rows of central 19 fibers in star fiber bundle
        #centrals = np.where((fibers_ifu['NAME'] == star_name) & (fibers_ifu['FIBNUM'] <= 19))[0]
        # extract star flux and flux error
        #star_flux = np.nansum([primary[i] for i in centrals], axis=0)
        #star_flux_err = np.sqrt(np.nansum([variance[i] for i in centrals], axis=0))
        # convert 0.0 flux error to np.inf and 0.0 flux to np.nan (in place)
        star_flux[np.where(star_flux == 0.0)] = np.nan
        star_flux_err[np.where(star_flux_err == 0.0)] = np.inf
        # Write star info to ascii file in required Molecfit format
        star_table = Table([wave, star_flux, star_flux_err, np.isfinite(star_flux)],
                           names=[r'#Wavelength', 'Flux', 'Flux_Err', 'Mask'])
        star_table.write(star_spec_fname, format='ascii', overwrite=True)
    #gal_fnames = [None] * h0['NAXIS2'] # initialise list of galaxy parameter filenames
    # Need to arrange all galaxy spectra into individual files, and then provide a list of these
    # filenames to the dictionary below. For now, include ALL fiber spectra, including the secondary
    # standard fibers and the sky fibers
    #for i in range(h0['NAXIS2']): # iterate through rows
    #    gal_name = fibers_ifu['NAME'][i] # name
    #    gal_fiber_id = fibers_ifu['FIBNUM'][i] # fiber number
    #    gal_flux = primary[i] # flux
    #    with np.errstate(invalid='ignore'): # ignore RuntimeWarning in sqrt for NaNs
    #        gal_flux_err = np.sqrt(variance[i]) # flux_error (noise spectrum)
    #    gal_fnames[i] = f"{gal_spec_dir}{gal_name}_{gal_fiber_id}.dat"
    #    gal_table = Table([wave, gal_flux, gal_flux_err, ~np.isnan(gal_flux)],
    #                      names=[r'#Wavelength', 'Flux', 'Flux_Err', 'Mask'])
    #    gal_table.write(gal_fnames[i], format='ascii', overwrite=True) # write table to file
    #----------------------------------------------------------------------------------------------#
    #___________________________ WRITE LIST OF FILENAMES TO A TXT FILE ____________________________#
    # write the list of all the galaxy parameters filenames to a .txt file
    #with open(gal_list_fname, 'w') as file:
    #    [file.write(f + '\n') for f in gal_fnames]
    #----------------------------------------------------------------------------------------------#
    #________________________ WRITE DICTIONARY OF PARAMATER FILE KEYWORDS _________________________#
    # A higher-order continuum polynomial is used for primary standards.
    if not primary:
        cont_n = 3
    else:
        cont_n = 5
    dic = { ## INPUT DATA
        'filename'       : star_spec_fname,
        #'listname'      : gal_list_fname, # List of additional files to be corrected
        'trans'          : 1,          # type of input spectrum (transmission=1, emission=0)
        'columns'        : 'Wavelength Flux Flux_Err Mask', # input table column names
        'default_error'  : '',
        'wlgtomicron'    : 1.0,        # wavelength already converted to microns
        'vac_air'        : 'air',      # wavelength in air
        'wrange_include' : wrange_include, # wavelength range to include in fit
        'wrange_exclude' : 'none',     # wavelength range to exclude in fit
        'prange_exclude' : 'none',     # pixel range to exclude in fit
        ## RESULTS
        'output_dir'     : mf_output_dir, # directory of molecfit output files
        'output_name'    : obs_name_root, # use observation name to label molecfit output files
        'plot_creation'  : ' ',        # create postscript plots
        'plot_range'     : 0,          # create plots for fit ranges (0 = no)
        ## FIT PRECISION
        'ftol'           : 0.01,       # Relative chi2 convergence criterion
        'xtol'           : 0.01,       # Relative parameter convergence criterion
        ## MOLECULAR COLUMNS
        'list_molec'     : 'H2O O2',   # List of molecules to be included in the model
        'fit_molec'      : '1 1',      # Fit flags for molecules (1 = yes)
        'relcol'         : '1.0 1.0',  # Molecular column values, expressed wrt to input
                                       # ATM profile
        ## BACKGROUND AND CONTINUUM
        'flux_unit'      : 0,
        'fit_back'       : 0,          # Fit of telescope background [bool]
        'telback'        : 0.1,        # Initial value for telescope background fit
        'fit_cont'       : 1,          # Polynomial fit of continuum --> degree: cont_n
        'cont_n'         : cont_n,     # Degree of coefficients for continuum fit
        'cont_const'     : 1.0,        # Initial constant term for continuum fit
        ## WAVELENGTH SOLUTION
        'fit_wlc'        : 1,          # Refine wavelength solution using polynomal deg wcl_n
        'wlc_n'          : 3,          # Degree of polynomial of refined wavelength solution
        'wlc_const'      : 0.0,        # Initial constant term for wavelength correction
        ## RESOLUTION
        'fit_res_box'    : 0,          # Fit resolution by boxcar function (0 = no)
        'relres_box'     : 0.0,        # Initial value for FWHM of boxcar wrt slit width
        'kernmode'       : 0,          # Voigt profile instead of Gaussian & Lorentzian (no)
        'fit_res_gauss'  : 1,          # Fit resolution by Gaussian (1 = yes)
        'res_gauss'      : 1.0,        # Initial value for FWHM of Gaussian in pixels
        'fit_res_lorentz': 0,          # Fit resolution by Lorentzian (0 = no)
        'res_lorentz'    : 0.0,        # Initial value for FWHM of Lorentzian in pixels
        'kernfac'        : 30.0,       # Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
        'varkern'        : 1,          # Variable kernel (1 = yes)
        'kernel_file'    : 'none',     # Ascii file for user defined kernel elements (optional)
        ## AMBIENT PARAMETERS
        'obsdate'        : int(np.floor(h0['UTMJD'])), # Observing date in [years] or MJD in [days]
        # Fix: np.float was removed in NumPy 1.24; the builtin float is equivalent.
        'utc'            : int(float('0.'+str(h0['UTMJD']).split('.')[1]) *24*60*60), # UTC in [s]
        'telalt'         : h0['ZDSTART'],       # Starting zenith distance (Telescope altitude angle)[deg]
        'rhum'           : h1['ATMRHUM'] * 100, # Humidity in [%]
        'pres'           : h1['ATMPRES'],       # Pressure in hPa [millibar -> hPa is 1-1]
        'temp'           : h1['ATMTEMP'],       # Ambient temperature in [deg C]
        'm1temp'         : h1['MIRRTEMP'],      # Mirror temperature in [deg C]
        'geoelev'        : h0['ALT_OBS'],       # Elevation above sea level in [m]
        'longitude'      : h0['LONG_OBS'],      # Telescope longitude
        'latitude'       : h0['LAT_OBS'],       # Telescope latitude
        ## INSTRUMENTAL PARAMETERS
        'slitw'          : 1.6,        # Fiber width in arcsec.
        'pixsc'          : 1.6 / 2.5,  # FWHM of fiber projected onto ccd = 2.5 [pix],
                                       # hence Pixel scale in arcsec = 1.6/2.5 [arcsec /pix]
        ## ATMOSPHERIC PROFILES
        'ref_atm'        : 'equ.atm',  # Reference atmospheric profile
        'gdas_prof'      : 'auto',     # Specific GDAS-like input profile (auto = auto retrieval)
        'layers'         : 1,          # Grid of layer heights for merging reg_atm and GDAS prof
        'emix'           : 5.0,        # Upper mixing heights in kms (5 is default)
        'pwv'            : -1.,        # input water vapour profile in mm (-1 = no scaling)
        'clean_mflux'    : 1,          # internal GUI specific parameter
    }
    # Write the above information to the parameter file, one "key: value" line
    # per entry, terminated by molecfit's required 'end' keyword.
    with open(param_fname, 'w') as file:
        for key in dic.keys():
            file.write(key + ': ' + str(dic[key]) + '\n')
        file.write('\n')
        file.write('end \n')
    #----------------------------------------------------------------------------------------------#
    #__________________________ EXECUTE BASH COMMANDS TO CALL MOLECFIT ___________________________#
    if quiet:
        # Discard the binaries' stdout; subprocess.DEVNULL avoids opening
        # os.devnull by hand.
        for func in ['molecfit', 'calctrans']:
            subprocess.run([f"{mf_bin_dir}/{func}", f"{param_fname}"],
                           stdout=subprocess.DEVNULL)
    else:
        # Fix: the original redirected stdout to `devnull` here, a name bound
        # only inside the quiet branch, raising NameError whenever quiet=False.
        for func in ['molecfit', 'calctrans']:
            subprocess.run([f"{mf_bin_dir}/{func}", f"{param_fname}"])
    #----------------------------------------------------------------------------------------------#
    #_______________________ SAVE TELLURIC CORRECTED SPECTRUM TO SCI.FITS ________________________#
    #with fits.open(fcal_fname) as hdu:
    #    for i in range(hdu['PRIMARY'].header['NAXIS2']):
    #        gal_name = hdu['FIBRES_IFU'].data['NAME'][i] # name
    #        gal_fiber_id = hdu['FIBRES_IFU'].data['FIBNUM'][i] # fiber number
    #        filename = f"{mf_output_dir}{gal_name}_{gal_fiber_id}_TAC.dat"
    #        # extract the telluric corrected flux from the appropriate _TAC.dat file
    #        hdu['PRIMARY'].data[i] = ap_ascii.read(filename)['tacflux']
    #    # add a line to the fits primary header to indicate Molecfit has done the correction
    #    hdu['PRIMARY'].header['TELLURIC'] = ('Molecfit', 'ESO molecfit software used')
    #    hdu.writeto(fcal_fname.replace('fcal', 'sci'), overwrite=True) # save to sci file.
    #----------------------------------------------------------------------------------------------#
    #_________________________ CLEANUP! DELETE INTERMEDIARY FILES ________________________________#
    # All the intermediary molecfit input/output lives under obs_name, so the
    # whole directory can be removed once the results are read back.
    # Fix: open the output inside a context manager (the original HDUList was
    # never closed) and copy the data out before the file goes away.
    with fits.open(f"{mf_output_dir}{obs_name_root}_tac.fits") as transfer_table:
        transfer_data = transfer_table[1].data
        model_flux = transfer_data['cflux'].copy()
        transfer_function = 1./transfer_data['mtrans']
    sigma_transfer = star_flux_err/star_flux*transfer_function #np.zeros(len(transfer_function))
    sigma_transfer[transfer_function == 1.] = 0.0
    sigma_transfer[~np.isfinite(sigma_transfer)] = 0.0
    # Possibly not multi-processing safe. NEED TO CHECK THIS - ignore_errors=True is a possibly dangerous fudge
    if delete_files:
        shutil.rmtree(obs_name,ignore_errors=True)
    return transfer_function, sigma_transfer, model_flux
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.