"""helper for anaconda-project with mamba and a 'clean' condarc"""
# Copyright (c) 2021 Dane Freeman.
# Distributed under the terms of the Modified BSD License.
import os
import shutil
import subprocess
import sys
from pathlib import Path
HERE = Path(__file__).parent
ROOT = HERE.parent
CONDARC = ROOT / ".github" / ".condarc"
MAMBA = shutil.which("mamba")
def lock():
env = dict(os.environ)
if "CONDARC" not in env:
env["CONDARC"] = str(CONDARC)
if MAMBA:
env["CONDA_EXE"] = MAMBA
for envspec in ["default", "atest", "docs"]:
subprocess.check_call(["anaconda-project", "update", "-n", envspec], env=env)
if __name__ == "__main__":
sys.exit(lock())
|
"""AWS Glue Catalog Delete Module."""
import logging
from typing import Any, Dict, List, Optional
_logger: logging.Logger = logging.getLogger(__name__)
def _parquet_table_definition(
table: str, path: str, columns_types: Dict[str, str], partitions_types: Dict[str, str], compression: Optional[str]
) -> Dict[str, Any]:
compressed: bool = compression is not None
return {
"Name": table,
"PartitionKeys": [{"Name": cname, "Type": dtype} for cname, dtype in partitions_types.items()],
"TableType": "EXTERNAL_TABLE",
"Parameters": {"classification": "parquet", "compressionType": str(compression).lower(), "typeOfData": "file"},
"StorageDescriptor": {
"Columns": [{"Name": cname, "Type": dtype} for cname, dtype in columns_types.items()],
"Location": path,
"InputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
"Compressed": compressed,
"NumberOfBuckets": -1,
"SerdeInfo": {
"SerializationLibrary": "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
"Parameters": {"serialization.format": "1"},
},
"StoredAsSubDirectories": False,
"SortColumns": [],
"Parameters": {
"CrawlerSchemaDeserializerVersion": "1.0",
"classification": "parquet",
"compressionType": str(compression).lower(),
"typeOfData": "file",
},
},
}
def _parquet_partition_definition(
location: str, values: List[str], compression: Optional[str], columns_types: Optional[Dict[str, str]]
) -> Dict[str, Any]:
compressed: bool = compression is not None
definition: Dict[str, Any] = {
"StorageDescriptor": {
"InputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
"Location": location,
"Compressed": compressed,
"SerdeInfo": {
"Parameters": {"serialization.format": "1"},
"SerializationLibrary": "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
},
"StoredAsSubDirectories": False,
"NumberOfBuckets": -1,
},
"Values": values,
}
if columns_types is not None:
definition["StorageDescriptor"]["Columns"] = [
{"Name": cname, "Type": dtype} for cname, dtype in columns_types.items()
]
return definition
def _csv_table_definition(
table: str,
path: str,
columns_types: Dict[str, str],
partitions_types: Dict[str, str],
compression: Optional[str],
sep: str,
skip_header_line_count: Optional[int],
) -> Dict[str, Any]:
compressed: bool = compression is not None
parameters: Dict[str, str] = {
"classification": "csv",
"compressionType": str(compression).lower(),
"typeOfData": "file",
"delimiter": sep,
"columnsOrdered": "true",
"areColumnsQuoted": "false",
}
if skip_header_line_count is not None:
parameters["skip.header.line.count"] = "1"
return {
"Name": table,
"PartitionKeys": [{"Name": cname, "Type": dtype} for cname, dtype in partitions_types.items()],
"TableType": "EXTERNAL_TABLE",
"Parameters": parameters,
"StorageDescriptor": {
"Columns": [{"Name": cname, "Type": dtype} for cname, dtype in columns_types.items()],
"Location": path,
"InputFormat": "org.apache.hadoop.mapred.TextInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
"Compressed": compressed,
"NumberOfBuckets": -1,
"SerdeInfo": {
"Parameters": {"field.delim": sep, "escape.delim": "\\"},
"SerializationLibrary": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
},
"StoredAsSubDirectories": False,
"SortColumns": [],
"Parameters": {
"classification": "csv",
"compressionType": str(compression).lower(),
"typeOfData": "file",
"delimiter": sep,
"columnsOrdered": "true",
"areColumnsQuoted": "false",
},
},
}
def _csv_partition_definition(
location: str, values: List[str], compression: Optional[str], sep: str, columns_types: Optional[Dict[str, str]]
) -> Dict[str, Any]:
compressed: bool = compression is not None
definition: Dict[str, Any] = {
"StorageDescriptor": {
"InputFormat": "org.apache.hadoop.mapred.TextInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
"Location": location,
"Compressed": compressed,
"SerdeInfo": {
"Parameters": {"field.delim": sep, "escape.delim": "\\"},
"SerializationLibrary": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
},
"StoredAsSubDirectories": False,
"NumberOfBuckets": -1,
},
"Values": values,
}
if columns_types is not None:
definition["StorageDescriptor"]["Columns"] = [
{"Name": cname, "Type": dtype} for cname, dtype in columns_types.items()
]
return definition
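# A minimal usage sketch (not part of the original module): the dict built by
# _parquet_table_definition has the shape Glue expects for TableInput. The
# database name, S3 path and client below are placeholders; `glue_client` is
# assumed to be a boto3 Glue client created elsewhere.
def _example_create_parquet_table(glue_client, database="example_db"):
    table_input = _parquet_table_definition(
        table="example_table",
        path="s3://example-bucket/example/",
        columns_types={"id": "bigint", "name": "string"},
        partitions_types={"dt": "string"},
        compression="snappy",
    )
    glue_client.create_table(DatabaseName=database, TableInput=table_input)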
|
# Scippy libs test by
# Henry Diaz
#
# MIT Licensed
import scippy as Scippy
import sys
try:
    if SCIPP_PATH == 'C:\\Scipp\\' or SCIPP_PATH == '~/Scipp/' or SCIPP_PATH is None:
maths = scippy_lib(Scippy.SCIPP_MATHS)
        if maths is None:
print('ERROR!!')
sys.exit(1)
else:
print('ERROR!!')
sys.exit(1)
except Exception:
print('ERROR!!')
sys.exit(1)
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from mistral.policies import action
from mistral.policies import action_executions
from mistral.policies import base
from mistral.policies import code_sources
from mistral.policies import cron_trigger
from mistral.policies import dynamic_actions
from mistral.policies import environment
from mistral.policies import event_trigger
from mistral.policies import execution
from mistral.policies import member
from mistral.policies import service
from mistral.policies import task
from mistral.policies import workbook
from mistral.policies import workflow
def list_rules():
return itertools.chain(
action.list_rules(),
action_executions.list_rules(),
base.list_rules(),
code_sources.list_rules(),
dynamic_actions.list_rules(),
cron_trigger.list_rules(),
environment.list_rules(),
event_trigger.list_rules(),
execution.list_rules(),
member.list_rules(),
service.list_rules(),
task.list_rules(),
workbook.list_rules(),
workflow.list_rules()
)
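# A hedged sketch (not part of the original module) of how these defaults are
# usually consumed: an oslo.policy Enforcer can register them in bulk.
def _register_defaults(enforcer):
    # `enforcer` is assumed to be an oslo_policy.policy.Enforcer instance.
    enforcer.register_defaults(list_rules())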
|
r"""
Integration models
"""
import os
from .base import Model
from .scglue import AUTO, configure_dataset, SCGLUEModel
def load_model(fname: os.PathLike) -> Model:
r"""
Load model from file
Parameters
----------
fname
Specifies path to the file
"""
return Model.load(fname)
|
import os
import yaml
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("output_with", help="output file with the schedule with ours")
parser.add_argument("output_without", help="output file with the schedule without ours")
parser.add_argument("dynamic_obs", help="path of dynamic obs")
parser.add_argument("result_pth", help="path of results")
args = parser.parse_args()
output_with_pth = args.output_with
output_without_pth = args.output_without
dy_obs_pth = args.dynamic_obs
result_pth = args.result_pth
with open(output_with_pth, 'r') as file:
try:
output_with_file = yaml.load(file, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print(exc)
with open(output_without_pth, 'r') as file:
try:
output_without_file = yaml.load(file, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print(exc)
with open(dy_obs_pth, 'r') as file:
try:
dy_obs_file = yaml.load(file, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print(exc)
a_max_t_with = max([len(path) for path in output_with_file['schedule'].values()])
a_max_t_without = max([len(path) for path in output_without_file['schedule'].values()])
d_max_t = max([len(path) for path in dy_obs_file['schedule'].values()])
max_t_with= max(a_max_t_with, d_max_t)
max_t_without= max(a_max_t_without, d_max_t)
edge_conflict_num_with = 0
edge_conflict_num_without = 0
for t in range(max_t_with):
for agent_name in output_with_file['schedule'].keys():
for dy_obs_name in dy_obs_file['schedule'].keys():
a_pth = output_with_file['schedule'][agent_name]
d_pth = dy_obs_file['schedule'][dy_obs_name]
if t >= len(a_pth) - 1 or t >= len(d_pth) - 1:
continue
state_1a = a_pth[t]
state_2a = a_pth[t + 1]
state_1d = d_pth[t]
state_2d = d_pth[t + 1]
if ((state_1a['x'] == state_2d['x']) and (state_1a['y'] == state_2d['y'])) and ((state_2a['x'] == state_1d['x']) and (state_2a['y'] == state_1d['y'])):
edge_conflict_num_with += 1
for t in range(max_t_without):
for agent_name in output_without_file['schedule'].keys():
for dy_obs_name in dy_obs_file['schedule'].keys():
a_pth = output_without_file['schedule'][agent_name]
d_pth = dy_obs_file['schedule'][dy_obs_name]
if t >= len(a_pth) - 1 or t >= len(d_pth) - 1:
continue
state_1a = a_pth[t]
state_2a = a_pth[t + 1]
state_1d = d_pth[t]
state_2d = d_pth[t + 1]
if ((state_1a['x'] == state_2d['x']) and (state_1a['y'] == state_2d['y'])) and ((state_2a['x'] == state_1d['x']) and (state_2a['y'] == state_1d['y'])):
edge_conflict_num_without += 1
output_with_points = []
for agent in output_with_file['schedule'].keys():
for point in output_with_file['schedule'][agent]:
output_with_points.append(point)
output_without_points = []
for agent in output_without_file['schedule'].keys():
for point in output_without_file['schedule'][agent]:
output_without_points.append(point)
dy_obs_points = []
for agent in dy_obs_file['schedule'].keys():
for point in dy_obs_file['schedule'][agent]:
dy_obs_points.append(point)
collision_num_with = 0
for out_pt in output_with_points:
for dy_pt in dy_obs_points:
if out_pt == dy_pt:
collision_num_with += 1
collision_num_without = 0
for out_pt in output_without_points:
for dy_pt in dy_obs_points:
if out_pt == dy_pt:
collision_num_without += 1
    with open(result_pth, 'a') as f:
        f.write('Collision with: ' + str(collision_num_with + edge_conflict_num_with) + ' | Collision without: ' + str(collision_num_without + edge_conflict_num_without) + ' \n')
if __name__=='__main__':
main()
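# A minimal sketch (not part of the original script) of how the two duplicated
# edge-conflict loops above could be factored into one helper; `schedule` and
# `dy_obs_schedule` are assumed to be the 'schedule' dicts loaded from the YAML files.
def count_edge_conflicts(schedule, dy_obs_schedule, max_t):
    conflicts = 0
    for t in range(max_t):
        for a_pth in schedule.values():
            for d_pth in dy_obs_schedule.values():
                if t >= len(a_pth) - 1 or t >= len(d_pth) - 1:
                    continue
                a_now, a_next = a_pth[t], a_pth[t + 1]
                d_now, d_next = d_pth[t], d_pth[t + 1]
                a_edge = ((a_now['x'], a_now['y']), (a_next['x'], a_next['y']))
                d_edge = ((d_now['x'], d_now['y']), (d_next['x'], d_next['y']))
                # An edge conflict is a swap: the same edge traversed in
                # opposite directions during the same timestep.
                if a_edge == tuple(reversed(d_edge)):
                    conflicts += 1
    return conflicts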
|
import re
from utils import utils
def parse(pass_str):
m = re.search(r'(\d+)-(\d+) ([a-z]): ([a-z]+)', pass_str)
lb, ub, letter, password = m.groups()
return (int(lb), int(ub), letter, password)
def part_1(data):
count = 0
for pass_str in data:
lb, ub, letter, password = parse(pass_str)
letter_count = len(re.findall(rf'{letter}', password))
        if lb <= letter_count <= ub:
count += 1
return count
def part_2(data):
count = 0
for pass_str in data:
lb, ub, letter, password = parse(pass_str)
match_lower = password[lb-1] == letter
match_upper = password[ub-1] == letter
if match_lower != match_upper:
count += 1
return count
if __name__ == "__main__":
day = 2
data = utils.get_strs_from_file(f"data/aoc{day}_data.txt")
output_1 = part_1(data)
print(output_1)
output_2 = part_2(data)
print(output_2)
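    # Quick sanity check of the parser on a hypothetical line (not from the data file).
    print(parse("1-3 a: abcde"))  # expected: (1, 3, 'a', 'abcde')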
|
import inspect
import json
import pytest
from ..manifest import Manifest
from ..item import TestharnessTest, RefTest, item_types
@pytest.mark.parametrize("path", [
"a.https.c",
"a.b.https.c",
"a.https.b.c",
"a.b.https.c.d",
"a.serviceworker.c",
"a.b.serviceworker.c",
"a.serviceworker.b.c",
"a.b.serviceworker.c.d",
])
def test_url_https(path):
m = TestharnessTest("/foo", "bar/" + path, "/", "bar/" + path)
assert m.https is True
@pytest.mark.parametrize("path", [
"https",
"a.https",
"a.b.https",
"https.a",
"https.a.b",
"a.bhttps.c",
"a.httpsb.c",
"serviceworker",
"a.serviceworker",
"a.b.serviceworker",
"serviceworker.a",
"serviceworker.a.b",
"a.bserviceworker.c",
"a.serviceworkerb.c",
])
def test_url_not_https(path):
m = TestharnessTest("/foo", "bar/" + path, "/", "bar/" + path)
assert m.https is False
@pytest.mark.parametrize("fuzzy", [
{('/foo/test.html', u'/foo/ref.html', '=='): [[1, 1], [200, 200]]},
{('/foo/test.html', u'/foo/ref.html', '=='): [[0, 1], [100, 200]]},
{None: [[0, 1], [100, 200]]},
{None: [[1, 1], [200, 200]]},
])
def test_reftest_fuzzy(fuzzy):
t = RefTest('/',
'foo/test.html',
'/',
'foo/test.html',
[('/foo/ref.html', '==')],
fuzzy=fuzzy)
assert fuzzy == t.fuzzy
json_obj = t.to_json()
m = Manifest("/", "/")
t2 = RefTest.from_json(m, t.path, json_obj)
assert fuzzy == t2.fuzzy
# test the roundtrip case, given tuples become lists
roundtrip = json.loads(json.dumps(json_obj))
t3 = RefTest.from_json(m, t.path, roundtrip)
assert fuzzy == t3.fuzzy
@pytest.mark.parametrize("fuzzy", [
{('/foo/test.html', u'/foo/ref-2.html', '=='): [[0, 1], [100, 200]]},
{None: [[1, 1], [200, 200]], ('/foo/test.html', u'/foo/ref-2.html', '=='): [[0, 1], [100, 200]]},
])
def test_reftest_fuzzy_multi(fuzzy):
t = RefTest('/',
'foo/test.html',
'/',
'foo/test.html',
[('/foo/ref-1.html', '=='), ('/foo/ref-2.html', '==')],
fuzzy=fuzzy)
assert fuzzy == t.fuzzy
json_obj = t.to_json()
m = Manifest("/", "/")
t2 = RefTest.from_json(m, t.path, json_obj)
assert fuzzy == t2.fuzzy
# test the roundtrip case, given tuples become lists
roundtrip = json.loads(json.dumps(json_obj))
t3 = RefTest.from_json(m, t.path, roundtrip)
assert fuzzy == t3.fuzzy
def test_item_types():
for key, value in item_types.items():
assert isinstance(key, str)
assert not inspect.isabstract(value)
|
#! /usr/bin/env python3
"""
Thermal decomposition in the n-,s-C5H11 system
(two-well, two-channel, 1D ME as a function of E)
Steady-state decomposition of n-C5H11
sample output (c5h11_2a_me1d_E_w1.dat):
T[K] p[bar] w1-k2(dis) w2-k2(dis) ktot[s-1] x(w1) x(w2)
1000.0 1.000e+02 4.7452e+06 3.2981e+06 8.0433e+06 6.9810e-01 3.0190e-01
1000.0 1.000e+01 3.5927e+06 2.8640e+06 6.4566e+06 6.9099e-01 3.0901e-01
1000.0 1.000e+00 1.6175e+06 1.8428e+06 3.4603e+06 6.7063e-01 3.2937e-01
1000.0 1.000e-01 3.8500e+05 7.9742e+05 1.1824e+06 6.2665e-01 3.7335e-01
"""
from me2d import ME1DMW
maxE = 60000 # cm^-1
# list of (name, rrkm_filename, relative_energy)
well_list = [("w1", "c5h11_1_rrkmE_nc5h11_dE10.dat", 948.38),
("w2", "c5h11_1_rrkmE_sc5h11_dE10.dat", 0.)]
# list of ((name, ch), (name, ch))
connections = [(("w1", 1), ("w2", 1))]
outfn = "c5h11_2a_me1d_E_w1.dat"
solver = "InvIter,cg" # inverse iteration with conjugate gradient
reactant = "w1" # solve steady-state decomposition of w1
neig = 1 # neig = 1 for InvIter solver
bandpcrit = 1e-9 # truncation threshold for banded matrix
nthreads = 2
maxmemGB = 1
verbose = True
T = 1000. # K
pl = [100., 10., 1., 0.1] # bar
y = 0.5
alpha_w1 = 54.8 # cm^-1
Z_w1 = 1.36e-09 # cm^3 molecule^-1 s^-1
alpha_w2 = 54.4 # cm^-1
Z_w2 = 1.43e-09 # cm^3 molecule^-1 s^-1
memw = ME1DMW.read_from(well_list, connections, maxE=maxE)
memw["w1"].set_params(Z_w1, y, alpha_w1)
memw["w2"].set_params(Z_w2, y, alpha_w2)
outfp = open(outfn, "w")
kstrl, xstrl = memw.get_channel_strings()
outfp.write(" T[K] p[bar] %s\n" %
(" ".join("%12s" % x for x in kstrl+["ktot[s-1]"]+xstrl)))
ga = None
for p in pl:
ktot, kchl, ga, popl, vals, vec = \
memw.solve(T, p, gguess=ga, solver=solver, reactant=reactant,
neig=neig, verbose=verbose, bandpcrit=bandpcrit,
nthreads=nthreads, maxmemGB=maxmemGB)
outfp.write("%8.1f %.3e %s\n" % (T, p, " ".join("%12.4e" % x for x in kchl+[ktot]+popl)))
outfp.flush()
|
import pygame
from pygame.sprite import Sprite
import os
class Firebar(Sprite):
"""Firebar the can collide"""
def __init__(self, hub, x, y, name='firebar', direction='LEFT'):
super().__init__()
self.name = name
self.hub = hub
self.original_pos = (x + 4, y + 28)
self.scale = (271, 271)
self.screen = self.hub.main_screen
self.screen_rect = self.screen.get_rect()
self.camera = hub.camera
self.index = 0
self.increment = 1
self.frameRate = 30
self.clock = 0
self.image_index = []
self.setimages()
self.image = self.image_index[self.index]
self.image = pygame.transform.scale(self.image, self.scale)
self.brick = pygame.image.load("imgs/Blocks/2/EmptyBlock.png")
self.rect = self.image.get_rect()
self.clock = pygame.time.get_ticks() + self.frameRate
self.rect.center = self.original_pos
# self.mask = pygame.mask.from_surface(self.image)
self.mask = pygame.mask.from_surface(self.image)
self.mask2 = pygame.mask.Mask((80, 80), True)
self.mask.erase(self.mask2, (95, 95))
self.direction = direction
def setimages(self):
        for i in os.listdir("imgs/Firebar"):
self.image_index.append(pygame.image.load("imgs/Firebar/" + i))
def draw(self):
self.screen.blit(self.image, self.rect)
def update(self):
self.check_direction()
if pygame.time.get_ticks() > self.clock:
self.clock = pygame.time.get_ticks() + self.frameRate
self.index += self.increment
self.index %= len(self.image_index)
self.image = self.image_index[self.index]
self.image = pygame.transform.scale(self.image, self.scale)
self.mask = pygame.mask.from_surface(self.image)
self.mask.erase(self.mask2, (95, 95))
def check_direction(self):
if self.direction == self.hub.LEFT:
self.increment = -1
else:
self.increment = 1
|
import click
from overhave.cli.db.regular import _create_all, _drop_all
class TestOverhaveDatabaseCmds:
""" Sanity tests for database operating CLI commands. """
def test_create_all(self, click_ctx_mock: click.Context, set_config_to_ctx: None) -> None:
_create_all(click_ctx_mock.obj)
def test_drop_all(self, click_ctx_mock: click.Context, set_config_to_ctx: None) -> None:
_drop_all(click_ctx_mock.obj)
|
#!/usr/bin/env python
"""
Example demonstrating usage of App driver to manually start it and stop it.
"""
import sys, re
from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.testing.multitest.driver.app import App
from testplan.common.utils.match import LogMatcher
@testsuite
class MyTestsuite:
"""
    A testsuite that manually starts and stops the App driver.
"""
@testcase
def manual_start(self, env, result):
result.equal(env.cat_app.proc, None, description="App is not running.")
env.cat_app.start()
env.cat_app.wait(env.cat_app.status.STARTED)
matcher = LogMatcher(log_path=env.cat_app.logpath)
env.cat_app.proc.stdin.write(b"testplan\n")
matched = matcher.match(re.compile(r"testplan"))
result.true(matched, description="testplan in stdin")
result.not_equal(env.cat_app.proc, None, description="App is running.")
env.cat_app.stop()
env.cat_app.wait(env.cat_app.status.STOPPED)
result.equal(env.cat_app.proc, None, description="App is not running.")
@testcase
def manual_start_using_context_manager(self, env, result):
result.equal(env.cat_app.proc, None, description="App is not running.")
with env.cat_app:
matcher = LogMatcher(log_path=env.cat_app.logpath)
env.cat_app.proc.stdin.write(b"testplan\n")
matched = matcher.match(re.compile(r"testplan"))
result.true(matched, description="testplan in stdin")
result.not_equal(
env.cat_app.proc, None, description="App is running."
)
result.equal(env.cat_app.proc, None, description="App is not running.")
@test_plan(name="App driver example")
def main(plan):
"""
    A simple example that demonstrates manually starting and stopping the App driver.
"""
plan.add(
MultiTest(
name="TestCat",
suites=[MyTestsuite()],
environment=[
App(
name="cat_app",
binary="/bin/cat",
auto_start=False,
)
],
)
)
if __name__ == "__main__":
sys.exit(main().exit_code)
|
import re
from ._enums import OutId
_actions = '|'.join([
'folds',
'calls',
'checks',
'bets',
'raises',
'allin'
])
out_type = re.compile(
r'\*?(?P<out_type>[^\s]+)'
)
round_start = re.compile(
r'Game started at: '
r'(?P<date>[\d/]+) '
r'(?P<time>[\d:]+)'
)
round_id = re.compile(
r'Game ID: '
r'(?P<round_id>\d+) '
r'(?P<small_blind>[\d\.]+)/'
r'(?P<big_blind>[\d\.]+)'
)
round_end = re.compile(
r'Game ended at: '
r'(?P<date>[\d/]+) '
r'(?P<time>[\d:]+)'
)
seat_joined = re.compile(
r'Seat '
r'(?P<seat>\d): '
r'(?P<user>.+?) '
r'\((?P<buyin>[\d\.]+)\)\.'
)
seat_button = re.compile(
r'Seat (?P<seat>\d) is the button'
)
player_blind = re.compile(
r'Player (?P<user>.+?) '
r'has (?P<blind_type>\w+) blind '
r'\((?P<amount>[\d\.]+)\)'
)
player_received_card = re.compile(
r'Player (?P<user>.+?) '
r'received card: \[(?P<card>[\w\d]{2,3})\]'
)
player_action = re.compile(
r'Player (?P<user>.+?) '
rf'(?P<action>{_actions})'
r'(?: \((?P<amount>[\d\.]+)\))?'
)
player_show_cards = re.compile(
r'\*?Player (?P<user>.+?) '
r'(?:mucks )?'
r'(?:'
r'(?:\(?does not show cards\)?)|'
r'(?:shows: (?P<hand>.+?)?)'
r')\. ?'
r'Bets: (?P<bets>[\d\.]+)\. '
r'Collects: (?P<collects>[\d\.]+)\. '
r'(?P<state>Wins|Loses): '
r'(?P<amount>[\d\.]+)\.'
)
new_turn_board = re.compile(
r'\*{3} '
r'(?P<turn_name>FLOP|TURN|RIVER) '
r'\*{3}: '
r'\[(?P<board>[^\]]*)\]'
r'(?: \[(?P<new_card>\w{2})\])?'
)
pot_size = re.compile(
r'Pot: '
r'(?P<pot_size>[\d\.]+)\.'
)
board_show = re.compile(
r'(?P<board>\[.+?\])'
)
out_types = {
'Game': [
(round_start, OutId.RoundStart),
(round_end, OutId.RoundEnd),
(round_id, OutId.RoundId)
],
'Seat': [
(seat_joined, OutId.SeatJoined),
(seat_button, OutId.SeatButton)
],
'Player': [
(player_blind, OutId.PlayerBlind),
(player_received_card, OutId.PlayerReceivedCard),
(player_show_cards, OutId.PlayerShowCards),
(player_action, OutId.PlayerAction)
],
'**': [
(new_turn_board, OutId.NewTurn)
],
'Pot:': [
(pot_size, OutId.PotSize)
],
'Board': [
(board_show, OutId.BoardShow)
]
}
data_info = {
OutId.RoundStart : {
'date': str,
'time': str
},
OutId.RoundEnd : {
'date': str,
'time': str
},
OutId.RoundId : {
'round_id': str,
'small_blind': float,
'big_blind': float
},
OutId.SeatJoined : {
'seat': int,
'user': str,
'buyin': float
},
OutId.SeatButton : {
'seat': int
},
OutId.PlayerBlind : {
'user': str,
'blind_type': str,
'amount': float
},
OutId.PlayerReceivedCard : {
'user': str,
'card': str
},
OutId.PlayerShowCards : {
'user': str,
'hand': str,
'bets': float,
'collects': float,
'state': str,
'amount': float
},
OutId.PlayerAction : {
'user': str,
'action': str,
'amount': float
},
OutId.NewTurn : {
'turn_name': str,
'board': str,
'new_card': str
},
OutId.PotSize : {
'pot_size': float
},
OutId.BoardShow : {
'board': str
}
}
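# A small illustration (not part of the original module) of how a pattern pairs
# with its data_info converters; the sample line below is hypothetical.
def _demo_round_start(line='Game started at: 2020/01/01 12:00:00'):
    m = round_start.search(line)
    if m is None:
        return None
    # -> {'date': '2020/01/01', 'time': '12:00:00'}
    return {k: data_info[OutId.RoundStart][k](v) for k, v in m.groupdict().items()}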
|
"""
Sponge Knowledge Base
Blinking LED
GrovePi board: Connect LED to D4
"""
state = False
led = None
class LedBlink(Trigger):
def onConfigure(self):
self.withEvent("blink")
def onRun(self, event):
global led, state
state = not state
led.set(state)
def onStartup():
global led
led = grovepi.device.getDigitalOut(4)
sponge.event("blink").sendAfter(0, 1000)
def onShutdown():
global led
if led is not None:
led.set(False)
|
from flask_restful import Resource, reqparse
from .model import ListModel
class ListResource(Resource):
parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, required=True, help='Field name cannot be blank')
def post(self):
data = self.parser.parse_args()
name = data['name']
new_list = ListModel(name)
list_id, err = new_list.save()
if err:
return {'error': err}, 400
return {'status': 'created', 'list_id': list_id}
class ListsResource(Resource):
def get(self):
return {
'lists': list(map(lambda x: x.json(), ListModel.query.all()))
}
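# A minimal wiring sketch (assumed, not part of the original module) showing how
# these resources could be registered on a flask_restful Api; the URL paths are
# illustrative only.
def create_app():
    from flask import Flask
    from flask_restful import Api
    app = Flask(__name__)
    api = Api(app)
    api.add_resource(ListsResource, '/lists')
    api.add_resource(ListResource, '/lists/new')
    return app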
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmfewshot.classification.models.backbones import Conv4
from mmfewshot.classification.models.utils import convert_maml_module
def test_maml_module():
model = Conv4()
maml_model = convert_maml_module(model)
image = torch.randn(1, 3, 32, 32)
for weight in maml_model.parameters():
assert weight.fast is None
feat = maml_model(image)
for weight in maml_model.parameters():
weight.fast = weight
maml_feat = maml_model(image)
assert torch.allclose(feat, maml_feat)
|
#!/usr/bin/env python3
from lms.authentication.password_hash import hash_password
if __name__ == "__main__":
# pylint: disable=invalid-name
password = input("Please enter a password: ").encode("utf8")
salt = input("Please enter a salt (leave blank to have one created): ")
pw_hash, salt = hash_password(password, salt)
print(f"password hash: {pw_hash}")
print(f"salt: {salt}")
|
import os
basedir=os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY', 'thissecretkey')
DEBUG = False
class DevelopmentConfig(Config):
DEBUG=True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_boilerplate_main.db')
SQLALCHEMY_TRACK_MODIFICATIONS=False
class TestingConfig(Config):
DEBUG=True
TESTING=True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_boilerplate_main.db')
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
config_by_name = dict(
dev = DevelopmentConfig,
test = TestingConfig,
    prod = ProductionConfig
)
key = Config.SECRET_KEY
|
#
# @lc app=leetcode id=90 lang=python
#
# [90] Subsets II
#
# https://leetcode.com/problems/subsets-ii/description/
#
# algorithms
# Medium (41.88%)
# Total Accepted: 195.2K
# Total Submissions: 466.1K
# Testcase Example: '[1,2,2]'
#
# Given a collection of integers that might contain duplicates, nums, return
# all possible subsets (the power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
#
# Input: [1,2,2]
# Output:
# [
# [2],
# [1],
# [1,2,2],
# [2,2],
# [1,2],
# []
# ]
#
#
#
class Solution(object):
'''
def solver(self, nums, res):
if nums not in res:
res.append(nums)
else:
return
if len(nums)<=1:
return
for i in range(len(nums)):
if i == 0:
tmp_nums = nums[1:]
else:
tmp_nums = nums[0:i]+nums[i+1:]
self.solver(tmp_nums, res)
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
nums.sort()
self.solver(nums, res)
res.append([])
return res
def subsetsWithDup(self, nums):
result = [[]]
for num in sorted(nums):
result += [i+[num] for i in result if i+[num] not in result]
return result
'''
def subsetsWithDup(self, nums):
res = []
nums.sort()
self.dfs(nums, 0, [], res)
return res
def dfs(self, nums, index, path, res):
res.append(path)
for i in xrange(index, len(nums)):
if i > index and nums[i] == nums[i-1]:
continue
self.dfs(nums, i+1, path+[nums[i]], res)
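# Quick check (not part of the LeetCode submission): with the duplicate-skipping
# DFS above, [1, 2, 2] yields the six distinct subsets from the problem statement.
if __name__ == '__main__':
    # [[], [1], [1, 2], [1, 2, 2], [2], [2, 2]]
    print(Solution().subsetsWithDup([1, 2, 2]))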
|
import json
from mysql.connector.pooling import MySQLConnectionPool
from mysql.connector import connect
# Read config file and get information for MySQL database
# This file contains all the functions related to the MySQL database
with open("./config.json", "r+") as config_file:
config = json.load(config_file)
# pool = MySQLConnectionPool(pool_name='connection_pool',
# pool_size=3,
# **config['mysql'])
def get_connection():
return connect(**config['mysql'])
def execute_update(sql: str, *args):
connection = get_connection()
cursor = connection.cursor()
cursor.execute(sql, args)
connection.commit()
cursor.close()
connection.close()
def execute_query(sql, *args):
connection = get_connection()
cursor = connection.cursor()
cursor.execute(sql, args)
result = cursor.fetchone()
cursor.close()
connection.close()
return result
def create_tables():
# Create necessary MySQL tables
execute_update("CREATE TABLE IF NOT EXISTS `whmcs_discord` ("
"`client_id` VARCHAR(255) UNIQUE NOT NULL,"
"`discord_id` VARCHAR(255) NOT NULL,"
"PRIMARY KEY (`client_id`))")
def is_connected_discord(discord_id):
# Check if a member is connected based on their discord id
result = execute_query("SELECT * FROM whmcs_discord WHERE discord_id = %s", discord_id)
return result is not None
def is_connected_client(client_id):
# Check if a client is connected based on their client_id
result = execute_query("SELECT * FROM whmcs_discord WHERE client_id = %s", client_id)
return result is not None
def get_client_id(discord_id):
# Get a member's client id from their discord_id
result = execute_query("SELECT client_id FROM whmcs_discord WHERE discord_id = %s", discord_id)
# Since this function is supposed to return an int, I decided to return a negative value
# If no client id was found
if result is None:
return -1
# Otherwise convert the data into an int
return int(result[0])
def get_discord_id(client_id):
# Get a client's discord id using their client id
result = execute_query("SELECT discord_id FROM whmcs_discord WHERE client_id = %s", client_id)
if result is None:
return None
return result[0]
def add_client(discord_id, client_id):
# Insert a linked user into the database
execute_update("INSERT INTO whmcs_discord(client_id, discord_id) VALUES (%s, %s)", discord_id, client_id)
def remove_client(client_id):
# Remove a linked user from the database
# This is used when a client decides to unlink their accounts
execute_update("DELETE FROM whmcs_discord WHERE client_id = %s", client_id)
def get_synced_members():
# Get all the synced users in the database
synced = dict()
connection = get_connection()
cursor = connection.cursor()
cursor.execute("SELECT discord_id, client_id FROM whmcs_discord")
results = cursor.fetchall()
# Iterate over them and add them to a dictionary to be used later
for result in results:
synced[int(result[0])] = int(result[1])
cursor.close()
connection.close()
# Format is synced[discord_id] = client_id
return synced
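# A minimal usage sketch (assumed, not part of the original module); it requires
# a reachable MySQL server and valid credentials in config.json, and the IDs
# below are placeholders.
if __name__ == '__main__':
    create_tables()
    if not is_connected_discord('123456789012345678'):
        add_client('123456789012345678', '42')  # (discord_id, client_id)
    print(get_synced_members())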
|
from oidctest.cp.op import parse_resource
def test_parse_resource():
a,b = parse_resource('acct:op_id.test_id@domain')
assert a == 'op_id'
assert b == 'test_id'
a,b = parse_resource(
'https://example.com/openid-client/rp-discovery-webfinger-url/joe')
assert a == 'openid-client'
assert b == 'rp-discovery-webfinger-url'
a,b = parse_resource('acct:op_id.test_id@domain@localhost')
assert a == 'op_id'
assert b == 'test_id'
a,b = parse_resource('acct:op_id.X.test_id@domain')
assert a == 'op_id.X'
assert b == 'test_id'
try:
parse_resource('acct:op_id_test_id@domain')
except ValueError:
pass
try:
parse_resource('https://example.com/rp-discovery-webfinger-url')
except ValueError:
pass
|
# INPUT VARIABLES:
# - ants_dir: path to ANTS directory
# - code_dir: directory in which the code resides
# - input_dir: input directory
# - output_dir: output directory
# - alignment_type: type of motion correction.
# possible values:
# translation: (translation only)
# rigid: (translation and rotation)
# - dt: the range of timepoints to use for cell detection
# can be a vector which specifies the precise point sequence
# can be a scalar which specifies the downsampling factor
# e.g.
# dt = list(range(2000)) # first 2000 timepoints of recording
# dt = 10; # every 10th point of recording
# dt = 1, dt = 0, dt = [] # every point of recording
# - thr_mask: fluorescence threshold for masking the brain
# typical values lie in the range of 100 to 110
# if in doubt, set thr_mask=0 (this allows to choose it interactively later)
# NB: cell detection is only performed on suprathreshold pixels
# - ds: spatial coarse-graining in x-y dimensions:
# typical value is 2 (leading to 4-fold reduction in size)
# set value to 1 to maintain original dimensions
# - blok_cell_nmbr: number of cells in each cell_detection blok
# larger values result in larger blocks and slower computation
# - cell_diam: approximate cell diameter
# - imageframe_nmbr: number of brains in each image
# typical value is 1.
# value is 2 if two-color image
# - baseline_tau: time intervals for baseline detection (in seconds)
# typical values are 300-600 (5-10 minutes)
# - censor_tau: time intervals for censoring signal (in seconds)
# censor_tau = 0 # apply no censoring
# censor_tau = [300, 0] # censor 300 seconds at start of recording
# censor_tau = [0, 300] # censor 300 seconds at end of recording
# censor_tau = 300 # censor 300 seconds at beginning and end of recording
# - freq_cutoff: frequency cut-off for high-pass filtering
# typical value is 0.001
# set to 0 to disable high-pass filtering
# - nmf_algorithm: type of nmf algorithm for component detection
# nmf_algorithm=0 # no nmf component detection
# nmf_algorithm=1 # alternating least squares (custom implementation)
# nmf_algorithm=2 # coordinate descent (sklearn implementation)
# - n_components: number of components for nmf
# could be a scalar or an array
ants_dir = '/groups/ahrens/home/rubinovm/ants-2.1.0-redhat/'
code_dir = '/groups/ahrens/home/rubinovm/mycode/zfish_prepro/code/'
input_dir = '/groups/ahrens/home/rubinovm/mycode/zfish_prepro/input/'
output_dir = '/groups/ahrens/home/rubinovm/mycode/zfish_prepro/output/'
alignment_type = 'rigid'
dt = 1
thr_prob = 0.5
ds = 1
blok_cell_nmbr = 100
cell_diam = 6.0
imageframe_nmbr = 1
baseline_tau = 300
censor_tau = [30, 30]
freq_cutoff = 0
nmf_algorithm = 2
n_components = [20, 60]
# imaging parameters
# automatically parse imaging parameters
try:
# - xml_filename: full path to xml filename which has recording metadata
# - stack_filename: full path to stack filename which has recording metadata
from past.builtins import execfile
execfile(code_dir + 'zfun.py')
xml_filename = input_dir + '/ch0.xml'
stack_filename = input_dir + '/Stack_frequency.txt'
resn_x, resn_y, resn_z, lx, ly, lz, t_exposure, freq_stack \
= parse_info(xml_filename, stack_filename, imageframe_nmbr)
# manually specify imaging parameters
except:
resn_x = 0.812
resn_y = 0.812
resn_z = 1.0
lx = 512
ly = 444
lz = 1
t_exposure = 9.7
freq_stack = 2.66
# packed planes: set to 1 when single plane stacks are packed into a 3d-volume
packed_planes = 0
# configure parameters, function, variable and path definitions
from past.builtins import execfile
execfile(code_dir + 'zfun.py')
execfile(code_dir + 'z0_preliminaries.py')
os.chdir(output_dir)
|
import numpy as np
import pytest
import tensorflow as tf
import larq as lq
from larq.testing_utils import generate_real_values_with_zeros
@pytest.mark.parametrize("fn", [lq.math.sign])
def test_sign(fn):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [fn(x)])
binarized_values = np.random.choice([-1, 1], size=(2, 5)).astype(np.float32)
result = f(binarized_values)[0]
np.testing.assert_allclose(result, binarized_values)
real_values = generate_real_values_with_zeros()
result = f(real_values)[0]
assert not np.any(result == 0)
assert np.all(result[real_values < 0] == -1)
assert np.all(result[real_values >= 0] == 1)
zero_values = np.zeros((2, 5))
result = f(zero_values)[0]
assert np.all(result == 1)
@pytest.mark.parametrize("fn", [lq.math.heaviside])
def test_heaviside(fn):
x = tf.keras.backend.placeholder(ndim=2)
f = tf.keras.backend.function([x], [fn(x)])
binarized_values = np.random.choice([0, 1], size=(2, 5))
result = f([binarized_values])[0]
np.testing.assert_allclose(result, binarized_values)
real_values = generate_real_values_with_zeros()
result = f([real_values])[0]
assert np.all(result[real_values <= 0] == 0)
assert np.all(result[real_values > 0] == 1)
|
#!/usr/bin/env python
# In case of poor (Sh***y) commenting contact adam.lamson@colorado.edu
# Basic
import sys
import os
# Testing
# import pdb
# import time, timeit
# import line_profiler
# Analysis
# import numpy as np
# import matplotlib.pyplot as plt
# import matplotlib as mpl
# import h5py
# import yaml
# from math import *
# Speed
# from numba import jit
# Other importing
# sys.path.append(os.path.join(os.path.dirname(__file__), '[PATH]'))
"""@package docstring
File:
Author: Adam Lamson
Email: adam.lamson@colorado.edu
Description:
"""
def title_case(title_string):
"""!Change string so first letter of every word is upper case and the other letters are lowercase.
    @param title_string: string to convert to title case
    @return: title-cased copy of title_string
"""
words = title_string.split()
title = ""
for word in words:
title += word[0].upper() + word[1:].lower() + " "
    return title.strip()
##########################################
if __name__ == "__main__":
print("Not implemented yet")
|
from swift.common.utils import get_logger
from swift.proxy.controllers.base import get_container_info
from swift.proxy.controllers.base import get_account_info, get_object_info
from swift.common.swob import Request, Response
import traceback
import requests
from json import dumps
class Euler(object):
"""
Middleware that writes file create/update event to api/v0/files
for dispersal to ['dcc', 'bmeg', ... ]
"""
def __init__(self, app, conf, logger=None):
self.app = app
if logger:
self.logger = logger
else:
self.logger = get_logger(conf, log_route='euler')
self.api_url = conf.get('api_url')
self.logger.debug("euler.api_url: {}".format(self.api_url))
def __call__(self, env, start_response):
"""
WSGI entry point.
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
        if env['REQUEST_METHOD'] not in ('PUT', 'COPY', 'POST'):
return self.app(env, start_response)
response = None
try:
# complete the pipeline
response = self.app(env, start_response)
# we want to query the api after the file is stored
# harvest container, account and object info
container_info = get_container_info(
env, self.app, swift_source='Euler')
account_info = get_account_info(
env, self.app, swift_source='Euler')
object_info = get_object_info(env, self.app)
# post useful data to euler api service
from_env = ['REQUEST_METHOD', 'keystone.identity',
'keystone.token_info', 'PATH_INFO']
self.logger.debug("euler.env: {}".format(env))
to_api_env = {}
for key in from_env:
to_api_env[key] = env[key]
to_api = {'type': 'swift_event',
'id': object_info['etag'],
'env': to_api_env,
'container': container_info,
'account': account_info,
'object': object_info}
self.logger.debug("euler.to_api: {}".format(to_api))
auth_token = to_api['env']['keystone.token_info']['token']['auth_token'] # NOQA
headers = {'X-Auth-Token': auth_token}
r = requests.post(self.api_url, json=to_api, headers=headers)
self.logger.debug("euler.api_response: {} {}".format(r, r.text))
except: # catch *all* exceptions
tb = traceback.format_exc()
self.logger.debug("euler.traceback: {}".format(tb))
finally:
# return unaltered upstream response
return response
def filter_factory(global_conf, **local_conf):
"""
paste.deploy app factory for creating WSGI proxy apps.
"""
conf = global_conf.copy()
conf.update(local_conf)
def euler(app):
return Euler(app, conf)
return euler
|
# -*- coding: utf-8 -*-
import time
from django.db import models
from app.fail2ban import constants
class Fail2Ban(models.Model):
name = models.CharField(u"管理员输入的名称", max_length=200, null=True, blank=True)
proto = models.CharField(u"协议", max_length=50, null=False, blank=True)
internal = models.IntegerField(u'统计间隔', default=-1)
block_fail = models.IntegerField(u'验证失败次数', default=-1)
block_unexists = models.IntegerField(u'验证不存在帐号次数', default=-1)
block_minute = models.IntegerField(u'禁用时间', default=-1)
update_time = models.DateTimeField(u"更新时间", null=False, blank=False)
disabled = models.CharField(u'激活状态', max_length=2, choices=constants.FAIL2BAN_DISABLE, null=False, blank=False, default="-1")
class Meta:
managed = False
db_table = 'ext_fail2ban'
@property
def get_proto(self):
proto_list = self.proto.split(u",")
if "all" in proto_list:
return u"所有"
return self.proto
class Fail2BanTrust(models.Model):
ip = models.CharField(u"IP", max_length=50, null=False, blank=True)
name = models.CharField(u"名称", max_length=200, null=True, blank=True)
disabled = models.CharField(u'激活状态', max_length=2, choices=constants.FAIL2BAN_DISABLE, null=False, blank=False, default="-1")
class Meta:
managed = False
db_table = 'ext_fail2ban_trust'
class Fail2BanBlock(models.Model):
ip = models.CharField(u"IP", max_length=50, null=False, blank=True)
name = models.CharField(u"名称", max_length=200, null=True, blank=True)
expire_time = models.IntegerField(u'过期时间', default=0)
update_time = models.DateTimeField(u"更新时间", null=False, blank=False)
disabled = models.CharField(u'激活状态', max_length=2, choices=constants.FAIL2BAN_DISABLE, null=False, blank=False, default="-1")
class Meta:
managed = False
db_table = 'ext_fail2ban_block'
@property
def get_expire_time(self):
try:
expire = int(self.expire_time)
except:
return u"已失效"
if expire <=0:
return u"已失效"
try:
            # The timezone configured in settings.py is 8 hours later than the underlying layer; use this crude workaround for now so the tests pass
t_tuple = time.localtime(expire)
time_val = time.strftime('%Y-%m-%d %H:%M:%S',t_tuple)
return u"{}".format(time_val)
except:
return u"已失效"
|
from confluent_kafka import Consumer
from confluent_kafka.admin import AdminClient
from confluent_kafka.cimpl import NewTopic
from pyctm.memory.kafka.topic_config_provider import TopicConfigProvider
class KConsumerBuilder:
@staticmethod
def build_consumer(broker, consumer_group_id, topic_config):
KConsumerBuilder.check_topic_exist(broker, topic_config.name)
consumer = Consumer({
'bootstrap.servers': broker,
'group.id': consumer_group_id,
'auto.offset.reset': 'earliest'
})
consumer.subscribe([topic_config.name])
return consumer
@staticmethod
def check_topic_exist(broker, topic):
kafka_admin = AdminClient({"bootstrap.servers": broker})
topic_metadata = kafka_admin.list_topics()
if topic_metadata.topics.get(topic) is None:
new_kafka_topic = NewTopic(topic, num_partitions=1, replication_factor=1)
kafka_admin.create_topics([new_kafka_topic])
@staticmethod
def generate_consumers(topic_configs, consumer_group_id):
consumers = {}
for topic_config in topic_configs:
if topic_config.regex_pattern is not None:
print('Regex pattern %s identified.' %
topic_config.regex_pattern)
            if topic_config.regex_pattern:
found_topic_configs = TopicConfigProvider.generate_topic_configs_regex_pattern(
topic_config.broker, topic_config.regex_pattern, topic_config.class_name)
if len(found_topic_configs) == 0:
raise Exception(
'Topic regex not found - pattern - %s' % topic_config.regex_pattern)
regex_pattern_consumers = KConsumerBuilder.generate_consumers(
found_topic_configs, consumer_group_id)
for key, value in regex_pattern_consumers.items():
consumers[key] = value
                continue
print('Creating consumer for topic configuration - Name: %s - Broker: %s - Class: %s - Behavior Type: %s'
% (topic_config.name, topic_config.broker, topic_config.class_name,
topic_config.k_distributed_memory_behavior))
consumer = KConsumerBuilder.build_consumer(
topic_config.broker, consumer_group_id, topic_config)
print('Consumer created for topic %s.' % topic_config.name)
consumers[topic_config] = consumer
return consumers
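# A minimal consumption sketch (assumed, not part of the original module); the
# broker address, group id and topic_config argument are placeholders.
def _poll_forever(topic_config, broker='localhost:9092', group_id='example-group'):
    consumer = KConsumerBuilder.build_consumer(broker, group_id, topic_config)
    try:
        while True:
            msg = consumer.poll(1.0)
            if msg is None:
                continue
            if msg.error():
                print('Consumer error: %s' % msg.error())
                continue
            print('Message on %s: %s' % (msg.topic(), msg.value()))
    finally:
        consumer.close()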
|
from module_base import ModuleBase
from module_mixins import vtkPipelineConfigModuleMixin
import module_utils
import vtk
class contourFLTBase(ModuleBase, vtkPipelineConfigModuleMixin):
def __init__(self, module_manager, contourFilterText):
# call parent constructor
ModuleBase.__init__(self, module_manager)
self._contourFilterText = contourFilterText
if contourFilterText == 'marchingCubes':
self._contourFilter = vtk.vtkMarchingCubes()
else: # contourFilter == 'contourFilter'
self._contourFilter = vtk.vtkContourFilter()
module_utils.setup_vtk_object_progress(self, self._contourFilter,
'Extracting iso-surface')
# now setup some defaults before our sync
        self._config.isoValue = 128
self._viewFrame = None
self._createViewFrame()
# transfer these defaults to the logic
self.config_to_logic()
# then make sure they come all the way back up via self._config
self.logic_to_config()
self.config_to_view()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
self.set_input(0, None)
# don't forget to call the close() method of the vtkPipeline mixin
vtkPipelineConfigModuleMixin.close(self)
# take out our view interface
self._viewFrame.Destroy()
# get rid of our reference
del self._contourFilter
def get_input_descriptions(self):
return ('vtkImageData',)
def set_input(self, idx, inputStream):
self._contourFilter.SetInput(inputStream)
def get_output_descriptions(self):
return (self._contourFilter.GetOutput().GetClassName(),)
def get_output(self, idx):
return self._contourFilter.GetOutput()
def logic_to_config(self):
self._config.isoValue = self._contourFilter.GetValue(0)
def config_to_logic(self):
self._contourFilter.SetValue(0, self._config.isoValue)
def view_to_config(self):
try:
self._config.isoValue = float(
self._viewFrame.isoValueText.GetValue())
except:
pass
def config_to_view(self):
self._viewFrame.isoValueText.SetValue(str(self._config.isoValue))
def execute_module(self):
self._contourFilter.Update()
def view(self, parent_window=None):
# if the window was visible already. just raise it
if not self._viewFrame.Show(True):
self._viewFrame.Raise()
def _createViewFrame(self):
# import the viewFrame (created with wxGlade)
import modules.Filters.resources.python.contourFLTBaseViewFrame
reload(modules.Filters.resources.python.contourFLTBaseViewFrame)
self._viewFrame = module_utils.instantiate_module_view_frame(
self, self._module_manager,
modules.Filters.resources.python.contourFLTBaseViewFrame.\
contourFLTBaseViewFrame)
objectDict = {'contourFilter' : self._contourFilter}
module_utils.create_standard_object_introspection(
self, self._viewFrame, self._viewFrame.viewFramePanel,
objectDict, None)
module_utils.create_eoca_buttons(
self, self._viewFrame, self._viewFrame.viewFramePanel)
|
"""
Parsing Neurolucida files.
Here we directly extract the tree and discard everything else.
It could easily be changed to extract labels and properties etc.
What could be done next is the creation of a Morphology object, possibly using labels.
Structure of Mustafa's files:
Axon ( Marker AIS (Marker Axon))
The marker is a short and thin piece of axon that has been manually added.
"""
from numpy import *
from pylab import *
from pyparsing import (CharsNotIn, Optional, Suppress, Word, Regex, Combine, Group, delimitedList,
ParseException, alphas, nums, ZeroOrMore, Literal, Forward)
# Comments
COMMENT = Regex(r";.*").setName("Neurolucida comment")
# Terminal symbols
FLOAT = Combine(Word('+-'+nums,nums)+Literal('.')+Word(nums)).setParseAction(lambda t:float(t[0]))
INTEGER = Word('+-'+nums,nums).setParseAction(lambda t:int(t[0]))
NUMBER = FLOAT | INTEGER # this will always match floats
LABEL = Word(alphas, alphas+nums)
STRING = Suppress('"')+CharsNotIn('"')+Suppress('"')
# Basic elements
RGB = Suppress('RGB') + Suppress('(') + Group(INTEGER+Suppress(Optional(','))\
+ INTEGER+Suppress(Optional(','))\
+ INTEGER) + Suppress(')')
VALUE = NUMBER | STRING | RGB | LABEL
PROPERTY = Suppress('(') + LABEL + ZeroOrMore(VALUE) + Suppress(')')
POINT = Suppress("(") + Group(FLOAT + ZeroOrMore(Optional(Suppress(","))+FLOAT)) + Suppress(')')
# Tree
NODE = Forward()
END = Suppress(LABEL) | NODE.setResultsName('children')
BRANCH = Suppress(ZeroOrMore(STRING | PROPERTY)) + Group(ZeroOrMore(POINT)).setResultsName('branch') + END
NODE << Suppress('(') + Group(delimitedList(BRANCH,"|")) + Suppress(')')
# File
FILE = (Suppress(PROPERTY) + NODE).ignore(COMMENT)
# Let's do it!
from os import listdir
from os.path import isfile, join
path = "/Users/romain/Dropbox/Projects/Spike initiation/Collaborations/Maarten/Data/AIS reconstructions (.ASC)/"
path1 = path+"Axo-dendritic/"
path2 = path+"Axo-somatic/"
filenames = [ (path1+f,f) for f in listdir(path1) if isfile(join(path1,f)) ] +\
[ (path2+f,f) for f in listdir(path2) if isfile(join(path2,f)) ]
#filename="2013-07-16_#1.asc"
Ri = 150 / 100. # in Mohm.um
for full_name,filename in filenames:
text= open(full_name).read()
parsed = FILE.parseString(text)
axon_start = array(list(parsed[0]['branch'])).T
AIS = array(list(parsed[0]['children'][1])).T
AIS_start = axon_start.shape[1]
axon = hstack((axon_start,AIS))
cpt_length = sum(diff(axon[:3,:])**2,axis=0)**.5
d = .5*(axon[3,:-1]+axon[3,1:])
# Plotting
plot(cumsum(cpt_length[:AIS_start+1]),d[:AIS_start+1],'k')
plot(sum(cpt_length[:AIS_start])+cumsum(cpt_length[AIS_start:]),d[AIS_start:],'r')
# Analysis
AIS_onset = sum(sum(diff(array(list(parsed[0]['branch']))[:,:3].T)**2,axis=0)**.5)
AIS_length = sum(sum(diff(array(list(parsed[0]['children'][1]))[:,:3].T)**2,axis=0)**.5) # should add length of first segment
AIS_onset_Ra = 4/pi*Ri * sum(cpt_length[:AIS_start+1]/d[:AIS_start+1]**2)
AIS_end_Ra = AIS_onset_Ra + 4/pi*Ri * sum((sum(diff(array(list(parsed[0]['children'][1]))[:,:3].T)**2,axis=0)**.5) /
(array(list(parsed[0]['children'][1]))[:-1,3].T)**2)
AIS_area = sum(cpt_length[AIS_start:]*pi*d[AIS_start:])
# Ra calculated 5 um within the AIS
n = AIS_start + where(cumsum(cpt_length[AIS_start:])>5.)[0][0]
AIS_onset_Ra_5um = 4/pi*Ri * sum(cpt_length[:n]/d[:n]**2)
print filename,",",AIS_onset,",",AIS_length,",",AIS_onset_Ra,",",AIS_end_Ra,",",AIS_onset_Ra_5um,",",AIS_area
show()
|
'''
There is an integer array nums sorted in ascending order (with distinct values).
Prior to being passed to your function,
nums is rotated at an unknown pivot index k (0 <= k < nums.length)
such that the resulting
array is [nums[k], nums[k+1], ..., nums[n-1], nums[0], nums[1], ..., nums[k-1]] (0-indexed).
For example, [0,1,2,4,5,6,7] might be rotated at pivot index 3 and become [4,5,6,7,0,1,2].
Given the array nums after the rotation and an integer target,
return the index of target if it is in nums, or -1 if it is not in nums.
You must write an algorithm with O(log n) runtime complexity.
Example 1:
Input: nums = [4,5,6,7,0,1,2], target = 0
Output: 4
Example 2:
Input: nums = [4,5,6,7,0,1,2], target = 3
Output: -1
Example 3:
Input: nums = [1], target = 0
Output: -1
'''
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> int:
l, r = 0, len(nums) - 1
while l <= r:
mid = (l + r) // 2
if target == nums[mid]:
return mid
# now we need to check which portion of the array are we in... left or right
if nums[l] <= nums[mid]:
# then we are in left sorted portion
if target > nums[mid] or target < nums[l]:
# then we search the right most portion
l = mid + 1
else:
# that means the target is less than the middle and greater than or equal to the left
# so we search the left portion and update our right pointer
r = mid - 1
else:
# we are in right sorted portion
if target < nums[mid] or target > nums[r]:
# then we search the left most portion
r = mid - 1
else:
l = mid + 1
return -1
s = Solution()
print(s.search([4, 5, 6, 7, 0, 1, 2], 2))
|
from .scanplugbase import ScanPlugBase
from idownclient.scan.shodan.shodan import Shodan
from .zoomeye import ZoomEye
from .scantools import ScanTools
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""yamltodb - generate SQL statements to update a PostgreSQL database
to match the schema specified in a YAML file"""
from __future__ import print_function
import os
import sys
import getpass
from argparse import ArgumentParser, FileType
import yaml
from pyrseas.database import Database
from pyrseas.cmdargs import parent_parser
def main(host='localhost', port=5432):
"""Convert YAML specifications to database DDL."""
parser = ArgumentParser(parents=[parent_parser()],
description="Generate SQL statements to update a "
"PostgreSQL database to match the schema specified"
" in a YAML file")
parser.add_argument('spec', nargs='?', type=FileType('r'),
default=sys.stdin, help='YAML specification')
parser.add_argument('-1', '--single-transaction', action='store_true',
dest='onetrans', help="wrap commands in BEGIN/COMMIT")
parser.add_argument('-u', '--update', action='store_true',
help="apply changes to database (implies -1)")
parser.add_argument('-n', '--schema', dest='schlist', action='append',
help="only for named schemas (default all)")
parser.set_defaults(host=host, port=port,
username=os.getenv("PGUSER") or os.getenv("USER"))
args = parser.parse_args()
pswd = (args.password and getpass.getpass() or None)
db = Database(args.dbname, args.username, pswd, args.host, args.port)
    inmap = yaml.safe_load(args.spec)
if args.schlist:
kschlist = ['schema ' + sch for sch in args.schlist]
for sch in list(inmap.keys()):
if sch not in kschlist and sch.startswith('schema '):
del inmap[sch]
stmts = db.diff_map(inmap, args.schlist)
if stmts:
fd = args.output or sys.stdout
if args.onetrans or args.update:
print("BEGIN;", file=fd)
print(";\n".join(stmts) + ';', file=fd)
if args.onetrans or args.update:
print("COMMIT;", file=fd)
if args.update:
try:
for stmt in stmts:
db.dbconn.execute(stmt)
except:
db.dbconn.rollback()
raise
else:
db.dbconn.commit()
print("Changes applied", file=sys.stderr)
if args.output:
args.output.close()
if __name__ == '__main__':
main()
|
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
def get_img(x, folder: str="train_images"):
"""
Return image based on image name and folder.
"""
data_folder = f"{path}/{folder}"
image_path = os.path.join(data_folder, x)
img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def rle_decode(mask_rle: str="", shape: tuple=(1400, 2100)):
"""
Decode rle encoded mask.
:param mask_rle: run-length as string formatted (start length)
:param shape: (height, width) of array to return
Returns numpy array, 1 - mask, 0 - background
"""
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int)
for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape, order="F")
def make_mask(df: pd.DataFrame, image_name: str="img.jpg",
shape: tuple=(1400, 2100)):
"""
Create mask based on df, image name and shape.
"""
encoded_masks = df.loc[df["im_id"] == image_name, "EncodedPixels"]
masks = np.zeros((shape[0], shape[1], 4), dtype=np.float32)
for idx, label in enumerate(encoded_masks.values):
if label is not np.nan:
mask = rle_decode(label)
masks[:, :, idx] = mask
return masks
def make_mask_resized_dset(df: pd.DataFrame, image_name: str="img.jpg",
masks_dir: str="./masks",
shape: tuple=(320, 640)):
"""
Create mask based on df, image name and shape.
"""
masks = np.zeros((shape[0], shape[1], 4), dtype=np.float32)
df = df[df["im_id"] == image_name]
for idx, im_name in enumerate(df["im_id"].values):
for classidx, classid in enumerate(["Fish", "Flower", "Gravel", "Sugar"]):
mask = cv2.imread(os.path.join(masks_dir, f"{classid}{im_name}"),
cv2.IMREAD_GRAYSCALE)
if mask is None:
continue
# if mask[:,:,0].shape != (350,525):
# mask = cv2.resize(mask, (525,350))
masks[:, :, classidx] = mask
masks = masks/255
return masks
def get_classification_label(df: pd.DataFrame, image_name: str):
"""
Gets one-hot encoded labels. Assumes that the dataframe is coming in
through ClassificationSteelDataset where there is a "hasMask" column.
Returns:
One-hot encoded torch tensor (length 4) of the label present for
each class.
"""
df = df[df["im_id"] == image_name]
label = df["hasMask"].values * np.ones(4)
return torch.from_numpy(label).float()
def to_tensor(x, **kwargs):
"""
Convert image or mask.
"""
return x.transpose(2, 0, 1).astype("float32")
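# A tiny worked example (not from the competition data) of rle_decode: runs are
# "start length" pairs laid out over a column-major (Fortran-order) flattening.
if __name__ == '__main__':
    # -> [[1 1 0]
    #     [1 0 0]]
    print(rle_decode("1 3", shape=(2, 3)))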
|
#!/usr/bin/env python
"""
@package mi.dataset.driver.spkir_abj.cspp.test
@file mi/dataset/driver/spikr_abj/cspp/test/test_spkir_abj_cspp_recovered_driver.py
@author Mark Worden
@brief Minimal test code to exercise the driver parse method for spkir_abj_cspp recovered
Release notes:
Initial Release
"""
import os
import unittest
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.spkir_abj.cspp.resource import RESOURCE_PATH
from mi.dataset.driver.spkir_abj.cspp.spkir_abj_cspp_recovered_driver import parse
__author__ = 'mworden'
log = get_logger()
@attr('UNIT', group='mi')
class DriverTest(unittest.TestCase):
def test_one(self):
source_file_path = os.path.join(RESOURCE_PATH, '11079419_PPB_OCR.txt')
particle_data_handler = ParticleDataHandler()
particle_data_handler = parse(None, source_file_path, particle_data_handler)
log.debug("SAMPLES: %s", particle_data_handler._samples)
log.debug("FAILURE: %s", particle_data_handler._failure)
        self.assertEqual(particle_data_handler._failure, False)
if __name__ == '__main__':
test = DriverTest('test_one')
test.test_one()
|
import inspect
import warnings
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
kernels = tfp.math.psd_kernels
tfpl = tfp.layers
tfd = tfp.distributions
# Only needed for GW-OT
try:
import ot
except ModuleNotFoundError:
warnings.warn('POT package not available.')
# Probability distribution utils
def matrix_log_density_gaussian(x, mu, scale):
"""Calculates log density of a Gaussian for all combination of batch pairs of
`x` and `mu`. I.e. return tensor of shape `(batch_size, batch_size, dim)`.
Arguments:
x: Float value at which to compute the density. Shape: (batch_size, dim).
mu: Float value indicating the mean. Shape: (batch_size, dim).
        scale: Float value indicating the standard deviation. Shape: (batch_size, dim).
"""
x = tf.expand_dims(x, 1)
mu = tf.expand_dims(mu, 0)
scale = tf.expand_dims(scale, 0)
return log_density_gaussian(x, mu, scale)
def log_density_gaussian(x, mu, scale):
"""Calculates log density of a Gaussian.
Arguments:
x: Float value at which to compute the density.
mu: Float value indicating the mean.
        scale: Float value indicating the standard deviation.
"""
x = tf.cast(x, tf.float32)
mu = tf.cast(mu, tf.float32)
scale = tf.cast(scale, tf.float32)
normal_dist = tfp.distributions.Normal(mu, scale)
log_density = normal_dist.log_prob(x)
return log_density
def total_correlation(z, mu, scale):
"""Estimate of total correlation on a batch.
We need to compute the expectation over a batch of:
E_j [log(q(z(x_j))) - log(prod_l q(z(x_j)_l))].
We ignore the constants as they do not matter for the minimization.
The constant should be equal to (num_latents - 1) * log(batch_size * dataset_size)
Arguments:
z: [batch_size, num_latents]-tensor with sampled representation.
        mu: [batch_size, num_latents]-tensor with the mean of the encoder.
        scale: [batch_size, num_latents]-tensor with the standard deviation
            of the encoder.
"""
# Compute log(q(z(x_j)|x_i)) for every sample in the batch, which is a
# tensor of size [batch_size, batch_size, num_latents]. In the following
# comments, [batch_size, batch_size, num_latents] are indexed by [j, i, l].
log_qz_prob = log_density_gaussian(
tf.expand_dims(z, 1), tf.expand_dims(mu, 0),
tf.expand_dims(scale, 0)
)
    # Compute log prod_l p(z(x_j)_l) = sum_l(log(sum_i(q(z(x_j)_l|x_i)))
# + constant) for each sample in the batch, which is a vector of size
# [batch_size,].
log_qz_product = tf.math.reduce_sum(
tf.math.reduce_logsumexp(log_qz_prob, axis=1, keepdims=False),
axis=1,
keepdims=False)
# Compute log(q(z(x_j))) as log(sum_i(q(z(x_j)|x_i))) + constant =
# log(sum_i(prod_l q(z(x_j)_l|x_i))) + constant.
log_qz = tf.math.reduce_logsumexp(
tf.math.reduce_sum(log_qz_prob, axis=2, keepdims=False),
axis=1,
keepdims=False)
return tf.math.reduce_mean(log_qz - log_qz_product)
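# The helper below is an illustrative sketch (not part of the original module)
# of how `total_correlation` is typically consumed: as a weighted penalty added
# to a VAE-style objective. The name `tc_penalty` and the `beta_tc` weight are
# assumptions for the example.
def tc_penalty(z, mu, scale, beta_tc=1.0):
    """Sketch: beta-weighted total-correlation penalty, as in beta-TC-VAE-style losses."""
    return beta_tc * total_correlation(z, mu, scale)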
# Kernels
# Multi-scale RBF kernel modified from https://github.com/theislab/scarches
def ms_rbf_kernel(x, y):
"""Multi-scale RBF kernel"""
sigmas = [
1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1,
1, 5, 10, 15, 20, 25, 30, 35, 100,
1e3, 1e4, 1e5, 1e6
]
beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))
dist = squared_distance(x, y)
s = tf.tensordot(beta, tf.reshape(dist, (1, -1)), axes=0)
return tf.reshape(tf.math.reduce_sum(tf.exp(-s), 0), tf.shape(dist)) / len(sigmas)
def rbf_kernel(x, y):
"""Radial Basis Function or Exponentiated Quadratic kernel"""
kernel = kernels.ExponentiatedQuadratic()
return kernel.matrix(x, y)
def rq_kernel(x, y):
"""Rational Quadratic kernel"""
kernel = kernels.RationalQuadratic()
return kernel.matrix(x, y)
KERNELS = {
'ms_rbf': ms_rbf_kernel,
'rbf': rbf_kernel,
'rq': rq_kernel
}
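# Illustrative sketch (not part of the original module): one common use of the
# kernels above is a biased MMD^2 estimate between two batches of latent codes.
# The helper name `mmd_squared` and the default kernel choice are assumptions.
def mmd_squared(x, y, kernel_name='ms_rbf'):
    """Sketch: biased squared Maximum Mean Discrepancy estimate between batches x and y."""
    k = KERNELS[kernel_name]
    return (tf.math.reduce_mean(k(x, x))
            + tf.math.reduce_mean(k(y, y))
            - 2.0 * tf.math.reduce_mean(k(x, y)))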
# Methods for calculating lower-dimensional persistent homology.
# Implementations adapted from https://github.com/BorgwardtLab/topological-autoencoders
class UnionFind:
"""
An implementation of a UnionFind class. The class performs path
compression by default. It uses integers for storing one disjoint
set, assuming that vertices are zero-indexed.
"""
def __init__(self, n_vertices):
"""
Initializes an empty Union--Find data structure for a given
number of vertices.
"""
self._parent = np.arange(n_vertices, dtype=int)
def find(self, u):
"""
Finds and returns the parent of u with respect to the hierarchy.
"""
if self._parent[u] == u:
return u
else:
# Perform path collapse operation
self._parent[u] = self.find(self._parent[u])
return self._parent[u]
def merge(self, u, v):
"""
Merges vertex u into the component of vertex v. Note the
asymmetry of this operation.
"""
if u != v:
self._parent[self.find(u)] = self.find(v)
def roots(self):
"""
Generator expression for returning roots, i.e. components that
are their own parents.
"""
for vertex, parent in enumerate(self._parent):
if vertex == parent:
yield vertex
def _persistent_homology(matrix):
"""Performs persistent homology calculation"""
n_vertices = matrix.shape[0]
uf = UnionFind(n_vertices)
triu_indices = np.triu_indices_from(matrix)
edge_weights = matrix[triu_indices]
edge_indices = np.argsort(edge_weights, kind='stable')
# 1st dimension: 'source' vertex index of edge
# 2nd dimension: 'target' vertex index of edge
persistence_pairs = []
for edge_index, edge_weight in zip(edge_indices, edge_weights[edge_indices]):
u = triu_indices[0][edge_index]
v = triu_indices[1][edge_index]
younger_component = uf.find(u)
older_component = uf.find(v)
# Not an edge of the MST, so skip it
if younger_component == older_component:
continue
elif younger_component > older_component:
uf.merge(v, u)
else:
uf.merge(u, v)
if u < v:
persistence_pairs.append((u, v))
else:
persistence_pairs.append((v, u))
# Return empty cycles component
# -> Changed to not return cycles
return np.array(persistence_pairs, dtype=np.int64)
def persistent_homology(matrix):
return tf.numpy_function(
_persistent_homology, [matrix], tf.int64)
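# Small worked example (a sketch, not in the original code) showing what
# `_persistent_homology` returns: for a distance matrix, the persistence pairs
# are exactly the edges of its minimum spanning tree.
def _persistent_homology_example():
    """Sketch: persistence pairs of a tiny 3-point distance matrix."""
    dist = np.array([[0., 1., 4.],
                     [1., 0., 2.],
                     [4., 2., 0.]])
    # Expected result: array([[0, 1], [1, 2]]) -- the two MST edges.
    return _persistent_homology(dist)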
# Methods for Gromov-Wasserstein distance calculations
def _gromov_wasserstein_distance(x, y):
x_p = ot.unif(x.shape[0])
y_q = ot.unif(y.shape[0])
gw_dist = ot.gromov.gromov_wasserstein2(
x, y, x_p, y_q, loss_fun='kl_loss')
return np.array(gw_dist, dtype=np.float32)
def _entropic_gromov_wasserstein_distance(x, y):
x_p = ot.unif(x.shape[0])
y_q = ot.unif(y.shape[0])
gw_dist = ot.gromov.entropic_gromov_wasserstein2(
x, y, x_p, y_q, loss_fun='kl_loss')
return np.array(gw_dist, dtype=np.float32)
def gromov_wasserstein_distance(x, y):
return tf.numpy_function(
_gromov_wasserstein_distance, [x, y], tf.float32)
def entropic_gromov_wasserstein_distance(x, y):
return tf.numpy_function(
_entropic_gromov_wasserstein_distance, [x, y], tf.float32)
OT_DIST = {
'gw': gromov_wasserstein_distance,
'entropic_gw': entropic_gromov_wasserstein_distance
}
# Other
def squared_distance(x, y):
r = tf.expand_dims(x, axis=1)
return tf.math.reduce_sum(tf.math.square(r - y), axis=-1)
def nan2zero(x):
return tf.where(tf.math.is_nan(x), tf.zeros_like(x), x)
def nan2inf(x):
return tf.where(tf.math.is_nan(x), tf.zeros_like(x) + np.inf, x)
def clip_nonzero(x, min_val=1e-8):
clipped_x = tf.clip_by_value(x, min_val, tf.math.reduce_max(x))
return tf.where(x > 0, clipped_x, x)
def nelem(x):
nelem = tf.math.reduce_sum(tf.cast(~tf.math.is_nan(x), tf.float32))
return tf.cast(tf.where(tf.math.equal(nelem, 0.), 1., nelem), x.dtype)
def slice_matrix(matrix, row_idx, col_idx):
row_select = tf.gather(matrix, row_idx, axis=0)
col_select = tf.gather(row_select, col_idx, axis=-1)
return col_select
def l2_norm(x, axis=2, eps=1e-8):
return tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis) + eps)
def size_factors(x):
n = x.sum(1)
return n / np.median(n)
def delegates(to=None, keep=False):
"""Decorator: replace `**kwargs` in signature with params from `to`"""
def _f(f):
if to is None:
to_f, from_f = f.__base__.__init__, f.__init__
else:
to_f, from_f = to, f
sig = inspect.signature(from_f)
sigd = dict(sig.parameters)
k = sigd.pop('kwargs')
s2 = {k:v for k,v in inspect.signature(to_f).parameters.items()
if v.default != inspect.Parameter.empty and k not in sigd}
sigd.update(s2)
if keep:
sigd['kwargs'] = k
from_f.__signature__ = sig.replace(parameters=sigd.values())
return f
return _f
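# Usage sketch for `delegates` (illustrative only; `base_plot` and `fancy_plot`
# are hypothetical names, not part of this module): the wrapper's `**kwargs`
# entry in its signature is replaced by the keyword parameters of `base_plot`.
def base_plot(data, color='blue', alpha=1.0):
    return data, color, alpha
@delegates(base_plot)
def fancy_plot(data, **kwargs):
    return base_plot(data, **kwargs)
# inspect.signature(fancy_plot) now lists `color` and `alpha` instead of `**kwargs`.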
|
import cc_dat_utils
#Part 1
input_dat_file = "data/pfgd_test.dat"
#Use cc_dat_utils.make_cc_level_pack_from_dat() to load the file specified by input_dat_file
data = cc_dat_utils.make_cc_level_pack_from_dat(input_dat_file)
#print the resulting data
print(data)
|
a = int(input("Enter the number:"))
if a % 5 == 0 and a % 11 == 0:
    print(a, "is divisible by both 5 and 11.")
elif a % 5 == 0:
    print(a, "is divisible by 5.")
elif a % 11 == 0:
    print(a, "is divisible by 11.")
else:
    print(a, "is divisible by neither 5 nor 11.")
|
import pytest
from ruamel import yaml
from qhub.render import render_template
from qhub.initialize import render_config
@pytest.mark.parametrize(
"project, namespace, domain, cloud_provider, ci_provider, auth_provider",
[
("do-pytest", "dev", "do.qhub.dev", "do", "github-actions", "github"),
("aws-pytest", "dev", "aws.qhub.dev", "aws", "github-actions", "github"),
("gcp-pytest", "dev", "gcp.qhub.dev", "gcp", "github-actions", "github"),
("azure-pytest", "dev", "azure.qhub.dev", "azure", "github-actions", "github"),
],
)
def test_render(project, namespace, domain, cloud_provider, ci_provider, auth_provider, tmp_path):
config = render_config(
project_name=project,
namespace=namespace,
qhub_domain=domain,
cloud_provider=cloud_provider,
ci_provider=ci_provider,
repository="github.com/test/test",
auth_provider=auth_provider,
repository_auto_provision=False,
auth_auto_provision=False,
terraform_state="remote",
kubernetes_version="1.18.0",
disable_prompt=True,
)
config_filename = tmp_path / (project + ".yaml")
with open(config_filename, "w") as f:
yaml.dump(config, f)
output_directory = tmp_path / "test"
render_template(str(output_directory), config_filename, force=True)
|
from utils import read_problem
import sys
import json
problem_id = sys.argv[1]
spec = read_problem(problem_id)
def check_distance(spec, orig_dist, new_dist):
if abs(1.0 * new_dist / orig_dist - 1) <= spec['epsilon'] / 10**6:
return True
return False
def dist2(pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
return (x1 - x2) ** 2 + (y1 - y2) ** 2
hole = spec['hole']
# for i in range(len(hole)):
# print (i, (i + 1) % len(hole), dist2(hole[i], hole[(i + 1) % len(hole)]))
with open('solutions/current', 'rt') as f:
current = json.loads(f.read())
print ("======")
def find_all_pairs(new_dist):
v = spec['figure']['vertices']
res = []
for (a, b) in sorted(spec['figure']['edges']):
orig_dist = dist2(v[a], v[b])
if a in current['fixedPoints'] or b in current['fixedPoints']:
continue
if check_distance(spec, orig_dist, new_dist):
res.append(tuple(sorted((a, b))))
return res
for i in range(len(hole)):
a = i
b = (i + 1) % len(hole)
d = dist2(hole[a], hole[b])
# print (i, (i + 1) % len(hole), d)
res = find_all_pairs(d)
print ('hole from', a, 'to', b, '->', res)
|
from gui import *
from player import *
game = game.Game()
# gui = Gui("Pierre", "Margot")
# gui.generate_gui()
# print("End of gui")
game.play()
|
#!/usr/local/bin/python
'''
PtpNeighbor Class
'''
import pcap
import re
import datetime
from AnnounceMessage import AnnounceMessage
from SyncMessage import SyncMessage
# =============================================================================
# PtpNeighbor
#
# Inheriting from `object` (top-level class)
# =============================================================================
class PtpNeighbor(object):
def __init__(self, pkt):
'''
PtpPacket Initialization
Input Attributes:
------------------
pkt: PTP Announce packet to derive PTP details of this neighbor
from
'''
self._sync_period = None
self._delay_period = None
self._announce_period = None
self._time_of_last_sync = 0
self._time_of_last_delay = 0
self._time_of_last_announce = 0
self.new_announce_message(pkt)
@property
def sync_period(self):
"""Get the latest calculated sync_period"""
return self._sync_period
@property
def delay_period(self):
"""Get the latest calculated delay_period"""
return self._delay_period
@property
def announce_period(self):
"""Get the latest calculated announce_period"""
return self._announce_period
def new_announce_message(self, pkt):
'''
Take note of an announce message from this neighbor, to derive
its periodicity
'''
msg = AnnounceMessage(pkt)
self._update_announce_params(msg)
now = datetime.datetime.now()
if self._time_of_last_announce != 0:
self._announce_period = (now - self._time_of_last_announce).total_seconds()
self._time_of_last_announce = now
def new_sync_message(self, pkt):
'''
        Take note of a sync message from this neighbor, to derive
        its periodicity
'''
msg = SyncMessage(pkt)
now = datetime.datetime.now()
if self._time_of_last_sync != 0:
self._sync_period = (now - self._time_of_last_sync).total_seconds()
self._time_of_last_sync = now
def _update_announce_params(self, msg):
'''
Update all parameters for this neighbor using info. contained
in the announce message
'''
# According to the pcap data, set this objects' properties
self.src_addr_str = msg.ipv4_src_str
# Obliged to update delay_mode after P_Delay_Req received
self.delay_mode = "E2E"
# Obliged to update step_mode after sync received
self.step_mode = None
self.domain = msg.ipv4_dst % 256
self.priority1 = msg.ptp_bmca_priority1
self.clock_class = msg.ptp_bmca_gm_clock_class
self.accuracy = msg.ptp_bmca_gm_clock_acc
self.variance = msg.ptp_bmca_gm_clock_var
self.priority2 = msg.ptp_bmca_priority2
self.uniq_id = msg.ptp_clock_id
def __str__(self):
stats_str = str()
stats_str += ( '%-15s' % self.src_addr_str ) if self.src_addr_str != None else ''
stats_str += ( ' %3s' % self.delay_mode ) if self.delay_mode != None else ''
stats_str += ( ' %2d' % self.step_mode ) if self.step_mode != None else ' -'
stats_str += ( ' %3d' % self.domain ) if self.domain != None else ''
stats_str += ( ' %3d' % self.priority1 ) if self.priority1 != None else ''
stats_str += ( ' %3d' % self.clock_class ) if self.clock_class != None else ''
stats_str += ( ' 0x%2x' % self.accuracy ) if self.accuracy != None else ''
stats_str += ( ' %5d' % self.variance ) if self.variance != None else ''
stats_str += ( ' %3d' % self.priority2 ) if self.priority2 != None else ''
trimmed_uniq_id = str()
exp = re.compile('0x([a-fA-F0-9]+)')
if self.uniq_id != None:
match = exp.findall(self.uniq_id)
trimmed_uniq_id = match[0]
stats_str += ( ' % 16s' % trimmed_uniq_id )
stats_str += ( ' % 2.2f' % self.sync_period ) if self.sync_period != None else ' - '
stats_str += ( ' % 2.2f' % self.delay_period ) if self.delay_period != None else ' - '
stats_str += ( ' % 2.2f' % self.announce_period ) if self.announce_period != None else ' - '
return stats_str
#==============================================================================
# Class Test
#==============================================================================
import unittest
import time
class TestPtpNeighbor(unittest.TestCase):
def test_new_from_pcap_file(self):
pc = pcap.pcap('single_ptp_announce_packet.pcap')
ts, pkt = pc.next()
pn = PtpNeighbor(pkt)
self.assertEqual(pn.src_addr_str, '192.168.1.2')
self.assertEqual(pn.delay_mode, 'E2E')
self.assertIsNone(pn.step_mode)
self.assertEqual(pn.domain, 129)
self.assertEqual(pn.priority1, 128)
self.assertEqual(pn.clock_class, 6)
self.assertEqual(pn.accuracy, 33)
self.assertEqual(pn.variance, 15652)
self.assertEqual(pn.priority2, 128)
self.assertEqual(pn.uniq_id, '0x001c73ffffb53519')
self.assertIsNone(pn.sync_period)
self.assertIsNone(pn.delay_period)
self.assertIsNone(pn.announce_period)
def test_pcap_file_object_as_string(self):
pc = pcap.pcap('single_ptp_announce_packet.pcap')
ts, pkt = pc.next()
pn = PtpNeighbor(pkt)
actual = str(pn)
expected = "192.168.1.2 E2E - 129 128 6 0x21 15652 128 001c73ffffb53519 - - - "
self.assertEqual(actual, expected)
def test_updating_announce_period(self):
pc = pcap.pcap('single_ptp_announce_packet.pcap')
ts, pkt = pc.next()
pn = PtpNeighbor(pkt)
self.assertIsNone(pn.announce_period)
time.sleep(0.667)
pc = pcap.pcap('single_ptp_announce_packet.pcap')
ts, pkt = pc.next()
pn.new_announce_message(pkt)
self.assertIsNotNone(pn.announce_period)
actual = str(pn)
expected = "192.168.1.2 E2E - 129 128 6 0x21 15652 128 001c73ffffb53519 - - 0.67"
self.assertEqual(actual, expected)
def test_updating_sync_period(self):
ann_pc = pcap.pcap('single_ptp_announce_packet.pcap')
ts, pkt = ann_pc.next()
pn = PtpNeighbor(pkt)
self.assertIsNone(pn.sync_period)
sync_pc = pcap.pcap('single_ptp_sync_packet.pcap')
ts, pkt = sync_pc.next()
pn.new_sync_message(pkt)
time.sleep(0.333)
sync_pc = pcap.pcap('single_ptp_sync_packet.pcap')
ts, pkt = sync_pc.next()
pn.new_sync_message(pkt)
self.assertIsNotNone(pn.sync_period)
actual = str(pn)
expected = "192.168.1.2 E2E - 129 128 6 0x21 15652 128 001c73ffffb53519 0.34 - - "
self.assertEqual(actual, expected)
if __name__ == '__main__':
# Test class
unittest.main()
|
#!/usr/bin/env python
# coding=utf-8
"""
__init__.py
"""
__author__ = 'Rnd495'
import navbar
import playerLogTable
import playerRecordsTable
import highCharts
|
from Ft.Xml import EMPTY_NAMESPACE
class DebugWriter:
def __init__(self):
self.reset()
def reset(self):
self.calls = []
def getMediaType(self):
return ""
def getResult(self):
return ''
def getCurrent(self):
rt = self.calls
self.reset()
return rt
def __makeCall(self,name,args):
self.calls.append((name,args))
def __startCall(self,name,args):
self.calls.append(("Start: " + name,args))
def __endCall(self,name):
self.calls.append(("End: " + name,{}))
def startDocument(self):
self.__startCall("document",())
return
def endDocument(self):
self.__endCall("document")
return
def text(self, text, escapeOutput=1):
self.__makeCall("text",{'text':text})
return
def attribute(self, name, value, namespace=EMPTY_NAMESPACE):
self.__makeCall("attribute",{'name':name,
'value':value,
'namespace':namespace})
return
def processingInstruction(self, target, data):
self.__makeCall("processingInstruction",{'target':target,
'data':data})
return
def comment(self, body):
self.__makeCall('comment',{'body':body})
return
def startElement(self, name, namespace=EMPTY_NAMESPACE, extraNss=None):
self.__startCall("element",{'name':name,
'namespace':namespace})
return
def endElement(self, name):
self.__endCall("element")
return
|
import sys
from queue import LifoQueue
error_table = {')': 3, ']': 57, '}': 1197, '>': 25137}
match_table = {')': '(', ']': '[', '}': '{', '>': '<'}
complete_table = {'(': ')', '[': ']', '{': '}', '<': '>'}
complete_score = {')': 1, ']': 2, '}': 3, '>': 4}
def main():
if len(sys.argv) != 2:
sys.exit("Please provide a file name for input data")
error_score = 0
completes = []
filename = sys.argv[1]
with open(filename, "r") as inputfile:
while True:
line = inputfile.readline()
if not line:
break
tmp = line.strip()
# print(f"read line {tmp}")
counter = {'paren': 0, 'bracket': 0, 'curly_brace': 0, 'angle_brace': 0}
wrong_char = None
stack = LifoQueue(maxsize=9999)
for ch in tmp:
# print(f"{ch}", end='')
if ch in ['(', '[', '{', '<']:
stack.put(ch)
elif ch in [')', ']', '}', '>']:
if stack.empty():
wrong_char = ch
break
else:
tos = stack.get_nowait()
if tos != match_table[ch]:
wrong_char = ch
break
# print()
            if wrong_char is not None:
                # corrupted line: add the score of the first illegal character
                error_score += error_table[wrong_char]
            else:
                # incomplete line: score the characters needed to complete it
                autocomplete = 0
                while not stack.empty():
                    autocomplete *= 5
                    autocomplete += complete_score[complete_table[stack.get_nowait()]]
                completes.append(autocomplete)
    completes.sort()
    middle_idx = int(len(completes) / 2)
    print(f"Syntax error score = {error_score}")
    print(f"Middle score = {completes[middle_idx]}")
if __name__ == '__main__':
main()
|
import requests
import logging
import re
import time
import datetime
import bisect
import json
import gzip
#import hashlib
from urllib.parse import quote
from pkg_resources import get_distribution, DistributionNotFound
import os
__version__ = 'installed-from-git'
LOGGER = logging.getLogger(__name__)
try:
# this works for the pip-installed package
    __version__ = get_distribution(__name__).version
except DistributionNotFound: # pragma: no cover
pass
def myrequests_get(url, params=None, headers=None):
if params:
if 'from_ts' in params:
params['from'] = params['from_ts']
del params['from_ts']
if 'limit' in params:
if not isinstance(params['limit'], int):
# this needs to be an int because we subtract from it elsewhere
params['limit'] = int(params['limit'])
if headers is None:
headers = {}
if 'user-agent' not in headers:
headers['user-agent'] = 'pypi_cdx_toolkit/'+__version__
retry = True
connect_errors = 0
while retry:
try:
resp = requests.get(url, params=params, headers=headers, timeout=(30., 600.))
            if resp.status_code == 400 and 'page' not in (params or {}):
raise RuntimeError('invalid url of some sort: '+url) # pragma: no cover
if resp.status_code in (400, 404):
LOGGER.debug('giving up with status %d', resp.status_code)
# 400: html error page -- probably page= is too big
# 404: {'error': 'No Captures found for: www.pbxxxxxxm.com/*'} -- not an error
retry = False
break
if resp.status_code in (503, 502, 504, 500): # pragma: no cover
# 503=slow down, 50[24] are temporary outages, 500=Amazon S3 generic error
LOGGER.debug('retrying after 1s for %d', resp.status_code)
time.sleep(1)
continue
resp.raise_for_status()
retry = False
except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError,
requests.exceptions.Timeout) as e:
connect_errors += 1
if connect_errors > 10:
if os.getenv('CDX_TOOLKIT_TEST_REQUESTS'):
print('DYING IN MYREQUEST_GET')
exit(0)
else:
print('Final failure for url='+url)
raise
LOGGER.warning('retrying after 1s for '+str(e))
time.sleep(1)
except requests.exceptions.RequestException as e: # pragma: no cover
LOGGER.warning('something unexpected happened, giving up after %s', str(e))
raise
return resp
fields_to_cc = {'statuscode': 'status', 'original': 'url', 'mimetype': 'mime'}
fields_to_ia = dict([(v, k) for k, v in fields_to_cc.items()])
def munge_filter(filter, source):
if source == 'ia':
for bad in ('=', '!=', '~', '!~'):
if filter.startswith(bad):
raise ValueError('ia does not support the filter '+bad)
for k, v in fields_to_ia.items():
filter = re.sub(r'\b'+k+':', v+':', filter, 1)
if source == 'cc':
for k, v in fields_to_cc.items():
filter = re.sub(r'\b'+k+':', v+':', filter, 1)
return filter
def get_cc_endpoints():
# TODO: cache me
r = myrequests_get('http://index.commoncrawl.org/collinfo.json')
if r.status_code != 200:
raise RuntimeError('error getting list of common crawl indices: '+str(r.status_code)) # pragma: no cover
j = r.json()
endpoints = [x['cdx-api'] for x in j]
if len(endpoints) < 30: # last seen to be 39
        raise ValueError('Surprisingly few endpoints for common crawl index') # pragma: no cover
    # endpoints arrive sorted oldest to newest, but let's force that anyway
endpoints = sorted(endpoints)
return endpoints
lines_per_page = 3000 # no way to get this from the API without fetching a page
def showNumPages(r):
j = r.json()
if isinstance(j, dict): # pywb always returns json
pages = int(j.get('blocks', 0))
elif isinstance(j, int): # ia always returns text, parsed as a json int
pages = j
else:
raise ValueError('surprised by showNumPages value of '+str(j)) # pragma: no cover
return pages
def pages_to_samples(pages):
# adjust pages for the partial page at the start and end
if pages > 1:
pages = pages - 1.0
elif pages >= 1:
pages = pages - 0.5
pages *= lines_per_page
return int(pages)
def cdx_to_json(resp):
if resp.status_code == 404:
return []
text = resp.text
if text.startswith('{'): # pywb output='json' is jsonl
lines = resp.text.splitlines()
ret = []
for l in lines:
ret.append(json.loads(l))
return ret
# ia output='json' is a json list of lists
if not text.startswith('['):
raise ValueError('cannot decode response, first bytes are '+repr(text[:50])) # pragma: no cover
if text.startswith('[]'):
return []
try:
lines = json.loads(text)
fields = lines.pop(0) # first line is the list of field names
except (json.decoder.JSONDecodeError, KeyError, IndexError): # pragma: no cover
raise ValueError('cannot decode response, first bytes are '+repr(text[:50]))
ret = []
for l in lines:
obj = {}
for f in fields:
value = l.pop(0)
if f in fields_to_cc:
obj[fields_to_cc[f]] = value
else:
obj[f] = value
ret.append(obj)
return ret
# confusingly, python's documentation refers to their float version
# of the unix time as a 'timestamp'. This code uses 'timestamp' to
# mean the CDX concept of timestamp.
TIMESTAMP = '%Y%m%d%H%M%S'
TIMESTAMP_LOW = '19780101000000'
TIMESTAMP_HIGH = '29991231235959'
def pad_timestamp(timestamp):
return timestamp + TIMESTAMP_LOW[len(timestamp):]
days_in_month = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
def pad_timestamp_up(timestamp):
timestamp = timestamp + TIMESTAMP_HIGH[len(timestamp):]
month = timestamp[4:6]
timestamp = timestamp[:6] + str(days_in_month[int(month)]) + timestamp[8:]
return timestamp
def timestamp_to_time(timestamp):
utc = datetime.timezone.utc
timestamp = pad_timestamp(timestamp)
try:
return datetime.datetime.strptime(timestamp, TIMESTAMP).replace(tzinfo=utc).timestamp()
except ValueError:
LOGGER.error('cannot parse timestamp, is it a legal date?: '+timestamp)
raise
def time_to_timestamp(t):
return datetime.datetime.fromtimestamp(t, tz=datetime.timezone.utc).strftime(TIMESTAMP)
def apply_cc_defaults(params):
if 'from_ts' not in params or params['from_ts'] is None:
year = 365*86400
if 'to' in params and params['to'] is not None:
to = pad_timestamp_up(params['to'])
params['from_ts'] = time_to_timestamp(timestamp_to_time(to) - year)
LOGGER.debug('no from but to, setting from=%s', params['from_ts'])
else:
params['from_ts'] = time_to_timestamp(time.time() - year)
LOGGER.debug('no from, setting from=%s', params['from_ts'])
def fetch_warc_content(capture):
filename = capture['filename']
offset = int(capture['offset'])
length = int(capture['length'])
cc_external_prefix = 'https://commoncrawl.s3.amazonaws.com'
url = cc_external_prefix + '/' + filename
headers = {'Range': 'bytes={}-{}'.format(offset, offset+length-1)}
resp = myrequests_get(url, headers=headers)
content_bytes = resp.content
# WARC digests can be represented in multiple ways (rfc 3548)
# I have code in a pullreq for warcio that does this comparison
#if 'digest' in capture and capture['digest'] != hashlib.sha1(content_bytes).hexdigest():
# LOGGER.error('downloaded content failed digest check')
if content_bytes[:2] == b'\x1f\x8b':
content_bytes = gzip.decompress(content_bytes)
# hack the WARC response down to just the content_bytes
try:
warcheader, httpheader, content_bytes = content_bytes.strip().split(b'\r\n\r\n', 2)
except ValueError: # not enough values to unpack
return b''
# XXX help out with the page encoding? complicated issue.
return content_bytes
def fetch_wb_content(capture):
if 'url' not in capture or 'timestamp' not in capture:
raise ValueError('capture must contain an url and timestamp')
fetch_url = capture['url']
timestamp = capture['timestamp']
prefix = 'https://web.archive.org/web'
url = '{}/{}{}/{}'.format(prefix, timestamp, 'id_', quote(fetch_url))
resp = myrequests_get(url)
content_bytes = resp.content
return content_bytes
class CDXFetcherIter:
def __init__(self, cdxfetcher, params={}, index_list=None):
self.cdxfetcher = cdxfetcher
self.params = params
if 'page' in params:
raise ValueError('must not set page= in a CDX iterator')
self.endpoint = 0
self.page = -1
self.params['page'] = self.page
self.cdx_objs = []
self.index_list = index_list
self.get_more()
def get_more(self):
while True:
self.page += 1
status, objs = self.cdxfetcher.get_for_iter(self.endpoint, self.page,
params=self.params, index_list=self.index_list)
if status == 'last endpoint':
LOGGER.debug('get_more: I have reached the end')
return # caller will raise StopIteration
if status == 'last page':
LOGGER.debug('get_more: moving to next endpoint')
self.endpoint += 1
self.page = -1
continue
LOGGER.debug('get_more, got %d more objs', len(objs))
self.cdx_objs.extend(objs)
def __iter__(self):
return self
def __next__(self):
while True:
try:
return self.cdx_objs.pop(0)
except IndexError:
LOGGER.debug('getting more in __next__')
self.get_more()
if len(self.cdx_objs) <= 0:
raise StopIteration
class CDXFetcher:
def __init__(self, source='cc', cc_sort='mixed'):
self.source = source
self.cc_sort = cc_sort
if source == 'cc':
self.raw_index_list = get_cc_endpoints()
elif source == 'ia':
self.index_list = ('https://web.archive.org/web/timemap/json',)
elif source.startswith('https://') or source.startswith('http://'):
self.index_list = (source,)
else:
raise ValueError('could not understand source')
def customize_index_list(self, params):
if self.source == 'cc' and ('from' in params or 'from_ts' in params or 'to' in params):
LOGGER.debug('making a custom cc index list')
return self.filter_cc_endpoints(params=params)
else:
return self.index_list
def filter_cc_endpoints(self, params={}):
endpoints = self.raw_index_list.copy()
# chainsaw all of the cc index names to a time, which we'll use as the end-time of its data
cc_times = []
cc_map = {}
timestamps = re.findall(r'CC-MAIN-(\d\d\d\d-\d\d)', ''.join(endpoints))
CC_TIMESTAMP = '%Y-%W-%w' # I think these are ISO weeks
for timestamp in timestamps:
utc = datetime.timezone.utc
t = datetime.datetime.strptime(timestamp+'-0', CC_TIMESTAMP).replace(tzinfo=utc).timestamp()
cc_times.append(t)
cc_map[t] = endpoints.pop(0)
# now I'm set up to bisect in cc_times and then index into cc_map to find the actual endpoint
if 'closest' in params:
closest_t = timestamp_to_time(params['closest'])
if 'from_ts' not in params or params['from_ts'] is None:
# not provided, make 3 months earlier
from_ts_t = closest_t - 3 * 30 * 86400
else:
from_ts_t = timestamp_to_time(params['from_ts'])
if 'to' not in params or params['to'] is None:
# not provided, make 3 months later
to_t = closest_t + 3 * 30 * 86400
else:
to_t = timestamp_to_time(params['to'])
else:
if 'to' in params:
to = pad_timestamp_up(params['to'])
to_t = timestamp_to_time(to)
if 'from_ts' not in params or params['from_ts'] is None:
from_ts_t = to_t - 365 * 86400
else:
from_ts_t = timestamp_to_time(params['from_ts'])
else:
to_t = None
if 'from_ts' not in params or params['from_ts'] is None:
from_ts_t = time.time() - 365 * 86400
else:
from_ts_t = timestamp_to_time(params['from_ts'])
# bisect to find the start and end of our cc indexes
start = bisect.bisect_left(cc_times, from_ts_t) - 1
start = max(0, start)
if to_t is not None:
end = bisect.bisect_right(cc_times, to_t) + 1
end = min(end, len(self.raw_index_list))
else:
end = len(self.raw_index_list)
index_list = self.raw_index_list[start:end]
params['from_ts'] = time_to_timestamp(from_ts_t)
if to_t is not None:
params['to'] = time_to_timestamp(to_t)
if 'closest' in params:
pass
# XXX funky ordering
if self.cc_sort == 'ascending':
pass # already in ascending order
elif self.cc_sort == 'mixed':
index_list.reverse()
else:
raise ValueError('unknown cc_sort arg of '+self.cc_sort)
LOGGER.debug('using cc index range from %s to %s', index_list[0], index_list[-1])
return index_list
def get(self, url, **kwargs):
# from_ts=None, to=None, matchType=None, limit=None, sort=None, closest=None,
# filter=None, fl=None, page=None, pageSize=None, showNumPages=None):
params = kwargs
params['url'] = url
params['output'] = 'json'
if 'filter' in params:
params['filter'] = munge_filter(params['filter'], self.source)
# if 'limit' not in params:
# params['limit'] = 1000
if self.source == 'cc':
apply_cc_defaults(params)
index_list = self.customize_index_list(params)
ret = []
for endpoint in index_list:
resp = myrequests_get(endpoint, params=params)
objs = cdx_to_json(resp) # turns 400 and 404 into []
ret.extend(objs)
if 'limit' in params:
params['limit'] -= len(objs)
if params['limit'] <= 0:
break
return ret
def items(self, url, **kwargs):
params = kwargs
params['url'] = url
params['output'] = 'json'
if 'filter' in params:
params['filter'] = munge_filter(params['filter'], self.source)
if 'limit' not in params:
params['limit'] = 1000
if self.source == 'cc':
apply_cc_defaults(params)
index_list = self.customize_index_list(params)
return CDXFetcherIter(self, params=params, index_list=index_list)
def get_for_iter(self, endpoint, page, params={}, index_list=None):
'''
        Specialized get for the iterator
'''
if endpoint >= len(index_list):
return 'last endpoint', []
if params.get('limit', -1) == 0:
return 'last endpoint', [] # a little white lie
endpoint = index_list[endpoint]
params['page'] = page
resp = myrequests_get(endpoint, params=params)
if resp.status_code == 400: # pywb
return 'last page', []
if resp.text == '': # ia
return 'last page', []
ret = cdx_to_json(resp) # turns 404 into []
if 'limit' in params:
params['limit'] -= len(ret)
return 'ok', ret
def get_size_estimate(self, url, as_pages=False, **kwargs):
'''
Get the number of pages that match url
useful additional args: matchType='host' pageSize=1
or, url can end with * or start with *. to set the matchType
'''
params = {'url': url, 'showNumPages': 'true'}
params.update(**kwargs)
if self.source == 'cc':
apply_cc_defaults(params)
index_list = self.customize_index_list(params)
pages = 0
for endpoint in index_list:
resp = myrequests_get(endpoint, params=params)
if resp.status_code == 200:
pages += showNumPages(resp)
else:
pass # silently ignore empty answers # pragma: no cover
if not as_pages:
pages = pages_to_samples(pages)
return pages
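# Example usage (a sketch, not part of the library itself): iterate over a few
# captures from the Internet Archive CDX endpoint. The URL and limit below are
# illustrative values only.
if __name__ == '__main__':  # pragma: no cover
    cdx = CDXFetcher(source='ia')
    for capture in cdx.items('archive.org/', limit=5):
        print(capture.get('timestamp'), capture.get('status'), capture.get('url'))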
|
# -*- coding: utf-8 -*-
import os
import time
import hashlib
import settings
from fileoo import File
class Cache:
def __init__(self):
self.fil = File()
def fingerprint_path(self, key):
fingerprint = hashlib.md5(key).hexdigest()
cache_path = os.path.join(settings.CACHE_PATH, fingerprint + '.gz')
return fingerprint, cache_path
def is_cached(self, key):
fingerprint, cache_path = self.fingerprint_path(key)
if os.path.exists(cache_path) and \
time.time() - os.path.getmtime(cache_path) < settings.CACHE_EXPIRE:
return True
return False
def read(self, key):
fingerprint, cache_path = self.fingerprint_path(key)
if self.is_cached(key):
return self.fil.read(cache_path, content_type='gz').next()
def write(self, key, content):
fingerprint, cache_path = self.fingerprint_path(key)
return self.fil.write(cache_path, content, content_type='gz')
def clear(self, key):
fingerprint, cache_path = self.fingerprint_path(key)
try:
os.remove(cache_path)
return True
except OSError:
return False
|
from datetime import datetime
from functools import total_ordering
from sqlalchemy import asc
from sqlalchemy.sql import sqltypes, schema
from sqlalchemy.exc import SQLAlchemyError
from app.database import BASE, SESSION
from app.database.utils.decorators import with_insertion_lock
from app.database.utils.filters import leave_required_keys
from app.logger import get_logger
log = get_logger(__name__)
@total_ordering
class Video(BASE):
"""Модель таблицы с данными видеофайлов"""
__tablename__ = 'video'
id = schema.Column(sqltypes.Integer, primary_key=True)
name = schema.Column(sqltypes.String(256), nullable=False)
camera_id = schema.Column(sqltypes.Integer,
schema.ForeignKey('camera.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False)
video_path = schema.Column(sqltypes.String(256), nullable=False)
record_date = schema.Column(sqltypes.Date, nullable=False)
record_time = schema.Column(sqltypes.Time(timezone=True), nullable=False)
extension = schema.Column(sqltypes.String(6), nullable=False)
duration = schema.Column(sqltypes.Integer, nullable=False)
bitrate = schema.Column(sqltypes.Integer, nullable=False)
codec = schema.Column(sqltypes.String(10), nullable=False)
def __repr__(self):
return f"<{self.id} | {self.name}.{self.extension} " \
f"at {self.record_date} {self.record_time} " \
f"duration: {self.duration}>"
def __lt__(self, other):
        return self.id < other.id
def __eq__(self, other):
return isinstance(other, Video) and self.id == other.id
@total_ordering
class VideoServer(BASE):
"""Модель таблицы Видео Сервера"""
__tablename__ = 'video_server'
id = schema.Column(sqltypes.Integer, primary_key=True)
server_name = schema.Column(sqltypes.String(256), nullable=False)
server_dir = schema.Column(sqltypes.String(256), nullable=False)
def __repr__(self):
return f"{self.id} | Server: {self.server_name} at {self.server_dir}"
def __lt__(self, other):
        return self.id < other.id
def __eq__(self, other):
return isinstance(other, VideoServer) and self.id == other.id
@total_ordering
class Camera(BASE):
"""Модель таблицы Камеры"""
__tablename__ = 'camera'
id = schema.Column(sqltypes.Integer, primary_key=True)
server_id = schema.Column(sqltypes.Integer,
schema.ForeignKey('video_server.id', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False)
camera_name = schema.Column(sqltypes.String(16), nullable=False)
camera_dir = schema.Column(sqltypes.String(256), nullable=False)
def __repr__(self):
return f"{self.id} | {self.camera_name} for server id: {self.server_id} at {self.camera_dir}"
def __lt__(self, other):
        return self.id < other.id
def __eq__(self, other):
return isinstance(other, Camera) and self.id == other.id
def init_tables():
"""Инициализация таблиц в базе данных"""
VideoServer.__table__.create(checkfirst=True)
Camera.__table__.create(checkfirst=True)
Video.__table__.create(checkfirst=True)
@with_insertion_lock
def set_or_get_new_server(server_dir: str) -> VideoServer:
"""
    Creates a new server record, or returns the existing one
    Args:
        server_dir (str): Path to the server directory
    Returns:
        VideoServer: Model with the video server data
    """
    if server_dir is None:
        raise ValueError("The server must not be NoneType")
video_server = SESSION.query(VideoServer).filter_by(server_dir=server_dir).limit(1).scalar()
if not video_server:
video_server = VideoServer(
            server_name=server_dir[server_dir.rfind('/') + 1:],  # find the last '/' and keep everything after it
server_dir=server_dir)
SESSION.add(video_server)
try:
SESSION.commit()
except SQLAlchemyError as err:
SESSION.rollback()
raise ValueError from err
finally:
SESSION.close()
SESSION.close()
return video_server
@with_insertion_lock
def set_or_get_new_camera(camera_dir: str, server: VideoServer) -> Camera:
"""
    Adds a new camera to the database
    Args:
        camera_dir (str): Path to the camera directory
        server (app.db.models.VideoServer): Video server table model
            to which the camera should be added
    Returns:
        Camera: Model with the camera data
"""
camera = SESSION.query(Camera).filter_by(camera_dir=camera_dir, server_id=server.id).limit(1).scalar()
if not camera:
camera = Camera(
server_id=server.id,
camera_name=camera_dir,
camera_dir=camera_dir)
SESSION.add(camera)
SESSION.commit()
SESSION.close()
return camera
@with_insertion_lock
def set_or_get_new_video(**kwargs) -> Video:
"""
    Adds a new video to the database
    Args:
        **kwargs: Arguments with the video data
    Keyword Args:
        name (str): Video name
        camera_id (int): ID of the camera record
        video_path (PathLike): Path to the video
        record_date (datetime): Date the video was recorded
        record_time (datetime): Time the recording starts from
        extension (str): Video file extension
        duration (int): Duration of the video stream
        bitrate (int): Bitrate of the video stream
        codec (str): Stream codec
    Returns:
        Video: Model with the video data
    Raises:
        KeyError: If not all required fields were provided
"""
    # Drop any kwargs that are not Video model fields
    required_fields = Video.__table__.columns.keys()
    filtered_kwargs = leave_required_keys(kwargs, required_fields)
    required_fields.remove('id')  # Drop the id field from the required set, since it is optional
    # Check that all required fields were provided
    if set(required_fields) - set(filtered_kwargs):
        raise KeyError('Not all required fields were provided')
video = SESSION.query(Video).filter_by(**filtered_kwargs).limit(1).scalar()
if not video:
if 'id' in filtered_kwargs:
filtered_kwargs.pop('id')
video = Video(**filtered_kwargs)
SESSION.add(video)
SESSION.commit()
SESSION.close()
return video
def get_server(**kwargs) -> [VideoServer, None]:
"""
    Get a video server model matching the given parameters
    Args:
        **kwargs: Server model data
    Keyword Args:
        id (int): ID of the record in the table
        server_name (str): Server name
        server_dir (PathLike): Path to the server
    Returns:
        VideoServer: Model with the server data
        None: If no server was found for the given parameters
"""
filtered_fields = leave_required_keys(kwargs, VideoServer.__table__.columns.keys())
if 'server_dir' in filtered_fields.keys():
filtered_fields['server_dir'] = str(filtered_fields.get('server_dir')).replace("\\", '/')
video_server = SESSION.query(VideoServer).filter_by(**filtered_fields).limit(1).scalar()
SESSION.close()
return video_server
def get_camera(**kwargs) -> [Camera, None]:
"""
    Get a camera model matching the given parameters
    Args:
        **kwargs: Camera model data
    Keyword Args:
        id (int): ID of the record in the table
        server_id (int): ID of the server the camera is attached to
        camera_name (str): Camera name
        camera_dir (PathLike): Path to the camera
    Returns:
        Camera: Model with the camera data
        None: If no camera was found for the given parameters
"""
filtered_fields = leave_required_keys(kwargs, Camera.__table__.columns.keys())
if 'camera_dir' in filtered_fields.keys():
filtered_fields['camera_dir'] = str(filtered_fields.get('camera_dir')).replace("\\", '/')
camera = SESSION.query(Camera).filter_by(**filtered_fields).limit(1).scalar()
SESSION.close()
return camera
def get_video(**kwargs) -> [Video, None]:
"""
    Get a video model matching the given parameters
    Args:
        **kwargs: Arguments with the video data
    Keyword Args:
        id (int): ID of the record in the table
        name (str): Video name
        camera_id (int): ID of the camera record
        video_path (PathLike): Path to the video
        record_date (datetime): Date the video was recorded
        record_time (datetime): Time the recording starts from
        extension (str): Video file extension
        duration (int): Duration of the video stream
        bitrate (int): Bitrate of the video stream
        codec (str): Stream codec
    Returns:
        Video: Model with the video data
        None: If no video was found for the given parameters
"""
filtered_fields = leave_required_keys(kwargs, Video.__table__.columns.keys())
video = SESSION.query(Video).filter_by(**filtered_fields).limit(1).scalar()
SESSION.close()
return video
def get_video_pool_by_datetime(time_start: datetime, time_end: datetime, camera: Camera) -> list[Video]:
"""
    Get a pool of videos for the given time interval
    Args:
        time_start (datetime): Start time of the videos
        time_end (datetime): End time of the videos
        camera (Camera): Camera model that made the recordings
    Returns:
        list[Video]: List of Video models within the given time interval
"""
video_pool = SESSION \
.query(Video) \
.filter_by(camera_id=camera.id) \
.filter(Video.record_date >= time_start.date(),
Video.record_date <= time_end.date(),
Video.record_time >= time_start.time(),
Video.record_time <= time_end.time()) \
.order_by(asc(Video.record_date)) \
.order_by(asc(Video.record_time)) \
.all()
SESSION.close()
return video_pool
def get_all_video_servers() -> list[VideoServer]:
"""
    Get the list of all video servers
    Returns:
        list[VideoServer]: List of all video servers in the database
"""
video_servers = SESSION.query(VideoServer).all()
SESSION.close()
return video_servers
def get_all_cameras_at_server(server: VideoServer) -> list[Camera]:
"""
    Get the list of cameras belonging to the given server
    Args:
        server (VideoServer): Video server the cameras belong to
    Returns:
        list[Camera]: List of all cameras of the given server
"""
cameras = SESSION.query(Camera).filter_by(server_id=server.id).all()
SESSION.close()
return cameras
def get_all_videos_from_camera(camera: Camera) -> list[Video]:
"""
    Get the list of videos recorded by the given camera
    Args:
        camera (Camera): Camera model whose videos should be listed
    Returns:
        list[Video]: List of videos recorded by the given camera
"""
videos = SESSION.query(Video).filter_by(camera_id=camera.id).all()
SESSION.close()
return videos
if __name__ != '__main__':
init_tables()
|
from floodsystem.stationdata import build_station_list
from Task2B import run
def test_Task2B():
testList = run()
assert testList[0][1] > testList[-1][1]
assert testList[-1][1] > 0.8
|
import os
from typing import IO
from abc import abstractmethod, ABCMeta
class ArquivoEntrada(object, metaclass=ABCMeta):
def __init__(self):
self.linha: str = ''
self.cont: int = 0
@abstractmethod
def ler(self, *args, **kwargs) -> None:
"""
        Parameters accepted by this method:
        folder: string with the path to the directory
        file_name: string with the file name
"""
@abstractmethod
def escrever(self, *args, **kwargs) -> None:
"""
        Static method for writing a newave input file
        key: key
        reg: record
        fid: pointer to the last position read
        returns a pointer to the next line
"""
@staticmethod
def verificar_caixa_nome_arquivo(dir_base: str, nome_arq: str) -> str:
"""
        Checks whether the file exists with its name in lower case or upper case
        :param dir_base: directory where the file is located
        :param nome_arq: file name
        :return:
"""
        # if the file exists with the name as given (upper and lower case letters in any order), use that name
        if os.path.exists(os.path.join(dir_base, nome_arq)):
            return nome_arq
        # if the name exists only in lower case, use it
        elif os.path.exists(os.path.join(dir_base, nome_arq.lower())):
            return nome_arq.lower()
        # if the name exists only in upper case, use it
        elif os.path.exists(os.path.join(dir_base, nome_arq.upper())):
            return nome_arq.upper()
        else:
            # if the file does not exist (results of a case that was not run), prefer lower case
            return nome_arq.lower()
def next_line(self, f: IO) -> str:
"""
        Method that reads the next line and updates the counter
        :param f: pointer to the last line read
        :return: the line read
"""
self.linha = next(f)
self.cont += 1
return self.linha
|
"""This problem was asked by Google.
You are given an array of arrays of integers, where each array corresponds
to a row in a triangle of numbers. For example, [[1], [2, 3], [1, 5, 1]] represents the triangle:
1
2 3
1 5 1
We define a path in the triangle to start at the top and go down one row at
a time to an adjacent value, eventually ending with an entry on the bottom row.
For example, 1 -> 3 -> 5. The weight of the path is the sum of the entries.
Write a program that returns the weight of the maximum weight path.
"""
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.SWIG.common import progress
from ooflib.common import registeredclass
from ooflib.common import debug
from ooflib.common import utils
class AnimationStyle(registeredclass.RegisteredClass):
registry = []
class IndefiniteAnimation(AnimationStyle):
def getProgressStyle(self):
return progress.INDEFINITE
class FiniteAnimation(AnimationStyle):
def getProgressStyle(self):
return progress.DEFINITE
#########
class ForwardAnimation(FiniteAnimation):
def getTimes(self, times):
for t in times:
yield t
registeredclass.Registration(
'Forward',
AnimationStyle,
ForwardAnimation,
ordering=0,
tip="Run the animation once.")
#########
class ReverseAnimation(FiniteAnimation):
def getTimes(self, times):
return reversed(utils.degenerate(times))
registeredclass.Registration(
'Reverse',
AnimationStyle,
ReverseAnimation,
ordering=1,
tip="Run the animation once in reverse.")
#########
class LoopAnimation(IndefiniteAnimation):
def getTimes(self, times):
times = utils.degenerate(times)
while True:
for t in times:
yield t
registeredclass.Registration(
'Loop',
AnimationStyle,
LoopAnimation,
ordering=2,
tip="Run the animation indefinitely.")
#########
class BackAndForthAnimation(IndefiniteAnimation):
def getTimes(self, times):
times = utils.degenerate(times)
n = len(times)
if n == 0:
return
i = 0
step = 1
while True:
yield times[i]
i += step
if i == n or i == -1:
step *= -1
i += 2*step
registeredclass.Registration(
'Back and Forth',
AnimationStyle,
BackAndForthAnimation,
ordering=3,
tip="Run the animation forward, then backwards, then forwards again, ad infinitum.")
|
#!/usr/bin/env python
# python setup.py sdist upload -r pypi
from distutils.core import setup
version = '0.47'
packages = ['ctbBio']
scripts = ['ctbBio/16SfromHMM.py', 'ctbBio/23SfromHMM.py', 'ctbBio/numblast.py', 'ctbBio/besthits.py',
'ctbBio/calculate_coverage.py', 'ctbBio/cluster_ani.py', 'ctbBio/compare_aligned.py',
'ctbBio/concat_align.py', 'ctbBio/crossmap.py', 'ctbBio/fasta.py', 'ctbBio/fasta_length.py',
'ctbBio/fasta_region.py', 'ctbBio/fasta_stats.py', 'ctbBio/fastq2fasta.py', 'ctbBio/fastq_merge.py',
'ctbBio/fastq_split.py', 'ctbBio/filter_fastq_sam.py', 'ctbBio/fix_fasta.py', 'ctbBio/genome_abundance.py',
'ctbBio/genome_coverage.py', 'ctbBio/genome_variation.py', 'ctbBio/lookup-word.py', 'ctbBio/lookup.py',
'ctbBio/mapped.py', 'ctbBio/n50.py', 'ctbBio/name2fasta.py',
'ctbBio/name2faa.py', 'ctbBio/neto.py', 'ctbBio/rec_best_blast.py',
'ctbBio/nr_fasta.py', 'ctbBio/numblast-pident.py', 'ctbBio/orthologer.py', 'ctbBio/orthologer_summary.py',
'ctbBio/parallel.py', 'ctbBio/rRNA_copies.py', 'ctbBio/rRNA_insertions.py', 'ctbBio/rax.py',
'ctbBio/rc.py', 'ctbBio/rp16.py', 'ctbBio/rp16_retreive.sh', 'ctbBio/sam2fastq.py', 'ctbBio/search.py',
'ctbBio/shuffle_genome.py', 'ctbBio/sixframe.py', 'ctbBio/stats.py', 'ctbBio/stockholm2fa.py',
'ctbBio/stockholm2oneline.py', 'ctbBio/strip_align.py', 'ctbBio/strip_align_inserts.py',
'ctbBio/strip_masked.py', 'ctbBio/subset_sam.py', 'ctbBio/subset_reads.py', 'ctbBio/transform.py',
'ctbBio/unmapped.py', 'ctbBio/rRNA_insertions_gff.py',
'ctbBio/ncbi_download.py']
classifiers = ['Programming Language :: Python', 'Programming Language :: Python :: 3']
requirements = ['networkx', 'python-Levenshtein', 'numpy', 'pandas', 'biopython', 'tqdm']
setup(name='ctbBio',
author='Chris Brown',
author_email='ctb@berkeley.edu',
packages=packages,
scripts=scripts,
version=version,
license='MIT',
url='https://github.com/christophertbrown/bioscripts',
description='scripts for working with sequencing data',
install_requires=requirements,
classifiers=classifiers
)
|
"""Adding new operation types
Revision ID: 1cf750b30c08
Revises: e35c7cf01cb4
Create Date: 2021-11-02 23:51:07.308510
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1cf750b30c08'
down_revision = 'e35c7cf01cb4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("INSERT INTO operation_type VALUES (20, 'Mascota'), (21, 'Salud'), (22, 'Inversión')")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("DELETE FROM operation_type WHERE id in (20,21,22)")
# ### end Alembic commands ###
|
#!/usr/bin/env python3
import rospy
import rospkg
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import pathlib
import onnxruntime as ort
import numpy as np
import cv2
from olfaction_msgs.msg import gas_sensor, anemometer
import matplotlib.pyplot as plt
# set initial position
initial_position = (2, 14)
source_position = (3, 2)
# initialize path
pkg_path = pathlib.Path(rospkg.RosPack().get_path('pydog_gsl'))
map_path = pkg_path / 'map/occupancy.png'
model_path = pkg_path / 'model/dqn.onnx'
output_path = pkg_path / 'result.csv'
# initialize map_grid
map_grid = cv2.imread(map_path.as_posix())
map_grid = cv2.cvtColor(map_grid, cv2.COLOR_BGR2GRAY)
_, map_grid = cv2.threshold(map_grid, 125, 255, cv2.THRESH_BINARY)
map_grid = map_grid.astype(np.float32)
map_grid /= 255.0
# onnxruntime Policy
class Policy:
def __init__(self, model_path: str) -> None:
self.ort_sess = ort.InferenceSession(model_path)
self.action_name = ['Up', 'Down', 'Left', 'Right']
def __call__(self, model_input):
output = self.ort_sess.run(['output'], {'input': model_input})[0]
return np.argmax(output, axis=1, keepdims=True)[0, 0]
def __len__(self):
return len(self.action_name)
def __getitem__(self, key):
return self.action_name[key]
# observation matrix
class ObservationMatrix:
def __init__(self, map_grid, initial_position, concentration_limit=200.0) -> None:
self.trajectory_matrix = np.zeros(map_grid.shape, dtype=np.float32)
self.obstacle_matrix = map_grid.copy()
self.concentration_matrix = np.zeros(map_grid.shape, dtype=np.float32)
self.airflow_x_matrix = np.zeros(map_grid.shape, dtype=np.float32)
self.airflow_y_matrix = np.zeros(map_grid.shape, dtype=np.float32)
self.agent_position = [initial_position[0], initial_position[1]]
self.concentration_limit = concentration_limit
def get_observation(self):
trajectory_matrix_pad = np.pad(self.trajectory_matrix, (5, 5), 'constant', constant_values=0)
obstacle_matrix_pad = np.pad(self.obstacle_matrix, (5, 5), 'constant', constant_values=1)
concentration_matrix_pad = np.pad(self.concentration_matrix, (5, 5), 'constant', constant_values=0)
airflow_x_matrix_pad = np.pad(self.airflow_x_matrix, (5, 5), 'constant', constant_values=0)
airflow_y_matrix_pad = np.pad(self.airflow_y_matrix, (5, 5), 'constant', constant_values=0)
observation_matrix = np.stack((trajectory_matrix_pad,
obstacle_matrix_pad,
concentration_matrix_pad,
airflow_x_matrix_pad,
airflow_y_matrix_pad), axis=0)
observation_matrix = observation_matrix[:, self.agent_position[0]:self.agent_position[0] + 11, self.agent_position[1]:self.agent_position[1] + 11]
# shape: (1, 5, 11, 11)
return np.expand_dims(observation_matrix, 0)
def move_agent(self, action):
if action == 0:
self.agent_position[0] -= 1
if action == 1:
self.agent_position[0] += 1
if action == 2:
self.agent_position[1] -= 1
if action == 3:
self.agent_position[1] += 1
def get_agent_position(self):
return self.agent_position
def set_observation_data(self, concentration, airflow_x, airflow_y):
if concentration > self.concentration_limit:
concentration = 1.0
else:
concentration = concentration / self.concentration_limit
self.trajectory_matrix[self.agent_position[0], self.agent_position[1]] += 1
self.concentration_matrix[self.agent_position[0], self.agent_position[1]] = concentration
self.airflow_x_matrix[self.agent_position[0], self.agent_position[1]] = airflow_x
self.airflow_y_matrix[self.agent_position[0], self.agent_position[1]] = airflow_y
# actionlib
class MoveBase:
def __init__(self) -> None:
self.move_base_client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
self.move_base_client.wait_for_server()
def move_to(self, x, y):
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = x
goal.target_pose.pose.position.y = y
goal.target_pose.pose.orientation.w = 1.0
self.move_base_client.send_goal(goal)
wait = self.move_base_client.wait_for_result()
if not wait:
rospy.logerr("Action server not available!")
rospy.signal_shutdown("Action server not available!")
else:
return self.move_base_client.get_result()
# sensors
class Sensors:
def __init__(self) -> None:
rospy.Subscriber("/PID/Sensor_reading", gas_sensor, self.gas_sensor_callback)
rospy.Subscriber("/Anemometer/WindSensor_reading", anemometer, self.anemometer_callback)
self.concentration = []
self.airflow_x = []
self.airflow_y = []
def gas_sensor_callback(self, msg):
self.concentration.append(msg.raw)
def anemometer_callback(self, msg):
self.airflow_x.append(msg.wind_speed * np.cos(msg.wind_direction))
self.airflow_y.append(-msg.wind_speed * np.sin(msg.wind_direction))
def read_all(self):
        self.concentration.clear()
self.airflow_x.clear()
self.airflow_y.clear()
rospy.sleep(1.0)
return (sum(self.concentration) / len(self.concentration), sum(self.airflow_x) / len(self.airflow_x), sum(self.airflow_y) / len(self.airflow_y))
def to_map(x, y, grid_x=9, grid_y=16, world_x=4.48, world_y=8.08):
x = (x + 0.5) * world_x / grid_x
y = (y + 0.5) * world_y / grid_y
return x, y
class DataLogger:
def __init__(self) -> None:
self.concentration = []
self.airflow_x = []
self.airflow_y = []
self.position_x = []
self.position_y = []
def log(self, concentration, airflow_x, airflow_y, position_x, position_y):
self.concentration.append(concentration)
self.airflow_x.append(airflow_x)
self.airflow_y.append(airflow_y)
self.position_x.append(position_x)
self.position_y.append(position_y)
def plot(self):
a = np.array([self.position_x, self.position_y, self.concentration, self.airflow_x, self.airflow_y])
a = a.T
np.savetxt(output_path.as_posix(), a, delimiter=",")
plt.figure()
plt.plot(self.concentration)
plt.xlabel('Step')
plt.ylabel('Concentration (ppm)')
plt.show()
if __name__ == '__main__':
rospy.init_node('pydog_gsl_node', anonymous=False)
robot = MoveBase()
obs = ObservationMatrix(map_grid, initial_position)
policy = Policy(model_path.as_posix())
sensors = Sensors()
logger = DataLogger()
print(f'Move to initial position.')
target_position = initial_position
robot.move_to(*to_map(*target_position))
print(f'Start gas source localization task.')
step_count = 0
while not rospy.is_shutdown():
step_count += 1
# read sensors
data = sensors.read_all()
# log data
logger.log(*data, *target_position)
print(f'Step: {step_count:3d}'.center(30, '*'))
print(f'Sensors Data: {data}')
# create observation matrix
obs.set_observation_data(*data)
observation_matrix = obs.get_observation()
# select action
action = policy(observation_matrix)
print(f'Selected Action: {policy[action]}')
# move robot
obs.move_agent(action)
target_position = obs.get_agent_position()
print(f'Target Position: {target_position}')
robot.move_to(*to_map(*target_position))
if target_position[0] == source_position[0] and target_position[1] == source_position[1]:
print(f'Find gas source at step {step_count}.')
break
logger.plot()
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/iliadobrusin/catkin_ws/src/roc_node/msg/Motion.msg;/home/iliadobrusin/catkin_ws/src/roc_node/msg/Movement.msg;/home/iliadobrusin/catkin_ws/src/roc_node/msg/Command.msg"
services_str = ""
pkg_name = "roc"
dependencies_str = "std_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "roc;/home/iliadobrusin/catkin_ws/src/roc_node/msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
from language import *
from generator import *
|
import tensorflow as tf
import sys
from tensorflow.python.platform import gfile
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.util import compat
model_filename ='./out/opt_mnist_convnet.pb'
with tf.Session() as sess:
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
g_in = tf.import_graph_def(graph_def)
LOGDIR='/tmp/tensorflow/android'
train_writer = tf.summary.FileWriter(LOGDIR)
train_writer.add_graph(sess.graph)
|
# Copyright(C) 1999-2020 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of NTESS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from paraview.simple import *
from phactori import *
#phactori_combine_to_single_python_file_subpiece_begin_1
class PhactoriParaviewMultiBlockRecursionControl:
"""see PhactoriDoParaviewMultiblockRecursion();
mOperationPerBlock should be set to a method
which takes 1 parameter, mParameters should be set to the parameter
instance which will be passed to the mOperationPerBlock call"""
def __init__(self):
self.mOperationToDoPerBlock = None
self.mParameters = None
def PhactroiParaviewDoMethodPerBlockRecurse1(inInputCsData, inRecursionControlItem):
"""PhactroiParaviewDoMethodPerBlockRecurse1 is a generic method for doing recursion through
the multiblock dataset and doing something (a callback) on a per leaf block
basis. Called by PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter which got clientside data and calls
itself on internal nodes and calls
inRecursionControlItem.mOperationToDoPerBlock on leaf block nodes"""
if PhactoriDbg(100):
myDebugPrint3('PhactroiParaviewDoMethodPerBlockRecurse1 entered\n', 100)
icsdClassname = inInputCsData.GetClassName()
if icsdClassname == "vtkMultiBlockDataSet" or \
icsdClassname == "vtkExodusIIMultiBlockDataSet":
myDebugPrint3('recursing: ' + icsdClassname + '\n')
numBlocks = inInputCsData.GetNumberOfBlocks()
for ii in range(0, numBlocks):
oneBlock = inInputCsData.GetBlock(ii)
if PhactoriDbg(100):
DebugPrintBlockName(inInputCsData, ii)
if(oneBlock != None):
PhactroiParaviewDoMethodPerBlockRecurse1(oneBlock, inRecursionControlItem)
else:
inRecursionControlItem.mOperationToDoPerBlock(inRecursionControlItem, inInputCsData,
inRecursionControlItem.mParameters)
if PhactoriDbg(100):
myDebugPrint3('PhactroiParaviewDoMethodPerBlockRecurse1 returning\n', 100)
def DebugPrintBlockName(csData, blockIndex):
if PhactoriDbg(100):
oneBlock = csData.GetBlock(blockIndex)
if oneBlock != None:
oneBlockMetaData = csData.GetMetaData(blockIndex)
if oneBlockMetaData != None:
#myDebugPrint3("oneBlockMetaData: " + str(oneBlockMetaData) + "\n")
theBlockName = oneBlockMetaData.Get(vtk.vtkCompositeDataSet.NAME())
myDebugPrint3("block index, name: " + str(blockIndex) + ", " + str(theBlockName) + "\n")
else:
myDebugPrint3("oneBlockMetaData is None (1)\n")
else:
myDebugPrint3("this block is None, now check meta data\n")
oneBlockMetaData = csData.GetMetaData(blockIndex)
if oneBlockMetaData != None:
#myDebugPrint3("oneBlockMetaData: " + str(oneBlockMetaData) + "\n")
theBlockName = oneBlockMetaData.Get(vtk.vtkCompositeDataSet.NAME())
myDebugPrint3("block index, name (2): " + str(blockIndex) + ", " + str(theBlockName) + "\n")
else:
myDebugPrint3("oneBlockMetaData is None (2)\n")
def PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter(inRecursionControlItem, inPvFilter):
"""grab client side data object, and use that to do recursion"""
pvClientSideData = inPvFilter.GetClientSideObject().GetOutputDataObject(0)
if pvClientSideData == None:
if PhactoriDbg(100):
myDebugPrint3(
'DoMethodPerBlock: pvClientSideData is None, returning',100)
return
PhactroiParaviewDoMethodPerBlockRecurse1(pvClientSideData, inRecursionControlItem)
class PrintPointAndCellArrayInformationRecursionParams:
def __init__(self):
self.leafVisitCount = 0
def PrintPointAndCellArrayInformationInBlock(recursionObject, inInputCsData, inParameters):
inParameters.leafVisitCount += 1
myDebugPrint3("inParameters.leafVisitCount " + str(inParameters.leafVisitCount) + "\n")
celldata = inInputCsData.GetCellData()
numcellarrays = celldata.GetNumberOfArrays()
myDebugPrint3("number of cell data arrays: " + str(numcellarrays) + "\n")
numcelltuples = celldata.GetNumberOfTuples()
myDebugPrint3("number of cell data tuples: " + str(numcelltuples) + "\n")
for ii in range(0, numcellarrays):
myDebugPrint3(str(ii) + ": " + str(celldata.GetArray(ii).GetNumberOfComponents()) + ": " + str(celldata.GetArrayName(ii)) + "\n")
pointdata = inInputCsData.GetPointData()
numpointarrays = pointdata.GetNumberOfArrays()
myDebugPrint3("number of point data arrays: " + str(numpointarrays) + "\n")
numpointtuples = pointdata.GetNumberOfTuples()
myDebugPrint3("number of point data tuples: " + str(numpointtuples) + "\n")
for ii in range(0, numpointarrays):
myDebugPrint3(str(ii) + ": " + str(pointdata.GetArray(ii).GetNumberOfComponents()) + ": " + str(pointdata.GetArrayName(ii)) + "\n")
def RecursivelyPrintPointAndCellArrayInformation(pvFilter):
if PhactoriDbg(100):
myDebugPrint3("RecursivelyPrintPointAndCellArrayInformation entered\n")
recursionParams = PrintPointAndCellArrayInformationRecursionParams()
recursionObj = PhactoriParaviewMultiBlockRecursionControl()
recursionObj.mParameters = recursionParams
recursionObj.mOperationToDoPerBlock = PrintPointAndCellArrayInformationInBlock
PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter(recursionObj, pvFilter)
if PhactoriDbg(100):
myDebugPrint3("RecursivelyPrintPointAndCellArrayInformation returning\n")
#phactori_combine_to_single_python_file_subpiece_end_1
|
import getpass
import os
import re
import subprocess
from scripts.config import *
def accept_warning(s):
c = ''
d = {'Y': True, 'y': True, 'N': False, 'n': False}
    while c not in d:
c = input('Warning: %s \n Y/N? ' % s)
return d[c]
def get_env():
domain = input("Input domain : ")
usr = getpass.getuser()
loc = os.getcwd()+"/"+domain
# cmd = "mkdir "+domain
# print(cmd)
# subprocess.call(['sudo', 'mkdir', domain])
if not os.path.exists(loc):
try:
os.makedirs(loc)
        except OSError:
            if accept_warning("You have no write permission for the current location. Would you like to take ownership of it?"):
                subprocess.call(['sudo', 'chown', '-R', usr+":"+usr, './'])
                os.makedirs(loc)
            else:
                print("You have no privilege to create the project directory!")
os._exit(0)
return domain,usr,loc
def add_env(**args):
env = os.path.dirname(os.path.dirname(loc))
print(args)
with open(env+'/.env', 'a') as file:
for i,j in args.items():
tmp = i+'='+j
print(tmp)
            file.write(tmp + "\n")
    return False
def ssl_file_gen(domain,usr,loc):
if accept_warning("Do you have ssl cert?"):
return
    key = input("Cloudflare API token : ")
email = input("Cloudflare email : ")
with open(SSL, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{domain}}', '.'.join(domain.split('.')[-2:]), fds)
fce = re.sub(r'{{EMAIL}}', email, fcd)
res = re.sub(r'{{KEY}}', key, fce)
with open(domain+"/"+domain+'.sh', 'w') as ssl_sh:
ssl_sh.write(res)
ssl_sh.close()
fh.close()
# with open(domain+'/start.sh', 'a') as file:
# cmd = "sudo mkdir /etc/nginx/certs"
# file.write(cmd)
# file.close()
# if not os.path.exists("/etc/nginx/certs"):
# os.makedirs("/etc/nginx/certs")
# os.chmod(domain+'.sh', 0o700)
# # os.system(domain+'.sh')
# # os.remove(domain+'.sh')
    print("-0- SSL script: {} created successfully".format(domain+"/"+domain+'.sh'))
def docker_file_gen(domain,usr,loc):
if accept_warning("Do you have database already?"):
return
with open(DOCKER, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{domain}}', domain, fds)
fcu = re.sub(r'{{usr}}', usr, fcd)
res = re.sub(r'{{passwd}}', loc+domain+usr, fcu)
with open(domain+"/"+domain+'.run', 'w') as docker_run:
docker_run.write(res)
docker_run.close()
fh.close()
# os.chmod(domain+'.run', 0o700)
# os.system(domain+'.run')
# os.remove(domain+'.run')
#add DATABASE_URL into .env
# conf = "postgres://{}:{}@172.17.0.1:5432/{}".format(usr, loc+domain+usr, domain)
# add_env(DATABASE_URL=conf)
    print("-1- Docker config script: {} created successfully".format(domain+"/"+domain+'.run'))
    # print("-1- Environment file : {} added successfully".format("app/.env"))
def uwsgi_file_gen(domain,usr,loc):
env = os.path.dirname(os.path.dirname(loc))
with open(uWSGI, 'r') as fh:
fds = fh.read()
fce = re.sub(r'{{env}}',env+"/"+domain,fds)
fcu = re.sub(r'{{usr}}',usr,fce)
res = re.sub(r'{{loc}}',loc,fcu)
with open(domain+"/"+domain+'.ini', 'w') as uwsgi_ini:
uwsgi_ini.write(res)
uwsgi_ini.close()
fh.close()
    print("-2- uwsgi config file: {} created successfully".format(domain+"/"+domain+'.ini'))
#static
def nginx_file_gen(domain,usr,loc):
with open(NGINX, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{domain}}', domain, fds)
res = re.sub(r'{{loc}}', loc, fcd)
with open(domain+"/"+domain+'.conf', 'w') as nginx_conf:
nginx_conf.write(res)
nginx_conf.close()
fh.close()
    print("-3- Nginx config file: {} created successfully".format(domain+"/"+domain+'.conf'))
#static
def service_file_gen(domain,usr,loc):
with open(SERVICE, "r") as fh:
fds = fh.read()
fcd = re.sub(r'{{domain}}', domain, fds)
fcu = re.sub(r'{{usr}}', usr, fcd)
res = re.sub(r'{{loc}}', loc, fcu)
with open(domain+"/"+domain+'.service', 'w') as confservice:
confservice.write(res)
confservice.close()
    # reload daemon
    fh.close()
    print("-4- Systemd service file : {} created successfully".format(domain+"/"+domain+'.service'))
def config_files_gen(domain,usr,loc):
ssl_file_gen(domain,usr,loc)
docker_file_gen(domain,usr,loc)
uwsgi_file_gen(domain,usr,loc)
nginx_file_gen(domain,usr,loc)
service_file_gen(domain,usr,loc)
def script_files_gen(domain,usr,loc):
cmd =[]
files = loc+"/"+domain
c = None
if os.path.exists(files+'.sh'):
c = "sudo mkdir /etc/nginx/certs"
c1 = "sudo "+files+'.sh'
cmd.append(c)
cmd.append(c1)
if os.path.exists(files+'.run'):
c = "sudo "+files+'.run'
cmd.append(c)
if os.path.exists(files+'.conf'):
c = "sudo cp "+files+'.conf '+ NGINX_CONF1
c1 = "sudo cp "+files+'.conf '+ NGINX_CONF2
c2 = "sudo nginx -s reload"
cmd.append(c)
cmd.append(c1)
cmd.append(c2)
if os.path.exists(files+'.service'):
c = "sudo cp "+files+'.service ' + SYSTEMD_CONF
c1 = "sudo systemctl enable "+domain+'.service'
c2 = "sudo systemctl start "+domain+'.service'
cmd.append(c)
cmd.append(c1)
cmd.append(c2)
with open(loc+'/start.sh', 'w') as file:
for c in cmd:
file.write(c+"\n")
file.close()
def script_files_run(domain, usr, loc):
subprocess.call(['sudo', loc+'/start.sh'])
if __name__ == '__main__':
domain,usr,loc = get_env()
config_files_gen(domain, usr, loc)
# script_files_gen(domain, usr, loc)
# script_files_run(domain, usr, loc)
# add_uwsgi(usr,loc)
# add_nginx(loc)
# add_service(usr,loc)
|
import arm
if not arm.is_reload(__name__):
arm.enable_reload(__name__)
redraw_ui = False
target = 'krom'
last_target = 'krom'
export_gapi = ''
last_resx = 0
last_resy = 0
last_scene = ''
last_world_defs = ''
proc_play = None
proc_build = None
proc_publish_build = None
mod_scripts = []
is_export = False
is_play = False
is_publish = False
|
"""Amazon SQS queue implementation."""
from __future__ import annotations
from vine import transform
from .message import AsyncMessage
__all__ = ['AsyncQueue']
def list_first(rs):
    """Return the single item of a one-element list, or None otherwise."""
return rs[0] if len(rs) == 1 else None
class AsyncQueue:
"""Async SQS Queue."""
def __init__(self, connection=None, url=None, message_class=AsyncMessage):
self.connection = connection
self.url = url
self.message_class = message_class
self.visibility_timeout = None
def _NA(self, *args, **kwargs):
raise NotImplementedError()
count_slow = dump = save_to_file = save_to_filename = save = \
save_to_s3 = load_from_s3 = load_from_file = load_from_filename = \
load = clear = _NA
def get_attributes(self, attributes='All', callback=None):
return self.connection.get_queue_attributes(
self, attributes, callback,
)
def set_attribute(self, attribute, value, callback=None):
return self.connection.set_queue_attribute(
self, attribute, value, callback,
)
def get_timeout(self, callback=None, _attr='VisibilityTimeout'):
return self.get_attributes(
_attr, transform(
self._coerce_field_value, callback, _attr, int,
),
)
def _coerce_field_value(self, key, type, response):
return type(response[key])
def set_timeout(self, visibility_timeout, callback=None):
return self.set_attribute(
'VisibilityTimeout', visibility_timeout,
transform(
self._on_timeout_set, callback,
)
)
def _on_timeout_set(self, visibility_timeout):
if visibility_timeout:
self.visibility_timeout = visibility_timeout
return self.visibility_timeout
def add_permission(self, label, aws_account_id, action_name,
callback=None):
return self.connection.add_permission(
self, label, aws_account_id, action_name, callback,
)
def remove_permission(self, label, callback=None):
return self.connection.remove_permission(self, label, callback)
def read(self, visibility_timeout=None, wait_time_seconds=None,
callback=None):
return self.get_messages(
1, visibility_timeout,
wait_time_seconds=wait_time_seconds,
callback=transform(list_first, callback),
)
def write(self, message, delay_seconds=None, callback=None):
return self.connection.send_message(
self, message.get_body_encoded(), delay_seconds,
callback=transform(self._on_message_sent, callback, message),
)
def write_batch(self, messages, callback=None):
return self.connection.send_message_batch(
self, messages, callback=callback,
)
def _on_message_sent(self, orig_message, new_message):
orig_message.id = new_message.id
orig_message.md5 = new_message.md5
return new_message
def get_messages(self, num_messages=1, visibility_timeout=None,
attributes=None, wait_time_seconds=None, callback=None):
return self.connection.receive_message(
self, number_messages=num_messages,
visibility_timeout=visibility_timeout,
attributes=attributes,
wait_time_seconds=wait_time_seconds,
callback=callback,
)
def delete_message(self, message, callback=None):
return self.connection.delete_message(self, message, callback)
def delete_message_batch(self, messages, callback=None):
return self.connection.delete_message_batch(
self, messages, callback=callback,
)
def change_message_visibility_batch(self, messages, callback=None):
return self.connection.change_message_visibility_batch(
self, messages, callback=callback,
)
def delete(self, callback=None):
return self.connection.delete_queue(self, callback=callback)
def count(self, page_size=10, vtimeout=10, callback=None,
_attr='ApproximateNumberOfMessages'):
return self.get_attributes(
_attr, callback=transform(
self._coerce_field_value, callback, _attr, int,
),
)
|
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from setuptools import setup, find_packages
setup(
name='ctypes_third_party_test',
version='0.0.1',
packages=find_packages(),
# Declare one shared lib at the top-level directory (denoted by '').
data_files=[('', ['libasdf-cpp_ctypes-with-third-party.so'])],
)
|
from celery import shared_task
from django.conf import settings
from .models import DiscourseUser, DiscourseGroup
from .request import DiscourseRequest
from http.client import responses
import logging
logger = logging.getLogger(__name__)
@shared_task
def sync_discourse_users():
for discourse_user in DiscourseUser.objects.all():
sync_discourse_user_groups.apply_async(
args=[discourse_user.external_id])
@shared_task
def sync_discourse_groups():
discourse_request = DiscourseRequest.get_instance()
response = discourse_request.get_groups()
if responses[response.status_code] != "OK":
logger.error("Error calling Discourse API: %s" % response.json())
return
discourse_remote_groups = response.json()['groups']
discourse_local_groups = DiscourseGroup.objects.all()
discourse_remote_group_ids = set([group['id']
for group in discourse_remote_groups])
discourse_local_group_ids = set(DiscourseGroup.objects.all(
).values_list('external_id', flat=True))
discourse_group_ids_to_add = discourse_remote_group_ids - discourse_local_group_ids
discourse_group_ids_to_remove = discourse_local_group_ids - discourse_remote_group_ids
for discourse_group in discourse_remote_groups:
if discourse_group['id'] in discourse_group_ids_to_add:
local_discourse_group = DiscourseGroup.objects.get_or_create(
external_id=discourse_group['id'])[0]
local_discourse_group.name = discourse_group['name']
local_discourse_group.save()
for discourse_group_id in discourse_group_ids_to_remove:
DiscourseGroup.objects.get(external_id=discourse_group_id).delete()
@shared_task
def sync_discourse_user_groups(discourse_user_id):
discourse_request = DiscourseRequest.get_instance()
discourse_user = DiscourseUser.objects.get(external_id=discourse_user_id)
response = discourse_request.get_discourse_user(discourse_user_id)
    if responses[response.status_code] != "OK":
        logger.error("Error calling Discourse API: %s" % response.json())
        return
    discourse_remote_user_groups = response.json()['user']['groups']
discourse_local_user_groups = discourse_user.groups.all()
discourse_remote_user_group_ids = set([group['id']
for group in discourse_remote_user_groups])
discourse_local_user_group_ids = set(
discourse_user.groups.all().values_list('external_id', flat=True))
# test value DJANGO_DISCOURSE_REMOTE_PRIORITY in settings, but make it optional
try:
if settings.DJANGO_DISCOURSE_REMOTE_PRIORITY:
remote_priority = True
else:
remote_priority = False
except AttributeError:
remote_priority = False
# build lists based on priority
if remote_priority:
discourse_groups_to_add = discourse_remote_user_group_ids - \
discourse_local_user_group_ids
discourse_groups_to_remove = discourse_local_user_group_ids - \
discourse_remote_user_group_ids
else:
discourse_groups_to_add = discourse_local_user_group_ids - \
discourse_remote_user_group_ids
discourse_groups_to_remove = discourse_remote_user_group_ids - \
discourse_local_user_group_ids
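    # Illustrative note (not part of the task logic itself): with remote priority the
    # Discourse-side group set is treated as the source of truth, e.g. for
    # remote = {1, 2, 3} and local = {2, 4} this gives to_add = {1, 3} and
    # to_remove = {4}; with local priority the roles of the two sets are swapped.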
# action based on priority
if remote_priority:
for discourse_group_id in discourse_groups_to_add:
discourse_group = DiscourseGroup.objects.get(
external_id=discourse_group_id)
            discourse_user.groups.add(discourse_group)
if discourse_group.group and discourse_group.group not in discourse_user.user.groups.all():
discourse_user.user.groups.add(discourse_group.group)
for discourse_group_id in discourse_groups_to_remove:
discourse_group = DiscourseGroup.objects.get(
external_id=discourse_group_id)
            discourse_user.groups.remove(discourse_group)
if discourse_group.group and discourse_group.group in discourse_user.user.groups.all():
discourse_user.user.groups.remove(discourse_group.group)
else:
for discourse_group_id in discourse_groups_to_add:
add_discourse_group_to_discourse_user.apply_async(
args=[discourse_group_id, discourse_user.external_id])
for discourse_group_id in discourse_groups_to_remove:
remove_discourse_group_from_discourse_user.apply_async(
args=[discourse_group_id, discourse_user.external_id])
@shared_task
def add_discourse_group_to_discourse_user(discourse_group_id, discourse_user_id):
discourse_request = DiscourseRequest.get_instance()
discourse_user = DiscourseUser.objects.get(external_id=discourse_user_id)
discourse_group = DiscourseGroup.objects.get_or_create(
external_id=discourse_group_id)[0]
response = discourse_request.add_group_to_discourse_user(
discourse_group_id, discourse_user.username)
if responses[response.status_code] == "OK":
discourse_user.groups.add(discourse_group)
if discourse_group.group and discourse_group.group not in discourse_user.user.groups.all():
discourse_user.user.groups.add(discourse_group.group)
elif responses[response.status_code] == "Too Many Requests":
logger.warning(
"Ratelimit calling Discourse API, retrying: %s" % response.json())
add_discourse_group_to_discourse_user.apply_async(
args=[discourse_group_id, discourse_user_id], countdown=600)
else:
logger.error("Failed to call Discourse API: %s" % response.json())
@shared_task
def remove_discourse_group_from_discourse_user(discourse_group_id, discourse_user_id):
discourse_request = DiscourseRequest.get_instance()
discourse_user = DiscourseUser.objects.get(external_id=discourse_user_id)
discourse_group = DiscourseGroup.objects.get_or_create(
external_id=discourse_group_id)[0]
response = discourse_request.remove_group_from_discourse_user(
discourse_group_id, discourse_user.username)
if responses[response.status_code] == "OK":
discourse_user.groups.remove(discourse_group)
if discourse_group.group and discourse_group.group in discourse_user.user.groups.all():
discourse_user.user.groups.remove(discourse_group.group)
elif responses[response.status_code] == "Too Many Requests":
logger.warning(
"Ratelimit calling Discourse API, retrying: %s" % response.json())
remove_discourse_group_from_discourse_user.apply_async(
args=[discourse_group_id, discourse_user_id], countdown=600)
else:
logger.error("Failed to call Discourse API: %s" % response.json())
|
import numpy as np
from .base import IndependenceTest
from ._utils import _CheckInputs
from . import Dcorr
from .._utils import gaussian, check_xy_distmat, chi2_approx
class Hsic(IndependenceTest):
r"""
Class for calculating the Hsic test statistic and p-value.
Hsic is a kernel based independence test and is a way to measure
multivariate nonlinear associations given a specified kernel [#1Hsic]_.
The default choice is the Gaussian kernel, which uses the median distance
as the bandwidth, which is a characteristic kernel that guarantees that
Hsic is a consistent test [#1Hsic]_ [#2Hsic]_.
Parameters
----------
compute_kernel : callable(), optional (default: rbf kernel)
A function that computes the similarity among the samples within each
data matrix. Set to `None` if `x` and `y` are already similarity
matrices. To call a custom function, either create the distance matrix
before-hand or create a function of the form ``compute_kernel(x)``
where `x` is the data matrix for which pairwise similarties are
calculated.
bias : bool (default: False)
Whether or not to use the biased or unbiased test statistics.
See Also
--------
Dcorr : Distance correlation test statistic and p-value.
HHG : Heller Heller Gorfine test statistic and p-value.
Notes
-----
The statistic can be derived as follows [#1Hsic]_:
Let :math:`x` and :math:`y` be :math:`(n, p)` samples of random variables
:math:`X` and :math:`Y`. Let :math:`K^x` be the :math:`n \times n` kernel
similarity matrix of :math:`x` and :math:`D^y` be the :math:`n \times n`
be the kernel similarity matrix of :math:`y`. The Hsic statistic is,
.. math::
\mathrm{Hsic}_n (x, y) = \frac{1}{n^2} \mathrm{tr} (K^x H K^y H)
where :math:`\mathrm{tr} (\cdot)` is the trace operator and :math:`H` is
defined as :math:`H = I - (1/n) J` where :math:`I` is the identity matrix
    and :math:`J` is a matrix of ones. The normalized version of Hsic is
.. math::
\mathrm{Hsic}_n (x, y) = \frac{\mathrm{Hsic}_n (x, y)}
{\sqrt{\mathrm{Hsic}_n (x, x)
\mathrm{Hsic}_n (y, y)}}
This version of Hsic is defined using the following centering process
where :math:`\mathbb{1}(\cdot)` is the indicator
function:
.. math::
C^x_{ij} = \left[ D^x_{ij} - \frac{1}{n-2} \sum_{t=1}^n D^x_{it}
- \frac{1}{n-2} \sum_{s=1}^n D^x_{sj}
+ \frac{1}{(n-1) (n-2)} \sum_{s,t=1}^n D^x_{st} \right]
\mathbb{1}_{i \neq j}
    and similarly for :math:`C^y`. Then, the unbiased Hsic is,
.. math::
\mathrm{UHsic}_n (x, y) = \frac{1}{n (n-3)} \mathrm{tr} (C^x C^y)
    The normalized version of this covariance is
.. math::
\mathrm{UHsic}_n (x, y) = \frac{\mathrm{UHsic}_n (x, y)}
{\sqrt{\mathrm{UHsic}_n (x, x)
\mathrm{UHsic}_n (y, y)}}
References
----------
.. [#1Hsic] Gretton, A., Fukumizu, K., Teo, C. H., Song, L., Schölkopf,
B., & Smola, A. J. (2008). A kernel statistical test of
independence. In *Advances in neural information processing
systems* (pp. 585-592).
    .. [#2Hsic] Gretton, A., & Györfi, L. (2010). Consistent nonparametric
tests of independence. *Journal of Machine Learning Research*,
11(Apr), 1391-1423.
"""
def __init__(self, compute_kernel=gaussian, bias=False):
# set statistic and p-value
self.compute_kernel = compute_kernel
self.is_kernel = False
if not compute_kernel:
self.is_kernel = True
self.bias = bias
IndependenceTest.__init__(self, compute_distance=compute_kernel)
def _statistic(self, x, y):
r"""
Helper function that calculates the Hsic test statistic.
Parameters
----------
x, y : ndarray
Input data matrices. `x` and `y` must have the same number of
samples. That is, the shapes must be `(n, p)` and `(n, q)` where
`n` is the number of samples and `p` and `q` are the number of
dimensions. Alternatively, `x` and `y` can be distance matrices,
where the shapes must both be `(n, n)`.
Returns
-------
stat : float
The computed Hsic statistic.
"""
distx = x
disty = y
if not self.is_kernel:
kernx = self.compute_kernel(x)
kerny = self.compute_kernel(y)
distx = 1 - kernx / np.max(kernx)
disty = 1 - kerny / np.max(kerny)
dcorr = Dcorr(compute_distance=None, bias=self.bias)
stat = dcorr._statistic(distx, disty)
self.stat = stat
return stat
def test(self, x, y, reps=1000, workers=1, auto=True):
r"""
Calculates the Hsic test statistic and p-value.
Parameters
----------
x, y : ndarray
Input data matrices. `x` and `y` must have the same number of
samples. That is, the shapes must be `(n, p)` and `(n, q)` where
`n` is the number of samples and `p` and `q` are the number of
dimensions. Alternatively, `x` and `y` can be distance matrices,
where the shapes must both be `(n, n)`.
reps : int, optional (default: 1000)
The number of replications used to estimate the null distribution
when using the permutation test used to calculate the p-value.
workers : int, optional (default: 1)
The number of cores to parallelize the p-value computation over.
Supply -1 to use all cores available to the Process.
        auto : bool (default: True)
            Automatically use the fast chi-squared approximation when the sample
            size is greater than 20. In that case the ``reps`` and ``workers``
            parameters are ignored.
Returns
-------
stat : float
The computed Hsic statistic.
pvalue : float
The computed Hsic p-value.
Examples
--------
>>> import numpy as np
>>> from hyppo.independence import Hsic
>>> x = np.arange(7)
>>> y = x
>>> stat, pvalue = Hsic().test(x, y)
>>> '%.1f, %.2f' % (stat, pvalue)
'1.0, 0.00'
The number of replications can give p-values with higher confidence
(greater alpha levels).
>>> import numpy as np
>>> from hyppo.independence import Hsic
>>> x = np.arange(7)
>>> y = x
>>> stat, pvalue = Hsic().test(x, y, reps=10000)
>>> '%.1f, %.2f' % (stat, pvalue)
'1.0, 0.00'
In addition, the inputs can be distance matrices. Using this is the,
same as before, except the ``compute_kernel`` parameter must be set
to ``None``.
>>> import numpy as np
>>> from hyppo.independence import Hsic
>>> x = np.ones((10, 10)) - np.identity(10)
>>> y = 2 * x
>>> hsic = Hsic(compute_kernel=None)
>>> stat, pvalue = hsic.test(x, y)
>>> '%.1f, %.2f' % (stat, pvalue)
'0.0, 1.00'
"""
check_input = _CheckInputs(
x, y, reps=reps, compute_distance=self.compute_kernel
)
x, y = check_input()
if self.is_kernel:
check_xy_distmat(x, y)
if auto and x.shape[0] > 20:
stat, pvalue = chi2_approx(self._statistic, x, y)
self.stat = stat
self.pvalue = pvalue
self.null_dist = None
else:
if not self.is_kernel:
x = self.compute_kernel(x, workers=workers)
y = self.compute_kernel(y, workers=workers)
self.is_kernel = True
stat, pvalue = super(Hsic, self).test(x, y, reps, workers)
return stat, pvalue
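# Illustrative sketch (not part of hyppo): computing the *biased* Hsic statistic
# directly from the formula quoted in the class docstring,
# Hsic_n(x, y) = (1/n^2) tr(K^x H K^y H) with H = I - (1/n) J, using a Gaussian
# kernel with a median-distance bandwidth. It relies on the ``np`` import above,
# assumes 2-D arrays of distinct samples, and the helper names are hypothetical.
def _median_gaussian_kernel(x):
    # pairwise squared distances and median-heuristic bandwidth
    d2 = np.sum((x[:, None, :] - x[None, :, :]) ** 2, axis=-1)
    bw = np.median(np.sqrt(d2[d2 > 0]))
    return np.exp(-d2 / (2 * bw ** 2))
def biased_hsic(x, y):
    n = x.shape[0]
    kx, ky = _median_gaussian_kernel(x), _median_gaussian_kernel(y)
    h = np.eye(n) - np.ones((n, n)) / n  # centering matrix H = I - (1/n) J
    return np.trace(kx @ h @ ky @ h) / n ** 2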
|
# CUPS Cloudprint - Print via Google Cloud Print
# Copyright (C) 2011 Simon Cadman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import sys
sys.path.insert(0, ".")
from cloudprintrequestor import CloudPrintRequestor
class MockRequestor(CloudPrintRequestor):
printers = []
def mockSearch(self, path, headers, data, boundary):
return json.dumps({'printers': self.printers})
def mockSubmit(self, path, headers, data, boundary):
if 'FAIL PAGE' in data:
result = {
'success': False,
'message': 'FAIL PAGE was in message'}
elif 'TEST PAGE WITH EXCEPTION' in data:
raise Exception("Test exception")
else:
result = {'success': True}
return json.dumps(result)
def mockPrinter(self, path, headers, data, boundary):
printername = path.split('=')[1].split('&')[0]
foundPrinter = None
for printer in self.printers:
if printer['id'] == printername:
foundPrinter = printer
break
if foundPrinter is None:
return json.dumps(None)
result = {'success': True, 'printers': [foundPrinter]}
return json.dumps(result)
def doRequest(self, path, headers=None, data=None, boundary=None):
if (path.startswith('search?')):
return json.loads(self.mockSearch(path, headers, data, boundary))
if (path.startswith('printer?')):
return json.loads(self.mockPrinter(path, headers, data, boundary))
if (path.startswith('submit')):
return json.loads(self.mockSubmit(path, headers, data, boundary))
return None
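# Hypothetical usage sketch (not part of the test helpers above): the mock answers
# the three CloudPrint paths it recognises and returns None for anything else.
if __name__ == '__main__':
    requestor = MockRequestor()
    requestor.printers = [{'id': 'printer-1', 'name': 'Test Printer'}]
    print(requestor.doRequest('search?q='))                        # all mock printers
    print(requestor.doRequest('printer?printerid=printer-1&a=b'))  # single printer lookup
    print(requestor.doRequest('jobs'))                             # unknown path -> None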
|
from __future__ import annotations
import logging
from typing import Dict, Iterator, List, Optional, Tuple
from botovod.exceptions import HandlerNotPassed
from .types import Attachment, Chat, Keyboard, Location, Message
class Agent:
def __init__(self):
self.botovod = None
self.running = False
self.name = None
self.logger = logging.getLogger(__name__)
def __repr__(self) -> str:
return self.__class__.__name__
def listen(self, headers: Dict[str, str], body: str, **scope) -> Tuple[int, Dict[str, str], str]:
self.logger.debug("Get request")
messages = self.parser(headers, body)
for chat, message in messages:
follower = None
if self.botovod.dbdriver:
follower = self.botovod.dbdriver.get_follower(self, chat)
if not follower:
follower = self.botovod.dbdriver.add_follower(self, chat)
for handler in self.botovod.handlers:
try:
handler(self, chat, message, follower, **scope)
except HandlerNotPassed:
continue
break
return self.responser(headers, body)
async def a_listen(self, headers: Dict[str, str], body: str, **scope) -> Tuple[int, Dict[str, str], str]:
self.logger.debug("Get updates")
messages = await self.a_parser(headers, body)
for chat, message in messages:
if self.botovod.dbdriver is not None:
follower = await self.botovod.dbdriver.a_get_follower(self, chat)
if follower is None:
follower = await self.botovod.dbdriver.a_add_follower(self, chat)
else:
follower = None
for handler in self.botovod.handlers:
try:
await handler(self, chat, message, follower, **scope)
except HandlerNotPassed:
continue
break
return await self.a_responser(headers, body)
def start(self):
raise NotImplementedError
async def a_start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
async def a_stop(self):
raise NotImplementedError
def parser(self, headers: Dict[str, str], body: str) -> List[Tuple[Chat, Message]]:
raise NotImplementedError
async def a_parser(self, headers: Dict[str, str], body: str) -> List[Tuple[Chat, Message]]:
raise NotImplementedError
def responser(self, headers: Dict[str, str], body: str) -> Tuple[int, Dict[str, str], str]:
raise NotImplementedError
async def a_responser(self, headers: Dict[str, str],
body: str) -> Tuple[int, Dict[str, str], str]:
raise NotImplementedError
def send_message(self, chat: Chat, text: Optional[str] = None,
images: Iterator[Attachment] = (),
audios: Iterator[Attachment] = (), documents: Iterator[Attachment] = (),
videos: Iterator[Attachment] = (), locations: Iterator[Location] = (),
keyboard: Optional[Keyboard] = None, **raw):
raise NotImplementedError
async def a_send_message(self, chat: Chat, text: Optional[str] = None,
images: Iterator[Attachment] = (), audios: Iterator[Attachment] = (),
documents: Iterator[Attachment] = (),
videos: Iterator[Attachment] = (),
locations: Iterator[Location] = (),
keyboard: Optional[Keyboard] = None,
**raw):
raise NotImplementedError
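# Minimal handler sketch (not part of botovod itself): a handler compatible with the
# dispatch loop in Agent.listen()/a_listen() above. A handler that does not want to
# consume the update raises HandlerNotPassed so the next handler is tried; this
# assumes Message exposes a ``text`` attribute, which is not shown in this module.
def echo_handler(agent: Agent, chat: Chat, message: Message, follower, **scope):
    if not getattr(message, "text", None):
        raise HandlerNotPassed
    agent.send_message(chat, text=message.text)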
|
import re
from time import sleep
from timeUtils import clock, elapsed
from ioUtils import saveFile, getFile
from fsUtils import setDir, isDir, mkDir, setFile, isFile, setSubFile
from fileUtils import getBaseFilename
from searchUtils import findSubPatternExt, findPatternExt, findExt
from strUtils import convertCurrency
from webUtils import getWebData, getHTML
from movieDB import movieDB
from os import getcwd
import operator
##############################################################################################################################
# Box Office
##############################################################################################################################
class wikipedia(movieDB):
def __init__(self, basedir=None):
self.name = "wikipedia"
movieDB.__init__(self, dbdir=self.name)
###########################################################################################################################
# Get Box Office Weekend Files
###########################################################################################################################
def downloadWikipediaYearlyData(self, year, outdir, debug=False):
base = "https://en.wikipedia.org/wiki/"
dmap = {}
val = str(int(year) - 1927)+"th_Academy_Awards"
val = val.replace("1th_", "1st_")
val = val.replace("2th_", "2nd_")
val = val.replace("3th_", "3rd_")
val = val.replace("11st_", "11th_")
val = val.replace("12nd_", "12th_")
val = val.replace("13rd_", "13th_")
try:
url = base+val
except:
print("Could not create url for",year)
return
savename = setFile(outdir, str(year)+".p")
if isFile(savename): return
if debug:
print("Downloading {0}".format(url))
getWebData(base=url, savename=savename, useSafari=False)
sleep(1)
def getWikipediaYearlyData(self, startYear = 1934, endYear = 2017, debug=False):
outdir = self.getDataDir()
if debug:
print("Data Directory: {0}".format(outdir))
if not isDir(outdir): mkDir(outdir)
if startYear < 1934:
raise ValueError("Must start at or after 1934")
years = range(int(startYear), int(endYear)+1)
for year in years:
self.downloadWikipediaYearlyData(year, outdir, debug)
###########################################################################################################################
# Parse Box Office Weekend Files
###########################################################################################################################
def reorderWikipediaOscarData(self, text, title):
reorders = ["Best Director", "Best Actress", "Best Actor",
"Best Supporting Actor", "Best Supporting Actress"]
for val in reorders:
if title.find(val) != -1:
if isinstance(text, list):
if len(text) >= 2:
text = [text[1], text[0]]
return text
elif len(text) == 1:
return text
else:
raise ValueError("Error reordering {0}".format(text))
return text
return text
def parseWikipediaOscarDataSpecial(self, ifile, debug = True):
print("HI: {0}".format(ifile))
htmldata = getFile(ifile)
bsdata = getHTML(htmldata)
data = {}
done = False
tables = bsdata.findAll("table", {"class": "wikitable"})
if debug:
print(" Found {0} tables".format(len(tables)))
for table in tables:
if done:
if debug:
print(" Breaking...")
break
## Get <th> data
ths = table.findAll("th")
thData = [x.find('a') for x in ths]
titles = [x.string for x in thData if x is not None]
if len(titles) == 0:
continue
## Get <tr> data
trData = []
trs = table.findAll("tr")
for tr in trs:
tds = tr.findAll("td")
for td in tds:
ul = td.find("ul")
if ul is not None:
trData.append(ul)
print(len(titles))
print(len(trData))
if len(titles) != len(trData):
print("Can not process this data!")
print("Titles: {0}: {1}".format(len(titles), titles))
print("Data: {0}".format(len(trData)))
return None
## Merge titles and data
for title,titleData in zip(titles,trData):
results = []
lis = titleData.findAll("li")
if debug:
print(" Found {0} entries".format(len(lis)))
for k,li in enumerate(lis):
text = []
if k == 0:
for lival in li.findAll("b"):
for ref in lival.findAll("a"):
text.append(ref.string)
else:
for ref in li.findAll("a"):
text.append(ref.string)
if len(text) == 0: continue
if len(text) > 2: text = [text[0], ", ".join(text[1:])]
text = self.reorderWikipediaOscarData(text, title)
results.append(text)
for k,result in enumerate(results):
if isinstance(result, list):
if len(result) == 1: results[k] = result[0]
data[title] = {}
data[title]["Winner"] = results[0]
data[title]["Nominees"] = results[1:]
if debug:
print(" Winner :",data[title]["Winner"])
print(" Nominees:",data[title]["Nominees"])
print("")
return data
def parseWikipediaOscarData(self, ifile, debug = False):
htmldata = getFile(ifile)
bsdata = getHTML(htmldata)
data = {}
done = False
tables = bsdata.findAll("table", {"class": "wikitable"})
if debug:
print(" Found {0} tables".format(len(tables)))
for table in tables:
if done:
if debug:
print(" Breaking...")
break
trs = table.findAll("tr")
if debug:
print(" Found {0} rows".format(len(trs)))
for i,tr in enumerate(trs):
if done:
if debug:
print(" Breaking...")
break
tds = tr.findAll("td")
if debug:
print(" Found {0} cols".format(len(tds)))
for j,td in enumerate(tds):
div = td.find("div")
                    if div is None:
                        continue
ref = div.find("a")
title = ref.string
data[title] = {}
if debug:
print(" Found {0}".format(title))
if data.get(title):
done = True
if debug:
print(" Already know about {0}".format(title))
print(" Breaking...")
break
results = []
ul = td.find("ul")
lis = ul.findAll("li")
#if debug:
# print(" Found {0} entries".format(len(lis)))
for k,li in enumerate(lis):
text = []
if k == 0:
for lival in li.findAll("b"):
for ref in lival.findAll("a"):
text.append(ref.string)
else:
for ref in li.findAll("a"):
text.append(ref.string)
if len(text) == 0: continue
if len(text) > 2: text = [text[0], ", ".join(text[1:])]
text = self.reorderWikipediaOscarData(text, title)
results.append(text)
if debug:
print("Summary\n {0}: {1}".format(title, results))
for k,result in enumerate(results):
if isinstance(result, list):
if len(result) == 1: results[k] = result[0]
data[title]["Winner"] = results[0]
data[title]["Nominees"] = results[1:]
if debug:
print(" Winner :",data[title]["Winner"])
print(" Nominees:",data[title]["Nominees"])
print("")
return data
def processWikipediaYearlyData(self, procYear = None, debug=False):
outdir = self.getDataDir()
if procYear == None:
files = findExt(outdir, ext=".p")
else:
files = findPatternExt(outdir, pattern=str(procYear), ext=".p")
from collections import OrderedDict
movies = OrderedDict()
for ifile in files:
if debug:
print("Processing {0}".format(ifile))
year = getBaseFilename(ifile)
#if year == "1985": continue
htmldata = getFile(ifile)
bsdata = getHTML(htmldata)
results = self.parseWikipediaOscarData(ifile, debug=False)
if len(results) == 0:
results = self.parseWikipediaOscarDataSpecial(ifile, debug=debug)
if len(results) == 0:
raise ValueError("No results for {0}".format(ifile))
for k,v in results.items():
print("====>",year,'\t',k)
print(" Winner :",results[k]["Winner"])
if debug:
print(" Nominees:",results[k]["Nominees"])
print("")
savename = setFile(self.getResultsDir(), "{0}.json".format(year))
print("Saving {0} wikipedia oscar data to {1}".format(year, savename))
saveFile(savename, results)
#yamldata.saveYaml(savename, movies)
|
#!/usr/bin/env python3
from setuptools import setup
# from distutils.core import setup
with open('README.md') as f:
long_description = f.read()
setup(
name = 'py-process',
packages = ['py_process'],
version = '0.0.1',
  description = 'engine process',
long_description=long_description,
long_description_content_type='text/markdown', # This is important!
url = 'https://github.com/FlavioLionelRita/py-process', # use the URL to the github repo
download_url = 'https://github.com/FlavioLionelRita/py-process/tarball/0.0.1',
keywords = ['process', 'business process', 'bpm', 'bpmn','engine' ],
classifiers = [],
author = 'Flavio Lionel Rita',
author_email = 'flaviolrita@hotmail.com'
)
|
import praw
import config
import json
import argparse
import datetime
reddit = praw.Reddit(client_id = config.client_id,
client_secret = config.client_secret,
username = config.username,
password = config.password,
user_agent = config.user_agent)
def get_parser():
parser = argparse.ArgumentParser(description="Reddit Downloader")
parser.add_argument("-u",
"--user",
dest="user",
help="User to retrieve data from",
default=None)
parser.add_argument("-l",
"--limit",
dest="limit",
help="Pull N number of submissions",
default=None)
parser.add_argument("-ul",
"--userlist",
dest="userlist",
help="List of users to pull information from",
default=None)
return parser
def prawAPI(user, lt):
print("Collecting {} submissions".format(user))
u = reddit.redditor(user)
com = reddit.redditor(user).comments.new(limit=lt)
sub = reddit.redditor(user).submissions.new(limit=lt)
redditData = {}
redditData[str(user)] = [{}]
redditData[str(user)][0]['0_comments'] = [{}]
redditData[str(user)][0]['1_submissions'] = [{}]
comCount=0
subCount=0
for comment in com:
comCount+=1
if comCount%10==0:
print("Found: {} Comments".format(comCount))
redditData[str(user)][0]['0_comments'][0][str(comment.id)] = [{}]
redditData[str(user)][0]['0_comments'][0][str(comment.id)][0]['0_Comment Submission'] = comment.submission.title
redditData[str(user)][0]['0_comments'][0][str(comment.id)][0]['1_Text'] = ''.join((comment.body)).encode('utf-8').strip()
redditData[str(user)][0]['0_comments'][0][str(comment.id)][0]['2_Subreddit'] = comment.subreddit.display_name
print("Found: {} Comments Total".format(comCount))
for submission in sub:
subCount+=1
if subCount%10==0:
print("Found: {} Submissions".format(subCount))
redditData[str(user)][0]['1_submissions'][0][str(submission)] = [{}]
redditData[str(user)][0]['1_submissions'][0][str(submission)][0]['0_Title'] = ''.join((submission.title)).encode('utf-8').strip()
        redditData[str(user)][0]['1_submissions'][0][str(submission)][0]['1_Text'] = ''.join((submission.selftext)).encode('utf-8').strip()
redditData[str(user)][0]['1_submissions'][0][str(submission)][0]['2_Subreddit'] = submission.subreddit.display_name
print("Found: {} Submissions".format(subCount))
    print()
print("Downloaded {} comments from user {}.".format(comCount, user))
print("Downloaded {} submissions from user {}.".format(subCount, user))
with open('User_Hold/reddit_'+user+'.json', 'w') as o:
o.write(json.dumps(redditData, sort_keys=True))
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
limit = args.limit
    if limit is not None:
        limit = int(limit)
    if args.user is None and args.userlist is None:
        parser.error("You have not specified a user or user list to retrieve submissions from.")
#users = args.user.split(',')
if args.userlist != None:
with open(args.userlist, "r") as inputF:
d = inputF.readlines()
for user in d:
prawAPI(user.strip(),limit)
else:
users = args.user.split(',')
for s in users:
prawAPI(s, limit)
|
#!/usr/bin/env python
import os
import sys
root_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
os.chdir(root_dir)
#using virtualenv's activate_this.py to reorder sys.path
activate_this = os.path.join(root_dir, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gazetteer.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from CauldronHost import *
from CauldronGroup import *
from CauldronGroupHostAssoc import *
from Base import Base
from flask import Flask
from flask import jsonify
from flask_marshmallow import Marshmallow
from Base import ma
from Base import app
def createEngine():
return create_engine('sqlite:///:memory:', echo=True)
def createSessionMaker(engine):
return sessionmaker(bind=engine)
engine = createEngine()
SessionMaker = createSessionMaker(engine)
@app.route("/createSchema")
def createSchema():
createDBSchema(engine)
return constructOKStatus()
@app.route("/host/<host>/<group>", methods=["DELETE"])
@app.route("/host/<host>", defaults = {'group' : None}, methods=["DELETE"])
def removeHost(host, group):
session = SessionMaker()
removeHostFromBase(session, hostName = host, groupName = group)
session.commit()
return constructOKStatus()
def removeHostFromBase(session, hostName, groupName = None):
return "Not yet implemented"
@app.route("/host/<host>/<group>", methods=["POST"])
@app.route("/host/<host>", defaults = {'group' : None}, methods=["POST"])
def addHost(host, group):
session = SessionMaker()
addHostToBase(session, hostName = host, groupName = group)
session.commit()
return constructOKStatus()
@app.route("/group/<group>", methods = ["POST"])
def addGroup(group):
session = SessionMaker()
addHostToBase(session, groupName = group);
session.commit()
return constructOKStatus()
@app.route("/host", defaults = {'group' : None}, methods=["GET"])
@app.route("/host/<group>", methods=["GET"])
def getHosts(group):
session = SessionMaker()
if group is not None:
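        # TODO: the group filter is not applied yet; both branches below currently
        # return every CauldronHost regardless of the requested group.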
result = session.query(CauldronHost).all()
else:
result = session.query(CauldronHost).all()
session.commit()
cauldronHostsSchema = CauldronHostSchema(many = True)
dump = cauldronHostsSchema.dump(result)
return jsonify(dump.data)
def createDBSchema(engine):
Base.metadata.create_all(engine);
def getSession(sessionMaker):
return sessionMaker();
def addHostToBase(session, hostName = None, groupName = None):
if hostName is None and groupName is not None:
group = CauldronGroup(name = groupName)
session.add(group)
elif hostName is not None and groupName is None:
host = CauldronHost(ip = hostName)
session.add(host)
elif hostName is not None and groupName is not None:
group = session.query(CauldronGroup).filter_by(name = groupName).first()
if(group is None):
addHostToBase(session, groupName = groupName)
return addHostToBase(session, hostName, groupName)
host = session.query(CauldronHost).filter_by(ip = hostName).first()
if(host is None):
addHostToBase(session, hostName = hostName)
return addHostToBase(session, hostName, groupName)
assoc = CauldronGroupHostAssoc()
assoc.group = group
assoc.host = host
host.groups.append(assoc)
session.add(host)
def constructOKStatus():
return '{ "status" : "OK"}'
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, port=7777)
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="atlas_horizon",
version="0.1",
author="Ihor Kyrylenko",
author_email="igornergyone@gmail.com",
description="A small program to work with .ATL files and making queries to HORIZON system",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pypa/atlas",
packages=setuptools.find_packages(where='src'),
package_dir={
'': 'src',
},
# include_package_data=True,
package_data={
"": ["*.dat", "*.txt"],
},
scripts=["run_atlas.py"],
py_modules=["MODULES/ATL",
"MODULES/Functions",
"MODULES/Observation"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"astropy>=4",
"astroquery>=0.4",
"pandas>=1.1",
"tqdm>=4",
],
python_requires='>=3.8',
)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""config
=========
The configuration is a JSON-formatted file. See ``_config_example``.
Applications may add whatever variables to the configuration file.
"""
import os
import json
import argparse
from typing import Dict, List, Optional, TypeVar, Union
__all__: List[str] = ['Config']
ConfigObject = TypeVar('ConfigObject', bound='Config')
_config_example: str = '''
{
"database": "sqlite://test.db",
"log": "/path/to/sbsearch.log",
"min_edge_length": 3e-4,
"max_edge_length": 0.017,
"uncertainty_ellipse": false,
"padding": 0,
"debug": false,
"arc_limit": 0.17,
"time_limit": 365
}
'''
class Config:
"""SBSearch configuration.
Controls database location, log file location, and ``SBSearch`` options.
Parameters are stored as object keys: ``Config['user']``,
``Config['log']``, etc.
Parameters
----------
**kwargs
Configuration parameters and values.
"""
# list of default files in order of precedence
DEFAULT_FILES: List[str] = [
'sbsearch.cfg',
'.sbsearch.cfg',
os.path.expanduser('~/.config/sbsearch.config')
]
DEFAULT_PARAMETERS: Dict[str, Union[str, float, int, bool]] = {
"database": "sqlite://",
"log": "/dev/null",
"min_edge_length": 3e-4,
"max_edge_length": 0.017,
"uncertainty_ellipse": False,
"padding": 0,
"arc_limit": 0.17,
"time_limit": 365,
"debug": False
}
def __init__(self, **kwargs) -> None:
self.config: Dict[str, Union[str, float, int, bool]] = (
self.DEFAULT_PARAMETERS.copy()
)
self.update(kwargs)
def __getitem__(self, k: str) -> Union[str, float, int, bool]:
return self.config[k]
@classmethod
def find_default_file(cls) -> Union[str, None]:
"""Find the default configuration file, if any exists.
Returns
-------
filename : string
The found file or ``None``, if one could not be found.
Searches the following locations in order of precedence:
{}
""".format('\n '.join(cls.DEFAULT_FILES))
filename: Union[str, None] = None
for filename in cls.DEFAULT_FILES:
if os.path.exists(filename):
break
else:
filename = None
return filename
@classmethod
def from_args(cls, args: argparse.Namespace, find_default: bool = False,
**updates) -> ConfigObject:
"""Initialize from command-line arguments.
Parameters
----------
args: argparse.Namespace
For example, a result from argparse.ArgumentParser.parse_args().
Options checked:
- --config for a configuration file,
- --option, where option is a configuration key (e.g., log,
min_edge_length), replacing spaces with underscores.
find_default: bool, optional
Attempt to find and read a default configuration file when no
config file name is provided in the options. See
``find_default_file``.
**updates
Any other configuration items. However, `args` will take
precedence.
Returns
-------
config: Config
"""
# config file treated separately
config_file: Union[str, None] = getattr(args, 'config')
if config_file is None:
config_file = cls.find_default_file()
k: str
for k in cls.DEFAULT_PARAMETERS:
v: Union[str, float, int, bool, None] = getattr(
args, k.replace(' ', '_'), None)
if v is not None:
updates[k] = v
if find_default:
return cls.from_file(config_file, **updates)
elif config_file is not None:
return cls.from_file(config_file, **updates)
else:
return cls(**updates)
    @classmethod
def from_file(cls, filename: Optional[str] = None, **kwargs) -> ConfigObject:
"""Initialize from JSON-formatted file.
Parameters
----------
filename: string, optional
Name of the file to read, or ``None`` for the default
file ( in order of precedence):
{}
**kwargs
Override saved parameters with these values.
Returns
-------
config: Config
""".format('\n '.join(cls.DEFAULT_FILES))
fn: str = filename
if fn is None:
for fn in cls.DEFAULT_FILES:
if os.path.exists(fn):
break
try:
with open(fn) as f:
config = json.load(f)
except IOError:
print(_config_example)
raise
config.update(**kwargs)
return cls(**config)
def update(self, kwargs) -> None:
self.config.update(kwargs)
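# Minimal usage sketch (assumes the JSON layout shown in _config_example above):
if __name__ == '__main__':
    # With no file on disk this falls back to DEFAULT_PARAMETERS; Config.from_file()
    # would instead search DEFAULT_FILES in order and, if none can be opened, print
    # _config_example and re-raise the error, as implemented above.
    config = Config(debug=True)
    print(config['database'], config['min_edge_length'], config['debug'])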
|
# Databricks notebook source
# MAGIC %md
# MAGIC
# MAGIC # Process Fastq files with trimmomatic
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Setup
# COMMAND ----------
# MAGIC %python
# MAGIC
# MAGIC """
# MAGIC Setup some parameters
# MAGIC """
# MAGIC
# MAGIC _silver_table = "silver_fastq_trimmed"
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Install Trimmomatic
# COMMAND ----------
# MAGIC %conda install -c bioconda trimmomatic
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # Trimmomatic UDF
# COMMAND ----------
# MAGIC %python
# MAGIC
# MAGIC import os, subprocess
# MAGIC import numpy as np
# MAGIC from pyspark.sql.functions import udf
# MAGIC from pyspark.sql.types import StringType
# MAGIC
# MAGIC @udf('string')
# MAGIC def run_trimmomatic(fastq : str,
# MAGIC fastq_name : str,
# MAGIC params : str) -> str:
# MAGIC """
# MAGIC Run Trimmomatic
# MAGIC
# MAGIC @param fastq | fastq string
# MAGIC @param fastq_name | fastq name
# MAGIC @param params | trimmomatic parameters
# MAGIC """
# MAGIC
# MAGIC # create a temporary file on the worker
# MAGIC _temp_file = f"/tmp/{fastq_name}.fastq"
# MAGIC _temp_out_file = _temp_file.replace(".fastq", ".trimmed.fastq")
# MAGIC
# MAGIC with open(_temp_file, "w") as _fastq:
# MAGIC _fastq.write(fastq)
# MAGIC try:
# MAGIC _res = subprocess.run(["trimmomatic", params,
# MAGIC _temp_file,
# MAGIC _temp_out_file],
# MAGIC stderr = subprocess.STDOUT,
# MAGIC check=True)
# MAGIC except Exception as err:
# MAGIC     return str(err)
# MAGIC
# MAGIC # Clean up
# MAGIC os.remove(_temp_file)
# MAGIC
# MAGIC # grab the output file results
# MAGIC with open(_temp_out_file, 'r') as _out_file:
# MAGIC _out_str = _out_file.read()
# MAGIC _out_file.close()
# MAGIC
# MAGIC   # remove the output file
# MAGIC os.remove(_temp_out_file)
# MAGIC
# MAGIC # return the output
# MAGIC return _out_str
# MAGIC
# MAGIC spark.udf.register("run_trimmomatic", run_trimmomatic)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # Run Trimmomatic
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Load Data
# COMMAND ----------
# MAGIC %python
# MAGIC
# MAGIC """
# MAGIC Load fastq table
# MAGIC """
# MAGIC
# MAGIC from pyspark.sql import DataFrame
# MAGIC
# MAGIC # load the dataframe
# MAGIC _df : DataFrame = (spark.read
# MAGIC .format("delta")
# MAGIC .table("bronze_fastq"))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Run Trimmomatic
# COMMAND ----------
# MAGIC %python
# MAGIC
# MAGIC """
# MAGIC Run Trimmomatic
# MAGIC """
# MAGIC
# MAGIC from pyspark.sql import DataFrame
# MAGIC from pyspark.sql.functions import col, lit
# MAGIC
# MAGIC # set some parameters for trimmomatic
# MAGIC _params = ""
# MAGIC
# MAGIC # run the trimmomatic udf
# MAGIC _df_trimmed : DataFrame = (_df.withColumn("params", lit(_params))
# MAGIC .withColumn("trimmed",
# MAGIC                                            run_trimmomatic(col("fastq"),
# MAGIC                                                            col("fastq_name"),
# MAGIC                                                            col("params")))
# MAGIC )
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Ingest to Silver Table
# COMMAND ----------
# MAGIC %python
# MAGIC
# MAGIC """
# MAGIC Ingest trimmomatic results to silver table
# MAGIC """
# MAGIC
# MAGIC try:
# MAGIC
# MAGIC # write to dataframe
# MAGIC (_df_trimmed.write
# MAGIC .format("delta")
# MAGIC .mode("append")
# MAGIC .saveAsTable(_silver_table)
# MAGIC )
# MAGIC
# MAGIC except Exception as err:
# MAGIC     raise ValueError(f"Failed to write to table {_silver_table}, error: {err}")
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Test Query
# COMMAND ----------
# MAGIC %sql
# MAGIC
# MAGIC --
# MAGIC -- Read from Silver table
# MAGIC --
# MAGIC
# MAGIC SELECT FASTQ_NAME
# MAGIC FROM silver_fastq_trimmed;
|
"""
Django settings for crsbi project.
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
For production settings see
https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
"""
import getpass
import logging
import os
from kdl_ldap.settings import * # noqa
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
PROJECT_NAME = 'crsbi'
PROJECT_TITLE = 'The Corpus of Romanesque Sculpture in Britain & Ireland'
SITE_ID = 1
# -----------------------------------------------------------------------------
# Core Settings
# https://docs.djangoproject.com/en/dev/ref/settings/#id6
# -----------------------------------------------------------------------------
ADMINS = ()
MANAGERS = ADMINS
ALLOWED_HOSTS = ['.crsbi.ac.uk', '.cch.kcl.ac.uk']
ACCOUNT_ACTIVATION_DAYS = 7
AUTH_PROFILE_MODULE = 'sculpture.UserProfile'
LOGIN_REDIRECT_URL = '/mycrsbi/'
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
# https://docs.djangoproject.com/en/dev/topics/cache/
# http://redis.io/topics/lru-cache
# http://niwibe.github.io/django-redis/
CACHE_REDIS_DATABASE = '0'
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://127.0.0.1:6379/' + CACHE_REDIS_DATABASE,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True
}
}
}
CSRF_COOKIE_SECURE = True
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# -----------------------------------------------------------------------------
# EMAIL SETTINGS
# -----------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = 'noreply@kcl.ac.uk'
EMAIL_HOST = 'smtp.cch.kcl.ac.uk'
EMAIL_PORT = 25
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_SUBJECT_PREFIX = '[Django {}] '.format(PROJECT_NAME)
EMAIL_USE_TLS = False
# Sender of error messages to ADMINS and MANAGERS
SERVER_EMAIL = DEFAULT_FROM_EMAIL
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
GRAPPELLI_INSTALLED = True
INSTALLED_APPS = [
PACKAGE_NAME_GRAPPELLI,
PACKAGE_NAME_FILEBROWSER,
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'django_comments',
'compressor',
'django.contrib.gis',
'iipimage',
'registration',
'mezzanine.boot',
'mezzanine.conf',
'mezzanine.core',
'mezzanine.generic',
'mezzanine.forms',
'mezzanine.pages',
'mezzanine.blog',
]
INSTALLED_APPS += [ # your project apps here
'sculpture',
'kdl_ldap',
'haystack',
]
INTERNAL_IPS = ['127.0.0.1']
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = False
USE_TZ = True
LOGGING_ROOT = os.path.join(BASE_DIR, 'logs')
LOGGING_LEVEL = logging.WARN
if not os.path.exists(LOGGING_ROOT):
os.makedirs(LOGGING_ROOT)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': ('%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s')
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': os.path.join(LOGGING_ROOT, 'django.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['file', 'mail_admins'],
'level': LOGGING_LEVEL,
'propagate': True
},
'crsbi': {
'handlers': ['file'],
'level': LOGGING_LEVEL,
'propagate': True
},
}
}
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'mezzanine.core.middleware.SitePermissionMiddleware',
'mezzanine.core.request.CurrentRequestMiddleware',
'mezzanine.core.middleware.TemplateForDeviceMiddleware',
'mezzanine.core.middleware.TemplateForHostMiddleware',
'mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware',
# Uncomment the following if using any of the SSL settings:
#'mezzanine.core.middleware.SSLRedirectMiddleware',
'mezzanine.pages.middleware.PageMiddleware',
'mezzanine.core.middleware.FetchFromCacheMiddleware',
]
ROOT_URLCONF = PROJECT_NAME + '.urls'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
            'sculpture/templates',
        ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'mezzanine.pages.context_processors.page',
'mezzanine.conf.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = PROJECT_NAME + '.wsgi.application'
# -----------------------------------------------------------------------------
# Authentication
# https://docs.djangoproject.com/en/dev/ref/settings/#auth
# -----------------------------------------------------------------------------
if 'wagtail.wagtailcore' in INSTALLED_APPS:
LOGIN_URL = '/wagtail/login/'
else:
LOGIN_URL = '/admin/login/'
# -----------------------------------------------------------------------------
# Sessions
# https://docs.djangoproject.com/en/dev/ref/settings/#sessions
# -----------------------------------------------------------------------------
SESSION_COOKIE_SECURE = True
# -----------------------------------------------------------------------------
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
# https://docs.djangoproject.com/en/dev/ref/settings/#static-files
# -----------------------------------------------------------------------------
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, STATIC_URL.strip('/'))
if not os.path.exists(STATIC_ROOT):
os.makedirs(STATIC_ROOT)
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'assets'),)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
MEDIA_URL = '/static/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, MEDIA_URL.strip('/'))
if not os.path.exists(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
# -----------------------------------------------------------------------------
# Installed Applications Settings
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Django Compressor
# http://django-compressor.readthedocs.org/en/latest/
# -----------------------------------------------------------------------------
COMPRESS_ENABLED = True
COMPRESS_CSS_FILTERS = [
# CSS minimizer
'compressor.filters.cssmin.CSSMinFilter'
]
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# -----------------------------------------------------------------------------
# FABRIC
# -----------------------------------------------------------------------------
FABRIC_USER = getpass.getuser()
# -----------------------------------------------------------------------------
# GLOBALS FOR JS
# -----------------------------------------------------------------------------
# Google Analytics ID
GA_ID = ''
# Project settings. These may be overridden in local_settings.py, but
# are specified here because they are likely to be in common across
# dev, stg, and liv.
IMAGE_SERVER_URL = '/images'
IMAGE_SERVER_ROOT = '/vol/crsbi/images/'
TINYMCE_SETUP_JS = 'mezzanine/js/tinymce_setup.js'
RICHTEXT_FILTER = 'sculpture.utils.convert_refs_to_links'
FORMS_USE_HTML5 = True
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
ADMIN_MENU_ORDER = (
('Fieldworker',
('sculpture.FeatureImage', 'sculpture.SiteImage', 'sculpture.Site')),
('Editor',
('sculpture.BibliographyItem', 'sculpture.Contributor',
'sculpture.Country', 'sculpture.Dedication', 'sculpture.Dimension',
'sculpture.Diocese', 'sculpture.FeatureSet',
'sculpture.Feature', 'sculpture.GlossaryTerm', 'sculpture.ImageStatus',
'sculpture.Period', 'sculpture.RegionType', 'sculpture.Region',
'sculpture.SiteStatus', 'sculpture.Settlement',
'sculpture.UserFeedback')),
('Static Content', ('pages.Page', 'generic.ThreadedComment',
('Media Library', 'fb_browse'))),
('Various', ('auth.User', 'auth.Group', 'sites.Site', 'conf.Setting',
'registration.RegistrationProfile')),
)
DASHBOARD_TAGS = (
('mezzanine_tags.app_list', 'blog_tags.quick_blog'),
('comment_tags.recent_comments',),
('mezzanine_tags.recent_actions',),
)
BLOG_SLUG = "news"
COMMENT_FORM_CLASS = 'comment'
# -----------------------------------------------------------------------------
# Automatically generated settings
# -----------------------------------------------------------------------------
# Check which db engine to use:
db_engine = 'django.db.backends.postgresql_psycopg2'
if 'django.contrib.gis' in INSTALLED_APPS:
db_engine = 'django.contrib.gis.db.backends.postgis'
AUTH_LDAP_REQUIRE_GROUP = 'cn=crsbi,' + LDAP_BASE_OU
TESTING = DEBUG
DEBUG_TOOLBAR_CONFIG = {'INTERCEPT_REDIRECTS': False}
if 'debug_toolbar' in INSTALLED_APPS:
MIDDLEWARE += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
|
import numpy as np
from min_interval_posets import supergraph as sg
def test1():
# Insertions at ends
str1 = [('m', .5), ('M', .75), ('m', .9), ('M', .3)]
str2 = [('M', .25), ('m', .5), ('M', .7), ('m', .8)]
mat = sg.get_alignmentmat(str1, str2)
mat_round = [[round(mat[i][j],8) for j in range(len(mat[0]))] for i in range(len(mat))]
assert(mat_round == [[0, 0.25, 0.75, 1.45, 2.25],
[0.5, 0.75, 0.25, 0.95, 1.75],
[1.25, 1.0, 1.0, 0.3, 1.1],
[2.15, 1.9, 1.4, 1.2, 0.4],
[2.45, 2.2, 1.7, 1.5, 0.7]])
check_uniqueness = sg.check_bestalignments(str1, str2)
assert(check_uniqueness == "unique optimal alignment")
alignment = sg.get_bestalignment_index(str1, str2)
assert(alignment == ([('M', (0, 0.25)),
('m', (0.5, 0.5)),
('M', (0.75, 0.7)),
('m', (0.9, 0.8)),
('M', (0.3, 0))],
[('None', 0), (0, 1), (1, 2), (2, 3), (3, 'None')]))
alignment_cost = sum((abs(alignment[0][i][1][0]-alignment[0][i][1][1])) for i in range(len(alignment[0])))
assert(alignment_cost == mat[len(mat)-1][len(mat[0])-1])
def test2():
# Insertions at beginning & strings are of different lengths
# NOTE there are two optimal alignments in this example
str1 = [('m', .5), ('M', .05), ('m', .6), ('M', .7), ('m',.3)]
str2 = [('m', .4), ('M', .75), ('m', .33)]
mat = sg.get_alignmentmat(str1, str2)
mat_round = [[round(mat[i][j],8) for j in range(len(mat[0]))] for i in range(len(mat))]
assert(mat_round == [[0, 0.4, 1.15, 1.48],
[0.5, 0.1, 0.85, 1.18],
[0.55, 0.15, 0.8, 1.13],
[1.15, 0.75, 1.4, 1.07],
[1.85, 1.45, 0.8, 1.13],
[2.15, 1.75, 1.1, 0.83]])
check_uniqueness = sg.check_bestalignments(str1, str2)
assert(check_uniqueness == "multiple optimal alignments")
alignment = sg.get_bestalignment_index(str1, str2)
assert(alignment == ([('m', (0.5, 0)),
('M', (0.05, 0)),
('m', (0.6, 0.4)),
('M', (0.7, 0.75)),
('m', (0.3, 0.33))],
[(0, 'None'), (1, 'None'), (2, 0), (3, 1), (4, 2)]))
alignment_cost = sum((abs(alignment[0][i][1][0]-alignment[0][i][1][1])) for i in range(len(alignment[0])))
assert(round(alignment_cost, 8) == round(mat[len(mat)-1][len(mat[0])-1],8))
def test3():
# Insertions in middle of strings and strings are of different lengths
str1 = [('m', .5), ('M', .05), ('m', .4), ('M', .7), ('m',.3)]
str2 = [('m', .5), ('M', .75), ('m', .33)]
mat = sg.get_alignmentmat(str1, str2)
alignment = sg.get_bestalignment_index(str1, str2)
assert(alignment == ([('m', (0.5, 0.5)),
('M', (0.05, 0)),
('m', (0.4, 0)),
('M', (0.7, 0.75)),
('m', (0.3, 0.33))],
[(0, 0), (1, 'None'), (2, 'None'), (3, 1), (4, 2)]))
alignment_cost = sum((abs(alignment[0][i][1][0]-alignment[0][i][1][1])) for i in range(len(alignment[0])))
assert(alignment_cost == mat[len(mat)-1][len(mat[0])-1])
def test4():
# No diagonal moves in traceback
str1 = [('a', .5), ('b', .05), ('c', .6), ('d', .7), ('e',.3)]
str2 = [('f', .4), ('g', .75), ('h', .33)]
mat = sg.get_alignmentmat(str1, str2)
alignment = sg.get_bestalignment_index(str1, str2)
assert(alignment == ([('f', (0, 0.4)),
('g', (0, 0.75)),
('h', (0, 0.33)),
('a', (0.5, 0)),
('b', (0.05, 0)),
('c', (0.6, 0)),
('d', (0.7, 0)),
('e', (0.3, 0))],
[('None', 0),
('None', 1),
('None', 2),
(0, 'None'),
(1, 'None'),
(2, 'None'),
(3, 'None'),
(4, 'None')]))
alignment_cost = sum((abs(alignment[0][i][1][0]-alignment[0][i][1][1])) for i in range(len(alignment[0])))
assert(alignment_cost == mat[len(mat)-1][len(mat[0])-1])
|
import pandas
from variantannotation import genotype_calling
from variantannotation import utilities
# REFACTOR THIS CODE ASAP
def get_list_from_annovar_csv(df, chunk_ids):
df = df.rename(columns={'1000g2015aug_all': 'ThousandGenomeAll'})
df.Chr = df.Chr.replace(to_replace='chrM', value='chrMT')
df['Start'] = pandas.to_numeric(df['Start'])
df['End'] = pandas.to_numeric(df['End'])
print 'Converting columns to float ...'
df["nci60"] = utilities.to_float(df, "nci60")
df["ThousandGenomeAll"] = utilities.to_float(df, "ThousandGenomeAll")
df["ESP6500si_ALL"] = utilities.to_float(df, "ESP6500si_ALL")
print 'Processing knownGene info ...'
utilities.split_string(df, "Func.knownGene")
utilities.split_string(df, "ExonicFunc.knownGene")
print 'Processing tfbsConsSites info ...'
df["tfbsConsSites"] = df["tfbsConsSites"].dropna().apply(utilities.cell_to_dict)
#print 'Processing targetScanS info ...'
#df["targetScanS"] = df["targetScanS"].dropna().apply(utilities.cell_to_dict)
print 'Processing genomicSuperDups info ...'
df["genomicSuperDups"] = df["genomicSuperDups"].dropna().apply(utilities.cell_to_dict)
print 'Processing cytoBand info ...'
df["cytoBand"] = df["cytoBand"].dropna().apply(utilities.split_cytoband)
df["cytoBand"] = df["cytoBand"].dropna().apply(utilities.lists_to_dict)
print 'Creating hgvs key ...'
df['hgvs_key'] = pandas.Series(chunk_ids)
print 'Processing genotype call info ...'
my_sample_id = df["Otherinfo"].dropna().apply(genotype_calling.split_sample_ID)
genotype_call = my_sample_id.apply(lambda x: x[-3::])
dict_split = genotype_call.apply(genotype_calling.return_dict)
df['Otherinfo'] = dict_split
df = df.rename(columns={'Otherinfo': 'Genotype_Call'})
df = utilities.modify_df(df)
print 'Transforming to JSON from dataFrame'
#Clean up dataframe
df_final = df.where((pandas.notnull(df)), None)
list_dict = df_final.T.to_dict().values()
#Attempt to transform dataframe to dictionary
#Set the ID to be the HGVS_ID
print 'cleaning up...'
for i in range(0, len(list_dict)):
list_dict[i] = utilities.scrub_dict(list_dict[i])
#list_filtered = []
#for key in filtered.keys():
# list_filtered.append({key: filtered[key]})
print 'Done'
return list_dict
# REFACTOR THIS CODE ASAP
def get_df_from_annovar_csv(df, chunk_ids):
df = df.rename(columns={'1000g2015aug_all': 'ThousandGenomeAll'})
df.Chr = df.Chr.replace(to_replace='chrM', value='chrMT')
df['Start'] = pandas.to_numeric(df['Start'])
df['End'] = pandas.to_numeric(df['End'])
df["nci60"] = utilities.to_float(df, "nci60")
df["ThousandGenomeAll"] = utilities.to_float(df, "ThousandGenomeAll")
df["ESP6500si_ALL"] = utilities.to_float(df, "ESP6500si_ALL")
df["tfbsConsSites"] = df["tfbsConsSites"].dropna().apply(utilities.cell_to_dict)
utilities.split_string(df, "Func.knownGene")
utilities.split_string(df, "ExonicFunc.knownGene")
#df["targetScanS"] = df["targetScanS"].dropna().apply(utilities.cell_to_dict)
df["genomicSuperDups"] = df["genomicSuperDups"].dropna().apply(utilities.cell_to_dict)
df["cytoBand"] = df["cytoBand"].dropna().apply(utilities.split_cytoband)
df["cytoBand"] = df["cytoBand"].dropna().apply(utilities.lists_to_dict)
df['hgvs_key'] = pandas.Series(chunk_ids)
my_sample_id = df["Otherinfo"].dropna().apply(genotype_calling.split_sample_ID)
genotype_call = my_sample_id.apply(lambda x: x[-2::])
dict_split = genotype_call.apply(genotype_calling.return_dict)
df['Otherinfo'] = dict_split
df = df.rename(columns={'Otherinfo': 'Genotype_Call'})
#Clean up dataframe
df = utilities.modify_df(df)
df_final = df.where((pandas.notnull(df)), None)
return df_final
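# Illustrative call sketch (hypothetical inputs; not part of the original module):
#   df_chunk = pandas.read_csv('annovar_chunk.csv')   # hypothetical ANNOVAR csv chunk
#   hgvs_ids = ['chr1:g.100A>T', 'chr2:g.200G>C']      # one HGVS id per row of the chunk
#   records = get_list_from_annovar_csv(df_chunk, hgvs_ids)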
#after myvariant data has been obtained
#def join_df_chunks(df):
|
from rest_framework import viewsets
from .serializers import TwitterSerializer
from .models import Twitter
from django.views.decorators.http import require_http_methods
from django.http import HttpResponse, Http404, JsonResponse
import tweepy
from rest_framework.decorators import api_view
class TwitterViewSet(viewsets.ModelViewSet):
serializer_class = TwitterSerializer
queryset = Twitter.objects.all()
|
import dash
from dash.dependencies import Input, Output, State
from server import app
@app.callback(
Output('steps-demo', 'current'),
[Input('steps-demo-go-next', 'nClicks'),
Input('steps-demo-go-last', 'nClicks'),
Input('steps-demo-restart', 'nClicks')],
State('steps-demo', 'current'),
prevent_initial_call=True
)
def steps_callback_demo_part1(go_next, go_last, restart, current):
ctx = dash.callback_context
if ctx.triggered[0]['prop_id'].startswith('steps-demo-go-next'):
return current + 1
elif ctx.triggered[0]['prop_id'].startswith('steps-demo-go-last'):
return max(current - 1, 0)
else:
return 0
@app.callback(
Output('steps-demo-current', 'children'),
Input('steps-demo', 'current'),
prevent_initial_call=True
)
def steps_callback_demo_part2(current):
    import time
    time.sleep(1)
    return f'The current step is: step {current}'
|
# -*- coding: utf-8 -*-
import math
import numpy as np
import pandas as pd
try:
import scipy
_SCIPY_ = True
except ImportError:
_SCIPY_ = False
from .utils import fibonacci, pascals_triangle
from .utils import get_drift, get_offset, verify_series
def weights(w):
def _compute(x):
return np.dot(w, x)
return _compute
def dema(close, length=None, offset=None, **kwargs):
"""Indicator: Double Exponential Moving Average (DEMA)"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
ema1 = ema(close=close, length=length, min_periods=min_periods)
ema2 = ema(close=ema1, length=length, min_periods=min_periods)
dema = 2 * ema1 - ema2
# Offset
if offset != 0:
dema = dema.shift(offset)
# Name & Category
dema.name = f"DEMA_{length}"
dema.category = 'overlap'
return dema
def ema(close, length=None, offset=None, **kwargs):
"""Indicator: Exponential Moving Average (EMA)"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length#int(0.25 * length)
adjust = bool(kwargs['adjust']) if 'adjust' in kwargs and kwargs['adjust'] is not None else True
offset = get_offset(offset)
# Calculate Result
if 'presma' in kwargs and kwargs['presma']:
initial_sma = sma(close=close, length=length)[:length]
rest = close[length:]
close = pd.concat([initial_sma, rest])
ema = close.ewm(span=length, min_periods=min_periods, adjust=adjust).mean()
# Offset
if offset != 0:
ema = ema.shift(offset)
# Name & Category
ema.name = f"EMA_{length}"
ema.category = 'overlap'
return ema
def fwma(close, length=None, asc=None, offset=None, **kwargs):
"""Indicator: Fibonacci's Weighted Moving Average (FWMA)"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
asc = asc if asc else True
offset = get_offset(offset)
# Calculate Result
fibs = fibonacci(length - 1)
fwma = close.rolling(length, min_periods=length).apply(weights(fibs), raw=True)
# Offset
if offset != 0:
fwma = fwma.shift(offset)
# Name & Category
fwma.name = f"FWMA_{length}"
fwma.category = 'overlap'
return fwma
def hl2(high, low, offset=None, **kwargs):
"""Indicator: HL2 """
# Validate Arguments
high = verify_series(high)
low = verify_series(low)
offset = get_offset(offset)
# Calculate Result
hl2 = 0.5 * (high + low)
# Offset
if offset != 0:
hl2 = hl2.shift(offset)
# Name & Category
hl2.name = "HL2"
hl2.category = 'overlap'
return hl2
def hlc3(high, low, close, offset=None, **kwargs):
"""Indicator: HLC3"""
# Validate Arguments
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
offset = get_offset(offset)
# Calculate Result
hlc3 = (high + low + close) / 3
# Offset
if offset != 0:
hlc3 = hlc3.shift(offset)
# Name & Category
hlc3.name = "HLC3"
hlc3.category = 'overlap'
return hlc3
def hma(close, length=None, offset=None, **kwargs):
"""Indicator: Hull Moving Average (HMA)
Use help(df.ta.hma) for specific documentation where 'df' represents
the DataFrame you are using.
"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
half_length = int(length / 2)
sqrt_length = int(math.sqrt(length))
wmaf = wma(close=close, length=half_length)
wmas = wma(close=close, length=length)
hma = wma(close=2 * wmaf - wmas, length=sqrt_length)
# Offset
if offset != 0:
hma = hma.shift(offset)
# Name & Category
hma.name = f"HMA_{length}"
hma.category = 'overlap'
return hma
def ichimoku(high, low, close, tenkan=None, kijun=None, senkou=None, offset=None, **kwargs):
"""Indicator: Ichimoku Kinkō Hyō (Ichimoku)"""
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
tenkan = int(tenkan) if tenkan and tenkan > 0 else 9
kijun = int(kijun) if kijun and kijun > 0 else 26
senkou = int(senkou) if senkou and senkou > 0 else 52
offset = get_offset(offset)
# Calculate Result
tenkan_sen = midprice(high=high, low=low, length=tenkan)
kijun_sen = midprice(high=high, low=low, length=kijun)
span_a = 0.5 * (tenkan_sen + kijun_sen)
span_b = midprice(high=high, low=low, length=senkou)
# Copy Span A and B values before their shift
_span_a = span_a[-kijun:].copy()
_span_b = span_b[-kijun:].copy()
span_a = span_a.shift(kijun)
span_b = span_b.shift(kijun)
chikou_span = close.shift(-kijun)
# Offset
if offset != 0:
tenkan_sen = tenkan_sen.shift(offset)
kijun_sen = kijun_sen.shift(offset)
span_a = span_a.shift(offset)
span_b = span_b.shift(offset)
chikou_span = chikou_span.shift(offset)
# Handle fills
if 'fillna' in kwargs:
span_a.fillna(kwargs['fillna'], inplace=True)
span_b.fillna(kwargs['fillna'], inplace=True)
chikou_span.fillna(kwargs['fillna'], inplace=True)
if 'fill_method' in kwargs:
span_a.fillna(method=kwargs['fill_method'], inplace=True)
span_b.fillna(method=kwargs['fill_method'], inplace=True)
chikou_span.fillna(method=kwargs['fill_method'], inplace=True)
# Name and Categorize it
span_a.name = f"ISA_{tenkan}"
span_b.name = f"ISB_{kijun}"
tenkan_sen.name = f"ITS_{tenkan}"
kijun_sen.name = f"IKS_{kijun}"
chikou_span.name = f"ICS_{kijun}"
chikou_span.category = kijun_sen.category = tenkan_sen.category = 'trend'
    span_b.category = span_a.category = chikou_span.category
# Prepare Ichimoku DataFrame
data = {span_a.name: span_a, span_b.name: span_b, tenkan_sen.name: tenkan_sen, kijun_sen.name: kijun_sen, chikou_span.name: chikou_span}
ichimokudf = pd.DataFrame(data)
ichimokudf.name = f"ICHIMOKU_{tenkan}_{kijun}_{senkou}"
ichimokudf.category = 'overlap'
    # Prepare the forward-looking Span DataFrame, assuming daily frequency data
last_date = close.index[-1]
df_freq = close.index.value_counts().mode()[0]
tdelta = pd.Timedelta(df_freq, unit='d')
new_dt = pd.date_range(start=last_date + tdelta, periods=kijun, freq='B')
spandf = pd.DataFrame(index=new_dt, columns=[span_a.name, span_b.name])
_span_a.index = _span_b.index = new_dt
spandf[span_a.name] = _span_a
spandf[span_b.name] = _span_b
return ichimokudf, spandf
def midpoint(close, length=None, offset=None, **kwargs):
"""Indicator: Midpoint"""
# Validate arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 1
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
lowest = close.rolling(length, min_periods=min_periods).min()
highest = close.rolling(length, min_periods=min_periods).max()
midpoint = 0.5 * (lowest + highest)
# Offset
if offset != 0:
midpoint = midpoint.shift(offset)
# Handle fills
if 'fillna' in kwargs:
midpoint.fillna(kwargs['fillna'], inplace=True)
if 'fill_method' in kwargs:
midpoint.fillna(method=kwargs['fill_method'], inplace=True)
# Name and Categorize it
midpoint.name = f"MIDPOINT_{length}"
midpoint.category = 'overlap'
return midpoint
def midprice(high, low, length=None, offset=None, **kwargs):
"""Indicator: Midprice"""
# Validate arguments
high = verify_series(high)
low = verify_series(low)
length = int(length) if length and length > 0 else 1
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
lowest_low = low.rolling(length, min_periods=min_periods).min()
highest_high = high.rolling(length, min_periods=min_periods).max()
midprice = 0.5 * (lowest_low + highest_high)
# Offset
if offset != 0:
midprice = midprice.shift(offset)
# Handle fills
if 'fillna' in kwargs:
midprice.fillna(kwargs['fillna'], inplace=True)
if 'fill_method' in kwargs:
midprice.fillna(method=kwargs['fill_method'], inplace=True)
# Name and Categorize it
midprice.name = f"MIDPRICE_{length}"
midprice.category = 'overlap'
return midprice
def ohlc4(open_, high, low, close, offset=None, **kwargs):
"""Indicator: OHLC4"""
# Validate Arguments
open_ = verify_series(open_)
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
offset = get_offset(offset)
# Calculate Result
ohlc4 = 0.25 * (open_ + high + low + close)
# Offset
if offset != 0:
ohlc4 = ohlc4.shift(offset)
# Name & Category
ohlc4.name = "OHLC4"
ohlc4.category = 'overlap'
return ohlc4
def pwma(close, length=None, asc=None, offset=None, **kwargs):
"""Indicator: Pascals Weighted Moving Average (PWMA)"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
asc = asc if asc else True
offset = get_offset(offset)
# Calculate Result
triangle = pascals_triangle(length - 1)
pwma = close.rolling(length, min_periods=length).apply(weights(triangle), raw=True)
# Offset
if offset != 0:
pwma = pwma.shift(offset)
# Name & Category
pwma.name = f"PWMA_{length}"
pwma.category = 'overlap'
return pwma
def rma(close, length=None, offset=None, **kwargs):
"""Indicator: wildeR's Moving Average (RMA)"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
alpha = (1.0 / length) if length > 0 else 1
# Calculate Result
rma = close.ewm(alpha=alpha, min_periods=min_periods).mean()
# Offset
if offset != 0:
rma = rma.shift(offset)
# Name & Category
rma.name = f"RMA_{length}"
rma.category = 'overlap'
return rma
def sma(close, length=None, offset=None, **kwargs):
"""Indicator: Simple Moving Average (SMA)"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
sma = close.rolling(length, min_periods=min_periods).mean()
# Offset
if offset != 0:
sma = sma.shift(offset)
# Name & Category
sma.name = f"SMA_{length}"
sma.category = 'overlap'
return sma
def t3(close, length=None, a=None, offset=None, **kwargs):
"""Indicator: T3"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
a = float(a) if a and a > 0 and a < 1 else 0.7
offset = get_offset(offset)
# Calculate Result
c1 = -a * a ** 2
c2 = 3 * a ** 2 + 3 * a ** 3
c3 = -6 * a ** 2 - 3 * a - 3 * a ** 3
c4 = a ** 3 + 3 * a ** 2 + 3 * a + 1
e1 = ema(close=close, length=length, min_periods=min_periods, **kwargs)
e2 = ema(close=e1, length=length, min_periods=min_periods, **kwargs)
e3 = ema(close=e2, length=length, min_periods=min_periods, **kwargs)
e4 = ema(close=e3, length=length, min_periods=min_periods, **kwargs)
e5 = ema(close=e4, length=length, min_periods=min_periods, **kwargs)
e6 = ema(close=e5, length=length, min_periods=min_periods, **kwargs)
t3 = c1 * e6 + c2 * e5 + c3 * e4 + c4 * e3
# Offset
if offset != 0:
t3 = t3.shift(offset)
# Name & Category
t3.name = f"T3_{length}_{a}"
t3.category = 'overlap'
return t3
def tema(close, length=None, offset=None, **kwargs):
"""Indicator: Triple Exponential Moving Average (TEMA)"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
ema1 = ema(close=close, length=length, min_periods=min_periods)
ema2 = ema(close=ema1, length=length, min_periods=min_periods)
ema3 = ema(close=ema2, length=length, min_periods=min_periods)
tema = 3 * (ema1 - ema2) + ema3
# Offset
if offset != 0:
tema = tema.shift(offset)
# Name & Category
tema.name = f"TEMA_{length}"
tema.category = 'overlap'
return tema
def trima(close, length=None, offset=None, **kwargs):
"""Indicator: Triangular Moving Average (TRIMA) *requires scipy"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
offset = get_offset(offset)
# Calculate Result
half_length = round(0.5 * (length + 1))
sma1 = close.rolling(half_length, min_periods=half_length).mean()
trima = sma1.rolling(half_length, min_periods=half_length).mean()
# Offset
if offset != 0:
trima = trima.shift(offset)
# Name & Category
trima.name = f"TRIMA_{length}"
trima.category = 'overlap'
return trima
def vwap(high, low, close, volume, offset=None, **kwargs):
"""Indicator: Volume Weighted Average Price (VWAP)"""
# Validate Arguments
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
volume = verify_series(volume)
offset = get_offset(offset)
# Calculate Result
tp = hlc3(high=high, low=low, close=close)
tpv = tp * volume
vwap = tpv.cumsum() / volume.cumsum()
# Offset
if offset != 0:
vwap = vwap.shift(offset)
# Name & Category
vwap.name = "VWAP"
vwap.category = 'overlap'
return vwap
def vwma(close, volume, length=None, offset=None, **kwargs):
"""Indicator: Volume Weighted Moving Average (VWMA)"""
# Validate Arguments
close = verify_series(close)
volume = verify_series(volume)
length = int(length) if length and length > 0 else 10
offset = get_offset(offset)
# Calculate Result
pv = close * volume
vwma = sma(close=pv, length=length) / sma(close=volume, length=length)
# Offset
if offset != 0:
vwma = vwma.shift(offset)
# Name & Category
vwma.name = f"VWMA_{length}"
vwma.category = 'overlap'
return vwma
def wma(close, length=None, asc=None, offset=None, **kwargs):
"""Indicator: Weighted Moving Average (WMA)"""
# Validate Arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 10
min_periods = int(kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length
asc = asc if asc else True
offset = get_offset(offset)
# Calculate Result
total_weight = 0.5 * length * (length + 1)
weights_ = pd.Series(np.arange(1, length + 1))
weights = weights_ if asc else weights_[::-1]
def linear_weights(w):
def _compute(x):
return (w * x).sum() / total_weight
return _compute
close_ = close.rolling(length, min_periods=length)
wma = close_.apply(linear_weights(weights), raw=True)
# Offset
if offset != 0:
wma = wma.shift(offset)
# Name & Category
wma.name = f"WMA_{length}"
wma.category = 'overlap'
return wma
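# Illustrative usage sketch (not part of the original module): every moving
# average above takes a pandas Series of closes. Assuming a small series named
# `_close`, a quick sanity check could look like:
#   _close = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
#   sma(_close, length=3).iloc[-1]   # (3 + 4 + 5) / 3 == 4.0
#   wma(_close, length=3).iloc[-1]   # (1*3 + 2*4 + 3*5) / 6 == 26 / 6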
# Overlap Documentation
hl2.__doc__ = \
"""Average of High-Low (HL2)
Equally weighted average of two series, namely High and Low.
Sources:
https://www.tradingview.com/study-script-reference/#var_hl2
Calculation:
HL2 = 0.5 * (high + low)
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
ichimoku.__doc__ = \
"""Ichimoku Kinkō Hyō (Ichimoku)
It identifies the trend and looks for potential signals within that trend.
Sources:
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud
Calculation:
Default Inputs:
tenkan=9, kijun=26, senkou=52
tenkan_sen = midprice(high, low, length=tenkan)
kijun_sen = midprice(high, low, length=kijun)
span_a = 0.5 * (tenkan_sen + kijun_sen)
span_b = midprice(high=high, low=low, length=senkou)
span_a = span_a.shift(kijun)
span_b = span_b.shift(kijun)
chikou_span = close.shift(-kijun)
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
tenkan (int): Tenkan period. Default: 9
kijun (int): Kijun period. Default: 26
senkou (int): Senkou period. Default: 52
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pandas.Series: New feature generated.
"""
hlc3.__doc__ = \
"""Average of High-Low-Close (HLC3)
Equally weighted average of three series, namely High, Low, and Close.
Sources:
https://www.tradingview.com/study-script-reference/#var_hlc3
Calculation:
HLC3 = (high + low + close) / 3.0
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
ohlc4.__doc__ = \
"""Average of Open-High-Low-Close (OHLC4)
Equally weighted average of four series, namely Open, High, Low, and Close.
Sources:
https://www.tradingview.com/study-script-reference/#var_ohlc4
Calculation:
OHLC4 = 0.25 * (open + high + low + close)
Args:
open (pd.Series): Series of 'open's
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
midpoint.__doc__ = \
"""Midpoint (MIDPOINT)
The Midpoint is the average of the highest and lowest closes over a period.
Sources:
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/midpoint-midpnt/
Calculation:
Default Inputs:
length=1
lowest_close = close.rolling(length).min()
highest_close = close.rolling(length).max()
MIDPOINT = 0.5 * (highest_close + lowest_close)
Args:
close (pd.Series): Series of 'close's
    length (int): It's period. Default: 1
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
midprice.__doc__ = \
"""Midprice (MIDPRICE)
The Midprice is the average of the highest high and the lowest low over a period.
Sources:
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/midprice-midpri/
Calculation:
Default Inputs:
length=1
lowest_low = low.rolling(length).min()
highest_high = high.rolling(length).max()
MIDPRICE = 0.5 * (highest_high + lowest_low)
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
length (int): It's period. Default: 1
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
dema.__doc__ = \
"""Double Exponential Moving Average (DEMA)
The Double Exponential Moving Average attempts to provide a smoother average with less
lag than the normal Exponential Moving Average (EMA).
Sources:
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/double-exponential-moving-average-dema/
Calculation:
Default Inputs:
length=10
EMA = Exponential Moving Average
ema1 = EMA(close, length)
ema2 = EMA(ema1, length)
DEMA = 2 * ema1 - ema2
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
ema.__doc__ = \
"""Exponential Moving Average (EMA)
The Exponential Moving Average is a more responsive moving average compared to the
Simple Moving Average (SMA). The weights are determined by alpha, which is
proportional to its length. There are several different methods of calculating
EMA. One method uses just the standard definition of EMA and another uses the
SMA to generate the initial value for the rest of the calculation.
Sources:
https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_averages
https://www.investopedia.com/ask/answers/122314/what-exponential-moving-average-ema-formula-and-how-ema-calculated.asp
Calculation:
Default Inputs:
length=10
SMA = Simple Moving Average
if kwargs['presma']:
initial = SMA(close, length)
rest = close[length:]
close = initial + rest
EMA = close.ewm(span=length, adjust=adjust).mean()
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
offset (int): How many periods to offset the result. Default: 0
Kwargs:
adjust (bool): Default: True
presma (bool, optional): If True, uses SMA for initial value.
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
fwma.__doc__ = \
"""Fibonacci's Weighted Moving Average (FWMA)
Fibonacci's Weighted Moving Average is similar to a Weighted Moving Average
(WMA) where the weights are based on the Fibonacci Sequence.
Source: Kevin Johnson
Calculation:
Default Inputs:
length=10,
    def weights(w):
        def _compute(x):
            return np.dot(w, x)
        return _compute
    fibs = utils.fibonacci(length - 1)
    FWMA = close.rolling(length).apply(weights(fibs), raw=True)
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
asc (bool): Recent values weigh more. Default: True
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
hma.__doc__ = \
"""Hull Moving Average (HMA)
The Hull Moving Average attempts to reduce or remove lag in moving
averages.
Sources:
https://alanhull.com/hull-moving-average
Calculation:
Default Inputs:
length=10
WMA = Weighted Moving Average
half_length = int(0.5 * length)
sqrt_length = int(math.sqrt(length))
wmaf = WMA(close, half_length)
wmas = WMA(close, length)
HMA = WMA(2 * wmaf - wmas, sqrt_length)
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
ichimoku.__doc__ = \
"""Ichimoku Kinkō Hyō (ichimoku)
Developed Pre WWII as a forecasting model for financial markets.
Sources:
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/ichimoku-ich/
Calculation:
Default Inputs:
tenkan=9, kijun=26, senkou=52
MIDPRICE = Midprice
    TENKAN_SEN = MIDPRICE(high, low, length=tenkan)
    KIJUN_SEN = MIDPRICE(high, low, length=kijun)
    CHIKOU_SPAN = close.shift(-kijun)
    SPAN_A = 0.5 * (TENKAN_SEN + KIJUN_SEN)
    SPAN_A = SPAN_A.shift(kijun)
    SPAN_B = MIDPRICE(high, low, length=senkou)
    SPAN_B = SPAN_B.shift(kijun)
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
tenkan (int): Tenkan period. Default: 9
kijun (int): Kijun period. Default: 26
senkou (int): Senkou period. Default: 52
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: Two DataFrames.
For the visible period: spanA, spanB, tenkan_sen, kijun_sen,
and chikou_span columns
For the forward looking period: spanA and spanB columns
"""
pwma.__doc__ = \
"""Pascal's Weighted Moving Average (PWMA)
Pascal's Weighted Moving Average is similar to a symmetric triangular
window except PWMA's weights are based on Pascal's Triangle.
Source: Kevin Johnson
Calculation:
Default Inputs:
length=10,
    def weights(w):
        def _compute(x):
            return np.dot(w, x)
        return _compute
    triangle = utils.pascals_triangle(length - 1)
    PWMA = close.rolling(length).apply(weights(triangle), raw=True)
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
asc (bool): Recent values weigh more. Default: True
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
rma.__doc__ = \
"""wildeR's Moving Average (RMA)
The WildeR's Moving Average is simply an Exponential Moving Average (EMA)
with a modified alpha = 1 / length.
Sources:
https://alanhull.com/hull-moving-average
Calculation:
Default Inputs:
length=10
EMA = Exponential Moving Average
alpha = 1 / length
RMA = EMA(close, alpha=alpha)
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
sma.__doc__ = \
"""Simple Moving Average (SMA)
The Simple Moving Average is the classic moving average that is the equally
weighted average over n periods.
Sources:
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/simple-moving-average-sma/
Calculation:
Default Inputs:
length=10
SMA = SUM(close, length) / length
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
offset (int): How many periods to offset the result. Default: 0
Kwargs:
adjust (bool): Default: True
presma (bool, optional): If True, uses SMA for initial value.
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
t3.__doc__ = \
"""Tim Tillson's T3 Moving Average (T3)
Tim Tillson's T3 Moving Average is considered a smoother and more responsive
moving average relative to other moving averages.
Sources:
http://www.binarytribune.com/forex-trading-indicators/t3-moving-average-indicator/
Calculation:
Default Inputs:
length=10, a=0.7
c1 = -a^3
c2 = 3a^2 + 3a^3 = 3a^2 * (1 + a)
c3 = -6a^2 - 3a - 3a^3
c4 = a^3 + 3a^2 + 3a + 1
ema1 = EMA(close, length)
ema2 = EMA(ema1, length)
ema3 = EMA(ema2, length)
ema4 = EMA(ema3, length)
ema5 = EMA(ema4, length)
ema6 = EMA(ema5, length)
T3 = c1 * ema6 + c2 * ema5 + c3 * ema4 + c4 * ema3
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
a (float): 0 < a < 1. Default: 0.7
offset (int): How many periods to offset the result. Default: 0
Kwargs:
adjust (bool): Default: True
presma (bool, optional): If True, uses SMA for initial value.
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
tema.__doc__ = \
"""Triple Exponential Moving Average (TEMA)
A less laggy Exponential Moving Average.
Sources:
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/triple-exponential-moving-average-tema/
Calculation:
Default Inputs:
length=10
EMA = Exponential Moving Average
ema1 = EMA(close, length)
ema2 = EMA(ema1, length)
ema3 = EMA(ema2, length)
TEMA = 3 * (ema1 - ema2) + ema3
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
offset (int): How many periods to offset the result. Default: 0
Kwargs:
adjust (bool): Default: True
presma (bool, optional): If True, uses SMA for initial value.
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
trima.__doc__ = \
"""Triangular Moving Average (TRIMA)
A weighted moving average where the shape of the weights is triangular and the
greatest weight is in the middle of the period.
Sources:
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/triangular-moving-average-trima/
Calculation:
Default Inputs:
length=10
SMA = Simple Moving Average
    half_length = round(0.5 * (length + 1))
SMA1 = SMA(close, half_length)
TRIMA = SMA(SMA1, half_length)
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
offset (int): How many periods to offset the result. Default: 0
Kwargs:
adjust (bool): Default: True
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
vwap.__doc__ = \
"""Volume Weighted Average Price (VWAP)
The Volume Weighted Average Price measures the average typical price
weighted by volume. It is typically used with intraday charts to identify
the general direction.
Sources:
https://www.tradingview.com/wiki/Volume_Weighted_Average_Price_(VWAP)
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/volume-weighted-average-price-vwap/
Calculation:
tp = typical_price = hlc3(high, low, close)
tpv = tp * volume
VWAP = tpv.cumsum() / volume.cumsum()
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
volume (pd.Series): Series of 'volume's
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
vwma.__doc__ = \
"""Volume Weighted Moving Average (VWMA)
Volume Weighted Moving Average.
Sources:
https://www.motivewave.com/studies/volume_weighted_moving_average.htm
Calculation:
Default Inputs:
length=10
SMA = Simple Moving Average
pv = close * volume
VWMA = SMA(pv, length) / SMA(volume, length)
Args:
close (pd.Series): Series of 'close's
volume (pd.Series): Series of 'volume's
length (int): It's period. Default: 10
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
wma.__doc__ = \
"""Weighted Moving Average (WMA)
The Weighted Moving Average where the weights are linearly increasing and
the most recent data has the heaviest weight.
Sources:
https://en.wikipedia.org/wiki/Moving_average#Weighted_moving_average
Calculation:
Default Inputs:
length=10, asc=True
total_weight = 0.5 * length * (length + 1)
    weights_ = [1, 2, ..., length]  # Ascending
    weights = weights_ if asc else weights_[::-1]
    def linear_weights(w):
        def _compute(x):
            return (w * x).sum() / total_weight
        return _compute
    WMA = close.rolling(length).apply(linear_weights(weights), raw=True)
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
asc (bool): Recent values weigh more. Default: True
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
|
import os
GCP_PROJECT = os.environ['HAIL_GCP_PROJECT']
assert GCP_PROJECT != ''
GCP_ZONE = os.environ['HAIL_GCP_ZONE']
assert GCP_ZONE != ''
GCP_REGION = '-'.join(GCP_ZONE.split('-')[:-1]) # us-west1-a -> us-west1
DOCKER_PREFIX = os.environ.get('HAIL_DOCKER_PREFIX', f'gcr.io/{GCP_REGION}')
assert DOCKER_PREFIX != ''
DOCKER_ROOT_IMAGE = os.environ['HAIL_DOCKER_ROOT_IMAGE']
assert DOCKER_ROOT_IMAGE != ''
DOMAIN = os.environ['HAIL_DOMAIN']
assert DOMAIN != ''
IP = os.environ.get('HAIL_IP')
CI_UTILS_IMAGE = os.environ.get('HAIL_CI_UTILS_IMAGE', f'{DOCKER_PREFIX}/ci-utils:latest')
BUILDKIT_IMAGE = os.environ['HAIL_BUILDKIT_IMAGE']
DEFAULT_NAMESPACE = os.environ['HAIL_DEFAULT_NAMESPACE']
KUBERNETES_SERVER_URL = os.environ['KUBERNETES_SERVER_URL']
BUCKET = os.environ['HAIL_CI_BUCKET_NAME']
|
# Function to generate list of factors
def get_factorsList(n):
    l = [1]  # 1 is a factor of any number
    # n//2 is the largest possible factor of n that is less than n
for i in range(2, n//2+1):
if n%i==0: # Check if factor or not
l.append(i)
    # n itself is a factor; for n == 1 it is already in the list, so skip it
if n != 1:
l.append(n)
# Oneliner
# return [i for i in range(1,n//2+1) if n%i==0] + ([n] if n!=1 else [])
return l
if __name__ == "__main__":
# List of input no's
list_of_numbers = [23, 46, 65, 34234, 423, 43212]
# Get factor list of given no.
for num in list_of_numbers:
print(get_factorsList(num))
|
import aiohttp
import asyncio
async def main():
async with aiohttp.ClientSession() as session:
async with session.get('http://python.org') as response:
print("Status:", response.status)
print("Content-type:", response.headers['content-type'])
html = await response.text()
print("Body:", html[:15], "...")
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
import pytest
pytest.main(['-x', 'tests', '--resultlog=run_tests.log', '-v'])
|
import xarray as xr
from numpy import abs, arange, unique
from os.path import exists
def readOOI (fname):
"""
Update parameter names from OOI to iPROF
Parameters
----------
fname : str
path of OOI Pioneer Profiler mooring downloaded data
Returns
-------
ds : xarray DataSet
dataset with iPROF variable names
"""
if ( exists(fname) ):
ds = xr.open_dataset( fname )
else:
        raise FileNotFoundError(f'File {fname} not found')
ds = ds.rename({ 'sea_water_temperature_profiler_depth_enabled': 'prof_T',
'sea_water_temperature_profiler_depth_enabled_qc_agg':'prof_Tflag',
'sea_water_practical_salinity_profiler_depth_enabled': 'prof_S',
'sea_water_practical_salinity_profiler_depth_enabled_qc_agg':'prof_Sflag',}) \
.drop({ 'sea_water_density_profiler_depth_enabled',
'sea_water_density_profiler_depth_enabled_qc_agg',
'sea_water_pressure_profiler_depth_enabled',
'sea_water_pressure_profiler_depth_enabled_qc_agg',})
ds['prof_depth'] = xr.DataArray(-1 * ds.z, dims=ds.z.dims, attrs=ds.z.attrs)
ds=ds.drop('z')
writeTimeVariables (ds)
#ds=addDepthDimension (ds)
(ds['iPROF'], counts) = unique(ds.time, return_counts=True)
return ds
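# Illustrative usage (hypothetical file name; not part of the original module):
#   ds = readOOI('ooi_pioneer_profiler.nc')
#   ds.prof_T   # temperature under its iPROF-style variable name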
def addDepthDimension (ds):
"""
Create depth coordinate
Parameters
----------
ds : xarray DataSet
OOI Profiler mooring data for one profiler
Returns
-------
ds : xarray DataSet
dataset with iDEPTH coordinate set as a dimension
"""
if ( 'prof_depth' not in ds ):
raise TypeError('Couldn\'t find prof_depth data variable')
if ( 'actual_range' not in ds.prof_depth.attrs ):
raise TypeError('Couldn\'t find prof_depth range attribute')
iDEPTH = arange(max(abs(ds.prof_depth.attrs['actual_range'])) + 1)
return ds.expand_dims({"iDEPTH":iDEPTH})
def writeTimeVariables (ds):
"""
Update from datetime64 times to default iPROF time
Parameters
----------
ds : xarray DataSet
OOI Profiler mooring data for one profiler
Returns
-------
None : adds data variables to existing DataSet
dataset with iPROF time dataArrays
"""
times = ds.time.values.astype('datetime64[s]')
yyyymmdd = []; hhmmss = []
for time in times:
tmp = str(time).partition('T')
yyyymmdd.append( float(''.join( tmp[0].split("-", maxsplit=2) )) )
hhmmss.append( float(''.join( tmp[-1].split(":", maxsplit=2) )) )
ds['prof_YYYYMMDD'] = xr.DataArray(
yyyymmdd, dims=["row"],
attrs={'long_name' : 'year (4 digits), month (2 digits), day (2 digits)'}
)
ds['prof_HHMMSS'] = xr.DataArray(
hhmmss, dims=["row"],
attrs={'long_name' : 'hour (2 digits), minute (2 digits), second (2 digits)'}
)
return None
|
"""This module supports puzzles that place fixed shape regions into the grid."""
from collections import defaultdict
import sys
from typing import Dict, List
from z3 import ArithRef, Int, IntVal, Or, Solver, PbEq
from .fastz3 import fast_and, fast_eq, fast_ne
from .geometry import Lattice, Point, Vector
from .quadtree import ExpressionQuadTree
# Key types for use with the ExpressionQuadTree when adding shape instance
# constraints.
HAS_INSTANCE_ID, NOT_HAS_INSTANCE_ID, HAS_SHAPE_TYPE = range(3)
def canonicalize_shape(shape: List[Vector]) -> List[Vector]:
"""Returns a new shape that's canonicalized.
A canonicalized shape is in sorted order and its first offset is Vector(0, 0).
This helps with deduplication, since equivalent shapes will be canonicalized
identically.
# Arguments
shape (List[Vector]): A list of offsets defining a shape.
# Returns
(List[Vector]): A list of offsets defining the canonicalized version
of the shape, i.e., in sorted order and with first offset equal
to Vector(0, 0).
"""
shape = sorted(shape)
first_negated = shape[0].negate()
return [v.translate(first_negated) for v in shape]
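# A small worked example (illustrative only, assuming Vector sorts and translates
# componentwise as used above): canonicalizing [Vector(1, 1), Vector(1, 0)] first
# sorts the offsets to [Vector(1, 0), Vector(1, 1)], then translates them by the
# negation of the first offset, yielding [Vector(0, 0), Vector(0, 1)].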
class ShapeConstrainer:
"""Creates constraints for placing fixed shape regions into the grid.
# Arguments
lattice (Lattice): The structure of the grid.
shapes (List[List[Vector]]): A list of region shape definitions.
Each region shape definition should be a list of offsets.
The same region shape definition may be included multiple times to
indicate the number of times that shape may appear (if allow_copies
is false).
solver (z3.Solver, None): A #Solver object. If None, a #Solver will be
constructed.
complete (bool): If true, every cell must be part of a shape region. Defaults
to false.
allow_rotations (bool): If true, allow rotations of the shapes to be placed
in the grid. Defaults to false.
allow_reflections (bool): If true, allow reflections of the shapes to be
placed in the grid. Defaults to false.
allow_copies (bool): If true, allow any number of copies of the shapes to be
placed in the grid. Defaults to false.
"""
_instance_index = 0
def __init__( # pylint: disable=R0913
self,
lattice: Lattice,
shapes: List[List[Vector]],
solver: Solver = None,
complete: bool = False,
allow_rotations: bool = False,
allow_reflections: bool = False,
allow_copies: bool = False
):
ShapeConstrainer._instance_index += 1
if solver:
self.__solver = solver
else:
self.__solver = Solver()
self.__lattice = lattice
self.__complete = complete
self.__allow_copies = allow_copies
self.__shapes = shapes
self.__make_variants(allow_rotations, allow_reflections)
self.__create_grids()
self.__add_constraints()
def __make_variants(self, allow_rotations, allow_reflections):
fs = self.__lattice.transformation_functions(
allow_rotations, allow_reflections)
self.__variants = [
[
list(shape_tuple)
for shape_tuple in {
tuple(canonicalize_shape([f(v) for v in s]))
for f in fs
}
]
for s in self.__shapes
]
def __create_grids(self):
"""Create the grids used to model shape region constraints."""
self.__shape_type_grid: Dict[Point, ArithRef] = {}
for p in self.__lattice.points:
v = Int(f"scst-{ShapeConstrainer._instance_index}-{p.y}-{p.x}")
if self.__complete:
self.__solver.add(v >= 0)
else:
self.__solver.add(v >= -1)
self.__solver.add(v < len(self.__shapes))
self.__shape_type_grid[p] = v
self.__shape_instance_grid: Dict[Point, ArithRef] = {}
for p in self.__lattice.points:
v = Int(f"scsi-{ShapeConstrainer._instance_index}-{p.y}-{p.x}")
if self.__complete:
self.__solver.add(v >= 0)
else:
self.__solver.add(v >= -1)
self.__solver.add(v < len(self.__lattice.points))
self.__shape_instance_grid[p] = v
def __add_constraints(self):
self.__add_grid_agreement_constraints()
self.__add_shape_instance_constraints()
if not self.__allow_copies:
for shape_index, shape in enumerate(self.__shapes):
self.__add_single_copy_constraints(shape_index, shape)
def __add_grid_agreement_constraints(self):
for p in self.__shape_type_grid:
self.__solver.add(
Or(
fast_and(
self.__shape_type_grid[p] == -1,
self.__shape_instance_grid[p] == -1
),
fast_and(
self.__shape_type_grid[p] != -1,
self.__shape_instance_grid[p] != -1
)
)
)
def __add_shape_instance_constraints(self): # pylint: disable=R0914
int_vals = {}
for i in range(max(len(self.__lattice.points), len(self.__variants))):
int_vals[i] = IntVal(i)
quadtree = ExpressionQuadTree(self.__lattice.points)
for instance_id in [self.__lattice.point_to_index(p) for p in self.__lattice.points]:
quadtree.add_expr(
(HAS_INSTANCE_ID, instance_id),
lambda p, i=instance_id: fast_eq(self.__shape_instance_grid[p], int_vals[i]))
quadtree.add_expr(
(NOT_HAS_INSTANCE_ID, instance_id),
lambda p, i=instance_id: fast_ne(self.__shape_instance_grid[p], int_vals[i]))
for shape_index in range(len(self.__variants)):
quadtree.add_expr(
(HAS_SHAPE_TYPE, shape_index),
lambda p, i=shape_index: fast_eq(self.__shape_type_grid[p], int_vals[i]))
root_options = defaultdict(list)
for shape_index, variants in enumerate(self.__variants): # pylint: disable=R1702
for variant in variants:
for root_point in self.__lattice.points:
instance_id = self.__lattice.point_to_index(root_point)
offset_points = set()
for offset_vector in variant:
point = root_point.translate(offset_vector)
if point not in self.__shape_instance_grid:
offset_points = None
break
offset_points.add(point)
if offset_points:
and_terms = []
for p in offset_points:
and_terms.append(quadtree.get_point_expr((HAS_INSTANCE_ID, instance_id), p))
and_terms.append(quadtree.get_point_expr((HAS_SHAPE_TYPE, shape_index), p))
and_terms.append(quadtree.get_other_points_expr(
(NOT_HAS_INSTANCE_ID, instance_id), offset_points))
root_options[root_point].append(fast_and(*and_terms))
for p in self.__lattice.points:
instance_id = self.__lattice.point_to_index(p)
not_has_instance_id_expr = quadtree.get_other_points_expr(
(NOT_HAS_INSTANCE_ID, instance_id), [])
or_terms = root_options[p]
if or_terms:
or_terms.append(not_has_instance_id_expr)
self.__solver.add(Or(*or_terms))
else:
self.__solver.add(not_has_instance_id_expr)
def __add_single_copy_constraints(self, shape_index, shape):
sum_terms = []
for p in self.__shape_type_grid:
sum_terms.append((self.__shape_type_grid[p] == shape_index, 1))
self.__solver.add(PbEq(sum_terms, len(shape)))
@property
def solver(self) -> Solver:
"""(z3.Solver): The #Solver associated with this #ShapeConstrainer."""
return self.__solver
@property
def shape_type_grid(self) -> Dict[Point, ArithRef]:
"""(Dict[Point, ArithRef]): A dictionary of z3 constants of shape types.
Each cell contains the index of the shape type placed in that cell (as
indexed by the shapes list passed in to the #ShapeConstrainer constructor),
or -1 if no shape is placed within that cell.
"""
return self.__shape_type_grid
@property
def shape_instance_grid(self) -> Dict[Point, ArithRef]:
"""(Dict[Point, ArithRef]): z3 constants of shape instance IDs.
Each cell contains a number shared among all cells containing the same
instance of the shape, or -1 if no shape is placed within that cell.
"""
return self.__shape_instance_grid
def print_shape_types(self):
"""Prints the shape type assigned to each cell.
Should be called only after the solver has been checked.
"""
model = self.__solver.model()
min_y = min(p.y for p in self.__shape_type_grid)
min_x = min(p.x for p in self.__shape_type_grid)
max_y = max(p.y for p in self.__shape_type_grid)
max_x = max(p.x for p in self.__shape_type_grid)
for y in range(min_y, max_y + 1):
for x in range(min_x, max_x + 1):
p = Point(y, x)
shape_index = -1
if p in self.__shape_type_grid:
v = self.__shape_type_grid[p]
shape_index = model.eval(v).as_long()
if shape_index >= 0:
sys.stdout.write(f"{shape_index:3}")
else:
sys.stdout.write(" ")
print()
def print_shape_instances(self):
"""Prints the shape instance ID assigned to each cell.
Should be called only after the solver has been checked.
"""
model = self.__solver.model()
min_y = min(p.y for p in self.__shape_instance_grid)
min_x = min(p.x for p in self.__shape_instance_grid)
max_y = max(p.y for p in self.__shape_instance_grid)
max_x = max(p.x for p in self.__shape_instance_grid)
for y in range(min_y, max_y + 1):
for x in range(min_x, max_x + 1):
p = Point(y, x)
shape_instance = -1
if p in self.__shape_instance_grid:
v = self.__shape_instance_grid[p]
shape_instance = model.eval(v).as_long()
if shape_instance >= 0:
sys.stdout.write(f"{shape_instance:3}")
else:
sys.stdout.write(" ")
print()
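# Usage sketch (not part of the original module; the lattice factory name below
# is an assumption -- substitute whatever constructor your geometry module
# actually provides):
#
#   lattice = get_rectangle_lattice(height=4, width=4)   # assumed helper
#   sc = ShapeConstrainer(
#       lattice,
#       shapes=[[Vector(0, 0), Vector(0, 1)], [Vector(0, 0), Vector(1, 0)]],
#       complete=True,
#       allow_rotations=True,
#   )
#   if sc.solver.check() == sat:                          # z3.sat
#       sc.print_shape_types()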
|
from . import call
call()
|
import time
import torch
from abstraction_learning_dataset import AbstractionLearningDataSet
from abstraction_learning_CNN import AbstractionLearningCNN
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
def TrainNetwork():
use_GPU = torch.cuda.is_available()
print ("Train Abstraction Learning CNN, use GPU: ", use_GPU)
## Define training data set and test data sets ##
time_before_reading = time.time()
# This needs to be replaced by the training data set which can be generated with the dataset_generator
trainset = AbstractionLearningDataSet(file_path="../data_sets/random_test_data_set.txt")
trainloader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=True, num_workers=4)
print ("Read Training Dataset which took %.3fs" %(time.time() - time_before_reading))
time_before_reading = time.time()
testset_1_random = AbstractionLearningDataSet(file_path="../data_sets/random_test_data_set.txt")
testloader_1_random = torch.utils.data.DataLoader(testset_1_random, batch_size=1,shuffle=False, num_workers=4)
print ("Read Random Test Dataset which took %.3fs" %(time.time() - time_before_reading))
time_before_reading = time.time()
testset_2_simulated = AbstractionLearningDataSet(file_path="../data_sets/simulated_test_data_set.txt")
testloader_2_simulated = torch.utils.data.DataLoader(testset_2_simulated, batch_size=1,shuffle=False, num_workers=4)
print ("Read Simulated Test Dataset which took %.3fs" %(time.time() - time_before_reading))
time_before_reading = time.time()
testset_3_real = AbstractionLearningDataSet(file_path="../data_sets/real_world_test_data_set.txt")
testloader_3_real = torch.utils.data.DataLoader(testset_3_real, batch_size=1,shuffle=False, num_workers=4)
print ("Read Real Test Dataset which took %.3fs" %(time.time() - time_before_reading))
trainset_size = len(trainset)
testset_rand_size = len(testset_1_random)
testset_simu_size = len(testset_2_simulated)
testset_real_size = len(testset_3_real)
## CNN initialization ##
# Net
name = "AbstractionLearningCNN"
net = AbstractionLearningCNN()
if use_GPU:
net = net.cuda()
print(net)
# Loss function and optimizer
learning_rate = 0.0001
costs_loss_function = nn.L1Loss()
feasibility_loss_function = nn.BCELoss()
optimizer = optim.SGD(net.parameters(), learning_rate, momentum=0.9)
cost_loss_weight = float(1.0)
feasible_loss_weight = float(1.0)
print("Learning rate = ", learning_rate)
## Train the CNN ##
print("\nStart training")
last_cost_loss = float('Inf')
last_last_cost_loss = float('Inf')
last_feasible_loss = float('Inf')
last_last_feasible_loss = float('Inf')
for epoch in range(100):
start_time = time.time()
# set all losses to zero
running_cost_loss = 0.0
running_feasible_loss = 0.0
cost_loss_counter = 0
feasible_loss_counter = 0
for i, sample in enumerate(trainloader, 0):
# get the data
heights = sample["heights"].float()
goal_pose = sample["goal_pose"].float()
costs = sample["costs"].float()
feasible = sample["feasible"].float()
# discretize goal pose
goal_pose[0][0] = float(int(goal_pose[0][0]/0.1))
goal_pose[0][1] = float(int(goal_pose[0][1]/0.1))
if goal_pose[0][2] > 0.01:
goal_pose[0][2] = 1.0
if goal_pose[0][2] < -0.01:
goal_pose[0][2] = -1.0
# add 1 dimension to height input
heights_4d = torch.FloatTensor(1,1,72,72)
heights_4d[0] = heights
# send them to GPU if available
if use_GPU:
heights_4d = heights_4d.cuda()
goal_pose = goal_pose.cuda()
costs = costs.cuda()
feasible = feasible.cuda()
# wrap them in Variable
heights_4d = Variable(heights_4d)
goal_pose = Variable(goal_pose)
costs = Variable(costs)
feasible = Variable(feasible)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
output_costs, output_feasible = net(heights_4d, goal_pose)
costs_loss = costs_loss_function(output_costs, costs)
feasible_loss = feasibility_loss_function(output_feasible, feasible)
if (feasible.data[0][0] > 0.5):
loss = cost_loss_weight * costs_loss + feasible_loss_weight * feasible_loss
running_cost_loss += costs_loss.data
running_feasible_loss += feasible_loss.data
cost_loss_counter += 1
feasible_loss_counter += 1
else:
loss = feasible_loss_weight * feasible_loss
running_feasible_loss += feasible_loss.data
feasible_loss_counter += 1
if use_GPU:
loss = loss.cuda()
loss.backward()
optimizer.step()
current_feasible_loss = running_feasible_loss.item() / max(feasible_loss_counter, 1)
current_cost_loss = running_cost_loss.item() / max(cost_loss_counter, 1)
training_time = time.time() - start_time
# Save the CNN at the current episode
checkpoint = {
"name": name,
"state": net.state_dict(),
"epoch": epoch,
}
torch.save(checkpoint, "../CNN_checkpoints/{}_epoch_{}_lr{}.model".format(name, epoch+1, learning_rate))
## Evaluate the network on three different data sets ##
## Random scenes ##
rand_feasibility_correct = 0
rand_sum_costs_at_feasible = 0.0
rand_sum_costs_at_feasible_counter = 0
rand_sum_cost_diffs_at_feasible = 0.0
rand_start_time = time.time()
for i, sample in enumerate(testloader_1_random, 0):
# get the data
heights = sample["heights"].float()
goal_pose = sample["goal_pose"].float()
costs = sample["costs"].float()
feasible = sample["feasible"].float()
# discretize goal pose
goal_pose[0][0] = float(int(goal_pose[0][0]/0.1))
goal_pose[0][1] = float(int(goal_pose[0][1]/0.1))
if goal_pose[0][2] > 0.01:
goal_pose[0][2] = 1.0
if goal_pose[0][2] < -0.01:
goal_pose[0][2] = -1.0
# add 1 dimension to height input
heights_4d = torch.FloatTensor(1,1,72,72)
heights_4d[0] = heights
# send them to GPU if available
if use_GPU:
heights_4d = heights_4d.cuda()
goal_pose = goal_pose.cuda()
# wrap them in Variable
heights_4d = Variable(heights_4d)
goal_pose = Variable(goal_pose)
# get network results
output_costs, output_feasible = net(heights_4d, goal_pose)
if float(output_feasible.data[0]) > 0.5 and float(feasible[0]) > 0.5:
rand_feasibility_correct += 1
if float(feasible[0]) > 0.5:
rand_sum_costs_at_feasible += float(output_costs.data[0])
rand_sum_cost_diffs_at_feasible += abs(float(costs) - float(output_costs.data[0]))
rand_sum_costs_at_feasible_counter += 1
elif float(output_feasible.data[0]) < 0.5 and float(feasible[0]) < 0.5:
rand_feasibility_correct += 1
rand_eval_time = time.time() - rand_start_time
## Simulated scenes ##
simu_feasibility_correct = 0
simu_sum_costs_at_feasible = 0.0
simu_sum_costs_at_feasible_counter = 0
simu_sum_cost_diffs_at_feasible = 0.0
simu_start_time = time.time()
for i, sample in enumerate(testloader_2_simulated, 0):
# get the data
heights = sample["heights"].float()
goal_pose = sample["goal_pose"].float()
costs = sample["costs"].float()
feasible = sample["feasible"].float()
# discretize goal pose
goal_pose[0][0] = float(int(goal_pose[0][0]/0.1))
goal_pose[0][1] = float(int(goal_pose[0][1]/0.1))
if goal_pose[0][2] > 0.01:
goal_pose[0][2] = 1.0
if goal_pose[0][2] < -0.01:
goal_pose[0][2] = -1.0
# add 1 dimension to height input
heights_4d = torch.FloatTensor(1,1,72,72)
heights_4d[0] = heights
# send them to GPU if available
if use_GPU:
heights_4d = heights_4d.cuda()
goal_pose = goal_pose.cuda()
# wrap them in Variable
heights_4d = Variable(heights_4d)
goal_pose = Variable(goal_pose)
# get network results
output_costs, output_feasible = net(heights_4d, goal_pose)
if float(output_feasible.data[0]) > 0.5 and float(feasible[0]) > 0.5:
simu_feasibility_correct += 1
if float(feasible[0]) > 0.5:
simu_sum_costs_at_feasible += float(output_costs.data[0])
simu_sum_cost_diffs_at_feasible += abs(float(costs) - float(output_costs.data[0]))
simu_sum_costs_at_feasible_counter += 1
elif float(output_feasible.data[0]) < 0.5 and float(feasible[0]) < 0.5:
simu_feasibility_correct += 1
simu_eval_time = time.time() - simu_start_time
## Real world scenes ##
real_feasibility_correct = 0
real_sum_costs_at_feasible = 0.0
real_sum_costs_at_feasible_counter = 0
real_sum_cost_diffs_at_feasible = 0.0
real_start_time = time.time()
for i, sample in enumerate(testloader_3_real, 0):
# get the data
heights = sample["heights"].float()
goal_pose = sample["goal_pose"].float()
costs = sample["costs"].float()
feasible = sample["feasible"].float()
# discretize goal pose
goal_pose[0][0] = float(int(goal_pose[0][0]/0.1))
goal_pose[0][1] = float(int(goal_pose[0][1]/0.1))
if goal_pose[0][2] > 0.01:
goal_pose[0][2] = 1.0
if goal_pose[0][2] < -0.01:
goal_pose[0][2] = -1.0
# add 1 dimension to height input
heights_4d = torch.FloatTensor(1,1,72,72)
heights_4d[0] = heights
# send them to GPU if available
if use_GPU:
heights_4d = heights_4d.cuda()
goal_pose = goal_pose.cuda()
# wrap them in Variable
heights_4d = Variable(heights_4d)
goal_pose = Variable(goal_pose)
# get network results
output_costs, output_feasible = net(heights_4d, goal_pose)
if float(output_feasible.data[0]) > 0.5 and float(feasible[0]) > 0.5:
real_feasibility_correct += 1
if float(feasible[0]) > 0.5:
real_sum_costs_at_feasible += float(output_costs.data[0])
real_sum_cost_diffs_at_feasible += abs(float(costs) - float(output_costs.data[0]))
real_sum_costs_at_feasible_counter += 1
elif float(output_feasible.data[0]) < 0.5 and float(feasible[0]) < 0.5:
real_feasibility_correct += 1
real_eval_time = time.time() - real_start_time
print("[%3d](%ds) F-Loss: %.4f, C-loss: %.4f for %d | RANDOM(%ds): F: %d/%d (%.1f%%), avg C: %.3f, avg. error: %.3f | SIMULATED(%ds): F: %d/%d (%.1f%%), avg C: %.3f, avg error: %.3f | REAL(%ds): F: %d/%d (%.1f%%), avg C: %.3f, avg error: %.3f"
%(epoch + 1, training_time, running_feasible_loss / max(feasible_loss_counter,1), running_cost_loss / max(cost_loss_counter,1), cost_loss_counter,
rand_eval_time, rand_feasibility_correct, testset_rand_size, 100 * rand_feasibility_correct/testset_rand_size, rand_sum_costs_at_feasible/max(rand_sum_costs_at_feasible_counter,1), rand_sum_cost_diffs_at_feasible/max(rand_sum_costs_at_feasible_counter,1),
simu_eval_time, simu_feasibility_correct, testset_simu_size, 100 * simu_feasibility_correct/testset_simu_size, simu_sum_costs_at_feasible/max(simu_sum_costs_at_feasible_counter,1), simu_sum_cost_diffs_at_feasible/max(simu_sum_costs_at_feasible_counter,1),
real_eval_time, real_feasibility_correct, testset_real_size, 100 * real_feasibility_correct/testset_real_size, real_sum_costs_at_feasible/max(real_sum_costs_at_feasible_counter,1), real_sum_cost_diffs_at_feasible/max(real_sum_costs_at_feasible_counter,1)))
## Adjust loss weights ##
if current_feasible_loss > last_feasible_loss and last_feasible_loss > last_last_feasible_loss:
feasible_loss_weight = feasible_loss_weight / 5.0
last_feasible_loss = float('Inf')
last_last_feasible_loss = float('Inf')
last_cost_loss = float('Inf')
last_last_cost_loss = float('Inf')
print("New feasible loss weight is %.6f" %(feasible_loss_weight))
else:
last_last_feasible_loss = last_feasible_loss
last_feasible_loss = current_feasible_loss
if current_cost_loss > last_cost_loss and last_cost_loss > last_last_cost_loss:
cost_loss_weight = cost_loss_weight / 5.0
last_cost_loss = float('Inf')
last_last_cost_loss = float('Inf')
print("New cost loss weight is %.6f" %(cost_loss_weight))
else:
last_last_cost_loss = last_cost_loss
last_cost_loss = current_cost_loss
TrainNetwork()
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
:Author: Berk Onat <b.onat@warwick.ac.uk>
:Year: 2017
:Licence: UIUC LICENSE
"""
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_libpymolfile')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_libpymolfile')
_libpymolfile = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_libpymolfile', [dirname(__file__)])
except ImportError:
import _libpymolfile
return _libpymolfile
try:
_mod = imp.load_module('_libpymolfile', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_libpymolfile = swig_import_helper()
del swig_import_helper
else:
import _libpymolfile
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
def molfile_plugin_list(maxsize: 'int') -> "PyObject *":
"""molfile_plugin_list(maxsize) -> PyObject *"""
return _libpymolfile.molfile_plugin_list(maxsize)
def molfile_init() -> "int":
"""molfile_init() -> int"""
return _libpymolfile.molfile_init()
def molfile_finish() -> "int":
"""molfile_finish() -> int"""
return _libpymolfile.molfile_finish()
def get_plugin(molcapsule: 'PyObject *', plug_no: 'int') -> "PyObject *":
"""get_plugin(molcapsule, plug_no) -> PyObject *"""
return _libpymolfile.get_plugin(molcapsule, plug_no)
def molfile_plugin_info(molcapsule: 'PyObject *', plugin_no: 'int') -> "PyObject *":
"""molfile_plugin_info(molcapsule, plugin_no) -> PyObject *"""
return _libpymolfile.molfile_plugin_info(molcapsule, plugin_no)
def open_file_read(molcapsule: 'PyObject *', fname: 'char *', ftype: 'char *', natoms: 'int') -> "PyObject *":
"""open_file_read(molcapsule, fname, ftype, natoms) -> PyObject *"""
return _libpymolfile.open_file_read(molcapsule, fname, ftype, natoms)
def open_file_write(molcapsule: 'PyObject *', fname: 'char *', ftype: 'char *', natoms: 'int') -> "PyObject *":
"""open_file_write(molcapsule, fname, ftype, natoms) -> PyObject *"""
return _libpymolfile.open_file_write(molcapsule, fname, ftype, natoms)
def close_file_read(molpack: 'PyObject *') -> "PyObject *":
"""close_file_read(molpack) -> PyObject *"""
return _libpymolfile.close_file_read(molpack)
def close_file_write(molpack: 'PyObject *') -> "PyObject *":
"""close_file_write(molpack) -> PyObject *"""
return _libpymolfile.close_file_write(molpack)
def read_fill_structure(molpack: 'PyObject *', prototype: 'PyObject *') -> "PyObject *":
"""read_fill_structure(molpack, prototype) -> PyObject *"""
return _libpymolfile.read_fill_structure(molpack, prototype)
def write_fill_structure(molpack: 'PyObject *', molarray: 'PyObject *') -> "PyObject *":
"""write_fill_structure(molpack, molarray) -> PyObject *"""
return _libpymolfile.write_fill_structure(molpack, molarray)
def read_fill_bonds(molpack: 'PyObject *') -> "PyObject *":
"""read_fill_bonds(molpack) -> PyObject *"""
return _libpymolfile.read_fill_bonds(molpack)
def write_fill_bonds(molpack: 'PyObject *', moldict: 'PyObject *') -> "PyObject *":
"""write_fill_bonds(molpack, moldict) -> PyObject *"""
return _libpymolfile.write_fill_bonds(molpack, moldict)
def read_fill_angles(molpack: 'PyObject *') -> "PyObject *":
"""read_fill_angles(molpack) -> PyObject *"""
return _libpymolfile.read_fill_angles(molpack)
def write_fill_angles(molpack: 'PyObject *', moldict: 'PyObject *') -> "PyObject *":
"""write_fill_angles(molpack, moldict) -> PyObject *"""
return _libpymolfile.write_fill_angles(molpack, moldict)
def read_fill_next_timestep(molpack: 'PyObject *') -> "PyObject *":
"""read_fill_next_timestep(molpack) -> PyObject *"""
return _libpymolfile.read_fill_next_timestep(molpack)
def write_fill_timestep(molpack: 'PyObject *', moldict: 'PyObject *') -> "PyObject *":
"""write_fill_timestep(molpack, moldict) -> PyObject *"""
return _libpymolfile.write_fill_timestep(molpack, moldict)
def are_plugins_same(molpack_a: 'PyObject *', molpack_b: 'PyObject *') -> "PyObject *":
"""are_plugins_same(molpack_a, molpack_b) -> PyObject *"""
return _libpymolfile.are_plugins_same(molpack_a, molpack_b)
def are_filehandles_same(molpack_a: 'PyObject *', molpack_b: 'PyObject *') -> "PyObject *":
"""are_filehandles_same(molpack_a, molpack_b) -> PyObject *"""
return _libpymolfile.are_filehandles_same(molpack_a, molpack_b)
# This file is compatible with both classic and new-style classes.
|
# Generated by Django 2.2.24 on 2021-10-22 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meeting', '0049_session_purpose'),
]
operations = [
migrations.AddField(
model_name='session',
name='on_agenda',
field=models.BooleanField(default=True, help_text='Is this session visible on the meeting agenda?'),
),
]
|
import gym
import tensorflow as tf
from tensorboardX import SummaryWriter
from agent.continuous.seperate.ppo import PPO
from agent.utils import get_gaes
from example_model.policy.mlp.continuous import MLPContinuousActor
from example_model.policy.mlp.continuous import MLPContinuousCritic
env = gym.make('BipedalWalker-v2')
sess = tf.Session()
state_size = 24
output_size = 4
actor = MLPContinuousActor('actor', state_size, output_size)
critic = MLPContinuousCritic('critic', state_size)
agent = PPO(sess, state_size, output_size, actor, critic)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
#saver.restore(sess, 'model/model')
writer = SummaryWriter()
values, states, actions, dones, logp_ts, rewards = [], [], [], [], [], []
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
rollout = 0
score = 0
ep = 0
n_step = 512
record_score_size = 10
record_score = 0
while True:
score_rollout = 0
rollout += 1
for t in range(n_step):
#if ep % 10 == 0:
# env.render()
a, v_t, logp_t = agent.get_action([o])
a, v_t, logp_t = a[0], v_t[0], logp_t[0]
n_o, r, d, _ = env.step(a)
score += r
score_rollout += r
record_score += r
values.append(v_t)
states.append(o)
actions.append(a)
dones.append(d)
rewards.append(r)
logp_ts.append(logp_t)
o = n_o
if d:
ep += 1
if ep % record_score_size == 0:
if int(ep / record_score_size) < 600:
writer.add_scalar('data/reward', record_score / record_score_size, int(ep / record_score_size))
record_score = 0
writer.add_scalar('data/reward_per_episode', score, ep)
print(score, ep)
score = 0
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
a, v_t, logp_t = agent.get_action([o])
values.append(v_t[0])
next_value = values[1:]
value = values[:-1]
adv, target = get_gaes(rewards, dones, value, next_value, agent.gamma, agent.lamda, False)
value_loss, kl, ent = agent.update(states, actions, target, adv, logp_ts)
writer.add_scalar('data/value_loss_per_rollout', value_loss, rollout)
writer.add_scalar('data/kl_per_rollout', kl, rollout)
writer.add_scalar('data/ent_per_rollout', ent, rollout)
writer.add_scalar('data/reward_per_rollout', score_rollout, rollout)
values, states, actions, dones, logp_ts, rewards = [], [], [], [], [], []
|
"""User configuration file
File organizing all configurations that may be set by user when running the
train.py script.
Call python -m src.train --help for a complete and formatted list of available user options.
"""
import argparse
import time
from random import randint
import os
import socket
import glob
import shutil
import sys
import pathlib
def get_opt():
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-m', '--model_name', type=str, default='sononet')
ap.add_argument('-lr', '--learning_rate', type=float, default=1e-4)
ap.add_argument('-ep', '--epochs', type=int, default=75)
ap.add_argument('-bs', '--batch_size', type=int, default=64)
ap.add_argument('-is', '--img_size', type=int, default=224)
ap.add_argument('-g', '--gpus', type=str, default='0')
ap.add_argument('-ex', '--experiment', type=str, default='')
ap.add_argument('-l', '--load_checkpoint_d', type=str, default=None)
ap.add_argument('-v', '--val_split', type=str, default='val')
args = ap.parse_args()
    # Get the current time of the run and append a four-digit random number so that
    # experiments started at the exact same time still get distinct folder names.
timestamp = time.strftime("%Y%m%d-%H%M%S") + '-' + str(randint(1000,9999))
args.timestamp = timestamp
#register a few values that might be important for reproducibility
args.screen_name = os.getenv('STY')
args.hostname = socket.gethostname()
if args.gpus is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
else:
args.gpus = os.environ['CUDA_VISIBLE_DEVICES']
import platform
args.python_version = platform.python_version()
import torch
args.pytorch_version = torch.__version__
import torchvision
args.torchvision_version = torchvision.__version__
import numpy as np
args.numpy_version = np.__version__
args.save_folder = './runs/'
pathlib.Path(args.save_folder).mkdir(parents=True, exist_ok=True)
args.output_folder = args.save_folder+'/'+args.experiment+'_'+args.timestamp
os.mkdir(args.output_folder)
log_configs(args)
args = vars(args)
return args
def log_configs(opt):
with open(opt.output_folder + '/opts.txt', 'w') as f:
for key, value in sorted(vars(opt).items()):
f.write(key + ': ' + str(value).replace('\n', ' ').replace('\r', '') + '\r\n')
save_run_state(opt)
def save_command(opt):
command = ' '.join(sys.argv)
with open(f"{opt.output_folder}/command.txt", "w") as text_file:
text_file.write(command)
def save_run_state(opt):
if not os.path.exists(f'{opt.output_folder}/src/'):
os.mkdir(f'{opt.output_folder}/src/')
[shutil.copy(filename, (f'{opt.output_folder}/src/')) for filename in glob.glob(os.path.dirname(__file__) + '/*.py')]
save_command(opt)
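# Example (assumed invocation, mirroring the module docstring):
#
#   python -m src.train --model_name sononet --batch_size 32 --gpus 0
#
# get_opt() parses these flags, creates ./runs/<experiment>_<timestamp>/,
# writes opts.txt plus a copy of the source files there, and returns the
# options as a plain dict, e.g. opt = get_opt(); print(opt['learning_rate']).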
|
def find_median(Arr, start, size):
myList = []
for i in range(start, start + size):
myList.append(Arr[i])
myList.sort()
return myList[size // 2]
def fastSelect(array, k):
n = len(array)
if 0 < k <= n:
setOfMedians = []
arr_less_P = []
arr_equal_P = []
arr_more_P = []
i = 0
while i < n // 5:
median = find_median(array, 5 * i, 5)
setOfMedians.append(median)
i += 1
if 5 * i < n:
median = find_median(array, 5 * i, n % 5)
setOfMedians.append(median)
if len(setOfMedians) == 1:
pivot = setOfMedians[0]
elif len(setOfMedians) > 1:
pivot = fastSelect(setOfMedians, len(setOfMedians) // 2)
for element in array:
if element < pivot:
arr_less_P.append(element)
elif element > pivot:
arr_more_P.append(element)
else:
arr_equal_P.append(element)
if k <= len(arr_less_P):
return fastSelect(arr_less_P, k)
elif k > (len(arr_less_P) + len(arr_equal_P)):
return fastSelect(arr_more_P, (k - len(arr_less_P) - len(arr_equal_P)))
else:
return pivot
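# Note (added commentary): fastSelect implements the deterministic
# median-of-medians selection algorithm. k is 1-indexed (k=1 returns the
# smallest element), and the group-of-5 pivot choice guarantees O(n)
# worst-case running time. Values of k outside 1..len(array) fall through
# and return None.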
if __name__ == '__main__':
Arr = [6, 80, 36, 8, 23, 7, 10, 12, 42]
k = 5
print(fastSelect(Arr, k)) # Outputs 12
Arr = [5, 2, 20, 17, 11, 13, 8, 9, 11]
k = 5
print(fastSelect(Arr, k)) # Outputs 11
Arr = [6, 80, 36, 8, 23, 7, 10, 12, 42, 99]
k = 10
print(fastSelect(Arr, k)) # Outputs 99
|
from nbconvert.preprocessors import Preprocessor
class StripOutput(Preprocessor):
"""
Clear prompt number and output (if any) from all notebook cells.
Example
-------
# command line usage:
jupyter nbconvert example.ipynb --pre=nbexample.strip_output.StripOutput
"""
def preprocess_cell(self, cell, resources, index):
"""
Clear prompt number and output (if any) from cell.
"""
if 'outputs' in cell:
cell['outputs'] = []
if 'prompt_number' in cell:
cell['prompt_number'] = None
return cell, resources
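# Programmatic usage sketch (assumes nbformat is installed alongside nbconvert;
# the notebook path is illustrative):
#
#   import nbformat
#   from nbconvert import NotebookExporter
#
#   nb = nbformat.read("example.ipynb", as_version=4)
#   exporter = NotebookExporter()
#   exporter.register_preprocessor(StripOutput, enabled=True)
#   body, resources = exporter.from_notebook_node(nb)
#   with open("example_stripped.ipynb", "w") as f:
#       f.write(body)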
|
#Student program - Conor Hennessy - November 2014~2016
import Test
##Defining RunTest function to run task one code
def RunTest():
Test.RunQuiz()
##Defining Savedata function with code to check for headers and get values from test
def Savedata():
    print("Writing to file...")
    global data
    data = [{'Name':name,'Score':score,'Class':classSet}]
    global fileHandle
    try:
        # If the results file already exists, it must already contain the header row.
        with open(str(classSet)+' Class Results.csv', 'r'):
            pass
        fileHandle = open(str(classSet)+' Class Results.csv', 'a')
        WriteToFile()
    except IOError:
        # First run for this class: create the file and write the header row first.
        fileHandle = open(str(classSet)+' Class Results.csv', 'a')
        fileHandle.write('Name,Score\n')
        WriteToFile()
##Defining WriteToFile function
def WriteToFile():
for value in data:
fileHandle.write('{Name},{Score}'.format(**value))
fileHandle.write('\n')
fileHandle.close()
print ("Writing to file complete!")
# Function asking what class they are in
def ClassChecker():
global classSet
classSet = input ("What is your class set? ")
    if classSet in ('x1', 'x2', 'x3'):
return '\n'
else:
print("[Please input in the form: x1, x2 or x3]")
return ClassChecker()
#Running the test and then writing the score to file
ClassChecker()
if __name__ == '__main__':
RunTest()
from Test import name, score
Savedata()
|
# Generated by Django 3.0.8 on 2020-08-20 08:21
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
('bio', models.TextField()),
('phone_number', models.CharField(blank=True, max_length=15)),
('prof_photo', models.ImageField(upload_to='pics/profiles/')),
('user', models.OneToOneField(blank=True, default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Projects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('project', models.CharField(max_length=50)),
('project_description', models.CharField(max_length=1000)),
('repository_link', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(auto_now_add=True)),
('project_owner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='projectawards.Profile')),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('submitted_date', models.DateTimeField(auto_now_add=True)),
('user_interface', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),
('user_experience', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),
('responsiveness', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),
('design_average', models.IntegerField()),
('functionality', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),
('io', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),
('usability_average', models.IntegerField()),
('content_average', models.IntegerField()),
('total_average', models.IntegerField()),
('for_project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projectawards.Projects')),
('submitted_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projectawards.Profile')),
],
),
migrations.CreateModel(
name='Followers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('follower_id', models.IntegerField()),
('for_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projectawards.Profile')),
],
),
migrations.CreateModel(
name='Comments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(max_length=500)),
('submitted_on', models.DateTimeField(auto_now_add=True)),
('for_project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projectawards.Projects')),
('submitted_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projectawards.Profile')),
],
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@title: pm_synth.py
@date: 08/13/16
@author: Daniel Guest
@purpose: Provide classes to create phase modulation synthesizer.
"""
import numpy as np
import math
class Synth(object):
def __init__(self, fs=10000, inv_samp=50, n_op=6):
# Initialize parameters
self.fs = fs
self.inv_samp = inv_samp
self.output = [0]*self.inv_samp
self.current_inv = 0
self.amp_val = 0
        self.n_op = n_op  # respect the constructor argument instead of hard-coding 6
# Initialize components
self.ops = []
for op in range(self.n_op):
self.ops.append(Operator(self))
self.output_module = Output(self)
# Choose algorithm
self.algorithm = a1(ops=self.ops, output_module=self.output_module)
self.algorithm.implement()
def synth(self):
for op in range(self.n_op):
self.ops[op].run()
self.output_module.run()
self.update_inv()
def update_inv(self):
self.current_inv = self.current_inv + 1
class Algorithm(object):
def __init__(self, ops, output_module):
self.ops = ops
self.output_module = output_module
def implement(self):
print("DO SOMETHING HERE?")
class a1(Algorithm):
def __init__(self, ops, output_module):
Algorithm.__init__(self, ops, output_module)
def implement(self):
self.ops[1].input_connect = [self.ops[0]]
self.ops[3].input_connect = [self.ops[2]]
self.ops[5].input_connect = [self.ops[4]]
self.output_module.input_connect = [self.ops[1],
self.ops[3],
self.ops[5]]
class a2(Algorithm):
def __init__(self, ops, output_module):
Algorithm.__init__(self, ops, output_module)
def implement(self):
self.ops[0].input_connect = [self.ops]
class Component(object):
def __init__(self, master, input_connect=None):
self.master = master
self.input = [0]*self.master.inv_samp
self.output = [0]*self.master.inv_samp
self.input_connect = input_connect
def pull(self):
self.input = [0]*self.master.inv_samp
self.output = [0]*self.master.inv_samp
        if self.input_connect is None:
self.input[:] = [0]*self.master.inv_samp
elif len(self.input_connect) == 1:
self.input[:] = self.input_connect[0].output[:]
else:
inputs = []
for i in range(len(self.input_connect)):
inputs.append(self.input_connect[i].output[:])
self.input[:] = [sum(x) for x in zip(*inputs)]
class Operator(Component):
def __init__(self, master, init_freq=200, input_connect=None):
Component.__init__(self, master, input_connect)
self.freq = init_freq
self.phase = [0]*self.master.inv_samp
self.phase_inc = 0
self.phase_delay = [0]
self.amp_val = 0
def run(self):
self.pull()
self.phase_inc = (self.freq/self.master.fs)*(2*np.pi)
for n in range(self.master.inv_samp):
if n == 0:
self.phase[0] = self.phase_delay[0] + self.phase_inc\
+ self.input[0]
else:
self.phase[n] = self.phase[n-1] + self.phase_inc\
+ self.input[n]
if self.phase[n] > math.pi:
self.phase[n] = self.phase[n] - 2*math.pi
self.output[n] = math.sin(self.phase[n])*self.amp_val
self.phase_delay[0] = self.phase[-1]
class Output(Component):
    def __init__(self, master, input_connect=None):
Component.__init__(self, master, input_connect)
def run(self):
self.pull()
self.master.output[:] = self.input[:]
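# Usage sketch (not part of the original file): render a short buffer by driving
# the synth one control interval at a time. Operator amplitudes default to 0, so
# a real caller would also set ops[i].amp_val (and optionally ops[i].freq) first.
#
#   synth = Synth(fs=10000, inv_samp=50)
#   for op in synth.ops:
#       op.amp_val = 0.5
#   rendered = []
#   for _ in range(20):            # 20 control intervals = 1000 samples
#       synth.synth()
#       rendered.extend(synth.output)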
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 27 15:18:53 2020
@author: z003vrzk
"""
# Python imports
import sys, os
import unittest
# Sklearn imports
from sklearn.naive_bayes import ComplementNB
from sklearn.dummy import DummyClassifier
from sklearn.metrics import accuracy_score, make_scorer, precision_score, recall_score
from sklearn.model_selection import ShuffleSplit, KFold
from sklearn.metrics._scorer import _check_multimetric_scoring
from sklearn.base import is_classifier, clone
from sklearn.metrics._scorer import check_scoring
from sklearn.model_selection._split import check_cv
from sklearn.utils import indexable
from sklearn.neighbors import KNeighborsClassifier
# Third party imports
from joblib import Parallel, delayed
import numpy as np
from scipy.sparse import csr_matrix
# Local imports
from bag_cross_validate import (BagScorer,
cross_validate_bag,
_fit_and_score,
bags_2_si,)
#%%
class TestBagScorer(unittest.TestCase):
def setUp(self):
"""Generate some dummy data
Create bags and single-instance data
        A set of bags has shape [n x (m x p)] and can be thought of as an
array of bag instances.
n is the number of bags
m is the number of instances within each bag (this can vary between bags)
p is the feature space of each instance"""
n_bags = 100
        m_instances = 5 # Static number of instances per bag
p = 5
bags = []
        # 50% positive class, 50% negative class
# Bags are created with random data, of shape (n, (m,p))
labels = np.concatenate((np.ones(int(n_bags*0.5)),
np.zeros(int(n_bags*(1-0.5))),
))
for _ in range(n_bags):
_rand = np.random.rand(m_instances, p)
bag = np.where(_rand < 0.25, 1, 0)
bags.append(bag)
bags = np.array(bags)
# Split dummy dataset dataset
rs = ShuffleSplit(n_splits=1, test_size=0.2, train_size=0.8)
train_index, test_index = next(rs.split(bags, labels))
train_bags, train_labels = bags[train_index], labels[train_index]
test_bags, test_labels = bags[test_index], labels[test_index]
self.train_bags, self.train_labels = bags[train_index], labels[train_index]
self.test_bags, self.test_labels = bags[test_index], labels[test_index]
return None
def test_BagScorer(self):
"""Define scoring functions, such as accuracy or recall,
which will be used to score how well single-instance inference
performs on the bag classification task
The scoring functions have some requirements -
a) They are passed to BagScorer on initialization
b) Must have a method "_score_func" with a signature f(y_true, y_pred)
(This is provided by default when using sklearn.metrics.make_scorer)
"""
# Create scoring metrics, and load scoring metric into BagScorer
accuracy_scorer = make_scorer(accuracy_score, normalize=True)
precision_scorer = make_scorer(precision_score, average='weighted')
recall_scorer = make_scorer(recall_score, average='weighted')
        # accuracy_scorer._kwargs == {'normalize': True}
self.assertDictContainsSubset({'normalize':True}, accuracy_scorer._kwargs)
self.assertIn('_score_func', accuracy_scorer.__dict__.keys())
# Dummy data
train_bags, train_labels = self.train_bags, self.train_labels
test_bags, test_labels = self.test_bags, self.test_labels
# Create a single-instance estimator
compNB = ComplementNB(alpha=1.0, fit_prior=True, class_prior=None, norm=False)
# Test custom scorer
bagAccScorer = BagScorer(accuracy_scorer, sparse=True)
bagPrecisionScorer = BagScorer(precision_scorer, sparse=True)
bagRecallScorer = BagScorer(recall_scorer, sparse=True)
estimator = bagAccScorer.estimator_fit(compNB, train_bags, train_labels)
# The estimator is the same for all instances...
accuracy = bagAccScorer(estimator, test_bags, test_labels)
precision = bagPrecisionScorer(estimator, test_bags, test_labels)
recall = bagRecallScorer(estimator, test_bags, test_labels)
self.assertIsInstance(accuracy, float)
self.assertLess(accuracy, 1)
self.assertGreater(accuracy, 0)
self.assertIsInstance(precision, float)
self.assertLess(precision, 1)
self.assertGreater(precision, 0)
self.assertIsInstance(recall, float)
self.assertLess(recall, 1)
self.assertGreater(recall, 0)
return None
def test_scorer_signature(self):
"""Define scoring functions, such as accuracy or recall,
which will be used to score how well single-instance inference
performs on the bag classification task
The scoring functions have some requirements -
a) They are passed to BagScorer on initialization
b) Must have a method "_score_func" with a signature f(y_true, y_pred)
(This is provided by default when using sklearn.metrics.make_scorer)
"""
accuracy_scorer = make_scorer(accuracy_score, normalize='weighted')
print(accuracy_scorer._kwargs) # {'normalize':'weighted'}
hasattr(accuracy_scorer, '_score_func') # True
self.assertTrue(hasattr(accuracy_scorer, '_score_func'))
def test_BagScorer_signature(self):
# Test custom scorer
accuracy_scorer = make_scorer(accuracy_score, normalize='weighted')
bagAccScorer = BagScorer(accuracy_scorer, sparse=True)
self.assertTrue(callable(bagAccScorer), msg="BagScorer must be callable")
return None
def test_BagScorer_metric(self):
"""Define scoring functions, such as accuracy or recall,
which will be used to score how well single-instance inference
performs on the bag classification task
The scoring functions have some requirements -
a) They are passed to BagScorer on initialization
b) Must have a method "_score_func" with a signature f(y_true, y_pred)
(This is provided by default when using sklearn.metrics.make_scorer)
Successful conditions:
The bagscorer must report the same performance metrics as when the
metrics are manually calculated
This tests if the bagscorer property fits, trains, and evaluates
the estimator passed to it
"""
# Generate a scoring metric for the bag scorer
accuracy_scorer = make_scorer(accuracy_score)
self.assertTrue(hasattr(accuracy_scorer, '_score_func'),
msg='accuracy scorer must have _score_function method')
# Generate some data
train_bags, train_labels = self.train_bags, self.train_labels
test_bags, test_labels = self.test_bags, self.test_labels
# Create a dummy estimator
dumb = DummyClassifier(strategy='constant', constant=1)
# concatenate arrays across 1st axis
SI_train, SI_train_labels = bags_2_si(train_bags, train_labels)
SI_test, SI_test_labels = bags_2_si(test_bags, test_labels)
dumb.fit(SI_train, SI_train_labels)
pred_test = dumb.predict(SI_test)
pred_train = dumb.predict(SI_train)
"""Calculate the correct number of predictions based on dummy classifier
The dummy classifier predicts 1 always (constant)
        The training set has a known fraction of positive labels, so the dummy
        accuracy equals that fraction."""
pct_train = sum(train_labels) / len(train_labels)
pct_test = sum(test_labels) / len(test_labels)
dumb_accuracy_train = accuracy_score(SI_train_labels, pred_train)
dumb_accuracy_test = accuracy_score(SI_test_labels, pred_test)
# Test custom scorer, with the same dummy estimator
bagAccScorer = BagScorer(accuracy_scorer, sparse=True)
estimator = bagAccScorer.estimator_fit(dumb, train_bags, train_labels)
test_score = bagAccScorer(estimator, test_bags, test_labels)
train_score = bagAccScorer(estimator, train_bags, train_labels)
"""test_score should output the accuracy for predictions among bags
The test_score for bagScorer should be equal to the dumb_accuracy_test
        because bag labels are reduced by the most frequent SI prediction
If all SI labels are predicted + then all bags will be predicted +
The accuracy of bag labels reduced by BagScorer will be equal to
percent of bag labels that are positive"""
self.assertEqual(test_score, pct_test)
self.assertEqual(train_score, pct_train)
self.assertEqual(pct_train, dumb_accuracy_train)
self.assertEqual(pct_test, dumb_accuracy_test)
def test_cross_validate_bag(self):
# Scoring
accuracy_scorer = make_scorer(accuracy_score, normalize='weighted')
# Dummy data
train_bags, train_labels = self.train_bags, self.train_labels
test_bags, test_labels = self.test_bags, self.test_labels
# Define an estimator
dumb = DummyClassifier(strategy='constant', constant=1)
# Calculate metrics manually
expected_accuracy = sum(train_labels) / len(train_labels)
kf = KFold(n_splits = 4)
accuracies = []
for train_index, test_index in kf.split(train_labels):
_fold = train_labels[test_index]
_acc = sum(_fold) / len(_fold)
print(sum(_fold))
accuracies.append(_acc)
print('Global Accuracy : ', sum(train_labels) / len(train_labels))
print('Averaged accuracies : ', np.mean(accuracies))
# Custom scorer
bagAccScorer = BagScorer(accuracy_scorer, sparse=True)
scorer = {'bag-accuracy-scorer': bagAccScorer,
}
# Test cross_validate_bag
        # Res is a dictionary of lists {'fit_time':[1,2,3],
# 'test_bag-accuracy-scorer':[0.1,0.2,0.3]}
res = cross_validate_bag(dumb, train_bags, train_labels,
cv=4, scoring=scorer,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score=False,
return_estimator=False, error_score='raise')
"""The arithmetic mean of all accuracy predictions should equal the
prediction accuracy of the training bags (At least if all splits are
equal size -> Which is not true if the number of training instances
is not divisible by the number of splits)
This is only true because the dummy classifier always predicts 1
If the splits are not equal size then they will be close to equal"""
self.assertAlmostEqual(np.mean(res['test_bag-accuracy-scorer']),
expected_accuracy, 3)
# Just check the mean also LOL
self.assertEqual(np.mean(res['test_bag-accuracy-scorer']),
expected_accuracy)
# 4 Crossvalidation splits
self.assertTrue(len(res['test_bag-accuracy-scorer']) == 4)
# Assert result has dictionary values
self.assertIn('fit_time', res.keys())
self.assertIn('score_time', res.keys())
return None
def test_fit_and_score(self):
# Scoring
accuracy_scorer = make_scorer(accuracy_score, normalize='weighted')
# Test estimator
dumb = DummyClassifier(strategy='constant', constant=1)
# Test custom scorer
bagAccScorer = BagScorer(accuracy_scorer, sparse=True)
# _fit_and_score testing
X = self.train_bags
y = self.train_labels
scoring = {'bag-accuracy-scorer': bagAccScorer,
}
estimator = dumb
groups = None
cv = 3
n_jobs=3
verbose=0
pre_dispatch=6
fit_params=None
return_estimator=None
error_score='raise'
return_train_score=None
parameters=None
# Test _fit_and_score method
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorers = _check_multimetric_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
        # Scores is a list of dictionaries
"""When scoring is a dictionary, the returned result looks like
[{'test_scores': {'bag-accuracy-scorer': 0.5185185185185185},
'fit_time': 0.0,
'score_time': 0.0},
{'test_scores': {'bag-accuracy-scorer': 0.5185185185185185},
'fit_time': 0.0,
'score_time': 0.0}, ... ]"""
scores = parallel(
delayed(_fit_and_score)(
clone(estimator), X, y, scorers, train, test, verbose, parameters,
fit_params, return_train_score=return_train_score,
return_times=True, return_estimator=return_estimator,
error_score=error_score)
for train, test in cv.split(X, y, groups))
for score in scores:
bag_scoring_metric = score['test_scores']
self.assertLessEqual(bag_scoring_metric['bag-accuracy-scorer'], 1)
self.assertGreaterEqual(bag_scoring_metric['bag-accuracy-scorer'], 0)
fit_time = score['fit_time']
self.assertIsInstance(fit_time, float)
score_time = score['score_time']
self.assertIsInstance(score_time, float)
return None
def test_fit_and_score_return_dict(self):
# Scoring
accuracy_scorer = make_scorer(accuracy_score, normalize='weighted')
# Test estimator
dumb = DummyClassifier(strategy='constant', constant=1)
# Test custom scorer
bagAccScorer = BagScorer(accuracy_scorer, sparse=True)
# Rename for easier parameters
X = self.train_bags
y = self.train_labels
scoring = {'bag-scorer':bagAccScorer}
estimator = dumb
groups = None
cv = 3
n_jobs=3
verbose=0
pre_dispatch=6
fit_params=None
return_estimator=True
error_score='raise'
return_train_score=True
parameters=None
# Test _fit_and_score method
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorers = _check_multimetric_scoring(estimator, scoring=scoring)
# Use one cross-validation split
generator = cv.split(X, y, groups)
# Get training and test split of training data
train, test = next(generator)
# Generate scores using BagScorer
scores = _fit_and_score(
clone(estimator), X, y, scorers, train, test, verbose, parameters,
fit_params,
return_train_score=return_train_score,
return_times=True,
return_estimator=return_estimator,
return_n_test_samples=False,
error_score=error_score)
# Returned dictionary contains keys
self.assertIn('train_scores', scores.keys())
self.assertIn('test_scores', scores.keys())
self.assertIn('fit_time', scores.keys())
self.assertIn('score_time', scores.keys())
self.assertIn('estimator', scores.keys())
return None
class TestCrossValidation(unittest.TestCase):
def setUp(self):
"""Generate some dummy data
Create bags and single-instance data
        A set of bags has shape [n x (m x p)] and can be thought of as an
array of bag instances.
n is the number of bags
m is the number of instances within each bag (this can vary between bags)
p is the feature space of each instance"""
n_bags = 100
        m_instances_range = [5,10] # Dynamic number of instances per bag
p = 5
bags_sparse = []
        # 50% positive class, 50% negative class
# Bags are created with random data, of shape (n, (m,p))
labels = np.concatenate((np.ones(int(n_bags*0.5)),
np.zeros(int(n_bags*(1-0.5))),
))
for n in range(0, n_bags):
m_instances = np.random.randint(m_instances_range[0],
m_instances_range[1],)
bag = np.random.randint(low=0, high=2, size=(m_instances, p))
bag_sparse = csr_matrix(bag)
bags_sparse.append(bag_sparse)
bags_sparse = np.array(bags_sparse)
# Split dummy dataset dataset
rs = ShuffleSplit(n_splits=1, test_size=0.2, train_size=0.8)
train_index, test_index = next(rs.split(bags_sparse, labels))
train_bags, train_labels = bags_sparse[train_index], labels[train_index]
test_bags, test_labels = bags_sparse[test_index], labels[test_index]
self.train_bags, self.train_labels = bags_sparse[train_index], labels[train_index]
self.test_bags, self.test_labels = bags_sparse[test_index], labels[test_index]
return None
def test_bag_cross_validate_sparse(self):
# Scoring
accuracy_scorer = make_scorer(accuracy_score, normalize='weighted')
# Dummy data
train_bags, train_labels = self.train_bags, self.train_labels
test_bags, test_labels = self.test_bags, self.test_labels
# Define an estimator
estimator = KNeighborsClassifier(n_neighbors=10,
weights='uniform',
algorithm='ball_tree',
n_jobs=4)
# Custom scorer
bagAccScorer = BagScorer(accuracy_scorer, input_sparse=True)
scorer = {'bag-accuracy-scorer': bagAccScorer,
}
# Test cross_validate_bag
        # Res is a dictionary of lists {'fit_time':[1,2,3],
# 'test_bag-accuracy-scorer':[0.1,0.2,0.3]}
res = cross_validate_bag(estimator, train_bags, train_labels,
cv=4, scoring=scorer,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score=False,
return_estimator=False, error_score='raise')
# 4 Crossvalidation splits
self.assertTrue(len(res['test_bag-accuracy-scorer']) == 4)
# Assert result has dictionary values
self.assertIn('fit_time', res.keys())
self.assertIn('score_time', res.keys())
return None
#%%
if __name__ == '__main__':
# Run all test cases
unittest.main()
# # Run specific test methods...
# runner = unittest.TextTestRunner()
# classes = [TestBagScorer]
# unit_tests_to_run = [
# 'test_BagScorer',
# 'test_BagScorer_dict',
# 'test_BagScorer_metric',
# 'test_BagScorer_signature',
# 'test_cross_validate_bag',
# 'test_fit_and_score',
# 'test_fit_and_score_return_dict',
# 'test_scorer_signature',
# ]
# Run specific test methods... (altenative method)
# suite = unittest.TestSuite()
# suite.addTest(TestBagScorer('test_fit_and_score_return_dict'))
# runner = unittest.TextTestRunner()
# runner.run(suite)
|
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import rnn
import config
import gen_data
def lstmnet(input_tensor, label_tensor, global_step, phase, reuse_weights):
# input_tensor: [ BATCH_SIZE, SEQUENCE_LENGTH, INPUT_DIMENSION]
# label_tensor: [ BATCH_SIZE ]
# global_step: [ 1 ]
with tf.variable_scope('NeuralNet', reuse=tf.AUTO_REUSE) as scope:
if reuse_weights:
scope.reuse_variables()
X = tf.reshape(input_tensor, [config.batch_size,
config.sequence_length, config.input_dimension])
# X: [ BATCH_SIZE, SEQUENCE_LENGTH, INPUT_DIMENSION]
pkeep = tf.placeholder(tf.float32)
Hin = tf.placeholder(
tf.float32, [config.batch_size, config.hidden_layer_size * config.hidden_layer_depth], name='Hin')
# Hin: [ BATCH_SIZE, INTERNALSIZE * NLAYERS]
cells = [rnn.GRUBlockCell(config.hidden_layer_size)
for _ in range(config.hidden_layer_depth)]
# "naive dropout" implementation
dropcells = [rnn.DropoutWrapper(cell, input_keep_prob=pkeep) for cell in cells]
multicell = rnn.MultiRNNCell(dropcells, state_is_tuple=False)
# dropout for the softmax layer
multicell = rnn.DropoutWrapper(multicell, output_keep_prob=pkeep)
Yr, H = tf.nn.dynamic_rnn(multicell, X, dtype=tf.float32,
initial_state=Hin, parallel_iterations=config.batch_size)
H = tf.identity(H, name='H') # just to give it a name
Yr_shaped = tf.reshape(
Yr, [config.batch_size, config.sequence_length, config.hidden_layer_size])
        # Yr: [ BATCH_SIZE, SEQUENCE_LENGTH, INTERNALSIZE ]
# H: [ BATCH_SIZE, INTERNALSIZE*NLAYERS ] # this is the last state in the sequence
Yr_lazy = Yr_shaped[:, config.lazy_cell_num:, :]
# Yr_lazy: [ BATCH_SIZE, LABEL_LENGTH, INTERNALSIZE ]
Yr_lazys = tf.split(Yr_lazy, config.label_length, axis=1)
# Yr_lazys: [ LABEL_LENGTH ][ BATCH_SIZE, INTERNALSIZE ]
# Append a fully connected layer after each non-lazy grucell output
Ys = list()
reuse = reuse_weights
for Yl in Yr_lazys:
Yl = tf.reshape(Yl, [config.batch_size, config.hidden_layer_size])
with tf.variable_scope('NeuraNetFullyConnLayer', reuse=tf.AUTO_REUSE) as scope:
if reuse:
scope.reuse_variables()
                Y = layers.fully_connected(Yl, config.output_dimension,
                                           activation_fn=None, reuse=reuse, scope='NeuralNetFullyConnLayer')
                reuse = True
Ys.append(Y)
YLogits = tf.stack(Ys, axis=1, name='Ys')
# YLogits: [ BATCH_SIZE, LABEL_LENGTH, OUTPUT_DIMENSION ]
with tf.variable_scope('TrainingAndLoss', reuse=tf.AUTO_REUSE) as scope:
if reuse_weights:
scope.reuse_variables()
starter_learning_rate = config.learning_rate
learning_rate = tf.train.inverse_time_decay(
starter_learning_rate, global_step, config.decay_steps, config.decay_rate)
y_ = tf.reshape(label_tensor, [config.batch_size])
# y_: [BATCH_SIZE] # int(s) identifying correct function
# One-Hot encoode y_
yo_ = tf.one_hot(y_, config.output_dimension, 1.0, 0.0)
yos_ = tf.reshape(yo_, shape=[config.batch_size, 1, config.output_dimension])
        # yos_: [ BATCH_SIZE, 1, OUTPUT_DIMENSION ]
yot_ = tf.tile(yos_, [1, config.label_length, 1])
# yot_: [ BATCHSIZE, LABEL_LENGTH, OUTPUT_DIMENSION ]
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=yot_, logits=YLogits))
        train_op = tf.train.RMSPropOptimizer(
            learning_rate=learning_rate, decay=config.decay_rate).minimize(cross_entropy)
# accuracy
with tf.name_scope('Summary') as scope:
# select last output:
output = tf.transpose(YLogits, [1, 0, 2])
        # output: [ LABEL_LENGTH, BATCH_SIZE, OUTPUT_DIMENSION ]
Ylast = tf.gather(output, int(output.get_shape()[0])-1)
# last: [ BATCH_SIZE , config.output_dimension]
correct_prediction = tf.equal(tf.argmax(Ylast, 1), tf.argmax(yo_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar(phase + "/loss", cross_entropy)
tf.summary.scalar(phase + "/acc", accuracy)
summary_op = tf.summary.merge_all()
return Hin, pkeep, train_op, summary_op
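# Usage sketch (assumptions: config defines batch_size, sequence_length,
# input_dimension, etc., and gen_data supplies batches; the names below are
# illustrative and not part of this file):
#
#   inputs = tf.placeholder(
#       tf.float32,
#       [config.batch_size, config.sequence_length, config.input_dimension])
#   labels = tf.placeholder(tf.int32, [config.batch_size])
#   step = tf.train.get_or_create_global_step()
#   Hin, pkeep, train_op, summary_op = lstmnet(
#       inputs, labels, step, phase="train", reuse_weights=False)
#   # feed_dict must supply Hin (zero initial state) and pkeep (dropout keep prob).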
|
#!/usr/bin/python3
"""
In MATLAB, there is a very useful function called 'reshape', which can reshape a
matrix into a new one with different size but keep its original data.
You're given a matrix represented by a two-dimensional array, and two positive
integers r and c representing the row number and column number of the wanted
reshaped matrix, respectively.
The reshaped matrix need to be filled with all the elements of the original
matrix in the same row-traversing order as they were.
If the 'reshape' operation with given parameters is possible and legal, output
the new reshaped matrix; Otherwise, output the original matrix.
"""
from typing import List
class Solution:
def matrixReshape(self, nums: List[List[int]], r: int, c: int) -> List[List[int]]:
m, n = len(nums), len(nums[0])
if m * n != r * c:
return nums
ret = []
for i in range(m):
for j in range(n):
if (i * n + j) % c == 0:
ret.append([])
ret[-1].append(nums[i][j])
return ret
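# Example: a 2x2 matrix reshaped to 1x4 keeps row-major order; an impossible
# request (e.g. 2x2 -> 1x3) returns the input unchanged.
#
#   Solution().matrixReshape([[1, 2], [3, 4]], 1, 4)  # [[1, 2, 3, 4]]
#   Solution().matrixReshape([[1, 2], [3, 4]], 1, 3)  # [[1, 2], [3, 4]]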
|