| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
Lincoln/Rougue/main.py
|
gcvalderrama/Jarvis
| 0
|
12776951
|
from pythonrouge import pythonrouge
ROUGE = './RELEASE-1.5.5/ROUGE-1.5.5.pl'
data_path = './RELEASE-1.5.5/data'
peer = "Tokyo is the one of the biggest city in the world."
model = "The capital of Japan, Tokyo, is the center of Japanese economy."
score = pythonrouge.pythonrouge(peer, model, ROUGE, data_path)
print(score)
peer = "Tokyo is the one of the biggest city in the world."
model = "Tokyo is one of the biggest city in the world."
score = pythonrouge.pythonrouge(peer, model, ROUGE, data_path)
print(score)
peer = "President Bush's nomination of black conservative <NAME> to " \
       "replace the Supreme Court's first black Justice, liberal Thurgood " \
       "Marshall, split the Senate down the middle. Thomas's opposition to " \
       "affirmative action alienated civil rights activists while his Catholic " \
       "upbringing and interest in the priesthood raised alarm in " \
       "abortion-rights groups. The Judiciary Committee deadlocked 7-7 and " \
       "the nomination was referred to the Senate without recommendation after " \
       "extended televised hearings on charges of sexual harassment against " \
       "the nominee. Thomas was confirmed by a close 52-48 vote but he " \
       "commented that nothing could give him back his good name."
model = "<NAME> was confirmed as Supreme Court Justice in October 1991 " \
        "by a razor-thin margin of 52-48. Thomas, who has opposed affirmative " \
        "action has not taken public stands on other key issues. His reputation " \
        "was damaged by accusations of sexual harassment. As the youngest " \
        "justice he is expected to be on the court for decades."
score = pythonrouge.pythonrouge(peer, model, ROUGE, data_path)
print(score)
| 2.5625
| 3
|
rest/microscopy/__init__.py
|
informatics-isi-edu/microscopy
| 3
|
12776952
|
#!/usr/bin/env python3
#
# Copyright 2013-2014 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .printer import *
import webauthn2
__all__ = [
'printer'
]
## setup web service configuration data
global_env = webauthn2.merge_config(
jsonFileName='ermrest_config.json',
built_ins={
"default_limit": 100,
"db": "microscopy",
"dbn": "postgres",
"dbmaxconnections": 8
}
)
## setup webauthn2 handler
webauthn2_config = global_env.get('webauthn2', dict(web_cookie_name='ermrest'))
webauthn2_config.update(dict(web_cookie_path='/ermrest'))
webauthn2_manager = webauthn2.Manager(overrides=webauthn2_config)
## setup web urls
def web_urls():
"""Builds and returns the web_urls for web.py.
"""
def printerClass(superClass, printers):
class C (superClass):
def __init__(self):
self.printers = printers
superClass.__init__(self)
return C
urls = (
# print job and print control
'/printer/([^/]+)/job', printerClass(printer.PrintJob, global_env.get('printers')),
'/printer/([^/]+)/job/([^/]+)/', printerClass(printer.PrintJob, global_env.get('printers')),
'/printer/([^/]+)/control/([^/]+)/', printerClass(printer.PrintControl, global_env.get('printers'))
)
return tuple(urls)
| 2.171875
| 2
|
setup.py
|
SeanNobel/d4rl-pybullet
| 130
|
12776953
|
<gh_stars>100-1000
from setuptools import setup, find_packages
setup(name="d4rl_pybullet",
version="0.1",
license="MIT",
description="Datasets for data-driven deep reinforcement learnig with Pybullet environments",
url="https://github.com/takuseno/d4rl-pybullet",
install_requires=["gym", "pybullet", "h5py"],
packages=["d4rl_pybullet"])
| 1.351563
| 1
|
test/unit/services/eth/test_eth_extension_block_cleanup_service.py
|
doubleukay/bxgateway
| 21
|
12776954
|
import os
from mock import MagicMock
from bxcommon.services.extension_transaction_service import ExtensionTransactionService
from bxcommon.services.transaction_service import TransactionService
from bxcommon.test_utils import helpers
from bxcommon.utils import convert
from bxcommon.utils.object_hash import Sha256Hash, SHA256_HASH_LEN
from bxgateway.messages.eth.protocol.new_block_eth_protocol_message import NewBlockEthProtocolMessage
from bxgateway.services.eth.eth_extension_block_cleanup_service import EthExtensionBlockCleanupService
from bxgateway.testing.abstract_block_cleanup_service_test import AbstractBlockCleanupServiceTest
from bxgateway.services.eth.abstract_eth_block_cleanup_service import AbstractEthBlockCleanupService
from bxgateway.services.eth.eth_block_queuing_service import EthBlockQueuingService
class EthExtensionBlockCleanupServiceTest(AbstractBlockCleanupServiceTest):
def setUp(self) -> None:
super().setUp()
node_conn = MagicMock()
self.node.block_queuing_service = EthBlockQueuingService(self.node, node_conn)
self.node.connection_pool.add(1, "127.0.0.0", 8002, node_conn)
def _get_sample_block(self, file_path):
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(file_path))))
with open(os.path.join(root_dir, "samples/eth_sample_block.txt")) as sample_file:
btc_block = sample_file.read().strip("\n")
buf = bytearray(convert.hex_to_bytes(btc_block))
parsed_block = NewBlockEthProtocolMessage(msg_bytes=buf)
return parsed_block
def _test_mark_blocks_and_request_cleanup(self):
marked_block = Sha256Hash(binary=helpers.generate_bytearray(SHA256_HASH_LEN))
prev_block = Sha256Hash(binary=helpers.generate_bytearray(SHA256_HASH_LEN))
tracked_blocks = []
self.cleanup_service.on_new_block_received(marked_block, prev_block)
self.transaction_service.track_seen_short_ids(marked_block, [])
for _ in range(self.block_confirmations_count - 1):
tracked_block = Sha256Hash(binary=helpers.generate_bytearray(SHA256_HASH_LEN))
self.transaction_service.track_seen_short_ids(tracked_block, [])
tracked_blocks.append(tracked_block)
unmarked_block = Sha256Hash(binary=helpers.generate_bytearray(SHA256_HASH_LEN))
self.assertIsNone(self.cleanup_service.last_confirmed_block)
self.cleanup_service.mark_blocks_and_request_cleanup([marked_block, *tracked_blocks])
self.assertEqual(marked_block, self.cleanup_service.last_confirmed_block)
self.assertTrue(self.cleanup_service.is_marked_for_cleanup(marked_block))
self.assertFalse(self.cleanup_service.is_marked_for_cleanup(unmarked_block))
self.assertEqual(marked_block, self.cleanup_service.last_confirmed_block)
def _test_block_cleanup(self):
block_msg = self._get_sample_block(self._get_file_path())
transactions = list(block_msg.txns())
block_hash = block_msg.block_hash()
transaction_hashes = []
for idx, tx in enumerate(transactions):
tx_hash = tx.hash()
tx_content = str(tx).encode()
self.transaction_service.set_transaction_contents(tx_hash, tx_content)
self.transaction_service.assign_short_id(tx_hash, idx + 1)
transaction_hashes.append(tx_hash)
self.cleanup_service._block_hash_marked_for_cleanup.add(block_hash)
self.cleanup_service.clean_block_transactions(block_msg, self.transaction_service)
self.assertEqual(0, self.transaction_service._total_tx_contents_size)
for tx_hash in transaction_hashes:
self.assertFalse(self.transaction_service.has_transaction_contents(tx_hash))
self.node.post_block_cleanup_tasks.assert_called_once_with(
block_hash,
[],
transaction_hashes
)
def test_mark_blocks_and_request_cleanup(self):
self._test_mark_blocks_and_request_cleanup()
def test_block_cleanup(self):
self._test_block_cleanup()
def test_block_confirmation_cleanup(self):
self._test_block_confirmation_cleanup()
def _get_transaction_service(self) -> TransactionService:
return ExtensionTransactionService(self.node, 1)
def _get_cleanup_service(self) -> AbstractEthBlockCleanupService:
return EthExtensionBlockCleanupService(self.node, 1)
def _get_file_path(self) -> str:
return __file__
| 1.9375
| 2
|
peartree/__init__.py
|
yiyange/peartree
| 0
|
12776955
|
<reponame>yiyange/peartree
from . import parallel # noqa: F401
from peartree.__version__ import __version__ # noqa: F401
from peartree.paths import (
load_feed_as_graph,
get_representative_feed,
load_synthetic_network_as_graph) # noqa: F401
from peartree.plot import generate_plot # noqa: F401
from peartree.toolkit import reproject # noqa: F401
__all__ = [
'__version__',
'generate_plot',
'get_representative_feed',
'load_feed_as_graph',
'load_synthetic_network_as_graph',
'reproject',
]
| 1.382813
| 1
|
typeform_viz/migrations/0001_initial.py
|
jacobs-hack/jh16_typeform_wrapper
| 0
|
12776956
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-18 21:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='JHAPP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.TextField()),
('last_name', models.TextField()),
('email', models.EmailField(max_length=254)),
('coming_from', models.TextField()),
('nationality', models.TextField()),
('degree', models.TextField()),
('graduation', models.DateField()),
('major', models.TextField()),
('university', models.TextField()),
('_18yo', models.BooleanField()),
('needs_reimbursement', models.BooleanField()),
('needs_visa', models.BooleanField()),
('github', models.TextField()),
('devpost', models.TextField()),
('linkedin', models.TextField()),
('personal_site', models.TextField()),
('first_hackathon', models.BooleanField()),
('why_jacobshack', models.TextField()),
('previous_projects', models.TextField()),
('tshirt_size', models.TextField()),
('dietary_requirements', models.TextField()),
('has_team', models.BooleanField()),
('names_of_teammates', models.TextField()),
('cv', models.FileField(upload_to='')),
('agree_to_policy', models.TextField()),
('agree_to_coc', models.TextField()),
('accepted', models.BooleanField(default=False)),
('sentmail', models.BooleanField(default=False)),
],
),
]
| 1.476563
| 1
|
old/pepper1/src/libpepper/vals/functions/pepfunction.py
|
andybalaam/pepper
| 2
|
12776957
|
<filename>old/pepper1/src/libpepper/vals/functions/pepfunction.py
# Copyright (C) 2011-2013 <NAME> and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
# For just as each of us has one body with many members, and these members
# do not all have the same function, Romans 12 v4
from libpepper.values import PepValue
from pepcallable import PepCallable
class PepFunction( PepCallable ):
def __init__( self ):
PepValue.__init__( self )
# self.arg_types_and_names = arg_types_and_names
def is_known( self, env ):
return True
| 1.945313
| 2
|
repokid/plugin.py
|
boost-entropy-repos-org/repokid
| 999
|
12776958
|
# Copyright 2021 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
from typing import Any
from typing import Dict
from typing import Optional
from repokid import CONFIG
from repokid.types import RepokidConfig
logger = logging.getLogger("repokid")
class RepokidPlugin:
def __init__(self, config: Optional[RepokidConfig] = None):
if config:
self.config = config
else:
self.config = CONFIG
class M_A(type):
pass
class Singleton(M_A):
_instances: Dict[str, Singleton] = {}
def __call__(cls, *args: Any, **kwargs: Any) -> Singleton:
if cls.__name__ not in cls._instances:
cls._instances[cls.__name__] = super(Singleton, cls).__call__(
*args, **kwargs
)
return cls._instances[cls.__name__]
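# Illustrative sketch only (the class name below is hypothetical, not part of repokid):
# a class that opts into the Singleton metaclass is constructed once and afterwards
# served from the cache, keyed by its class name.
#
#     class DemoPlugin(RepokidPlugin, metaclass=Singleton):
#         pass
#
#     assert DemoPlugin() is DemoPlugin()  # second call returns the cached instance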
| 2.109375
| 2
|
07/07.py
|
stevenpclark/aoc2019
| 0
|
12776959
|
<reponame>stevenpclark/aoc2019<gh_stars>0
import itertools
class Amplifier(object):
def __init__(self, x, phase):
self.x = x[:]
self.inputs = [phase]
self.i = 0
def get_output(self, input):
self.inputs.append(input)
i = self.i
x = self.x
while True:
cmd = str(x[i]).rjust(5, '0')
op = int(cmd[-2:])
if op == 99:
return None
if op == 3:
x[x[i+1]] = self.inputs.pop(0)
i += 2
continue
if op == 4:
output = x[x[i+1]]
i += 2
#save state here
self.i = i
return output
modes = [int(c) for c in reversed(cmd[1:-2])]
#print('modes:', modes)
operands = []
for j, mode in enumerate(modes):
if mode == 0: #position mode
operands.append(x[x[i+j+1]])
elif mode == 1: #immediate mode
operands.append(x[i+j+1])
else:
raise Exception('bad')
#print('operands:', operands)
if op == 1:
x[x[i+3]] = operands[0] + operands[1]
i += 4
elif op == 2:
x[x[i+3]] = operands[0] * operands[1]
i += 4
elif op == 5: #jump-if-true
if operands[0]:
i = operands[1]
else:
i += 3
elif op == 6: #jump-if-false
if not operands[0]:
i = operands[1]
else:
i += 3
elif op == 7: #less-than
x[x[i+3]] = int(operands[0]<operands[1])
i += 4
elif op == 8: #equals
x[x[i+3]] = int(operands[0]==operands[1])
i += 4
else:
raise Exception('bad2')
def get_max_output(x, phase_range, do_loop):
final_outputs = []
for phases in itertools.permutations(phase_range):
amps = [Amplifier(x, phase) for phase in phases]
prev_output = 0
while True:
for amp in amps:
prev_output = amp.get_output(prev_output)
if prev_output is not None:
final_output = prev_output
if not do_loop or prev_output is None:
break
final_outputs.append(final_output)
return max(final_outputs)
def main():
with open('input.txt', 'r') as f:
s = f.read()
#s = '3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0'
#s = '3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5'
x = [int(s2) for s2 in s.split(',')]
print(get_max_output(x, range(5), do_loop=False))
print(get_max_output(x, range(5,10), do_loop=True))
if __name__ == '__main__':
main()
| 3.109375
| 3
|
tbump/test/project/version_info.py
|
achary/tbump
| 50
|
12776960
|
<filename>tbump/test/project/version_info.py
version_info = (1, 2, 41, "alpha", 1)
| 1.078125
| 1
|
problem-003/Rithvik.py
|
arnavb/lockheed-martin-practice
| 2
|
12776961
|
with open("Prob03.in.txt") as f:
content = f.readlines()
content = [x.strip() for x in content]
nola = content.pop(0)
for x in content:
x = x.strip(" ")
x = x.split()
add = int(x[0]) + int(x[1])
multi = int(x[0]) * int(x[1])
print( str(add) + " " + str(multi))
| 3.28125
| 3
|
simple_autocorrect_pt/insertion_filter.py
|
brenoskuk/simple_autocorrect_pt
| 0
|
12776962
|
<filename>simple_autocorrect_pt/insertion_filter.py
#important https://docs.python.org/3/library/stdtypes.html#string-methods
import string
from unicodedata import normalize
import os
# create accent-free versions of the words
def delete_acentos(text_input, codif='utf-8'):
return normalize('NFKD', text_input).encode('ASCII', 'ignore').decode('ASCII')
# create all-lowercase versions
def all_lower(text_input):
return text_input.lower()
# walk through all templates and append modified sentences
if __name__ == '__main__':
more = []
dir = './data/nlu_data'
for file in os.listdir(dir):
if file != 'synonyms.md' and file != 'lookups.md' and file != 'base.md' and 'describe_' not in file and file !='compare_scores.md':
f = open(dir + "/" + file, 'r')
line = f.readline()
line = f.readline()
while (line):
if line[-1] != '\n':
line = line + '\n'
new1 = delete_acentos(line)
if line != new1:
more.append(new1)
new2 = all_lower(new1)
if new2 != new1:
more.append(new2)
new = all_lower(line)
if line != new:
more.append(new)
line = f.readline()
f.close()
f = open(dir + "/" + file, 'a')
f.write('\n' + ''.join(more))
f.close()
more = []
| 2.921875
| 3
|
gaphor/C4Model/c4model.py
|
mrmonkington/gaphor
| 867
|
12776963
|
<reponame>mrmonkington/gaphor
# This file is generated by profile_coder.py. DO NOT EDIT!
from __future__ import annotations
from gaphor.core.modeling.properties import (
association,
attribute,
relation_many,
relation_one,
)
from gaphor.UML import Actor, Package
class C4Container(Package):
description: attribute[str]
location: attribute[str]
ownerContainer: relation_one[C4Container]
owningContainer: relation_many[C4Container]
technology: attribute[str]
type: attribute[str]
class C4Database(C4Container):
pass
class C4Person(Actor):
description: attribute[str]
location: attribute[str]
C4Container.description = attribute("description", str)
C4Container.location = attribute("location", str)
C4Container.ownerContainer = association(
"ownerContainer", C4Container, upper=1, opposite="owningContainer"
)
C4Container.owningContainer = association(
"owningContainer", C4Container, composite=True, opposite="ownerContainer"
)
C4Container.technology = attribute("technology", str)
C4Container.type = attribute("type", str)
C4Person.description = attribute("description", str)
C4Person.location = attribute("location", str)
C4Container.namespace.subsets.add(C4Container.ownerContainer) # type: ignore[attr-defined]
C4Container.ownedMember.subsets.add(C4Container.owningContainer) # type: ignore[attr-defined]
| 2.09375
| 2
|
src/layoutparser/elements/utils.py
|
frankiert/layout-parser
| 2,931
|
12776964
|
<reponame>frankiert/layout-parser
# Copyright 2021 The Layout Parser team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union, Dict, Any, Optional, Tuple
import numpy as np
from PIL import Image
def cvt_coordinates_to_points(coords: Tuple[float, float, float, float]) -> np.ndarray:
x_1, y_1, x_2, y_2 = coords
return np.array(
[
[x_1, y_1], # Top Left
[x_2, y_1], # Top Right
[x_2, y_2], # Bottom Right
[x_1, y_2], # Bottom Left
]
)
def cvt_points_to_coordinates(points: np.ndarray) -> Tuple[float, float, float, float]:
x_1 = points[:, 0].min()
y_1 = points[:, 1].min()
x_2 = points[:, 0].max()
y_2 = points[:, 1].max()
return (x_1, y_1, x_2, y_2)
def perspective_transformation(
M: np.ndarray, points: np.ndarray, is_inv: bool = False
) -> np.ndarray:
if is_inv:
M = np.linalg.inv(M)
src_mid = np.hstack([points, np.ones((points.shape[0], 1))]).T # 3x4
dst_mid = np.matmul(M, src_mid)
dst = (dst_mid / dst_mid[-1]).T[:, :2] # 4x2
return dst
def vertice_in_polygon(vertice: np.ndarray, polygon_points: np.ndarray) -> bool:
# The polygon_points are ordered clockwise
# The implementation is based on the algorithm from
# https://demonstrations.wolfram.com/AnEfficientTestForAPointToBeInAConvexPolygon/
points = polygon_points - vertice # shift the coordinates origin to the vertice
edges = np.append(points, points[0:1, :], axis=0)
return all([np.linalg.det([e1, e2]) >= 0 for e1, e2 in zip(edges, edges[1:])])
# If the points are ordered clockwise, the det should <=0
def polygon_area(xs: np.ndarray, ys: np.ndarray) -> float:
"""Calculate the area of polygons using
`Shoelace Formula <https://en.wikipedia.org/wiki/Shoelace_formula>`_.
Args:
xs (`np.ndarray`): The x coordinates of the points
ys (`np.ndarray`): The y coordinates of the points
"""
# Refer to: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
# The formula is equivalent to the original one indicated in the wikipedia
# page.
return 0.5 * np.abs(np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1)))
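# Hypothetical quick check of the shoelace formula implemented above: for the unit
# square, polygon_area(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1])) evaluates to 1.0.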
| 1.78125
| 2
|
if97/h2o.py
|
nikolai-kummer/iapws
| 5
|
12776965
|
from if97 import region1, region2, region3, region4
###########################################################
##### Pressure-Temperature Formulation #####
###########################################################
def idRegion(P, T):
"""Identification of region from IF97 specification
    using pressure and temperature as primary variables"""
# Constant boundaries
Pbnd0 = region1.Pbnd0
Pbnd1 = region1.Pbnd1
Tbnd01 = region1.Tbnd01
Tbnd25 = region2.Tbnd25
Tbnd13 = region1.Tbnd13
# non-constant boundaries
Pbnd32 = region3.bnd23P(min(max(T, Tbnd13), 863.15))
Pbnd4 = satP(min(max(T, Tbnd01), Tbnd13))
region = 0
if (P >= Pbnd0) and (T >= Tbnd01) and (P <= Pbnd1) and (T <= Tbnd25):
if (T <= Tbnd13) and (P >= Pbnd4):
region = 1
elif (T < Tbnd13) or (P <= Pbnd32):
region = 2
else:
# region 3 via P,T relations not implemented
region = 0
    assert region != 0, "Water properties not available!"
return region
#### water properties ####
def g(P, T, region = 0):
"""Specific gibbs free energy [kJ / kg K]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.g(P, T)
elif region is 2:
return region2.g(P, T)
else:
return 0.000
def v(P, T, region = 0):
"""Specific volume [m^3 / kg]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.v(P, T)
elif region is 2:
return region2.v(P, T)
else:
return 0.000
def u(P, T, region = 0):
"""Specific internal energy [kJ / kg]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.u(P, T)
elif region is 2:
return region2.u(P, T)
else:
return 0.000
def s(P, T, region = 0):
"""Specific entropy [kJ / kg K]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.s(P, T)
elif region is 2:
return region2.s(P, T)
else:
return 0.000
def h(P, T, region = 0):
"""Specific enthalpy [kJ / kg]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.h(P, T)
elif region is 2:
return region2.h(P, T)
else:
return 0.000
def cp(P, T, region = 0):
""" Specific isobaric heat capacity [kJ / kg K]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.cp(P, T)
elif region is 2:
return region2.cp(P, T)
else:
return 0.000
def cv(P, T, region = 0):
""" Specific isochoric heat capacity [kJ / kg K]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.cv(P, T)
elif region is 2:
return region2.cv(P, T)
else:
return 0.000
def w(P, T, region = 0):
""" Speed of sound [m / s]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.w(P, T)
elif region is 2:
return region2.w(P, T)
else:
return 0.000
def a(P, T, region = 0):
"""Isobaric cubic expansion coefficient [1 / K]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.a(P, T)
elif region is 2:
return region2.a(P, T)
else:
return 0.000
def k(P, T, region = 0):
"""Isothermal compressibility [kg / kJ]"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.k(P, T)
elif region is 2:
return region2.k(P, T)
else:
return 0.000
#### water property derivatives ####
def dgdP(P, T, region = 0):
""" Derivative of specific gibbs free energy [kJ m^3 / kg kJ]
w.r.t pressure at constant temperature"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dgdP(P, T)
elif region is 2:
return region2.dgdP(P, T)
else:
return 0.000
def dvdP(P, T, region = 0):
""" Derivative of specific volume [m^3 m^3 / kg kJ]
w.r.t pressure at constant temperature"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dvdP(P, T)
elif region is 2:
return region2.dvdP(P, T)
else:
return 0.000
def dudP(P, T, region = 0):
""" Derivative of specific internal energy [kJ m^3 / kg kJ]
w.r.t pressure at constant temperature"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dudP(P, T)
elif region is 2:
return region2.dudP(P, T)
else:
return 0.000
def dsdP(P, T, region = 0):
""" Derivative of specific entropy [kJ m^3 / kg K kJ]
w.r.t pressure at constant temperature"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dsdP(P, T)
elif region is 2:
return region2.dsdP(P, T)
else:
return 0.000
def dhdP(P, T, region = 0):
""" Derivative of specific enthalpy [kJ m^3 / kg kJ]
w.r.t pressure at constant temperature"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dhdP(P, T)
elif region is 2:
return region2.dhdP(P, T)
else:
return 0.000
def dgdT(P, T, region = 0):
""" Derivative of specific gibbs free energy [kJ / kg K]
w.r.t temperature at constant pressure"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dgdT(P, T)
elif region is 2:
return region2.dgdT(P, T)
else:
return 0.000
def dvdT(P, T, region = 0):
""" Derivative of specific volume [m^3 / kg K]
w.r.t temperature at constant pressure"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dvdT(P, T)
elif region is 2:
return region2.dvdT(P, T)
else:
return 0.000
def dudT(P, T, region = 0):
""" Derivative of specific internal energy [kJ / kg K]
w.r.t temperature at constant pressure"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dudT(P, T)
elif region is 2:
return region2.dudT(P, T)
else:
return 0.000
def dsdT(P, T, region = 0):
""" Derivative of specific entropy [kJ / kg K K]
w.r.t temperature at constant pressure"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dsdT(P, T)
elif region is 2:
return region2.dsdT(P, T)
else:
return 0.000
def dhdT(P, T, region = 0):
""" Derivative of specific enthalpy [kJ / kg K]
w.r.t temperature at constant pressure"""
if region is 0:
region = idRegion(P, T)
if region is 1:
return region1.dhdT(P, T)
elif region is 2:
return region2.dhdT(P, T)
else:
return 0.000
###########################################################
##### Pressure-Enthalpy Formulation #####
###########################################################
def idRegion_h(P, h):
"""Identification of region from IF97 specification
using pressure and enthalpy as primary variables"""
# supporting boundaries
Tbnd01 = region1.Tbnd01
Pbnd4 = satP(Tbnd01)
Tbnd25 = region2.Tbnd25
Tbnd13 = region1.Tbnd13
Tbnd32 = region3.bnd23T(min(max(P, 16.5292), 100.0))
Tbnd4 = satT(P)
    # Enthalpy-pressure boundaries
Pbnd0 = region1.Pbnd0
Pbnd1 = region1.Pbnd1
hbnd01 = region1.h(Pbnd4, Tbnd01)
hbnd25 = region2.h(Pbnd0, Tbnd25)
Pbndh1 = satP(Tbnd13)
hbnd13 = region1.h(P, Tbnd13)
hbnd32 = region2.h(P, Tbnd32)
hbnd14 = region1.h(P, Tbnd4)
hbnd42 = region2.h(P, Tbnd4)
region = 0
if (P >= Pbnd0) and (h >= hbnd01) and (P <= Pbnd1) and (h <= hbnd25):
if (P >= Pbndh1):
if (h <= hbnd13):
region = 1
elif (h >= hbnd32):
region = 2
else:
# region 3 via P,h relations not implemented
region = 0
else:
if (h <= hbnd14):
region = 1
elif (h >= hbnd42):
region = 2
else:
region = 4
    assert region != 0, "Water properties not available!"
return region
#### water properties ####
def g_h(P, h, region = 0):
"""Specific gibbs free energy [kJ / kg]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.g_h(P, h)
elif region is 2:
return region2.g_h(P, h)
elif region is 4:
return region4.g_h(P, h)
else:
return 0.000
def v_h(P, h, region = 0):
"""Specific volume [m^3 / kg]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.v_h(P, h)
elif region is 2:
return region2.v_h(P, h)
elif region is 4:
return region4.v_h(P, h)
else:
return 0.000
def u_h(P, h, region = 0):
"""Specific internal energy [kJ / kg]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.u_h(P, h)
elif region is 2:
return region2.u_h(P, h)
elif region is 4:
return region4.u_h(P, h)
else:
return 0.000
def s_h(P, h, region = 0):
"""Specific entropy [kJ / kg K]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.s_h(P, h)
elif region is 2:
return region2.s_h(P, h)
elif region is 4:
return region4.s_h(P, h)
else:
return 0.000
def T_h(P, h, region = 0):
""" Temperature [K]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.T_h(P, h)
elif region is 2:
return region2.T_h(P, h)
elif region is 4:
return region4.satT(P)
else:
return 0.000
def cp_h(P, h, region = 0):
""" Specific isobaric heat capacity [kJ / kg K]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.cp_h(P, h)
elif region is 2:
return region2.cp_h(P, h)
elif region is 4:
return region4.cp_h(P, h)
else:
return 0.000
def cv_h(P, h, region = 0):
""" Specific isochoric heat capacity [kJ / kg K]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.cv_h(P, h)
elif region is 2:
return region2.cv_h(P, h)
elif region is 4:
return region4.cv_h(P, h)
else:
return 0.000
def w_h(P, h, region = 0):
""" Speed of sound [m / s]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.w_h(P, h)
elif region is 2:
return region2.w_h(P, h)
elif region is 4:
return region4.w_h(P, h)
else:
return 0.000
def a_h(P, h, region = 0):
"""Isobaric cubic expansion coefficient [1 / K]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.a_h(P, h)
elif region is 2:
return region2.a_h(P, h)
elif region is 4:
return region4.a_h(P, h)
else:
return 0.000
def k_h(P, h, region = 0):
"""Isothermal compressibility [kg / kJ]"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.k_h(P, h)
elif region is 2:
return region2.k_h(P, h)
elif region is 4:
return region4.k_h(P, h)
else:
return 0.000
#### water property derivatives ####
def dgdP_h(P, h, region = 0):
""" Derivative of specific gibbs free energy [kJ m^3 / kg kJ]
w.r.t pressure at constant specific enthalpy"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dgdP_h(P, h)
elif region is 2:
return region2.dgdP_h(P, h)
elif region is 4:
return region4.dgdP_h(P, h)
else:
return 0.000
def dvdP_h(P, h, region = 0):
""" Derivative of specific volume [m^3 m^3 / kg kJ]
w.r.t pressure at constant specific enthalpy"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
        return region1.dvdP_h(P, h)
elif region is 2:
return region2.dvdP_h(P, h)
elif region is 4:
return region4.dvdP_h(P, h)
else:
return 0.000
def dudP_h(P, h, region = 0):
""" Derivative of specific internal energy [kJ m^3 / kg kJ]
w.r.t pressure at constant specific enthalpy"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dudP_h(P, h)
elif region is 2:
return region2.dudP_h(P, h)
elif region is 4:
return region4.dudP_h(P, h)
else:
return 0.000
def dsdP_h(P, h, region = 0):
""" Derivative of specific entropy [kJ m^3 / kg K kJ]
w.r.t pressure at constant specific enthalpy"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dsdP_h(P, h)
elif region is 2:
return region2.dsdP_h(P, h)
elif region is 4:
return region4.dsdP_h(P, h)
else:
return 0.000
def dhdP_h(P, h, region = 0):
""" Derivative of specific enthalpy [kJ m^3 / kg kJ]
w.r.t pressure at constant specific enthalpy"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return 0.000
elif region is 2:
return 0.000
elif region is 4:
return region4.dhdP_h(P, h)
else:
return 0.000
def dTdP_h(P, h, region = 0):
""" Derivative of Temperature [K m^3 / kJ]
w.r.t pressure at constant specific enthalpy"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dTdP_h(P, h)
elif region is 2:
return region2.dTdP_h(P, h)
elif region is 4:
return region4.dTsdP(P)
else:
return 0.000
def dgdh_h(P, h, region = 0):
""" Derivative of specific gibbs free energy [kJ kg / kg kJ]
w.r.t specific enthalpy at constant pressure"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dgdh_h(P, h)
elif region is 2:
return region2.dgdh_h(P, h)
elif region is 4:
return region4.dgdh_h(P, h)
else:
return 0.000
def dvdh_h(P, h, region = 0):
""" Derivative of specific volume [m^3 kg / kg kJ]
w.r.t specific enthalpy at constant pressure"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dvdh_h(P, h)
elif region is 2:
return region2.dvdh_h(P, h)
elif region is 4:
return region4.dvdh_h(P, h)
else:
return 0.000
def dudh_h(P, h, region = 0):
""" Derivative of specific internal energy [kJ kg / kg kJ]
w.r.t specific enthalpy at constant pressure"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dudh_h(P, h)
elif region is 2:
return region2.dudh_h(P, h)
elif region is 4:
return region4.dudh_h(P, h)
else:
return 0.000
def dsdh_h(P, h, region = 0):
""" Derivative of specific entropy [kJ kg / kg K kJ]
w.r.t specific enthalpy at constant pressure"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dsdh_h(P, h)
elif region is 2:
return region2.dsdh_h(P, h)
elif region is 4:
return region4.dsdh_h(P, h)
else:
return 0.000
def dhdh_h(P, h, region = 0):
""" Derivative of specific enthalpy [kJ kg / kg kJ]
w.r.t specific enthalpy at constant pressure"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return 1.000
elif region is 2:
return 1.000
elif region is 4:
return 1.000
else:
return 0.000
def dTdh_h(P, h, region = 0):
""" Derivative of Temperature [K kg / kJ]
w.r.t specific enthalpy at constant pressure"""
if region is 0:
region = idRegion_h(P, h)
if region is 1:
return region1.dTdh_h(P, h)
elif region is 2:
return region2.dTdh_h(P, h)
elif region is 4:
return 0.000
else:
return 0.000
###########################################################
##### Pressure-Entropy Formulation #####
###########################################################
def idRegion_s(P, s):
"""Identification of region from IF97 specification
    using pressure and entropy as primary variables"""
# supporting boundaries
Tbnd01 = region1.Tbnd01
Pbnd4 = satP(Tbnd01)
Tbnd25 = region2.Tbnd25
Tbnd13 = region1.Tbnd13
Tbnd32 = region3.bnd23T(min(max(P, 16.5292), 100.0))
Tbnd4 = satT(P)
    # Entropy-pressure boundaries
Pbnd0 = region1.Pbnd0
Pbnd1 = region1.Pbnd1
sbnd01 = region1.s(P, Tbnd01)
sbnd25 = region2.s(P, Tbnd25)
Pbndh1 = satP(Tbnd13)
sbnd13 = region1.s(P, Tbnd13)
sbnd32 = region2.s(P, Tbnd32)
sbnd14 = region1.s(P, Tbnd4)
sbnd42 = region2.s(P, Tbnd4)
region = 0
if (P >= Pbnd0) and (s >= sbnd01) and (P <= Pbnd1) and (s <= sbnd25):
if (P >= Pbndh1):
if (s <= sbnd13):
region = 1
elif (s >= sbnd32):
region = 2
else:
                # region 3 via P,s relations not implemented
region = 0
else:
if (s <= sbnd14):
region = 1
elif (s >= sbnd42):
region = 2
else:
region = 4
    assert region != 0, "Water properties not available!"
return region
#### water properties ####
def g_s(P, s, region = 0):
"""Specific gibbs free energy [kJ / kg]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.g_s(P, s)
elif region is 2:
return region2.g_s(P, s)
elif region is 4:
return region4.g_s(P, s)
else:
return 0.000
def v_s(P, s, region = 0):
"""Specific volume [m^3 / kg]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.v_s(P, s)
elif region is 2:
return region2.v_s(P, s)
elif region is 4:
return region4.v_s(P, s)
else:
return 0.000
def u_s(P, s, region = 0):
"""Specific internal energy [kJ / kg]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.u_s(P, s)
elif region is 2:
return region2.u_s(P, s)
elif region is 4:
return region4.u_s(P, s)
else:
return 0.000
def T_s(P, s, region = 0):
""" Temperature [K]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.T_s(P, s)
elif region is 2:
return region2.T_s(P, s)
elif region is 4:
return region4.satT(P)
else:
return 0.000
def h_s(P, s, region = 0):
"""Specific entropy [kJ / kg]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.h_s(P, s)
elif region is 2:
return region2.h_s(P, s)
elif region is 4:
return region4.h_s(P, s)
else:
return 0.000
def cp_s(P, s, region = 0):
""" Specific isobaric heat capacity [kJ / kg K]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.cp_s(P, s)
elif region is 2:
return region2.cp_s(P, s)
elif region is 4:
return region4.cp_s(P, s)
else:
return 0.000
def cv_s(P, s, region = 0):
""" Specific isochoric heat capacity [kJ / kg K]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.cv_s(P, s)
elif region is 2:
return region2.cv_s(P, s)
elif region is 4:
return region4.cv_s(P, s)
else:
return 0.000
def w_s(P, s, region = 0):
""" Speed of sound [m / s]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.w_s(P, s)
elif region is 2:
return region2.w_s(P, s)
elif region is 4:
return region4.w_s(P, s)
else:
return 0.000
def a_s(P, s, region = 0):
"""Isobaric cubic expansion coefficient [1 / K]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.a_s(P, s)
elif region is 2:
return region2.a_s(P, s)
elif region is 4:
return region4.a_s(P, s)
else:
return 0.000
def k_s(P, s, region = 0):
"""Isothermal compressibility [kg / kJ]"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.k_s(P, s)
elif region is 2:
return region2.k_s(P, s)
elif region is 4:
return region4.k_s(P, s)
else:
return 0.000
#### water property derivatives ####
def dgdP_s(P, s, region = 0):
""" Derivative of specific gibbs free energy [kJ m^3 / kg kJ]
w.r.t pressure at constant specific entropy"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dgdP_s(P, s)
elif region is 2:
return region2.dgdP_s(P, s)
elif region is 4:
return region4.dgdP_s(P, s)
else:
return 0.000
def dvdP_s(P, s, region = 0):
""" Derivative of specific volume [m^3 m^3 / kg kJ]
w.r.t pressure at constant specific entropy"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dvdP_s(P, s)
elif region is 2:
return region2.dvdP_s(P, s)
elif region is 4:
return region4.dvdP_s(P, s)
else:
return 0.000
def dudP_s(P, s, region = 0):
""" Derivative of specific internal energy [kJ m^3 / kg kJ]
w.r.t pressure at constant specific entropy"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dudP_s(P, s)
elif region is 2:
return region2.dudP_s(P, s)
elif region is 4:
return region4.dudP_s(P, s)
else:
return 0.000
def dsdP_s(P, s, region = 0):
""" Derivative of specific entropy [kJ m^3 / kg K kJ]
w.r.t pressure at constant specific/equilibrium entropy"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return 0.000
elif region is 2:
return 0.000
elif region is 4:
return region4.dsdP_s(P, s)
else:
return 0.000
def dhdP_s(P, s, region = 0):
""" Derivative of specific enthalpy [kJ m^3 / kg kJ]
w.r.t pressure at constant specific entropy"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dhdP_s(P, s)
elif region is 2:
return region2.dhdP_s(P, s)
elif region is 4:
return region4.dhdP_s(P, s)
else:
return 0.000
def dTdP_s(P, s, region = 0):
""" Derivative of Temperature [K m^3 / kJ]
w.r.t pressure at constant specific entropy"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dTdP_s(P, s)
elif region is 2:
return region2.dTdP_s(P, s)
elif region is 4:
return region4.dTsdP(P)
else:
return 0.000
def dgds_s(P, s, region = 0):
""" Derivative of specific gibbs free energy [kJ kg K / kg kJ]
w.r.t specific entropy at constant pressure"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dgds_s(P, s)
elif region is 2:
return region2.dgds_s(P, s)
elif region is 4:
return region4.dgds_s(P, s)
else:
return 0.000
def dvds_s(P, s, region = 0):
""" Derivative of specific volume [m^3 kg K / kg kJ]
w.r.t specific entropy at constant pressure"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dvds_s(P, s)
elif region is 2:
return region2.dvds_s(P, s)
elif region is 4:
return region4.dvds_s(P, s)
else:
return 0.000
def duds_s(P, s, region = 0):
""" Derivative of specific internal energy [kJ kg K / kg kJ]
w.r.t specific entropy at constant pressure"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.duds_s(P, s)
elif region is 2:
return region2.duds_s(P, s)
elif region is 4:
return region4.duds_s(P, s)
else:
return 0.000
def dsds_s(P, s, region = 0):
""" Derivative of specific entropy [kJ kg K / kg K kJ]
w.r.t specific entropy at constant pressure"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return 1.000
elif region is 2:
return 1.000
elif region is 4:
return 1.000
else:
return 0.000
def dhds_s(P, s, region = 0):
""" Derivative of specific enthalpy [kJ kg K / kg kJ]
w.r.t specific entropy at constant pressure"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dhds_s(P, s)
elif region is 2:
return region2.dhds_s(P, s)
elif region is 4:
return region4.dhds_s(P, s)
else:
return 0.000
def dTds_s(P, s, region = 0):
""" Derivative of Temperature [K kg K / kJ]
    w.r.t specific entropy at constant pressure"""
if region is 0:
region = idRegion_s(P, s)
if region is 1:
return region1.dTds_s(P, s)
elif region is 2:
return region2.dTds_s(P, s)
elif region is 4:
return 0.000
else:
return 0.000
###########################################################
##### Pressure Only (Saturation) Formulation #####
###########################################################
#### P-T saturation curves ####
def satP(T):
""" Saturation Pressure [Mpa]
for specified Temperature"""
return region4.satP(T)
def satT(P):
""" Saturation Temperature [K]
for specified Pressure"""
return region4.satT(P)
#### Saturated liquid properties ####
def gf(P):
""" Specific gibbs free energy [kJ / kg]
of saturated liquid"""
return region4.gf(P)
def vf(P):
""" Specific volume [m^3 / kg]
of saturated liquid"""
return region4.vf(P)
def uf(P):
""" Specific internal energy [kJ / kg]
of saturated liquid"""
return region4.uf(P)
def sf(P):
""" Specific entropy [kJ / kg K]
of saturated liquid"""
return region4.sf(P)
def hf(P):
""" Specific enthalpy [kJ / kg]
of saturated liquid"""
return region4.hf(P)
def cpf(P):
""" Specific isobaric heat capacity [kJ / kg K]
of saturated liquid"""
return region4.cpf(P)
def cvf(P):
""" Specific isochoric heat capacity [kJ / kg K]
of saturated liquid"""
return region4.cvf(P)
def wf(P):
""" Speed of sound [m / s]
of saturated liquid"""
return region4.wf(P)
def af(P):
"""Isobaric cubic expansion coefficient [1 / K]
of saturated liquid"""
return region4.af(P)
def kf(P):
"""Isothermal compressibility [kg / kJ]
of saturated liquid"""
return region4.kf(P)
#### Saturated vapor properties ####
def gg(P):
""" Specific gibbs free energy [kJ / kg]
of saturated vapor"""
return region4.gg(P)
def vg(P):
""" Specific volume [m^3 / kg]
of saturated vapor"""
return region4.vg(P)
def ug(P):
""" Specific internal energy [kJ / kg]
of saturated vapor"""
return region4.ug(P)
def sg(P):
""" Specific entropy [kJ / kg K]
of saturated vapor"""
return region4.sg(P)
def hg(P):
""" Specific enthalpy [kJ / kg]
of saturated vapor"""
return region4.hg(P)
def cpg(P):
""" Specific isobaric heat capacity [kJ / kg K]
of saturated vapor"""
return region4.cpg(P)
def cvg(P):
""" Specific isochoric heat capacity [kJ / kg K]
of saturated vapor"""
return region4.cvg(P)
def wg(P):
""" Speed of sound [m / s]
of saturated vapor"""
return region4.wg(P)
def ag(P):
"""Isobaric cubic expansion coefficient [1 / K]
of saturated vapor"""
return region4.ag(P)
def kg(P):
"""Isothermal compressibility [kg / kJ]
of saturated vapor"""
return region4.kg(P)
#### delta saturation properties ####
def gfg(P):
""" Specific gibbs free energy; [kJ / kg]
saturation rise of"""
return region4.gfg(P)
def vfg(P):
""" Specific volume; [m^3 / kg]
saturation rise of"""
return region4.vfg(P)
def ufg(P):
""" Specific internal energy; [kJ / kg]
saturation rise of"""
return region4.ufg(P)
def sfg(P):
""" Specific entropy; [kJ / kg K]
saturation rise of"""
return region4.sfg(P)
def hfg(P):
""" Specific enthalpy; [kJ / kg]
saturation rise of"""
return region4.hfg(P)
def cpfg(P):
""" Specific isobaric heat capacity; [kJ / kg K]
saturation rise of"""
return region4.cpfg(P)
def cvfg(P):
""" Specific isochoric heat capacity; [kJ / kg K]
saturation rise of"""
return region4.cvfg(P)
def wfg(P):
""" Speed of sound; [m / s]
saturation rise of"""
return region4.wfg(P)
def afg(P):
"""Isobaric cubic expansion coefficient; [1 / K]
saturation rise of"""
return region4.afg(P)
def kfg(P):
"""Isothermal compressibility; [kg / kJ]
saturation rise of"""
return region4.kfg(P)
#### Saturated liquid derivatives ####
def dgfdP(P):
""" Derivative of Specific gibbs free energy [kJ m^3 / kg kJ]
of saturated liquid w.r.t. pressure"""
return region4.dgfdP(P)
def dvfdP(P):
""" Derivative of Specific volume [m^3 m^3 / kg kJ]
of saturated liquid w.r.t. pressure"""
return region4.dvfdP(P)
def dufdP(P):
""" Derivative of Specific internal energy [kJ m^3 / kg kJ]
of saturated liquid w.r.t. pressure"""
return region4.dufdP(P)
def dsfdP(P):
""" Derivative of Specific entropy [kJ m^3 / kg K kJ]
of saturated liquid w.r.t. pressure"""
return region4.dsfdP(P)
def dhfdP(P):
""" Derivative of Specific enthalpy [kJ m^3 / kg kJ]
of saturated liquid w.r.t. pressure"""
return region4.dhfdP(P)
#### Saturated vapor derivatives ####
def dggdP(P):
""" Derivative of Specific gibbs free energy [kJ m^3 / kg kJ]
of saturated vapor w.r.t. pressure"""
return region4.dggdP(P)
def dvgdP(P):
""" Derivative of Specific volume [m^3 m^3 / kg kJ]
of saturated vapor w.r.t. pressure"""
return region4.dvgdP(P)
def dugdP(P):
""" Derivative of Specific internal energy [kJ m^3 / kg kJ]
of saturated vapor w.r.t. pressure"""
return region4.dugdP(P)
def dsgdP(P):
""" Derivative of Specific entropy [kJ m^3 / kg K kJ]
of saturated vapor w.r.t. pressure"""
return region4.dsgdP(P)
def dhgdP(P):
""" Derivative of Specific enthalpy [kJ m^3 / kg kJ]
of saturated vapor w.r.t. pressure"""
return region4.dhgdP(P)
#### Delta saturation derivatives ####
def dgfgdP(P):
""" Derivative of Specific gibbs free energy [kJ m^3 / kg kJ]
w.r.t. pressure; saturation rise of"""
return region4.dgfgdP(P)
def dvfgdP(P):
""" Derivative of Specific volume [m^3 m^3 / kg kJ]
w.r.t. pressure; saturation rise of"""
return region4.dvfgdP(P)
def dufgdP(P):
""" Derivative of Specific internal energy [kJ m^3 / kg kJ]
w.r.t. pressure; saturation rise of"""
return region4.dufgdP(P)
def dsfgdP(P):
""" Derivative of Specific entropy [kJ m^3 / kg K kJ]
w.r.t. pressure; saturation rise of"""
return region4.dsfgdP(P)
def dhfgdP(P):
""" Derivative of Specific enthalpy [kJ m^3 / kg kJ]
w.r.t. pressure; saturation rise of"""
return region4.dhfgdP(P)
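# Hypothetical usage sketch (values are illustrative only; results depend on the
# region1/region2/region4 implementations imported above):
#     from if97 import h2o
#     region = h2o.idRegion(P=3.0, T=300.0)   # pressure in MPa, temperature in K
#     print(h2o.h(3.0, 300.0, region), h2o.s(3.0, 300.0, region))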
| 2.515625
| 3
|
apps/users/backends.py
|
gene1wood/spark
| 3
|
12776966
|
import hashlib
import os
from django.contrib.auth import models as auth_models
from django.contrib.auth.backends import ModelBackend
# http://fredericiana.com/2010/10/12/adding-support-for-stronger-password-hashes-to-django/
"""
from future import django_sha256_support
Monkey-patch SHA-256 support into Django's auth system. If Django ticket #5600
ever gets fixed, this can be removed.
"""
def get_hexdigest(algorithm, salt, raw_password):
"""Generate SHA-256 hash."""
if algorithm == 'sha256':
return hashlib.sha256((salt + raw_password).encode('utf8')).hexdigest()
else:
return get_hexdigest_old(algorithm, salt, raw_password)
get_hexdigest_old = auth_models.get_hexdigest
auth_models.get_hexdigest = get_hexdigest
def set_password(self, raw_password):
"""Set SHA-256 password."""
algo = 'sha256'
salt = os.urandom(5).encode('hex') # Random, 10-digit (hex) salt.
hsh = get_hexdigest(algo, salt, raw_password)
self.password = '$'.join((algo, salt, hsh))
auth_models.User.set_password = set_password
class Sha256Backend(ModelBackend):
"""
Overriding the Django model backend without changes ensures our
monkeypatching happens by the time we import auth.
"""
pass
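# Hypothetical wiring in settings.py (the dotted path is assumed from apps/users/backends.py
# and depends on how the project puts "apps" on the import path):
#     AUTHENTICATION_BACKENDS = ['users.backends.Sha256Backend']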
| 2.609375
| 3
|
emell/computation/__init__.py
|
jakevoytko/emell
| 2
|
12776967
|
"""Pure math functions used by the ML routines."""
from emell.computation.constant import constant
from emell.computation.identity import identity
from emell.computation.l1_loss import l1_loss
from emell.computation.l2_loss import l2_loss
from emell.computation.quadratic_cost import quadratic_cost
from emell.computation.quadratic_loss import delta_quadratic_loss, quadratic_loss
from emell.computation.relu import relu, relu_prime
__all__ = [
"constant",
"delta_quadratic_loss",
"identity",
"l1_loss",
"l2_loss",
"relu",
"relu_prime",
"quadratic_cost",
"quadratic_loss",
]
| 2.15625
| 2
|
ebc/cafe/skins/ebc_cafe_custom_templates/getFotoCapa.py
|
lflrocha/ebc.cafe
| 0
|
12776968
|
<gh_stars>0
## Script (Python) "getFotoCapa"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=Returns a file name to be used as the cover photo
import random  # needed for random.randint below
padrao = [ 'capa01.jpg','capa02.jpg','capa03.jpg','capa04.jpg','capa05.jpg', \
'capa06.jpg','capa07.jpg', ]
ultimo = context.portal_catalog.searchResults(portal_type='Programa', sort_on='getData', sort_order='reverse', review_state='published')[0]
imagem = ultimo.getImagem
if not imagem:
imagem = context.portal_url() + '/' + padrao[random.randint(0,6)]
else:
imagem = ultimo.getURL() + "/imagem"
return imagem
| 2.6875
| 3
|
wallet/admin.py
|
dacom-dark-sun/dacom-api
| 0
|
12776969
|
from django.contrib import admin
from wallet.models import Wallet, Transaction
admin.site.register([
Wallet,
Transaction,
])
| 1.328125
| 1
|
flask_project/campaign_manager/insights_functions/count_feature.py
|
russbiggs/MapCampaigner
| 24
|
12776970
|
<filename>flask_project/campaign_manager/insights_functions/count_feature.py<gh_stars>10-100
__author__ = '<NAME> <<EMAIL>>'
__date__ = '17/05/17'
from campaign_manager.insights_functions._abstract_overpass_insight_function \
import AbstractOverpassInsightFunction
class CountFeature(AbstractOverpassInsightFunction):
function_name = "Number of feature in group"
# attribute of insight function
need_feature = True
def get_ui_html_file(self):
""" Get ui name in templates
:return: string name of html
:rtype: str
"""
return "piechart"
def get_summary_html_file(self):
""" Get summary name in templates
:return: string name of html
:rtype: str
"""
return ""
def get_details_html_file(self):
""" Get summary name in templates
:return: string name of html
:rtype: str
"""
return ""
def process_data(self, raw_data):
""" Get geometry of campaign.
:param raw_data: Raw data that returns by function provider
:type raw_data: dict
:return: processed data
:rtype: dict
"""
processed_data = []
required_attributes = self.get_required_attributes()
# process data based on required attributes
req_attr = required_attributes
for raw_data in raw_data:
if 'tags' not in raw_data:
continue
processed_data.append(raw_data['tags'])
return processed_data
def post_process_data(self, data):
""" Process data regarding output.
This needed for processing data for counting or grouping.
:param data: Data that received from open street map
:type data: dict
:return: Processed data
:rtype: dict
"""
output = {
'last_update': self.last_update,
'updating': self.is_updating,
'data': {},
'features_data': self.get_function_raw_data()
}
data = data
for current_data in data:
group_type = 'unknown'
group_key = self.feature
features = self.feature.split('=')
if len(features) > 0:
group_key = features[0]
try:
group_type = current_data[group_key]
except KeyError:
pass
building_group = u'{group_type}'.format(
group_key=group_key,
group_type=group_type.capitalize()
)
if building_group not in output['data']:
output['data'][building_group] = 0
output['data'][building_group] += 1
return output
| 2.546875
| 3
|
tutorials/overview.py
|
fatiando-bot/erizo
| 0
|
12776971
|
<filename>tutorials/overview.py
"""
.. _overview:
Overview
========
Brief description of Erizo and the main
functionality. It's a good idea to explain the package conventions.
The library
-----------
Most classes and functions are available through the :mod:`erizo` top level package.
"""
import erizo
###############################################################################
# Functionality 1
# ---------------
#
# Example of functionality 1. The basics.
print(erizo.function())
| 2.140625
| 2
|
VideoTools/__init__.py
|
ausport/labelImg
| 0
|
12776972
|
<reponame>ausport/labelImg<gh_stars>0
from .Video import VideoObject
| 1.109375
| 1
|
wxc_sdk/person_settings/monitoring.py
|
jeokrohn/wxc_sdk
| 0
|
12776973
|
"""
call monitoring API
"""
from typing import Optional, Union
from pydantic import Field
from .common import PersonSettingsApiChild
from ..base import ApiModel, webex_id_to_uuid
from ..common import MonitoredMember, CallParkExtension
__all__ = ['MonitoredElementMember', 'MonitoredElement', 'Monitoring',
'MonitoringApi']
class MonitoredElementMember(MonitoredMember):
#: The location name where the call park extension is.
location_name: Optional[str] = Field(alias='location')
#: The location Id for the location.
location_id: Optional[str]
@property
def ci_location_id(self) -> Optional[str]:
return self.location_id and webex_id_to_uuid(self.location_id)
class MonitoredElement(ApiModel):
#: monitored person or place
member: Optional[MonitoredElementMember]
# TODO: documentation defect: attribute is documented as "cpe"
#: monitored call park extension
cpe: Optional[CallParkExtension] = Field(alias='callparkextension')
class Monitoring(ApiModel):
#: Call park notification is enabled or disabled.
call_park_notification_enabled: Optional[bool]
#: Settings of monitored elements which can be person, place, or call park extension.
#: for updates IDs can be used directly instead of :class:`MonitoredElement` objects
monitored_elements: Optional[list[Union[str, MonitoredElement]]]
@property
def monitored_cpes(self) -> list[CallParkExtension]:
return [me.cpe for me in self.monitored_elements or []
if me.cpe]
@property
def monitored_members(self) -> list[MonitoredElementMember]:
return [me.member for me in self.monitored_elements or []
if me.member]
class MonitoringApi(PersonSettingsApiChild):
"""
API for person's call monitoring settings
"""
feature = 'monitoring'
def read(self, *, person_id: str, org_id: str = None) -> Monitoring:
"""
Retrieve a Person's Monitoring Settings
Retrieves the monitoring settings of the person, which shows specified people, places or, call park
extensions under monitoring. Monitors the line status which indicates if a person or place is on a call and
if a call has been parked on that extension.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: monitoring settings
:rtype: :class:`Monitoring`
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = self.get(ep, params=params)
return Monitoring.parse_obj(data)
def configure(self, *, person_id: str, settings: Monitoring, org_id: str = None):
"""
        Configure a Person's Monitoring Settings
        Updates the monitoring settings of the person, specifying which people, places, or call park
        extensions are monitored. Monitoring shows the line status of each monitored element, such as
        whether it is on a call or has a call parked on it.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: settings for update
:type settings: :class:`Monitoring`
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = {}
if settings.call_park_notification_enabled is not None:
data['enableCallParkNotification'] = settings.call_park_notification_enabled
if settings.monitored_elements is not None:
id_list = []
for me in settings.monitored_elements:
if isinstance(me, str):
id_list.append(me)
else:
id_list.append(me.member and me.member.member_id or me.cpe and me.cpe.cpe_id)
data['monitoredElements'] = id_list
self.put(ep, params=params, json=data)
| 2.265625
| 2
|
rcsnn/nn_ext/bd_mon.py
|
pgfeldman/RCSNN
| 1
|
12776974
|
<reponame>pgfeldman/RCSNN<gh_stars>1-10
from rcsnn.nn_ext.CruiserController import CruiserController
from rcsnn.nn_ext.MissileController import MissileController
from rcsnn.nn_ext.NavigateController import NavigateController
from rcsnn.base.DataDictionary import DataDictionary, DictionaryTypes, DictionaryEntry
from rcsnn.base.CommandObject import CommandObject
from rcsnn.base.Commands import Commands
from rcsnn.base.ResponseObject import ResponseObject
from rcsnn.base.Responses import Responses
from rcsnn.base.BaseController import BaseController
def choose_new_target():
pass
def step_scenario():
choose_new_target()
def main():
"""
    Exercise the classes in a toy hierarchy that initializes, runs, and terminates. The hierarchy is controlled from the
    main loop and has a parent ship controller with two child controllers (navigation and missile).
"""
# create the data dictionary and add "elapsed-time" as a float
ddict = DataDictionary()
elapsed_time_entry = DictionaryEntry("elapsed-time", DictionaryTypes.FLOAT, 0)
ddict.add_entry(elapsed_time_entry)
# Create the command object that will send commands from the main loop to the only module in this hierarchy
top_to_ship_cmd_obj = CommandObject("board-monitor", "ship-controller")
de = DictionaryEntry(top_to_ship_cmd_obj.name, DictionaryTypes.COMMAND, top_to_ship_cmd_obj)
ddict.add_entry(de)
# Create the response object that will send responses from the module to the main loop
top_to_ship_rsp_obj = ResponseObject("board-monitor", "ship-controller")
de = DictionaryEntry(top_to_ship_rsp_obj.name, DictionaryTypes.RESPONSE, top_to_ship_rsp_obj)
ddict.add_entry(de)
    # Create the ship controller. CruiserController is a concrete controller that extends the base controller class.
ship_ctrl = CruiserController("ship-controller", ddict)
ship_ctrl.set_cmd_obj(top_to_ship_cmd_obj)
ship_ctrl.set_rsp_obj(top_to_ship_rsp_obj)
# Add the child controller under the ship controller
navigation_ctrl = NavigateController("navigate-controller", ddict)
BaseController.link_parent_child(ship_ctrl, navigation_ctrl, ddict)
missile_ctrl = MissileController("missile-controller", ddict)
BaseController.link_parent_child(ship_ctrl, missile_ctrl, ddict)
# Set the INIT command that will start the hierarchy, then iterate until the INIT->RUN->TERMINATE sequence completes
top_to_ship_cmd_obj.set(Commands.INIT, 1)
done = False
current_step = 0
while not done:
print("\nstep[{}]---------------".format(current_step))
elapsed_time_entry.data += 0.1
ddict.store(skip = 1)
ddict.log_to_csv("testlog.csv", 1)
# ---------- step all the controllers
ship_ctrl.step()
print(ship_ctrl.to_string())
navigation_ctrl.step()
print(navigation_ctrl.to_string())
missile_ctrl.step()
print(missile_ctrl.to_string())
if top_to_ship_cmd_obj.test(Commands.INIT) and ship_ctrl.rsp.test(Responses.DONE):
top_to_ship_cmd_obj.set(Commands.RUN, 2)
elif top_to_ship_cmd_obj.test(Commands.RUN) and ship_ctrl.rsp.test(Responses.DONE): # handle new targets
top_to_ship_cmd_obj.set(Commands.TERMINATE, 3)
elif top_to_ship_cmd_obj.test(Commands.TERMINATE) and ship_ctrl.rsp.test(Responses.DONE):
done = True
current_step += 1
# for debugging
if current_step == 100:
done = True
print("\nDataDictionary:\n{}".format(ddict.to_string()))
ddict.to_excel("../../data/", "ship-controller.xlsx")
if __name__ == "__main__":
main()
| 2.9375
| 3
|
ssig_site/context_processors.py
|
LeoMcA/103P_2018_team51
| 0
|
12776975
|
from django.conf import settings as s
def settings(request):
return {
'GOOGLE_MAPS_KEY': s.GOOGLE_MAPS_KEY,
}
| 1.414063
| 1
|
scrapyinfo/urls.py
|
ToonoW/SpiderManager
| 34
|
12776976
|
from django.urls import path
from scrapyinfo import views
urlpatterns = [
path('refresh_platform_information', views.RefreshPlatformView.as_view()),
path('scrapyds', views.ScrapydList.as_view()),
path('scrapyd/<pk>', views.ScrapydDetial.as_view()),
path('projects', views.ProjectList.as_view()),
path('project/<pk>', views.ProjectDetial.as_view()),
path('spiders', views.SpiderList.as_view()),
path('spider/<pk>', views.SpiderDetial.as_view()),
path('groups', views.GroupList.as_view()),
path('group/<pk>', views.GroupDetial.as_view()),
]
| 1.789063
| 2
|
dsp_ap/signals.py
|
jpauwels/city_dsp_ap
| 0
|
12776977
|
from .util import Audio
from abc import ABC, abstractmethod
import numpy as np
from scipy import fft, signal
from IPython.display import display
from bokeh.plotting import figure, show
from bokeh.layouts import gridplot
from bokeh.models.mappers import LinearColorMapper
from bokeh.models.ranges import DataRange1d
from bokeh.models.tools import HoverTool
from bokeh.palettes import Viridis256
from bokeh.io import output_notebook
output_notebook()
def get_samples_and_rate(input_signal, samplerate):
if isinstance(input_signal, TimeSignal):
if samplerate is not None:
print('Explicitly defined samplerate gets ignored when input is a TimeSignal', samplerate)
samples = input_signal.samples
samplerate = input_signal.samplerate
elif np.ndim(input_signal) > 0:
if samplerate is None:
raise ValueError('The samplerate needs to be defined explicitly when input is an array or other iterable')
samples = np.asarray(input_signal)
else:
raise TypeError('Only TimeSignals, Numpy arrays or other iterables are supported as input, not {}'.format(type(input_signal)))
return samples, samplerate
def get_samples(input_signal):
if isinstance(input_signal, TimeSignal):
return input_signal.samples
elif np.ndim(input_signal) > 0:
return np.asarray(input_signal)
else:
raise TypeError('Only TimeSignals, Numpy arrays or other iterables are supported as input, not {}'.format(type(input_signal)))
def get_both_samples_and_rate(input_signal1, input_signal2, samplerate=None):
samples1, samplerate1 = get_samples_and_rate(input_signal1, samplerate)
samples2, samplerate2 = get_samples_and_rate(input_signal2, samplerate)
if samplerate1 != samplerate2:
raise ValueError('Both signals need to have the same samplerate')
return samples1, samples2, samplerate1
def get_both_samples(input_signal1, input_signal2):
samples1 = get_samples(input_signal1)
samples2 = get_samples(input_signal2)
if isinstance(input_signal1, TimeSignal) and isinstance(input_signal2, TimeSignal) and input_signal1.samplerate != input_signal2.samplerate:
raise ValueError('Both signals need to have the same samplerate')
return samples1, samples2
def same_type_as(output_samples, input_signal):
if isinstance(input_signal, TimeSignal):
return type(input_signal)(output_samples, input_signal.samplerate)
else:
return output_samples
class Signal(ABC):
@abstractmethod
def plot(self, **fig_args):
pass
def _repr_html_(self):
return show(self.plot())
def display(self, **fig_args):
show(self.plot(**fig_args))
class TimeSignal(Signal):
def __init__(self, samples, samplerate):
self.samples = samples
self.samplerate = samplerate
self.timepoints = np.arange(len(samples)) / samplerate
def plot(self, **fig_args):
fig = figure(width=800, height=400, x_axis_label='time [s]', y_axis_label='amplitude',
tools='pan,wheel_zoom,box_zoom,zoom_in,zoom_out,save,reset', active_drag='pan')
fig.line(self.timepoints, self.samples, line_width=2)
return fig
class AudioSignal(TimeSignal):
def __init__(self, samples, samplerate):
super().__init__(samples, samplerate)
def play(self, normalize=False):
return display(Audio(self.samples, rate=self.samplerate, normalize=normalize))
def plot(self, **fig_args):
default_args = {
'width': 900, 'height': 300,
'x_axis_label': 'time [s]', 'y_axis_label': 'amplitude',
'y_range': (-1, 1),
'tools': 'xpan,xwheel_zoom,box_zoom,xzoom_in,xzoom_out,save,reset',
'active_drag': 'xpan',
'active_inspect': 'auto',
'active_scroll': 'auto',
'toolbar_location': 'above',
}
hover_tool = HoverTool(
tooltips=[('time [s]', '$x{0.000}'), ('amplitude', '$y{0.000}')],
mode='vline',
)
fig = figure(**{**default_args, **fig_args})
fig.line(self.timepoints, self.samples, line_width=2)
fig.add_tools(hover_tool)
return fig
class Spectrum(Signal):
def __init__(self, input, samplerate=None, num_bins=None, power=1, decibels=True):
samples, samplerate = get_samples_and_rate(input, samplerate)
if num_bins is None:
num_bins = len(samples)
self.power = power
self.decibels = decibels
self.spectrum = np.abs(fft.rfft(samples, num_bins))
self.frequencies = np.arange(len(self.spectrum)) * samplerate / num_bins
if decibels:
self.spectrum = power * 10 * np.log10(self.spectrum)
else:
self.spectrum **= power
def plot(self, **fig_args):
default_args = {
'width': 900, 'height': 300,
'x_axis_label': 'frequency [Hz]', 'y_axis_label': 'amplitude',
'tools': 'pan,wheel_zoom,box_zoom,zoom_in,zoom_out,save,reset',
'active_drag': 'pan',
'active_inspect': 'auto',
'active_scroll': 'auto',
'toolbar_location': 'above',
}
hover_tool = HoverTool(
tooltips=[('frequency [Hz]', '$x{0.}'), ['amplitude', '$y{0.000}']],
mode='vline',
)
if self.power == 2:
default_args['y_axis_label'] = 'power'
hover_tool.tooltips[1][0] = 'power'
if self.decibels:
default_args['y_axis_label'] += ' [dB]'
hover_tool.tooltips[1][0] += ' [dB]'
fig = figure(**{**default_args, **fig_args})
fig.line(self.frequencies, self.spectrum, line_width=2)
fig.add_tools(hover_tool)
return fig
class PowerSpectrum(Spectrum):
def __init__(self, input, samplerate=None, num_bins=None, decibels=True):
super().__init__(input, samplerate=samplerate, num_bins=num_bins, power=2, decibels=decibels)
class Spectrogram(Signal):
def __init__(self, input_signal, frame_duration, step_duration, samplerate=None, num_bins=None, window='hann', power=1, decibels=True):
samples, samplerate = get_samples_and_rate(input_signal, samplerate)
self.power = power
self.decibels = decibels
frame_size = round(frame_duration * samplerate)
overlap_size = round((frame_duration-step_duration) * samplerate)
        self.frequencies, self.times, stft_values = signal.stft(samples, fs=samplerate, window=window, nperseg=frame_size, noverlap=overlap_size)
        # take the magnitude of the complex STFT before applying power/decibel scaling
        self.array = np.abs(stft_values)
        if decibels:
            self.array = power * 10 * np.log10(self.array)
        else:
            self.array **= power
def plot(self, lowest_value=None, highest_value=None, palette=None, **fig_args):
if not palette:
palette = list(reversed(Viridis256))
if not lowest_value:
lowest_value = np.min(np.abs(self.array))
if not highest_value:
highest_value = np.max(np.abs(self.array))
default_args = {
'width': 900, 'height': 400,
'x_axis_label': 'time [s]', 'y_axis_label': 'frequency [Hz]',
'tools': 'hover,pan,wheel_zoom,box_zoom,zoom_in,zoom_out,save,reset',
'active_drag': 'pan',
'active_inspect': 'auto',
'active_scroll': 'auto',
'toolbar_location': 'above',
'tooltips': [('time [s]', '$x{0.000}'), ('frequency [Hz]', '$y{0.}'), ['amplitude', '@image']],
}
if self.power == 2:
default_args['tooltips'][2][0] = 'power'
if self.decibels:
default_args['tooltips'][2][0] += ' [dB]'
fig = figure(**{**default_args, **fig_args})
if isinstance(fig.x_range, DataRange1d):
fig.x_range.range_padding = 0
if isinstance(fig.y_range, DataRange1d):
fig.y_range.range_padding = 0
mapper = LinearColorMapper(palette=palette, low=lowest_value, high=highest_value)
fig.image([np.abs(self.array)], x=self.times[0], y=self.frequencies[0], dw=self.times[-1], dh=self.frequencies[-1], color_mapper=mapper)
return fig
| 2.375
| 2
|
zksync_sdk/wallet.py
|
mmagician/zksync-python
| 0
|
12776978
|
<gh_stars>0
from decimal import Decimal
from typing import List, Optional, Tuple, Union
from zksync_sdk.ethereum_provider import EthereumProvider
from zksync_sdk.ethereum_signer import EthereumSignerInterface
from zksync_sdk.types import (ChangePubKey, ChangePubKeyCREATE2, ChangePubKeyEcdsa,
ChangePubKeyTypes, EncodedTx,
ForcedExit, Token,
TokenLike, Tokens, TransactionWithSignature, Transfer,
TxEthSignature, Withdraw, )
from zksync_sdk.zksync_provider import FeeTxType, ZkSyncProviderInterface
from zksync_sdk.zksync_signer import ZkSyncSigner
DEFAULT_VALID_FROM = 0
DEFAULT_VALID_UNTIL = 2 ** 32 - 1
class WalletError(Exception):
pass
class TokenNotFoundError(WalletError):
pass
class Wallet:
def __init__(self, ethereum_provider: EthereumProvider, zk_signer: ZkSyncSigner,
eth_signer: EthereumSignerInterface, provider: ZkSyncProviderInterface):
self.ethereum_provider = ethereum_provider
self.zk_signer = zk_signer
self.eth_signer = eth_signer
self.zk_provider = provider
self.tokens = Tokens(tokens=[])
async def send_signed_transaction(self, tx: EncodedTx, eth_signature: TxEthSignature,
fast_processing: bool = False) -> str:
return await self.zk_provider.submit_tx(tx, eth_signature, fast_processing)
async def send_txs_batch(self, transactions: List[TransactionWithSignature],
signatures: Optional[
Union[List[TxEthSignature], TxEthSignature]
] = None) -> List[str]:
return await self.zk_provider.submit_txs_batch(transactions, signatures)
async def set_signing_key(self, fee_token: TokenLike, *,
eth_auth_data: Union[ChangePubKeyCREATE2, ChangePubKeyEcdsa] = None,
fee: Decimal = None, nonce: int = None,
valid_from=DEFAULT_VALID_FROM, valid_until=DEFAULT_VALID_UNTIL):
change_pub_key, eth_signature = await self.build_change_pub_key(fee_token,
eth_auth_data=eth_auth_data,
fee=fee, nonce=nonce,
valid_from=valid_from,
valid_until=valid_until)
return await self.send_signed_transaction(change_pub_key, eth_signature)
async def build_change_pub_key(
self, fee_token: TokenLike, *,
fee: Decimal = None, nonce: int = None,
eth_auth_data: Union[ChangePubKeyCREATE2, ChangePubKeyEcdsa] = None,
valid_from=DEFAULT_VALID_FROM, valid_until=DEFAULT_VALID_UNTIL
):
account_id, new_nonce = await self.zk_provider.get_account_nonce(self.address())
nonce = nonce or new_nonce
token = await self.resolve_token(fee_token)
if isinstance(eth_auth_data, ChangePubKeyEcdsa):
eth_auth_type = ChangePubKeyTypes.ecdsa
elif isinstance(eth_auth_data, ChangePubKeyCREATE2):
eth_auth_type = ChangePubKeyTypes.create2
else:
eth_auth_type = ChangePubKeyTypes.onchain
if fee is None:
if eth_auth_type == ChangePubKeyTypes.ecdsa:
fee = await self.zk_provider.get_transaction_fee(FeeTxType.change_pub_key_ecdsa,
self.address(),
fee_token)
elif eth_auth_type == ChangePubKeyTypes.onchain:
fee = await self.zk_provider.get_transaction_fee(FeeTxType.change_pub_key_onchain,
self.address(),
fee_token)
elif eth_auth_type == ChangePubKeyTypes.create2:
fee = await self.zk_provider.get_transaction_fee(FeeTxType.change_pub_key_create2,
self.address(),
fee_token)
fee = fee.total_fee
else:
fee = token.from_decimal(fee)
new_pubkey_hash = self.zk_signer.pubkey_hash_str()
change_pub_key = ChangePubKey(
account=self.address(),
account_id=account_id,
new_pk_hash=new_pubkey_hash,
token=token,
fee=fee,
nonce=nonce,
valid_until=valid_until,
valid_from=valid_from,
eth_auth_data=eth_auth_data
)
eth_signature = await self.eth_signer.sign(change_pub_key.get_eth_tx_bytes())
eth_auth_data = change_pub_key.get_auth_data(eth_signature.signature)
change_pub_key.eth_auth_data = eth_auth_data
zk_signature = self.zk_signer.sign_tx(change_pub_key)
change_pub_key.signature = zk_signature
return change_pub_key, eth_signature
async def forced_exit(self, target: str, token: TokenLike, fee: Decimal = None,
valid_from=DEFAULT_VALID_FROM, valid_until=DEFAULT_VALID_UNTIL) -> str:
transfer, eth_signature = await self.build_forced_exit(target, token, fee,
valid_from, valid_until)
return await self.send_signed_transaction(transfer, eth_signature)
async def build_forced_exit(
self,
target: str,
token: TokenLike,
fee: Decimal = None,
valid_from=DEFAULT_VALID_FROM,
valid_until=DEFAULT_VALID_UNTIL
) -> Tuple[ForcedExit, TxEthSignature]:
account_id, nonce = await self.zk_provider.get_account_nonce(self.address())
token = await self.resolve_token(token)
if fee is None:
fee = await self.zk_provider.get_transaction_fee(FeeTxType.withdraw, target, token.id)
fee = fee.total_fee
else:
fee = token.from_decimal(fee)
forced_exit = ForcedExit(initiator_account_id=account_id,
target=target,
fee=fee,
nonce=nonce,
valid_from=valid_from,
valid_until=valid_until,
token=token)
eth_signature = await self.eth_signer.sign_tx(forced_exit)
zk_signature = self.zk_signer.sign_tx(forced_exit)
forced_exit.signature = zk_signature
return forced_exit, eth_signature
def address(self):
return self.eth_signer.address()
async def build_transfer(self, to: str, amount: Decimal, token: TokenLike,
fee: Decimal = None,
valid_from=DEFAULT_VALID_FROM,
valid_until=DEFAULT_VALID_UNTIL) -> Tuple[Transfer, TxEthSignature]:
account_id, nonce = await self.zk_provider.get_account_nonce(self.address())
token = await self.resolve_token(token)
if fee is None:
fee = await self.zk_provider.get_transaction_fee(FeeTxType.transfer, to, token.id)
fee = fee.total_fee
else:
fee = token.from_decimal(fee)
transfer = Transfer(account_id=account_id, from_address=self.address(),
to_address=to,
amount=token.from_decimal(amount), fee=fee,
nonce=nonce,
valid_from=valid_from,
valid_until=valid_until,
token=token)
eth_signature = await self.eth_signer.sign_tx(transfer)
zk_signature = self.zk_signer.sign_tx(transfer)
transfer.signature = zk_signature
return transfer, eth_signature
async def transfer(self, to: str, amount: Decimal, token: TokenLike,
fee: Decimal = None,
valid_from=DEFAULT_VALID_FROM, valid_until=DEFAULT_VALID_UNTIL) -> str:
transfer, eth_signature = await self.build_transfer(to, amount, token, fee,
valid_from, valid_until)
return await self.send_signed_transaction(transfer, eth_signature)
async def build_withdraw(self, eth_address: str, amount: Decimal, token: TokenLike,
fee: Decimal = None, fast: bool = False,
valid_from=DEFAULT_VALID_FROM,
valid_until=DEFAULT_VALID_UNTIL) -> (Withdraw, TxEthSignature):
account_id, nonce = await self.zk_provider.get_account_nonce(self.address())
token = await self.resolve_token(token)
if fee is None:
tx_type = FeeTxType.fast_withdraw if fast else FeeTxType.withdraw
fee = await self.zk_provider.get_transaction_fee(tx_type, eth_address, token.id)
fee = fee.total_fee
else:
fee = token.from_decimal(fee)
withdraw = Withdraw(account_id=account_id, from_address=self.address(),
to_address=eth_address,
amount=token.from_decimal(amount), fee=fee,
nonce=nonce,
valid_from=valid_from,
valid_until=valid_until,
token=token)
eth_signature = await self.eth_signer.sign_tx(withdraw)
zk_signature = self.zk_signer.sign_tx(withdraw)
withdraw.signature = zk_signature
return withdraw, eth_signature
async def withdraw(self, eth_address: str, amount: Decimal, token: TokenLike,
fee: Decimal = None, fast: bool = False,
valid_from=DEFAULT_VALID_FROM, valid_until=DEFAULT_VALID_UNTIL) -> str:
withdraw, eth_signature = await self.build_withdraw(eth_address, amount, token, fee, fast,
valid_from, valid_until)
return await self.send_signed_transaction(withdraw, eth_signature, fast)
async def get_balance(self, token: TokenLike, type: str):
account_state = await self.get_account_state()
token = await self.resolve_token(token)
if type == "committed":
token_balance = account_state.committed.balances.get(token.symbol)
else:
token_balance = account_state.verified.balances.get(token.symbol)
if token_balance is None:
token_balance = 0
return token_balance
async def get_account_state(self):
return await self.zk_provider.get_state(self.address())
async def is_signing_key_set(self) -> bool:
account_state = await self.get_account_state()
signer_pub_key_hash = self.zk_signer.pubkey_hash_str()
return account_state.id is not None and\
account_state.committed.pub_key_hash == signer_pub_key_hash
async def resolve_token(self, token: TokenLike) -> Token:
resolved_token = self.tokens.find(token)
if resolved_token is not None:
return resolved_token
self.tokens = await self.zk_provider.get_tokens()
resolved_token = self.tokens.find(token)
if resolved_token is None:
raise TokenNotFoundError
return resolved_token
| 2.125
| 2
|
src/dataset.py
|
florianvonunold/DYME
| 5
|
12776979
|
<filename>src/dataset.py<gh_stars>1-10
import torch
class Dataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels, numerical_features, prediction_positions):
self.encodings = encodings
self.labels = labels
self.numerical_features = numerical_features
self.prediction_positions = prediction_positions
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item['labels'] = torch.tensor(self.labels[idx]).to(torch.float32)
# add numerical features: utterance feature vectors (specified metrics per utterance) concatenated sequentially
# + prediction position as scalar
item['numerical_features'] = torch.tensor(self.numerical_features[idx]) # get metrics
item['numerical_features'] = item['numerical_features'].to(torch.float32) # convert to float32
item['prediction_position'] = torch.tensor([self.prediction_positions[idx]])
item['ids'] = torch.tensor([idx])
return item
def __len__(self):
return len(self.labels)
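# Minimal usage sketch with toy data. Assumption: "encodings" mirrors the output of a
# HuggingFace-style tokenizer (lists of equal-length id sequences); the values below are
# illustrative only and not part of the original pipeline.
if __name__ == "__main__":
    toy_encodings = {"input_ids": [[101, 7592, 102]], "attention_mask": [[1, 1, 1]]}
    toy_labels = [[0.5, 1.0]]                    # one target vector per dialogue
    toy_numerical_features = [[0.1, 0.2, 0.3]]   # concatenated per-utterance metrics
    toy_prediction_positions = [2]
    dataset = Dataset(toy_encodings, toy_labels, toy_numerical_features, toy_prediction_positions)
    sample = dataset[0]
    print(sample["labels"], sample["numerical_features"], sample["prediction_position"])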
| 2.703125
| 3
|
tests/integration/relation_name_tests/test_relation_name.py
|
dlb8685/dbt-redshift
| 18
|
12776980
|
<reponame>dlb8685/dbt-redshift
from tests.integration.base import DBTIntegrationTest, use_profile
class TestAdapterDDL(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.run_dbt(["seed"])
@property
def schema(self):
return "adapter_ddl"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
"config-version": 2,
"seeds": {
"quote_columns": False,
},
}
@use_profile("redshift")
def test_redshift_long_name_succeeds(self):
self.run_dbt(["run"], expect_pass=True)
| 2.109375
| 2
|
experiments/xor_rxor_spiral_exp/spiral_appendix_plot.py
|
jdey4/progressive-learning
| 1
|
12776981
|
<reponame>jdey4/progressive-learning<filename>experiments/xor_rxor_spiral_exp/spiral_appendix_plot.py
#%%
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
import seaborn as sns
import numpy as np
import pickle
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
import sys
sys.path.append("../../src/")
from lifelong_dnn import LifeLongDNN
from joblib import Parallel, delayed
# %%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def generate_spirals(N, D=2, K=5, noise = 0.5, acorn = None, density=0.3):
    # N: number of points per class
    # D: number of features
    # K: number of classes
X = []
Y = []
if acorn is not None:
np.random.seed(acorn)
if K == 2:
turns = 2
elif K==3:
turns = 2.5
elif K==5:
turns = 3.5
elif K==7:
turns = 4.5
else:
print ("sorry, can't currently surpport %s classes " %K)
return
mvt = np.random.multinomial(N, 1/K * np.ones(K))
if K == 2:
# r = np.linspace(0.01, 1, N)
r = np.random.uniform(0,1,size=int(N/K))
r = np.sort(r)
t = np.linspace(0, np.pi* 4 * turns/K, int(N/K)) + noise * np.random.normal(0, density, int(N/K))
dx = r * np.cos(t)
dy = r* np.sin(t)
X.append(np.vstack([dx, dy]).T )
X.append(np.vstack([-dx, -dy]).T)
Y += [0] * int(N/K)
Y += [1] * int(N/K)
else:
for j in range(1, K+1):
r = np.linspace(0.01, 1, int(mvt[j-1]))
t = np.linspace((j-1) * np.pi *4 *turns/K, j* np.pi * 4* turns/K, int(mvt[j-1])) + noise * np.random.normal(0, density, int(mvt[j-1]))
dx = r * np.cos(t)
dy = r* np.sin(t)
dd = np.vstack([dx, dy]).T
X.append(dd)
#label
Y += [j-1] * int(mvt[j-1])
return np.vstack(X), np.array(Y).astype(int)
def get_colors(colors, inds):
c = [colors[i] for i in inds]
return c
#%% Plotting the result
#mc_rep = 50
mean_error = unpickle('result/mean_spiral.pickle')
std_error = unpickle('result/std_spiral.pickle')
spiral3 = (100*np.arange(0.5, 7.25, step=0.25)).astype(int)
spiral5 = (100*np.arange(0.5, 7.50, step=0.25)).astype(int)
n1s = spiral3
n2s = spiral5
ns = np.concatenate((n1s, n2s + n1s[-1]))
ls=['-', '--']
algorithms = ['Uncertainty Forest', 'Lifelong Forest']
TASK1='3 spirals'
TASK2='5 spirals'
fontsize=30
labelsize=28
colors = sns.color_palette("Set1", n_colors = 2)
fig = plt.figure(constrained_layout=True,figsize=(23,14))
gs = fig.add_gridspec(14, 23)
ax1 = fig.add_subplot(gs[7:,:6])
# for i, algo in enumerate(algorithms):
ax1.plot(ns, mean_error[0], label=algorithms[0], c=colors[1], ls=ls[np.sum(0 > 1).astype(int)], lw=3)
#ax1.fill_between(ns,
# mean_error[0] + 1.96*std_error[0],
# mean_error[0] - 1.96*std_error[0],
# where=mean_error[0] + 1.96*std_error[0] >= mean_error[0] - 1.96*std_error[0],
# facecolor=colors[1],
# alpha=0.15,
# interpolate=True)
ax1.plot(ns, mean_error[1], label=algorithms[1], c=colors[0], ls=ls[np.sum(1 > 1).astype(int)], lw=3)
#ax1.fill_between(ns,
# mean_error[1] + 1.96*std_error[1, ],
# mean_error[1] - 1.96*std_error[1, ],
# where=mean_error[1] + 1.96*std_error[1] >= mean_error[1] - 1.96*std_error[1],
# facecolor=colors[0],
# alpha=0.15,
# interpolate=True)
ax1.set_ylabel('Generalization Error (%s)'%(TASK1), fontsize=fontsize)
ax1.legend(loc='upper right', fontsize=20, frameon=False)
ax1.set_ylim(0.2, 0.7)
ax1.set_xlabel('Total Sample Size', fontsize=fontsize)
ax1.tick_params(labelsize=labelsize)
ax1.set_yticks([0.2, 0.4,.6])
ax1.set_xticks([250,750,1500])
ax1.axvline(x=750, c='gray', linewidth=1.5, linestyle="dashed")
ax1.set_title('3 spirals', fontsize=30)
right_side = ax1.spines["right"]
right_side.set_visible(False)
top_side = ax1.spines["top"]
top_side.set_visible(False)
ax1.text(150, np.mean(ax1.get_ylim()), "%s"%(TASK1), fontsize=26)
ax1.text(900, np.mean(ax1.get_ylim()), "%s"%(TASK2), fontsize=26)
#plt.tight_layout()
#plt.savefig('./result/figs/generalization_error_xor.pdf',dpi=500)
#####
mean_error = unpickle('result/mean_spiral.pickle')
std_error = unpickle('result/std_spiral.pickle')
algorithms = ['Uncertainty Forest', 'Lifelong Forest']
TASK1='3 spirals'
TASK2='5 spirals'
ax1 = fig.add_subplot(gs[7:,8:14])
# for i, algo in enumerate(algorithms):
ax1.plot(ns[len(n1s):], mean_error[2, len(n1s):], label=algorithms[0], c=colors[1], ls=ls[1], lw=3)
#ax1.fill_between(ns[len(n1s):],
# mean_error[2, len(n1s):] + 1.96*std_error[2, len(n1s):],
# mean_error[2, len(n1s):] - 1.96*std_error[2, len(n1s):],
# where=mean_error[2, len(n1s):] + 1.96*std_error[2, len(n1s):] >= mean_error[2, len(n1s):] - 1.96*std_error[2, len(n1s):],
# facecolor=colors[1],
# alpha=0.15,
# interpolate=True)
ax1.plot(ns[len(n1s):], mean_error[3, len(n1s):], label=algorithms[1], c=colors[0], ls=ls[1], lw=3)
#ax1.fill_between(ns[len(n1s):],
# mean_error[3, len(n1s):] + 1.96*std_error[3, len(n1s):],
# mean_error[3, len(n1s):] - 1.96*std_error[3, len(n1s):],
# where=mean_error[3, len(n1s):] + 1.96*std_error[3, len(n1s):] >= mean_error[3, len(n1s):] - 1.96*std_error[3, len(n1s):],
# facecolor=colors[0],
# alpha=0.15,
# interpolate=True)
ax1.set_ylabel('Generalization Error (%s)'%(TASK2), fontsize=fontsize)
ax1.legend(loc='upper right', fontsize=20, frameon=False)
# ax1.set_ylim(-0.01, 0.22)
ax1.set_xlabel('Total Sample Size', fontsize=fontsize)
ax1.tick_params(labelsize=labelsize)
ax1.set_yticks([0.2, 0.4, 0.6])
#ax1.set_yticks([0.15, 0.2])
ax1.set_xticks([250,750,1500])
ax1.axvline(x=750, c='gray', linewidth=1.5, linestyle="dashed")
ax1.set_ylim(0.2, 0.7)
ax1.set_xlim(0,1500)
ax1.set_title('5 spirals', fontsize=30)
right_side = ax1.spines["right"]
right_side.set_visible(False)
top_side = ax1.spines["top"]
top_side.set_visible(False)
# ax1.set_ylim(0.14, 0.36)
ax1.text(150, np.mean(ax1.get_ylim()), "%s"%(TASK1), fontsize=26)
ax1.text(900, np.mean(ax1.get_ylim()), "%s"%(TASK2), fontsize=26)
#plt.tight_layout()
#plt.savefig('./result/figs/generalization_error_nxor.pdf',dpi=500)
#####
mean_error = unpickle('result/mean_te_spiral.pickle')
std_error = unpickle('result/std_te_spiral.pickle')
algorithms = ['Backward Transfer', 'Forward Transfer']
TASK1='3 spirals'
TASK2='5 spirals'
ax1 = fig.add_subplot(gs[7:,16:22])
ax1.plot(ns, mean_error[0], label=algorithms[0], c=colors[0], ls=ls[0], lw=3)
#ax1.fill_between(ns,
# mean_error[0] + 1.96*std_error[0],
# mean_error[0] - 1.96*std_error[0],
# where=mean_error[1] + 1.96*std_error[0] >= mean_error[0] - 1.96*std_error[0],
# facecolor=colors[0],
# alpha=0.15,
# interpolate=True)
ax1.plot(ns[len(n1s):], mean_error[1, len(n1s):], label=algorithms[1], c=colors[0], ls=ls[1], lw=3)
#ax1.fill_between(ns[len(n1s):],
# mean_error[1, len(n1s):] + 1.96*std_error[1, len(n1s):],
# mean_error[1, len(n1s):] - 1.96*std_error[1, len(n1s):],
# where=mean_error[1, len(n1s):] + 1.96*std_error[1, len(n1s):] >= mean_error[1, len(n1s):] - 1.96*std_error[1, len(n1s):],
# facecolor=colors[0],
# alpha=0.15,
# interpolate=True)
ax1.set_ylabel('Transfer Efficiency', fontsize=fontsize)
ax1.legend(loc='upper right', fontsize=20, frameon=False)
ax1.set_ylim(.92, 1.1)
ax1.set_xlabel('Total Sample Size', fontsize=fontsize)
ax1.tick_params(labelsize=labelsize)
ax1.set_yticks([.92,1,1.08])
ax1.set_xticks([250,750,1500])
ax1.axvline(x=750, c='gray', linewidth=1.5, linestyle="dashed")
right_side = ax1.spines["right"]
right_side.set_visible(False)
top_side = ax1.spines["top"]
top_side.set_visible(False)
ax1.hlines(1, 50,1500, colors='gray', linestyles='dashed',linewidth=1.5)
ax1.text(150, np.mean(ax1.get_ylim()), "%s"%(TASK1), fontsize=26)
ax1.text(900, np.mean(ax1.get_ylim()), "%s"%(TASK2), fontsize=26)
#plt.tight_layout()
#plt.savefig('./result/figs/TE.pdf',dpi=500)
#####
colors = sns.color_palette('Dark2', n_colors=5)
X, Y = generate_spirals(750, 2, 3, noise = 2.5)
Z, W = generate_spirals(750, 2, 5, noise = 2.5)
ax = fig.add_subplot(gs[:6,4:10])
ax.scatter(X[:, 0], X[:, 1], c=get_colors(colors, Y), s=50)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title('3 spirals', fontsize=30)
#plt.tight_layout()
ax.axis('off')
#plt.savefig('./result/figs/gaussian-xor.pdf')
###
colors = sns.color_palette('Dark2', n_colors=5)
ax = fig.add_subplot(gs[:6,11:16])
ax.scatter(Z[:, 0], Z[:, 1], c=get_colors(colors, W), s=50)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title('5 spirals', fontsize=30)
ax.axis('off')
#plt.tight_layout()
plt.savefig('./result/figs/spiral_exp.pdf')
# %%
| 2.46875
| 2
|
desafio005.py
|
RickChaves29/Desafios-Python
| 0
|
12776982
|
numero = int(input('Digite um número: '))
a = numero - 1
s = numero + 1
print('Seu número é {} o antecessor é {} e o sucessor é {}'.format(numero, a, s))
| 4.125
| 4
|
nlp/src/aad/aad_session_middleware.py
|
dupuyjs/lis-project
| 0
|
12776983
|
from starlette.authentication import (
AuthCredentials,
AuthenticationBackend,
UnauthenticatedUser,
)
from .aad_authentication_client import AadAuthenticationClient
class AadSessionMiddleware(AuthenticationBackend):
async def authenticate(self, request):
"""Authenticate a request.
If authentication is successful, defining a user instance
"""
try:
if not request.session or not request.session.get("aad_id"):
return AuthCredentials(None), UnauthenticatedUser()
aad_client = AadAuthenticationClient(session=request.session)
# Do not validate signature, since we may have here a
# microsoft graph token, that we can't validate
# but it's fine since we are not on the web api side
user = await aad_client.get_user(False)
if user is None:
return AuthCredentials(None), UnauthenticatedUser()
return AuthCredentials(user.scopes), user
except Exception:
return AuthCredentials(None), UnauthenticatedUser()
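# Minimal wiring sketch (assumption: a Starlette or FastAPI app with session support;
# the secret key and app setup below are illustrative, not part of this module):
#
#   from starlette.applications import Starlette
#   from starlette.middleware.authentication import AuthenticationMiddleware
#   from starlette.middleware.sessions import SessionMiddleware
#
#   app = Starlette()
#   app.add_middleware(AuthenticationMiddleware, backend=AadSessionMiddleware())
#   # SessionMiddleware is added last so it runs first and populates request.session
#   app.add_middleware(SessionMiddleware, secret_key="change-me")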
| 2.609375
| 3
|
src/lib/bigcities.py
|
taygetea/weather
| 1
|
12776984
|
<reponame>taygetea/weather
import json
"""
This code cleans the city data in worldcitiespop.txt and writes each city with a population over
some threshold to a JSON file. This is used as a heuristic for resolving which city a user means when they type a city name.
"""
threshold = 100000
with open('worldcitiespop.txt') as f:
    next(f)
cities = [city.split(',') for city in f]
cleancities = []
for i in cities:
if i[4]:
cleancities.append([i[0], i[1], int(i[4])] + [round(float(x), 2) for x in i[5:7]])
countries = list(set([city[0] for city in cleancities]))
cityset = list(set([city[1] for city in cleancities]))
localcities = lambda cc, size: [x for x in cleancities if x[0] == cc and x[2] > size]
citydict = {}
for country in countries:
cities = localcities(country, threshold)
if len(cities):
for city in cities:
singlecity = dict(Country=city[0], Population=city[2], Longitude=city[3], Latitude=city[4])
if city[1] not in citydict.keys():
citydict[city[1]] = [singlecity]
else:
citydict[city[1]].append(singlecity)
            print(city[1])
with open('clean.json', 'w+') as cleaned:
json.dump(citydict, cleaned, indent=4, sort_keys=True)
| 3.515625
| 4
|
Desafios/034 desafio.py
|
Grigolo/python-aprendizado
| 0
|
12776985
|
'''
Write a program that asks for an employee's salary
and calculates the value of their raise:
-> For salaries above R$ 1250.00, apply a 10% raise
-> For salaries less than or equal to that, the raise is 15%.
'''
salario = float(input("Digite o seu salário: "))
s = salario
if salario > 1250:
salario = salario + (salario * 10) / 100
print("Salário antigo {}, com aumento ficou salário de {}.".format(s, salario))
else:
print("Salário antigo {} novo salário após aumento {}".format(s, salario + (salario * 15)/100))
| 3.84375
| 4
|
src/analyze/track/analyze_exit_points.py
|
kishorekolli/deep_racer_guru
| 0
|
12776986
|
#
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
import tkinter as tk
from src.analyze.track.track_analyzer import TrackAnalyzer
from src.episode.episode import LAP_COMPLETE, OFF_TRACK, CRASHED, REVERSED, LOST_CONTROL
from src.graphics.track_graphics import TrackGraphics
from src.analyze.core.controls import EpisodeRadioButtonControl, OutcomesCheckButtonControl
class AnalyzeExitPoints(TrackAnalyzer):
def __init__(self, guru_parent_redraw, track_graphics: TrackGraphics, control_frame: tk.Frame):
super().__init__(guru_parent_redraw, track_graphics, control_frame)
self._episodes_control = EpisodeRadioButtonControl(guru_parent_redraw, control_frame, True)
self._outcome_control = OutcomesCheckButtonControl(guru_parent_redraw, control_frame)
def build_control_frame(self, control_frame):
self._episodes_control.add_to_control_frame()
self._outcome_control.add_to_control_frame()
def redraw(self):
if self._episodes_control.show_filtered():
episodes = self.filtered_episodes
elif self._episodes_control.show_all():
episodes = self.all_episodes
else:
episodes = None
if episodes:
for e in episodes:
colour = None
if e.outcome == LAP_COMPLETE and self._outcome_control.show_lap_complete():
colour = "green"
if e.outcome == OFF_TRACK and self._outcome_control.show_off_track():
colour = "orange"
if e.outcome == CRASHED and self._outcome_control.show_crashed():
colour = "red"
if e.outcome == REVERSED and self._outcome_control.show_reversed():
colour = "cyan"
if e.outcome == LOST_CONTROL and self._outcome_control.show_lost_control():
colour = "yellow"
if colour:
exit_point = (e.events[-1].x, e.events[-1].y)
self.track_graphics.plot_dot(exit_point, 3, colour)
elif self._episodes_control.show_evaluations() and self.all_episodes:
start_wp = self.all_episodes[0].events[0].closest_waypoint_index
start_percent = self.current_track.get_waypoint_percent_from_race_start(start_wp)
whole_lap = False
for v in self.evaluation_phases:
for p in v.progresses:
if p == 100:
whole_lap = True
else:
p += start_percent
if p > 100:
p -= 100
exit_point = self.current_track.get_percent_progress_point_on_centre_line(p)
self.track_graphics.plot_dot(exit_point, 3, "orange")
if whole_lap:
exit_point = self.current_track.get_percent_progress_point_on_centre_line(start_percent)
self.track_graphics.plot_dot(exit_point, 5, "green")
| 2.390625
| 2
|
treasure/Phase1/Basic Python1/hello.py
|
treasurechristain/python-challenge-solutions
| 0
|
12776987
|
print('My name is <NAME>')
| 1.875
| 2
|
load_embedding.py
|
vinayakaraj-t/CIKM2020
| 1
|
12776988
|
import pandas as pd
import numpy as np
from urllib.parse import urlparse
import io
import gc
import re
import string
from utils import *
import tensorflow as tf
def load_vectors(fname,count_words):
fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
data = {}
data_list=[]
for line in fin:
tokens = line.rstrip().split(' ')
tk = tokens[0]
if tk in count_words:
vec=list(map(float, tokens[1:]))
data[tk] = vec
data_list.append(vec)
return data,data_list
def glove_load_vectors(fname,count_words):
data={}
fastvec = open(fname)
counter=1
data_list=[]
while counter>0:
try:
f=fastvec.__next__()
tokens = f.rstrip().split(' ')
tk=tokens[0]
if tk in count_words:
vec = list(map(float, tokens[1:]))
data[tk] = vec
data_list.append(vec)
counter+=1
except:
print("total tokens",counter)
counter=0
pass
return data,data_list
def create_embeddings(train_data,embedding_path,wordvec_name,stop_set,word_dim):
entity1 = train_data["entities"].apply(lambda x: combine_entity(x))
mention_dt = train_data["hashtags"].apply(lambda x: hashtag(x))
url_dt1 = train_data["urls"].apply(lambda x: process_urlPath(x,0,stop_set))
url_dt2 = train_data["urls"].apply(lambda x: process_urlPath(x,1,stop_set))
mention_splt = train_data["mentions"].apply(lambda x: hashtag(x))
dt_concat =pd.concat([entity1,mention_dt,url_dt1,url_dt2,mention_splt],axis=0)
print("create entity tokenizer")
tokenizer = tf.keras.preprocessing.text.Tokenizer(
filters='',
lower=True,
split=" ",
char_level=False,
oov_token=None)
#tokenizer.fit_on_texts(pd.concat([entity1,mention_dt,url_dt,mention_splt],axis=0))
tokenizer.fit_on_texts(dt_concat)
count_thres = 15
count_words = {w:c for w,c in tokenizer.word_counts.items() if c >= count_thres}
    word_counts = len(count_words) + 1  # +1 because Keras tokenizer indices start at 1 (index 0 is reserved)
tokenizer = tf.keras.preprocessing.text.Tokenizer(
num_words=word_counts,
filters='',
lower=True,
split=" ",
char_level=False,
oov_token=None)
#tokenizer.fit_on_texts(pd.concat([entity1,mention_dt,url_dt,mention_splt],axis=0))
tokenizer.fit_on_texts(dt_concat)
print("load embedding vectors")
if wordvec_name.split(".")[0]=="glove":
fastvec,fastvec_list = glove_load_vectors(embedding_path,count_words)
else:
fastvec,fastvec_list = load_vectors(embedding_path,count_words)
cand=np.array(fastvec_list,dtype='float32')
mu=np.mean(cand, axis=0)
Sigma=np.cov(cand.T)
norm=np.random.multivariate_normal(mu, Sigma, 1)
norm = list(np.reshape(norm, word_dim))
word_counts = len(count_words)+1
word_vectors = np.zeros((word_counts,word_dim))
id_w = tokenizer.index_word
for k in range(1,word_vectors.shape[0]):
ky = id_w[k]
if ky in fastvec:
word_vectors[k,:]=fastvec[ky]
else:
word_vectors[k,:]= norm
return tokenizer,word_counts,word_vectors
| 2.53125
| 3
|
shapelets/phs/ModColor.py
|
richarms/shapelets
| 18
|
12776989
|
<reponame>richarms/shapelets
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
bold='\033[1m'
nobold='\033[0m'
Separator="================================%s=================================="
silent=0
def Str(strin0,col="red",Bold=True):
if silent==1: return strin0
strin=str(strin0)
if col=="red":
ss=FAIL
if col=="green":
ss=OKGREEN
elif col=="yellow":
ss=WARNING
elif col=="blue":
ss=OKBLUE
elif col=="green":
ss=OKGREEN
ss="%s%s%s"%(ss,strin,ENDC)
if Bold: ss="%s%s%s"%(bold,ss,nobold)
return ss
def Sep(strin=None,D=1):
if D!=1:
return Str(Separator%("="*len(strin)))
else:
return Str(Separator%(strin))
def Title(strin,Big=False):
    print()
    print()
    if Big: print(Sep(strin,D=0))
    print(Sep(strin))
    if Big: print(Sep(strin,D=0))
    print()
def disable():
    # without the global declaration these assignments would only create locals
    global HEADER, OKBLUE, OKGREEN, WARNING, FAIL, ENDC
    HEADER = ''
    OKBLUE = ''
    OKGREEN = ''
    WARNING = ''
    FAIL = ''
    ENDC = ''
| 2.453125
| 2
|
server.py
|
teovoinea/opv
| 1
|
12776990
|
<filename>server.py
import socket
import time
UDP_IP = "127.0.0.1"
UDP_PORT = 4242
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
start_time = time.time()
while True:
    data, addr = sock.recvfrom(1500) # buffer size is 1500 bytes
print (time.time() - start_time)
start_time = time.time()
| 2.796875
| 3
|
pypeit/core/wavecal/spectrographs/templ_keck_deimos.py
|
mcoughlin/PypeIt
| 0
|
12776991
|
""" Generate the wavelength templates for Keck/DEIMOS"""
import os
from pypeit.core.wavecal import templates
# Keck/DEIMOS
def keck_deimos_600ZD():
binspec = 1
slits = [0, 1]
lcut = [7192.]
xidl_file = os.path.join(templates.template_path, 'Keck_DEIMOS', '600ZD', 'deimos_600.sav')
outroot = 'keck_deimos_600.fits'
templates.build_template(xidl_file, slits, lcut, binspec, outroot, lowredux=True)
def keck_deimos_830G(overwrite=False):
binspec = 1
outroot = 'keck_deimos_830G.fits'
# 3-12 = blue 6508 -- 8410
# 7-24 = blue 8497 -- 9925 (no lines after XeI)
ifiles = [0, 0, 1]
slits = [12, 14, 24]
lcut = [8400., 8480]
wfile1 = os.path.join(templates.template_path, 'Keck_DEIMOS', '830G_M_8600', 'MasterWaveCalib_A_1_03.json')
wfile2 = os.path.join(templates.template_path, 'Keck_DEIMOS', '830G_M_8600', 'MasterWaveCalib_A_1_07.json')
# det_dict
det_cut = {}
det_cut['dets'] = [[1, 2, 3, 4], [5, 6, 7, 8]]
det_cut['wcuts'] = [[0, 9000.], [8200, 1e9]] # Significant overlap is fine
#
templates.build_template([wfile1, wfile2], slits, lcut, binspec, outroot, lowredux=False,
ifiles=ifiles, det_cut=det_cut, chk=True, overwrite=overwrite)
def keck_deimos_1200G(overwrite=False):
binspec = 1
outroot = 'keck_deimos_1200G.fits'
# 3-3 = blue 6268.23 -- 7540
# 3-14 = red 6508 -- 7730
# 7-3 = blue 7589 -- 8821
# 7-17 = red 8000 - 9230
# 7c-0 = red 9120 -- 9950
ifiles = [3, 5, 4, 0, 0, 1, 1, 2]
slits = [1261, 1652, 132, 3, 14, 3, 17, 0]
lcut = [5200., 5580., 6800., 7450., 7730., 8170, 9120]
wfile1 = os.path.join(templates.template_path, 'Keck_DEIMOS', '1200G', 'MasterWaveCalib_A_1_03.json')
wfile2 = os.path.join(templates.template_path, 'Keck_DEIMOS', '1200G', 'MasterWaveCalib_A_1_07.json')
wfile3 = os.path.join(templates.template_path, 'Keck_DEIMOS', '1200G', 'MasterWaveCalib_A_1_07c.json')
wfile4 = os.path.join(templates.template_path, 'Keck_DEIMOS', '1200G', '1200G_bluetilt',
'MasterWaveCalib_B_1_02_useS1261.fits')
wfile5 = os.path.join(templates.template_path, 'Keck_DEIMOS', '1200G', '1200G_bluetilt',
'MasterWaveCalib_B_1_06_useS0132.fits')
wfile6 = os.path.join(templates.template_path, 'Keck_DEIMOS', '1200G', '1200G_bluetilt',
'MasterWaveCalib_B_1_02_useS1652.fits')
#wfile7 = os.path.join(templates.template_path, 'Keck_DEIMOS', '1200G', '1200G_bluetilt',
# 'MasterWaveCalib_B_1_06_useS1649.fits')
files = [wfile1, wfile2, wfile3, wfile4, wfile5, wfile6] #, wfile7]
# det_dict
det_cut = None
# det_cut = {}
# det_cut['dets'] = [[1,2,3,4], [5,6,7,8]]
# det_cut['wcuts'] = [[0,9000.], [8200,1e9]] # Significant overlap is fine
#
templates.build_template(files, slits, lcut, binspec, outroot, lowredux=False,
ifiles=ifiles, det_cut=det_cut, chk=True, subtract_conti=True,
overwrite=overwrite, shift_wave=True)
def keck_deimos_1200B(overwrite=False):
binspec = 1
outroot = 'keck_deimos_1200B.fits'
# PypeIt fits
wpath = os.path.join(templates.template_path, 'Keck_DEIMOS', '1200B')
basefiles = ['MasterWaveCalib_A_1_02_useS0106.fits', 'MasterWaveCalib_A_1_02_useS0291.fits',
'MasterWaveCalib_A_1_06_useS0106.fits', 'MasterWaveCalib_A_1_06_useS0287.fits']
wfiles = [os.path.join(wpath, basefile) for basefile in basefiles]
# Snippets
ifiles = [1, 0, 1, 0, 3, 2]
slits = [291, 106, 291, 106, 287, 106]
wv_cuts = [4493., 4870., 5100., 5260., 5810.]
assert len(wv_cuts) == len(slits)-1
# det_dict
det_cut = None
#
templates.build_template(wfiles, slits, wv_cuts, binspec, outroot,
ifiles=ifiles, det_cut=det_cut, chk=True, normalize=False, lowredux=False,
subtract_conti=True, overwrite=overwrite, shift_wave=True)
def keck_deimos_900ZD(overwrite=False):
binspec = 1
outroot = 'keck_deimos_900ZD.fits'
# PypeIt fits
wpath = os.path.join(templates.template_path, 'Keck_DEIMOS', '900ZD')
basefiles = ['MasterWaveCalib_A_1_01_useS1046.fits', 'MasterWaveCalib_A_1_03_useS0600.fits',
'MasterWaveCalib_A_1_06_useS0054.fits', 'MasterWaveCalib_A_1_02_useS0066.fits',
'MasterWaveCalib_A_1_06_useS0193.fits']
wfiles = [os.path.join(wpath, basefile) for basefile in basefiles]
# Snippets
ifiles = [0, 1, 2, 3, 4, 5]
slits = [1046, 600, 54, 66, 193]
wv_cuts = [5250., 5878., 7100., 8245.]
assert len(wv_cuts) == len(slits)-1
# det_dict
det_cut = None
#
templates.build_template(wfiles, slits, wv_cuts, binspec, outroot,
ifiles=ifiles, det_cut=det_cut, chk=True,
normalize=False, lowredux=False,
subtract_conti=True, overwrite=overwrite,
shift_wave=True)
if __name__ == '__main__':
#keck_deimos_600ZD()
#keck_deimos_830G(overwrite=False) # False for Testing; True for real
#keck_deimos_1200G(overwrite=False)
#keck_deimos_1200B()
keck_deimos_900ZD(overwrite=False)
pass
| 1.953125
| 2
|
setup.py
|
USGS-EROS/lcmap-merlin
| 0
|
12776992
|
from setuptools import find_packages
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='lcmap-merlin',
version='2.3.1',
description='Python client library for LCMAP rasters',
long_description=readme(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: Public Domain',
'Programming Language :: Python :: 3.6',
],
keywords='usgs lcmap eros',
url='http://github.com/usgs-eros/lcmap-merlin',
author='USGS EROS LCMAP',
author_email='',
license='Unlicense',
packages=find_packages(),
install_requires=[
'cytoolz',
'numpy',
'requests',
'python-dateutil',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[test]
extras_require={
'test': ['pytest',
'pytest-cov',
'hypothesis',
'vcrpy',
],
'doc': ['sphinx',
'sphinx-autobuild',
'sphinx_rtd_theme'],
'dev': ['jupyter', 'readline'],
},
# entry_points={
#'console_scripts': [''],
# },
include_package_data=True,
zip_safe=False)
| 1.453125
| 1
|
Day66-75/code/douban/douban/pipelines.py
|
EngrSaad2/Python-100-Days
| 37
|
12776993
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
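# Example settings.py entry to enable this pipeline (assumes the project module is
# named "douban", matching the path of this file; the priority value 300 is arbitrary):
#
#   ITEM_PIPELINES = {
#       'douban.pipelines.DoubanPipeline': 300,
#   }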
class DoubanPipeline(object):
# def __init__(self, server, port):
# pass
# @classmethod
# def from_crawler(cls, crawler):
# return cls(crawler.settings['MONGO_SERVER'],
# crawler.settings['MONGO_PORT'])
def process_item(self, item, spider):
return item
| 2.078125
| 2
|
core/dataset/loader_multi_ocrtoc.py
|
rakeshshrestha31/DIST-Renderer
| 0
|
12776994
|
<gh_stars>0
'''
This file is partly adapted from the original PMO repository
[*] https://github.com/chenhsuanlin/photometric-mesh-optim
'''
import numpy as np
import os, sys
import copy
import json
import torch
import trimesh
import open3d as o3d
import torch.utils.data
import cv2
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
from common.geometry import Camera
class LoaderMultiOCRTOC(torch.utils.data.Dataset):
def __init__(self, data_dir, class_num, scale=1, num_points=10000, focal=None):
self.class_num = class_num
self.data_dir = data_dir
self.seq_list = [
## chairs
# '1b30b6c7-b465-49d8-87e6-dd2314e53ad2',
# 'e5fc4a48-5120-48c7-9f72-b59f53a5c34e',
'70865842-b9d4-4b18-96b0-0cb8776b6f71'
## sofas
# '55f7c741-d263-4049-bb8a-168d9eea1c77'
# '779cc50f-6bfe-41d9-8c27-7671bf77e450'
]
self.scale = scale
self.num_points = num_points
self.focal = focal
def __len__(self):
return len(self.seq_list)
def sample_points_from_ply(self, num_points_all, ply_fname):
pcd = o3d.io.read_point_cloud(ply_fname)
points = np.asarray(pcd.points, dtype=np.float32)
random_indices = np.random.choice(
range(points.shape[0]), num_points_all, replace=False
)
return points[random_indices, :]
def resize_image(self, image, scale):
h, w = image.shape[:2]
new = np.zeros(image.shape)
ns_h, ns_w = int(h*scale), int(w*scale)
if scale < 1:
            # cv2.resize expects dsize as (width, height), so pass (ns_w, ns_h)
            new[int(h/2 - ns_h/2):int(h/2 + ns_h/2), int(w/2 - ns_w/2):int(w/2 + ns_w/2)] = cv2.resize(image, (ns_w, ns_h))
        else:
            new_img = cv2.resize(image, (ns_w, ns_h))
h_new, w_new = new_img.shape[:2]
new = new_img[int(h_new/2 - h/2):int(h_new/2 + h/2), int(w_new/2 - w/2):int(w_new/2 + w/2)]
return new
def __getitem__(self, idx):
instance_name = self.seq_list[idx]
cam = np.load(
os.path.join(self.data_dir, instance_name, 'dist_camera_data.npz'),
allow_pickle=True
)
# rgb_dir = os.path.join(self.data_dir, instance_name, 'segmented_color')
rgb_dir = os.path.join(self.data_dir, instance_name, 'rgb_undistort')
mask_dir = os.path.join(self.data_dir, instance_name, 'mask')
img_list = []
mask_list = []
camera_list = []
for img_idx, (cam_id, extr) in enumerate(cam['extr'].item().items()):
if not 0 <= cam_id <= 53:
continue
# rgba = cv2.imread(
# os.path.join(rgb_dir, f'segmented_color_{cam_id:03}.png'),
# cv2.IMREAD_UNCHANGED
# ).astype(np.float32) / 255.0
# img_cur = rgba[..., :3]
# mask_cur = rgba[..., 3]
img_cur = cv2.imread(
os.path.join(rgb_dir, f'color_{cam_id:03}.png')
).astype(np.float32) / 255.0
mask_cur = cv2.imread(
os.path.join(mask_dir, f'mask_{cam_id:03}.png'),
cv2.IMREAD_UNCHANGED
).astype(np.float32) / 255.0
cam_cur = Camera(cam['intr'], extr)
if self.focal is not None:
img_cur = self.resize_image(img_cur, self.focal)
                mask_cur = self.resize_image(mask_cur.astype(float), self.focal)
                mask_cur[mask_cur<1] = 0
                mask_cur = mask_cur.astype(bool)
cam_cur.intrinsic[0, 0] = cam_cur.intrinsic[0, 0]*self.focal
cam_cur.intrinsic[1, 1] = cam_cur.intrinsic[1, 1]*self.focal
if self.scale != 1:
                mask_cur = cv2.resize(mask_cur.astype(float), None, fx=self.scale, fy=self.scale)
                mask_cur[mask_cur<1] = 0
                mask_cur = mask_cur.astype(bool)
img_cur = cv2.resize(img_cur, None, fx=self.scale, fy=self.scale)
cam_cur.intrinsic[:2] = cam_cur.intrinsic[:2] * self.scale
cam_cur.intrinsic[0, 2] = img_cur.shape[1] / 2.0
cam_cur.intrinsic[1, 2] = img_cur.shape[0] / 2.0
img_list.append(torch.from_numpy(img_cur).float())
mask_list.append(torch.from_numpy(mask_cur).type(torch.uint8).cuda())
camera_list.append(cam_cur)
# get gt point cloud
ply_fname = os.path.join(
self.data_dir, instance_name, 'gt_labels_dist.ply'
)
points_gt = self.sample_points_from_ply(self.num_points, ply_fname)
return instance_name, img_list, mask_list, camera_list, points_gt
| 1.71875
| 2
|
tests/test_models/test_state.py
|
lowercaselife/AirBnB_clone
| 0
|
12776995
|
<reponame>lowercaselife/AirBnB_clone
#!/usr/bin/python3
"""State unittests"""
import unittest
from models.state import State
import datetime
import time
class TestState(unittest.TestCase):
"""class TestState"""
def test_state_class_membership_and_attributes(self):
"""State is right class with correct attrs"""
state = State()
self.assertIsNotNone(state.id)
self.assertIsNotNone(state.created_at)
self.assertIsNotNone(state.updated_at)
self.assertIsInstance(state, State)
self.assertIsNotNone(state.name)
def test_state_attr_type(self):
"""State attributes are correct type"""
state = State()
self.assertIsInstance(state.id, str)
self.assertEqual(len(state.id), 36)
self.assertIsInstance(state.created_at, datetime.datetime)
self.assertIsInstance(state.updated_at, datetime.datetime)
self.assertIsInstance(state.name, str)
def test_state_updated_at_matches_created_at_initialization(self):
"""State updated_at is same as create_at"""
state = State()
self.assertEqual(state.updated_at, state.created_at)
def test_state_str_method(self):
"""State str method creates accurate representation"""
state = State()
state_str = state.__str__()
self.assertIsInstance(state_str, str)
self.assertEqual(state_str[:7], '[State]')
self.assertEqual(state_str[8:46], '({})'.format(state.id))
self.assertDictEqual(eval(state_str[47:]), state.__dict__)
def test_state_save_method(self):
"""State save method alters update_at date"""
state = State()
time.sleep(0.0001)
state.save()
self.assertNotEqual(state.updated_at, state.created_at)
def test_state_to_dict_method(self):
"""State to_dict method creates accurate dictionary"""
state = State()
state_dict = state.to_dict()
self.assertIsInstance(state_dict, dict)
self.assertEqual(state_dict['id'], state.id)
self.assertEqual(state_dict['__class__'], type(state).__name__)
self.assertEqual(
state_dict['created_at'], state.created_at.isoformat())
self.assertEqual(
state_dict['updated_at'], state.updated_at.isoformat())
self.assertIsInstance(state.created_at, datetime.datetime)
self.assertIsInstance(state.updated_at, datetime.datetime)
def test_state_dict_to_instance_with_kwargs(self):
"""State can instantiate new object with dictionary"""
state = State()
state.name = "Betty"
state.number = 972
state_dict = state.to_dict()
new_state = State(**state_dict)
new_state_dict = new_state.to_dict()
self.assertFalse(new_state is state)
self.assertDictEqual(new_state_dict, state_dict)
def test_state_dict_to_instance_with_empty_kwargs(self):
"""State can instantiate new object with empty dict"""
state_dict = {}
new_state = State(**state_dict)
new_state_dict = new_state.to_dict()
self.assertIsInstance(new_state, State)
self.assertIsNotNone(new_state.id)
self.assertIsNotNone(new_state.created_at)
self.assertIsNotNone(new_state.updated_at)
if __name__ == '__main__':
unittest.main()
| 3.0625
| 3
|
reproschema/models/tests/test_schema.py
|
sanuann/reproschema-py
| 3
|
12776996
|
<filename>reproschema/models/tests/test_schema.py
from .. import Protocol, Activity, Item
def test_constructors():
Protocol()
Activity()
Item()
version = "1.0.0-rc2"
proto = Protocol(version=version)
assert proto.schema["schemaVersion"] == version
act = Activity(version)
assert act.schema["schemaVersion"] == version
item = Item(version)
assert item.schema["schemaVersion"] == version
def test_constructors_from_data():
Protocol.from_data({"@type": "reproschema:Protocol"})
Activity.from_data({"@type": "reproschema:Activity"})
Item.from_data({"@type": "reproschema:Field"})
| 2.203125
| 2
|
core/views/__init__.py
|
capy-pl/nccu-grade-system
| 2
|
12776997
|
from .DeleteView import DeleteView
from .ListView import ListView
from .View import View
from .CreateView import CreateView
from .UpdateView import UpdateView
from .DetailView import DetailView
| 1.164063
| 1
|
tests/test_services.py
|
cornelius-muhatia/flask-resource-chassis
| 0
|
12776998
|
from datetime import datetime
from unittest import TestCase
from flask import Flask
from flask_sqlalchemy import SQLAlchemy, Model
from sqlalchemy import Integer, String, Column, func
from flask_resource_chassis import ChassisService, ValidationError
class Test(Model):
id = Column(Integer, primary_key=True)
name = Column(String(5), nullable=False)
class TestChassisService(TestCase):
def setUp(self):
self.app = Flask(__name__)
self.db = SQLAlchemy(self.app)
class Gender(self.db.Model):
id = self.db.Column(self.db.Integer, primary_key=True)
gender = self.db.Column(self.db.String(254), nullable=False)
is_deleted = self.db.Column(self.db.Boolean, nullable=False, default=False)
is_active = self.db.Column(self.db.Boolean, nullable=False, default=True)
created_at = self.db.Column(self.db.DateTime, nullable=False, server_default=func.now(),
default=datetime.utcnow)
self.Gender = Gender
self.service = ChassisService(self.app, self.db, self.Gender)
self.db.create_all()
def test_create(self):
"""
Tests entity successful creation
"""
gender = self.Gender()
gender.gender = "Male"
gender = self.service.create(gender)
self.assertIsNotNone(gender.id)
def test_update(self):
"""
Test ChassisService update() method. Test cases include:
1. Successful entity update
2. id validation
"""
gender = self.Gender()
gender.gender = "Female"
gender = self.service.create(gender)
try:
self.service.update(self.Gender(), -1)
self.fail("Chassis service id validation failed")
except ValidationError:
pass
gender2 = self.Gender()
gender2.gender = "Trans-Gender"
gender2.is_active = False
self.service.update(gender2, gender.id)
gender3 = self.Gender.query.filter_by(id=gender.id).first()
self.assertEqual(gender3.gender, gender2.gender)
self.assertEqual(gender3.is_active, gender2.is_active)
self.assertEqual(gender3.created_at, gender.created_at)
def test_delete(self):
"""
Test ChassisService delete() method
"""
gender = self.Gender()
gender.gender = "Female"
gender = self.service.create(gender)
self.service.delete(gender.id)
gender = self.Gender.query.filter_by(id=gender.id).first()
self.assertTrue(gender.is_deleted)
| 3
| 3
|
intrinsic/evaluation/classifiers/dataset_readers/word_vector_reader.py
|
UKPLab/linspector
| 21
|
12776999
|
from typing import Dict, List, Iterator
import json
import logging
from overrides import overrides
import tqdm
import os
import sys
import codecs
import numpy as np
from allennlp.common import Params
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, ArrayField, TextField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
@DatasetReader.register("word_vectors")
class WordVectorDatasetReader(DatasetReader):
"""
Reads a text file for classification task
Expected format for each input line: word and tag (optional), separated by a tab.
The output of ``read`` is a list of ``Instance``s with the following fields:
token: ``TokenField``
label: ``LabelField``
Parameters
----------
lazy : ``bool`` (optional, default=False)
Passed to ``DatasetReader``. If this is ``True``, training will start sooner, but will
take longer per batch. This also allows training with datasets that are too large to fit
in memory.
"""
def __init__(self,
lazy: bool = False,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)}
@overrides
def text_to_instance(self, token: List[Token], label: str = None) -> Instance:
token_field = TextField(token, self._token_indexers)
fields = {'token': token_field}
if label is not None:
fields['label'] = LabelField(label)
return Instance(fields)
@overrides
def _read(self, file_path: str) -> Iterator[Instance]:
"""
Reads input file.
Args:
file_path (str): path for file
"""
with codecs.open(file_path, encoding='utf-8') as f:
for line in f:
items = line.strip().split('\t')
token = items[0]
# label is optional
if len(items) > 1:
label = items[1]
else:
label = None
yield self.text_to_instance([Token(token)], label)
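# Hedged usage sketch (added): reading a tab-separated "word<TAB>label" file with this reader.
# Assumes the AllenNLP version whose API is used above; "words.tsv" is a hypothetical path.
if __name__ == "__main__":
    reader = WordVectorDatasetReader()
    for instance in reader.read("words.tsv"):
        print(instance.fields["token"], instance.fields.get("label"))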
| 2.359375
| 2
|
DictionaryCreator.py
|
Ramin-RX7/DramaX
| 14
|
12777000
|
<reponame>Ramin-RX7/DramaX
import sys
import argparse
import rx7 as rx
from LIB import Dictionary
from LIB.Functions import wait_for_input, get_files, pause
# TODO: add write to file directly
print = rx.style.print
BANNER = '''
`7MM"""Yb. .g8"""bgd
MM `Yb. .dP' `M
MM `Mb dM' `
MM MM MM
MM ,MP MM.
MM ,dP' `Mb. ,'
.JMMmmmdP' ictionary `"bmmmd' reator
'''
if __name__ == "__main__":
while True:
rx.cls()
print(BANNER,'gold_3b')
        print('BE CAREFUL WHEN USING THIS PROGRAM TO CREATE LARGE DICTIONARIES (MORE THAN 50M WORDS)!', 'red', style='bold')
print('DO NOT TRY THIS FOR LARGE NUMBER OF WORDS (MORE THAN 100M WORDS)!\n', 'red', style='bold')
if len(sys.argv) > 1:
parser = argparse.ArgumentParser(
'Dictionary Creator',
description='Use this app to create dictionaries',
allow_abbrev=False,
)
parser.add_argument('-c','--characters',
required=True,
help='List of characters to use in Dictionary'
)
parser.add_argument('-l','--length',
metavar='LENGTH', required=True, type=int,
help='Max Words Length of Dictionary'
)
parser.add_argument('path',metavar='PATH',
help='Path to a file to save dictionary')
parser.add_argument('-I','--Ignore-Memory', action='store_true',
help='Ignore Memory Warnings and Errors',default=False)
'''
parser.add_argument('-s','--save-memory', action='store_true',
help='Save memory (Lower Speed)')
'''
args = parser.parse_args()
SS = args.characters
LENGTH = args.length
FILE = args.path
ignore_memory = args.Ignore_Memory
#save_memory = args.save_memory
else:
SS = wait_for_input('Enter Characters of Dictionary: ')
while True:
LENGTH = wait_for_input('Enter Max Length of Dictionary: ')
try:
LENGTH = int(LENGTH)
assert LENGTH > 0
break
except:
print('Length Should be an integer and higher than 0')
FILE = get_files('Enter Path To save Dictionary: ',
check_if_exists=False, times=1)[0]
if rx.files.exists(FILE):
print('[*] File Already Exists.', 'dodger_blue_1')
replace = wait_for_input('Replace File? [Yes/No] ')
if not replace.lower() in ('y','yes'):
print('[-] Operation Cancelled By User.', 'red')
sys.exit()
ignore_memory = False
#save_memory = wait_for_input('Save Memory? [Yes/No] ')
TOTAL = 0
ALL_CHARS = 0
for i in range(1, LENGTH+1):
TOTAL += len(SS)**i
ALL_CHARS += len(SS)**i * (i+2)
#< Checking Memory >#
if ALL_CHARS*10+200000000 > int(rx.system.ram_free(False)):
print('[-] NOT ENOUGH MEMORY TO START OPERATION', 'red')
print(f'(This dictionary needs "{rx.convert_bytes(ALL_CHARS*10+200000000)}" But you have "{rx.system.ram_free()}")')
pause()
sys.exit()
if ignore_memory and ALL_CHARS*10+1500000000 > int(rx.system.ram_total(False))//2:
print('[*] This Dictionary Needs More than Half of Your Memory', 'red')
sure = wait_for_input('Enter "yes" to Start [Yes/No] ')
if not sure.lower() in ('y','yes'):
print('[-] Operation Cancelled By User.', 'red')
sys.exit()
rx.cls()
print(BANNER,'gold_3b')
print('Characters to use: ', end='')
print(SS, 'dodger_blue_1')
print('Words Max Length: ', end='')
print(LENGTH, 'dodger_blue_1')
#if not save_memory:
print('Required Memory: ', end='')
print(rx.convert_bytes(ALL_CHARS*10+(ALL_CHARS*10)/2.5), 'dodger_blue_1')
print()
START = rx.record()
DICT = []
i = 1
        Progress = max(1, TOTAL//100)  # int(str(TOTAL//100)[:2]+'00'); max() avoids modulo-by-zero for tiny dictionaries
for word in Dictionary.dict_creator_generator(SS, LENGTH):
DICT.append(word)
i += 1
if i % Progress == 0:
print('\r'+'[*] Generating Words: '+str(i)+'/'+str(TOTAL),'dodger_blue_1', end='')
'''
if save_memory:
rx.write(FILE,'\n'.join(DICT),'a','\n')
DICT = []
'''
print('\r[*] Number of Generated Words: '+str(TOTAL), 'dodger_blue_1')
        print(f'[*] Operation finished in {START.lap(Round=4)} seconds', 'dodger_blue_1')
rx.write(FILE, '\n'.join(DICT))
print('[+] Dictionary File Has Been Created Successfully.', 'green')
print(f'[*] (Address: {rx.files.abspath(FILE)})', 'dodger_blue_1')
del DICT
print()
pause('Press Enter To Exit ...')
sys.exit()
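# Hedged sketch (added): LIB.Dictionary is not included in this file, so its generator is not
# shown. A standalone generator with the same apparent contract (every word of length
# 1..max_length over the given characters) can be built with itertools.product; this is an
# illustration, not the repository's implementation.
import itertools
def dict_creator_generator_sketch(chars, max_length):
    """Yield all words of length 1..max_length over chars, shortest first."""
    for length in range(1, max_length + 1):
        for combo in itertools.product(chars, repeat=length):
            yield ''.join(combo)
# Example: list(dict_creator_generator_sketch("ab", 2)) -> ['a', 'b', 'aa', 'ab', 'ba', 'bb']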
| 2.921875
| 3
|
pysnmp/CISCO-LAG-CAPABILITY.py
|
agustinhenze/mibs.snmplabs.com
| 11
|
12777001
|
<filename>pysnmp/CISCO-LAG-CAPABILITY.py
#
# PySNMP MIB module CISCO-LAG-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-LAG-CAPABILITY
# Produced by pysmi-0.3.4 at Mon Apr 29 17:47:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
NotificationGroup, ModuleCompliance, AgentCapabilities = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "AgentCapabilities")
Gauge32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Counter32, ObjectIdentity, MibIdentifier, iso, Bits, ModuleIdentity, IpAddress, TimeTicks, Unsigned32, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Counter32", "ObjectIdentity", "MibIdentifier", "iso", "Bits", "ModuleIdentity", "IpAddress", "TimeTicks", "Unsigned32", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ciscoLagCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 332))
ciscoLagCapability.setRevisions(('2012-04-02 00:00', '2011-09-27 00:00', '2010-11-01 00:00', '2009-11-19 00:00', '2007-07-10 10:00', '2006-06-15 12:00', '2004-02-04 00:00',))
if mibBuilder.loadTexts: ciscoLagCapability.setLastUpdated('201204020000Z')
if mibBuilder.loadTexts: ciscoLagCapability.setOrganization('Cisco Systems, Inc.')
clagCapV12R0111bEXCat6K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0111bEXCat6K = clagCapV12R0111bEXCat6K.setProductRelease('Cisco IOS 12.1(11b)EX on Catalyst 6000/6500\n and Cisco 7600 series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0111bEXCat6K = clagCapV12R0111bEXCat6K.setStatus('current')
clagCapV12R0217SXCat6KPfc2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0217SXCat6KPfc2 = clagCapV12R0217SXCat6KPfc2.setProductRelease('Cisco IOS 12.2(17)SX on Catalyst 6000/6500\n and Cisco 7600 series devices with PFC2 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0217SXCat6KPfc2 = clagCapV12R0217SXCat6KPfc2.setStatus('current')
clagCapV12R0217SXCat6KPfc3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0217SXCat6KPfc3 = clagCapV12R0217SXCat6KPfc3.setProductRelease('Cisco IOS 12.2(17)SX on Catalyst 6000/6500\n and Cisco 7600 series devices with PFC3 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0217SXCat6KPfc3 = clagCapV12R0217SXCat6KPfc3.setStatus('current')
clagCapCatOSV08R0101 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapCatOSV08R0101 = clagCapCatOSV08R0101.setProductRelease('Cisco CatOS 8.1(1).')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapCatOSV08R0101 = clagCapCatOSV08R0101.setStatus('current')
clagCapV12R0218SXF5PCat6KPfc2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 5))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0218SXF5PCat6KPfc2 = clagCapV12R0218SXF5PCat6KPfc2.setProductRelease('Cisco IOS 12.2(18)SXF5 on Catalyst 6000/6500\n and Cisco 7600 series devices with PFC2 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0218SXF5PCat6KPfc2 = clagCapV12R0218SXF5PCat6KPfc2.setStatus('current')
clagCapV12R0218SXF5PCat6KPfc3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 6))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0218SXF5PCat6KPfc3 = clagCapV12R0218SXF5PCat6KPfc3.setProductRelease('Cisco IOS 12.2(18)SXF5 on Catalyst 6000/6500\n and Cisco 7600 series devices with PFC3 card.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0218SXF5PCat6KPfc3 = clagCapV12R0218SXF5PCat6KPfc3.setStatus('current')
clagCapV12R0233SXHPCat6K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 7))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0233SXHPCat6K = clagCapV12R0233SXHPCat6K.setProductRelease('Cisco IOS 12.2(33)SXH on Catalyst 6000/6500\n devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0233SXHPCat6K = clagCapV12R0233SXHPCat6K.setStatus('current')
clagCapV12R0252SGPCat4K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 8))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0252SGPCat4K = clagCapV12R0252SGPCat4K.setProductRelease('Cisco IOS 12.2(52)SG on Cat4K family devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0252SGPCat4K = clagCapV12R0252SGPCat4K.setStatus('current')
clagCapV12R0250SYPCat6K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 9))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0250SYPCat6K = clagCapV12R0250SYPCat6K.setProductRelease('Cisco IOS 12.2(50)SY on Catalyst 6000/6500\n devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV12R0250SYPCat6K = clagCapV12R0250SYPCat6K.setStatus('current')
clagCapV15R0001SYPCat6k = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 10))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV15R0001SYPCat6k = clagCapV15R0001SYPCat6k.setProductRelease('Cisco IOS 15.0(1)SY on Catalyst 6000/6500\n series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV15R0001SYPCat6k = clagCapV15R0001SYPCat6k.setStatus('current')
clagCapV15R0101SGPCat4K = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 332, 11))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV15R0101SGPCat4K = clagCapV15R0101SGPCat4K.setProductRelease('Cisco IOS 15.1(1)SG on Cat4K family devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
clagCapV15R0101SGPCat4K = clagCapV15R0101SGPCat4K.setStatus('current')
mibBuilder.exportSymbols("CISCO-LAG-CAPABILITY", clagCapV12R0111bEXCat6K=clagCapV12R0111bEXCat6K, clagCapV12R0217SXCat6KPfc3=clagCapV12R0217SXCat6KPfc3, ciscoLagCapability=ciscoLagCapability, clagCapCatOSV08R0101=clagCapCatOSV08R0101, clagCapV12R0233SXHPCat6K=clagCapV12R0233SXHPCat6K, clagCapV15R0101SGPCat4K=clagCapV15R0101SGPCat4K, clagCapV12R0218SXF5PCat6KPfc2=clagCapV12R0218SXF5PCat6KPfc2, clagCapV12R0217SXCat6KPfc2=clagCapV12R0217SXCat6KPfc2, clagCapV12R0252SGPCat4K=clagCapV12R0252SGPCat4K, clagCapV15R0001SYPCat6k=clagCapV15R0001SYPCat6k, clagCapV12R0250SYPCat6K=clagCapV12R0250SYPCat6K, PYSNMP_MODULE_ID=ciscoLagCapability, clagCapV12R0218SXF5PCat6KPfc3=clagCapV12R0218SXF5PCat6KPfc3)
| 1.734375
| 2
|
src/nodemgr/common/docker_containers.py
|
jnpr-pranav/contrail-controller
| 37
|
12777002
|
import docker
import logging
from nodemgr.common.docker_mem_cpu import DockerMemCpuUsageData
class DockerContainersInterface:
def __init__(self):
self._client = docker.from_env()
if hasattr(self._client, 'api'):
self._client = self._client.api
def list(self, all_=True):
return self._client.containers(all=all_)
def inspect(self, id_):
try:
return self._client.inspect_container(id_)
except docker.errors.APIError:
logging.exception('docker')
return None
def execute(self, id_, line_):
exec_op = self._client.exec_create(id_, line_, tty=True)
        socket = None  # ensure the finally-block check below cannot hit an unbound name
        res = ''
try:
# string or stream result works unstable
# using socket with own read implementation
socket = self._client.exec_start(exec_op["Id"], tty=True, socket=True)
socket.settimeout(10.0)
while True:
part = socket.recv(1024)
if len(part) == 0:
break
res += part
finally:
if socket:
# There is cyclic reference there
# https://github.com/docker/docker-py/blob/master/docker/api/client.py#L321
# sock => response => socket
# https://github.com/docker/docker-py/issues/2137
try:
socket._response = None
except AttributeError:
pass
socket.close()
data = self._client.exec_inspect(exec_op["Id"])
e = data.get("ExitCode", 0)
if 0 != e:
logging.critical("Exit data: {}".format(data))
return (e, res)
def query_usage(self, id_, last_cpu_, last_time_):
return DockerMemCpuUsageData(id_, last_cpu_, last_time_)
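# Hedged usage sketch (added): listing running containers and executing a command through the
# wrapper above. Assumes a reachable local Docker daemon; the command is illustrative only.
if __name__ == "__main__":
    iface = DockerContainersInterface()
    for container in iface.list(all_=False):
        print(container.get("Id"), container.get("Names"))
        # exit_code, output = iface.execute(container.get("Id"), "ls /")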
| 2.21875
| 2
|
basics/solutions/get-data-urllib.py
|
carlosal1015/ACM-Python-Tutorials-KAUST-2015
| 5
|
12777003
|
<gh_stars>1-10
import urllib
urllib.urlretrieve("http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data",
"data")
urllib.urlretrieve("http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.names",
"description")
!cat description
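# Hedged note (added): the lines above use the Python 2 urllib API, and "!cat description" is
# IPython shell syntax rather than plain Python. A Python 3 equivalent of the two downloads:
from urllib.request import urlretrieve
urlretrieve("http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data",
            "data")
urlretrieve("http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.names",
            "description")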
| 1.898438
| 2
|
Ta-Irikulau/framework/pomdp/agent/proxy.py
|
mjtsai1974/DevBlog
| 0
|
12777004
|
<filename>Ta-Irikulau/framework/pomdp/agent/proxy.py
from framework.pomdp.basics.histree import HisTreeNode
from framework.pomdp.basics.histree import HisTree
import numpy as np
"""
The design/implementation of the agent in POMDP world
"""
class Agent(object):
def __init__(self):
self._transition_model = None
self._observation_model = None
self._reward_model = None
self._horizon_length = -1
self._discount_factor = 1
self._root_node = None
self._histree = None
self._logger = None
def Configure(self, T, O, R, horizon_length, discount_factor):
self._transition_model = T
self._observation_model = O
self._reward_model = R
self._horizon_length = horizon_length
self._discount_factor = discount_factor
def Inflate(self, root_node, histree, logger):
self._root_node = root_node
self._histree = histree
self._logger = logger
self._logger.Info('Agent initialization...')
"""
Calculate the immediate reward with regard to the updated belief by means of probabilistic simulation
"""
def CalculateImmediateReward(self, updated_belief, a, s_list, o_list):
simulated_prob_ary = np.array(updated_belief) #the very initial is the same as belief state
b_equal_distributed = False
immediate_reward = [-1 for _ in updated_belief]
simulated_prob = [0 for _ in updated_belief]
ret_dict = {}
"""
        I think after the very initial rigidly defined version, I should try to express action/state/observation
        in terms of factors (maybe human readable); that's the topology of expression, possibly the Bayesian one!!
        to be continued...
"""
if a.Name == 'Listen':
            # I think we should return according to the reward model... to be continued
for i in range(len(updated_belief)):
simulated_prob_ary[i] = np.random.random()
simulated_prob[i] = simulated_prob_ary[i]
simulated_prob = self.NormalizeProbability(simulated_prob)
self._logger.Info(' A({}): immediate reward = {}'.format(a.Name, immediate_reward))
self._logger.Info(' simulated probs = {}'.format(simulated_prob))
ret_dict['immediate_reward'] = immediate_reward #directly return -1 as immediate return
ret_dict['simulated_prob'] = simulated_prob
return ret_dict
elif (a.Name == 'Open_Left') or (a.Name == 'Open_Right'):
#Need to simulate tiger position by random with regards to updated_belief
#I think we should simulate according to the given b(s) interval for all s of belief state
if ((np.max(simulated_prob_ary) - np.min(simulated_prob_ary)) < 1e-2) or (np.max(simulated_prob_ary) == np.min(simulated_prob_ary)):
b_equal_distributed = True #if the (max - min < 0.01) or (max == min), treat all belief state equal probabilistically distributed
for i in range(len(updated_belief)):
simulated_prob_ary[i] = np.random.random()
simulated_prob[i] = simulated_prob_ary[i]
simulated_prob = self.NormalizeProbability(simulated_prob)
"""
if (b_equal_distributed == True):
#np.random.seed(len(updated_belief)) #It doesn't meets the requirement
for i in range(len(updated_belief)):
simulated_prob_ary[i] = np.random.random()
else:
#Randomize in distinct interval for each state
for i in range(len(updated_belief)):
simulated_prob_ary[i] = np.random.choice(updated_belief[i], size = 1)
"""
#Get the index of the maximum in simulated_prob_ary
index_max_simulated_prob_ary = np.argmax(simulated_prob_ary) #ignore the case that there might exists euqal probability
immediate_reward[index_max_simulated_prob_ary] = self._reward_model.GetRwrardByActionStateObservation(
o_list[index_max_simulated_prob_ary],
s_list[index_max_simulated_prob_ary],
a)
for i in range(len(immediate_reward)):
if i != index_max_simulated_prob_ary:
immediate_reward[i] = self._reward_model.GetRwrardByActionStateObservation(
o_list[i],
s_list[i],
a)
self._logger.Info(' A({}): immediate reward = {}'.format(a.Name, immediate_reward))
self._logger.Debug(' equal distributed = {}'.format(b_equal_distributed))
self._logger.Info(' simulated probs = {}, max index = {}'.format(simulated_prob, index_max_simulated_prob_ary))
ret_dict['immediate_reward'] = immediate_reward
ret_dict['simulated_prob'] = simulated_prob
return ret_dict
"""
Calculate the accumulative rewards for distinct HisTreeNode in each layer of the simulated HisTree.
We are using bottom up style.
"""
def CalculateSimulationReward(self):
#Figure out the list containing the layer from bottom to the top
layer_bottom_up = [layer for layer in range(self._horizon_length)]
layer_bottom_up.reverse()
self._logger.Info('------------------------------------')
self._logger.Info('Agent calculate simulation reward')
self._logger.Info('------------------------------------')
#Accumulate from the bottom to the top
for layer in layer_bottom_up:
#if we reach layer = 0, just break
if layer == 0:
break
self._logger.Info('Calculate simulation reward over horizon = {}'.format(layer))
#Get all nodes in this layer
all_nodes_in_layer = self._histree.GetHistoryByLayer(layer)
self._logger.Info('Nodes at this layer = {}'.format(all_nodes_in_layer))
#Iterate each node in this layer of HisTree
for node in all_nodes_in_layer:
#Get parent node of current node
parent_node = node.Parent
self._logger.Debug('(node, parent) = ({},{})'.format(node, parent_node))
#Calculate the immediate reward
ary_belief = np.array(node.Belief)
ary_immediate_reward = np.array(node.ImmediateRewardData)
node.ImmediateReward = np.dot(ary_belief, ary_immediate_reward)
self._logger.Debug(' Calculate immediate reward({}) = belief({}) * immediate reward({})'.format(node.ImmediateReward, ary_belief, ary_immediate_reward))
if layer > 1:
#Get index of node as child node in parent node's child list,
#this is the index of parent node's Prob_o_s_a_Data
child_index = parent_node.GetChildIndex(node)
#Calculate temporal reward of this node with respect to its immediate reward
temp_reward = parent_node.Prob_o_s_a_Data[child_index] * node.ImmediateReward * pow((self._discount_factor), (layer - 1))
self._logger.Debug(' Parent node Prob_o_s_a_Data = {}'.format(parent_node.Prob_o_s_a_Data))
self._logger.Debug(' Temporal reward({}) = Parent_node.Prob_o_s_a_Data({}) * node.ImmediateReward({}) * {}^{}'.format(
temp_reward, parent_node.Prob_o_s_a_Data[child_index], node.ImmediateReward, self._discount_factor, (layer - 1)
))
#Add temporal reward of this node and current accumulative reward of this node to parent node's current reward
parent_node.CurrentReward = temp_reward + node.CurrentReward
self._logger.Info(' parent_node.CurrentReward({}) = temporal reward({}) + node.CurrentReward({})'.format(
parent_node.CurrentReward, temp_reward, node.CurrentReward))
else:
temp_reward = node.CurrentReward
node.CurrentReward = node.ImmediateReward + temp_reward
self._logger.Info(' node.FinalReward({}) = node.ImmediateReward({}) + node.CurrentReward({})'.format(
node.CurrentReward, node.ImmediateReward, temp_reward))
"""
Return the normalized probability, since the estimated out beliefs might not be summed to 1
"""
def NormalizeProbability(self, probs):
normalized_probs = [0 for _ in range(len(probs))]
total_prob = 0
for b in probs:
total_prob += b
for i, b in enumerate(probs):
normalized_probs[i] = b / total_prob
return normalized_probs
"""
Return parent nodes with regards to the same or different observation
"""
def GetRelatedNodesByObservation(self, o, parent_nodes, the_same_as_o = 1):
if (len(parent_nodes) == 1) and (parent_nodes[0].Parent == None): #The root node
return parent_nodes #Return only the root node
else:
if the_same_as_o == 1:
return [p for p in parent_nodes if (p.ObservationByActionTag.endswith(o.Name) == True)]
else:
return [p for p in parent_nodes if (p.ObservationByActionTag.endswith(o.Name) == False)]
"""
Return parent nodes with regards to the given world state of s(input by caller in its observation o) to be the prior
"""
def GetPriorNodesByObservation(self, o, world_s, parent, parent_nodes, the_same_as_world_state = 1, include_parent = 1):
parent_nodes_o = None
if (len(parent_nodes) == 1) and (parent_nodes[0].Parent == None): #The root node
return parent_nodes #Return only the root node
else:
if the_same_as_world_state == 1:
parent_nodes_o = [p for p in parent_nodes if (p.ObservationByActionTag.endswith(world_s.Name) == True)]
else:
parent_nodes_o = [p for p in parent_nodes if (p.ObservationByActionTag.endswith(world_s.Name) == False)]
if (include_parent == 1): #and (o.Name != world_s.Name):
if parent not in parent_nodes_o:
parent_nodes_o.insert(0, parent)
if (include_parent == 0):
#Remove the parent node in the parent_nodes_o, if any
if parent in parent_nodes_o:
parent_nodes_o.remove(parent)
return parent_nodes_o
"""
Calculate the total probability of transiting from si to sj(represented by o) after action a has been taken
"""
def GetTotalTransitiveProbability(self, a, sj, s_list, parent_nodes):
#Get related transitive probability from si to sj by action a
transitive_probability_list = [self._transition_model.Probability(si, sj, a) for si in s_list]
total_transitive_probability = 0
for p in parent_nodes:
for i, b in enumerate(p.Belief):
total_transitive_probability += transitive_probability_list[i] * b
return total_transitive_probability
"""
    Do the belief update given the action, the observation, and the parent nodes.
    It returns [b'(s1), b'(s2), ..., b'(sn)] from the prior b, where
    this input o is the world state we assume/believe we are in.
"""
def UpdateBelief(self, a, o, s_list, parent_node, parent_nodes):
"""
This input parameter o implies that we believe that we are in the state indicated by observation o
"""
updated_belief = [0.0 for _ in range(len(s_list))]
parents_nodes_o = None #[] #[0] the set of the same o, [1]the set of different o
parents_nodes_o_s = None
#parents_nodes_o.append(self.GetRelatedNodesByObservation(o, parent_nodes, 1))
#parents_nodes_o.append(self.GetRelatedNodesByObservation(o, parent_nodes, 0))
for i, s in enumerate(s_list):
likeli = self._observation_model.Probability(s, o, a) #The probability that you make s in o(to be believed world state), after by action a
#Calculate probability of making observation s,P(s|s), under world state o, by action a
if o.Name == s.Name:
if len(parent_nodes) == 2:
parents_nodes_o = [parent_node]
else:
parents_nodes_o = self.GetPriorNodesByObservation(s, o, parent_node, parent_nodes, 1, 1)
#We believe we make observation o, in state o(o=s), should choose parent nodes of the same observation o
prob_transite_to_s = self.GetTotalTransitiveProbability(a, s, s_list, parents_nodes_o)
prob_o_s_a = likeli * prob_transite_to_s
else:
if len(parent_nodes) == 2: #for the case 2 parent nodes, when the given world state o is different from current enumerated b(s), this is not a good design...to be conti
parents_nodes_o = [parent_node]
else:
parents_nodes_o = self.GetPriorNodesByObservation(s, o, parent_node, parent_nodes, 1, 1)
#We believe we make observation o, in state s(o!=s), should choose parent nodes of the different observation o
prob_transite_to_s = self.GetTotalTransitiveProbability(a, s, s_list, parents_nodes_o)
prob_o_s_a = likeli * prob_transite_to_s
self._logger.Info('b({})'.format(s))
            self._logger.Debug(' Numerator part:')
self._logger.Debug(' Likeli of O(P({}|{})) in W_S({}) by A({}) = {}'.format(s, s, o, a, likeli))
self._logger.Debug(' Total transitive probability to S({}) = {}'.format(s, prob_transite_to_s))
self._logger.Debug(' Parent nodes for O(P({}|{})), W_S({}) by A({}):'.format(s, s, o, a))
for p in parents_nodes_o:
self._logger.Debug(' {}'.format(p))
self._logger.Debug(' {}'.format(prob_o_s_a))
#Build the parent node list to be used for the unvisited states
parents_nodes_o_s = []
for p in parent_nodes:
if p not in parents_nodes_o:
parents_nodes_o_s.append(p)
if len(parents_nodes_o_s) == 0:
parents_nodes_o_s = [parent_node]
#Calculate the total probability of making observation s, in all states s'(s_prime), by action a
self._logger.Debug(' Denominator part:')
prob_o_a = 0
for s_prime in s_list: #s_prime is treated as world state in this loop
if s_prime.Name == o.Name:
                    continue  # this state was already handled in the numerator part above; skip to the next
likeli = self._observation_model.Probability(s, s_prime, a)
prob_transite_to_s_prime = self.GetTotalTransitiveProbability(a, s, s_list, parents_nodes_o_s)
prob_o_a += likeli * prob_transite_to_s_prime
"""
if o.Name == s_prime.Name:
#We believe we make observation s, in state s_prime, should choose parent nodes of the same observation o,
#containing the input parent node in it
parents_nodes_o_s = self.GetPriorNodesByObservation(s, s_prime, parent_node, parent_nodes, 1, 1)
prob_transite_to_s_prime = self.GetTotalTransitiveProbability(a, s, s_list, parents_nodes_o_s)
prob_o_a += likeli * prob_transite_to_s_prime
else:
#We believe we make observation s, in state s_prime(o!=s_prime), should choose parent nodes of the different observation o,
#excluding the input parent node in it
parents_nodes_o_s = self.GetPriorNodesByObservation(s, s_prime, parent_node, parent_nodes, 1, 0)
prob_transite_to_s_prime = self.GetTotalTransitiveProbability(a, s, s_list, parents_nodes_o_s)
prob_o_a += likeli * prob_transite_to_s_prime
"""
self._logger.Debug(' Likeli of O(P({}|{})) in W_S({}) by A({}) = {}'.format(s, s, s_prime, a, likeli))
self._logger.Debug(' Total transitive probability to S({}) = {}'.format(s, prob_transite_to_s_prime))
self._logger.Debug(' Parent nodes for O(P({}|{})), W_S({}) by A({}):'.format(s, s, s_prime, a))
for p in parents_nodes_o_s:
self._logger.Debug(' {}'.format(p))
self._logger.Debug(' {}'.format(prob_o_a))
            # Add the numerator part to the denominator part to obtain the complete total probability
prob_o_a += prob_o_s_a
updated_belief[i] = prob_o_s_a / prob_o_a
self._logger.Debug('{}'.format(updated_belief[i]))
updated_belief = self.NormalizeProbability(updated_belief)
self._logger.Info('normalize {}'.format(updated_belief))
return updated_belief
"""
The simulation evaluates out a convergent result in tree structure, according to which the agent
can run or predict the optimal action. It is the monotonic action manner with the tree structure
represented in the aggregation of the same action a.
"""
def Simulation(self, a_list, o_list, s_list):
self._logger.Info('Agent simulation...')
actions = a_list
observations = o_list
states = s_list
index = 0
#loop until we reach horizon_length
for layer in range(self._horizon_length):
self._logger.Info('------------------------------------')
self._logger.Info('Agent simulation over horizon = {}'.format(layer))
self._logger.Info('------------------------------------')
#Get all nodes in each layer and make distinct evaluation
parents = self._histree.GetHistoryByLayer(layer)
self._logger.Info('All parent nodes:')
for p in parents:
self._logger.Info(' {}'.format(p))
index = 0
#Enumerate in the unit of each action
for a in actions:
                # Generate a new histree node according to the (a, o) combination
if layer == 0:
p_a_list = parents #It is the root node only
else:
p_a_list = [p for p in parents if (p.ObservationByActionTag.startswith(a.Name) == True)]
self._logger.Info('Parent nodes of A({}):'.format(a))
for p in p_a_list:
self._logger.Info(' {}'.format(p))
#Enumerate each parent of this same action
for parent in p_a_list:
#Enumerate each observation after taking action a
for o in observations:
self._logger.Info('Enumerate A({}), O({}) from {}'.format(a, o, parent))
#This o is the to be believed world state
updated_belief = self.UpdateBelief(a, o, states, parent, p_a_list)
#Calculate the immediate reward during simulation phase
ret_dict = self.CalculateImmediateReward(updated_belief, a, s_list, o_list)
#Create HisTreeNode object for each a, o at this layer
obj_HisTreeNode = HisTreeNode(parent, a, o, updated_belief, (layer + 1), index)
obj_HisTreeNode.ImmediateRewardData = ret_dict['immediate_reward']
obj_HisTreeNode.Prob_o_s_a_Data = ret_dict['simulated_prob']
index += 1
#Add this new HisTreeNode object to HisTree at (layer + 1)
self._histree.AddNodeInHistory((layer + 1), obj_HisTreeNode)
#Calculate the simulated rewards with regard to each action
self.CalculateSimulationReward()
def Run(self):
pass
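# Hedged sketch (added): the repository's transition/observation model classes are not shown in
# this file, so the following is a standalone, textbook POMDP belief update,
#     b'(s') ∝ O(o | s', a) * sum_s T(s' | s, a) * b(s),
# the standard formula that UpdateBelief above approximates. The dictionary keys are
# illustrative; this does not use the repository's API.
def belief_update_sketch(belief, action, observation, T, O, states):
    """belief: {s: prob}; T[(s, a, s2)] = P(s2 | s, a); O[(s2, a, o)] = P(o | s2, a)."""
    new_belief = {}
    for s2 in states:
        prior = sum(T[(s, action, s2)] * belief[s] for s in states)
        new_belief[s2] = O[(s2, action, observation)] * prior
    total = sum(new_belief.values())
    return {s: p / total for s, p in new_belief.items()} if total > 0 else new_belief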
| 2.703125
| 3
|
experiments/mj60/wf_cutter.py
|
sweigart/pygama
| 13
|
12777005
|
#!/usr/bin/env python3
import os, time, json
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.colors import LogNorm
import scipy.signal as signal
import argparse
import pdb
import tinydb as db
from pygama import DataSet
from pygama.analysis.calibration import *
from pygama.analysis.histograms import *
import pygama.utils as pgu
from matplotlib.lines import Line2D
from pygama.utils import set_plot_style
set_plot_style("clint")
def main():
"""
mj60 waveform viewer
"""
run_db, cal_db = "runDB.json", "calDB.json"
par = argparse.ArgumentParser(description="waveform viewer for mj60")
arg, st, sf = par.add_argument, "store_true", "store_false"
arg("-ds", nargs='*', action="store", help="load runs for a DS")
arg("-r", "--run", nargs=1, help="load a single run")
arg("-db", "--writeDB", action=st, help="store results in DB")
args = vars(par.parse_args())
# -- declare the DataSet --
if args["ds"]:
ds_lo = int(args["ds"][0])
try:
ds_hi = int(args["ds"][1])
except:
ds_hi = None
ds = DataSet(ds_lo, ds_hi,
md=run_db, cal = cal_db) #,tier_dir=tier_dir)
if args["run"]:
ds = DataSet(run=int(args["run"][0]),
md=run_db, cal=cal_db)
    # Which run number is being analyzed
# run = 249
# run = 214
# run = 204
# run = 278
# working on analysis for the AvsE cut in mj60
# t1df, t2df = chunker(run)
# cutwf, t2cut = cutter(t1df, t2df, run)
# histograms(cutwf, t2cut, run)
# histograms(ds)
drift_correction(ds, ds_lo)
# def histograms(t1df, t2df, run):
def histograms(ds):
t2 = ds.get_t2df()
print(t2.columns)
exit()
t2df = os.path.expandvars('{}/Spectrum_{}.hdf5'.format(meta_dir,run))
t2df = pd.read_hdf(t2df, key="df")
# n = "tslope_savgol"
# n = "current_max"
# n = "tslope_pz"
n = "tail_tau"
# n = "tail_amp"
e = "e_cal"
x = t2df[e]
# y = t2df[n]
y = t2df[n] / x
plt.clf()
# H, xedges, yedges = np.histogram2d(t2df["tail_tau"], t2df["e_ftp"], bins=[2000,200], range=[[0, 6600], [0, 5]])
plt.hist2d(x, y, bins=[1000,200], range=[[0, 200], [0, .001]], norm=LogNorm(), cmap='jet')
# plt.hist2d(x, y, bins=[1000,1000], norm=LogNorm())
# plt.scatter(H[0],H[1])
# f = plt.figure(figsize=(20,5))
# p1 = f.add_subplot(111, title='Test', xlabel='Energy (keV)', ylabel=n)
# h1,xedg1,yedg1 = np.histogram2d(x, y, bins=[1000,200], range=[[0,2000],[0,100]])
# h1 = h1.T
# # hMin, hMax = np.amin(h1), np.amax(h1)
# # im1 = p1.imshow(h1,cmap='jet',vmin=hMin,vmax=hMax, aspect='auto') #norm=LogNorm())
# im1 = p1.imshow(h1,cmap='jet', origin='lower', aspect='auto', norm=LogNorm(), extent=[xedg1[0], xedg1[-1], yedg1[0], yedg1[-1]])
# cb1 = f.colorbar(im1, ax=p1)#, fraction=0.037, pad=0.04)
cbar = plt.colorbar()
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.title("Run {}".format(run))
plt.xlabel("Energy (keV)", ha='right', x=1)
plt.ylabel(n, ha='right', y=1)
# cbar.ax.set_ylabel('Counts')
# plt.ylabel("tslope_savgol", ha='right', y=1)
# plt.ylabel("A/E_ftp", ha='right', y=1)
# plt.tight_layout()
# # plt.savefig('./plots/meeting_plots/run{}_{}_vs_{}.png'.format(run, n, e))
# plt.show()
# xlo, xhi, xpb = 0, 10000, 10
# xP, hP = get_hist(t2df["trap_max"], xlo, xhi, xpb)
#
# plt.plot(xP, hP, ls='steps', lw=1.5, c='m',
# label="pygama trap_max, {} cts".format(sum(hP)))
# plt.xlabel("Energy (uncal)", ha='right', x=1)
# plt.ylabel("Counts", ha='right', y=1)
# plt.legend()
plt.tight_layout()
plt.show()
def chunker(run):
t1df = os.path.expandvars('{}/t1_run{}.h5'.format(tier_dir,run))
t2df = os.path.expandvars('{}/Spectrum_{}.hdf5'.format(meta_dir,run))
t2df = pd.read_hdf(t2df, key="df")
t2df_chunk = t2df[:75000]
key = "/ORSIS3302DecoderForEnergy"
wf_chunk = pd.read_hdf(t1df, key, where="ievt < {}".format(75000))
wf_chunk.reset_index(inplace=True) # required step -- fix pygama "append" bug
t2df = t2df.reset_index(drop=True)
# create waveform block. mask wfs of unequal lengths
icols = []
for idx, col in enumerate(wf_chunk.columns):
if isinstance(col, int):
icols.append(col)
wf_block = wf_chunk[icols].values
# print(wf_block.shape, type(wf_block))
# print(t2df_chunk)
return wf_block, t2df_chunk
def cutter(t1df, t2df, run):
# t2cut = t2df.loc[(t2df.e_cal>3.1099]
t2cut = t2df
print(t2cut.index)
print(t2cut)
cutwf = t1df[t2cut.index]
print(cutwf)
# xvals = np.arange(0,3000)
# start = time.time()
# for i in range(len(t2cut.index)):
# # for i in range(0,5):
# plt.plot(xvals, cutwf[i], lw=1)
# plt.xlabel('Sample Number', ha='right', x=1.0)
# plt.ylabel('ADC Value', ha='right', y=1.0)
# plt.tight_layout()
# plt.show()
return cutwf, t2cut
def drift_correction(ds, ds_lo):
## testing a drift time correction code
# t1df = ds.get_t1df()
# t1df.reset_index(inplace=True)
# t2df = ds.get_t2df()
"""
Take a single DataSet and window it so that the output file only contains
events near an expected peak location.
"""
# a user has to figure out the uncalibrated energy range of the K40 peak
# xlo, xhi, xpb = 0, 2e6, 2000 # show phys. spectrum (top feature is 2615 pk)
# xlo, xhi, xpb = 990000, 1030000, 250 # k40 peak, ds 3
t2df = ds.get_t2df()
calDB = ds.calDB
query = db.Query()
table = calDB.table("cal_pass1")
vals = table.all()
df_cal = pd.DataFrame(vals) # <<---- omg awesome
df_cal = df_cal.loc[df_cal.ds==ds_lo]
p1cal = df_cal.iloc[0]["p1cal"]
cal = p1cal * np.asarray(t2df["e_ftp"])
xlo = 2.46e6
xhi = 2.5e6
    hE, xE = get_hist(t2df["energy"], bins=100, range=(xlo, xhi))  # get_hist comes from pygama.analysis.histograms; the "ph" alias was never imported here
plt.semilogy(xE, hE, ls='steps', lw=1, c='r')
import matplotlib.ticker as ticker
plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.4e'))
plt.locator_params(axis='x', nbins=5)
plt.xlabel("Energy (uncal.)", ha='right', x=1)
plt.ylabel("Counts", ha='right', y=1)
plt.show()
# plt.savefig(f"./plots/cage_ds{ds.ds_lo}_winK40.pdf")
t1df = pd.DataFrame()
for run in ds.paths:
ft1 = ds.paths[run]["t1_path"]
print(f"Scanning ds {ds.ds_lo}, run {run}\n file: {ft1}")
for chunk in pd.read_hdf(ft1, 'ORSIS3302DecoderForEnergy', chunksize=5e4):
t1df_win = chunk.loc[(chunk.energy > xlo) & (chunk.energy < xhi)]
print(t1df_win.shape)
t1df = pd.concat([t1df, t1df_win], ignore_index=True)
print('It worked? maybe?')
h5_opts = {
"mode":"w", # overwrite existing
"append":False,
"format":"table",
# "complib":"blosc:zlib", # no compression, increases I/O speed
# "complevel":1,
# "data_columns":["ievt"]
}
t1df.reset_index(inplace=True)
t1df.to_hdf('./test_dt_file.h5', key="df_windowed", **h5_opts)
print("wrote file")
exit()
# key = "/ORSIS3302DecoderForEnergy"
# wf_chunk = pd.read_hdf(t1df, key, where="ievt < {}".format(75000))
# wf_chunk.reset_index(inplace=True) # required step -- fix pygama "append" bug
t2df = t2df.reset_index(drop=True)
# create waveform block. mask wfs of unequal lengths
number = 20000
icols = []
for idx, col in enumerate(t1df.columns):
if isinstance(col, int):
icols.append(col)
wfs = t1df[icols].values
wfs = np.asarray(wfs)
# wfs = wfs[:number]
# t2df_chunk = t2df[:number]
# print(wf_block.shape, type(wf_block))
# print(t2df_chunk)
t0 = np.asarray(t2df['t0'])
energy = np.asarray(t2df['e_ftp'])
# energy = 0.4066852222964447 * energy
baseline = wfs[:, 0:500]
avg_bl = []
for i in range(len(wfs)):
avg_bl.append(np.mean(baseline[i], keepdims=True))
avg_bl = np.asarray(avg_bl)
wfs = np.asarray(wfs)
wfs = wfs - avg_bl
clk = 100e6
decay = 78
wfs = pz(wfs, decay, clk)
t100 = []
t0_raw = []
wf_raw = []
e_raw = []
for i in range(len(wfs)):
t100_t = np.where(wfs[i] > energy[i])
t100_t = t100_t[0]
if len(t100_t) > 0:
t100_t = t100_t[0]
t100.append(t100_t)
t0_raw.append(t0[i])
wf_raw.append(wfs[i])
e_raw.append(energy[i])
e_raw = np.asarray(e_raw)
index = np.where(e_raw < 7300)[0]
t100 = np.asarray(t100)
t0_raw = np.asarray(t0_raw)
wf_raw = np.asarray(wf_raw)
e_raw = e_raw[index]
t100 = t100[index]
t0_raw = t0_raw[index]
wf_raw = wf_raw[index]
e_raw = 0.4066852222964447 * e_raw
wf_raw = 0.4066852222964447 * wf_raw
hist, bins = np.histogram(e_raw, bins=2700, range=[0,2700])
b = (bins[:-1] + bins[1:]) / 2
plt.plot(b, hist, ls="steps", color='black')
plt.tight_layout()
plt.show()
plt.clf()
# xvals = np.arange(0,3000)
# start = time.time()
# for i in range(len(t100)):
#
# plt.plot(xvals, wf_raw[i], lw=1)
# plt.vlines(t0_raw[i], np.amin(wf_raw[i]), e_raw[i], color='r', linewidth=1.5, label='t0')
# plt.vlines(t100[i], np.amin(wf_raw[i]), e_raw[i], color='g', linewidth=1.5, label='t100')
# plt.hlines(e_raw[i], t0_raw[i], 3000, color='k', linewidth=1.5, zorder=10, label='e_ftp')
# plt.xlabel('Sample Number', ha='right', x=1.0)
# plt.ylabel('ADC Value', ha='right', y=1.0)
# plt.legend()
# plt.tight_layout()
# plt.show()
# exit()
"""
a1 = (t100 - t0_raw) * e_raw
a_wf = []
for i in range(len(wf_raw)):
a2 = sum(wf_raw[i,t0[i]:t100[i]])
a_wf.append(a2)
a_drift = a1 - a_wf
# a_drift = a_drift.tolist()
# print(a_drift)
# exit()
a_test = a_drift[np.where((e_raw > 2600) & (e_raw < 2630))]
e_test = e_raw[np.where((e_raw > 2600) & (e_raw < 2630))]
plt.hist2d(e_test, a_test, bins=[30,100], range=[[2600, 2630], [0, np.amax(a_test)]], norm=LogNorm(), cmap='jet')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts')
plt.tight_layout()
plt.show()
exit()
"""
xvals = np.arange(0,3000)
start = time.time()
for i in range(0,number):
# for i in range(0,5):
plt.plot(xvals, wfs[i], lw=1)
plt.vlines(t0[i], np.amin(wfs[i]), energy[i], color='r', linewidth=1.5, label='t0')
plt.vlines(t100[i], np.amin(wfs[i]), energy[i], color='g', linewidth=1.5, label='t100')
plt.hlines(energy[i], t0[i], 3000, color='k', linewidth=1.5, zorder=10, label='e_ftp')
plt.xlabel('Sample Number', ha='right', x=1.0)
plt.ylabel('ADC Value', ha='right', y=1.0)
plt.legend()
plt.tight_layout()
plt.show()
# input:
# fsignal: PZ-corrected and INL-corrected signal of length len, from channel chan
# Dets: MJ detector info data structure
# PSA: contains filter params to use for trapezoids
# CTC_factor: the value used in the correction, usually CTC.e_dt_slope[chan]
# outputs:
# returned value: energy in keV, or -1.0f in case of error
# t0: start time of drift/signal
# e_adc: energy in ADC units
# e_raw: uncorrected energy in 0.001 ADC units
# drift: charge trapping value (drift time * charge)
# to be used for optimizing correction, in ADC units
# CTC correction = drift*ctc_factor[chan]
def pz(wfs, decay, clk):
"""
pole-zero correct a waveform
decay is in us, clk is in Hz
"""
# get linear filter parameters, in units of [clock ticks]
dt = decay * (1e10 / clk)
rc = 1 / np.exp(1 / dt)
num, den = [1, -1], [1, -rc]
# reversing num and den does the inverse transform (ie, PZ corrects)
pz_wfs = signal.lfilter(den, num, wfs)
return pz_wfs
# return wfs, t2df_chunk
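# Hedged usage sketch (added): exercising the pz() helper above on a synthetic exponential
# pulse. The 78 us decay constant and 100 MHz clock mirror the values used in
# drift_correction(); the pulse itself is illustrative only.
def _pz_demo():
    clk, decay = 100e6, 78
    tau_ticks = decay * 1e-6 * clk  # decay constant in clock ticks (7800 samples here)
    wf = np.concatenate([np.zeros(500), np.exp(-np.arange(2500) / tau_ticks)])
    corrected = pz(wf, decay, clk)
    return wf, corrected  # after correction the tail should be flat (step-like)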
if __name__=="__main__":
main()
| 2.140625
| 2
|
TranskribusDU/tasks/DU_Table/DU_Table_Annotator.py
|
Transkribus/TranskribusDU
| 20
|
12777006
|
# -*- coding: utf-8 -*-
"""
USAGE: DU_Table_Annotator.py input-folder
You must run this on your GT collection to create a training collection.
If you pass a folder, you get a new folder with name postfixed by a_
Does 2 things:
- 1 -
Annotate textlines for Table understanding (finding rows and columns)
It tags the TextLine, to indicate:
- the table header, vs data, vs other stuff:
@DU_header = 'CH' | 'D' | 'O'
- the vertical rank in the table cell:
@DU_row = 'B' | 'I' | 'E' | 'S' | 'O'
- something regarding the number of text in a cell??
    # NOT SURE THIS WORKS...
@DU_col = 'M' | 'S' | 'O'
- 2 -
Aggregate the borders of the cells by linear regression to reflect them
as a line, which is stored as a SeparatorRegion element.
Copyright Naver Labs Europe 2017, 2018
<NAME>
<NAME>
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
import tasks.DU_Table.DU_ABPTableRCAnnotation
if __name__ == "__main__":
try:
#we expect a folder
sInputDir = sys.argv[1]
if not os.path.isdir(sInputDir): raise Exception()
    except Exception:  # covers both a missing argument (IndexError) and a non-directory path
traceln("Usage: %s <folder>" % sys.argv[0])
exit(1)
sOutputDir = "a_"+sInputDir
traceln(" - Output will be in ", sOutputDir)
try:
os.mkdir(sOutputDir)
os.mkdir(os.path.join(sOutputDir, "col"))
except:
pass
lsFilename = [s for s in os.listdir(os.path.join(sInputDir, "col")) if s.endswith(".mpxml") ]
lsFilename.sort()
lsOutFilename = [os.path.join(sOutputDir, "col", "a_"+s) for s in lsFilename]
if not lsFilename:
lsFilename = [s for s in os.listdir(os.path.join(sInputDir, "col")) if s.endswith(".pxml") ]
lsFilename.sort()
lsOutFilename = [os.path.join(sOutputDir, "col", "a_"+s[:-5]+".mpxml") for s in lsFilename]
lsInFilename = [os.path.join(sInputDir , "col", s) for s in lsFilename]
traceln(lsFilename)
traceln("%d files to be processed" % len(lsFilename))
tasks.DU_Table.DU_ABPTableRCAnnotation.main(lsInFilename, lsOutFilename)
| 2.703125
| 3
|
src/spaceone/core/scheduler/scheduler.py
|
ku524/python-core
| 0
|
12777007
|
# -*- coding: utf-8 -*-
import json
import logging
import time
from multiprocessing import Process
from uuid import uuid4
import schedule
from jsonschema import validate
from scheduler import Scheduler as CronSchedulerServer
from spaceone.core import queue
from spaceone.core.error import ERROR_CONFIGURATION
from spaceone.core.scheduler.task_schema import SPACEONE_TASK_SCHEMA
_LOGGER = logging.getLogger(__name__)
class BaseScheduler(Process):
def __init__(self, queue, **kwargs):
super().__init__()
self.queue = queue
self.config = None
def push_task(self):
# Create Task
tasks = self.create_task()
_LOGGER.debug(f'[push_task] task: {len(tasks)}')
for task in tasks:
try:
validate(task, schema=SPACEONE_TASK_SCHEMA)
json_task = json.dumps(task)
_LOGGER.debug(f'[push_task] Task schema: {task}')
queue.put(self.queue, json_task)
except Exception as e:
print(e)
_LOGGER.debug(f'[push_task] Task schema: {task}, {e}')
def run(self):
        raise NotImplementedError('scheduler.run is not implemented')
def create_task(self):
        raise NotImplementedError('scheduler.create_task is not implemented')
class IntervalScheduler(BaseScheduler):
def __init__(self, queue, interval):
super().__init__(queue)
self.config = self.parse_config(interval)
def parse_config(self, expr):
""" expr
format: integer (second)
"""
try:
if isinstance(expr, int):
return int(expr)
except Exception as e:
            _LOGGER.error(f'[parse_config] Wrong configuration, {e}')
def run(self):
schedule.every(self.config).seconds.do(self.push_task)
while True:
schedule.run_pending()
time.sleep(1)
class HourlyScheduler(BaseScheduler):
"""
HourlyScheduler starts every HH:00
If you want to start at different minutes
send minute like ':15' meaning every 15 minute
"""
def __init__(self, queue, interval=1, minute=':00'):
super().__init__(queue)
self.config = self.parse_config(interval)
self.minute = minute
def parse_config(self, expr):
""" expr
format: integer (hour)
"""
try:
if isinstance(expr, int):
return int(expr)
except Exception as e:
_LOGGER.error(f'[parse_config] Wrong configuration, {e}')
raise ERROR_CONFIGURATION(key='interval')
def run(self):
# Call push_task in every hour
schedule.every(self.config).hours.at(self.minute).do(self.push_task)
while True:
schedule.run_pending()
time.sleep(1)
class CronScheduler(BaseScheduler):
"""
cronjob: min hour day month week
"""
def __init__(self, queue, rule):
super().__init__(queue)
self.config = self.parse_config(rule)
def parse_config(self, expr):
""" exprd
format: min hour day month week
* * * * *
"""
# TODO: verify format
return expr
def run(self):
if self.config is False:
# May be error format
return
scheduler = CronSchedulerServer(10)
scheduler.add(f"{uuid4()}", self.config, self.push_task)
scheduler.start()
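# Hedged usage sketch (added): a concrete scheduler that pushes one task every 10 seconds.
# The task dictionary below is illustrative only; a real payload must validate against
# SPACEONE_TASK_SCHEMA, which is defined elsewhere, and the queue name must exist in the
# spaceone.core.queue configuration.
class ExampleIntervalScheduler(IntervalScheduler):
    def create_task(self):
        return [{'name': 'example_task', 'stages': []}]
# ExampleIntervalScheduler('example_queue', 10).start()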
| 2.09375
| 2
|
pyros_interfaces_mock/mockpublisher.py
|
pyros-dev/pyros-common
| 0
|
12777008
|
from __future__ import absolute_import
from pyros_interfaces_common.transient_if import TransientIf
class MockPublisher(TransientIf):
"""
MockPublisher is a mock of the class handling conversion from Python API to Publisher call
"""
def __init__(self, topic_name, topic_type):
# getting the fullname to make sure we start with /
topic_name = topic_name if topic_name.startswith('/') else '/' + topic_name
topic_type = topic_type.msgtype
super(MockPublisher, self).__init__(topic_name, topic_type)
def cleanup(self):
pass
def publish(self, msg):
# System should be set by system itself ( TODO : investigate proper way to inject dependency for Mock...)
return self.system.transfer(msg, self.name)
| 2.421875
| 2
|
asyncdns/utils.py
|
rdrey/asyncdns
| 12
|
12777009
|
import base64 as b64
import struct
import encodings
from .wireformat import *
from . import constants
MAX_PACKET_SIZE = 4000
_rcode_strings = [ 'No error',
'Format error',
'Server failure',
'Non-existent domain',
'Not implemented',
'Query refused',
'Name exists when it should not',
'RR set exists when it should not',
'RR set that should exist does not',
'Server not authoritative for zone',
'Name not contained in zone',
None,
None,
None,
None,
None,
'Bad OPT version OR TSIG signature failure',
'Key not recognized',
'Signature out of time window',
'Bad TKEY mode',
'Duplicate key name',
'Algorithm not supported' ]
_rrtype_strings = [ None, 'A', 'NS', 'MD', 'MF', 'CNAME', 'SOA', 'MB', 'MG', 'MR',
'NUL', 'WKS', 'PTR', 'HINFO', 'MINFO', 'MX', 'TXT', 'RP',
'AFSDB', 'X25', 'ISDN', 'RT', 'NSAP', 'NSAPPTR', 'SIG',
'KEY', 'PX', 'GPOS',
'AAAA', 'LOC', 'NXT', 'EID', 'NIMLOC', 'SRV', 'ATMA', 'NAPTR',
'KX', 'CERT', 'A6', 'DNAME', 'SINK', 'OPT', 'APL', 'DS',
'SSHFP', 'IPSECKEY', 'RRSIG', 'NSEC', 'DNSKEY', 'DHCID',
'NSEC3', 'NSEC3PARAM', 'TLSA', None, None, 'HIP', None,
None, None, 'CDS', 'CDNSKEY', 'OPENPGPKEY' ]
_rrtype_extras = { 99: 'SPF', 100: 'UINFO', 101: 'UID', 102: 'GID', 103: 'UNSPEC',
249: 'TKEY', 250: 'TSIG', 251: 'IXFR', 252: 'AXFR',
253: 'MAILB', 254: 'MAILA', 255: 'ANY', 256: 'URI',
257: 'CAA', 32768: 'TA', 32769: 'DLV' }
_rrclass_strings = [ None, 'IN', 'CS', 'CH', 'HS' ]
def escape_string(byte_string):
try:
ustr = byte_string.decode('ascii')
return ustr
except UnicodeError:
ustr = byte_string.decode('ascii', 'backslashreplace').replace('"', '\\"')
return '"{}"'.format(ustr)
def base64(byte_string):
return b64.b64encode(byte_string).decode('ascii')
def rcode_to_string(rcode):
"""Convert an RCODE to a string"""
try:
s = _rcode_strings[rcode]
    except IndexError:  # list lookup, so an out-of-range rcode raises IndexError, not KeyError
s = None
if s is None:
s = 'Unknown ({})'.format(rcode)
return s
def rrtype_to_string(rrt):
"""Convert an RR type to a string"""
try:
s = _rrtype_strings[rrt]
    except IndexError:  # _rrtype_strings is a list; fall back to the extras dict on overflow
s = _rrtype_extras.get(rrt, None)
if s is None:
s = 'TYPE{}'.format(rrt)
return s
def rrclass_to_string(rrt):
"""Convert an RR class to a string"""
try:
s = _rrclass_strings[rrt]
    except IndexError:  # list lookup, so an out-of-range class raises IndexError, not KeyError
s = None
if s is None:
if rrt == NONE:
s = 'NONE'
elif rrt == ANY:
s = 'ANY'
else:
s = 'CLASS{}'.format(rrt)
return s
def decode_domain(packet, ptr):
result = []
saved = False
saved_ptr = None
while True:
length = packet[ptr]
ptr += 1
if not length:
break
if length < 64:
result.append(packet[ptr:ptr+length])
ptr += length
elif (length & 0xc0) == 0xc0:
low = packet[ptr]
ptr += 1
offset = ((length & 0x3f) << 8) | low
if offset > len(packet):
raise ValueError('Bad reply to DNS query')
if not saved:
saved = True
saved_ptr = ptr
ptr = offset
if saved:
ptr = saved_ptr
return (b'.'.join(result), ptr)
def domain_to_unicode(domain):
return '.'.join([encodings.idna.ToUnicode(label)
for label in domain.split(b'.')])
def domain_from_unicode(domain):
domain = domain.rstrip('.')
return b'.'.join([encodings.idna.ToASCII(label)
for label in domain.split('.')])
def decode_pascal_string(packet, ptr):
slen = packet[ptr]
ptr += 1
s = packet[ptr:ptr+slen]
ptr += slen
return (s, ptr)
def build_dns_packet(uid, query, wants_recursion=False, unicast=False):
flags = QUERY
if wants_recursion:
flags |= RD
header = struct.pack(b'>HHHHHH', uid, flags, 1, 0, 0, 1)
packet = [header]
for label in query.name.split(b'.'):
if len(label) > 63:
raise ValueError('DNS label too long')
if len(label) == 0:
continue
packet.append(struct.pack(b'>B', len(label)))
packet.append(label)
q_class = query.q_class
if unicast:
q_class |= 0x8000
packet.append(struct.pack(b'>BHH', 0, query.q_type, q_class))
# Add an OPT record to indicate EDNS support
packet.append(struct.pack(b'>BHHLH', 0, constants.OPT, MAX_PACKET_SIZE,
DO, 0))
return b''.join(packet)
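# Hedged usage sketch (added): exercising a few of the helpers defined above. Only functions
# from this module are used; the domain name is illustrative.
if __name__ == "__main__":
    print(rrtype_to_string(28))    # 'AAAA'
    print(rrclass_to_string(1))    # 'IN'
    print(rcode_to_string(3))      # 'Non-existent domain'
    wire = domain_from_unicode("example.com")
    print(wire, domain_to_unicode(wire))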
| 1.382813
| 1
|
hobbies/migrations/0004_auto_20200309_1310.py
|
ablades/sideline
| 1
|
12777010
|
# Generated by Django 3.0.4 on 2020-03-09 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hobbies', '0003_userhobbies'),
]
operations = [
migrations.AlterField(
model_name='hobbies',
name='img_url',
field=models.URLField(default='https://www.okea.org/wp-content/uploads/2019/10/placeholder.png', max_length=1000),
),
]
| 1.382813
| 1
|
tools/xdl/xdlrcviz.py
|
leonardt/magma
| 167
|
12777011
|
<filename>tools/xdl/xdlrcviz.py
import sexpr
import sys
import os
from pprint import pprint
from subprocess import Popen, PIPE
fname = sys.argv[1]
name = os.path.basename(fname).split('.')[0]
file = open(fname)
source = ""
for line in file.readlines():
if line[0] != "#":
source += line
sexpr.input(source)
s = sexpr.parse()
while len(s) == 1:
s = s[0]
table = {}
for x in s:
table[x[0]] = x[1:]
class Element():
def __init__(self,name):
self.name = name
self.cfg = []
self.inputs = []
self.outputs = []
def canelide(self):
if len(self.cfg) == 0:
if len(self.inputs) == 0 and len(self.outputs) == 1:
return self.outputs[0] == self.name
elif len(self.inputs) == 1 and len(self.outputs) == 0:
return self.inputs[0] == self.name
return False
class Primitive():
def __init__(self,sexpr):
self.name = sexpr[1]
#pprint(sexpr)
input,output = Element("input"),Element("output")
self.elements = [ input, output ]
self.connections = {} # (e0,outputpin,e1,inputpin) => true
for i in sexpr[4:]:
if i[0] == "pin":
if i[3] == "input":
input.outputs.append(i[2])
self.connections[ ("input",i[2],i[1],i[2]) ] = True
else:
output.inputs.append(i[2])
self.connections[ (i[1],i[2],"output",i[2]) ] = True
elif i[0] == "element":
e = Element(i[1])
self.elements.append(e)
for ii in i[2:]:
if isinstance(ii,list):
if ii[0] == "pin":
getattr(e,ii[2]+"s").append(ii[1])
elif ii[0] == "conn":
if ii[3] == "==>":
self.connections[ (ii[1],ii[2],ii[4],ii[5]) ] = True
else:
self.connections[ (ii[4],ii[5],ii[1],ii[2]) ] = True
elif ii[0] == "cfg":
e.cfg = ii[1:]
def save(self):
print("Saving %s" % self.name)
p = Popen(["dot","-Tpdf","-o","%s_%s.pdf" % (self.name,name)], stdin=PIPE)
f = p.stdin
def write(s):
f.write(s)
if self.name == "PCIE_3_0":
sys.stdout.write(s)
write("digraph G {\n")
write(" graph [rankdir = LR];\n")
write(" node[shape=record];\n")
for e in self.elements:
def namefmt(xs):
return "|".join([ "<%s>%s" % (x,x) for x in xs])
def quote(x):
return """ \\"%s\\" """ % x.replace("<","\\<").replace(">","\\>").replace("|","\\|")
cfgstring = '\\n'.join([quote(x) for x in e.cfg])
if e.canelide():
write(""" %s[label="<%s>%s"];\n""" % (e.name,e.name,e.name))
else:
write(""" %s[label="{ {%s} | %s\\n%s | {%s} }"];\n""" % (e.name,namefmt(e.inputs),e.name,cfgstring,namefmt(e.outputs)))
for t in self.connections.keys():
write(" %s:%s -> %s:%s;\n" % t)
write("}")
f.close()
if p.wait() != 0:
            raise RuntimeError("dot exited with a non-zero status for %s" % self.name)  # a bare raise here has no active exception
for i in table["primitive_defs"]:
if i[0] == "primitive_def":
p = Primitive(i)
try:
p.save()
except:
print("Failed to save %s" % p.name)
| 2.65625
| 3
|
ming_person_wiki/wiki_crawler/spiders/main.py
|
RUiN-jiarun/Chinese-Traditional-Art-KG
| 0
|
12777012
|
<reponame>RUiN-jiarun/Chinese-Traditional-Art-KG<gh_stars>0
import scrapy
from scrapy.loader import ItemLoader
import pymongo
from wiki_crawler.items import WikiCrawlerItem
import zhconv
import re
def inter(a,b):
return list(set(a)&set(b))
class MainSpider(scrapy.Spider):
name = 'main'
allowed_domains = ['zh.wikipedia.org']
db = pymongo.MongoClient("mongodb://127.0.0.1:27017/")["db_wikikg"]
    # The range of URLs to crawl is determined by the previous database, db_urls
start_urls = []
names = []
db_urls = db['db_urls']
for x in db_urls.find():
# id_strip = x['_id'][:x['_id'].find(' (')] if x['_id'].find(' (') != -1 else x['_id']
# names.append(id_strip)
names.append(x['_id'])
start_urls.append(x['url'])
# print(start_urls)
# print(names)
    # Create a collection for the triples
db_triples = db['db_triples']
    # Structure: _id sub_name attr obj_name
    # obj_name may need to be checked against names[] to restrict the lookup scope
def start_requests(self):
for url in self.start_urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
        # TODO: analyze the page structure rules
sub_name = response.xpath('/html/body/div[3]/h1/text()').getall()[0]
attr = response.xpath('/html/body/div[3]/div[3]/div[5]/div[1]/table/tbody/tr/td/table/tbody/tr[1]/th/div[2]').getall()
        # There may be multiple attributes; look each one up separately
attr_len = len(attr)
# l1[0] = l1[0].replace("(", "")
# l1[1] = l1[1].replace(")", "")
# l1[0] += l1[1]
# if l2[0][-1] == "年":
# l2.clear()
sub_name = re.sub(u"\\(.*?\\)|\\{.*?}|\\[.*?]", "", sub_name)
sub_name = sub_name.replace(' ', '')
sub_name = zhconv.convert(sub_name, 'zh-hans')
print(sub_name)
for i in range(0, attr_len):
attr[i] = re.sub('<(\S*?)[^>]*>.*?|<.*? />', '', attr[i])
attr[i] = zhconv.convert(attr[i], 'zh-hans')
print(attr)
# print(attr_len)
for i in range(1, attr_len+1):
allInfo = response.xpath('/html/body/div[3]/div[3]/div[5]/div[1]/table[@class="navbox"]['+str(i)+']//a/text()').getall()
for k in range(0,len(allInfo)):
allInfo[k] = allInfo[k].replace('\u3000', '')
allInfo[k] = zhconv.convert(allInfo[k], 'zh-hans')
# print(allInfo)
interset = inter(allInfo, self.names)
if sub_name in interset:
interset.remove(sub_name)
print(interset)
for j in interset:
# self.db_triples.insert_one(
# {
# '_id': sub_name + '_' + attr[i-1] + '_' + interset[j],
# 'sub_name': sub_name,
# 'attr': attr[i-1],
# 'obj_name': interset[j]
# }
# )
try:
print('insert!')
self.db_triples.insert_one(
{
'_id': sub_name + '_' + attr[i-1] + '_' + j,
'sub_name': sub_name,
'attr': attr[i-1],
'obj_name': j
}
)
except pymongo.errors.DuplicateKeyError:
print('Key Conflict')
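# Hedged sketch (added): the composite "_id" (sub_name + '_' + attr + '_' + obj_name) used above
# makes a duplicate triple raise DuplicateKeyError instead of being stored twice. The same idea
# as a standalone helper, independent of Scrapy (the collection is passed in by the caller):
def insert_triple(collection, sub_name, attr, obj_name):
    try:
        collection.insert_one({
            '_id': sub_name + '_' + attr + '_' + obj_name,
            'sub_name': sub_name,
            'attr': attr,
            'obj_name': obj_name,
        })
        return True
    except pymongo.errors.DuplicateKeyError:
        return False  # triple already recorded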
| 2.703125
| 3
|
cascade_config.py
|
ArthurDeclercq/cascade-config
| 0
|
12777013
|
"""Cascading configuration from the CLI and config files."""
__version__ = "0.2.0"
import json
import os
from abc import ABC, abstractmethod
from argparse import ArgumentParser, Namespace
from typing import Dict
import jsonschema
class CascadeConfig:
"""Cascading configuration."""
def __init__(self, validation_schema=None, none_overrides_value=False):
"""
Cascading configuration.
Parameters
----------
validation_schema: str, path-like, dict, or cascade_config.ValidationSchema, optional
JSON Schema to validate fully cascaded configuration
none_overrides_value: bool
If True, a None value overrides a not-None value from the previous configuration.
If False, None values will never override not-None values.
Examples
--------
>>> cascade_conf = CascadeConfig(validation_schema="config_schema.json")
>>> cascade_conf.add_json("config_default.json")
>>> cascade_conf.add_json("config_user.json")
>>> config = cascade_conf.parse()
"""
self.validation_schema = validation_schema
self.none_overrides_value = none_overrides_value
self.sources = []
@property
def validation_schema(self):
"""JSON Schema to validate fully cascaded configuration."""
return self._validation_schema
@validation_schema.setter
def validation_schema(self, value):
"""Set validation schema."""
if value:
self._validation_schema = ValidationSchema.from_object(value)
else:
self._validation_schema = None
def _update_dict_recursively(self, original: Dict, updater: Dict) -> Dict:
"""Update dictionary recursively."""
for k, v in updater.items():
if isinstance(v, dict):
original[k] = self._update_dict_recursively(original.get(k, {}), v)
elif v or k not in original: # v is not None, or key does not exist yet
original[k] = v
elif self.none_overrides_value: # v is None, but can override previous value
original[k] = v
return original
def add_dict(self, *args, **kwargs):
"""
Add dictionary configuration source to source list.
*args and **kwargs are passed to :class:`cascade_config.DictConfigSource()`.
"""
source = DictConfigSource(*args, **kwargs)
self.sources.append(source)
def add_argumentparser(self, *args, **kwargs):
"""
Add argumentparser configuration source to source list.
*args and **kwargs are passed to :class:`cascade_config.ArgumentParserConfigSource()`.
"""
source = ArgumentParserConfigSource(*args, **kwargs)
self.sources.append(source)
def add_namespace(self, *args, **kwargs):
"""
Add argparse Namespace configuration source to source list.
*args and **kwargs are passed to :class:`cascade_config.NamespaceConfigSource()`.
"""
source = NamespaceConfigSource(*args, **kwargs)
self.sources.append(source)
def add_json(self, *args, **kwargs):
"""
Add JSON configuration source to source list.
*args and **kwargs are passed to :class:`cascade_config.JSONConfigSource()`.
"""
source = JSONConfigSource(*args, **kwargs)
self.sources.append(source)
def parse(self) -> Dict:
"""Parse all sources, cascade, validate, and return cascaded configuration."""
config = dict()
for source in self.sources:
config = self._update_dict_recursively(config, source.load())
if self.validation_schema:
jsonschema.validate(config, self.validation_schema.load())
return config
class _ConfigSource(ABC):
"""Abstract base class for configuration source."""
def __init__(self, source, validation_schema=None, subkey=None) -> None:
"""
Initialize a single configuration source.
Parameters
----------
source : str, path-like, dict, argparse.ArgumentParser
source for the configuration, either a dictionary, path to a file, or
argument parser.
validation_schema: str, path-like, dict, or cascade_config.ValidationSchema, optional
JSON Schema to validate single configuration
subkey : str
            adds the configuration to a subkey of the final cascaded configuration;
e.g. specifying a subkey `"user"` for a configuration source, would add it
under the key `"user"` in the cascaded configuration, instead of updating
the root of the existing configuration
Methods
-------
load()
load the configuration from the source and return it as a dictionary
"""
self.source = source
self.validation_schema = validation_schema
self.subkey = subkey
@property
def validation_schema(self):
"""Get validation_schema."""
return self._validation_schema
@validation_schema.setter
def validation_schema(self, value):
"""Set validation schema."""
if value:
self._validation_schema = ValidationSchema.from_object(value)
else:
self._validation_schema = None
@abstractmethod
def _read(self):
"""Read source into dict."""
pass
def load(self) -> Dict:
"""Read, validate, and place in subkey if required."""
if self.subkey:
config = dict()
config[self.subkey] = self._read()
else:
config = self._read()
if self.validation_schema:
jsonschema.validate(config, self.validation_schema.load())
return config
class DictConfigSource(_ConfigSource):
"""Dictionary configuration source."""
def _read(self) -> Dict:
if not isinstance(self.source, dict):
raise TypeError("DictConfigSource `source` must be a dict")
return self.source
class JSONConfigSource(_ConfigSource):
"""JSON configuration source."""
def _read(self) -> Dict:
if not isinstance(self.source, (str, os.PathLike)):
raise TypeError(
"JSONConfigSource `source` must be a string or path-like object"
)
with open(self.source, "rt") as json_file:
config = json.load(json_file)
return config
class ArgumentParserConfigSource(_ConfigSource):
"""ArgumentParser configuration source."""
def _read(self) -> Dict:
if not isinstance(self.source, ArgumentParser):
raise TypeError(
"ArgumentParserSource `source` must be an argparse.ArgumentParser object"
)
config = vars(self.source.parse_args())
return config
class NamespaceConfigSource(_ConfigSource):
"""Argparse Namespace configuration source."""
def _read(self) -> Dict:
if not isinstance(self.source, Namespace):
raise TypeError(
"NamespaceConfigSource `source` must be an argparse.Namespace object"
)
config = vars(self.source)
return config
class ValidationSchema:
"""ValidationSchema."""
def __init__(self, source):
"""ValidationSchema."""
self.source = source
@classmethod
def from_object(cls, obj):
"""Return ValidationSchema from str, path-like, dict, or ValidationSchema."""
if isinstance(obj, (str, os.PathLike, Dict)):
return cls(obj)
elif isinstance(obj, cls):
return obj
else:
raise TypeError(
f"Cannot create ValidationSchema from type {type(obj)}. Must be a "
"string, path-like, dict, or cascade_config.ValidationSchema object"
)
def load(self) -> Dict:
"""Load validation schema."""
if isinstance(self.source, (str, os.PathLike)):
with open(self.source, "rt") as json_file:
schema = json.load(json_file)
elif isinstance(self.source, Dict):
schema = self.source
else:
raise TypeError(
"ValidationSchema `source` must be of type string, path-like, or dict"
)
return schema
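# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the cascading behaviour using only the classes
# defined above; the option names ("num_threads", "log") are made up.
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--num-threads", type=int, default=None)

    cascade_conf = CascadeConfig()
    cascade_conf.add_dict({"num_threads": 4, "log": {"level": "info"}})
    cascade_conf.add_namespace(parser.parse_args([]))
    # The unset CLI flag yields None, which does not override the default of 4.
    print(cascade_conf.parse())  # {'num_threads': 4, 'log': {'level': 'info'}}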
| 2.921875
| 3
|
MINTConformance/src/org/nema/medical/mint/MintDicomCompare.py
|
aweigold/medical-imaging-network-transport
| 0
|
12777014
|
#!/usr/bin/python
# -----------------------------------------------------------------------------
# $Id$
#
# Copyright (C) 2010 MINT Working group. All rights reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Contact <EMAIL> if any conditions of this
# licensing are not clear to you.
# -----------------------------------------------------------------------------
import base64
import getopt
import glob
import os
import string
import sys
import traceback
from os.path import join
from struct import unpack
from org.nema.medical.mint.MINT_Dictionary import MINT_Dictionary
from org.nema.medical.mint.DicomAttribute import DicomAttribute
from org.nema.medical.mint.DicomStudy import DicomStudy
from org.nema.medical.mint.DicomSeries import DicomSeries
from org.nema.medical.mint.DicomInstance import DicomInstance
from org.nema.medical.mint.MintAttribute import MintAttribute
from org.nema.medical.mint.MintStudy import MintStudy
# -----------------------------------------------------------------------------
# MintDicomCompare
# -----------------------------------------------------------------------------
class MintDicomCompare():
def __init__(self, dicomStudy, mintStudy):
self.__dicom = dicomStudy
self.__studyInstanceUID = self.__dicom.studyInstanceUID()
self.__mint = mintStudy
self.__offsets = {}
self.__count = 0
self.__verbose = False
self.__seriesCompared = 0
self.__instancesCompared = 0
self.__textTagsCompared = 0
self.__sequencesCompared = 0
self.__itemsCompared = 0
self.__inlineBinaryCompared = 0
self.__binaryItemsCompared = 0
self.__bytesCompared = 0
self.__lazy= False
self.__output = None
self.__readOffsets()
def tidy(self):
if self.__output != None: self.__output.close()
def setVerbose(self, verbose):
self.__verbose = verbose
def setLazy(self, lazy):
self.__lazy = lazy
def setOutput(self, output):
if output == "": return
if self.__output != None: self.__output.close()
if os.access(output, os.F_OK):
raise IOError("File already exists - "+output)
self.__output = open(output, "w")
def compare(self):
dicm = self.__dicom
mint = self.__mint
self.__seriesCompared = 0
self.__instancesCompared = 0
self.__check("Number of series",
dicm.numSeries(),
mint.numSeries())
numSeries = min(dicm.numSeries(), mint.numSeries())
for n in range(0, numSeries):
dicomSeries = dicm.series(n)
mintSeries = mint.series(n)
self.__check("Number of instances",
dicomSeries.numInstances(),
mintSeries.numInstances(),
dicomSeries.seriesInstanceUID())
numInstances = min(dicomSeries.numInstances(), mintSeries.numInstances())
for m in range(0, numInstances):
instance = dicomSeries.instance(m)
self.__compareInstances(instance, mint)
self.__instancesCompared += 1
self.__seriesCompared += 1
# ---
# Print out stats if verbose.
# ---
if self.__verbose:
if self.__output == None:
print "%10d series compared." % (self.__seriesCompared)
print "%10d instance(s) compared." % (self.__instancesCompared)
print "%10d text tags(s) compared." % (self.__textTagsCompared)
print "%10d sequence(s) compared." % (self.__sequencesCompared)
print "%10d item(s) compared." % (self.__itemsCompared)
print "%10d inline binary item(s) compared." % (self.__inlineBinaryCompared)
print "%10d binary item(s) compared." % (self.__binaryItemsCompared)
print "%10d byte(s) compared." % (self.__bytesCompared)
else:
self.__output.write("%10d series compared.\n" % (self.__seriesCompared))
self.__output.write("%10d instance(s) compared.\n" % (self.__instancesCompared))
self.__output.write("%10d text tag(s) compared.\n" % (self.__textTagsCompared))
self.__output.write("%10d sequence(s) compared.\n" % (self.__sequencesCompared))
self.__output.write("%10d items(s) compared.\n" % (self.__itemsCompared))
self.__output.write("%10d inline binary item(s) compared.\n" % (self.__inlineBinaryCompared))
self.__output.write("%10d binary item(s) compared.\n" % (self.__binaryItemsCompared))
self.__output.write("%10d byte(s) compared.\n" % (self.__bytesCompared))
# ---
# Always print differences.
# ---
if self.__count != 0:
if self.__output == None:
print "%10d difference(s) found." % (self.__count)
else:
self.__output.write("%10d difference(s) found.\n" % (self.__count))
self.__dicom.tidy()
return self.__count
def __readOffsets(self):
# ---
# TODO:
# ---
# offsets = os.path.join(self.__binary, "offsets.dat")
# if offsets in self.__binaryitems: self.__binaryitems.remove(offsets)
# if os.path.isfile(offsets):
# table = open(offsets, "r")
# line = table.readline()
# while line != "":
# tokens = line.split()
# assert len(tokens) == 2
# self.__offsets[tokens[0]] = tokens[1]
# line = table.readline()
# table.close()
pass
def __compareInstances(self, instance, mint):
self.__compareHeaders(instance, mint)
# ---
# Check Study Instance ID.
# ---
self.__check("UI",
instance.studyInstanceUID(),
mint.studyInstanceUID())
# ---
# Check Series Instance ID.
# ---
mintSeriesInstanceUID = "None"
mintSeries = mint.seriesByUID(instance.seriesInstanceUID())
if mintSeries != None:
mintSeriesInstanceUID = mintSeries.seriesInstanceUID()
self.__check("UI",
instance.seriesInstanceUID(),
mintSeriesInstanceUID,
instance.seriesInstanceUID())
# ---
# Check SOP Instance ID.
# ---
mintSopInstanceUID = "None"
mintInstance = mint.instanceByUID(instance.sopInstanceUID())
if mintInstance != None:
mintSopInstanceUID = mintInstance.sopInstanceUID()
self.__check("UI",
instance.sopInstanceUID(),
mintSopInstanceUID,
instance.seriesInstanceUID(),
instance.sopInstanceUID())
# ---
# Check tags.
# ---
numAttributes = instance.numAttributes()
for n in range(0, numAttributes):
dicomAttr = instance.attribute(n)
self.__checkTag(instance, mint, dicomAttr)
def __compareHeaders(self, instance, mint):
# ---
# Check tags.
# ---
numAttributes = instance.header().numAttributes()
for n in range(0, numAttributes):
dicomAttr = instance.header().attribute(n)
self.__checkTag(instance, mint, dicomAttr)
def __check(self, msg, obj1, obj2, series="", sop=""):
if obj1 != obj2:
self.__count += 1
print "- Study Instance UID", self.__studyInstanceUID
if series != "":
print " - Series Instance UID", series
if sop != "":
print " - SOP Instance UID", sop
print "+++", msg, ":", obj1, "!=", obj2
def __checkTag(self, instance, mint, dicomAttr):
tag = dicomAttr.tag()
# ---
# Optional and deprecated Group Length tags are not included so we don't look for them.
# ---
if tag[4:8] == "0000": return
attr = mint.find(tag, instance.seriesInstanceUID(), instance.sopInstanceUID())
if attr == None:
self.__check("Data Element",
tag,
"None",
instance.seriesInstanceUID(),
instance.sopInstanceUID())
else:
self.__checkAttribute(dicomAttr,
attr,
instance.seriesInstanceUID(),
instance.sopInstanceUID())
def __checkAttribute(self, dicomAttr, attr, seriesInstanceUID, sopInstanceUID):
# ---
# The MINT study may have a more explicit VR for private tags so promote
# original DICOM tag if necessary.
# ---
if dicomAttr.vr() == "UN" and attr.vr() != "UN":
dicomAttr.promote(attr.vr())
if dicomAttr.vr() != "":
self.__check(dicomAttr.tag()+" VR",
dicomAttr.vr(),
attr.vr(),
seriesInstanceUID,
sopInstanceUID)
# ---
# Check binary items and values.
# ---
if dicomAttr.isBinary():
self.__checkBinary(dicomAttr, attr, seriesInstanceUID, sopInstanceUID)
else:
self.__check(dicomAttr.tag()+" Value",
dicomAttr.val(),
attr.val(),
seriesInstanceUID,
sopInstanceUID)
self.__textTagsCompared += 1
# Check for sequence
if dicomAttr.vr() == "SQ":
self.__sequencesCompared += 1
# ---
# Check number of items.
# ---
numItems1 = dicomAttr.numItems()
numItems2 = attr.numItems()
self.__check(dicomAttr.tag()+" Number of items",
numItems1,
numItems2,
seriesInstanceUID,
sopInstanceUID)
if numItems1 == numItems2:
for i in range(0, numItems1):
item1 = dicomAttr.item(i)
item2 = attr.item(i)
self.__checkAttribute(item1, item2, seriesInstanceUID, sopInstanceUID)
self.__itemsCompared += 1
def __checkBinary(self, dicomAttr, mintAttr, seriesInstanceUID, sopInstanceUID):
if not dicomAttr.hasBinary():
self.__checkInlineBinary(dicomAttr, mintAttr, seriesInstanceUID, sopInstanceUID)
return
if self.__lazy: return
# ---
# Check for DICOM binary item
# ---
bid1 = dicomAttr.binary()
if bid1 == None:
self.__check(dicomAttr.tag()+" missing binary",
"None",
"<Binary>",
seriesInstanceUID,
sopInstanceUID)
return
# ---
# Check for MINT binary item.
# ---
if mintAttr.bid() == None:
            self.__check(mintAttr.tag()+" missing bid",
"<Binary>",
"None",
seriesInstanceUID,
sopInstanceUID)
return
bid2 = self.__mint.open(mintAttr.bid())
# ---
# TODO: Check to see if this is single file or multi-file binary.
# ---
# if len(self.__offsets) > 0:
# dat2 = self.__binaryitems[0]
# ---
# TODO: Position the MINT binary file pointer.
# ---
# boffset = 0
# if len(self.__offsets):
# boffset = int(self.__offsets[attr.bid()])
# bid2.seek(boffset)
# ---
# Read in a block.
# ---
BUFLEN = 1024
bytesToRead = dicomAttr.vl()
assert bytesToRead > 0
bufsize = min(bytesToRead, BUFLEN)
block = 0
buf1 = bid1.read(bufsize)
buf2 = bid2.read(bufsize)
bytesToRead -= len(buf1)
bytes1 = unpack('B'*len(buf1), buf1)
bytes2 = unpack('B'*len(buf2), buf2)
n = len(bytes1)
while n > 0:
# ---
# Loop through block.
# ---
diff = False
for i in range(0, n):
if bytes1[i] != bytes2[i]:
self.__check(dicomAttr.tag()+" byte "+str(block*bufsize+i),
hex(bytes1[i]),
hex(bytes2[i]),
seriesInstanceUID,
sopInstanceUID)
diff = True
break
self.__bytesCompared += 1
# ---
# Skip to end if difference was found.
# ---
if diff or bytesToRead == 0:
n = 0
else:
bufsize = min(bytesToRead, BUFLEN)
buf1 = bid1.read(bufsize)
buf2 = bid2.read(bufsize)
bytesToRead -= len(buf1)
assert bytesToRead >= 0
bytes1 = unpack('B'*len(buf1), buf1)
bytes2 = unpack('B'*len(buf2), buf2)
n = len(bytes1)
block += 1
bid1.close()
bid2.close()
self.__binaryItemsCompared += 1
def __checkInlineBinary(self, dicomAttr, mintAttr, seriesInstanceUID, sopInstanceUID):
self.__check(dicomAttr.tag()+" <Binary>",
dicomAttr.bytes(),
mintAttr.bytes(),
seriesInstanceUID,
sopInstanceUID)
self.__inlineBinaryCompared += 1
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
def main():
# ---
# Get options.
# ---
progName = os.path.basename(sys.argv[0])
(options, args)=getopt.getopt(sys.argv[1:], "o:p:vlh")
# ---
# Check for output option.
# ---
output = ""
for opt in options:
if opt[0] == "-o":
output = opt[1]
# ---
# Check for port option.
# ---
port = "8080"
for opt in options:
if opt[0] == "-p":
port = opt[1]
# ---
# Check for verbose option.
# ---
verbose = False
for opt in options:
if opt[0] == "-v":
verbose = True
# ---
# Check for lazy option.
# ---
lazy = False
for opt in options:
if opt[0] == "-l":
lazy = True
# ---
# Check for help option.
# ---
help = False
for opt in options:
if opt[0] == "-h":
help = True
try:
# ---
# Check usage.
# ---
argc = len(args)
if help or argc < 3:
print "Usage:", progName, "[options] <dicom_study_dir> <hostname> <uuid>"
print " -o <output>: output filename (defaults to stdout)"
print " -p <port>: defaults to 8080"
print " -v: verbose"
print " -l: lazy check (skips binary content)"
print " -h: displays usage"
sys.exit(1)
# ---
# Read MINT metadata.
# ---
dicomStudyDir = args[0];
hostname = args[1];
uuid = args[2];
dataDictionary = MINT_Dictionary(hostname, port)
dicomStudy = DicomStudy(dicomStudyDir, dataDictionary)
mintStudy = MintStudy(hostname, port, uuid)
studies = MintDicomCompare(dicomStudy, mintStudy)
studies.setVerbose(verbose)
studies.setLazy(lazy)
studies.setOutput(output)
status = studies.compare()
studies.tidy()
return status
except Exception, exception:
traceback.print_exception(sys.exc_info()[0],
sys.exc_info()[1],
sys.exc_info()[2])
sys.exit(1)
if __name__ == "__main__":
main()
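# -----------------------------------------------------------------------------
# Hedged usage note (not part of the original script)
# -----------------------------------------------------------------------------
# Based on the usage text printed above, a typical invocation would look like:
#
#     python MintDicomCompare.py -v -p 8080 /path/to/dicom_study localhost <uuid>
#
# main() returns the number of differences found (discarded by the __main__
# guard above) and calls sys.exit(1) on usage errors or exceptions.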
| 1.546875
| 2
|
text_analysis.py
|
doctorparadox/pyTwintel
| 2
|
12777015
|
import nltk
## helper function to create a word/count dict from a List of tweets
def wordCount(tweetlist):
words_dict = {}
# Remove "common" words
cw = open('common_words.txt', 'r')
cw_str = cw.read()
cw_tokens = nltk.word_tokenize(cw_str)
for tweet in tweetlist:
line_words = tweet.split()
punctuations = '''`!()-[]{};:'"\,<>./?@#$%^&*_~'''
uncommon_words = [word for word in line_words if word not in cw_tokens]
## if we've seen the word before, increment the counter.
## else add as a new key in the dict
for raw_word in line_words:
## lower-case each word and strip out punctuation
cased_word = raw_word.lower()
# we're dealing with unicode here, so convert as necessary
word = cased_word.encode('utf-8').translate(None, punctuations)
if word not in cw_tokens:
if word in words_dict.keys():
words_dict[word] += 1
else:
words_dict[word] = 1
return words_dict
# Print the raw word list used by this user
def printWords(tweetlist):
unordered_words = wordCount(tweetlist)
keylist = unordered_words.keys()
keylist.sort()
for key in keylist:
print "%s: %s" % (key, unordered_words.get(key))
# Print the top 20 words used with their counts
def printTopWords(tweetlist):
unordered_words = wordCount(tweetlist)
ordered_values = sorted(unordered_words.values(), reverse=True)
## list of the top 20 values in the dict; if there are duplicates,
## we would still want to print them
top_20 = ordered_values[:20]
for key, value in sorted(unordered_words.iteritems(), key=lambda (k,v): (v,k), reverse=True):
if value in top_20:
print key, '-- appears', value, 'times'
# Grab the top 20 words used with their counts, and return them as a dict
def getTopWords(tweetlist):
unordered_words = wordCount(tweetlist)
ordered_values = sorted(unordered_words.values(), reverse=True)
## list of the top 20 values in the dict; if there are duplicates,
## we would still want to print them
top_20 = ordered_values[:20]
top_words = {}
for key, value in sorted(unordered_words.iteritems(), key=lambda (k,v): (v,k), reverse=True):
if value in top_20:
top_words["'"+key+"'"] = value
return top_words
# Parse the tweet text for named entities and returns a List of them
def getNamedEntities(tweetlist):
tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in tweetlist]
tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]
chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)
def extract_entity_names(t):
entity_names = []
if hasattr(t, 'label') and t.label:
if t.label() == 'NE':
entity_names.append(' '.join([child[0] for child in t]))
else:
for child in t:
entity_names.extend(extract_entity_names(child))
return entity_names
entity_names = []
for tree in chunked_sentences:
# Print results per sentence
# print extract_entity_names(tree)
entity_names.extend(extract_entity_names(tree))
# return unique entity names
return set(entity_names)
# Function for printing out a nice column-based display of a long list
def fmtcols(mylist, cols):
maxwidth = max(map(lambda x: len(x), mylist))
justifyList = map(lambda x: x.ljust(maxwidth), mylist)
lines = (' '.join(justifyList[i:i+cols])
for i in xrange(0,len(justifyList),cols))
print "\n".join(lines)
| 3.578125
| 4
|
tasks.py
|
volpatto/pysodes
| 2
|
12777016
|
from invoke import task
from invoke.exceptions import Exit
from pathlib import Path
from typing import Optional
import os
import shutil
import sys
BUILD_DIR_DEFAULT = Path(os.environ['BUILD_DIR'].replace(":", ""))
def _get_vcvars_paths():
template = r"%PROGRAMFILES(X86)%\Microsoft Visual Studio\2017\{edition}\VC\Auxiliary\Build\vcvarsall.bat"
template = os.path.expandvars(template)
editions = ('BuildTools', 'Professional', 'WDExpress', 'Community')
return tuple(Path(template.format(edition=edition)) for edition in editions)
def strip_and_join(s: str):
return ' '.join(line.strip() for line in s.splitlines() if line.strip() != '')
def echo(c, msg: str):
from colorama.ansi import Fore, Style
if c.config.run.echo:
print(f"{Fore.WHITE}{Style.BRIGHT}{msg}{Style.RESET_ALL}")
def remove_directory(path: Path):
if path.is_dir():
print(f"Removing {path}")
shutil.rmtree(path)
else:
print(f"Not removing {path} (not a directory)")
def _get_and_prepare_build(
c,
clean: bool = False,
build_subdirectory: Path = BUILD_DIR_DEFAULT
) -> Path:
'''
Returns build directory where `cmake` shall be called from. Creates it and
    possibly removes its contents if `clean=True` is passed.
'''
build_dir = build_subdirectory
if clean:
remove_directory(build_dir)
build_dir.mkdir(parents=True, exist_ok=not clean)
return build_dir
def _get_cmake_command(
build_dir: Path,
cmake_generator: str,
cmake_arch: Optional[str] = None,
config: str = 'Release',
):
'''
:param build_dir: Directory from where cmake will be called.
'''
root_dir = Path(__file__).parent
relative_root_dir = Path(os.path.relpath(root_dir, build_dir))
relative_artifacts_dir = Path(os.path.relpath(build_dir))
return strip_and_join(f"""
cmake
-G "{cmake_generator}"
{f'-A "{cmake_arch}"' if cmake_arch is not None else ""}
-DCMAKE_BUILD_TYPE={config}
-DCMAKE_INSTALL_PREFIX="{relative_artifacts_dir.as_posix()}"
"{str(relative_root_dir)}"
""")
def _get_wrappers_command(wrappers_dir: Path) -> str:
conda_prefix = os.environ['CONDA_PREFIX']
if sys.platform.startswith('win'):
autodiff_env_path = f"{conda_prefix}\\Library\\bin"
else:
autodiff_env_path = f"{conda_prefix}/bin"
return strip_and_join(f"""
create-wrappers
-t conda
--bin-dir {autodiff_env_path}
--dest-dir {wrappers_dir}
--conda-env-dir {conda_prefix}
""")
def _get_test_command():
test_command = strip_and_join(f"""
pytest .
-n auto
""")
return test_command
if sys.platform.startswith('win'):
@task
def msvc(c, clean=False, config='Release'):
"""
Generates a Visual Studio project at the "build/msvc" directory.
Assumes that the environment is already configured using:
conda devenv
activate env_name_here
"""
        build_dir = _get_and_prepare_build(
c,
clean=clean,
build_subdirectory=BUILD_DIR_DEFAULT / "msvc",
)
cmake_command = _get_cmake_command(build_dir=build_dir, cmake_generator="Visual Studio 15 2017",
cmake_arch="x64", config=config)
os.chdir(build_dir)
c.run(cmake_command)
@task
def compile(c, clean=False, config='Release', number_of_jobs=-1, gen_wrappers=False):
"""
Compiles by running CMake and building with `ninja`.
Assumes that the environment is already configured using:
conda devenv
[source] activate env_name
"""
build_dir = _get_and_prepare_build(
c,
clean=clean,
build_subdirectory=BUILD_DIR_DEFAULT,
)
cmake_command = _get_cmake_command(build_dir=build_dir, cmake_generator="Ninja", config=config)
build_command = strip_and_join(f"""
cmake
--build .
--target install
--config {config}
--
{f"-j {number_of_jobs}" if number_of_jobs >= 0 else ""}
{"-d keeprsp" if sys.platform.startswith("win") else ""}
""")
commands = [cmake_command, build_command]
if gen_wrappers:
wrappers_command = _get_wrappers_command(build_dir / "wrappers/conda")
commands.append(wrappers_command)
if sys.platform.startswith('win'):
for vcvars_path in _get_vcvars_paths():
if not vcvars_path.is_file():
continue
commands.insert(0, f'"{vcvars_path}" amd64')
break
else:
raise Exit(
'Error: Commands to configure MSVC environment variables not found.',
code=1,
)
os.chdir(build_dir)
c.run("&&".join(commands))
@task
def clear(c, build_dir_path=BUILD_DIR_DEFAULT):
"""
Clear build directory
"""
remove_directory(build_dir_path)
@task
def wrappers(c, wrappers_dir=BUILD_DIR_DEFAULT / "wrappers/conda"):
"""
    Generate conda wrappers for the environment binaries in the directory passed with --wrappers-dir dir_path
"""
remove_directory(wrappers_dir)
if sys.platform.startswith('win'):
print(f"Generating conda wrappers to {wrappers_dir} from {os.environ['CONDA_PREFIX']}\\Library\\bin")
else:
print(f"Generating conda wrappers to {wrappers_dir} from {os.environ['CONDA_PREFIX']}/bin")
generate_wrappers_command = _get_wrappers_command(wrappers_dir)
echo(c, generate_wrappers_command)
c.run(generate_wrappers_command, pty=True, warn=True)
@task
def tests(c):
"""
Execute tests in pytest, if any
"""
test_command = _get_test_command()
c.run(test_command, pty=True)
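# --- Hedged usage note (not part of the original tasks file) ---
# With invoke installed and the conda dev environment activated, the tasks
# above are typically driven from the command line; invoke maps underscores in
# parameter names to dashes, so for example:
#
#     invoke compile --clean --config Release --number-of-jobs 4
#     invoke tests
#     invoke clear
#
# On Windows, `invoke msvc` additionally generates a Visual Studio solution.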
| 2.171875
| 2
|
setup.py
|
lucasdavid/convolutional-cuda
| 5
|
12777017
|
<gh_stars>1-10
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='convolutional',
description='Convolutional Networks implemented in CUDA.',
long_description=open('README.md').read(),
version='0.1',
packages=['convolutional'],
scripts=[],
author='<NAME>, <NAME>',
author_email='<EMAIL>',
url='https://github.com/lucasdavid/convolutional-cuda',
download_url='https://github.com/lucasdavid/convolutional-cuda/archive/master.zip',
install_requires=['numpy', 'pycuda', 'scikit-learn'],
tests_require=open('requirements-dev.txt').readlines(),
)
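# Hedged note (not part of the original setup script): with this setup.py the
# package would typically be installed locally with `pip install .`, assuming
# CUDA and the listed dependencies (numpy, pycuda, scikit-learn) are available.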
| 1.289063
| 1
|
lazy_vasping/calc_parser.py
|
alexsquires/lazy_vasping
| 0
|
12777018
|
<reponame>alexsquires/lazy_vasping
import time, os, yaml
import pandas as pd
from tqdm import tqdm
from vasppy.summary import find_vasp_calculations
from pymatgen.io.vasp import Poscar, Xdatcar, Vasprun
from pathlib import Path
from monty.serialization import loadfn
MODULE_DIR = Path(__file__).resolve().parent
def load_yaml_config(fname):
config = loadfn(str(MODULE_DIR / ("%s" % fname)))
if "PARENT" in config:
        parent_config = load_yaml_config(config["PARENT"])
for k, v in parent_config.items():
if k not in config:
config[k] = v
elif isinstance(v, dict):
v_new = config.get(k, {})
v_new.update(v)
config[k] = v_new
return config
def assess_stdout(directory):
"""
Parses the std_out (it is assumed it is named "vasp_out") and reports any found errors.
args:
- directory (str): directory to look for "vasp_out"
    returns:
- errors (list): list of error codes
"""
all_errors = load_yaml_config("errors.yaml")
errors = []
errors_subset_to_catch = list(all_errors.keys())
with open(f'{directory}/vasp_out') as handler:
for line in handler:
l = line.strip()
for err, msgs in all_errors.items():
for msg in msgs:
if l.find(msg) != -1:
errors.append(err)
return errors
def file_age(filepath):
"""
given a file, determines the last time that file was updated in seconds
args:
- filepath (str): path to file
returns:
- m_time (float): time since file last modified in seconds
"""
m_time = time.time() - os.path.getmtime(filepath)
return m_time
def assess_OUTCAR(directory):
"""
    assesses whether a directory contains an OUTCAR file
args:
- directory (str): directory to check for OUTCAR
returns:
- outcar_update_time (float): time in seconds since the OUTCAR was modified
"""
if os.path.exists(f'{directory}/OUTCAR'):
try:
outcar_update_time = file_age(f'{directory}/OUTCAR')
except:
outcar_update_time = None
else:
outcar_update_time = None
return outcar_update_time
def assess_CONTCAR(directory):
"""
    assesses whether a directory contains a properly formatted vasp contcar
args:
- directory (str): directory to check for CONTCAR
returns:
- contcar (bool): whether the directory contains a readable CONTCAR
"""
if os.path.exists(f'{directory}/CONTCAR'):
try:
Poscar.from_file(f'{directory}/CONTCAR')
contcar = True
except:
contcar = False
else:
contcar = False
return contcar
def assess_XDATCAR(directory):
"""
    reports how many ionic steps a calculation has run for by reading the XDATCAR (cannot always rely on vasprun, as it is unreadable if the calculation is unfinished)
args:
- directory (str): directory to check for XDATCAR
returns:
- xdatcar (int): the number of steps saved to the XDATCAR
"""
if os.path.exists(f'{directory}/XDATCAR'):
try:
xdatcar = len(Xdatcar(f'{directory}/XDATCAR').structures)
except:
xdatcar = None
else:
xdatcar = None
return xdatcar
def assess_vasprun(directory):
"""
checks whether calculations completed, and converged
args:
- directory (str): directory to check for vasprun.xml
returns:
- contcar (bool): whether the directory contains a converged vasprun
"""
if os.path.exists(f'{directory}/vasprun.xml'):
try:
vasprun = Vasprun(f'{directory}/vasprun.xml', parse_eigen=False, parse_dos=False).converged
except:
vasprun = False
else:
vasprun = False
return vasprun
def parse_calcs():
"""
    find all vasp calculations in directories "below" the current directory and generate a ".csv"
summarising the status of these calculations ("calc_data.csv")
args:
- None
returns:
- None
"""
calculation_status = {}
home = os.getcwd()
entries = []
calculations = find_vasp_calculations()
for calc_dir in tqdm(calculations):
calc_status = {'converged':assess_vasprun(f'{home}/{calc_dir}'),
'errors':assess_stdout(f'{home}/{calc_dir}'),
'contcar':assess_CONTCAR(f'{home}/{calc_dir}'),
'ionic_steps':assess_XDATCAR(f'{home}/{calc_dir}'),
'last_updated': assess_OUTCAR(f'{home}/{calc_dir}')}
calculation_status.update({calc_dir:calc_status})
df = pd.DataFrame.from_dict(calculation_status, orient='index')
df.to_csv('calc_data.csv')
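# --- Hedged usage sketch (not part of the original module) ---
# parse_calcs() is intended to be run from the top of a directory tree that
# holds VASP calculations (located via vasppy's find_vasp_calculations); the
# resulting summary can then be inspected with pandas.
if __name__ == "__main__":
    parse_calcs()
    summary = pd.read_csv("calc_data.csv", index_col=0)
    print(summary.head())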
| 2.265625
| 2
|
tests/test_DB2.py
|
kschweiger/Mimir3
| 0
|
12777019
|
import sys
import os
import shutil
import json
from glob import glob
#sys.path.insert(0, os.path.abspath('..'))
#sys.path.insert(0, os.path.abspath('.'))
#print(sys.path)
import mimir.backend.database
from mimir.backend.database import DataBase, Model
from mimir.backend.entry import Item, ListItem
import unittest
import pytest
import coverage
import os
import copy
import datetime
#DEBUGGING
import tracemalloc
if os.getcwd().endswith("tests"):
mimir_dir = os.getcwd()[0:-len("/tests")]
dir2tests = os.getcwd()
else:
mimir_dir = os.getcwd()
dir2tests = os.getcwd()+"/tests"
files = ["testStructure/rootFile1.mp4",
"testStructure/folder1/folder1file1.mp4",
"testStructure/folder1/folder1file2.mp4",
"testStructure/folder2/folder2file1.mp4",
"testStructure/folder2/folder2file2.mp4"]
folder = ["folder1", "folder2"]
def getDataTime():
currently = datetime.datetime.now()
day = currently.day
month = currently.month
year = currently.year
hour = currently.hour
minutes = currently.minute
sec = currently.second
fulldate = "{0:02}{3}{1:02}{3}{2:02}".format(day, month, year-2000, ".")
fulltime = "{0:02}:{1:02}:{2:02}".format(hour, minutes, sec)
return fulldate, fulltime
@pytest.fixture(scope="module")
def preCreatedDB():
os.system("touch "+dir2tests+"/testStructure/rootFile1")
os.system("touch "+dir2tests+"/testStructure/folder2/folder2file1.mp4")
os.system("touch "+dir2tests+"/testStructure/folder2/folder2file2.mp4")
os.system("touch "+dir2tests+"/testStructure/folder2/folder3/folder3file1.mp4")
os.system("touch "+dir2tests+"/testStructure/folder1/folder1file1.mp4")
os.system("touch "+dir2tests+"/testStructure/folder1/folder1file2.mp4")
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
    ## Set Ratings for future tests
# Expected Order: ["3", "2", "4", "1", "5", "0"]
database.modifySingleEntry("1", "Rating", "2", byID = True )
database.modifySingleEntry("2", "Rating", "4", byID = True )
database.modifySingleEntry("3", "Rating", "5", byID = True )
database.modifySingleEntry("4", "Rating", "3", byID = True )
database.modifySingleEntry("5", "Rating", "1", byID = True )
# Expected Order: ["5", "4", "3", "2", "1", "0"]
database.modifySingleEntry("0", "SingleItem", "Xi", byID = True )
database.modifySingleEntry("1", "SingleItem", "Tau", byID = True )
database.modifySingleEntry("2", "SingleItem", "Ny", byID = True )
database.modifySingleEntry("3", "SingleItem", "Eta", byID = True )
database.modifySingleEntry("4", "SingleItem", "Bea", byID = True )
database.modifySingleEntry("5", "SingleItem", "Alpha", byID = True )
database.modifyListEntry("0", "ListItem", "Blue", byID = True)
database.modifyListEntry("0", "ListItem", "Double Orange", byID = True)
database.modifyListEntry("0", "ListItem", "Triple Orange", byID = True)
database.modifyListEntry("3", "ListItem", "Lavender", byID = True)
database.modifyListEntry("4", "ListItem", "Lavender", byID = True)
database.modifyListEntry("4", "ListItem", "Pinkish", byID = True)
database.modifyListEntry("4", "ListItem", "Spring", byID = True)
Entry0 = database.getEntryByItemName("ID", "0")[0]
Entry1 = database.getEntryByItemName("ID", "1")[0]
Entry2 = database.getEntryByItemName("ID", "2")[0]
Entry3 = database.getEntryByItemName("ID", "3")[0]
Entry4 = database.getEntryByItemName("ID", "4")[0]
Entry5 = database.getEntryByItemName("ID", "5")[0]
# Expected Order: ["0", "2", "3", "5", "1", "4"]
Entry0.changeItemValue("Added", "30.01.19|00:00:00")
Entry1.changeItemValue("Added", "20.01.19|00:00:00")
Entry2.changeItemValue("Added", "29.01.19|00:00:00")
Entry3.changeItemValue("Added", "29.01.19|00:00:00")# Same time: Fall back to ID
Entry4.changeItemValue("Added", "15.01.19|00:00:00")
Entry5.changeItemValue("Added", "26.01.19|00:00:00")
# Expected Order: ["0", "3", "4", "5", "1", "2"]
Entry0.replaceItemValue("Changed", "24.02.19|00:00:00", Entry0.getItem("Changed").value[0])
Entry1.replaceItemValue("Changed", "10.02.19|00:00:00", Entry1.getItem("Changed").value[0])
Entry2.replaceItemValue("Changed", "23.02.19|00:00:00", Entry2.getItem("Changed").value[0])
Entry3.replaceItemValue("Changed", "22.02.19|00:00:00", Entry3.getItem("Changed").value[0])
Entry4.replaceItemValue("Changed", "21.02.19|00:00:00", Entry4.getItem("Changed").value[0])
Entry5.replaceItemValue("Changed", "20.02.19|00:00:00", Entry5.getItem("Changed").value[0])
Entry0.addItemValue("Changed", "25.03.19|00:00:00")
Entry1.addItemValue("Changed", "19.03.19|00:00:00")
Entry2.addItemValue("Changed", "23.01.19|00:00:00")
Entry3.addItemValue("Changed", "22.03.19|00:00:00")
Entry4.addItemValue("Changed", "21.03.19|00:00:00")
Entry5.addItemValue("Changed", "20.03.19|00:00:00")
database.saveMain()
for item in database.model.allItems:
database.cacheAllValuebyItemName(item)
#shutil.copytree(dbRootPath+"/.mimir", dbRootPath+"/.mimir2") #For testing
shutil.rmtree(dbRootPath+"/.mimir")
return database
def test_01_Model_init():
config = mimir_dir+"/conf/modeltest.json"
jsonModel = None
with open(config) as f:
jsonModel = json.load(f)
testModel = Model(config)
bools = []
bools.append(testModel.modelName == jsonModel["General"]["Name"])
bools.append(testModel.modelDesc == jsonModel["General"]["Description"])
bools.append(testModel.extentions == jsonModel["General"]["Types"])
allitems = {}
allitems.update(testModel.items)
allitems.update(testModel.listitems)
for item in allitems:
for spec in allitems[item]:
bools.append(jsonModel[item][spec] == allitems[item][spec])
res = True
for b in bools:
if not b:
res = b
break
assert res
def test_02_DB_init_new():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
print(database.model.listitems)
filesindbRoot = glob(dbRootPath+"/**/*.mp4", recursive = True)
filesindbRoot = [x.replace(dbRootPath+"/", "") for x in filesindbRoot]
allEntriesSaved = True
for entry in database.entries:
if entry.Path not in filesindbRoot:
allEntriesSaved = False
assert allEntriesSaved
for item in database.model.allItems:
assert not database.cachedValuesChanged[item]
del database
def test_03_DB_raise_RuntimeError_existing_mimirDir():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if not os.path.exists(dbRootPath+"/.mimir"):
os.makedirs(dbRootPath+"/.mimir")
with pytest.raises(RuntimeError):
database = DataBase(dbRootPath, "new", config)
del database
def test_04_DB_save():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
    #Check database is saved
assert database.saveMain()
assert database.saveMain()
#shutil.copytree(dbRootPath+"/.mimir", dbRootPath+"/.mimir2")
#assert validateDatabaseJSON(database, config, database.savepath)
#check if backup was created
day, month, year = datetime.date.today().day, datetime.date.today().month, datetime.date.today().year
fulldate = "{2:02}-{1:02}-{0:02}".format(day, month, year-2000)
assert os.path.exists(dbRootPath+"/.mimir/mainDB.{0}.backup".format(fulldate)) == True
del database
def test_05_DB_equal():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database1 = DataBase(dbRootPath, "new", config)
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database2 = DataBase(dbRootPath, "new", config)
assert database1 == database2
del database1, database2
def test_06_DB_notequal():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database1 = DataBase(dbRootPath, "new", config)
os.system("rm "+dir2tests+"/testStructure/newfile.mp4")
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database2 = DataBase(dbRootPath, "new", config)
os.system("touch "+dir2tests+"/testStructure/newfile.mp4")
database2.findNewFiles()
os.system("rm "+dir2tests+"/testStructure/newfile.mp4")
assert database1 != database2
del database1, database2
def test_07_DB_load():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
database.saveMain()
loadedDB = DataBase(dbRootPath, "load")
assert database == loadedDB
assert loadedDB.maxID == len(loadedDB.entries)-1 #Since 0 is a valid ID
del database
def test_08_DB_getAllValues():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
with pytest.raises(KeyError):
database.getAllValuebyItemName("Blubb")
values = database.getAllValuebyItemName("Path")
filesindbRoot = glob(dbRootPath+"/**/*.mp4", recursive = True)
filesindbRoot = [x.replace(dbRootPath+"/", "") for x in filesindbRoot]
assert values == set(filesindbRoot)
del database
def test_09_DB_getEntrybyItemName():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
with pytest.raises(KeyError):
database.getEntryByItemName("Blubb", "folder2file")
found = False
for entry in database.entries:
print(entry.getItem("Name").value)
if entry.getItem("Name").value == "folder2file1":
found = True
break
assert found
entrybyItemName = database.getEntryByItemName("Name", "folder2file1")
assert entry in entrybyItemName
del database
def test_10_DB_removeEntry_exceptions():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
##############################################
#Raise exception for not specified vector
# No vector specified
with pytest.raises(RuntimeError):
database.remove(1)
# More than one vector specified
with pytest.raises(RuntimeError):
database.remove(1, byID = True, byName = True)
##############################################
#Raise exception type
# ID
with pytest.raises(TypeError):
database.remove([], byID = True)
with pytest.raises(TypeError):
database.remove(1, byID = 1)
# Name/Path
with pytest.raises(TypeError):
database.remove(1, byName = True)
##############################################
#Raise exception by ID: out of range
with pytest.raises(IndexError):
database.remove(1000, byID = True)
##############################################
#Raise exception by Name/Path: not in DB
with pytest.raises(KeyError):
database.remove("RandomName", byName = True)
with pytest.raises(KeyError):
database.remove("RandomPath", byPath = True)
del database
def test_11_DB_removeEntry():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
#Remove by ID
databaseID = copy.deepcopy(database)
id2remove = 2
entry2Remove = databaseID.getEntryByItemName("ID",str(id2remove))[0]
databaseID.remove(id2remove, byID = True)
assert not entry2Remove in databaseID.entries
#Remove by Name
databaseName = copy.deepcopy(database)
name2remove = "folder2file1"
entry2Remove = databaseName.getEntryByItemName("Name",name2remove)[0]
databaseName.remove(name2remove, byName = True)
assert not entry2Remove in databaseName.entries
#Remove by Path
databasePath = copy.deepcopy(database)
file2remove = "folder2/folder2file1.mp4"
path2remove = dbRootPath+"/"+file2remove
entry2Remove = databasePath.getEntryByItemName("Path",file2remove)[0]
databasePath.remove(file2remove, byPath = True)
assert not entry2Remove in databasePath.entries
del database
def test_12_DB_findNewFiles_append():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
lastIDbeforeAppend = database.maxID
os.system("touch "+dir2tests+"/testStructure/newfile.mp4")
newFiles, pairs = database.findNewFiles()
os.system("rm "+dir2tests+"/testStructure/newfile.mp4")
assert "newfile.mp4" in newFiles
assert len(newFiles) == 1
asEntry = False
for entry in database.entries:
if entry.Path == "newfile.mp4":
asEntry = True
newEntry = entry
break
assert asEntry
assert int(newEntry.ID) == lastIDbeforeAppend+1
assert database.maxID == lastIDbeforeAppend+1
del database
def test_13_p1_DB_query():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
updatedEntry1 = database.getEntryByItemName("ID", "0")[0]
updatedEntry2 = database.getEntryByItemName("ID", "1")[0]
updatedEntry1.changeItemValue("SingleItem", "ReplacedValue")
updatedEntry1.addItemValue("ListItem", "AddedValue")
updatedEntry2.changeItemValue("SingleItem", "ReplacedValue")
########################################################
#First names wrong
with pytest.raises(KeyError):
database.query(["Blubb", "SingleItem"], "SomeQuery")
#Second names wrong
with pytest.raises(KeyError):
database.query(["SingleItem", "Blubb"], "SomeQuery")
########################################################
resultEntry = database.query(["SingleItem","ListItem"], ["ReplacedValue"])
resultID = database.query(["SingleItem","ListItem"], ["ReplacedValue"], returnIDs = True)
found1, found2 = False, False
if updatedEntry1 in resultEntry:
found1 = True
if updatedEntry2 in resultEntry:
found2 = True
foundEntry = found1 and found2
assert resultID == ["0", "1"]
resultID = database.query(["SingleItem","ListItem"], ["AddedValue", "ReplacedValue"], returnIDs = True)
assert resultID == ["0"]
del database
@pytest.mark.parametrize("Query, IDsExp", [("!Lavender", ["0","1","2","5"]), ("!Xi", ["1","2","3","4","5"]), ("!Eta Lavender", ["4"])])
def test_13_p2_DB_query(Query, IDsExp, preCreatedDB):
qList = Query.split(" ")
resultID = preCreatedDB.query(["SingleItem","ListItem"], qList, returnIDs = True)
assert resultID == IDsExp
@pytest.mark.parametrize("Query, IDsExp", [("Triple Orange", ["0"])])
def test_13_p3_DB_query(Query, IDsExp, preCreatedDB):
qList = Query.split(" ")
resultID = preCreatedDB.query(["SingleItem","ListItem"], qList, returnIDs = True)
assert resultID == IDsExp
def test_14_DB_modifyEntry():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
thisDate, thisTime = getDataTime()
#--------------------- SingleItem -------------------------
#Replace single Item value
database.modifySingleEntry("1", "SingleItem", "changedItemValue", byID = True )
changedEntry = database.getEntryByItemName("ID", "1")[0]
assert "changedItemValue" in changedEntry.getAllValuesbyName("SingleItem")
change_datetime = changedEntry.getAllValuesbyName("Changed")
change_datetime = list(change_datetime)[0]
assert change_datetime != "emptyChanged"
date, time = change_datetime.split("|")
assert date == thisDate
assert time[0:1] == thisTime[0:1]
#Check if Item is present in database
with pytest.raises(KeyError):
database.modifySingleEntry("1", "BLubbb", "changedItemValue", byID = True )
with pytest.raises(TypeError):
database.modifySingleEntry("1", "ListItem", "changedItemValue", byID = True )
#---------------------- ListItem --------------------------
with pytest.raises(TypeError):
database.modifyListEntry("1", "SingleItem", "appendedItemValue", "Append", byID = True)
    #Append; the default value should be removed when appending the first actual value
origEntry = database.getEntryByItemName("ID", "1")[0]
database.modifyListEntry("1", "ListItem", "initialValue", "Append", byID = True)
changedEntry = database.getEntryByItemName("ID", "1")[0]
#print(database.model.getDefaultValue("ListItem"))
assert ("initialValue" in changedEntry.getAllValuesbyName("ListItem")
and database.model.getDefaultValue("ListItem") not in changedEntry.getAllValuesbyName("ListItem")
and len(changedEntry.getAllValuesbyName("ListItem")) == 1)
#Append
change_datetime = changedEntry.getAllValuesbyName("Changed")
change_datetime = list(change_datetime)[0]
assert change_datetime != "emptyChanged"
date, time = change_datetime.split("|")
assert date == thisDate
assert time[0:1] == thisTime[0:1]
print("-------- Append ----------")
origEntry = database.getEntryByItemName("ID", "1")[0]
databaseAppend = copy.deepcopy(database)
databaseAppend.modifyListEntry("1", "ListItem", "appendedItemValue", "Append", byID = True)
changedEntry = databaseAppend.getEntryByItemName("ID", "1")[0]
assert ( "appendedItemValue" in changedEntry.getAllValuesbyName("ListItem")
and origEntry.getAllValuesbyName("ListItem").issubset(changedEntry.getAllValuesbyName("ListItem")) )
#Replace
print("-------- Replace ----------")
databaseReplace = copy.deepcopy(databaseAppend)
databaseReplace.modifyListEntry("1", "ListItem", "replacedItemValue", "Replace", "initialValue", byID = True)
changedEntry = databaseReplace.getEntryByItemName("ID", "1")[0]
assert ("replacedItemValue" in changedEntry.getAllValuesbyName("ListItem")
and "initialValue" not in changedEntry.getAllValuesbyName("ListItem"))
#Remove
print("-------- Remove I ----------")
databaseAppend.modifyListEntry("1", "ListItem", None, "Remove", "appendedItemValue", byID = True)
changedEntry = databaseAppend.getEntryByItemName("ID", "1")[0]
assert "appendedItemValue" not in changedEntry.getAllValuesbyName("ListItem")
#Remove empty entry
print("-------- Remove II ----------")
databaseReplace.modifyListEntry("1", "ListItem", None, "Remove", "appendedItemValue", byID = True)
databaseReplace.modifyListEntry("1", "ListItem", None, "Remove", "replacedItemValue", byID = True)
changedEntry = databaseReplace.getEntryByItemName("ID", "1")[0]
assert (set(databaseReplace.model.listitems["ListItem"]["default"]) == changedEntry.getAllValuesbyName("ListItem"))
print("-------- Change date for ListItem ----------")
database.modifyListEntry("2", "ListItem", "initialValue", "Append", byID = True)
changedEntry = database.getEntryByItemName("ID", "2")[0]
change_datetime = changedEntry.getAllValuesbyName("Changed")
change_datetime = list(change_datetime)[0]
assert change_datetime != "emptyChanged"
date, time = change_datetime.split("|")
assert date == thisDate
assert time[0:1] == thisTime[0:1]
def test_15_DB_status():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
#DB not saved
assert not database.getStatus()
#DB saved
database.saveMain()
assert database.getStatus()
#DB changed - new File
os.system("touch "+dir2tests+"/testStructure/newfile.mp4")
newFiles = database.findNewFiles()
os.system("rm "+dir2tests+"/testStructure/newfile.mp4")
assert not database.getStatus()
database.saveMain()
assert database.getStatus()
#DB changed - changed Entry
def test_16_DB_random():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
allIDs = database.getAllValuebyItemName("ID")
randID = database.getRandomEntry(chooseFrom = allIDs)
assert randID in allIDs
def test_17_DB_random_all():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
allIDs = database.getAllValuebyItemName("ID")
randID = database.getRandomEntryAll()
assert randID in allIDs
def test_18_DB_random_weighted():
config = mimir_dir+"/conf/modeltest.json"
dbRootPath = dir2tests+"/testStructure"
if os.path.exists(dbRootPath+"/.mimir"):
shutil.rmtree(dbRootPath+"/.mimir")
database = DataBase(dbRootPath, "new", config)
allIDs = database.getAllValuebyItemName("ID")
with pytest.raises(NotImplementedError):
randID = database.getRandomEntry(chooseFrom = allIDs, weighted = True)
#assert randID in allIDs
def test_19_DB_getSortedIDs(preCreatedDB):
with pytest.raises(KeyError):
sorted_addedIDs = preCreatedDB.getSortedIDs("BLUBB")
with pytest.raises(NotImplementedError):
sorted_addedIDs = preCreatedDB.getSortedIDs("ListItem")
#Get sorted by Added (SingleItem with datetime)
expected_added = ["0", "2", "3", "5", "1", "4"]
sorted_addedIDs = preCreatedDB.getSortedIDs("Added", reverseOrder = True)
print(sorted_addedIDs)
for iId, expected_id in enumerate(expected_added):
assert expected_id == sorted_addedIDs[iId]
#Same but with reverse order --> Test if ID sorting is independent of reverse
expected_added = ["4", "1", "5", "2", "3", "0"]
sorted_addedIDs = preCreatedDB.getSortedIDs("Added", reverseOrder = False)
for iId, expected_id in enumerate(expected_added):
assert expected_id == sorted_addedIDs[iId]
#Get sorted by Changed (Listentry with datetime)
expected_changed = ["0", "3", "4", "5", "1", "2"]
sorted_changedIDs = preCreatedDB.getSortedIDs("Changed")
for iId, expected_id in enumerate(expected_changed):
assert expected_id == sorted_changedIDs[iId]
#Get sorted by Singleitem (alphabetically)
expected_singleItem = ["5", "4", "3", "2", "1", "0"]
sorted_singleIDs = preCreatedDB.getSortedIDs("SingleItem", reverseOrder = False)
for iId, expected_id in enumerate(expected_singleItem):
assert expected_id == sorted_singleIDs[iId]
#Get sorted by Rating (numerically)
expected_rating = ["3", "2", "4", "1", "5", "0"]
sorted_ratingIDs = preCreatedDB.getSortedIDs("Rating")
for iId, expected_id in enumerate(expected_rating):
assert expected_id == sorted_ratingIDs[iId]
def test_20_DB_updatedOpened(preCreatedDB):
preCreatedDB.updateOpened("1")
thisDate, thisTime = getDataTime()
changedEntry = preCreatedDB.getEntryByItemName("ID", "1")[0]
change_datetime = list(changedEntry.getAllValuesbyName("Opened"))[0]
date, time = change_datetime.split("|")
assert date == thisDate
assert time[0:1] == thisTime[0:1]
def test_21_DB_guessSecondaryDBItembyPath(preCreatedDB):
#1: Test if "elements" are part of the secondaryDB
newFile = "testStructure/Blue/Xi.mp4"
options = preCreatedDB.getItemsPyPath(newFile)
assert "Xi" in options["SingleItem"]
assert "Blue" in options["ListItem"]
assert options["SingleItem"] == set(["Xi"]) and options["ListItem"] == set(["Blue"])
#2: Test if it works when subparts of a "element" are part of secondaryDB
#2.1: Fast version which will not try to split strings
newFile = "testStructure/Pink/BlueXi.mp4"
options = preCreatedDB.getItemsPyPath(newFile, fast=True)
assert "Xi" not in options["SingleItem"]
assert "Blue" not in options["ListItem"]
assert options["SingleItem"] == set([]) and options["ListItem"] == set([])
#2.2; Test with enables splitting
options = preCreatedDB.getItemsPyPath(newFile)
assert "Xi" in options["SingleItem"]
assert "Blue" in options["ListItem"]
assert options["SingleItem"] == set(["Xi"]) and options["ListItem"] == set(["Blue"])
#2.3: Test lowercase match
newFile = "testStructure/Pink/bluexi.mp4"
options = preCreatedDB.getItemsPyPath(newFile)
assert "Xi" in options["SingleItem"]
assert "Blue" in options["ListItem"]
assert options["SingleItem"] == set(["Xi"]) and options["ListItem"] == set(["Blue"])
#3: Test for items with whitespace - Find exact match
newFile = "testStructure/Pink/Double_Orange.mp4"
options = preCreatedDB.getItemsPyPath(newFile, whitespaceMatch = True)
assert options["ListItem"] == set(["Double Orange"])
#3.1 Test whitespace lowercase:
newFile = "testStructure/Pink/double_orange.mp4"
options = preCreatedDB.getItemsPyPath(newFile, whitespaceMatch = True)
assert options["ListItem"] == set(["Double Orange"])
#4: Test for items with whitespace - find partial match
newFile = "testStructure/Pink/Orange_Hand.mp4"
options = preCreatedDB.getItemsPyPath(newFile)
assert "Double Orange" in options["ListItem"]
assert "Triple Orange" in options["ListItem"]
assert options["ListItem"] == set(["Triple Orange", "Double Orange"])
    #5: Test for items with whitespace - find partial match, exact match deactivated
newFile = "testStructure/Pink/Double_Orange.mp4"
options = preCreatedDB.getItemsPyPath(newFile, whitespaceMatch = False)
assert options["ListItem"] == set(["Triple Orange", "Double Orange"])
    #Check if it works with new values that are added before save/load
newFile = "testStructure/folder/Red.mp4"
options = preCreatedDB.getItemsPyPath(newFile)
assert "Red" not in options["ListItem"]
preCreatedDB.modifyListEntry("0", "ListItem", "Red", byID = True)
options = preCreatedDB.getItemsPyPath(newFile)
print("-------------------",options)
assert "Red" in options["ListItem"]
def test_22_DB_splitBySep(preCreatedDB):
split1 = preCreatedDB.splitBySep(".", ["a.b","c.d-e"])
assert ["a","b","c","d-e"] == split1
split2 = preCreatedDB.splitBySep("-", split1)
assert ["a","b","c","d","e"] == split2
def test_23_DB_recursiveSplit(preCreatedDB):
strings2Split = "A-b_c+d.e"
strings2Expect = set(["A","b","c","d","e"])
assert strings2Expect == preCreatedDB.splitStr(strings2Split)
@pytest.mark.parametrize("ID, nExpected", [("4", 3), ("1", 0), ("3", 1)])
def test_24_DB_countListItem(ID, nExpected, preCreatedDB):
assert preCreatedDB.getCount(ID, "ListItem", byID = True) == nExpected
def test_25_DB_cachedValues(mocker, preCreatedDB):
assert preCreatedDB.cachedValuesChanged.keys() == preCreatedDB.model.allItems
mocker.spy(DataBase, "cacheAllValuebyItemName")
###### Test caching for ListItem entries
values_ListItem_preChange = preCreatedDB.getAllValuebyItemName("ListItem")
assert DataBase.cacheAllValuebyItemName.call_count == 0
preCreatedDB.modifyListEntry("4", "ListItem", "Cyan", byID = True)
values_ListItem_postChange = preCreatedDB.getAllValuebyItemName("ListItem")
assert DataBase.cacheAllValuebyItemName.call_count == 1
assert list(set(values_ListItem_postChange)-set(values_ListItem_preChange)) == ["Cyan"]
###### Test caching for SingleItem Entries
Entry4 = preCreatedDB.getEntryByItemName("ID", "4")[0]
oldValue = Entry4.getItem("SingleItem").value
newValue = "Gamma"
preCreatedDB.modifySingleEntry("4", "SingleItem", newValue, byID = True)
values_ListItem_postChange = preCreatedDB.getAllValuebyItemName("SingleItem")
assert DataBase.cacheAllValuebyItemName.call_count == 2
assert oldValue not in values_ListItem_postChange and newValue in values_ListItem_postChange
def test_26_DB_changedPaths(preCreatedDB):
updatedFiles = preCreatedDB.checkChangedPaths()
assert updatedFiles == []
preCreatedDB.modifySingleEntry("folder2/folder2file2.mp4", "Path", "folder2file2.mp4", byPath = True)
thisID = preCreatedDB.getEntryByItemName("Path", "folder2file2.mp4")[0].getItem("ID").value
updatedFiles = preCreatedDB.checkChangedPaths()
thisNewPath = preCreatedDB.getEntryByItemName("ID", thisID)[0].getItem("Path").value
theID, oldPath, newPath = updatedFiles[0]
assert theID == thisID
assert oldPath == "folder2file2.mp4"
assert newPath == "folder2/folder2file2.mp4"
assert thisNewPath == "folder2/folder2file2.mp4"
def test_27_DB_missingFiles(preCreatedDB):
missingFiles = preCreatedDB.getMissingFiles()
assert missingFiles == []
os.system("rm "+dir2tests+"/testStructure/folder2/folder2file2.mp4")
missingFiles = preCreatedDB.getMissingFiles()
assert missingFiles == ["folder2/folder2file2.mp4"]
os.system("touch "+dir2tests+"/testStructure/folder2/folder2file2.mp4")
def test_28_DB_checkMissingFileAndReSort(preCreatedDB):
preCreatedDB2 = copy.deepcopy(preCreatedDB)
os.system("rm "+dir2tests+"/testStructure/folder2/folder2file2.mp4")
removedID = preCreatedDB2.getEntryByItemName("Path", "folder2/folder2file2.mp4")[0].getItem("ID").value
movedPath = preCreatedDB2.getEntryByItemName("ID",
str(preCreatedDB2.maxID))[0].getItem("Path").value
oldMaxID = preCreatedDB2.maxID
IDChanges = preCreatedDB2.checkMissingFiles()
os.system("touch "+dir2tests+"/testStructure/folder2/folder2file2.mp4")
oldID, newID = IDChanges[0]
assert newID == removedID
assert oldID == oldMaxID
assert movedPath == preCreatedDB2.getEntryByItemName("ID", removedID)[0].getItem("Path").value
if __name__ == "__main__":
unittest.main()
| 2.078125
| 2
|
data_loader.py
|
LiamLYJ/scene_seg
| 6
|
12777020
|
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import os
import pickle
import nltk
from PIL import Image
import cv2
import glob
import random
# deprecated
# def get_data_direct(img_size, texture_size,
# imgs_fn = None, textures_fn = None, sample_dir = None, sep = ':', format = '*.png',
# mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]):
# if sample_dir is None:
# imgs_fn = imgs_fn.split(sep)
# textures_fn = textures_fn.split(sep)
# else:
# all_images = glob.glob(os.path.join(sample_dir, format))
# all_images = sorted(all_images)
# imgs_fn = []
# textures_fn = []
# for file in all_images:
# if 'img' in file.split('/')[-1]:
# imgs_fn.append(file)
# elif 'texture' in file.split('/')[-1]:
# textures_fn.append(file)
# else:
#             raise ValueError('not sure which type this one is: %s'%(file))
# batch_size = len(imgs_fn)
# assert len(imgs_fn) == len(textures_fn)
# imgs = []
# textures = []
# for index in range(batch_size):
# img_cur = Image.open(imgs_fn[index])
# img_cur = img_cur.resize([img_size, img_size])
# # it could be rgba
# img_cur = (np.asarray(img_cur)[...,:3] / 255.0 - mean) / std
# imgs.append(img_cur)
#
# texture_cur = Image.open(textures_fn[index])
# texture_cur = texture_cur.resize([texture_size, texture_size])
# # it could be rgba
# texture_cur = (np.asarray(texture_cur)[...,:3] / 255.0 - mean) / std
# textures.append(texture_cur)
#
# imgs = np.array(imgs).reshape([batch_size, img_size, img_size, 3])
# textures = np.array(textures).reshape([batch_size, texture_size, texture_size, 3])
# imgs = np.transpose(imgs, [0, 3, 1, 2])
# textures = np.transpose(textures, [0, 3, 1, 2])
# return imgs, textures
#
def get_data_direct(img_size, imgs_dir, texture_size = None, textures_dir = None,
format = '*.png',
mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]):
imgs = glob.glob(os.path.join(imgs_dir, format))
imgs = sorted(imgs)
if textures_dir is not None:
textures = glob.glob(os.path.join(textures_dir, format))
textures = sorted(textures)
batch_size = len(imgs) * len(textures) if textures_dir is not None else len(imgs)
imgs_data = []
textures_data = []
if textures_dir is not None:
assert texture_size is not None
for img_index in range(len(imgs)):
for texture_index in range(len(textures)):
img_cur = Image.open(imgs[img_index])
img_cur = img_cur.resize([img_size, img_size])
# it could be rgba
img_cur = (np.asarray(img_cur)[...,:3] / 255.0 - mean) / std
imgs_data.append(img_cur)
texture_cur = Image.open(textures[texture_index])
texture_cur = texture_cur.resize([texture_size, texture_size])
# it could be rgba
texture_cur = (np.asarray(texture_cur)[...,:3] / 255.0 - mean) / std
textures_data.append(texture_cur)
else:
for img_index in range(len(imgs)):
img_cur = Image.open(imgs[img_index])
img_cur = img_cur.resize([img_size, img_size])
# it could be rgba
img_cur = (np.asarray(img_cur)[...,:3] / 255.0 - mean) / std
imgs_data.append(img_cur)
imgs_data = np.array(imgs_data).reshape([batch_size, img_size, img_size, 3])
imgs_data = np.transpose(imgs_data, [0, 3, 1, 2])
if textures_dir is not None:
textures_data = np.array(textures_data).reshape([batch_size, texture_size, texture_size, 3])
textures_data = np.transpose(textures_data, [0, 3, 1, 2])
return imgs_data, textures_data
class texture_seg_dataset(object):
def __init__(self, data_path, img_size, segmentation_regions, texture_size,
shuffle = True, use_same_from = True,
mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]): # from torch normalize
self.shuffle = shuffle
self.img_size = img_size
self.segmentation_regions = segmentation_regions
self.texture_size = texture_size
self.folders = glob.glob(os.path.join(data_path, '*/'))
self.use_same_from = use_same_from
self.mean = mean
self.std = std
        # the number of segmentation regions must not exceed the number of scene folders
assert (len(self.folders) >= self.segmentation_regions)
def generate_random_masks(self, points = None):
# use batch_size = 1
# return [size, size, segmentation_regions]
batch_size = 1
xs, ys = np.meshgrid(np.arange(0, self.img_size), np.arange(0, self.img_size))
if points is None:
n_points = [self.segmentation_regions]
points = [np.random.randint(0, self.img_size, size=(n_points[i], 2)) for i in range(batch_size)]
masks = []
for b in range(batch_size):
dists_b = [np.sqrt((xs - p[0])**2 + (ys - p[1])**2) for p in points[b]]
voronoi = np.argmin(dists_b, axis=0)
masks_b = np.zeros((self.img_size, self.img_size, self.segmentation_regions))
for m in range(self.segmentation_regions):
masks_b[:,:,m][voronoi == m] = 1
masks.append(masks_b)
return masks[0]
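    # Illustrative note (not from the original code): the returned array has shape
    # (img_size, img_size, segmentation_regions), and because argmin assigns every
    # pixel to exactly one Voronoi region, masks.sum(axis=-1) is an all-ones array.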
def random_crop(self, image, crop_height, crop_width):
if (crop_width <= image.shape[1]) and (crop_height <= image.shape[0]):
x = np.random.randint(0, image.shape[1]-crop_width)
y = np.random.randint(0, image.shape[0]-crop_height)
return image[y:y+crop_height, x:x+crop_width, :]
else:
raise Exception('Crop shape exceeds image dimensions!')
def get_data(self, format = '*.jpg'):
mask = self.generate_random_masks()
choose_from = []
img = np.zeros([self.img_size, self.img_size, 3])
sampled_folders = random.sample(self.folders, self.segmentation_regions)
texture_mask = []
for index, folder in enumerate(sampled_folders):
files = glob.glob(os.path.join(folder, format))
file_cur = random.choice(files)
# print (file_cur)
img_cur = Image.open(file_cur)
img_cur = img_cur.resize([self.img_size, self.img_size])
img_cur = (np.asarray(img_cur) / 255.0 - self.mean) / self.std
img[mask[..., index] == 1] = img_cur[mask[..., index] == 1]
if self.use_same_from:
texture_cur = img_cur
else:
file_cur = random.choice(files)
texture_cur = np.asarray(Image.open(file_cur))
texture = self.random_crop(texture_cur, self.texture_size, self.texture_size)
texture_mask.append({'mask': mask[...,index], 'texture':texture})
return img, texture_mask
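    # Illustrative note: get_data() returns a composite image of shape
    # (img_size, img_size, 3) together with one {'mask': ..., 'texture': ...} dict
    # per segmentation region, where 'mask' is (img_size, img_size) and 'texture'
    # is a (texture_size, texture_size, 3) crop of the corresponding source image.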
def feed(self, batch_size = None):
if batch_size is None:
return self.get_data()
else:
img_texture_mask = []
            # add all regions of one image input (commented out below)
# for _ in range(batch_size // self.segmentation_regions + 1):
# img, texture_mask = self.get_data()
# for index in range(self.segmentation_regions):
# patch = {}
# patch['img'] = img
# patch['texture'] = texture_mask[index]['texture']
# patch['mask'] = texture_mask[index]['mask']
# img_texture_mask.append(patch)
            # add each one separately
for _ in range(batch_size):
img, texture_mask = self.get_data()
# random choice one from cluster
index = np.random.choice(self.segmentation_regions, 1)[0]
patch = {}
patch['img'] = img
patch['texture'] = texture_mask[index]['texture']
patch['mask'] = texture_mask[index]['mask']
img_texture_mask.append(patch)
img_texture_mask = img_texture_mask[:batch_size]
if self.shuffle:
random.shuffle(img_texture_mask)
imgs = [item['img'] for item in img_texture_mask]
textures = [item['texture'] for item in img_texture_mask]
masks = [item['mask'] for item in img_texture_mask]
imgs = np.array(imgs).reshape([batch_size, self.img_size, self.img_size, 3])
textures = np.array(textures).reshape([batch_size, self.texture_size, self.texture_size, 3])
masks = np.array(masks).reshape([batch_size, self.img_size, self.img_size, 1])
imgs = np.transpose(imgs, [0, 3, 1, 2])
textures = np.transpose(textures, [0, 3, 1, 2])
masks = np.transpose(masks, [0, 3, 1, 2])
return imgs, textures, masks
if __name__ == '__main__':
data_set = texture_seg_dataset('./dataset/dtd/images', img_size = 256, segmentation_regions= 3, texture_size = 64)
imgs, textures, masks = data_set.feed(batch_size = 2)
print ('img shape: ', imgs.shape)
print ('texture shape: ', textures.shape )
print ('masks shape: ', masks.shape)
    raise SystemExit  # stop after the shape check; the visualisation code below is skipped
img, texture_mask = data_set.get_data()
print (img.shape)
print (len(texture_mask))
img = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2RGB)
cv2.imwrite('test_img.png', img)
# cv2.imshow('img', img/ 255.0)
for i in range(3):
texture_mask[i]['texture'] = cv2.cvtColor(np.uint8(texture_mask[i]['texture']) , cv2.COLOR_BGR2RGB)
        # cv2.imwrite('test_texture_%d.png'%(i), texture_mask[i]['texture'])
cv2.imshow('mask_%d'%(i), texture_mask[i]['mask'])
cv2.imshow('texture_%d'%(i), texture_mask[i]['texture'])
cv2.waitKey(0)
| 2.40625
| 2
|
dynibatch/utils/segment_container.py
|
dynilib/dynibatch
| 4
|
12777021
|
<reponame>dynilib/dynibatch
#The MIT License
#
#Copyright (c) 2017 DYNI machine learning & bioacoustics team - Univ. Toulon
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import re
import random
import joblib
import soundfile as sf
from dynibatch.utils.segment import Segment, CommonLabels
from dynibatch.utils import exceptions
SC_EXTENSION = ".sc.jl"
ALLOWED_AUDIO_EXT = [".wav"]
class SegmentContainer:
"""
A segment container contains the list of segments related to an audio file.
"""
def __init__(self, audio_path):
self._audio_path = audio_path # relative to some root data path
self._segments = []
def __eq__(self, other):
return self.__dict__ == other.__dict__
@property
def audio_path(self):
return self._audio_path
@property
def segments(self):
return self._segments
@segments.setter
def segments(self, segments):
self._segments = segments
@staticmethod
def load(path):
segment_container = joblib.load(path)
if not isinstance(segment_container, SegmentContainer):
raise TypeError(
"Object in {} is not an instance of SegmentContainer".format(
path))
return segment_container
@property
def labels(self):
# get segment label set
return set(s.label for s in self._segments)
@labels.setter
def labels(self, label):
# set label to all segments
for segment in self._segments:
segment.label = label
@property
def n_segments(self):
return len(self._segments)
@property
def n_active_segments(self):
return sum(1 for s in self._segments if
hasattr(s, "activity") and s.activity)
def save(self, path, compress=0):
joblib.dump(self,
os.path.join(path,
os.path.splitext(
os.path.basename(
self._audio_path))[0] + SC_EXTENSION),
compress=compress)
def n_segments_with_label(self, label):
return sum(1 for s in self._segments if s.label == label)
def n_active_segments_with_label(self, label):
return sum(1 for s in self._segments if
hasattr(s, "activity") and s.activity and s.label == label)
def has_features(self, features):
# assumes that if the last segment has the feature,
# all segments do
# return a list of boolean
if (self._segments and
hasattr(self._segments[-1], "features")):
return [f in self._segments[-1].features for f in features]
return [False for f in features]
def create_segment_containers_from_audio_files(audio_root,
shuffle=False,
**kwargs):
"""
Args:
audio_root
shuffle
        seg_duration (optional)
        seg_overlap (optional)
Yields: segment container
"""
audio_filenames = []
for root, _, filenames in os.walk(audio_root):
for filename in filenames:
_, extension = os.path.splitext(filename)
if extension in ALLOWED_AUDIO_EXT:
audio_filenames.append(
os.path.relpath(os.path.join(root, filename),
audio_root)) # only get audio files
if shuffle:
random.shuffle(audio_filenames)
else:
# os.walk does not generate files always in the same order, so we sort
# them
audio_filenames.sort()
for filename in audio_filenames:
yield create_segment_container_from_audio_file(
(audio_root, filename),
**kwargs)
def create_segment_container_from_audio_file(audio_path_tuple, **kwargs):
"""
Args:
audio_path_tuple: audio file path as a tuple (<audio root>, <audio file
relative path>)
        seg_duration (optional)
        seg_overlap (optional)
Yields: segment container
"""
# if a str is passed as audio_path_tuple, sc.audio_path will be wrong,
# so we must make sure it is a tuple
if not isinstance(audio_path_tuple, tuple):
raise TypeError("audio_path_tuple must be a tuple")
with sf.SoundFile(os.path.join(*audio_path_tuple)) as audio_file:
segment_container = SegmentContainer(audio_path_tuple[1])
n_samples = len(audio_file)
        sample_rate = audio_file.samplerate
duration = float(n_samples) / sample_rate
if "seg_duration" in kwargs and kwargs["seg_duration"]:
segment_container.segments = create_fixed_duration_segments(duration, **kwargs)
else:
segment_container.segments.append(Segment(0, duration))
return segment_container
def create_segment_containers_from_seg_files(seg_file_root,
labels,
audio_file_ext=".wav",
seg_file_ext=".seg",
seg_file_separator="\t"):
"""
Args:
- seg_file_root
- labels: list of label set to be used
- (audio_file_ext)
- (seg_file_ext)
- (seg_file_separator)
Yields: segment container
"""
for root, _, filenames in os.walk(seg_file_root):
for filename in filenames:
_, ext = os.path.splitext(filename)
if ext != seg_file_ext:
continue # only get seg files
seg_file_path_tuple = (
seg_file_root,
os.path.relpath(
os.path.join(root, filename.replace(seg_file_ext, audio_file_ext)),
seg_file_root))
yield create_segment_container_from_seg_file(
seg_file_path_tuple,
labels,
audio_file_ext,
seg_file_ext,
seg_file_separator)
def create_segment_container_from_seg_file(seg_file_path_tuple,
label_dict,
audio_file_ext=".wav",
seg_file_ext=".seg",
seg_file_separator="\t"):
"""
Args:
- seg_file_path_tuple: seg file path as a tuple (<audio root>,
<audio file relative path>)
- label_dict: dict of labels with id:name mapping
- (audio_file_ext)
- (seg_file_ext)
- (seg_file_separator)
Yields: segment container
"""
# if a str is passed as seg_file_path_tuple, sc.audio_path will be wrong,
# so we must make sure it is a tuple
if not isinstance(seg_file_path_tuple, tuple):
raise TypeError("seg_file_path_tuple must be a tuple")
if audio_file_ext not in ALLOWED_AUDIO_EXT:
raise exceptions.ParameterError(
"{} is not an allowed audio file extension")
with open(os.path.join(*seg_file_path_tuple), "r") as audio_file:
segment_container = SegmentContainer(
seg_file_path_tuple[1].replace(seg_file_ext,
audio_file_ext))
for line in audio_file:
start_time, end_time, label_id = _parse_segment_file_line(
line, seg_file_separator)
segment_container.segments.append(
Segment(start_time,
end_time,
label_id if label_id in label_dict.keys() else CommonLabels.unknown.value))
return segment_container
def load_segment_containers_from_dir(path):
for root, _, filenames in os.walk(path):
for filename in filenames:
_, ext = os.path.splitext(filename)
if ext != SC_EXTENSION:
continue # only get segment containers
yield SegmentContainer.load(os.path.join(root, filename))
def create_fixed_duration_segments(file_duration, seg_duration, seg_overlap=0.5):
segments = []
start_time = 0.0
# Create chunks of length seg_duration.
# The last chunk is ignored.
while start_time + seg_duration < file_duration:
segments.append(Segment(start_time, start_time + seg_duration))
start_time += (1 - seg_overlap) * seg_duration
# If the audio file is shorter than seg_duration,
# add one segment anyway
if not segments:
segments.append(Segment(start_time, start_time + seg_duration))
return segments
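# Illustrative example (not part of the original module): with file_duration=10.0,
# seg_duration=4.0 and seg_overlap=0.5, segments start at 0.0, 2.0 and 4.0; the
# chunk that would start at 6.0 is dropped because the loop requires
# start_time + seg_duration to be strictly less than file_duration.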
def _parse_segment_file_line(line, field_separator):
start_time, end_time, label_id = line.split(field_separator)
start_time = float(start_time)
end_time = float(end_time)
label_id = int(label_id)
return start_time, end_time, label_id
| 1.851563
| 2
|
api_v3/tests/misc/test_oauth2.py
|
DrWhax/id-backend
| 3
|
12777022
|
<reponame>DrWhax/id-backend<filename>api_v3/tests/misc/test_oauth2.py
from django.test import TestCase
from api_v3.factories import SubscriberFactory, ProfileFactory
from api_v3.misc import oauth2
from api_v3.models import Subscriber, Action
class TicketAttachmentCommentFactoryMixin(TestCase):
def setUp(self):
self.new_user = ProfileFactory.create()
self.subscriber = SubscriberFactory.create(
user=None, email=self.new_user.email)
def test_activate_user(self):
self.new_user.is_active = False
self.new_user.save()
oauth2.activate_user(backend=None, user=self.new_user)
self.assertTrue(self.new_user.is_active)
def test_map_email_to_subscriber(self):
activities = Action.objects.filter(verb='subscriber:update:joined')
activities_count = activities.count()
oauth2.map_email_to_subscriber(
backend=None, user=self.new_user)
subscriber = Subscriber.objects.get(id=self.subscriber.id)
self.assertEqual(activities.count(), activities_count + 1)
self.assertEqual(subscriber.user, self.new_user)
self.assertIsNone(subscriber.email)
oauth2.map_email_to_subscriber(
backend=None, user=self.new_user)
self.assertEqual(activities.count(), activities_count + 1)
| 2.203125
| 2
|
util/loader.py
|
kangtastic/cryptopals
| 1
|
12777023
|
# -*- coding: utf-8 -*-
import base64
import os
def loader(file, decoder, split=True):
file_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
cdata_path = os.path.join(os.path.split(file_dir)[0], "static", file)
if decoder == "hexstring":
decoder = bytes.fromhex
elif decoder == "base64":
decoder = base64.b64decode
with open(cdata_path) as f:
if split:
return list(map(decoder, f))
else:
return decoder(f.read())
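# Hedged usage sketch (the filename below is hypothetical):
#   loader("challenge.txt", "base64")               # base64-decode each line of static/challenge.txt
#   loader("challenge.txt", "base64", split=False)  # decode the whole file as a single blob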
| 2.78125
| 3
|
dbcron/tests/test_calendar.py
|
cloudspectatordevelopment/django-dbcron
| 0
|
12777024
|
from django.test import TestCase
from dbcron.calendar import JobCalendar
from dbcron import models
from dbcron.tests.factories import JobFactory
class JobCalendarFormatMonthTest(TestCase):
factory = JobFactory
jobs = models.Job.objects.all()
def test_meth(self):
self.factory.create_batch(5, min=0, hou=0)
calendar = JobCalendar(self.jobs)
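        # Smoke test: render every month and make sure formatting does not raise.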
for year in range(2000, 2005):
for month in range(1, 13):
html = calendar.formatmonth(year, month)
class JobCalendarFormatWeekTest(TestCase):
factory = JobFactory
jobs = models.Job.objects.all()
def test_meth(self):
self.factory.create_batch(5, min=0, hou=0)
calendar = JobCalendar(self.jobs)
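        # Smoke test: render every week and make sure formatting does not raise.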
for year in range(2000, 2005):
for week in range(1, 53):
html = calendar.formatweekofmonth(year, week)
| 2.484375
| 2
|
src/vfxnaming/naming.py
|
xiancg/naming
| 8
|
12777025
|
<filename>src/vfxnaming/naming.py
# coding=utf-8
# MIT License
# Copyright (c) 2017 <NAME> and modified by <NAME>- Xian
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, print_function
import os
import json
import vfxnaming.rules as rules
import vfxnaming.tokens as tokens
from vfxnaming.logger import logger
from vfxnaming.error import SolvingError
import six
NAMING_REPO_ENV = "NAMING_REPO"
def parse(name):
"""Get metadata from a name string recognized by the currently active rule.
-For rules with repeated tokens:
If your rule uses the same token more than once, the returned dictionary keys
will have the token name and an incremental digit next to them so they can be
differentiated.
Args:
name (str): Name string e.g.: C_helmet_001_MSH
Returns:
dict: A dictionary with keys as tokens and values as given name parts.
e.g.: {'side':'C', 'part':'helmet', 'number': 1, 'type':'MSH'}
"""
rule = rules.get_active_rule()
return rule.parse(name)
def solve(*args, **kwargs):
"""Given arguments are used to build a name following currently active rule.
-For rules with repeated tokens:
If your rule uses the same token more than once, pass arguments with the token
name and add an incremental digit
i.e.: side1='C', side2='R'
If your rule uses the same token more than once, you can also pass a single
instance of the argument and it'll be applied to all repetitions.
i.e.: side='C'
If your rule uses the same token more than once, you can ignore one of the repetitions,
and the solver will use the default value for that token.
i.e.: side1='C', side4='L'
Raises:
SolvingError: A required token was passed as None to keyword arguments.
SolvingError: Missing argument for one field in currently active rule.
Returns:
str: A string with the resulting name.
"""
rule = rules.get_active_rule()
# * This accounts for those cases where a token is used more than once in a rule
repeated_fields = dict()
for each in rule.fields:
if each not in repeated_fields.keys():
if rule.fields.count(each) > 1:
repeated_fields[each] = 1
fields_with_digits = list()
for each in rule.fields:
if each in repeated_fields.keys():
counter = repeated_fields.get(each)
repeated_fields[each] = counter + 1
fields_with_digits.append("{}{}".format(each, counter))
else:
fields_with_digits.append(each)
values = dict()
i = 0
fields_inc = 0
for f in fields_with_digits:
token = tokens.get_token(rule.fields[fields_inc])
if token:
# Explicitly passed as keyword argument
if kwargs.get(f) is not None:
values[f] = token.solve(kwargs.get(f))
fields_inc += 1
continue
# Explicitly passed as keyword argument without repetitive digits
# Use passed argument for all field repetitions
elif kwargs.get(rule.fields[fields_inc]) is not None:
values[f] = token.solve(kwargs.get(rule.fields[fields_inc]))
fields_inc += 1
continue
elif token.required and kwargs.get(f) is None and len(args) == 0:
raise SolvingError("Token {} is required.")
# Not required and not passed as keyword argument
elif not token.required and kwargs.get(f) is None:
values[f] = token.solve()
fields_inc += 1
continue
# Implicitly passed as positional argument
try:
values[f] = token.solve(args[i])
i += 1
fields_inc += 1
continue
except IndexError as why:
raise SolvingError("Missing argument for field '{}'\n{}".format(f, why))
logger.debug("Solving rule '{}' with values {}".format(rule.name, values))
return rule.solve(**values)
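# Hedged usage sketch (assumes a rule matching the parse() example above is active):
#   solve(side='C', part='helmet', number=1, type='MSH')  # -> 'C_helmet_001_MSH'
#   parse('C_helmet_001_MSH')  # -> {'side': 'C', 'part': 'helmet', 'number': 1, 'type': 'MSH'}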
def get_repo():
"""Get repository location from either global environment variable or local user,
giving priority to environment variable.
    Environment variable name: NAMING_REPO
Returns:
str: Naming repository location
"""
env_repo = os.environ.get(NAMING_REPO_ENV)
userPath = os.path.expanduser("~")
module_dir = os.path.split(__file__)[0]
config_location = os.path.join(module_dir, "cfg", "config.json")
config = dict()
with open(config_location) as fp:
config = json.load(fp)
local_repo = os.path.join(userPath, "." + config["local_repo_name"], "naming_repo")
result = env_repo or local_repo
logger.debug("Repo found: {}".format(result))
return result
def save_session(repo=None):
"""Save rules, tokens and config files to the repository.
Raises:
IOError, OSError: Repository directory could not be created.
Args:
        repo (str, optional): Absolute path to a repository. Defaults to None.
Returns:
bool: True if saving session operation was successful.
"""
repo = repo or get_repo()
if not os.path.exists(repo):
try:
os.mkdir(repo)
except (IOError, OSError) as why:
raise why
# save tokens
for name, token in six.iteritems(tokens.get_tokens()):
logger.debug("Saving token: '{}' in {}".format(name, repo))
tokens.save_token(name, repo)
# save rules
for name, rule in six.iteritems(rules.get_rules()):
if not isinstance(rule, rules.Rule):
continue
logger.debug("Saving rule: '{}' in {}".format(name, repo))
rules.save_rule(name, repo)
# extra configuration
active = rules.get_active_rule()
config = {"set_active_rule": active.name if active else None}
filepath = os.path.join(repo, "naming.conf")
logger.debug("Saving active rule: {} in {}".format(active.name, filepath))
with open(filepath, "w") as fp:
json.dump(config, fp, indent=4)
return True
def load_session(repo=None):
"""Load rules, tokens and config from a repository, and create
Python objects in memory to work with them.
Args:
repo (str, optional): Absolute path to a repository. Defaults to None.
Returns:
bool: True if loading session operation was successful.
"""
repo = repo or get_repo()
if not os.path.exists(repo):
logger.warning("Given repo directory does not exist: {}".format(repo))
return False
namingconf = os.path.join(repo, "naming.conf")
if not os.path.exists(namingconf):
logger.warning("Repo is not valid. naming.conf not found {}".format(namingconf))
return False
rules.reset_rules()
tokens.reset_tokens()
# tokens and rules
for dirpath, dirnames, filenames in os.walk(repo):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filename.endswith(".token"):
logger.debug("Loading token: {}".format(filepath))
tokens.load_token(filepath)
elif filename.endswith(".rule"):
logger.debug("Loading rule: {}".format(filepath))
rules.load_rule(filepath)
# extra configuration
if os.path.exists(namingconf):
logger.debug("Loading active rule: {}".format(namingconf))
with open(namingconf) as fp:
config = json.load(fp)
rules.set_active_rule(config.get('set_active_rule'))
return True
| 1.960938
| 2
|
mirsnp/formatpt.py
|
ghkly/miRsnp
| 0
|
12777026
|
#!/usr/bin/python3
# _*_ coding: utf-8 _*_
# @Time : 2022/1/31 19:21
import re
import os
import pandas as pd
from mirsnp.parse_rnahybrid import parse_rnahybrid, get_position, get_mirna_name, get_trans_name
from mirsnp.utils import check_outputf, DEFAULT_ENERGY, INTERVAL
def get_pt_map(pt):
pt_map = {}
with open(pt) as fr:
aligns = re.findall(f"target: .*?5'\n\n\n", fr.read(), re.S)
for al in aligns:
position = get_position(al)
trans = get_trans_name(al)
mirna = get_mirna_name(al)
flag = f"{trans}:{mirna}:{position}"
pt_map[flag] = al.replace('\n', '#')
return pt_map
def get_target_pt(flag, pt_map):
fs = flag.split(':')
k = f"{fs[0]}:{fs[1]}:{fs[-1]}"
return pt_map.get(k, "")
def parse_common_variants(variants_file):
df = pd.read_csv(variants_file, sep='\t')
gene_map = dict(zip(df['transcript'], df['gene']))
strand_map = dict(zip(df['transcript'], df['strand']))
return gene_map, strand_map
def isin_interval(tpos, seq_pos):
tpos = int(tpos)
start, end = seq_pos.split(' ')[1].split('-')
if int(start) <= tpos <= int(end):
return 'Yes'
return 'No'
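# Illustrative example (values are hypothetical): isin_interval("15", "ACGU 10-20")
# returns 'Yes' because position 15 falls inside the 10-20 range encoded after the
# space in seq_pos, while isin_interval("25", "ACGU 10-20") returns 'No'.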
def get_target_pos(mirna53, target53, flag, interval):
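    # Returns 'Yes' if the variant position encoded in the flag falls inside the
    # stretch of the target that is aligned to miRNA positions interval[0]..interval[1]
    # (used to fill the 'variant_in_2-8' column), otherwise 'No'.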
pos = int(flag.split(':')[-1])
tpos = int(flag.split(':')[0].split('_')[6])
target35 = ''.join(reversed(target53))
pos_range = []
m = 0
for i in target53:
if i != '-':
pos_range.append(pos+m)
m += 1
else:
pos_range.append(-1)
rev_range = list(reversed(pos_range))
spos = 1
mirna_seqs = []
target_seqs = []
target_poss = []
for n, b in enumerate(mirna53):
tb = target35[n]
if b != '-':
if interval[0] <= spos <= interval[1]:
mirna_seqs.append(b)
target_seqs.append(tb)
target_poss.append(rev_range[n])
spos += 1
else:
if interval[0] <= spos <= interval[1]:
target_seqs.append(tb)
target_poss.append(rev_range[n])
mirna_seq = ''.join(mirna_seqs)
target_seq = ''.join(reversed(target_seqs)).replace('-', '')
fpos = list(filter(lambda x: x != -1, target_poss))
target_poss = list(reversed(fpos))
target_seq_pos = f"{target_seq} {target_poss[0]}-{target_poss[-1]}"
intgt = isin_interval(tpos, target_seq_pos)
# "{}|{}|{}".format(mirna_seq, target_seq_pos, intgt)
return intgt
def fmt_pt(pt, tag, trans_info, interval=INTERVAL):
pp = parse_rnahybrid(pt, trans_info)
pt_map = get_pt_map(pt)
df = pd.read_csv(pp, sep='\t', low_memory=False)
if not df.empty:
if df.shape[0] >= 1 and df.shape[1] > 3:
df = df[df['region'] == 1].drop_duplicates()
df['flag'] = df["transcript"] + ":" + df["miRNA"].astype(str) + \
":" + df['gene'].astype(str) + \
":" + df['strand'].astype(str) + \
":" + df['position'].astype(str)
df = df[["flag", "pvalue", "energy", "miRNA_seq(5'-3')", "target_seq(5'-3')"]]
# itv = f"{interval[0]}-{interval[1]}"
df['variant_in_2-8'] = df.apply(lambda row: get_target_pos(
row["miRNA_seq(5'-3')"], row["target_seq(5'-3')"], row["flag"], interval), axis=1)
df['pattern'] = df['flag'].apply(lambda x: get_target_pt(x, pt_map))
df = df.set_index('flag', drop=True)
df = df.add_prefix('{}_'.format(tag))
return df
def cal_dist(flags):
# ENST00000551241_rs1859333_12_112933161_T_C_36:1364:OAS1:+:29
res = []
for flag in flags:
ar = flag.split(':')
variant_pos = ar[0].split('_')[-1]
tpos = ar[-1]
if variant_pos.isdigit() and tpos.isdigit():
res.append(str(int(variant_pos) - int(tpos)))
else:
res.append('NA')
return res
def get_diff(x, y):
if "NA" not in [x, y]:
diff = round(float(y) - float(x), 2)
if diff < 0:
return [diff, "More stable"]
elif diff > 0:
return [diff, "Less stable"]
else:
return [diff, "No change"]
return ['NA', 'NA']
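# Illustrative example (values are hypothetical): get_diff("-10.0", "-12.5") returns
# [-2.5, "More stable"] because the alt energy is lower than the ref energy, while
# get_diff("-10.0", "NA") returns ['NA', 'NA'].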
def get_energy_classify(x, y):
if "NA" in [x, y]:
if x == "NA":
return "new"
elif y == "NA":
return "off-target"
return 'consistent'
def classify_and_sort(df, default_energy=DEFAULT_ENERGY):
"""根据能量值差异分类"""
if not df.empty:
df = df.copy()
df['classify'] = df.apply(lambda row: get_energy_classify(row["ref_target_seq(5'-3')"], row["alt_target_seq(5'-3')"]), axis=1)
df.loc[df.classify == 'new', 'ref_energy'] = default_energy
df.loc[df.classify == 'off-target', 'alt_energy'] = default_energy
df['diff_energy'] = df.apply(lambda row: get_diff(row['ref_energy'], row['alt_energy'])[0], axis=1)
df['state'] = df.apply(lambda row: get_diff(row['ref_energy'], row['alt_energy'])[1], axis=1)
return df
def merge_refalt(refpt, altpt, outdir, trans_info,
name='f', td='raw', interval=INTERVAL):
df_ref = fmt_pt(refpt, 'ref', trans_info, interval=interval)
df_alt = fmt_pt(altpt, 'alt', trans_info, interval=interval)
output_all = os.path.join(outdir, f"energy_stats-{name}-result.csv")
dfm = None
if df_ref is not None and df_alt is not None:
dfm = pd.merge(df_ref, df_alt, left_index=True, right_index=True, how='outer').fillna('NA')
dfm = classify_and_sort(dfm)
dfm['transcript_direction'] = td
dfm = dfm[dfm['state'] != 'No change']
dfm = dfm.drop_duplicates()
dfm['target_position_to_variant_position'] = cal_dist(dfm.index)
dfm.to_csv(output_all)
check_outputf(output_all)
else:
print('either {} or {} is null, no output!'.format(refpt, altpt))
return dfm
| 2.65625
| 3
|
tests/test_docs.py
|
qinfeng2011/wltp
| 0
|
12777027
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2015-2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
import doctest
import io
import re
import subprocess
import unittest
from unittest.mock import patch
from wltp import cli
import wltp
import os.path as osp
mydir = osp.dirname(__file__)
proj_path = osp.join(mydir, "..")
readme_path = osp.join(proj_path, "README.rst")
class Doctest(unittest.TestCase):
def test_README_version_reldate_opening(self):
ver = wltp.__version__
reldate = wltp.__updated__
header_len = 20
mydir = osp.dirname(__file__)
ver_found = rdate_found = False
with open(readme_path) as fd:
for i, l in zip(range(header_len), fd):
if ver in l:
ver_found = True
                if reldate in l:
rdate_found = True
if not ver_found:
msg = "Version(%s) not found in README %s header-lines!"
raise AssertionError(msg % (ver, header_len))
if not rdate_found:
msg = "RelDate(%s) not found in README %s header-lines!"
raise AssertionError(msg % (reldate, header_len))
def test_README_version_from_cmdline(self):
ver = wltp.__version__
with open(readme_path) as fd:
ftext = fd.read()
with patch("sys.stdout", new=io.StringIO()) as stdout:
try:
cli.main(["--version"])
except SystemExit:
pass ## Cancel argparse's exit()
proj_ver = stdout.getvalue().strip()
assert proj_ver
self.assertIn(
proj_ver,
ftext,
"Version(%s) not found in README cmd-line version-check!" % ver,
)
def test_README_as_PyPi_landing_page(self):
from docutils import core as dcore
long_desc = subprocess.check_output(
"python setup.py --long-description".split(), cwd=proj_path
)
self.assertIsNotNone(long_desc, "Long_desc is null!")
with patch("sys.exit"):
dcore.publish_string(
long_desc,
enable_exit_status=False,
settings_overrides={ # see `docutils.frontend` for more.
"halt_level": 2 # 2=WARN, 1=INFO
},
)
class TestDoctest(unittest.TestCase):
def test_doctests(self):
failure_count, test_count = doctest.testmod(
wltp.datamodel, optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
)
self.assertGreater(test_count, 0, (failure_count, test_count))
self.assertEqual(failure_count, 0, (failure_count, test_count))
| 1.960938
| 2
|
testsuite/tests/QB13-061__module_config_file/run_test.py
|
AdaCore/style_checker
| 2
|
12777028
|
def test_epl_single_year_mtl(style_checker):
"""Style check test against epl_single_year.mtl
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('--config=module_config.yaml',
'whatever', 'epl_single_year.mtl')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
# Try the same test, but without the --config file.
# It should fail, because we no longer include the module's
# config which allows EPL copyright notices.
p = style_checker.run_style_checker('whatever', 'epl_single_year.mtl')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
epl_single_year.mtl:2: Copyright notice is not correctly formatted
It must look like...
Copyright (C) 1992-2017, <copyright holder>
... where <copyright holder> can be any of:
- `AdaCore'
- `Altran Praxis'
- `Altran UK Limited'
- `Free Software Foundation, Inc.'
- `AdaCore, Altran Praxis'
- `AdaCore and Altran UK Limited'
- `AdaCore, Altran UK Limited'
- `AdaCore and Altran UK'
- `AdaCore, Altran UK'
""")
def test_epl_range_mtl(style_checker):
"""Style check test against epl_range.mtl
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('--config=module_config.yaml',
'whatever', 'epl_range.mtl')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
# Try the same test, but without the --config file.
# It should fail, because we no longer include the module's
# config which allows EPL copyright notices.
p = style_checker.run_style_checker('whatever', 'epl_range.mtl')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
epl_range.mtl:2: Copyright notice is not correctly formatted
It must look like...
Copyright (C) 1992-2017, <copyright holder>
... where <copyright holder> can be any of:
- `AdaCore'
- `Altran Praxis'
- `Altran UK Limited'
- `Free Software Foundation, Inc.'
- `AdaCore, Altran Praxis'
- `AdaCore and Altran UK Limited'
- `AdaCore, Altran UK Limited'
- `AdaCore and Altran UK'
- `AdaCore, Altran UK'
""")
def test_relpath_m(style_checker):
"""Style check test against relpath.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('--config=module_config.yaml',
'whatever', 'relpath.m')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
relpath.m:5: Copyright notice has unexpected copyright holder:
`AdaCore'
Expected either of:
- `Someone Inc'
- `Second Holder SARL'
""")
# Try the same test, but without the --config file.
p = style_checker.run_style_checker('whatever', 'relpath.m')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
def test_deep_notice(style_checker):
"""Style check test against deep_notice.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('--config=module_config.yaml',
'whatever', 'deep_notice.m')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
# Try the same test, but without the --config file.
p = style_checker.run_style_checker('whatever', 'deep_notice.m')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
deep_notice.m:100: Copyright notice must occur before line 24
""")
def test_notice_too_deep_m(style_checker):
"""Style check test against notice_too_deep.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('--config=module_config.yaml',
'whatever', 'notice_too_deep.m')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
notice_too_deep.m:101: Copyright notice must occur before line 100
""")
# Try the same test, but without the --config file.
p = style_checker.run_style_checker('whatever', 'notice_too_deep.m')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
notice_too_deep.m:101: Copyright notice must occur before line 24
""")
| 2.546875
| 3
|
tests/test_scripts/test_solph/test_connect_invest/test_connect_invest.py
|
jnnr/oemof
| 0
|
12777029
|
# -*- coding: utf-8 -*-
"""Connecting different investment variables.
This file is part of project oemof (github.com/oemof/oemof). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location
oemof/tests/test_scripts/test_solph/test_connect_invest/test_connect_invest.py
SPDX-License-Identifier: MIT
"""
from nose.tools import eq_
import oemof.solph as solph
from oemof.outputlib import processing, views
import logging
import os
import pandas as pd
from oemof.network import Node
def test_connect_invest():
date_time_index = pd.date_range('1/1/2012', periods=24 * 7, freq='H')
energysystem = solph.EnergySystem(timeindex=date_time_index)
Node.registry = energysystem
# Read data file
full_filename = os.path.join(os.path.dirname(__file__),
'connect_invest.csv')
data = pd.read_csv(full_filename, sep=",")
logging.info('Create oemof objects')
# create electricity bus
bel1 = solph.Bus(label="electricity1")
bel2 = solph.Bus(label="electricity2")
# create excess component for the electricity bus to allow overproduction
solph.Sink(label='excess_bel', inputs={bel2: solph.Flow()})
solph.Source(label='shortage', outputs={bel2: solph.Flow(
variable_costs=50000)})
# create fixed source object representing wind power plants
solph.Source(label='wind', outputs={bel1: solph.Flow(
actual_value=data['wind'], nominal_value=1000000, fixed=True)})
# create simple sink object representing the electrical demand
solph.Sink(label='demand', inputs={bel1: solph.Flow(
actual_value=data['demand_el'], fixed=True, nominal_value=1)})
storage = solph.components.GenericStorage(
label='storage',
inputs={bel1: solph.Flow(variable_costs=10e10)},
outputs={bel1: solph.Flow(variable_costs=10e10)},
loss_rate=0.00, initial_storage_level=0,
invest_relation_input_capacity=1/6,
invest_relation_output_capacity=1/6,
inflow_conversion_factor=1, outflow_conversion_factor=0.8,
investment=solph.Investment(ep_costs=0.2),
)
line12 = solph.Transformer(
label="line12",
inputs={bel1: solph.Flow()},
outputs={bel2: solph.Flow(investment=solph.Investment(ep_costs=20))})
line21 = solph.Transformer(
label="line21",
inputs={bel2: solph.Flow()},
outputs={bel1: solph.Flow(investment=solph.Investment(ep_costs=20))})
om = solph.Model(energysystem)
solph.constraints.equate_variables(
om, om.InvestmentFlow.invest[line12, bel2],
om.InvestmentFlow.invest[line21, bel1], 2)
solph.constraints.equate_variables(
om, om.InvestmentFlow.invest[line12, bel2],
om.GenericInvestmentStorageBlock.invest[storage])
# if tee_switch is true solver messages will be displayed
logging.info('Solve the optimization problem')
om.solve(solver='cbc')
# check if the new result object is working for custom components
results = processing.results(om)
my_results = dict()
my_results['line12'] = float(views.node(results, 'line12')['scalars'])
my_results['line21'] = float(views.node(results, 'line21')['scalars'])
stor_res = views.node(results, 'storage')['scalars']
my_results['storage_in'] = stor_res[
(('electricity1', 'storage'), 'invest')]
my_results['storage'] = stor_res[(('storage', 'None'), 'invest')]
my_results['storage_out'] = stor_res[
(('storage', 'electricity1'), 'invest')]
connect_invest_dict = {
'line12': 814705,
'line21': 1629410,
'storage': 814705,
'storage_in': 135784,
'storage_out': 135784}
for key in connect_invest_dict.keys():
eq_(int(round(my_results[key])), int(round(connect_invest_dict[key])))
| 2.90625
| 3
|
050.py
|
wittycoder/project_euler
| 0
|
12777030
|
# this method sucks, takes over 30 hours to run on machine!!!
from functools import reduce
from fast_prime import primes, is_prime
import time
start = time.time()
p = primes(1000000)
#p = primes(100000)
long_num = 0
long_sum = 0
print('prime time = ', time.time() - start)
start = time.time()
for x in p[::-1]:
for i in range(len(p)):
sum_list = []
sum = 0
count = i
while sum < x:
sum_list.append(p[count])
#print(sum_list)
sum = reduce(lambda x,y: x+y, sum_list)
if sum == x:
if len(sum_list) > long_sum:
long_sum = len(sum_list)
long_num = x
count += 1
print('consecutive sum of primes took ', time.time() - start)
print(long_sum)
print(long_num)
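# Hedged sketch of a faster approach (illustrative, not the author's solution):
# precompute prefix sums of the primes and extend only windows longer than the
# best one found so far.
#   prefix = [0]
#   for q in p:
#       prefix.append(prefix[-1] + q)
#   best_len, best_prime = 0, 0
#   for i in range(len(prefix)):
#       for j in range(i + best_len + 1, len(prefix)):
#           s = prefix[j] - prefix[i]
#           if s >= 1000000:
#               break
#           if is_prime(s):
#               best_len, best_prime = j - i, s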
| 3.296875
| 3
|
setup.py
|
oortega/django-async-messages-redux
| 2
|
12777031
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='django-async-messages-redux',
version='0.4.1',
url='https://github.com/maurizi/django-async-messages',
author='<NAME>',
author_email='<EMAIL>',
description="Send asynchronous messages to users (eg from offline scripts). Useful for integration with Celery.",
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests']),
install_requires=['django>=1.4'],
)
| 1.09375
| 1
|
while_num.py
|
ISE2012/ch5
| 0
|
12777032
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 07:44:37 2020
@author: ucobiz
"""
values = [] # initialize the list to be empty
userVal = 1 # give our loop variable a value
while userVal != 0:
userVal = int(input("Enter a number, 0 to stop: "))
if userVal != 0: # only append if it's valid
values.append(userVal) # add value to the list
print("Stopped!")
print(values)
| 4.1875
| 4
|
migrations/versions/13934f10a019_.py
|
dev-johnlopez/offerly
| 0
|
12777033
|
"""empty message
Revision ID: 13934f10a019
Revises:
Create Date: 2019-10-04 16:58:16.891916
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '13934f10a019'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('address',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('street_address', sa.String(length=255), nullable=True),
sa.Column('street_number', sa.String(length=255), nullable=True),
sa.Column('route', sa.String(length=255), nullable=True),
sa.Column('apt_number', sa.String(length=255), nullable=True),
sa.Column('locality', sa.String(length=255), nullable=True),
sa.Column('administrative_area_level_1', sa.String(length=2), nullable=True),
sa.Column('postal_code', sa.String(length=255), nullable=True),
sa.Column('county', sa.String(length=255), nullable=True),
sa.Column('country', sa.String(length=255), nullable=True),
sa.Column('latitude', sa.Numeric(precision=9, scale=6), nullable=True),
sa.Column('longitude', sa.Numeric(precision=9, scale=6), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.Column('last_login_at', sa.DateTime(), nullable=True),
sa.Column('current_login_at', sa.DateTime(), nullable=True),
sa.Column('last_login_ip', sa.String(length=40), nullable=True),
sa.Column('current_login_ip', sa.String(length=40), nullable=True),
sa.Column('login_count', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_table('property',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('address_id', sa.Integer(), nullable=True),
sa.Column('sq_foot', sa.Integer(), nullable=True),
sa.Column('year_built', sa.Integer(), nullable=True),
sa.Column('stories_count', sa.Integer(), nullable=True),
sa.Column('basement_type', sa.String(length=255), nullable=True),
sa.Column('addition_type', sa.String(length=255), nullable=True),
sa.Column('bedroom_count', sa.Integer(), nullable=True),
sa.Column('full_bath_count', sa.Integer(), nullable=True),
sa.Column('half_bath_count', sa.Integer(), nullable=True),
sa.Column('parking_type', sa.String(length=255), nullable=True),
sa.Column('countertops', sa.String(length=255), nullable=True),
sa.Column('appliances', sa.String(length=255), nullable=True),
sa.Column('double_oven_ind', sa.Boolean(), nullable=True),
sa.Column('walk_in_pantry_ind', sa.Boolean(), nullable=True),
sa.Column('separate_cooktop_ind', sa.Boolean(), nullable=True),
sa.Column('built_in_oven_ind', sa.Boolean(), nullable=True),
sa.Column('built_in_microwave_ind', sa.Boolean(), nullable=True),
sa.Column('kitchen_flooring', sa.String(length=255), nullable=True),
sa.Column('main_flooring', sa.String(length=255), nullable=True),
sa.Column('bathroom_flooring_tile_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_vinyl_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_laminate_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_hardwood_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_travertine_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_saltillo_tile_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_carpet_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_woodplank_tile_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_concrete_ind', sa.Boolean(), nullable=True),
sa.Column('bathroom_flooring_other_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_tile_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_vinyl_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_laminate_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_hardwood_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_travertine_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_saltillo_tile_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_carpet_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_woodplank_tile_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_concrete_ind', sa.Boolean(), nullable=True),
sa.Column('bedroom_flooring_other_ind', sa.Boolean(), nullable=True),
sa.Column('landscaping', sa.String(length=255), nullable=True),
sa.Column('pool', sa.Boolean(), nullable=True),
sa.Column('hottub', sa.Boolean(), nullable=True),
sa.Column('gated_community_ind', sa.Boolean(), nullable=True),
sa.Column('hoa_ind', sa.Boolean(), nullable=True),
sa.Column('age_restricted_ind', sa.Boolean(), nullable=True),
sa.Column('solar_panels_ind', sa.Boolean(), nullable=True),
sa.Column('septic_system_ind', sa.Boolean(), nullable=True),
sa.Column('well_water_ind', sa.Boolean(), nullable=True),
sa.Column('poor_location_ind', sa.Boolean(), nullable=True),
sa.Column('sinkholes_ind', sa.Boolean(), nullable=True),
sa.Column('foundation_issues', sa.Boolean(), nullable=True),
sa.Column('additional_info', sa.String(length=1500), nullable=True),
sa.Column('submitter_type', sa.String(length=255), nullable=True),
sa.Column('listed_ind', sa.Boolean(), nullable=True),
sa.Column('submitter_first_name', sa.String(length=255), nullable=True),
sa.Column('submitter_last_name', sa.String(length=255), nullable=True),
sa.Column('submitter_phone', sa.String(length=255), nullable=True),
sa.Column('submitter_email', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['address_id'], ['address.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('roles_users',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles_users')
op.drop_table('property')
op.drop_table('user')
op.drop_table('role')
op.drop_table('address')
# ### end Alembic commands ###
| 1.6875
| 2
|
examples/hdx_wizard.py
|
orest-d/liquer
| 3
|
12777034
|
<reponame>orest-d/liquer<filename>examples/hdx_wizard.py
# Example of a wizard for Humanitarian Data Exchange
# Make it run from the examples directory
import sys
sys.path.append("..")
# Flask-related imports
import logging
import webbrowser
from flask import Flask
import liquer.blueprint as bp # This is the LiQuer blueprint containing the liquer web service
# Modules needed to configure LiQuer
from liquer.cache import FileCache, set_cache # Setting cache
from liquer.state import set_var # Configuring the state variables
# Modules
import liquer.ext.basic # basic modules (needed for the link command)
import liquer.ext.lq_pandas # pandas support
import liquer.ext.lq_hxl # libhxl support (not used by the wizard, but may be useful)
app = Flask(__name__)
# Setting the logger to make debugging info visible
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
werkzeug_logger = logging.getLogger('werkzeug')
werkzeug_logger.setLevel(logging.INFO)
# Registering the liquer blueprint under a given url prefix and letting LiQuer know where it is...
url_prefix='/liquer'
app.register_blueprint(bp.app, url_prefix=url_prefix)
set_var("api_path",url_prefix+"/q/")
set_var("server","http://localhost:5000")
# Setting the cache
set_cache(FileCache("../cache"))
# Standard Flask way of showing a index.html (not LiQuer specific)
@app.route('/', methods=['GET', 'POST'])
@app.route('/index.html', methods=['GET', 'POST'])
def index():
return open("hdx_wizard.html").read()
# Start a service and open a browser
if __name__ == '__main__':
webbrowser.open("http://localhost:5000")
app.run(debug=True,threaded=False)
| 2.421875
| 2
|
setup.py
|
ariestiyansyah/SpotiPy
| 5
|
12777035
|
<gh_stars>1-10
from distutils.core import setup
from setuptools import find_packages
setup(
name='SpotiPy',
version='1.0.0',
scripts=['SpotiPy'],
packages=find_packages(exclude=['tests*']),
license='WTFPL',
description='Spotify player using Python and AppleScript',
long_description='Spotify player using Python and AppleScript',
# install_requires=[''],
url='http://github.com/ariestiyansyah/SpotiPy',
author='<NAME>',
author_email='<EMAIL>'
)
| 1.1875
| 1
|
oru_walk_control.py
|
asherikov/oru_walk_module
| 9
|
12777036
|
<reponame>asherikov/oru_walk_module
#!/usr/bin/python
#-*- coding: iso-8859-15 -*-
# Remote control of walking module
import os
import sys
import time
from naoqi import ALProxy
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-a", "--action", action="store", type="int", dest="nao_action", default=0)
parser.add_option("-b", "--broker-ip", action="store", type="string", dest="IP", default="127.0.0.1")
parser.add_option("-p", "--broker-port", action="store", type="int", dest="PORT", default=9559)
(options, args) = parser.parse_args();
print '----- Started'
try:
walk_proxy = ALProxy("oru_walk", options.IP, options.PORT)
motion_proxy = ALProxy("ALMotion", options.IP, options.PORT)
except Exception,e:
print "Error when creating proxy:"
print str(e)
exit(1)
print '----- Proxy was created'
while True:
# select action
if options.nao_action == 0:
print "Please enter a number corresponding to a command:"
print "'0' - exit script"
print "'1' - set stiffness to 1"
print "'2' - set stiffness to 0"
print "'3' - take the initial position"
print "'4' - stop"
print "'5' - walk"
print "'6' - set stiffness to 0.5"
print "'7' - set stiffness to 1 and take the initial position"
print "'8' - walk (using builtin module)"
print "'9' - reset stiffness and angles (using builtin module)"
try:
nao_action = int (raw_input("Type a number: "))
except Exception,e:
print "Ooops!"
exit(1)
else:
nao_action = options.nao_action
# execute action
try:
if nao_action == 1:
walk_proxy.setStiffness(1.0)
elif nao_action == 2:
walk_proxy.setStiffness(0.0)
elif nao_action == 3:
walk_proxy.initPosition()
elif nao_action == 4:
walk_proxy.stopWalking()
elif nao_action == 5:
walk_proxy.walk()
elif nao_action == 6:
walk_proxy.setStiffness(0.5)
elif nao_action == 7:
walk_proxy.setStiffness(1.0)
walk_proxy.initPosition()
elif nao_action == 8:
motion_proxy.stiffnessInterpolation("Body", 1.0, 0.1)
motion_proxy.setWalkArmsEnabled(False, False)
            # enable motion when lifted in the air
motion_proxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", False]])
motion_proxy.walkInit()
# (X length, Y length, theta, frequency)
            motion_proxy.walkTo(0.8, 0.0, 0.0)
elif nao_action == 9:
# reset stiffness and angles using motion proxy,
# otherwise it doesn't work well later
motion_proxy.stiffnessInterpolation("Body", 0.0, 1.0)
numAngles = len(motion_proxy.getJointNames("Body"))
angles = [0.0] * numAngles
motion_proxy.angleInterpolationWithSpeed ("Body", angles, 0.3)
except Exception,e:
print "Execution of the action was failed."
exit(1)
# leave if requested
if nao_action < 1 or nao_action > 9 or options.nao_action != 0:
print '----- The script was stopped'
break
exit (0)
| 2.46875
| 2
|
tests/workspace/test_geopackage.py
|
dvntucker/geoscript-py
| 22
|
12777037
|
<filename>tests/workspace/test_geopackage.py<gh_stars>10-100
import unittest
from tests.workspace.workspacetest import WorkspaceTest
from geoscript.workspace import GeoPackage
class GeoPackageWorkspace_Test(WorkspaceTest):
def setUp(self):
self.ws = GeoPackage('work/data.gpkg')
self.remove('widgets')
self.remove('widgets2')
self.remove('states2')
def remove(self, layer):
try:
self.ws.remove(layer)
except:
pass
| 2.015625
| 2
|
main.py
|
nishantchy/python-starter
| 0
|
12777038
|
mylist = ["banana", "apple", "pineapple"]
print(mylist)
item = mylist[2]
print(item)
if "banana" in mylist:
print("yes")
else:
print("no")
mylist.append("lemon")
print(mylist)
mylist.insert(2, "grapes")
print(mylist)
# item = [0] * 5
# print(item)
| 3.8125
| 4
|
src/python/test/test_wasserstein_distance.py
|
KirillErofeev/gudhi-devel
| 0
|
12777039
|
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
    See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
    Author(s): <NAME>
    Copyright (C) 2019 Inria
    Modification(s):
      - YYYY/MM Author: Description of the modification
"""
from gudhi.wasserstein import wasserstein_distance
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2019 Inria"
__license__ = "MIT"
def test_basic_wasserstein():
diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
diag3 = np.array([[0, 2], [4, 6]])
diag4 = np.array([[0, 3], [4, 8]])
emptydiag = np.array([[]])
assert wasserstein_distance(emptydiag, emptydiag, q=2., p=1.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, q=np.inf, p=1.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, q=np.inf, p=2.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, q=2., p=2.) == 0.
assert wasserstein_distance(diag3, emptydiag, q=np.inf, p=1.) == 2.
assert wasserstein_distance(diag3, emptydiag, q=1., p=1.) == 4.
    assert wasserstein_distance(diag4, emptydiag, q=1., p=2.) == 5. # thank you, Pythagorean triples
assert wasserstein_distance(diag4, emptydiag, q=np.inf, p=2.) == 2.5
assert wasserstein_distance(diag4, emptydiag, q=2., p=2.) == 3.5355339059327378
assert wasserstein_distance(diag1, diag2, q=2., p=1.) == 1.4453593023967701
assert wasserstein_distance(diag1, diag2, q=2.35, p=1.74) == 0.9772734057168739
assert wasserstein_distance(diag1, emptydiag, q=2.35, p=1.7863) == 3.141592214572228
assert wasserstein_distance(diag3, diag4, q=1., p=1.) == 3.
assert wasserstein_distance(diag3, diag4, q=np.inf, p=1.) == 3. # no diag matching here
assert wasserstein_distance(diag3, diag4, q=np.inf, p=2.) == np.sqrt(5)
assert wasserstein_distance(diag3, diag4, q=1., p=2.) == np.sqrt(5)
assert wasserstein_distance(diag3, diag4, q=4.5, p=2.) == np.sqrt(5)
| 2.234375
| 2
|
app/urls.py
|
brianbrunner/cheddar-oauth-demo
| 1
|
12777040
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('app.views',
(r'^$', 'home'),
(r'^account/(?P<account_id>.+)$', 'account'),
)
| 1.6875
| 2
|
tech/__init__.py
|
A1eXFei/StockMarket3
| 0
|
12777041
|
import logging
from logging.config import fileConfig
from util import DatabaseUtil as dbu
class StockTechIndicator(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
pass
def save_tech_data(self, stock_code, date, dict):
update_tech_sql = "update pdtb_stock_tech_data set "
for key in dict:
            update_tech_sql = update_tech_sql + key + "=" + str(dict[key]) + ", "
update_tech_sql = update_tech_sql[:-2] + " where code = '" + stock_code + "' and date = '" + str(date) + "'"
#print(update_tech_sql)
dbu.update(update_tech_sql)
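# Hedged usage sketch (the stock code, date and indicator values are hypothetical):
#   StockTechIndicator().save_tech_data("600000", "2018-01-01", {"ma5": 10.2, "ma10": 10.5})
# which issues an UPDATE against pdtb_stock_tech_data via DatabaseUtil.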
| 2.890625
| 3
|
clarity-ext-scripts/deployment/slices.py
|
ctmrbio/claritylims
| 4
|
12777042
|
import os
import click
from subprocess import call
import yaml
from datetime import datetime
import logging
logging.basicConfig(level="INFO")
tool = "./bin/config-slicer/config-slicer-3.1.14.7.jar"
@click.group()
def cli():
    pass
def read_config():
    with open(os.path.expanduser("~/.slices.config"), "r") as fs:
        return yaml.safe_load(fs.read())
config_file = read_config()
# Generates a manifest file from the given environment's server
@cli.command()
@click.argument("environment")
def manifest(environment):
config = config_file[environment]
server = config["server"]
logging.info("Generating manifest file for {}".format(server))
manifest_file = "exports/manifest-{}-{}.txt".format(server, datetime.now().isoformat())
try:
os.remove(manifest_file)
except OSError:
pass
call(["java", "-jar", tool,
"-o", "example",
"-s", server,
"-u", config["username"],
"-p", config["password"],
"-m", manifest_file])
@cli.command()
@click.argument("environment")
def export(environment):
config = config_file[environment]
manifest_file = "manifest.txt"
server = config["server"]
logging.info("Generating export package for {}".format(server))
package_file = "exports/export-package-{}-{}.xml".format(server, datetime.now().isoformat())
call(["java", "-jar", tool,
"-o", "export",
"-s", server,
"-u", config["username"],
"-p", config["password"],
"-m", manifest_file,
"-k", package_file])
@cli.command("import")
@click.argument("environment")
@click.argument("package")
@click.option("--validate/--no-validate", default=False)
def import_package(environment, package, validate):
operation = "validate" if validate else "importAndOverwrite"
config = config_file[environment]
server = config["server"]
logging.info(
"Importing export package {} to {} (validate={})".format(package, server, validate))
call(["java", "-jar", tool,
"-o", operation,
"-s", server,
"-u", config["username"],
"-p", config["password"],
"-k", package])
cli()
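# Example invocations (hedged; "staging" must be an entry in ~/.slices.config and
# the package path is hypothetical):
#   python slices.py manifest staging
#   python slices.py export staging
#   python slices.py import staging exports/export-package-staging.xml --validate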
| 2.265625
| 2
|
microraiden/examples/ticker_client.py
|
andrevmatos/microraiden
| 417
|
12777043
|
<filename>microraiden/examples/ticker_client.py
from tkinter import ttk
import tkinter
import logging
import gevent
import click
import sys
from microraiden import Session
from microraiden import utils
log = logging.getLogger(__name__)
class ETHTickerClient(ttk.Frame):
def __init__(
self,
sender_privkey: str,
session: Session = None,
poll_interval: float = 5
) -> None:
self.poll_interval = poll_interval
self.root = tkinter.Tk()
ttk.Frame.__init__(self, self.root)
self.root.title('µRaiden ETH Ticker')
self.root.protocol('WM_DELETE_WINDOW', self.close)
self.pack()
self.pricevar = tkinter.StringVar(value='0.00 USD')
ttk.Label(self, textvariable=self.pricevar, font=('Helvetica', '72')).pack()
if session is None:
self.session = Session(
private_key=sender_privkey,
close_channel_on_exit=True,
endpoint_url='http://localhost:5000'
)
else:
self.session = session
self.active_query = False
self.running = False
def run(self):
self.running = True
self.root.after(0, self.query_price)
self.root.mainloop()
def query_price(self):
if not self.running:
return
self.active_query = True
response = self.session.get('http://localhost:5000/ETHUSD')
if response:
price = float(response.json()['last_price'])
log.info('New price received: {:.2f} USD'.format(price))
self.pricevar.set('{:.2f} USD'.format(price))
else:
log.warning('No response.')
if self.running:
self.root.after(int(self.poll_interval * 1000), self.query_price)
self.active_query = False
def close(self):
log.info('Shutting down gracefully.')
self.running = False
self.root.destroy()
# Sloppy handling of thread joining but works for this small demo.
while self.active_query:
gevent.sleep(1)
self.session.close()
@click.command()
@click.option(
'--private-key',
required=True,
help='Path to private key file of the proxy',
type=click.Path(exists=True, dir_okay=False, resolve_path=True)
)
@click.option(
'--private-key-password-file',
default=None,
help='Path to file containing password for the JSON-encoded private key',
type=click.Path(exists=True, dir_okay=False, resolve_path=True)
)
def main(
private_key,
private_key_password_file,
):
private_key = utils.get_private_key(private_key, private_key_password_file)
if private_key is None:
sys.exit(1)
ticker = None
try:
ticker = ETHTickerClient(private_key)
ticker.run()
except KeyboardInterrupt:
if ticker:
ticker.close()
if __name__ == '__main__':
from gevent import monkey
monkey.patch_all()
logging.basicConfig(level=logging.INFO)
main()
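# Example invocation (hedged; the key file paths are hypothetical):
#   python ticker_client.py --private-key ~/keys/sender.json --private-key-password-file ~/keys/password.txt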
| 2.46875
| 2
|
deepspeech_pytorch/modules/batch_rnn.py
|
NikolaiBabkin/deepspeech.pytorch
| 0
|
12777044
|
<gh_stars>0
import torch.nn as nn
from .sequence_wise import SequenceWise
class BatchRNN(nn.Module):
def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM, bidirectional=False, batch_norm=True):
super(BatchRNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.batch_norm = SequenceWise(nn.BatchNorm1d(input_size)) if batch_norm else None
self.rnn = rnn_type(input_size=input_size, hidden_size=hidden_size,
bidirectional=bidirectional, bias=True)
self.num_directions = 2 if bidirectional else 1
def flatten_parameters(self):
self.rnn.flatten_parameters()
def forward(self, x, output_lengths):
if self.batch_norm is not None:
x = self.batch_norm(x)
x = nn.utils.rnn.pack_padded_sequence(x, output_lengths)
x, h = self.rnn(x)
x, _ = nn.utils.rnn.pad_packed_sequence(x)
if self.bidirectional:
x = x.view(x.size(0), x.size(1), 2, -1).sum(2).view(x.size(0), x.size(1), -1) # (TxNxH*2) -> (TxNxH) by sum
return x
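# A minimal usage sketch (hedged; the sizes are hypothetical). Input is time-major
# (T, N, input_size) and lengths are sorted in decreasing order, as
# pack_padded_sequence expects by default:
#   rnn = BatchRNN(input_size=161, hidden_size=400, rnn_type=nn.GRU, bidirectional=True)
#   x = torch.randn(50, 4, 161)
#   lengths = torch.tensor([50, 48, 45, 40])
#   out = rnn(x, lengths)   # shape (50, 4, 400): both directions summed into hidden_size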
| 2.40625
| 2
|
spotidl/downloader.py
|
good-times-ahead/spoti-dl
| 8
|
12777045
|
from yt_dlp import YoutubeDL
from dataclasses import dataclass
from spotidl.spotify import SpotifySong
from spotidl.utils import make_song_title, check_file
@dataclass
class YoutubeSong:
id: str
title: str
video_url: str
def get_config(user_params: dict, song: SpotifySong) -> dict:
"""
Prepares the parameters that need to be passed onto the YoutubeDL object.
"""
downloader_params = {
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": user_params["codec"],
"preferredquality": user_params["quality"],
}
],
"outtmpl": f"{make_song_title(song.artists, song.name, ', ')}.%(ext)s",
# "outtmpl": "%(artist)s-%(title)s.ext",
"quiet": user_params["quiet"],
"format": "bestaudio/best",
"dynamic_mpd": False,
}
return downloader_params
def get_downloader(params: dict):
"""
Initiates the YoutubeDL class with the configured parameters.
"""
return YoutubeDL(params=params)
def fetch_source(yt: YoutubeDL, song: SpotifySong) -> YoutubeSong:
"""
Fetch appropriate source for the song from Youtube using the given details.
"""
try:
# adding "audio" to avoid 'official music videos' and similar types
song_title = make_song_title(song.artists, song.name, ", ") + " audio"
search = yt.extract_info(f"ytsearch:{song_title}", download=False)
yt_info = search["entries"][0]
except Exception as e:
print("Error when trying to get audio source from YT: ", e)
return
else:
yt_song = YoutubeSong(
id=yt_info["id"],
title=yt_info["title"],
video_url=yt_info["webpage_url"],
)
return yt_song
def download_song(yt: YoutubeDL, link: str):
"""
Downloads the song given its source link and the YouTube downloader object.
"""
print("\nStarting song download...\n")
try:
# attempts to download the song using the best matched
# youtube source link
yt.download(link)
except Exception:
print("\nDownload failed!")
else:
print("\nSuccessfully finished downloading!")
def controller(user_params: dict, song: SpotifySong, file_name: str):
"""
Handles the flow of the download process for the given song.
Initiates the configuration as per the user-defined parameters and chains
the rest of functions together.
"""
# check if song has already been downloaded before at some point;
# only proceed with download if it doesn't
if check_file(file_name):
print(f"\n{file_name} already exists! Skipping download...")
else:
# user parameters are used in the downloader parameters dictionary
# the downloader_params dict is then passed onto the YoutubeDL object
# when generating its instance.
downloader_params = get_config(user_params, song)
yt = get_downloader(downloader_params)
        yt_song = fetch_source(yt, song)
        if yt_song is None:
            print("\nSkipping download: no matching YouTube source was found.")
            return
        download_song(yt, yt_song.video_url)
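# Hedged usage sketch: the keys below are the ones get_config() reads; the
# SpotifySong instance and target file name come from the calling code in spotidl.
#   user_params = {"codec": "mp3", "quality": "320", "quiet": True}
#   controller(user_params, song, file_name="Artist - Track.mp3")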
| 3.125
| 3
|
train.py
|
xhelenfu/Lung-Nodule-Attributes-CT-Network
| 0
|
12777046
|
<reponame>xhelenfu/Lung-Nodule-Attributes-CT-Network
import argparse
import logging
import sys
import torch
from torch.utils.data import DataLoader
from dataio.dataset_lidc import DatasetLIDC
from model.model import Network
from utils.utils import *
import numpy as np
def main(config):
json_opts = json_file_to_pyobj(config.config_file)
if json_opts.training_params.batch_size > 1:
sys.exit('Batch size > 1 not supported')
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Create experiment directories
if config.resume_epoch == None:
make_new = True
else:
make_new = False
timestamp = get_experiment_id(make_new, json_opts.experiment_dirs.load_dir, config.fold_id)
experiment_path = 'experiments' + '/' + timestamp
make_dir(experiment_path + '/' + json_opts.experiment_dirs.model_dir)
fold_mean = json_opts.data_params.fold_means[config.fold_id-1]
fold_std = json_opts.data_params.fold_stds[config.fold_id-1]
# Set up the model
logging.info("Initialising model")
model_opts = json_opts.model_params
n_features = len(json_opts.data_params.feature_ids)
model = Network(model_opts, n_features)
model = model.to(device)
# Dataloader
logging.info("Preparing data")
num_workers = json_opts.data_params.num_workers
train_dataset = DatasetLIDC(json_opts.data_source, config.fold_id, fold_mean, fold_std,
json_opts.data_params.feature_ids, isTraining=True)
train_loader = DataLoader(dataset=train_dataset,
batch_size=json_opts.training_params.batch_size,
shuffle=True, num_workers=num_workers)
n_train_examples = len(train_loader)
logging.info("Total number of training examples: %d" %n_train_examples)
# Optimiser
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=json_opts.training_params.learning_rate,
betas=(json_opts.training_params.beta1,
json_opts.training_params.beta2),
weight_decay=json_opts.training_params.l2_reg_alpha)
# Resume training or train from scratch
if config.resume_epoch != None:
initial_epoch = config.resume_epoch
else:
initial_epoch = 0
# Restore model
if config.resume_epoch != None:
load_path = experiment_path + '/' + json_opts.experiment_dirs.model_dir + "/epoch_%d.pth" %(config.resume_epoch)
checkpoint = torch.load(load_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
assert(epoch == config.resume_epoch)
print("Resume training, successfully loaded " + load_path)
logging.info("Begin training")
model = model.train()
for epoch in range(initial_epoch, json_opts.training_params.total_epochs):
epoch_train_loss = 0.
epoch_mae_all = 0.
for batch_idx, (batch_x, batch_y) in enumerate(train_loader):
# Permute channels axis to batch axis
batch_x = batch_x.permute(1,0,2,3)
# Transfer to GPU
batch_x, batch_y = batch_x.to(device), batch_y.to(device)
optimizer.zero_grad()
# Forward pass
y_pred, y_pred_aux, _, _ = model(batch_x)
# Optimisation
aux_loss = criterion(y_pred_aux, batch_y).squeeze()
pred_loss = criterion(y_pred, batch_y).squeeze()
loss = json_opts.training_params.aux_lambda*aux_loss + pred_loss
loss.backward()
optimizer.step()
epoch_train_loss += loss.detach().cpu().numpy()
epoch_mae_all += np.absolute(batch_y.squeeze().detach().cpu().numpy() - y_pred.squeeze().detach().cpu().numpy())
# Save model
if (epoch % json_opts.save_freqs.model_freq) == 0:
save_path = experiment_path + '/' + json_opts.experiment_dirs.model_dir + "/epoch_%d.pth" %(epoch+1)
torch.save({'epoch': epoch + 1,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, save_path)
logging.info("Model saved: %s" % save_path)
# Print training losses and performance every epoch
print('Epoch[{}/{}], total loss:{:.4f}'.format(epoch+1, json_opts.training_params.total_epochs, epoch_train_loss))
print('MAE', np.around(epoch_mae_all/n_train_examples, 5), np.mean(epoch_mae_all/n_train_examples))
logging.info("Training finished")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', default='configs/config.json', type=str,
help='config file path')
parser.add_argument('--resume_epoch', default=None, type=int,
help='resume training from this epoch, set to None for new training')
parser.add_argument('--fold_id', default=1, type=int,
help='cross-validation fold')
config = parser.parse_args()
main(config)
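# Example invocations (hedged; the config path and fold are placeholders):
#   python train.py --config_file configs/config.json --fold_id 1
#   python train.py --config_file configs/config.json --fold_id 1 --resume_epoch 10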
| 2.359375
| 2
|
Medium/787.py
|
Hellofafar/Leetcode
| 6
|
12777047
|
# ------------------------------
# 787. Cheapest Flights Within K Stops
#
# Description:
# There are n cities connected by m flights. Each fight starts from city u and arrives at
# v with a price w.
#
# Now given all the cities and flights, together with starting city src and the destination
# dst, your task is to find the cheapest price from src to dst with up to k stops. If there
# is no such route, output -1.
#
# Example 1:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 1
# Output: 200
#
# Example 2:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 0
# Output: 500
#
# Note:
# The number of nodes n will be in range [1, 100], with nodes labeled from 0 to n - 1.
# The size of flights will be in range [0, n * (n - 1) / 2].
# The format of each flight will be (src, dst, price).
# The price of each flight will be in the range [1, 10000].
# k is in the range of [0, n - 1].
# There will not be any duplicated flights or self cycles.
#
# Version: 1.0
# 10/17/19 by Jianfa
# ------------------------------
import collections
import heapq
from typing import List
class Solution:
def findCheapestPrice(self, n: int, flights: List[List[int]], src: int, dst: int, K: int) -> int:
f = collections.defaultdict(dict)
for i, j, p in flights:
f[i][j] = p
priceHeap = [(0, src, K + 1)]
while priceHeap:
# get the tuple with cheapest price from the heap,
# which contains the destination city and the rest available stops
p, i, k = heapq.heappop(priceHeap)
if i == dst:
return p
if k > 0:
for j in f[i]:
# add price information of all the cities j that i can reach into heap
heapq.heappush(priceHeap, (p + f[i][j], j, k - 1))
return -1
# Used for testing
if __name__ == "__main__":
test = Solution()
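    # Example 1 from the problem description above (hedged sketch):
    print(test.findCheapestPrice(3, [[0, 1, 100], [1, 2, 100], [0, 2, 500]], 0, 2, 1))  # expected: 200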
# ------------------------------
# Summary:
# Dijkstra's algorithm idea from https://leetcode.com/problems/cheapest-flights-within-k-stops/discuss/115541/JavaPython-Priority-Queue-Solution
#
# O(E · dkQ + VlogV) time, where E is the number of edges, V is the number of vertices, and dkQ is the cost of reordering the heap
| 3.578125
| 4
|
openapi_core/contrib/flask/responses.py
|
Yarn-e/openapi-core
| 160
|
12777048
|
<reponame>Yarn-e/openapi-core
"""OpenAPI core contrib flask responses module"""
from werkzeug.datastructures import Headers
from openapi_core.validation.response.datatypes import OpenAPIResponse
class FlaskOpenAPIResponseFactory:
@classmethod
def create(cls, response):
header = Headers(response.headers)
return OpenAPIResponse(
data=response.data,
status_code=response._status_code,
headers=header,
mimetype=response.mimetype,
)
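# Hedged usage sketch: given a flask.Response object returned by a view,
#   openapi_response = FlaskOpenAPIResponseFactory.create(response)
# yields an OpenAPIResponse suitable for openapi-core's response validation.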
| 1.585938
| 2
|
app.py
|
rejreign/Docufix-Page-Summary
| 0
|
12777049
|
<filename>app.py<gh_stars>0
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from flask import Flask, request, jsonify, render_template, redirect
# from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
import nltk
LANGUAGE = "english"
SENTENCES_COUNT = 10
app = Flask(__name__)
@app.route('/', methods=['GET'])
def homepage():
return render_template('paraphrase.html')
@app.route('/', methods=['POST'])
def summarize():
""" Returns summary of articles """
text = request.form['text']
# parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
parser = PlaintextParser.from_string(text,Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
final = []
for sentence in summarizer(parser.document, SENTENCES_COUNT):
final.append(str(sentence))
length = len(final)
return render_template('paraphrase.html',report=final,length=length)
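# A hedged way to exercise the endpoint without a browser, using Flask's test client
# (the sample text is hypothetical):
#   with app.test_client() as client:
#       resp = client.post('/', data={'text': 'Some long article text here...'})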
if __name__ == '__main__':
app.run(debug=True, host='127.0.0.1', port=5000)
| 2.5625
| 3
|
support-script/compare-ilist.py
|
yxtj/Daiger
| 0
|
12777050
|
<reponame>yxtj/Daiger<gh_stars>0
import os, sys, re
def get_file_names(graph_folder):
l=os.listdir(graph_folder);
rpat=re.compile(r'^ilist-\d+$')
rfiles=[]
for fn in l:
if rpat.match(fn):
rfiles.append(fn)
rfiles.sort()
return rfiles
def loadInList(fn):
with open(fn) as f:
gdata=[line for line in f.read().split('\n') if len(line)!=0]
g={}
for i in range(len(gdata)):
key, line=gdata[i].split('\t')
line = [l.split(',') for l in line.split(' ') if len(l)!=0]
#g[int(key)]=[(int(e), float(w)) for e,w in line]
g[int(key)]=sorted([int(e) for e,w in line])
return list(g.items())
def compareOne(g1, g2, show_detail):
l1=len(g1)
l2=len(g2)
cnt=0
if l1!=l2:
        print('Error: number of nodes does not match')
exit(1)
for i in range(l1):
if g1[i][0] != g2[i][0]:
print(' keys do not match:', g1[i][0], g2[i][0])
exit(1)
if g1[i][1] != g2[i][1]:
if show_detail:
print(' diff on',g1[i][0],':',g1[i][1], 'vs.', g2[i][1])
cnt+=1
return cnt
def main(path1, path2, merge_parts, show_detail):
files1=get_file_names(path1)
files2=get_file_names(path2)
print(files1)
print(files2)
if len(files1)==0:
print('Error: cannot find result files in folder 1')
exit(1)
elif len(files2)==0:
print('Error: cannot find result files in folder 2')
exit(1)
elif not merge_parts and len(files1) != len(files2):
print('Error: number of parts does not match. Please try to enable option: merge_parts')
exit(1)
cnt=0
nKeys=0
if merge_parts:
g1=[]
g2=[]
for i in range(len(files1)):
print('loading result 1 part',i)
g1.extend(loadInList(path1+'/'+files1[i]))
for i in range(len(files2)):
print('loading result 2 part',i)
g2.extend(loadInList(path2+'/'+files2[i]))
g1.sort()
g2.sort()
cnt =compareOne(g1, g2, show_detail)
nKeys=len(g1)
else:
for i in range(len(files1)):
print('comparing part',i)
g1=loadInList(path1+'/'+files1[i])
g2=loadInList(path2+'/'+files2[i])
num=compareOne(g1, g2, show_detail)
print(' # of different nodes:', num, '/', len(g1))
cnt+=num
nKeys+=len(g1)
print('Total different nodes', cnt, '/', nKeys)
if __name__=='__main__':
if len(sys.argv) < 3:
        print('Compare the in-neighbor lists of two runs.')
        print('Usage: <result-path-1> <result-path-2> [merge-parts] [show-detail]')
        print('    [merge-parts]: (=0) Merge the graph parts before comparison, in order to work with runs that used different numbers of workers.')
        print('    [show-detail]: (=1) Show every found difference')
exit()
path1=sys.argv[1]
path2=sys.argv[2]
merge_parts=False
if len(sys.argv) > 3 and sys.argv[3] in ['1', 'y', 'yes', 't', 'true']:
merge_parts=True
show_detail=True
if len(sys.argv) > 4 and sys.argv[4] not in ['1', 'y', 'yes', 't', 'true']:
show_detail=False
#print('Merge parts before comparison =', merge_parts)
main(path1, path2, merge_parts, show_detail)
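# Example (hedged; the result paths are hypothetical):
#   python compare-ilist.py ./result_run1 ./result_run2 1
# merges the parts of both runs before comparing and prints every difference found.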
| 2.421875
| 2
|