#!/usr/bin/env python3
# name: test_character.py
# version: 0.0.1
# date: 20210422
# author: Leam Hall
# desc: Testing 2d6 OGL Character object
import unittest
from person import Person
from person import Character
class TestCharacter(unittest.TestCase):
def test_creation(self):
# Note that this also tests "creation_empty_data". :)
al = Character()
self.assertIsInstance(al, Character)
self.assertIsInstance(al, Person)
def test_creation_full_data(self):
# Can add an existing person's data
pass
def test_add_attr(self):
# Can add an attr.
# Replaces existing attr with new value.
pass
def test_get_attr(self):
# Graceful fail if attr not present.
# Value if present.
pass
def test_gen_upp(self):
# Given a valid set of stats, can generate a UPP string.
# Does this need a different process, since it doesn't cover
# 3d6 and d% games?
# What if numbers for UPP are:
# - missing
# - invalid ( < 0 or > 15)
pass
def test_get_stat(self):
# Can pull a specific stat.
# What if that stat doesn't exist?
# - unset
# - wrong game
pass
def test_set_stats(self):
# Can set stats post instance creation.
# What if only some stats are given?
pass
def test_modify_skill(self):
# Test if skill missing.
# Test if skill can be lowered, and raised.
# Is it reasonable to think something could delete the skills structure?
pass
def test_get_skill(self):
# Get None if no skill.
# Get number if has skill.
pass
if __name__ == "__main__":
unittest.main()
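# A minimal sketch of how test_gen_upp's invalid-value cases might be
# parameterized with unittest's subTest. The Character API used here
# (set_stats() and gen_upp() raising ValueError on out-of-range stats) is an
# assumption, not something person.py is known to provide:
#
#     def test_gen_upp_invalid_values(self):
#         for bad in (-1, 16):
#             with self.subTest(stat_value=bad):
#                 char = Character()
#                 char.set_stats({"strength": bad})   # hypothetical signature
#                 with self.assertRaises(ValueError):
#                     char.gen_upp()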
|
#!/usr/bin/env python
# Copyright (C) 2017 Maeve Automation - All Rights Reserved
# Permission to copy and modify is granted under the MIT license
import rospy
from std_msgs.msg import Bool
import ros_parameter_loading
import rpi_piezo_buzzer
## @package rpi_piezo_buzzer
# ROS node for activating a Piezo buzzer on a Raspberry Pi.
##
# @brief Handler class for Piezo buzzer callback.
class Handler:
##
# @brief Instantiate the piezo buzzer control.
#
# @param p Buzzer params.
def __init__(self, p):
self.buzzer = rpi_piezo_buzzer.Buzzer(
p.buzzer_gpio_pin,
p.buzzer_duration,
p.buzzer_on_duration,
p.buzzer_off_duration)
##
# @brief Activate the buzzer for each 'True' signal on the topic.
#
# @param msg The activation message.
def callback(self, msg):
if not msg.data:
return
self.buzzer.buzz()
if __name__ == '__main__':
rospy.init_node('rpi_piezo_buzzer')
node_params = ros_parameter_loading.NodeParams()
handler = Handler(node_params)
rospy.Subscriber(
node_params.buzzer_topic,
Bool,
handler.callback)
rospy.spin()
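# To exercise the node manually, one can publish a Bool on the configured
# topic. The topic name comes from the buzzer_topic parameter, so the value
# below is only a placeholder:
#
#     rostopic pub /buzzer_topic std_msgs/Bool "data: true" -1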
|
import platform
from colorama import init, Fore, Style
init(autoreset=True)
class interpreter:
def __init__(self):
#print ("Hello World")
return
def help(self):
print(Style.BRIGHT + Fore.GREEN + """
show options => Show system options
exit or quit => Exit the system
""")
def command_ls(self):
system = platform.system()
if system == 'Windows':
return 'dir'
elif system == 'Linux':
return 'ls'
else:
return
def command_clear(self):
system = platform.system()
if system == 'Windows':
return 'cls'
elif system == 'Linux':
return 'clear'
else:
return
def show_options(self):
print(Style.BRIGHT + Fore.GREEN + """
set target => Register the target in the system
use scanweb => Use a script that extracts information from a website
run or exploit => Start the system
use scanports => Scan all the ports of an IP
use scanport => Scan a single port of an IP
set port => Register the port in the system
use scanip => Geolocate an IP
scanns => Get the authoritative name servers
use getwpv => Get the WordPress version
""")
|
"""services/plotting
Logic for data manipulation needed when new callbacks are executed
"""
from typing import Any, Dict, Optional
from penn_chime.utils import add_date_column
import pandas as pd
def plot_dataframe(dataframe: pd.DataFrame, max_y_axis: Optional[int] = None) -> Dict[str, Any]:
"""
Build a Plotly figure dict with one lines+markers trace per dataframe column.
"""
if max_y_axis is None:
yaxis = {}
else:
yaxis = {"range": (0, max_y_axis), "autorange": False}
return {
"data": [
{
"x": dataframe.index,
"y": dataframe[col].astype(int),
"name": col,
"mode": "lines+markers",
}
for col in dataframe.columns
],
"layout": {"yaxis": yaxis},
}
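# Example usage (a minimal sketch): build a small frame and inspect the
# resulting Plotly-style figure dict.
if __name__ == "__main__":
    frame = pd.DataFrame(
        {"hospitalized": [1, 2, 4], "icu": [0, 1, 2]},
        index=pd.date_range("2020-03-01", periods=3),
    )
    figure = plot_dataframe(frame, max_y_axis=10)
    print(figure["layout"])  # {'yaxis': {'range': (0, 10), 'autorange': False}}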
|
import functools
import json
import typing
from pathlib import Path
from typing import Dict, Optional
@functools.lru_cache()
def get_raw_types() -> Dict[str, typing.Any]:
path = Path(__file__).parent.joinpath("dread_types.json")
with path.open() as f:
return json.load(f)
@functools.lru_cache()
def all_name_to_asset_id() -> Dict[str, int]:
path = Path(__file__).parent.joinpath("resource_names.json")
with path.open() as names_file:
return json.load(names_file)
@functools.lru_cache()
def all_asset_id_to_name() -> Dict[int, str]:
return {
asset_id: name
for name, asset_id in all_name_to_asset_id().items()
}
def name_for_asset_id(asset_id: int) -> Optional[str]:
return all_asset_id_to_name().get(asset_id)
@functools.lru_cache()
def all_name_to_property_id() -> Dict[str, int]:
path = Path(__file__).parent.joinpath("property_names.json")
with path.open() as names_file:
return json.load(names_file)
@functools.lru_cache()
def all_property_id_to_name() -> Dict[int, str]:
names = all_name_to_property_id()
return {
asset_id: name
for name, asset_id in names.items()
}
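# Example usage (a minimal sketch; real names and ids come from the bundled
# resource_names.json, so the name below is purely hypothetical):
#
#     asset_id = all_name_to_asset_id()["some/asset/name.pkg"]
#     assert name_for_asset_id(asset_id) == "some/asset/name.pkg"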
|
# WARNING!!!
# DO NOT MODIFY THIS FILE DIRECTLY.
# TO GENERATE THIS RUN: ./updateWorkspaceSnapshots.sh
BASE_ARCHITECTURES = ["amd64", "arm64"]
# Exceptions:
# - s390x doesn't have libunwind8.
# https://github.com/GoogleContainerTools/distroless/pull/612#issue-500157699
# - ppc64le doesn't have stretch security-channel.
# https://github.com/GoogleContainerTools/distroless/pull/637#issuecomment-728139611
# - arm needs someone with available hardware to generate:
# //experimental/python3/ld.so.arm.cache
ARCHITECTURES = BASE_ARCHITECTURES + ["arm", "s390x", "ppc64le"]
VERSIONS = [
("debian10", "buster"),
("debian11", "bullseye"),
]
DEBIAN_SNAPSHOT = "20210920T144924Z"
DEBIAN_SECURITY_SNAPSHOT = "20210920T191155Z"
SHA256s = {
"amd64": {
"debian10": {
"main": "3530cbc6c78b6cadda80c10d949f511abd4a7f33d3492ed17d36a7ecc591a5fd",
"updates": "6843a5d699f117b663d3f3264937902ca8d27f72c41a29158eea0f4e14a590c1",
"security": "b63d48c0ef1cd77644fa317eac4fe365834aa67e8d9ec1b29cf47bcf80747dc9",
},
"debian11": {
"main": "2dd487cd20eabf2aaecb3371d26543cfaec0cc7e7dd0846f2aee6b79574374ad",
"updates": "9d9184580240eb92ae0af4929191c180d13364f2d3f5258d79ae48d9f5f9f9ae",
"security": "2272f2683c4b7212d4ca5a48c6e7bde30ffe97f83f364f88e67feacadc661044",
},
},
"arm": {
"debian10": {
"main": "7f51ba4df1837b4f5b299aa46e533fd006e2dc1b07727e7b32fe87bea9a8be5d",
"updates": "dc2f8892a23ff27125993d38eed9efecb2dfd8b77ddd3f82483b71a90db2719e",
"security": "ef25999e71aaa86c38ec7deffc765b023af8d10260709429f9ede363d4ba76cf",
},
"debian11": {
"main": "d9dc9d75483fccd9f6f47d6baf1cf3bae166eb8d706de87241fee649d165c0bd",
"updates": "db4b24472c1ba2db9375a3f49e6421775483582a17d654db72eba3bf4d502f6c",
"security": "aa46fd9ebbede7f7bb38f70ea0920046f6786c29d77d0c19a449fc1f33093aaa",
},
},
"arm64": {
"debian10": {
"main": "cf63979c9d3f7a411f30f62632626552436a583531a5bdcfc051d63fda8af8a3",
"updates": "9851dabc9903095a5631f1e6980da1fd88051a01c5408748ba4333b6267e239b",
"security": "8a0a3fc23c597d34942f98c91f6c804f35bde2aef4c4fe3bfa8ec4bbfab0e08c",
},
"debian11": {
"main": "75d91cc94e87500c63d2808d440ec5197560ba45c5b80e5564c6d60e8ef95140",
"updates": "5cfc5140dff68cf017e526d90ac8ec5144b0f6eef057c3d81164163326febd77",
"security": "6c6f37544c28620fb75261adb8463e6c061306b17f4c9c681286d0cc55e8f732",
},
},
"s390x": {
"debian10": {
"main": "449258775fd2d7f1a6b30cb5537c25ba9ef88b0b3a41269e61005de6d8a6fb5e",
"updates": "b2e58f71a61ff5aa01004edfe3a0488d8ffaedb7e40dfdf01d949adb16d6c26b",
"security": "4c46c4e0a14b3349102ded8a78a260084dfcb94b46cccc746cc2c41d6964c3d7",
},
"debian11": {
"main": "0d738ded499cb12ddda2055e225ddeec3f5ec4e002b90af367dd526f71d74716",
"updates": "139b3cc7e0dd5ea3d7f9daf5e637553018f2f0a3d60a64697141d25ce8083c14",
"security": "a84d24fc75925e1a2e99e3fe391939225261b3780d98dff3026b67cce717b8fb",
},
},
"ppc64le": {
"debian10": {
"main": "2d4499fd08d0e2d73bee40c114198ac93098e8d14530e6f5e19a6838f5774b16",
"updates": "02527ab580bc2e3ea72be997a0b3034d0d26d715ac5c3851ffca0752082f38ad",
"security": "706a90b6d841d85e33e03d243de62a978bdae2c9e54a1befa992997390e7c0e4",
},
"debian11": {
"main": "af2cfa07996e2e5fb8dbb6e904b7f0910fcca79aee7bc25eed21ee0485fd9188",
"updates": "f8b507c08fd49cbaec15916d1de4edd139a28e57ad718143c8f3656b6b551192",
"security": "05165e7e63b4fc143fb533865c16def5662b316fbd1f9ffe8c1840a87aa17f83",
},
},
}
|
from django.urls import path
from .views import ServiceListView, ServiceDetailView
app_name = 'service'
urlpatterns = (
path('', ServiceListView.as_view(), name='index'),
path('<slug>/', ServiceDetailView.as_view(), name='detail'),
)
|
# Name    : Eraraya Morenzo Muten
# NIM     : 16520002
# Date    : 19 March 2021
# PROGRAM luastrapesium
# computes the area of a trapezoid with the formula 0.5*t*(s1+s2)
# DICTIONARY (declarations)
# t, s1, s2 : float
# ALGORITHM
t = float(input()) # read the trapezoid height
s1 = float(input())
s2 = float(input())
l = 0.5*t*(s1+s2) # compute the trapezoid area
print("%.2f" % l)
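# Worked example of the formula above: t = 4, s1 = 3, s2 = 5  ->  0.5 * 4 * (3 + 5) = 16.00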
|
""" Tools for working with collections """
import itertools
import sys
from warnings import warn
if sys.version_info < (3, 3):
from collections import Iterable
else:
from collections.abc import Iterable
__author__ = "Vince Reuter"
__email__ = "vreuter@virginia.edu"
__all__ = ["is_collection_like", "powerset", "asciify_dict", "merge_dicts"]
def merge_dicts(x, y):
"""
Merge dictionaries
:param Mapping x: dict to merge
:param Mapping y: dict to merge
:return Mapping: merged dict
"""
z = x.copy()
z.update(y)
return z
def is_collection_like(c):
"""
Determine whether an object is collection-like.
:param object c: Object to test as collection
:return bool: Whether the argument is a (non-string) collection
"""
return isinstance(c, Iterable) and not isinstance(c, str)
def uniqify(seq): # Dave Kirby
"""
Return only unique items in a sequence, preserving order
:param list seq: List of items to uniqify
:return list[object]: Original list with duplicates removed
"""
# Order preserving
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def powerset(items, min_items=None, include_full_pop=True, nonempty=False):
"""
Build the powerset of a collection of items.
:param Iterable[object] items: "Pool" of all items, the population for
which to build the power set.
:param int min_items: Minimum number of individuals from the population
to allow in any given subset.
:param bool include_full_pop: Whether to include the full population in
the powerset (default True to accord with genuine definition)
:param bool nonempty: force each subset returned to be nonempty
:return list[object]: Sequence of subsets of the population, in
nondecreasing size order
:raise TypeError: if minimum item count is specified but is not an integer
:raise ValueError: if minimum item count is insufficient to guarantee
nonempty subsets
"""
if min_items is None:
min_items = 1 if nonempty else 0
else:
if not isinstance(min_items, int):
raise TypeError(
"Min items count for each subset isn't an integer: "
"{} ({})".format(min_items, type(min_items))
)
if nonempty and min_items < 1:
raise ValueError(
"When minimum item count is {}, nonempty subsets "
"cannot be guaranteed.".format(min_items)
)
# Account for iterable burn possibility; besides, collection should be
# relatively small if building the powerset.
items = list(items)
n = len(items)
if n == 0 or n < min_items:
return []
max_items = len(items) + 1 if include_full_pop else len(items)
return list(
itertools.chain.from_iterable(
itertools.combinations(items, k) for k in range(min_items, max_items)
)
)
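# Example of the expected behavior (a minimal sketch):
#   powerset([1, 2], nonempty=True)           ->  [(1,), (2,), (1, 2)]
#   powerset([1, 2], min_items=2)             ->  [(1, 2)]
#   powerset([1, 2], include_full_pop=False)  ->  [(), (1,), (2,)]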
def asciify_dict(data):
""" https://gist.github.com/chris-hailstorm/4989643 """
def _asciify_list(lst):
ret = []
for item in lst:
if isinstance(item, unicode):
item = item.encode("utf-8")
elif isinstance(item, list):
item = _asciify_list(item)
elif isinstance(item, dict):
item = asciify_dict(item)
ret.append(item)
return ret
if sys.version_info[0] >= 3:
warn("This operation is supported only in Python2", UserWarning)
return data
ret = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode("utf-8")
if isinstance(value, unicode):
value = value.encode("utf-8")
elif isinstance(value, list):
value = _asciify_list(value)
elif isinstance(value, dict):
value = asciify_dict(value)
ret[key] = value
return ret
|
"""Storage Manager Unit Test."""
import pytest
from data_resource.config import ConfigurationFactory
from data_resource.storage.aws_s3 import S3Manager
from data_resource.storage.storage_manager import StorageManager
from unittest.mock import patch
# Mock S3Manager class for unit tests
class MockS3Manager:
def aws_s3_object_exists_true(self, bucket, object_name):
return True
def aws_s3_object_exists_false(self, bucket, object_name):
return False
def aws_s3_get_data(self, bucket, object_name):
return "{}"
def aws_s3_put_data(self, json_data, bucket, object_name):
self.json_data = json_data
self.bucket = bucket
self.object_name = object_name
in_mock_s3_manager = MockS3Manager()
def base_s3_test_configs():
configs = ConfigurationFactory.get_config("TEST")
# set random value to pass check.
configs.SCHEMA_STORAGE_TYPE = "S3"
configs.AWS_S3_REGION = "test-region"
configs.AWS_ACCESS_KEY_ID = "test-key"
configs.AWS_SECRET_ACCESS_KEY = "test-access"
configs.AWS_S3_STORAGE_BUCKET_NAME = "my-bucket-name"
configs.AWS_S3_STORAGE_OBJECT_NAME = "my-object-name"
return configs
@pytest.mark.unit
def test_load_configuration_from_env_for_storage_manager():
"""Ensure that a configuration object can be pulled from the environment
and has the default configs."""
configuration = ConfigurationFactory.get_config("TEST")
assert hasattr(configuration, "SCHEMA_STORAGE_TYPE")
assert hasattr(configuration, "DEFAULT_LOCAL_GENERATION_PAYLOAD_PATH")
assert hasattr(configuration, "AWS_S3_REGION")
assert hasattr(configuration, "AWS_S3_STORAGE_BUCKET_NAME")
assert hasattr(configuration, "AWS_S3_STORAGE_OBJECT_NAME")
@pytest.mark.unit
def test_s3_manager_config_check_failure():
"""Ensure that the s3 manager will throw a RuntimeError when AWS configs
not found."""
with pytest.raises(RuntimeError):
_ = S3Manager(ConfigurationFactory.get_config("TEST"))
@pytest.mark.unit
def test_s3_manager_config_check_success():
"""Ensure that the s3 manager will init correctly with required envs:
- AWS_S3_REGION
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
"""
configs = base_s3_test_configs()
_ = S3Manager(configs)
@pytest.mark.unit
def test_s3_manager_config_required_env_check():
"""Ensures the AWS SDK is not triggered when running in the testing
environment."""
configs = base_s3_test_configs()
s3manager = S3Manager(configs)
with pytest.raises(RuntimeError):
s3manager.get_s3_client()
@pytest.mark.unit
@patch(
"data_resource.storage.aws_s3.S3Manager.aws_s3_object_exists",
new=in_mock_s3_manager.aws_s3_object_exists_true,
)
def test_s3_store_manager_data_resource_payload_exists():
"""Ensuring the S3 Manager is receiving the correct parameters
"aws_s3_object_exists" for the AWS API (boto3)"""
configs = base_s3_test_configs()
manager = StorageManager(configs)
assert manager.data_resource_generation_payload_exists() == True
@pytest.mark.unit
@patch(
"data_resource.storage.aws_s3.S3Manager.aws_s3_get_data",
new=in_mock_s3_manager.aws_s3_get_data,
)
def test_s3_store_manager_data_resource_payload_get():
"""Ensuring the S3 Manager is receiving the correct parameters
"aws_s3_get_data" for the AWS API (boto3)"""
configs = base_s3_test_configs()
manager = StorageManager(configs)
assert manager.get_data_resource_generation_payload_data() == {}
@pytest.mark.unit
@patch(
"data_resource.storage.aws_s3.S3Manager.aws_s3_put_data",
new=in_mock_s3_manager.aws_s3_put_data,
)
@patch(
"data_resource.storage.aws_s3.S3Manager.aws_s3_object_exists",
new=in_mock_s3_manager.aws_s3_object_exists_false,
)
def test_s3_store_manager_data_resource_payload_put():
"""Ensuring the S3 Manager is receiving the correct parameters
"aws_s3_put_data" for the AWS API (boto3)"""
configs = base_s3_test_configs()
manager = StorageManager(configs)
manager.save_data_resource_generation_payload_data("{}")
assert in_mock_s3_manager.json_data == "{}"
assert in_mock_s3_manager.bucket == configs.AWS_S3_STORAGE_BUCKET_NAME
assert in_mock_s3_manager.object_name == configs.AWS_S3_STORAGE_OBJECT_NAME
|
import time, datetime
import sys
show_possible = False
misses = 0
if(len(sys.argv)<=3):
sys.exit("INVALID ARGUMENTS "+str(sys.argv))
#At minimum we require 4 arguments: 'score_finder.py', SCORE, X_COUNT, and SHOTS
else:
if(len(sys.argv)>4):
#Optional fifth argument shows permutations containing missed shots
if str(sys.argv[4]).lower()=="y" or str(sys.argv[4]).lower()=="t":
show_possible= True
elif str(sys.argv[4]).lower()=="n" or str(sys.argv[4]).lower()=="f":
show_possible= False
else:
misses = int(sys.argv[4])
show_possible = False
#Constants to help with code coherence
X_SCORE = 0
TEN_SCORE = 1
NINE_SCORE = 2
ONE_SCORE = 10
ZERO_SCORE = 11
def find_score(distri):
'''
Calculates a score of a given distribution
In our distributions, each column/element is weighted differently. Two
elements are worth 10 points each, and all others descend. Some simple
math can create scalars that make this weighting.
'''
score = 0
score += distri[X_SCORE]*10
#X-Count is also worth 10
for n in range(TEN_SCORE,ZERO_SCORE):
score +=distri[n]*(11-n)
'''
A distribution which each element is indexed (numbered) from
left-to-right as the value decreases left-to-right would be:
this_element_scalar = (maximum_scalar - this_element_index)
element = this_element_value * this_element_scalar
However our distribution includes 11 elements and has a maximum
scalar of 10, which would make the scalars equate to:
first_scalar = (10 - 0) # => 10 (Correct)
second_scalar = (10 - 1) # => 9 (Incorrect)
...
last_scalar = (10 - 11) # => -1 (Incorrect)
Only the first scalar is correct.
The error here is that we have two elements with a scalar of 10. (X and 10 ring)
If we complete the formula as if there were a maximum scalar of 11 and
skip the first element, the scalars align:
ignored_first_scalar = (11 - 0) # ignored
second_scalar = (11 - 1) # => 10 (Correct)
third_scalar = (11 - 2) # => 9 (Correct)
...
last_scalar = (11 - 11) # => 0 (Correct)
'''
return score
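# Worked example of find_score (a minimal sketch):
#   distri = [2, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
#   X column : 2 * 10       = 20
#   10-ring  : 3 * (11 - 1) = 30
#   9-ring   : 1 * (11 - 2) =  9
#   find_score(distri)      = 59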
def backtick(s):
#Prints time to the screen, only occupying bottom line. Proves code is still running.
print str(datetime.datetime.now().time())+"-: "+ s,
sys.stdout.flush()
print "\r",
bad_distris=[]
#Distributions with missed shots.
def find_distri(score, x, shot_count, show_possible):
'''
Main Function. Increments permutations left to right.
With each distribution represented as an 11-element list/array,
(One element/column per ring and centre X) we will literate through
distributions just like how you would count the decimal number 0 to 200:
Each 'digit' has a value. As you exhaust one of these digits, you reset it
to 0 and increase its neighbour. (9 increases to 10)
As a competitor aims for the centre of the target, they are
exponentially more likely to hit the bullseye or 'X' than the outer rings.
With our distributions organised with the 'X' as the first element in our
list, it makes more sense to iterate or 'count up' through our distributions
from centre to the outer rings. Given a score, this also would present
results more likely of an experienced competitor first. For inexperienced
competitors, the assumption of perfection is observed as a boost to morale.
This works around the fact that inaccurate scores would take longer to
find as we iterate.
(Thankfully, I have data from a Grand Master PPC shooter to test from.)
We will iterate from [X,0,0,0,0,0,0,0,0,0,0,0] to
[X,MAX,MAX,MAX,MAX,MAX,MAX,MAX,MAX,MAX,MAX,0] where MAX is the number of
shots on target minus the 'X'-count. The last element is the
number of missed shots and will not be iterated through. Our logic satisfies
distributions with missed shots early on. If we did iterate through the
missed shots, our code would take somewhere around
(SHOT_COUNT - X_COUNT) times as long to find.
'''
random_pass = 0
#Number of failed distributions since last successful: supplementary, resets every 1Mil
fcarry_count = 0
#Number of branches of permutations we skipped: supplementary, intriguing statistic, never resets
biggest_score = 0
#Largest score found in a permutation: supplementary, interesting statistic
biggest_sum = 0
#Largest number of shots found in a permutation, before a branch skip: supplementary, interesting statistic
distri = [0,0,0,0,0,0,0,0,0,0,0,0]
#Present list/distribution/permutation. 11 elements: X,10-1, and 'miss'/zero
distri[X_SCORE] = x
#Set distribution with provided input. Will never change past this point.
possible = shot_count-x
distri[ZERO_SCORE] = misses
#Set distribution with provided input. Will never change past this point.
print "There was a maximum of "+str(shot_count)+" shots."
possible = shot_count-x
#If we know n shots were X_SCORE, that leaves n-X possible other shots.
print "There is a possible "+str(possible)+" shots left after "+str(x)+" X hits."
if misses>0:
possible = possible-misses
#If we have provided missed shots, then we can eliminate them from the distribution
print "There is a possible "+str(possible)+" shots left after "+str(misses)+" misses."
overflow_column = 0
#Last column where we had a carry.
while distri[ZERO_SCORE]<=(misses):
#We will stop once the Zero element of the distribution increases. Logic for distributions with missed shots are nested further.
# Our main loop.
random_pass += 1
skip_flag = False
this_shot_sum = 0
for n in range(TEN_SCORE,ONE_SCORE):
#Count number of shots in present distribution, left to right, starting at 10-ring.
this_shot_sum += distri[n]
if this_shot_sum>possible:
skip_flag = True
break
#Break the for loop: If we are greater than 'possible' for the first three rings, there is no need to count the other 8.
#FORCE CARRY
if skip_flag and overflow_column:
'''
We learned the number of shots was too high for this
distribution, due to skip_flag.
If possible = 5
[0,3,3] #=> A total of 6 out of 5 possible shots. Invalid.
Then we should ignore the rest of the [X,X,3] branch as we know
[0,3,3] through [5,5,3] are invalid. Jump to [0,0,4] as if we just
finished counting [5,5,3].
To ignore, we perform a 'force carry'. As the decimal number 199
counts up to 200, the leftmost digit increases, and all digits to
the right are reset to 0.
We perform the same action, except digits are our distribution
elements and we count up from left-to-right.
'''
for s in range(TEN_SCORE,overflow_column+1):
#Reset all to left of overflow_column to zero
distri[s]=0
distri[overflow_column+1]+=1
fcarry_count+=1
#Increase our supplementary statistic
#END FORCE CARRY
else:
#In all cases where we are not forcibly carrying, we are counting up left-to-right from the 10-ring.
distri[TEN_SCORE] += 1
#CARRY
for n in range(TEN_SCORE,ZERO_SCORE):
'''
Basic left-to-right carrying.
After careful study of this context, carrying does not need to be
recursive, as both the forced and unforced carries are conditional
that columns (individually or collectively) must not ever surpass
the possible number of shots.
Thus there will only ever be maximum of one forced and one unforced
carry per pass of our main loop.
Example:
If possible = 3
[1,4,0,4,0] would never reach [1,0,1,4,0]
as forced carry would have already changed to [0,0,0,0,1].
Additionally, the existence of [1,4,0,4,0] would be paradoxical, as
a previous forced or unforced carry would have caught any
distribution of [X,X,X,4,0].
'''
if distri[n] > possible: #overflow
distri[n] = 0
#we only need to change the current column to 0, for the same reason as above.
distri[n+1] +=1
#range(x, y) stops before y, so [n+1] stays within the list bounds as long as y equals the upper bound of our list. "for n in range(1,10): print n" will never say "10"
overflow_column = n
#latest column to CARRY from, useful for when we force carry.
#END CARRY
#TESTING LOGIC
dist_sum = sum(distri)
#This distribution's total number of shots.
this_score = find_score(distri)
#This distribution's score, as defined by find_score function.
backtick("Working...")
#Print out to the bottom of the screen the time and a message. Proves the script hasn't froze.
if this_score > biggest_score:
#Store our supplementary statistic of greatest score
biggest_score = this_score
if dist_sum > biggest_sum:
#Store our supplementary statistic of greatest sum
biggest_sum = dist_sum
if random_pass == 1000000:
#Every 1 Million failed distributions, print out some statistics to keep us busy.
random_pass = 0
#Random_pass is the counter for each individual distributions
print "I'm still working... Mil-BiggestSum:"+str(biggest_sum)+"-BiggestScore:"+str(biggest_score)+"-SKIPS:"+str(fcarry_count)+"--"+str(distri)+" SC:"+str(this_score)+" SUM:"+str(dist_sum)
'''
Prints as follows:
- "I'm still working..."
- "Mil-" : One million distributions have failed our tests
- BiggestSum : The tested distribution that had the most number of shots on target, however invalid
- BiggestScore : The tested distribution that had the greatest score, however invalid
- SKIPS : The number of times our code has force carried, avoiding entire invalid branches
- -- : The present distribution being tested, however invalid
- SC : The score of present distribution, however invalid
- SUM : The number of shots of present distribution, however invalid
'''
#check results.
if this_score==score:
#This distribution's score is a match with our request
if dist_sum==shot_count:
#Even the number of shots match up. This is possibly the distribution that made the score
print str(distri)+" SC:"+str(score)+" SUM:"+str(dist_sum)
elif dist_sum<shot_count:
'''
There is a possibility that an experienced competitor has missed
or lost shots on target. A misfire or hits on other competitors'
targets are awarded zero points.
If the present distribution has fewer number of shots as hits,
but has an equal score, it is possible that the remaining shots
were all misses. Since a miss is zero points, we can put that
back into our results.
'''
distri2 = [0,0,0,0,0,0,0,0,0,0,0,0]
#Creating a second distribution as not to alter the distribution we are iterating through
for n in range(0,12):
distri2[n]=distri[n]
#Copy all the data from the first distribution into the second.
distri2[ZERO_SCORE] = (shot_count-dist_sum)
#The difference between this distribution's shot count and the requested is equal to the number of missed shots. Place that into the ZERO_SCORE column.
bad_distris.append(distri2)
#Take the distribution with missed shots and put it into the collection called bad_distris
#END TESTING LOGIC
if(show_possible):
#If the user requested distributions with missed shots, print them out now, since we have iterated through all other possibilities.
for n in bad_distris:
this_sum = sum(n)
this_zero = n[ZERO_SCORE]
print "P:"+str(n)+" SC:"+str( find_score(n) )+" SUM:"+str( this_sum )+" HIT/MISS:"+str(this_sum-this_zero)+"/"+str(this_zero)
#Print out the missed shot distributions differently than those which are a 100% match, as these are not as desirable.
find_distri(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]), show_possible)
#Point of entry, pulls command line arguments and starts the main function.
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.cog.SuitHealBossBehavior
from lib.coginvasion.cog.SuitHabitualBehavior import SuitHabitualBehavior
from lib.coginvasion.cog import SuitAttacks
from lib.coginvasion.globals import CIGlobals
from direct.interval.IntervalGlobal import Sequence, Wait, Func
from direct.distributed.ClockDelta import globalClockDelta
import random
class SuitHealBossBehavior(SuitHabitualBehavior):
HEAL_SPEED = 50.0
HEAL_DISTANCE = 20.0
HEAL_COOLDOWN = 10.0
def __init__(self, suit, boss):
doneEvent = 'suit%s-healBoss' % suit.doId
SuitHabitualBehavior.__init__(self, suit, doneEvent)
self.boss = boss
self.maxHeal = int(self.suit.getLevel() * 7)
self.minHeal = int(self.suit.getLevel() * 2.5)
self.suitHealTrack = None
self.cooldownTrack = None
self.canHeal = True
return
def __attemptToHealBoss(self, hp, currBossPos):
if self.isBossAvailable():
if (self.boss.getPos(render) - currBossPos).length() <= 1:
self.boss.b_setHealth(self.boss.getHealth() + hp)
self.boss.d_announceHealth(1, hp)
self.suit.d_handleWeaponTouch()
def isBossAvailable(self):
if not self.boss.isEmpty() and not hasattr(self.boss, 'DELETED') and not self.boss.isDead():
return True
return False
def __disableBoss(self):
if self.isBossAvailable():
if self.boss.getBrain():
self.boss.getBrain().stopThinking()
self.boss.b_setAnimState('neutral')
def __enableBoss(self):
if self.isBossAvailable():
self.boss.getBrain().startThinking()
def __toggleCanHeal(self):
if self.canHeal:
self.canHeal = False
else:
self.canHeal = True
def enter(self):
SuitHabitualBehavior.enter(self)
self.__toggleCanHeal()
attack = random.randint(0, 6)
attackName = SuitAttacks.SuitAttackLengths.keys()[attack]
timestamp = globalClockDelta.getFrameNetworkTime()
self.suit.sendUpdate('doAttack', [attack, self.boss.doId, timestamp])
distance = self.suit.getDistance(self.boss)
timeUntilHeal = distance / self.HEAL_SPEED
timeUntilRelease = 1.0
self.suit.d_setChat(CIGlobals.SuitHealTaunt)
hp = int(self.suit.maxHealth / SuitAttacks.SuitAttackDamageFactors[attackName])
if hp == 0:
hp = 1
if self.boss.getHealth() + hp > self.boss.getMaxHealth():
hp = self.boss.getMaxHealth() - self.boss.getHealth()
if attackName != 'glowerpower':
if self.suit.suitPlan.getSuitType() == 'C':
timeUntilRelease = 2.2
else:
timeUntilRelease = 3.0
self.suitHealTrack = Sequence(Func(self.__disableBoss), Wait(timeUntilRelease + timeUntilHeal), Func(self.__attemptToHealBoss, hp, self.boss.getPos(render)), Func(self.__enableBoss), Func(self.exit))
self.suitHealTrack.start()
def exit(self):
SuitHabitualBehavior.exit(self)
if self.suitHealTrack:
self.suitHealTrack.pause()
self.suitHealTrack = None
self.cooldownTrack = Sequence(Wait(self.HEAL_COOLDOWN), Func(self.__toggleCanHeal))
self.cooldownTrack.start()
return
def shouldStart(self):
SuitHabitualBehavior.shouldStart(self)
if hasattr(self, 'suit') and not self.suit.isEmpty() and hasattr(self, 'boss') and not self.boss.isEmpty():
if self.suit.getDistance(self.boss) <= self.HEAL_DISTANCE:
if not hasattr(self.suit, 'DELETED') and not hasattr(self.boss, 'DELETED'):
return self.canHeal
return False
def unload(self):
SuitHabitualBehavior.unload(self)
if self.suitHealTrack:
self.suitHealTrack.pause()
self.suitHealTrack = None
if self.cooldownTrack:
self.cooldownTrack.pause()
self.cooldownTrack = None
del self.suitHealTrack
del self.cooldownTrack
del self.maxHeal
del self.minHeal
del self.boss
return
|
import unittest
import solution
class TestQ(unittest.TestCase):
def test_case_0(self):
self.assertEqual(solution.componentsInGraph([
[1, 6],
[2, 7],
[3, 8],
[4, 9],
[2, 6],
]), (2, 4))
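# Edges [1,6], [2,7] and [2,6] merge into one component {1, 2, 6, 7} of size 4,
# while [3,8] and [4,9] stay as size-2 components, so the expected
# (smallest, largest) component sizes are (2, 4).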
if __name__ == '__main__':
unittest.main()
|
"""Various frequently used common utilities."""
import json
import logging
import os
from libmorse import exceptions, settings
RES_TEXT = "text"
RES_JSON = "json"
def get_logger(name, use_logging=settings.LOGGING, debug=settings.DEBUG):
"""Obtain a logger object given a name."""
log = logging.getLogger(name)
if settings.LOGFILE:
handler = logging.FileHandler(settings.LOGFILE)
else:
handler = logging.StreamHandler()
template = "%(levelname)s - %(name)s - %(asctime)s - %(message)s"
formatter = logging.Formatter(template)
handler.setFormatter(formatter)
if not log.handlers:
log.addHandler(handler)
level = logging.DEBUG if debug else logging.INFO
level = level if use_logging else logging.CRITICAL
log.setLevel(level)
return log
def get_return_code(exc):
"""Get a return code based on the raised exception."""
if not exc:
return 0 # all good, clean code
if isinstance(exc, exceptions.MorseError):
return exc.CODE # known error, known code
return exceptions.MorseError.CODE # normalize to default error code
def get_resource(name, resource_type=RES_TEXT):
"""Retrieve the content of a resource name."""
path = os.path.join(settings.RESOURCE, name)
with open(path) as stream:
data = stream.read()
if resource_type == RES_TEXT:
return data
if resource_type == RES_JSON:
return json.loads(data)
raise exceptions.ProcessMorseError(
"invalid resource type {!r}".format(resource_type))
def get_mor_code(name):
"""Get MOR code given `data`."""
data = (get_resource(name) if isinstance(name, str)
else name.read()).strip()
if not data:
return []
mor_code = []
for line in data.splitlines():
# Remove extra spaces and get rig of comments.
line = line.strip()
idx = line.find("#")
if idx != -1:
line = line[:idx].strip()
if not line:
continue
# Now get the status and time length of the quanta.
chunks = line.split()
state, duration = bool(int(chunks[0])), float(chunks[1])
mor_code.append((state, duration))
return mor_code
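# Example of the expected resource format (a minimal sketch): one
# "<state> <duration>" pair per line, '#' starts a comment.
#
#     # state duration
#     1 0.25
#     0 0.75  # silence
#
# get_mor_code() on such content returns [(True, 0.25), (False, 0.75)].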
def humanize_mor_code(morse_code, unit=settings.UNIT, ratio=8.0,
split=False):
"""Add an expected silence at the end of the morse code."""
length = unit * ratio
if split:
length /= 2
ending = [(False, length)]
if split:
ending *= 2
morse_code.extend(ending)
return morse_code
class Logger(object):
"""Simple base class offering logging support."""
def __init__(self, name, use_logging=settings.LOGGING,
debug=settings.DEBUG):
"""Log information using the `log` method.
:param bool use_logging: enable logging or not
:param bool debug: enable debugging messages
"""
super(Logger, self).__init__()
self.log = get_logger(name, use_logging=use_logging, debug=debug)
def _log_error(self, message):
self.log.error(str(message).strip(".!?").capitalize() + ".")
|
# -*- coding: utf-8 -*-
"""Implementation of ConvE."""
from typing import Dict, Optional
import torch
import torch.autograd
import torch.optim as optim
from pykeen.constants import (
CONV_E_FEATURE_MAP_DROPOUT,
CONV_E_HEIGHT,
CONV_E_INPUT_CHANNELS,
CONV_E_INPUT_DROPOUT,
CONV_E_KERNEL_HEIGHT,
CONV_E_KERNEL_WIDTH,
CONV_E_NAME,
CONV_E_OUTPUT_CHANNELS,
CONV_E_OUTPUT_DROPOUT,
CONV_E_WIDTH,
EMBEDDING_DIM,
GPU,
LEARNING_RATE,
MARGIN_LOSS,
NUM_ENTITIES,
NUM_RELATIONS,
PREFERRED_DEVICE,
)
from pykeen.kge_models.base import BaseModule
from torch import nn
from torch.nn import Parameter, functional as F
from torch.nn.init import xavier_normal
__all__ = ["ConvE"]
class ConvE(BaseModule):
"""An implementation of ConvE [dettmers2017]_.
.. [dettmers2017] Dettmers, T., *et al.* (2017) `Convolutional 2d knowledge graph embeddings
<https://arxiv.org/pdf/1707.01476.pdf>`_. arXiv preprint arXiv:1707.01476.
.. seealso:: https://github.com/TimDettmers/ConvE/blob/master/model.py
"""
model_name = CONV_E_NAME
hyper_params = [
EMBEDDING_DIM,
CONV_E_INPUT_CHANNELS,
CONV_E_OUTPUT_CHANNELS,
CONV_E_HEIGHT,
CONV_E_WIDTH,
CONV_E_KERNEL_HEIGHT,
CONV_E_KERNEL_WIDTH,
CONV_E_INPUT_DROPOUT,
CONV_E_FEATURE_MAP_DROPOUT,
CONV_E_OUTPUT_DROPOUT,
MARGIN_LOSS,
LEARNING_RATE,
]
def __init__(
self,
margin_loss: float,
embedding_dim: int,
ConvE_input_channels,
ConvE_output_channels,
ConvE_height,
ConvE_width,
ConvE_kernel_height,
ConvE_kernel_width,
conv_e_input_dropout,
conv_e_output_dropout,
conv_e_feature_map_dropout,
random_seed: Optional[int] = None,
preferred_device: str = "cpu",
**kwargs
) -> None:
super().__init__(margin_loss, embedding_dim, random_seed, preferred_device)
self.ConvE_height = ConvE_height
self.ConvE_width = ConvE_width
assert self.ConvE_height * self.ConvE_width == self.embedding_dim
self.inp_drop = torch.nn.Dropout(conv_e_input_dropout)
self.hidden_drop = torch.nn.Dropout(conv_e_output_dropout)
self.feature_map_drop = torch.nn.Dropout2d(conv_e_feature_map_dropout)
self.loss = torch.nn.BCELoss()
self.conv1 = torch.nn.Conv2d(
in_channels=ConvE_input_channels,
out_channels=ConvE_output_channels,
kernel_size=(ConvE_kernel_height, ConvE_kernel_width),
stride=1,
padding=0,
bias=True,
)
# num_features – C from an expected input of size (N,C,H,W)
self.bn0 = torch.nn.BatchNorm2d(ConvE_input_channels)
# num_features – C from an expected input of size (N,C,H,W)
self.bn1 = torch.nn.BatchNorm2d(ConvE_output_channels)
self.bn2 = torch.nn.BatchNorm1d(self.embedding_dim)
num_in_features = (
ConvE_output_channels
* (2 * self.ConvE_height - ConvE_kernel_height + 1)
* (self.ConvE_width - ConvE_kernel_width + 1)
)
self.fc = torch.nn.Linear(num_in_features, self.embedding_dim)
# Default optimizer for ConvE
self.default_optimizer = optim.Adam
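# Shape sanity check (a minimal sketch, assuming embedding_dim=200 reshaped
# to ConvE_height=10 x ConvE_width=20, 3x3 kernels and 32 output channels):
#   stacked input (N, 1, 2*10, 20) -> conv1 -> (N, 32, 18, 18)
#   num_in_features = 32 * (2*10 - 3 + 1) * (20 - 3 + 1) = 32 * 18 * 18 = 10368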
def _init_embeddings(self):
super()._init_embeddings()
self.entity_embeddings = nn.Embedding(self.num_entities, self.embedding_dim)
self.relation_embeddings = nn.Embedding(self.num_relations, self.embedding_dim)
self.register_parameter("b", Parameter(torch.zeros(self.num_entities)))
def init(self): # FIXME is this ever called?
xavier_normal(self.entity_embeddings.weight.data)
xavier_normal(self.relation_embeddings.weight.data)
def predict(self, triples):
# Check if the model has been fitted yet.
if self.entity_embeddings is None:
print(
"The model has not been fitted yet. Predictions are based on randomly initialized embeddings."
)
self._init_embeddings()
batch_size = triples.shape[0]
subject_batch = triples[:, 0:1]
relation_batch = triples[:, 1:2]
object_batch = triples[:, 2:3].view(-1)
subject_batch_embedded = self.entity_embeddings(subject_batch).view(
-1, 1, self.ConvE_height, self.ConvE_width
)
relation_batch_embedded = self.relation_embeddings(relation_batch).view(
-1, 1, self.ConvE_height, self.ConvE_width
)
candidate_object_embeddings = self.entity_embeddings(object_batch)
# batch_size, num_input_channels, 2*height, width
stacked_inputs = torch.cat([subject_batch_embedded, relation_batch_embedded], 2)
# batch_size, num_input_channels, 2*height, width
stacked_inputs = self.bn0(stacked_inputs)
# batch_size, num_input_channels, 2*height, width
x = self.inp_drop(stacked_inputs)
# (N,C_out,H_out,W_out)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.feature_map_drop(x)
# batch_size, num_output_channels * (2 * height - ConvE_kernel_height + 1) * (width - ConvE_kernel_width + 1)
x = x.view(batch_size, -1)
x = self.fc(x)
x = self.hidden_drop(x)
if batch_size > 1:
x = self.bn2(x)
x = F.relu(x)
x = torch.mm(x, candidate_object_embeddings.transpose(1, 0))
scores = torch.sigmoid(x)
max_scores, predictions = torch.max(scores, dim=1)
# Class 0 represents false fact and class 1 represents true fact
return predictions
def forward(self, pos_batch, neg_batch):
batch = torch.cat((pos_batch, neg_batch), dim=0)
positive_labels = torch.ones(
pos_batch.shape[0], dtype=torch.float, device=self.device
)
negative_labels = torch.zeros(
neg_batch.shape[0], dtype=torch.float, device=self.device
)
labels = torch.cat([positive_labels, negative_labels], dim=0)
perm = torch.randperm(labels.shape[0])
batch = batch[perm]
labels = labels[perm]
batch_size = batch.shape[0]
heads = batch[:, 0:1]
relations = batch[:, 1:2]
tails = batch[:, 2:3]
# batch_size, num_input_channels, width, height
heads_embs = self.entity_embeddings(heads).view(
-1, 1, self.ConvE_height, self.ConvE_width
)
relation_embs = self.relation_embeddings(relations).view(
-1, 1, self.ConvE_height, self.ConvE_width
)
tails_embs = self.entity_embeddings(tails).view(-1, self.embedding_dim)
# batch_size, num_input_channels, 2*height, width
stacked_inputs = torch.cat([heads_embs, relation_embs], 2)
# batch_size, num_input_channels, 2*height, width
stacked_inputs = self.bn0(stacked_inputs)
# batch_size, num_input_channels, 2*height, width
x = self.inp_drop(stacked_inputs)
# (N,C_out,H_out,W_out)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.feature_map_drop(x)
# batch_size, num_output_channels * (2 * height - ConvE_kernel_height + 1) * (width - ConvE_kernel_width + 1)
x = x.view(batch_size, -1)
x = self.fc(x)
x = self.hidden_drop(x)
if batch_size > 1:
x = self.bn2(x)
x = F.relu(x)
scores = torch.sum(torch.mm(x, tails_embs.transpose(1, 0)), dim=1)
predictions = torch.sigmoid(scores)
loss = self.loss(predictions, labels)
return loss
|
from train import alexnet_train, siamese_train
from evaluate import alexnet_evaluate, siamese_evaluate
alexnet = alexnet_train()
siamese = siamese_train()
alexnet_evaluate()
siamese_evaluate(siamese)
|
from bark_ml.library_wrappers.lib_tf_agents.agents.sac_agent import BehaviorSACAgent
from bark_ml.library_wrappers.lib_tf_agents.agents.ppo_agent import BehaviorPPOAgent
|
import numpy as np
import logging
import itertools
class Moon:
def __init__(self, x, y, z):
self.position = np.array([x,y,z],dtype=int)
self.velocity = np.array([0,0,0],dtype=int)
def energy(self):
return np.abs(self.position).sum() * np.abs(self.velocity).sum()
@property
def x(self):
return self.position[0], self.velocity[0]
@property
def y(self):
return self.position[1], self.velocity[1]
@property
def z(self):
return self.position[2], self.velocity[2]
def __repr__(self): return str(self)
def __str__(self): return f'pos:{self.position} vel:{self.velocity}\n'
def processMoons(moons, n):
"""
To apply gravity, consider every pair of moons. On each axis (x, y, and z), the velocity of each moon changes by exactly +1 or -1 to pull the moons together. For example, if Ganymede has an x position of 3, and Callisto has an x position of 5, then Ganymede's x velocity changes by +1 (because 5 > 3) and Callisto's x velocity changes by -1 (because 3 < 5). However, if the positions on a given axis are the same, the velocity on that axis does not change for that pair of moons.
Once all gravity has been applied, apply velocity: simply add the velocity of each moon to its own position. For example, if Europa has a position of x=1, y=2, z=3 and a velocity of x=-2, y=0,z=3, then its new position would be x=-1, y=2, z=6. This process does not modify the velocity of any moon.
"""
for i in range(n):
logging.debug(moons)
# gravity
for moon1, moon2 in itertools.combinations(moons, 2):
add = 1 * (moon1.position < moon2.position) - 1 * (moon1.position > moon2.position)
add[moon1.position == moon2.position] = 0
moon1.velocity += add
moon2.velocity += np.negative(add)
# update positions
for moon in moons:
moon.position += moon.velocity
return moons
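# Example (first step of the sample system built in __main__ below):
# the moon at (-1, 0, 2) is pulled by the other three moons to velocity
# (3, -1, -1), so after one step its position becomes (2, -1, 1).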
def predictRecurrence(moons):
prev = [[None] for _ in range(3)]
cycles = [[None] for _ in range(3)]
for i in range(1000):
prev[0].append([m.x for m in moons])
prev[1].append([m.y for m in moons])
prev[2].append([m.z for m in moons])
moons = processMoons(moons,1)
xs = [m.x for m in moons]
if xs in prev[0]:
cycles[0].append(i - prev[0].index(xs))
if [m.y for m in moons] in prev[1]:
cycles[1].append(i - prev[1].index([m.y for m in moons]))
if [m.z for m in moons] in prev[2]:
cycles[2].append(i - prev[2].index([m.z for m in moons]))
return cycles
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
tm1 = [Moon(x,y,z) for x,y,z in ((-1,0, 2), (2, -10, -7), (4, -8, 8), (3, 5, -1))]
processMoons(tm1, 10)
c = predictRecurrence(tm1)
|
from huey import crontab
from app.core.config import TRAFFIC_INTERVAL_SECONDS
from app.db.session import db_session
from app.db.models.server import Server
from app.db.crud.server import get_server_with_ports_usage, get_servers
from .config import huey
from tasks.utils.runner import run
from tasks.utils.handlers import iptables_finished_handler
@huey.task()
def traffic_server_runner(server_id: int):
with db_session() as db:
server = get_server_with_ports_usage(db, server_id)
run(
server=server,
playbook="traffic.yml",
finished_callback=iptables_finished_handler(server.id),
)
@huey.periodic_task(crontab(minute=f"*/{int(TRAFFIC_INTERVAL_SECONDS)//60}"))
def traffic_runner():
with db_session() as db:
servers = get_servers(db)
for server in servers:
traffic_server_runner(server.id)
|
from chainermn.extensions.allreduce_persistent import AllreducePersistent # NOQA
from chainermn.extensions.checkpoint import create_multi_node_checkpointer # NOQA
from chainermn.extensions.multi_node_evaluator import create_multi_node_evaluator # NOQA
|
import json
import re
import npyscreen
from vent.helpers.meta import Dependencies
class InstanceSelect(npyscreen.MultiSelect):
"""
A widget class for selecting an exact amount of instances to perform
actions on
"""
def __init__(self, *args, **kargs):
""" Initialize an instance select object """
self.instance_num = kargs['instance_num']
super(InstanceSelect, self).__init__(*args, **kargs)
def when_value_edited(self, *args, **kargs):
""" Overrided to prevent user from selecting too many instances """
if len(self.value) > self.instance_num:
self.value.pop(-2)
self.display()
def safe_to_exit(self, *args, **kargs):
"""
Overridden to prevent user from exiting selection until
they have selected the right number of instances
"""
if len(self.value) == self.instance_num:
return True
return False
class DeleteForm(npyscreen.ActionForm):
""" A form for selecting instances to delete and deleting them """
def __init__(self, *args, **keywords):
""" Initialize a delete form object """
self.new_instances = int(keywords['new_instances'])
self.next_tool = keywords['next_tool']
self.manifest = keywords['manifest']
self.clean = keywords['clean']
self.prep_start = keywords['prep_start']
self.start_tools = keywords['start_tools']
self.cur_instances = []
self.display_to_section = {}
self.old_instances = int(keywords['old_instances'])
section = keywords['section']
for i in range(1, self.old_instances + 1):
i_section = section.rsplit(':', 2)
i_section[0] += str(i) if i != 1 else ''
i_section = ':'.join(i_section)
display = i_section.rsplit('/', 1)[-1]
self.cur_instances.append(display)
self.display_to_section[display] = i_section
super(DeleteForm, self).__init__(*args, **keywords)
def create(self):
""" Creates the necessary display for this form """
self.add_handlers({'^E': self.quit, '^Q': self.quit})
to_delete = self.old_instances - self.new_instances
self.add(npyscreen.Textfield, value='Select which instances to delete'
' (you must select ' + str(to_delete) +
' instance(s) to delete):', editable=False, color='GOOD')
self.del_instances = self.add(InstanceSelect,
values=self.cur_instances,
scroll_exit=True, rely=3,
instance_num=to_delete)
def change_screens(self):
""" Change to the next tool to edit or back to MAIN form """
if self.next_tool:
self.parentApp.change_form(self.next_tool)
else:
self.parentApp.change_form('MAIN')
def quit(self, *args, **kargs):
""" Quit without making any changes to the tool """
npyscreen.notify_confirm('No changes made to instance(s)',
title='Instance configuration cancelled')
self.change_screens()
def on_ok(self):
""" Delete the instances that the user chose to delete """
npyscreen.notify_wait('Deleting instances given...',
title='In progress')
# keep track of number for shifting instances down for alignment
shift_num = 1
to_update = []
for i, val in enumerate(self.del_instances.values):
# clean all tools for renaming and relabeling purposes
t = val.split(':')
section = self.display_to_section[val]
prev_running = self.manifest.option(section, 'running')
run = prev_running[0] and prev_running[1] == 'yes'
if i in self.del_instances.value:
if run:
# grab dependencies of tools that linked to previous one
if i == 0:
dependent_tools = [self.manifest.option(
section, 'link_name')[1]]
for dependency in Dependencies(dependent_tools):
self.clean(**dependency)
to_update.append(dependency)
self.clean(name=t[0], branch=t[1], version=t[2])
self.manifest.del_section(section)
else:
try:
# update instances for tools remaining
section = self.display_to_section[val]
settings_dict = json.loads(self.manifest.option
(section, 'settings')[1])
settings_dict['instances'] = self.new_instances
self.manifest.set_option(section, 'settings',
json.dumps(settings_dict))
# check if tool name doesn't need to be shifted because
# it's already correct
identifier = str(shift_num) if shift_num != 1 else ''
new_section = section.rsplit(':', 2)
new_section[0] = re.sub(r'\d+$', identifier,
new_section[0])
new_section = ':'.join(new_section)
if section != new_section:
# clean tool so that we can rename its container and
# labels with new information
self.clean(name=t[0], branch=t[1], version=t[2])
prev_name = self.manifest.option(section, 'name')[1]
new_name = re.split(r'[0-9]', prev_name)[0] + \
identifier
self.manifest.set_option(section, 'name', new_name)
# copy new contents into shifted version
self.manifest.add_section(new_section)
for val_pair in self.manifest.section(section)[1]:
self.manifest.set_option(new_section, val_pair[0],
val_pair[1])
self.manifest.del_section(section)
if run:
to_update.append({'name': new_name,
'branch': t[1],
'version': t[2]})
shift_num += 1
except Exception as e:
npyscreen.notify_confirm('Trouble deleting tools'
' because ' + str(e))
self.manifest.write_config()
tool_d = {}
for tool in to_update:
tool_d.update(self.prep_start(**tool)[1])
if tool_d:
self.start_tools(tool_d)
npyscreen.notify_confirm('Done deleting instances.',
title='Finished')
self.change_screens()
def on_cancel(self):
""" Exits the form without performing any action """
self.quit()
|
from rest_framework import serializers
from django.contrib.auth.backends import UserModel
from question.models import (
TblQuestion
)
class QuestionCreateSerializer(serializers.ModelSerializer):
class Meta:
model = TblQuestion
fields = '__all__'
read_only_fields = ['id', 'create_time', 'update_time', 'creator']
class QuestionListSerializer(serializers.ModelSerializer):
creator = serializers.StringRelatedField(source='creator.nickname')
level = serializers.StringRelatedField(source='level_tag.description')
type = serializers.StringRelatedField(source='type_tag.description')
category = serializers.StringRelatedField(source='category_tag.name')
class Meta:
model = TblQuestion
fields = ['id', 'content', 'description', 'creator', 'level', 'type', 'category', 'create_time', 'update_time']
|
import gzip
import os
import re
from collections import OrderedDict
import filetype
import pysam
from src.BamInfo import BamInfo
from src.logger import logger
from src.SpliceRegion import SpliceRegion
def clean_star_filename(x):
u"""
if the input file is a STAR SJ.out.tab or STAR BAM, strip that suffix from the basename
:param x:
:return:
"""
return re.sub("[_.]?(SJ.out.tab|Aligned.sortedByCoord.out)?.bam", "", os.path.basename(x))
def is_gtf(infile):
u"""
check if input file is gtf
:param infile: path to input file
:return:
"""
if infile is None:
return False
is_gtf = 0
try:
if filetype.guess_mime(infile) == "application/gzip":
is_gtf += 10
r = gzip.open(infile, "rt")
else:
r = open(infile)
except TypeError as err:
logger.error("failed to open %s", infile)
exit(err)
for line in r:
if line.startswith("#"):
continue
lines = re.split(r"\s+", line)
if len(lines) < 8:
break
if re.search(
r"([\w-]+ \"[\w.\s\-%,:]+\";? ?)+",
" ".join(lines[8:])
):
is_gtf += 1
break
r.close()
return is_gtf
def is_bam(infile):
u"""
check if input file is bam or sam file
:param infile: path to input file
:return: Boolean
"""
try:
create = False
if not os.path.exists(infile + ".bai"):
create = True
elif os.path.getctime(infile + ".bai") < os.path.getctime(infile):
try:
os.remove(infile + ".bai")
create = True
except PermissionError as err:
logger.warn(err)
create = False
else:
try:
with pysam.AlignmentFile(infile) as r:
r.check_index()
except ValueError:
create = True
if create:
logger.info("Creating index for %s" % infile)
pysam.index(infile)
return True
except pysam.utils.SamtoolsError:
return False
def get_sites_from_splice_id(string, span=0, indicator_lines=None):
u"""
get splice range from splice id
:param string: splice id; multiple junctions may be joined with "@"
:param span: the range to span the splice region, int -> bp; float -> percentage
:param indicator_lines: bool
:return: chromosome, start, end, strand
"""
string = string.strip()
split = string.split("@")
if not split:
raise ValueError("Invalid region %s" % string)
sites = []
try:
for i in split:
if re.search(r"[\w.]:(\d+-?){2,}:[+-]", i):
chromosome, tmp_sites, strand = i.split(":")
elif re.search(r"[\w.]:(\d+-?){2,}[+-]", i):
chromosome, tmp_sites = i.split(":")
tmp_sites, strand = tmp_sites[:-1], tmp_sites[-1]
else:
chromosome, tmp_sites = i.split(":")
strand = "*"
try:
for x in tmp_sites.split("-"):
sites.append(int(x))
except ValueError as err:
logger.error(err)
logger.error("Contains illegal characters in %s" % string)
exit(err)
except ValueError as err:
logger.error("Invalid format of input region %s" % string)
exit(err)
sites = sorted(sites)
start, end = sites[0], sites[-1]
try:
span = int(span)
start, end = sites[0] - span, sites[-1] + span
except ValueError:
try:
span = float(span) * (sites[-1] - sites[0])
start, end = sites[0] - span, sites[-1] + span
except ValueError as err:
logger.error("Invalid format of span, %s" % str(span))
exit(err)
if indicator_lines is True:
indicator_lines = {x: 'k' for x in sites}
elif indicator_lines:
indicator_lines = [int(i) for i in indicator_lines.split(",")]
return SpliceRegion(
chromosome=chromosome,
start=start,
end=end,
strand=strand,
events=string,
sites=indicator_lines,
ori=str(string)
)
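# Examples of accepted region strings (a minimal sketch):
#   "chr1:100-200:+"  -> chromosome, sites and strand
#   "chr1:100-200+"   -> strand appended directly to the sites
#   "chr1:100-200"    -> strand defaults to "*"
# Several junctions can be joined with "@" (e.g. "chr1:100-200:+@chr1:150-300:+");
# the returned SpliceRegion then spans the smallest and largest coordinates.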
def get_merged_event(events, span, indicator_lines):
u"""
merge multiple events into larger regions to reduce IO
:param events: DataFrame index, output event id
:param span: see main
:param indicator_lines: see main
:return:
"""
# if they came from same chr and same strand, the put together
coords = {}
for e in events:
tmp = get_sites_from_splice_id(e, span=span, indicator_lines=indicator_lines)
tmp_key = "%s#%s" % (tmp.chromosome, tmp.strand)
tmp_list = coords[tmp_key] if tmp_key in coords.keys() else []
tmp_list.append(tmp)
coords[tmp_key] = tmp_list
return coords
def assign_max_y(shared_y, reads_depth, batch = False):
u"""
assign max y for input files
:param shared_y: [[group1], [group2]]
:param reads_depth: output from read_reads_depth_from_bam
:return:
"""
if len(shared_y) == 0:
max_ = max([x.max for x in reads_depth.values()])
for v in reads_depth.values():
v.max = max_
elif batch:
max_ = max([reads_depth[x].max for x in shared_y if x in reads_depth.keys()])
for j in shared_y:
reads_depth[j].max = max_
else:
for i in shared_y:
max_ = max([reads_depth[x].max for x in i if x in reads_depth.keys()])
for j in i:
if j in reads_depth.keys():
reads_depth[j].max = max_
def load_barcode(path: str) -> dict:
barcodes = {}
with open(path) as r:
for line in r:
lines = re.split(r"\t| {2,}", line.strip())
lines[0] = clean_star_filename(lines[0])
if len(lines) >= 3:
key = lines[2]
temp = barcodes.get(lines[0], {})
if key not in temp.keys():
temp[key] = [lines[1]]
else:
temp[key].append(lines[1])
barcodes[lines[0]] = temp
return barcodes
def load_colors(bam: str, barcodes: str, color_factor: str, colors):
res = OrderedDict()
if color_factor and re.search("^\\d+$", color_factor):
color_factor = int(color_factor) - 1
if color_factor and not isinstance(color_factor, int):
with open(color_factor) as r:
for line in r:
line = line.strip().split("\t")
if len(line) > 1:
res[line[0]] = line[1]
else:
logger.error("the input color is not seperate by \\t, {}".format(line))
try:
with open(bam) as r:
for idx, line in enumerate(r):
line = line.strip().split()
key = line[1] if len(line) > 1 else clean_star_filename(line[0])
if key not in res.keys():
if not isinstance(color_factor, int):
res[key] = colors[idx % len(colors)]
else:
if len(line) <= color_factor:
logger.error("--color-factor must <= number of columns from " + bam)
exit(1)
res[key] = line[color_factor].upper()
if "|" in res[key]:
res[key] = res[key].split("|")[1]
except Exception as err:
logger.error("please check the input file, including: .bai index", err)
exit(0)
if barcodes:
temp = set()
with open(barcodes) as r:
for line in r:
line = line.strip().split()
temp.add(line[-1])
for idx, group in enumerate(sorted(temp)):
if group not in res.keys():
res[group] = colors[idx % len(colors)]
return res
def prepare_bam_list(bam, color_factor, colors, share_y_by=-1, plot_by=None, barcodes=None, is_atac = False):
u"""
Prepare bam list
:return:
"""
if is_bam(bam):
return [
BamInfo(
path=bam,
alias=clean_star_filename(bam),
title=None,
label=None,
color=colors[0]
)
], {}
# load barcode groups
barcodes_group = load_barcode(barcodes) if barcodes else {}
colors = load_colors(bam, barcodes, color_factor, colors)
# load bam list
shared_y = {} # {sample group: [BamInfo...]}
bam_list = OrderedDict()
with open(bam) as r:
for line in r:
lines = re.split(r"\t| {2,}", line.strip())
if not os.path.exists(lines[0]) and not os.path.isfile(lines[0]):
logger.warn("wrong input path or input list sep by blank, it should be '\\t'")
continue
if not is_atac and not is_bam(lines[0]):
raise ValueError("%s seem not ba a valid BAM file" % lines[0])
temp_barcodes = barcodes_group.get(
clean_star_filename(lines[0]),
barcodes_group.get(lines[1], {}) if len(lines) > 1 else {}
)
if not barcodes_group:
key = lines[1] if len(lines) > 1 else clean_star_filename(lines[0])
temp_barcodes[key] = None
for alias, barcode in temp_barcodes.items():
tmp = BamInfo(
path=lines[0],
alias=alias,
title="",
label=alias,
color=colors[alias],
barcodes=set(barcode) if barcode else None
)
if alias not in bam_list.keys():
bam_list[alias] = tmp
else:
bam_list[alias] += tmp
if plot_by is not None:
if plot_by < 0:
tmp = shared_y.get("", [])
tmp.append(alias)
shared_y[""] = tmp
else:
try:
tmp = shared_y.get(lines[plot_by - 1], [])
tmp.append(alias)
shared_y[lines[plot_by - 1]] = tmp
except IndexError as err:
logger.error(err)
logger.error("Wrong --plot-by index")
logger.error("Your --plot-by is %d" % plot_by)
logger.error("Your error line in %s" % lines)
exit(err)
elif share_y_by > 0:
try:
tmp = shared_y.get(lines[share_y_by - 1], [])
tmp.append(alias)
shared_y[lines[share_y_by - 1]] = tmp
except IndexError as err:
logger.error(err)
logger.error("Wrong --share-y-by index")
logger.error("Your --share-y-by is %d" % share_y_by)
logger.error("Your error line in %s" % lines)
exit(err)
if len(bam_list) == 0:
logger.error("Cannot find any input bam file, please check the bam path or the input list")
exit(1)
if not barcodes:
return list(bam_list.values()), {i: [bam_list[k] for k in j] for i, j in shared_y.items()}
return [bam_list[x] for x in colors.keys() if x in bam_list.keys()], {i: [bam_list[k] for k in j] for i, j in shared_y.items()}
|
import pymongo
from pymongo import MongoClient
from bson.objectid import ObjectId
connection = MongoClient()
db = connection.c3stem_database
ip = "X.X.X.X"
db.virtualmachine.insert({
"_id": ip,
"flavor": "m1.medium",
"private_IP": 'X.X.X.X',
"public_IP": ip,
"key_name": "c3stem_keypair",
"type": "TRANSIENT",
"status": "AVAIL",
"user": "ALL",
"mode": "GROUP"
})
ip = "X.X.X.X"
db.virtualmachine.insert({
"_id": ip,
"flavor": "m1.medium",
"private_IP": 'X.X.X.X',
"public_IP": ip,
"key_name": "c3stem_keypair",
"type": "TRANSIENT",
"status": "AVAIL",
"user": "ALL",
"mode": "GROUP"
})
|
#!/usr/bin/env python
from time import sleep
from sys import exit
from AmbP3.config import get_args
from AmbP3.decoder import Connection
from AmbP3.decoder import p3decode
from AmbP3.decoder import bin_data_to_ascii as data_to_ascii
from AmbP3.decoder import bin_dict_to_ascii as dict_to_ascii
from AmbP3.write import Write
from AmbP3.write import open_mysql_connection
from AmbP3.write import Cursor
from AmbP3.time_server import TimeServer
from AmbP3.time_server import DecoderTime
from AmbP3.time_server import RefreshTime
def main():
print("************ STARTING *******************")
config = get_args()
conf = config.conf
mysql_enabled = conf['mysql_backend']
if not mysql_enabled:
print("ERROR, please configure MySQL")
exit(1)
mysql_con = open_mysql_connection(user=conf['mysql_user'],
db=conf['mysql_db'],
password=conf['mysql_password'],
host=conf['mysql_host'],
port=conf['mysql_port'],
)
cursor = mysql_con.cursor()
my_cursor = Cursor(mysql_con, cursor)
""" start Connectio to Decoder """
connection = Connection(config.ip, config.port)
connection.connect()
RefreshTime(connection)
if not config.file:
print("file not defined in config")
exit(1)
elif not config.debug_file:
print("debug file not defined in config")
exit(1)
while 'decoder_time' not in locals():
print("Waiting for DECODER timestamp")
for data in connection.read():
decoded_data = data_to_ascii(data)
            decoded_header, decoded_body = p3decode(data)  # NEED TO REPLACE WITH LOGGING
if 'GET_TIME' == decoded_body['RESULT']['TOR']:
decoder_time = DecoderTime(int(decoded_body['RESULT']['RTC_TIME'], 16))
print(f"GET_TIME: {decoder_time.decoder_time} Conitnue")
break
TimeServer(decoder_time)
try:
log_file = config.file
debug_log_file = config.debug_file
with open(log_file, "a") as amb_raw, open(debug_log_file, "a") as amb_debug:
while True:
raw_log_delim = "##############################################"
for data in connection.read():
decoded_data = data_to_ascii(data)
Write.to_file(decoded_data, amb_raw) # REPLACE BY LOGGING
                    decoded_header, decoded_body = p3decode(data)  # NEED TO REPLACE WITH LOGGING
header_msg = ("Decoded Header: {}\n".format(dict_to_ascii(decoded_header)))
raw_log = f"{raw_log_delim}\n{header_msg}\n{decoded_body}\n"
Write.to_file(raw_log, amb_debug)
if 'TOR' in decoded_body['RESULT']:
if 'PASSING' in decoded_body['RESULT']['TOR']:
Write.passing_to_mysql(my_cursor, decoded_body)
elif 'RTC_TIME' in decoded_body['RESULT']['TOR']:
decoder_time.set_decoder_time(int(decoded_body['RESULT']['RTC_TIME'], 16))
sleep(0.1)
sleep(0.1)
except KeyboardInterrupt:
print("Closing")
exit(0)
except IOError as e:
print("error writing to file. Reason: {}".format(e))
if __name__ == "__main__":
main()
|
sandwich_orders = ['aaa', 'pastrami', 'bbb', 'pastrami', 'ccc','pastrami']
finished_sandwiches = []
print("\nAll pastrami had been sold!")
while 'pastrami' in sandwich_orders:
sandwich_orders.remove('pastrami')
while sandwich_orders:
sandwich_order = sandwich_orders.pop()
print("\nI made your tuna sanwichi " + sandwich_order)
finished_sandwiches.append(sandwich_order)
print("\nI have finished all sandwiches!")
|
__author__ = 'Ralph'
import pandas as pd
from arff_utils import ARFF
from base import Node
from base import InputPort
class Exporter(Node):
def __init__(self, name):
super(Exporter, self).__init__(name)
class ExportARFF(Exporter):
def __init__(self):
super(ExportARFF, self).__init__('ExportARFF')
self.add_input_port(InputPort(name='input', data_type=pd.DataFrame))
self.set_required_config_items(['file_name'])
def execute(self):
self.check_config()
file_name = self.get_config().get('file_name')
data = self.get_input_port('input').get_data()
if data is not None:
ARFF.write(file_name, ARFF.from_data_frame('output', data))
class ExportCSV(Exporter):
def __init__(self):
super(ExportCSV, self).__init__('ExportCSV')
self.add_input_port(InputPort(name='input', data_type=pd.DataFrame))
self.set_required_config_items(['file_name'])
def execute(self):
self.check_config()
file_name = self.get_config().get('file_name')
data = self.get_input_port('input').get_data()
if data is not None:
data.to_csv(file_name, index=False)
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="healthdes", # Replace with your own username
version="0.0.2",
author="David Plummer",
author_email="david.plummer@tanzo.org",
license="BSD 3-Clause",
keywords="library simulation healthcare health care hospital",
description="A python library to support discrete event simulation in health and social care",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DiaAzul/healthdes",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Intended Audience :: Healthcare Industry",
"Topic :: Software Development :: Libraries"
],
python_requires='>=3.7',
install_requires=['simpy>=4',
'networkx>=2',
'pandas>=1']
)
|
import os.path as op
from tornado import template
LOADER = template.Loader(op.join(op.dirname(op.abspath(__file__)), 'templates'), autoescape=None)
TEMPLATES = {
'graphite': {
'html': LOADER.load('graphite/message.html'),
'text': LOADER.load('graphite/message.txt'),
'short': LOADER.load('graphite/short.txt'),
},
'url': {
'html': LOADER.load('url/message.html'),
'text': LOADER.load('url/message.txt'),
'short': LOADER.load('url/short.txt'),
},
'common': {
'html': LOADER.load('common/message.html'),
'text': LOADER.load('common/message.txt'),
'short': LOADER.load('common/short.txt'),
},
}
|
# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from yacfg.query import filter_template_list
basic_template_list = [
'a.xml',
'a.txt',
'b.xml',
'b.txt',
'c.xml',
'c.properties',
]
dataset_filter_basic = (
([''], basic_template_list),
(['a.*'], ['a.xml', 'a.txt']),
(['b.*'], ['b.xml', 'b.txt']),
(['.*xml'], ['a.xml', 'b.xml', 'c.xml']),
(['.*xml', '.*txt'], ['a.xml', 'a.txt', 'b.xml', 'b.txt', 'c.xml']),
(['xxx'], []),
)
@pytest.mark.parametrize('output_filter,expected_result', dataset_filter_basic)
def test_filter_basic(output_filter, expected_result):
result = filter_template_list(basic_template_list,
output_filter=output_filter)
assert result == expected_result
|
import argparse
import re
import sys
import traceback
import memory
from state import State, StateSA
from strategy import StrategyBFS, StrategyDFS, StrategyBestFirst
from heuristic import AStar, WAStar, Greedy
class SearchClient:
def __init__(self, server_messages):
self.initial_state = None
colors_re = re.compile(r'^[a-z]+:\s*[0-9A-Z](\s*,\s*[0-9A-Z])*\s*$')
lines = []
try:
# Read lines for colors. There should be none of these in warmup levels.
line = server_messages.readline().rstrip()
if colors_re.fullmatch(line) is not None:
print('Error, client does not support colors.', file=sys.stderr, flush=True)
sys.exit(1)
# Read lines for level.
while line:
lines.append(line)
line = server_messages.readline().rstrip()
#for line in lines:
# print(line, file=sys.stderr, flush=True)
cols = max([len(line) for line in lines])
self.initial_state = State(rows=len(lines), cols=cols)
maze = [[True for _ in range(cols)] for _ in range(len(lines))]
agent = (0,0)
boxes = []
goals = []
type_count = 0
seen_types = {}
row = 0
for line in lines:
#print(line, file=sys.stderr, flush=True)
for col, char in enumerate(line):
if char == '+':
self.initial_state.walls[row][col] = True
maze[row][col] = False
elif char in "0123456789":
if self.initial_state.agent_row is not None:
print('Error, encountered a second agent (client only supports one agent).', file=sys.stderr, flush=True)
sys.exit(1)
self.initial_state.agent_row = row
self.initial_state.agent_col = col
agent = (row, col)
elif char in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
self.initial_state.boxes[row][col] = char
type = type_count
if char.lower() in seen_types.keys():
type = seen_types[char.lower()]
else:
seen_types[char.lower()] = type
type_count += 1
boxes.append((type, (row, col)))
elif char in "abcdefghijklmnopqrstuvwxyz":
self.initial_state.goals[row][col] = char
self.initial_state.goal_dict[char].append((row, col))
type = type_count
if char.lower() in seen_types.keys():
type = seen_types[char.lower()]
else:
seen_types[char.lower()] = type
type_count += 1
goals.append((type, (row, col)))
elif char == ' ':
# Free cell.
pass
else:
print('Error, read invalid level character: {}'.format(char), file=sys.stderr, flush=True)
sys.exit(1)
row += 1
self.initial_state = StateSA(maze, boxes, goals, agent)
except Exception as ex:
print('Error parsing level: {}.'.format(repr(ex)), file=sys.stderr, flush=True)
print(traceback.format_exc(), file=sys.stderr, flush=True)
sys.exit(1)
def search(self, strategy: 'Strategy') -> '[State, ...]':
print('Starting search with strategy {}.'.format(strategy), file=sys.stderr, flush=True)
strategy.add_to_frontier(self.initial_state)
iterations = 0
while True:
if iterations == 1000:
print(strategy.search_status(), file=sys.stderr, flush=True)
iterations = 0
if memory.get_usage() > memory.max_usage:
print('Maximum memory usage exceeded.', file=sys.stderr, flush=True)
return None
if strategy.frontier_empty():
return None
leaf = strategy.get_and_remove_leaf()
if leaf.is_goal_state():
return leaf.extract_plan()
strategy.add_to_explored(leaf)
for child_state in leaf.get_children(): # The list of expanded states is shuffled randomly; see state.py.
if not strategy.is_explored(child_state) and not strategy.in_frontier(child_state):
strategy.add_to_frontier(child_state)
iterations += 1
def main(strategy_str: 'str'):
# Read server messages from stdin.
server_messages = sys.stdin
# Use stderr to print to console through server.
print('SearchClient initializing. I am sending this using the error output stream.', file=sys.stderr, flush=True)
# Read level and create the initial state of the problem.
    client = SearchClient(server_messages)
# Default to BFS strategy.
if not strategy_str:
strategy_str = 'bfs'
print('Defaulting to BFS search. Use arguments -bfs, -dfs, -astar, -wastar, or -greedy to set the search strategy.', file=sys.stderr, flush=True)
strategy = None
if strategy_str == 'bfs':
strategy = StrategyBFS()
elif strategy_str == 'dfs':
strategy = StrategyDFS()
elif strategy_str == 'astar':
strategy = StrategyBestFirst(AStar(client.initial_state))
elif strategy_str == 'wastar':
strategy = StrategyBestFirst(WAStar(client.initial_state, 5))
elif strategy_str == 'greedy':
strategy = StrategyBestFirst(Greedy(client.initial_state))
solution = client.search(strategy)
if solution is None:
print(strategy.search_status(), file=sys.stderr, flush=True)
print('Unable to solve level.', file=sys.stderr, flush=True)
sys.exit(0)
else:
print('\nSummary for {}.'.format(strategy), file=sys.stderr, flush=True)
print('Found solution of length {}.'.format(len(solution)), file=sys.stderr, flush=True)
print('{}.'.format(strategy.search_status()), file=sys.stderr, flush=True)
for state in solution:
print(state.action, flush=True)
response = server_messages.readline().rstrip()
if 'false' in response:
                print('Server responded with "{}" to the action "{}" applied in:\n{}\n'.format(response, state.action, state), file=sys.stderr, flush=True)
break
if __name__ == '__main__':
# Program arguments.
parser = argparse.ArgumentParser(description='Simple client based on state-space graph search.')
parser.add_argument('--max-memory', metavar='<MB>', type=float, default=2048.0, help='The maximum memory usage allowed in MB (soft limit, default 2048).')
strategy_group = parser.add_mutually_exclusive_group()
strategy_group.add_argument('-bfs', action='store_const', dest='strategy', const='bfs', help='Use the BFS strategy.')
strategy_group.add_argument('-dfs', action='store_const', dest='strategy', const='dfs', help='Use the DFS strategy.')
strategy_group.add_argument('-astar', action='store_const', dest='strategy', const='astar', help='Use the A* strategy.')
strategy_group.add_argument('-wastar', action='store_const', dest='strategy', const='wastar', help='Use the WA* strategy.')
strategy_group.add_argument('-greedy', action='store_const', dest='strategy', const='greedy', help='Use the Greedy strategy.')
args = parser.parse_args()
# Set max memory usage allowed (soft limit).
memory.max_usage = args.max_memory
# Run client.
main(args.strategy.lower())
|
"""
Contains unit tests to ensure bounding boxes are converted correctly from
a DICOM annotation to a Pascal VOC compatible format.
"""
from xml.etree.ElementTree import Element, SubElement
import numpy as np
from breakdb.io.export.voc import create_bounding_box
from tests.helpers.xml import match
class TestCreateBoundingBox:
"""
Test suite for :function: 'create_bounding_box'.
"""
def test_create_bounding_box_computes_extrema_correctly(self):
coords = np.random.randint(0, 1200, 10)
x = coords[0::2]
y = coords[1::2]
bndbox = create_bounding_box(coords)
x_max = bndbox.findall('xmax')[0]
y_max = bndbox.findall('ymax')[0]
x_min = bndbox.findall('xmin')[0]
y_min = bndbox.findall('ymin')[0]
assert int(x_max.text) == np.max(x)
assert int(y_max.text) == np.max(y)
assert int(x_min.text) == np.min(x)
assert int(y_min.text) == np.min(y)
def test_create_bounding_box_creates_well_formed_xml(self):
coords = np.random.randint(0, 1200, 10)
x = coords[0::2]
y = coords[1::2]
bndbox = create_bounding_box(coords)
expected = Element('bndbox')
x_min = SubElement(expected, "xmin")
y_min = SubElement(expected, "ymin")
x_max = SubElement(expected, "xmax")
y_max = SubElement(expected, "ymax")
x_min.text = str(np.min(x))
y_min.text = str(np.min(y))
x_max.text = str(np.max(x))
y_max.text = str(np.max(y))
match(bndbox, expected)
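# For reference, the element these tests expect has the following shape (values illustrative):
#   <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>300</xmax><ymax>400</ymax></bndbox>
# built from a flat coordinate array [x0, y0, x1, y1, ...] by taking per-axis minima and maxima.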
|
#! python
# -*- coding: utf-8 -*-
# ================================================================================
# ACUMOS
# ================================================================================
# Copyright © 2017 AT&T Intellectual Property & Tech Mahindra. All rights reserved.
# ================================================================================
# This Acumos software file is distributed by AT&T and Tech Mahindra
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================================================================
import gensim
def readfile(filename, keyword):
saved_word = []
# read file
with open(filename) as file_search: #open search file
file_search = file_search.readlines() #read file
        for lines in file_search: # every line is scanned
if keyword in lines: # extract the keyword
saved_word.append(lines) #store all found keywords in array
# write in new file
with open('CompleteResponse.txt', 'w') as file_handler:
file_handler.write(f"{filename}\n")
for i in range(len(saved_word)):
file_handler.write(f"{saved_word[i]}")
print('done') # completed
print(len(saved_word)) # count found words
# NOTE: 'text' is assumed to be defined elsewhere (e.g. loaded from a source document) before this module-level block runs.
striptext = text.replace('\n\n', ' ')
striptext = striptext.replace('\n', ' ')
keywords = gensim.summarization.keywords(striptext)
print(keywords)
keywords_list = keywords.split()
dict = []
def keyword_scan():
    # Fail if any extracted keyword is already in 'dict', otherwise pass.
    for i in keywords_list:
        if i in dict:
            return "Fail"
    return "Pass"
|
'''
Truncates a string if the
length of the string is greater
than the provided length integer.
@param str {str}
@param to_length {int}
@return {str}
'''
def truncate_str(str, to_length):
return (str[:to_length] + '...') if len(str) > to_length else str
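# Illustrative examples (a sketch based on the docstring above):
#   truncate_str("hello world", 5)  ->  'hello...'
#   truncate_str("hi", 5)           ->  'hi'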
|
# Copyright (c) 2015 Lightricks. All rights reserved.
|
from __future__ import print_function
from flask import Flask, request
from flask_restful import reqparse, abort, Api, Resource
import argparse
import json
import os
import random
CHARS = "BCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456789" # 60 unique characters.
UNIQUE_LEN = 6 # 46B unique
app = Flask(__name__)
api = Api(app)
db = None
request_parser = reqparse.RequestParser()
verbose = 0
strict_put = False
authorization_token = ""
def create_unique():
"""
:return: string very likely to be unique.
"""
result = ""
for i in range(UNIQUE_LEN):
result += random.choice(CHARS)
return result
def strip_tag(field, tag):
"""
If 'field' is in the form 'str_tag1_tag2' and tag matches any tag remove it.
:param field: arbitrary string
:return: str, bool (indicating if 'tag' was removed)
"""
s = field.split("_")
found = False
if tag in s:
s.remove(tag)
found = True
return "_".join(s), found
def strip_from_end(s, c):
"""
Remove all 'c' from the end of 's'
:param s: a string.
:param c: character or substring to remove
:return: remainder of 's'
"""
if c:
while s.endswith(c):
s = s[:-len(c)]
return s
def strip_from_start(s, c):
"""
Remove all 'c' from the start of 's'
:param s: a string.
:param c: character or substring to remove
:return: remainder of 's'
"""
if c:
while s.startswith(c):
s = s[len(c):]
return s
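# Illustrative example (how the API root is normalised further down in main()):
#   strip_from_end(strip_from_start("/api/", "/"), "/")  ->  "api"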
def validate_authorization():
if authorization_token:
auth = request.headers.get("Authorization", "").split(" ")
if len(auth) == 2 and auth[0].lower() == "bearer" and auth[1] == authorization_token:
return # Success!
print("Authentication failed: '{}'".format(" ".join(auth)))
abort(401)
class DataBase(object):
def __init__(self, file_name=""):
self._data = {}
self._file_name = file_name
self.load_from_disk()
def load_from_disk(self):
if self._file_name and os.path.exists(self._file_name):
with open(self._file_name, "r") as f:
self._data = json.load(f)
if verbose:
print("Loaded data from file", self._file_name, "- records found:", len(self._data))
def persist_to_disk(self):
if self._file_name:
with open(self._file_name, "w") as f:
json.dump(self._data, f, sort_keys=True, indent=4, separators=(",", ": "))
if verbose:
print("Persisted data to file", self._file_name, "- records to store:", len(self._data))
def throw_if_does_not_exist(self, unique_id):
if not self.exists(unique_id):
abort(404, message="Item with id '{}' does not exist.".format(unique_id))
def all(self):
return self._data
def exists(self, unique_id):
return unique_id in self._data
def get(self, unique_id):
self.throw_if_does_not_exist(unique_id)
return self._data[unique_id]
def get_field(self, unique_id, field):
record = self.get(unique_id)
if field not in record:
abort(404, message="field '{}' not found!".format(field))
return record[field]
def delete(self, unique_id):
validate_authorization()
self.throw_if_does_not_exist(unique_id)
        del self._data[unique_id]
self.persist_to_disk()
def put(self, unique_id, record, only_update=True):
validate_authorization()
if only_update:
self.throw_if_does_not_exist(unique_id)
self._data[unique_id] = record
self.persist_to_disk()
def post(self, record):
validate_authorization()
while True:
unique_id = create_unique()
if not self.exists(unique_id):
self._data[unique_id] = record
self.persist_to_disk()
return unique_id
class ItemList(Resource):
def get(self):
return db.all()
def post(self):
validate_authorization()
args = request_parser.parse_args()
unique_id = db.post(args)
return unique_id, 201
class Item(Resource):
def get(self, unique_id):
return db.get(unique_id)
def delete(self, unique_id):
db.delete(unique_id)
return '', 204
def put(self, unique_id):
args = request_parser.parse_args()
db.put(unique_id, args, strict_put)
return args, 201
class ItemField(Resource):
def get(self, unique_id, field):
return db.get_field(unique_id, field)
def main():
global verbose, strict_put, authorization_token, db
parser = argparse.ArgumentParser("Simple rest api server")
parser.add_argument("-v", "--verbose", action="count", default=0,
help="Increase output verbosity")
parser.add_argument("-d", "--debug", action="store_true", default=False,
help="Run in debug mode")
parser.add_argument("-a", "--api", default="/api",
help="API root. Default '%(default)s'")
parser.add_argument("-f", "--file-name", default="",
help="Filename for persistent storage")
parser.add_argument("-t", "--token", default="",
help="Authorization token required for updates")
parser.add_argument("-s", "--strict-put", action="store_true", default=False,
help="Only allow PUT on existing resource")
parser.add_argument("field", nargs="*", default=["text", "count_optional_int", "help_optional"],
help="Fields in API. If empty: 'text count_optional_int help_optional'")
args = parser.parse_args()
verbose = args.verbose
strict_put = args.strict_put
authorization_token = args.token
db = DataBase(file_name=args.file_name)
api_normalized = strip_from_end(strip_from_start(args.api, "/"), "/")
api.add_resource(ItemList, "/" + api_normalized)
if len(api_normalized) > 0:
api_normalized = "/" + api_normalized
api.add_resource(Item, api_normalized + "/<unique_id>")
api.add_resource(ItemField, api_normalized + "/<unique_id>/<field>")
print("Starting API server on", args.api)
for field in args.field:
argvars = {}
field, optional = strip_tag(field, "optional")
field, required = strip_tag(field, "required")
field, type_int = strip_tag(field, "int")
field, type_str = strip_tag(field, "str")
if type_int:
argvars["type"] = int
if not optional:
argvars["required"] = True
else:
if type_int:
argvars["default"] = 0
else:
argvars["default"] = ""
print("Adding field:", field, ("required" if not optional else "optional"), ("int" if type_int else "str"))
request_parser.add_argument(field, **argvars)
app.run(debug=args.debug, host="0.0.0.0", port=5000)
return 0
if __name__ == '__main__':
exit(main())
|
"""
Pipeline for text processing implementation
"""
from pathlib import Path
import re
import pymorphy2
from pymystem3 import Mystem
from constants import ASSETS_PATH
from core_utils.article import Article, ArtifactType
class EmptyDirectoryError(Exception):
"""
No data to process
"""
class InconsistentDatasetError(Exception):
"""
Corrupt data:
- numeration is expected to start from 1 and to be continuous
- a number of text files must be equal to the number of meta files
- text files must not be empty
"""
class MorphologicalToken:
"""
Stores language params for each processed token
"""
def __init__(self, original_word):
self.original_word = original_word
self.normalized_form = ''
self.tags_mystem = ''
self.tags_pymorphy = ''
def get_cleaned(self):
"""
Returns lowercased original form of a token
"""
return self.original_word.lower()
def get_single_tagged(self):
"""
Returns normalized lemma with MyStem tags
"""
return f'{self.normalized_form}<{self.tags_mystem}>'
def get_multiple_tagged(self):
"""
Returns normalized lemma with PyMorphy tags
"""
return f'{self.normalized_form}<{self.tags_mystem}>({self.tags_pymorphy})'
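    # Illustrative output shapes (the concrete tag strings come from MyStem / pymorphy2):
    #   get_cleaned()         -> "lowercased original word"
    #   get_single_tagged()   -> "lemma<mystem tags>"
    #   get_multiple_tagged() -> "lemma<mystem tags>(pymorphy tags)"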
class CorpusManager:
"""
Works with articles and stores them
"""
def __init__(self, path_to_raw_txt_data: str):
self.path_to_raw_txt_data = path_to_raw_txt_data
self._storage = {}
self._scan_dataset()
def _scan_dataset(self):
"""
Register each dataset entry
"""
path = Path(self.path_to_raw_txt_data)
for file in path.glob('*_raw.txt'):
article_id = int(file.stem.split('_')[0])
self._storage[article_id] = Article(url=None, article_id=article_id)
def get_articles(self):
"""
Returns storage params
"""
return self._storage
class TextProcessingPipeline:
"""
Process articles from corpus manager
"""
def __init__(self, corpus_manager: CorpusManager):
self.corpus_manager = corpus_manager
def run(self):
"""
Runs pipeline process scenario
"""
articles = self.corpus_manager.get_articles().values()
for article in articles:
raw_text = article.get_raw_text()
cleaned_tokens = []
single_tagged_tokens = []
multiple_tagged_tokens = []
for token in self._process(raw_text):
cleaned_tokens.append(token.get_cleaned())
single_tagged_tokens.append(token.get_single_tagged())
multiple_tagged_tokens.append(token.get_multiple_tagged())
article.save_as(' '.join(cleaned_tokens), ArtifactType.cleaned)
article.save_as(' '.join(single_tagged_tokens), ArtifactType.single_tagged)
article.save_as(' '.join(multiple_tagged_tokens), ArtifactType.multiple_tagged)
def _process(self, raw_text: str):
"""
Processes each token and creates MorphToken class instance
"""
        pattern = re.compile(r'[а-яА-Яa-zA-Z ё]')
        # keep only Cyrillic/Latin letters and spaces
        clean_text = ''.join(symbol for symbol in raw_text if pattern.match(symbol))
analyzed_text = Mystem().analyze(clean_text)
morph = pymorphy2.MorphAnalyzer()
tokens = []
        for token in analyzed_text:
            if 'analysis' not in token:
                continue
            if not token['analysis']:
                continue
            morphological_token = MorphologicalToken(original_word=token['text'])
            morphological_token.normalized_form = token['analysis'][0]['lex']
            morphological_token.tags_mystem = token['analysis'][0]['gr']
            morphological_token.tags_pymorphy = morph.parse(token['text'])[0].tag
            tokens.append(morphological_token)
        return tokens
def validate_dataset(path_to_validate):
"""
Validates folder with assets
"""
if isinstance(path_to_validate, str):
path_to_validate = Path(path_to_validate)
if not path_to_validate.exists():
raise FileNotFoundError
if not path_to_validate.is_dir():
raise NotADirectoryError
dataset = list(path_to_validate.glob('*'))
if not dataset:
raise EmptyDirectoryError
json_counter = 0
raw_counter = 0
for file in sorted(path_to_validate.glob('*'), key=lambda x: int(x.name[:x.name.find('_')])):
if file.name.endswith('raw.txt'):
raw_counter += 1
if f'{raw_counter}_raw' not in file.name:
raise InconsistentDatasetError
with open(file, 'r', encoding='utf-8') as this_file:
text = this_file.read()
if not text:
raise InconsistentDatasetError
if file.name.endswith('meta.json'):
json_counter += 1
if json_counter != raw_counter:
raise InconsistentDatasetError
def main():
# YOUR CODE HERE
validate_dataset(ASSETS_PATH)
corpus_manager = CorpusManager(ASSETS_PATH)
pipeline = TextProcessingPipeline(corpus_manager)
pipeline.run()
if __name__ == "__main__":
main()
|
import os # Enables directory search items
## Utility function to get list of the immediate dub dirs in folder
# @param aDir - the directory to search for sub directories from
# @return an array of sub directories
def getImmediateSubDirs(aDir):
return [name for name in os.listdir(aDir)
if os.path.isdir(os.path.join(aDir,name))]
## Utility function to get list of the immediate files in folder
# @param aDir - the directory to search for the immediate files
# @return an array of immediate files contained in the directory
def getImmediateFiles(aDir):
"""
Utility function to get list of the immediate files in folder.
"""
return [name for name in os.listdir(aDir)
if os.path.isfile(os.path.join(aDir,name))]
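# Illustrative usage (directory and file names are hypothetical):
#   getImmediateSubDirs("/data")  ->  ["run1", "run2"]
#   getImmediateFiles("/data")    ->  ["notes.txt", "results.csv"]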
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from com.alipay.ams.api.model.grant_type import GrantType
from com.alipay.ams.api.model.scope_type import ScopeType
from com.alipay.ams.api.model.customer_belongs_to import CustomerBelongsTo
from com.alipay.ams.api.model.os_type import OsType
from com.alipay.ams.api.model.terminal_type import TerminalType
from com.alipay.ams.api.default_alipay_client import DefaultAlipayClient
from com.alipay.ams.api.request.auth.alipay_auth_apply_token_request import AlipayAuthApplyTokenRequest
from com.alipay.ams.api.request.auth.alipay_auth_consult_request import AlipayAuthConsultRequest
from com.alipay.ams.api.request.auth.alipay_auth_query_token_request import AlipayAuthQueryTokenRequest
from com.alipay.ams.api.response.auth.alipay_auth_apply_token_response import AlipayAuthApplyTokenResponse
from com.alipay.ams.api.response.auth.alipay_auth_query_token_response import AlipayAuthQueryTokenResponse
from com.alipay.ams.api.response.auth.alipay_auth_consult_response import AlipayAuthConsultResponse
from com.alipay.ams.api.request.auth.alipay_auth_revoke_token_request import AlipayAuthRevokeTokenRequest
from com.alipay.ams.api.response.auth.alipay_auth_revoke_token_response import AlipayAuthRevokeTokenResponse
MERCHANT_PRIVATE_KEY = ""
ALIPAY_PUBLICK_KEY = ""
CLIENT_ID = ""
def apply_token():
default_alipay_client = DefaultAlipayClient("https://open-na.alipay.com", CLIENT_ID, MERCHANT_PRIVATE_KEY, ALIPAY_PUBLICK_KEY)
    # Apply for an access token
alipay_auth_apply_token_request = AlipayAuthApplyTokenRequest()
alipay_auth_apply_token_request.path = '/ams/sandbox/api/v1/authorizations/applyToken'
alipay_auth_apply_token_request.grant_type = GrantType.AUTHORIZATION_CODE
alipay_auth_apply_token_request.customer_belongs_to = "GCASH"
alipay_auth_apply_token_request.auth_code = "6c2f2029-fd00-a545-bee1-585906224226"
rsp_body = default_alipay_client.execute(alipay_auth_apply_token_request)
alipay_auth_apply_token_response = AlipayAuthApplyTokenResponse(rsp_body)
if alipay_auth_apply_token_response.result.result_code == "SUCCESS":
print(alipay_auth_apply_token_response.access_token)
else:
print(alipay_auth_apply_token_response.result.result_message)
def query_token():
default_alipay_client = DefaultAlipayClient("https://open-na.alipay.com", CLIENT_ID, MERCHANT_PRIVATE_KEY, ALIPAY_PUBLICK_KEY)
    # Query the access token
alipay_auth_query_token_request = AlipayAuthQueryTokenRequest()
alipay_auth_query_token_request.path = '/ams/sandbox/api/v1/authorizations/query'
alipay_auth_query_token_request.access_token = "2020042910435415881282340824710273532915573saFBMrdXVH"
rsp_body = default_alipay_client.execute(alipay_auth_query_token_request)
alipay_auth_query_token_response = AlipayAuthQueryTokenResponse(rsp_body)
if alipay_auth_query_token_response.result.result_code == "SUCCESS":
print(alipay_auth_query_token_response.access_token)
else:
print(alipay_auth_query_token_response.result.result_message)
def auth_consult():
default_alipay_client = DefaultAlipayClient("https://open-na.alipay.com", CLIENT_ID, MERCHANT_PRIVATE_KEY, ALIPAY_PUBLICK_KEY)
alipay_auth_consult_request = AlipayAuthConsultRequest()
alipay_auth_consult_request.path = "/ams/sandbox/api/v1/authorizations/consult"
alipay_auth_consult_request.auth_redirect_url = "https://www.taobao.com/?param1=567¶m2=123"
alipay_auth_consult_request.auth_state = "663A8FA9-D836-56EE-8AA1-dd1F6F682ff989DC7"
alipay_auth_consult_request.customer_belongs_to = CustomerBelongsTo.KAKAOPAY
alipay_auth_consult_request.os_type = OsType.ANDROID
alipay_auth_consult_request.os_version = "6.6.6"
scopes = []
scopes.append(ScopeType.AGREEMENT_PAY)
alipay_auth_consult_request.scopes = scopes
alipay_auth_consult_request.terminal_type = TerminalType.APP
rsp_body = default_alipay_client.execute(alipay_auth_consult_request)
alipay_auth_consult_response = AlipayAuthConsultResponse(rsp_body)
if alipay_auth_consult_response.result.result_code == "SUCCESS":
print(alipay_auth_consult_response.auth_url)
else:
print(alipay_auth_consult_response.result.result_message)
def revoke_token():
default_alipay_client = DefaultAlipayClient("https://open-na.alipay.com", CLIENT_ID, MERCHANT_PRIVATE_KEY,
ALIPAY_PUBLICK_KEY)
alipay_revoke_token_request = AlipayAuthRevokeTokenRequest()
alipay_revoke_token_request.path = "/ams/sandbox/api/v1/authorizations/revoke"
alipay_revoke_token_request.access_token = "20200402172139158581929917426077528204771351HzJTwAoUFw"
rsp_body = default_alipay_client.execute(alipay_revoke_token_request)
alipay_auth_revoke_token_response = AlipayAuthRevokeTokenResponse(rsp_body)
if alipay_auth_revoke_token_response.result.result_code == "SUCCESS":
print(alipay_auth_revoke_token_response.extend_info)
else:
print(alipay_auth_revoke_token_response.result.result_message)
|
#!/usr/bin/env python
# inst: university of bristol
# auth: jeison sosa
# mail: j.sosa@bristol.ac.uk / sosa.jeison@gmail.com
import os
import sys
import subprocess
import configparser
import getopt
import numpy as np
import gdalutils
from lfptools import shapefile
from lfptools import misc_utils
from osgeo import osr
from scipy.spatial.distance import cdist
from scipy.optimize import fsolve
def getdepths_shell(argv):
myhelp = '''
LFPtools v0.1
Name
----
getdepths
Description
-----------
    Get river depths, three methods available:
1) depth_raster
Get depths from a raster of depths
2) depth_geometry
Get depths by using hydraulic geometry equation
depth = r * width ^ p
    3) depth_manning
Get depths by using simplified mannings equation
((bankfull_flow*manning_coef)/(slope**0.5*width))**(3/5.)
Usage
-----
>> lfp-getdepths -i config.txt
Content in config.txt
---------------------
[getdepths]
recf = `Rec` file path
proj = Output projection in Proj4 format
netf = Target mask file path
    method = depth_raster, depth_geometry, depth_manning
output = Shapefile output file path
# If depth_raster
fdepth = Depth raster source file GDAL format projection EPSG:4326
    thresh = Searching threshold in degrees
# If depth_geometry
wdtf = Shapefile width from lfp-getwidths
r = Constant number
p = Constant number
    # If depth_manning
n = Manning's coefficient
wdtf = Shapefile width from lfp-getwidths
slpf = Shapefile slope from lfp-getslopes
qbnkf = Shapefile q bank full
'''
try:
opts, args = getopt.getopt(argv, "i:")
for o, a in opts:
if o == "-i":
inifile = a
except:
print(myhelp)
sys.exit(0)
    config = configparser.ConfigParser()
config.read(inifile)
proj = str(config.get('getdepths', 'proj'))
netf = str(config.get('getdepths', 'netf'))
method = str(config.get('getdepths', 'method'))
output = str(config.get('getdepths', 'output'))
try:
fdepth = str(config.get('getdepths', 'fdepth'))
thresh = np.float64(config.get('getdepths', 'thresh'))
kwargs = {'fdepth':fdepth,'thresh':thresh}
except:
pass
try:
wdtf = str(config.get('getdepths', 'wdtf'))
r = np.float64(config.get('getdepths', 'r'))
p = np.float64(config.get('getdepths', 'p'))
kwargs = {'wdtf':wdtf,'r':r,'p':p}
except:
pass
try:
n = np.float64(config.get('getdepths', 'n'))
wdtf = str(config.get('getdepths', 'wdtf'))
slpf = str(config.get('getdepths', 'slpf'))
qbnkf = str(config.get('getdepths', 'qbnkf'))
kwargs = {'n':n,'wdtf':wdtf,'slpf':slpf,'qbnkf':qbnkf}
except:
pass
getdepths(proj,netf,method,output,**kwargs)
def getdepths(proj,netf,method,output,**kwargs):
print(" runnning getdepths.py...")
fname = output
w = shapefile.Writer(shapefile.POINT)
w.field('x')
w.field('y')
w.field('depth')
if method == "depth_raster":
depth_raster(w, netf, **kwargs)
elif method == "depth_geometry":
depth_geometry(w, **kwargs)
elif method == "depth_manning":
depth_manning(w, **kwargs)
else:
sys.exit("ERROR method not recognised")
# write final value in a shapefile
w.save("%s.shp" % fname)
# write .prj file
prj = open("%s.prj" % fname, "w")
srs = osr.SpatialReference()
srs.ImportFromProj4(proj)
prj.write(srs.ExportToWkt())
prj.close()
nodata = -9999
fmt = "GTiff"
name1 = output+".shp"
name2 = output+".tif"
mygeo = gdalutils.get_geo(netf)
subprocess.call(["gdal_rasterize", "-a_nodata", str(nodata), "-of", fmt, "-tr", str(mygeo[6]), str(mygeo[7]),
"-a", "depth", "-a_srs", proj, "-te", str(mygeo[0]), str(mygeo[1]), str(mygeo[2]), str(mygeo[3]), name1, name2])
def depth_raster(w, netf, fdepth, thresh):
"""
    From a raster of depths this subroutine finds the nearest depth for every river pixel in the grid
"""
# Reading river network file
dat_net = gdalutils.get_data(netf)
geo_net = gdalutils.get_geo(netf)
iy, ix = np.where(dat_net > 0)
xx = geo_net[8][ix]
yy = geo_net[9][iy]
# Reading depth source file
dat = gdalutils.get_data(fdepth)
geo = gdalutils.get_geo(fdepth)
iy, ix = np.where(dat > -9999)
xdat = geo[8][ix]
ydat = geo[9][iy]
depth = []
for x, y in zip(xx, yy):
try:
dis, ind = misc_utils.near_euc(xdat, ydat, (x, y))
if dis <= thresh:
val = dat[iy[ind], ix[ind]]
depth.append(val)
else:
depth.append(np.nan)
except ValueError:
depth.append(np.nan)
for x,y,mydepth in zip(xx,yy,depth):
w.point(x,y)
w.record(x,y,mydepth)
return w
def depth_geometry(w, r, p, wdtf):
"""
    Uses the hydraulic geometry equation to estimate depth; requires width, r and p coefficients.
"""
width = np.array(shapefile.Reader(wdtf).records(), dtype='float64')
x = width[:, 0]
y = width[:, 1]
for i in range(width.shape[0]):
print("getdepths.py - " + str(width.shape[0]-i))
mydepth = r*width[i, 2]**p
w.point(x[i], y[i])
w.record(x[i], y[i], mydepth)
return w
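# Worked example for the hydraulic-geometry relation depth = r * width ** p
# (the coefficients below are purely illustrative, not recommended values):
#   with r = 0.1, p = 0.5 and a width of 100 m, depth = 0.1 * 100 ** 0.5 = 1.0 m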
def depth_manning(f, n, qbnkf, slpf, wdtf):
"""
    Uses Manning's equation to estimate depth; requires bankfull flow,
    slope, width and Manning coefficient
"""
# load width shapefile
width = np.array(shapefile.Reader(wdtf).records(), dtype='float64')
xw = width[:, 0]
yw = width[:, 1]
qbnk = np.array(shapefile.Reader(qbnkf).records(), dtype='float64')
xq = qbnk[:, 0]
yq = qbnk[:, 1]
slope = np.array(shapefile.Reader(slpf).records(), dtype='float64')
xs = slope[:, 0]
ys = slope[:, 1]
# iterate over every width x-y pair in the shapefile
for i in range(width.shape[0]):
# get index for Q and S based on W coordinates
iiq = near(yq, xq, np.array([[xw[i], yw[i]]]))
iis = near(ys, xs, np.array([[xw[i], yw[i]]]))
if (np.float32(xw[i]) != np.float32(xq[iiq])) | (np.float32(xw[i]) != np.float32(xs[iis])) | (np.float32(yw[i]) != np.float32(yq[iiq])) | (np.float32(yw[i]) != np.float32(ys[iis])):
print(xw[i], yw[i])
print(xq[iiq], yq[iiq])
print(xs[iis], ys[iis])
sys.exit("Coordinates are not equal")
w = width[i, 2]
q = qbnk[iiq, 2]
s = slope[iis, 2]
data = (q, w, s, n)
# # depth by using a full version of the mannings equation (solve numerically)
# mydepth = fsolve(manning_depth,0,args=data)
# f.point(xw[i],yw[i])
# f.record(xw[i],yw[i],mydepth[0])
# depth by using a simplified version of the mannings equation
mydepth = manning_depth_simplified(data)
f.point(xw[i], yw[i])
f.record(xw[i], yw[i], mydepth)
return f
def nearpixel(array, ddsx, ddsy, XA):
"""
Find nearest pixel
array: array with sourcedata
ddsx: 1-dim array with longitudes of array
ddsy: 1-dim array with latitudes of array
XA: point
"""
_ds = np.where(array > 0)
# if there are river pixels in the window
if _ds[0].size > 0:
XB = np.vstack((ddsy[_ds[0]], ddsx[_ds[1]])).T
        ind = int(cdist(XA, XB, metric='euclidean').argmin())
res = array[_ds[0][ind], _ds[1][ind]]
else:
res = -9999
return res
def manning_depth(d, *data):
q, w, s, n = data
return q*n/s**0.5-w*d*(w*d/(2*d+w))**(2/3)
def manning_depth_simplified(data):
q = data[0]
w = data[1]
s = data[2]
n = data[3]
return ((q*n)/(s**0.5*w))**(3/5.)
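# Worked example for the simplified Manning relation d = ((Q * n) / (S**0.5 * W)) ** (3/5)
# (the input values below are purely illustrative):
#   Q = 100 m3/s, n = 0.035, S = 0.001, W = 50 m
#   d = ((100 * 0.035) / (0.001**0.5 * 50)) ** 0.6  ≈ 1.6 m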
def near(ddsx, ddsy, XA):
XB = np.vstack((ddsy, ddsx)).T
dis = cdist(XA, XB, metric='euclidean').argmin()
return dis
if __name__ == '__main__':
getdepths_shell(sys.argv[1:])
|
import bluetooth
import time
import threading
class BluetoothReceiver(threading.Thread):
"""
no docs... too bad!
oh yeah, here is an example:
addr = '98:D3:51:FD:EB:C4' # some device address, whatever
port = 1
t = BluetoothReceiver(addr, port)
with t:
for _ in range(1000):
print (t.last_read)
time.sleep(1/100) # time delay, doesn't affect receiving, whatever whatever
"""
def __init__(self, addr, port):
"""
here is a riddle for you: what does init do?
"""
super().__init__()
self.backBuffer = bytearray()
self._last_read = b""
self._terminator = b"\n"
self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.sock.connect((addr, port))
self.readSize = 100
# -------------------threading related------------------------
self.alive = True
self._lock = threading.Lock()
self.daemon = False
def __enter__(self):
self.start()
if not self.alive:
self.close()
raise RuntimeError("receive thread already finished")
def __exit__(self, *exp):
self.stop()
def stop(self):
self.alive = False
self.join(4)
time.sleep(0.01)
self.close()
def close(self):
"""hammer time!"""
time.sleep(0.5)
self.sock.close()
@property
def last_read(self):
return self._last_read
def send(self, data):
"""sends data or something idk"""
self.sock.send(data)
def run(self):
while self.alive:
try:
data = self.sock.recv(self.readSize)
self.backBuffer.extend(data)
if not data:
break
while self._terminator in self.backBuffer:
self._last_read, self.backBuffer = self.backBuffer.split(self._terminator, 1)
except Exception as e:
print(f"BluetoothSocket receive thread failed: {repr(e)}")
break
self.alive = False
|
from django.contrib.auth.models import User, Group
from django.conf import settings
from rest_framework import generics
from rest_framework import filters
from rest_framework import permissions
from rest_framework import status
from rest_framework.decorators import api_view,permission_classes
from rest_framework.response import Response
from rest_framework.parsers import MultiPartParser
from rest_framework.views import APIView
from rest_user_profiles.models import UserProfile,AvatarUpload
from rest_user_profiles.serializers import *
from rest_user_profiles.permissions import *
# register
class Register(generics.CreateAPIView):
serializer_class = RegisterSerializer
permission_classes = (permissions.IsAdminUser,)
queryset = User.objects.all()
# users
class UserList(generics.ListCreateAPIView):
serializer_class = UserSerializer
permission_classes = (permissions.IsAdminUser,)
queryset = User.objects.all()
filter_fields = ('groups',)
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
serializer_class = UserSerializer
permission_classes = (IsOwnerOrAdmin,)
queryset = User.objects.all()
# user profiles
class UserProfileList(generics.ListCreateAPIView):
serializer_class = UserProfileSerializer
permission_classes = (IsAdminOrReadOnly,)
queryset = UserProfile.objects.all()
filter_fields = ('user','displayname',)
class UserProfileDetail(generics.RetrieveUpdateDestroyAPIView):
serializer_class = UserProfileSerializer
permission_classes = (IsOwnerOrAdmin,)
queryset = UserProfile.objects.all()
# avatar upload
class AvatarUploadView(APIView):
parser_classes = (MultiPartParser,)
permission_classes = (permissions.AllowAny,)
def put(self, request, format=None):
file_obj = request.data['file']
avatarupload = AvatarUpload.objects.get(user=request.user)
avatarupload.file = file_obj
avatarupload.save()
return Response({'url':'{}/{}'.format(settings.MEDIA_ROOT,avatarupload.file)},status=status.HTTP_200_OK)
|
import unittest
from cpuinfo import *
import helpers
class MockDataSource(object):
bits = '64bit'
cpu_count = 8
is_windows = False
arch_string_raw = 'amd64'
uname_string_raw = 'amd64'
can_cpuid = False
@staticmethod
def has_dmesg():
return True
@staticmethod
def dmesg_a():
retcode = 0
output = '''Copyright (c) 1992-2018 The FreeBSD Project.
Copyright (c) 1979, 1980, 1983, 1986, 1988, 1989, 1991, 1992, 1993, 1994
The Regents of the University of California. All rights reserved.
FreeBSD is a registered trademark of The FreeBSD Foundation.
FreeBSD 12.0-CURRENT #26 fa797a5a3(trueos-stable-18.03): Mon Mar 26 00:24:47 UTC 2018
root@chimera:/usr/obj/usr/src/amd64.amd64/sys/GENERIC amd64
FreeBSD clang version 6.0.0 (branches/release_60 324090) (based on LLVM 6.0.0)
VT(vga): text 80x25
CPU: AMD Ryzen 7 2700X Eight-Core Processor (3693.15-MHz K8-class CPU)
Origin="AuthenticAMD" Id=0x800f82 Family=0x17 Model=0x8 Stepping=2
Features=0x1783fbff<FPU,VME,DE,PSE,TSC,MSR,PAE,MCE,CX8,APIC,SEP,MTRR,PGE,MCA,CMOV,PAT,PSE36,MMX,FXSR,SSE,SSE2,HTT>
Features2=0x5ed82203<SSE3,PCLMULQDQ,SSSE3,CX16,SSE4.1,SSE4.2,MOVBE,POPCNT,AESNI,XSAVE,OSXSAVE,AVX,RDRAND>
AMD Features=0x2a500800<SYSCALL,NX,MMX+,FFXSR,RDTSCP,LM>
AMD Features2=0x1f3<LAHF,CMP,CR8,ABM,SSE4A,MAS,Prefetch>
Structured Extended Features=0x40021<FSGSBASE,AVX2,RDSEED>
TSC: P-state invariant
'''
return retcode, output
class TestTrueOS_18_X86_64_Ryzen7(unittest.TestCase):
def setUp(self):
helpers.backup_data_source(cpuinfo)
helpers.monkey_patch_data_source(cpuinfo, MockDataSource)
def tearDown(self):
helpers.restore_data_source(cpuinfo)
'''
Make sure calls return the expected number of fields.
'''
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
self.assertEqual(10, len(cpuinfo._get_cpu_info_from_dmesg()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cat_var_run_dmesg_boot()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
self.assertEqual(17, len(cpuinfo._get_cpu_info_internal()))
def test_get_cpu_info_from_dmesg(self):
info = cpuinfo._get_cpu_info_from_dmesg()
self.assertEqual('AuthenticAMD', info['vendor_id_raw'])
self.assertEqual('AMD Ryzen 7 2700X Eight-Core Processor', info['brand_raw'])
self.assertEqual('3.6932 GHz', info['hz_advertised_friendly'])
self.assertEqual('3.6932 GHz', info['hz_actual_friendly'])
self.assertEqual((3693150000, 0), info['hz_advertised'])
self.assertEqual((3693150000, 0), info['hz_actual'])
self.assertEqual(2, info['stepping'])
self.assertEqual(8, info['model'])
self.assertEqual(23, info['family'])
self.assertEqual(
['abm', 'aesni', 'apic', 'avx', 'cmov', 'cmp', 'cr8', 'cx16',
'cx8', 'de', 'ffxsr', 'fpu', 'fxsr', 'htt', 'lahf', 'lm', 'mas',
'mca', 'mce', 'mmx', 'mmx+', 'movbe', 'msr', 'mtrr', 'nx',
'osxsave', 'pae', 'pat', 'pclmulqdq', 'pge', 'popcnt', 'prefetch',
'pse', 'pse36', 'rdrand', 'rdtscp', 'sep', 'sse', 'sse2', 'sse3',
'sse4.1', 'sse4.2', 'sse4a', 'ssse3', 'syscall', 'tsc', 'vme',
'xsave']
,
info['flags']
)
def test_all(self):
info = cpuinfo._get_cpu_info_internal()
self.assertEqual('AuthenticAMD', info['vendor_id_raw'])
self.assertEqual('AMD Ryzen 7 2700X Eight-Core Processor', info['brand_raw'])
self.assertEqual('3.6932 GHz', info['hz_advertised_friendly'])
self.assertEqual('3.6932 GHz', info['hz_actual_friendly'])
self.assertEqual((3693150000, 0), info['hz_advertised'])
self.assertEqual((3693150000, 0), info['hz_actual'])
self.assertEqual('X86_64', info['arch'])
self.assertEqual(64, info['bits'])
self.assertEqual(8, info['count'])
self.assertEqual('amd64', info['arch_string_raw'])
self.assertEqual(2, info['stepping'])
self.assertEqual(8, info['model'])
self.assertEqual(23, info['family'])
self.assertEqual(
['abm', 'aesni', 'apic', 'avx', 'cmov', 'cmp', 'cr8', 'cx16',
'cx8', 'de', 'ffxsr', 'fpu', 'fxsr', 'htt', 'lahf', 'lm', 'mas',
'mca', 'mce', 'mmx', 'mmx+', 'movbe', 'msr', 'mtrr', 'nx',
'osxsave', 'pae', 'pat', 'pclmulqdq', 'pge', 'popcnt', 'prefetch',
'pse', 'pse36', 'rdrand', 'rdtscp', 'sep', 'sse', 'sse2', 'sse3',
'sse4.1', 'sse4.2', 'sse4a', 'ssse3', 'syscall', 'tsc', 'vme',
'xsave']
,
info['flags']
)
|
# Copyright (c) 2019, Anders Lervik.
# Distributed under the MIT License. See LICENSE for more info.
"""Create a map using folium."""
from functools import partial
import json
import pandas as pd
import folium
import branca.colormap as cm
from legend import Legend
COLORS = {
'blue': '#1f77b4',
'orange': '#ff7f0e',
'green': '#2ca02c',
'red': '#d62728',
'purple': '#9467bd',
'brown': '#8c564b',
'pink': '#e377c2',
'gray': '#7f7f7f',
'yellow': '#bcbd22',
'cyan': '#17becf',
}
COLORS_PARTY = {
'Arbeiderpartiet': COLORS['red'],
'Høyre': COLORS['blue'],
'Miljøpartiet De Grønne': COLORS['green'],
'Senterpartiet': COLORS['yellow'],
'SV - Sosialistisk Venstreparti': COLORS['pink'],
'Fremskrittspartiet': COLORS['brown'],
'Venstre': COLORS['orange'],
'Folkeaksjonen Nei til mer bompenger': COLORS['gray'],
'Kristelig Folkeparti': COLORS['cyan'],
'Rødt': COLORS['purple'],
'Andre': '#262626',
}
COLOR_MAPS = cm._schemes
COLORS_PARTY_MAPS = {
'Arbeiderpartiet': 'Reds_03',
'Høyre': 'PuBu_03',
'Miljøpartiet De Grønne': 'YlGn_05',
'Senterpartiet': 'YlOrRd_03',
'SV - Sosialistisk Venstreparti': 'viridis',
'Fremskrittspartiet': 'viridis',
'Venstre': 'viridis',
'Folkeaksjonen Nei til mer bompenger': 'viridis',
'Kristelig Folkeparti': 'viridis',
'Rødt': 'Reds_05'
}
TILES = [
{
'name': 'topo4graatone',
'url': (
'http://opencache.statkart.no/gatekeeper/gk/gk.open_gmaps?'
'layers=topo4graatone&zoom={z}&x={x}&y={y}'
),
'attr': (
'<a href="http://www.kartverket.no/">Kartverket</a>',
),
},
{
'name': 'topo4',
'url': (
'http://opencache.statkart.no/gatekeeper/gk/gk.open_gmaps?'
'layers=topo4&zoom={z}&x={x}&y={y}'
),
'attr': (
'<a href="http://www.kartverket.no/">Kartverket</a>',
),
},
]
OPACITY = 0.7
def read_csv_results(result_file):
"""Read the results from the given csv file."""
print('Reading results from "{}"'.format(result_file))
results = pd.read_csv(
result_file,
sep=';',
decimal=',',
converters={
'Fylkenummer': str,
'Kommunenummer': str,
'Stemmekretsnummer': str
},
)
return results
def default_style_function(item):
"""Style for geojson polygons."""
party = item['properties']['partinavn']
if party not in COLORS_PARTY:
print('Missing color for {} --- using default'.format(party))
style = {
'fillColor': COLORS_PARTY.get(party, '#262626'),
'fillOpacity': OPACITY,
'color': '#262626',
'weight': 0.5,
}
return style
def style_function_color_map(item, key, data, color_map):
"""Style for geojson polygons."""
feature_key = item['properties'][key]
if feature_key not in data:
color = '#262626'
else:
color = color_map(data[feature_key])
style = {
'fillColor': color,
'fillOpacity': OPACITY,
'color': '#262626',
'weight': 0.5,
}
return style
def default_highlight_function(item):
"""Style for geojson highlighting."""
return {'weight': 2.0, 'fillOpacity': OPACITY + 0.15}
def create_tool_tip(fields, aliases, labels=True):
"""Create tool tip to add to the map."""
tool = folium.GeoJsonTooltip(
fields=fields,
aliases=aliases,
style=('font-size: 14px;'),
labels=labels,
)
return tool
def load_json_file(filename):
"""Load data from a json file."""
data = {}
print('Loading file "{}"'.format(filename))
with open(filename, 'r') as infile:
data = json.load(infile)
return data
def add_tiles_to_map(the_map):
"""Add default tiles to a folium map.
Parameters
----------
the_map : object like folium.folium.Map
The map we are to add the tiles to.
"""
for tile in TILES:
folium.TileLayer(
tile['url'], attr=tile['attr'], name=tile['name']
).add_to(the_map)
folium.TileLayer('openstreetmap').add_to(the_map)
def add_legend_to_map(the_map):
"""Add a default legend to a folium map.
Parameters
----------
the_map : object like folium.folium.Map
        The map we are to add the legend to.
"""
labels = []
for key, val in COLORS_PARTY.items():
labels.append({'text': key, 'color': val, 'opacity': OPACITY})
legend = Legend(title='Partier', labels=labels)
the_map.add_child(legend)
def add_geojson_layers(the_map, geojson_layers,
style_function=default_style_function,
tooltip=None):
"""Add geojson layers to a map.
Parameters
----------
geojson_layers : list of tuples
        Each tuple is of the form (name, geojson-dict) where the
name is used as a label and the geojson-dict contains
the geojson layer to be shown.
style_function : callable, optional
A style function for defining the style to use when drawing
the geojson layers.
tooltip : list of objects like folium.features.GeoJsonTooltip, optional
A tooltip to add to the map.
"""
if tooltip is None:
tooltip = [None for _ in geojson_layers]
for (name, data), tool in zip(geojson_layers, tooltip):
folium.GeoJson(
data,
name=name,
style_function=style_function,
highlight_function=default_highlight_function,
tooltip=tool
).add_to(the_map)
def create_folium_map(geojson_layers, map_settings):
"""Create a folium map.
Parameters
----------
geojson_layers : list of tuples
        Each tuple is of the form (name, geojson-dict) where the
name is used as a label and the geojson-dict contains
the geojson layer to be shown.
map_settings : dict
A dict containing settings for initializing the map.
Returns
-------
the_map : object like folium.folium.Map
The map created here.
"""
the_map = folium.Map(
location=map_settings.get('center', [63.447, 10.422]),
tiles=None,
zoom_start=map_settings.get('zoom', 9),
)
add_tiles_to_map(the_map)
add_geojson_layers(
the_map, geojson_layers, tooltip=map_settings.get('tooltip', None)
)
folium.LayerControl().add_to(the_map)
add_legend_to_map(the_map)
return the_map
def extract_data_values(data, value_key):
"""Extract value from a pandas.DataFrame
Parmeters
---------
data : dict
The raw data.
value_key : string
A column in data which contains the values we are to extract.
Returns
-------
values : dict
A dict where the keys are the id's found in data_key and
the values are the correconding values from data_value.
"""
values = {}
for key in data:
values[key] = data[key][value_key]
return values
def create_color_map(values, color_map_name):
"""Create a color map to use with a geojson layer."""
vals = [i for _, i in values.items()]
linear = cm.LinearColormap(
COLOR_MAPS[color_map_name],
vmin=min(vals),
vmax=max(vals)
)
return linear
def create_folium_choropleth(geojson_layer, data, map_settings):
"""Create a folium choropleth map.
Parameters
----------
geojson_layer : dict
A geojson layer to add to the map.
data : dict
The raw data to use for coloring.
map_settings : dict
A dict containing settings for initializing the map.
Returns
-------
the_map : object like folium.folium.Map
The map created here.
"""
the_map = folium.Map(
location=map_settings.get('center', [63.447, 10.422]),
tiles=None,
zoom_start=map_settings.get('zoom', 9),
)
add_tiles_to_map(the_map)
title = map_settings.get('title', 'Unknown')
party = map_settings.get('party', 'Unknown')
legend = 'Oppslutning (%) for {} i {}'.format(party, title)
values = extract_data_values(
data,
map_settings['value_key']
)
    if 'color_map_name' not in map_settings:
        color_map_name = COLORS_PARTY_MAPS.get(party, 'viridis')
    else:
        color_map_name = map_settings['color_map_name']
linear = create_color_map(values, color_map_name)
style_function = partial(
style_function_color_map,
key='krets',
data=values,
color_map=linear,
)
folium.GeoJson(
geojson_layer,
name=title,
style_function=style_function,
highlight_function=default_highlight_function,
tooltip=map_settings.get('tooltip', None),
).add_to(the_map)
linear.caption = legend
the_map.add_child(linear)
folium.LayerControl().add_to(the_map)
return the_map
def produce_map(geojson_layers, map_settings, output='map.html'):
"""Produce the folium map and save it to a file.
Parameters
----------
geojson_layers : list of tuples
        Each tuple is of the form (name, geojson-dict) where the
name is used as a label and the geojson-dict contains
the geojson layer to be shown.
map_settings : dict
A dict with settings for the folium map.
output : string, optional
The file name to write the map to.
"""
the_map = create_folium_map(geojson_layers, map_settings)
print('Writing map to "{}"'.format(output))
the_map.save(output)
|
import smtplib
import imaplib
import poplib
from email.header import Header
from email.mime.text import MIMEText
from utils.logger import logger
class Pop3Mail(object):
def __init__(self, smtpserver='mail.test.com'):
self.smtpserver = smtpserver
self.timeout = 5
def conn(self,user, password):
try:
server = poplib.POP3_SSL(self.smtpserver, 995)
server.user(user)
server.pass_(password)
print("login success!")
except Exception as e:
print("login failed!")
print(e)
class Imap4Mail(object):
def __init__(self, smtpserver='mail.test.com'):
self.smtpserver = smtpserver
self.timeout = 5
def conn(self,login_name, password):
server = imaplib.IMAP4_SSL(self.smtpserver, 993)
full_password = password
try:
server.login(login_name, full_password)
print(login_name + ' | ' + full_password)
print('[+] Login Success!')
except Exception as e:
print(e)
class SmtpMail(object):
"""
    Usage:
        SmtpMail("mail.test.com").send_email("test@test.com", aliasname='', password='', mailto=['admin@test.com'], subject='test', content='this is a test email', header='')
"""
def __init__(self, smtpserver='mail.test.com'):
self.smtpserver = smtpserver
self.timeout = 5
def send_email(self, sender, aliasname=None, password=None, mailto=[], subject='', content='', header=''):
        '''
        Send an email.
        :param sender: sender address; can be used to send anonymous mail
        :param aliasname: alias name used to log in when authenticating
        :param password: password for authenticated login
        :param mailto: list of recipients
        :param subject: mail subject
        :param content: mail body
        :param header: value of the To header; can be used to forge the displayed recipient
        :return:
        '''
smtpserver = self.smtpserver
msg = MIMEText(content, 'html', 'utf-8')
msg['subject'] = subject
msg['from'] = sender
msg['to'] = Header(header, 'utf-8')
msg['to'] = ','.join(mailto)
try:
if (aliasname is not None) and (password is not None):
smtpobj = smtplib.SMTP(smtpserver, 587)
smtpobj.ehlo()
smtpobj.starttls()
smtpobj.login(aliasname, password)
else:
smtpobj = smtplib.SMTP(smtpserver, 25)
smtpobj.sendmail(sender, mailto, msg.as_string())
smtpobj.quit()
logger.info('邮件发送成功,发送给{}'.format(mailto))
# print('-' * 60 + '\n' + '邮件发送成功')
except Exception as e:
logger.error('邮件发送失败,{}'.format(e))
# print(e, '\n-----------\n邮件发送失败')
import smtplib
from email.mime.text import MIMEText
from config import MAIL_CONFIG
from utils.logger import logger
class SmtpMail(object):
def __init__(self):
self.parse_config(**MAIL_CONFIG)
def parse_config(self, host='127.0.0.1', port=25, **config: dict):
        ''' Mail sending configuration:
        'host'    : 'mail.test.com',
        'port'    : 25,
        'login'   : 'xxx',               # login name used for authentication
        'password': 'xxx',               # login password
        'sender'  : 'schedule@test.com', # sender address
        'mailto'  : ['test@test.com'],   # recipients
        'timeout' : 5                    # login timeout in seconds
        '''
self.smtpserver = host
self.port = port
self.timeout = 5
for k, v in config.items():
setattr(self, k, v)
def send_email(self, subject='', content=''):
        '''
        Send an email.
        :param subject: mail subject
        :param content: mail body
        '''
msg = MIMEText(content, 'html', 'utf-8')
msg['subject'] = subject
msg['from'] = getattr(self, 'sender', None) or getattr(self, 'login')
if isinstance(getattr(self, 'mailto'), list):
msg['to'] = ','.join(getattr(self, 'mailto'))
elif isinstance(getattr(self, 'mailto'), str):
msg['to'] = getattr(self, 'mailto', '')
try:
smtpobj = smtplib.SMTP(self.smtpserver, self.port, timeout=self.timeout)
if getattr(self, 'login', None) and getattr(self, 'password', None) and self.port == 587:
smtpobj.ehlo()
smtpobj.starttls()
smtpobj.login(getattr(self, 'login'), getattr(self, 'password'))
smtpobj.sendmail(msg['from'], msg['to'], msg.as_string())
smtpobj.quit()
logger.info('邮件发送成功,发送给 {}'.format(msg['to']))
# print('-' * 60 + '\n' + '邮件发送成功')
except Exception as e:
logger.error('邮件发送失败,{}'.format(e))
# print(e, '\n-----------\n邮件发送失败')
def error_monitor(self, reason):
content = '''
Hi,all:
<br> <pre>
xxx出现异常,请查看应用日志排查解决! <br>
<b>报警原因:</b> {}
</pre>
'''.format(reason)
self.send_email(subject='xxx运行异常', content=content)
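if __name__ == '__main__':
    # Illustrative usage sketch (an assumption, not part of the original module).
    # MAIL_CONFIG imported above is expected to follow the shape documented in
    # parse_config, e.g. {'host': 'mail.test.com', 'port': 25,
    # 'sender': 'schedule@test.com', 'mailto': ['test@test.com']}.
    mailer = SmtpMail()
    mailer.send_email(subject='smoke test', content='<b>hello from SmtpMail</b>')
    mailer.error_monitor('illustrative alarm reason')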
|
import itertools
def groupper(iterable, n, fillvalue=None):
    # Collect items from the iterable into fixed-length chunks of size n,
    # padding the final chunk with fillvalue when the iterable runs out.
    args = [iter(iterable)] * n
    return itertools.zip_longest(*args, fillvalue=fillvalue)
def gen():
for ii in range(10, 32):
yield ii
for n in groupper(gen(), 5):
print(list(n))
|
import unittest
from app.models import Pitch
from app import db
class PitchTest(unittest.TestCase):
"""
Test class to test the behaviour of the Pitch
"""
def setUp(self):
"""
Set up method that will run before every Test
"""
self.new_pitch = Pitch(0000, 'name','description')
def test_instance(self):
        self.assertTrue(isinstance(self.new_pitch, Pitch))
def tearDown(self):
        Pitch.query.delete()
|
import datetime
import re
from pathlib import Path
from typing import Optional
from il2fb.commons.belligerents import BELLIGERENTS
from il2fb.ds.events.definitions.mission import MissionLoadedInfo
from il2fb.ds.events.definitions.mission import MissionStartedInfo
from il2fb.ds.events.definitions.mission import MissionEndedInfo
from il2fb.ds.events.definitions.mission import MissionWonInfo
from il2fb.ds.events.definitions.mission import MissionLoadedEvent
from il2fb.ds.events.definitions.mission import MissionStartedEvent
from il2fb.ds.events.definitions.mission import MissionEndedEvent
from il2fb.ds.events.definitions.mission import MissionWonEvent
from .base import LineWithTimestampParser
from .regex import BELLIGERENT_REGEX
from ._utils import export
MISSION_LOADED_EVENT_REGEX = re.compile(
r"^Mission: (?P<file_path>.+\.mis) is Playing$"
)
MISSION_WON_EVENT_REGEX = re.compile(
rf"^Mission: {BELLIGERENT_REGEX} WON$"
)
MISSION_STARTED_EVENT_LITERAL = "Mission BEGIN"
MISSION_ENDED_EVENT_LITERAL = "Mission END"
@export
class MissionLoadedLineParser(LineWithTimestampParser):
"""
Parses gamelog messages about loaded missions.
Examples of input lines:
"Mission: net/dogfight/1596469535.mis is Playing"
"""
def parse_line(self, timestamp: datetime.datetime, line: str) -> Optional[MissionLoadedEvent]:
match = MISSION_LOADED_EVENT_REGEX.match(line)
if not match:
return
file_path = Path(match.group('file_path'))
return MissionLoadedEvent(MissionLoadedInfo(
timestamp=timestamp,
file_path=file_path,
))
@export
class MissionStartedLineParser(LineWithTimestampParser):
"""
Parses gamelog messages about started missions.
Examples of input lines:
"Mission BEGIN"
"""
def parse_line(self, timestamp: datetime.datetime, line: str) -> Optional[MissionStartedEvent]:
if line == MISSION_STARTED_EVENT_LITERAL:
return MissionStartedEvent(MissionStartedInfo(timestamp=timestamp))
@export
class MissionEndedLineParser(LineWithTimestampParser):
"""
Parses gamelog messages about ended missions.
Examples of input lines:
"Mission END"
"""
def parse_line(self, timestamp: datetime.datetime, line: str) -> Optional[MissionEndedEvent]:
if line == MISSION_ENDED_EVENT_LITERAL:
return MissionEndedEvent(MissionEndedInfo(timestamp=timestamp))
@export
class MissionWonLineParser(LineWithTimestampParser):
"""
Parses gamelog messages about loaded missions.
Examples of input lines:
"Mission: RED WON"
"""
def parse_line(self, timestamp: datetime.datetime, line: str) -> Optional[MissionWonEvent]:
match = MISSION_WON_EVENT_REGEX.match(line)
if not match:
return
belligerent = match.group('belligerent').upper()
belligerent = BELLIGERENTS[belligerent]
return MissionWonEvent(MissionWonInfo(
timestamp=timestamp,
belligerent=belligerent,
))
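if __name__ == '__main__':
    # Illustrative usage sketch (an assumption, not part of the original module):
    # pair a raw gamelog line with its timestamp and run it through a parser.
    example_parser = MissionLoadedLineParser()
    example_event = example_parser.parse_line(
        timestamp=datetime.datetime(2020, 8, 3, 16, 45, 35),  # hypothetical timestamp
        line="Mission: net/dogfight/1596469535.mis is Playing",
    )
    print(example_event)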
|
#!/usr/bin/env python
#! -*- encoding:utf-8 -*-
import datetime
import qiniu
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.decorators import authentication_classes
from rest_framework.decorators import permission_classes
from restaurant.models import Restaurant
from restaurant.serializers import RestaurantSerializer
@api_view(["GET"])
@authentication_classes(())
@permission_classes(())
def get_restaurant(request):
"""
    Get restaurant information by openid.
"""
openid = request.GET.get('openid', None)
try:
restaurant = Restaurant.objects.get(openid=openid)
except Restaurant.DoesNotExist:
return Response('restaurant not found', status=status.HTTP_404_NOT_FOUND)
serializer = RestaurantSerializer(restaurant)
return Response(serializer.data)
|
IMAGE_PATH = '/Users/calvindelima/code/weekly-rain-day-predictions/weather-network-7-day-forecast-scraper/tests/screenshot.png'
WN_FORECAST_DATA_PATH = '/Users/calvindelima/code/weekly-rain-day-predictions/weather-network-7-day-forecast-scraper/tests/wn_forecast_data.csv'
FORECAST_URL = 'https://www.theweathernetwork.com/ca/14-day-weather-trend/ontario/london'
|
from __future__ import print_function
import difflib
import os
import unittest
import remove_py_md
class TestRemovePy(unittest.TestCase):
def test_store_input(self):
test_file = 'tests/test.md'
test_out = 'tests/test_no_py.md'
gold = 'tests/gold.md'
remove_py_md.remove_py(test_file)
with open(gold, 'r') as f:
gold_lines = f.readlines()
with open(test_out, 'r') as f:
test_lines = f.readlines()
# might break due to line ending..
diff = difflib.unified_diff(gold_lines, test_lines)
list_diff = list(diff)
if list_diff:
print("Files differ:")
for l in list_diff:
print(l, end='')
self.fail("Files differed")
else:
self.remove_file(test_out)
def remove_file(self, file):
os.remove(file)
if __name__ == '__main__':
unittest.main()
|
from dateutil import parser
'''
ctime attribute will be provided within VideoDetector
but same as time.ctime(os.path.getctime("filename"))
- parsed_time: datetime type
'''
class Blacklist:
def __init__(self, ctime):
self.blacklisted = False
self.parsed_time = parser.parse(ctime)
        self.blacklisted = self.check_if_blacklisted()
'''
set blacklisting attributes here:
- which weekdays or times to skip
'''
def check_if_blacklisted(self):
# skip all saturdays
if self.parsed_time.weekday() == 5:
return True
return False
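if __name__ == '__main__':
    # Illustrative usage sketch (an assumption, not part of the original module):
    # build the ctime string the same way VideoDetector would and print the flag.
    import os
    import time
    example_ctime = time.ctime(os.path.getctime(__file__))
    print(Blacklist(example_ctime).blacklisted)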
|
from __future__ import absolute_import, division, print_function
import libtbx.load_env
from libtbx.utils import multi_out
from optparse import OptionParser
from six.moves import cStringIO as StringIO
import os.path as op
import os
import sys
# XXX these will be skipped (for various reasons)
ignore_modules = set([
"libtbx.crossmingw",
"libtbx.start_print_trace", # produces lots of output
"libtbx.command_line.epydoc_run",
"libtbx.command_line.ipython_shell_start",
"mmtbx.pdb_distances", # too much executed code
"gltbx.wx_viewer_leapmotion", # crashes if Leap installed
# non-CCTBX modules
"PyQuante.ThomasFermi", # uses reserved keyword 'with'
"elbow.example_script",
"phenix_regression",
"phenix_dev",
"phenix.autosol.bayes_5", # too much executed code
])
def has_init_py (path_name) :
for file_name in os.listdir(path_name) :
if (file_name == "__init__.py") :
return True
return False
def split_all (path_name) :
paths = []
while True :
leading_path, dir_name = op.split(path_name)
if (dir_name != '') :
paths.append(dir_name)
path_name = leading_path
else :
break
paths.reverse()
return paths
def run (args) :
verbose = False
parser = OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="Turn on verbose output")
parser.add_option("--skip-tests", dest="skip_tests", action="store_true",
help="Don't import modules beginning with 'tst'")
options, args = parser.parse_args(args)
module_list = []
if (len(args) == 0) :
module_list.extend([ m.name for m in libtbx.env.module_list ])
else :
for arg in args :
assert (arg in libtbx.env.module_dict), arg
module_list.append(arg)
has_stdout = []
stdout_old = sys.stdout
for module_name in module_list :
if (module_name in ignore_modules) :
continue
try :
module = __import__(module_name)
except ImportError as e:
print(e, file=sys.stderr)
continue
assert len(module.__path__) == 1
mod_path = module.__path__[0]
path_fields = split_all(mod_path)
n_leading_dirs = len(path_fields) - 1
for dirname, dirnames, filenames in os.walk(mod_path) :
for file_name in filenames :
if file_name.endswith(".py") and (file_name != "libtbx_refresh.py") :
py_mod_name, ext = op.splitext(file_name)
if (ext != '.py') or ("." in py_mod_name) :
if (options.verbose) :
print("skipping %s" % file_name, file=sys.stderr)
continue
py_path = split_all(dirname)[n_leading_dirs:]
import_name = ".".join(py_path)
if (not has_init_py(dirname)) or (import_name in ignore_modules) :
continue
top_level_module = py_path[0]
if (file_name != "__init__.py") :
import_name += "." + file_name[:-3]
if (import_name in ignore_modules) :
continue
elif ((file_name.startswith("tst_") or file_name.startswith("test_"))
and options.skip_tests) :
continue
if (options.verbose) :
print(import_name)
try :
sys.stdout = multi_out()
sys.stdout.register("stdout", stdout_old)
out = StringIO()
sys.stdout.register("stringio", out)
submodule = __import__(import_name)
if (out.getvalue() != '') :
has_stdout.append(import_name)
if (options.verbose) :
print(out.getvalue(), file=sys.stderr)
except ImportError as e :
print(e, file=sys.stderr)
finally :
sys.stdout = stdout_old
print("")
print("*" * 80)
print("ALL MODULES IMPORTED SUCCESSFULLY")
print("*" * 80)
print("")
if (len(has_stdout) > 0) :
print("Warning: %d modules print to stdout on import" % \
len(has_stdout), file=sys.stderr)
for import_name in has_stdout :
print(import_name, file=sys.stderr)
if (__name__ == "__main__") :
run(sys.argv[1:])
|
#!/usr/bin/python3
# No additional modules are required to run the following code.
def print_text(text, encoding_type):
byte_data = text.encode(encoding_type)
hex_data_as_str = ' '.join("{0}".format(hex(c)) for c in byte_data)
int_data_as_str = ' '.join("{0}".format(int(c)) for c in byte_data)
print('\'' + text + '\' 전체 문자 길이: {0}'.format(len(text)))
print('\'' + text + '\' 전체 문자를 표현하는 데 사용한 바이트 수: {0} 바이트'.format(len(byte_data)))
print('\'' + text + '\' 16진수 값: {0}'.format(hex_data_as_str))
print('\'' + text + '\' 10진수 값: {0}'.format(int_data_as_str))
print_text('Hello', 'ascii')
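# An additional illustrative call (not in the original script): with UTF-8 each
# Hangul syllable below takes 3 bytes, so 5 characters use 15 bytes in total.
print_text('안녕하세요', 'utf-8')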
|
from opencensus.trace.execution_context import get_opencensus_tracer
from python.service.handler import SimpleLeagueHandler
from python.service.codegen import feb_stats_pb2, feb_stats_pb2_grpc
class FebStatsServiceServicer(feb_stats_pb2_grpc.FebStatsServiceServicer):
def __init__(self, league_handler: SimpleLeagueHandler):
self.league_handler = league_handler
def GetFebStats(self, request, context):
with get_opencensus_tracer().span(name="GetFebStats") as span:
boxscores = request.boxscores
# TODO: Add tenants when distributing the computations
result = self.league_handler.export_boxscores(boxscores)
response = feb_stats_pb2.GetFebStatsResponse()
response.sheet = result
response.teams.extend([str(t) for t in self.league_handler.league.teams])
span.add_annotation(
"League",
name=str(self.league_handler.league.name),
season=str(self.league_handler.league.season),
number_of_teams=str(len(self.league_handler.league.teams)),
number_of_games=str(len(self.league_handler.league.games)),
)
self.league_handler.clean_league()
return response
|
#!/usr/bin/python
import requests
import socket
import json
from getpass import getpass
import sys
import os
import time
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)
from requests.packages.urllib3.exceptions import SubjectAltNameWarning, InsecureRequestWarning
from requests_toolbelt.adapters import host_header_ssl
requests.packages.urllib3.disable_warnings(SubjectAltNameWarning)
from collections import OrderedDict
from IPython.core.display import HTML
#import IPython.display
from IPython.display import display_html, display, Javascript, FileLink, FileLinks, Image
import ipywidgets as widgets
import pandas as pd
pd.set_option('display.max_columns', None)
@magics_class
class Drill(Magics):
myip = None
session = None
drill_connected = False
pd_display_idx = False
pd_display_max = 1000
pd_replace_crlf = True
drill_host = ""
drill_pinned_ip = ""
drill_user = ""
drill_pass = ""
drill_base_url = ""
drill_pin_to_ip = True
drill_headers = {}
def __init__(self, shell, *args, **kwargs):
super(Drill, self).__init__(shell)
self.myip = get_ipython()
def retConnStatus(self):
if self.drill_connected == True:
            print("Drill is currently connected to %s" % self.drill_base_url)
else:
print("Drill is not connected")
def disconnectDrill(self):
if self.drill_connected == True:
print("Disconnected Drill Session from %s" % self.drill_base_url)
self.session = None
self.drill_base_url = None
self.drill_pass = None
self.drill_connected = False
else:
print("Drill Not Currently Connected")
def connectDrill(self):
global tpass
if self.drill_connected == False:
try:
tuser = os.environ['JPY_USER']
except:
raise Exception("Could not find user at ENV JPY_USER - Please use '%drill connect alt' to specify")
print("Connecting as user %s" % tuser)
try:
turl = os.environ['DRILL_BASE_URL']
except:
raise Exception("No DRILL_BASE_URL specified in ENV - Please use '%drill connect alt' to specify")
print("Connecting to Drill URL: %s" % turl)
print("")
print("Now, please enter the password you wish to connect with:")
tpass = ""
self.myip.ex("from getpass import getpass\ntpass = getpass(prompt='Drill Connect Password: ')")
tpass = self.myip.user_ns['tpass']
self.session = requests.Session()
if self.drill_pin_to_ip == True:
tipurl = self.getipurl(turl)
print("")
print("Pinning to IP for this session: %s" % tipurl)
print("")
self.drill_base_url = tipurl
self.session.mount(tipurl, host_header_ssl.HostHeaderSSLAdapter())
else:
self.drill_base_url = turl
self.drill_user = tuser
self.drill_pass = tpass
self.myip.user_ns['tpass'] = ""
#try:
if 1 == 1:
self.session = self.authDrill()
self.drill_connected = True
print("%s - Drill Connected!" % self.drill_base_url)
#except:
            # print("Connection Error - Perhaps Bad Username/Password?")
else:
            print("Drill is already connected - Please type %drill for help on what you can do")
def connectDrillAlt(self):
global tpass
if self.drill_connected == False:
try:
tuser = os.environ['JPY_USER']
except:
tuser = ""
print("Currently, the user is set to %s" % tuser)
print("To use this user, just press enter at the prompt, otherwise type a different user name")
tmp = input("Please type new user name if desired: ")
if tmp != "":
tuser = tmp
try:
turl = os.environ['DRILL_BASE_URL']
except:
turl = ""
print("Currently the drill base url is set to %s" % turl)
print("To use this URL, please press enter at the prompt, otherwise type a different url to connect with")
tmpu = input("Please type a new URL if desired:")
if tmpu != "":
turl = tmpu
print("Now, please enter the password you wish to connect with:")
tpass = ""
self.myip.ex("from getpass import getpass\ntpass = getpass(prompt='Drill Connect Password: ')")
tpass = self.myip.user_ns['tpass']
self.session = requests.Session()
if self.drill_pin_to_ip == True:
tipurl = self.getipurl(turl)
print("")
print("Provided Host: %s" % turl)
print("")
print("Pinning to IP for this session: %s" % tipurl)
print("")
self.drill_base_url = tipurl
self.session.mount(tipurl, host_header_ssl.HostHeaderSSLAdapter())
else:
self.drill_base_url = turl
self.drill_user = tuser
self.drill_pass = tpass
self.myip.user_ns['tpass'] = ""
#try:
if 1 == 1:
self.session = self.authDrill()
self.drill_connected = True
print("%s - Drill Connected!" % self.drill_base_url)
#except:
            # print("Connection Error - Perhaps Bad Username/Password?")
else:
            print("Drill is already connected - Please type %drill for help on what you can do")
def replaceHTMLCRLF(self, instr):
        gridhtml = instr.replace("<CR><LF>", "<BR>")
        gridhtml = gridhtml.replace("<CR>", "<BR>")
        gridhtml = gridhtml.replace("<LF>", "<BR>")
return gridhtml
def resultsNewWin(self, b):
max_col = pd.get_option('max_colwidth')
max_rows = pd.get_option('display.max_rows')
pd.set_option('max_colwidth', 100000)
pd.set_option('display.max_rows', None)
df = self.myip.user_ns['prev_drill']
gridhtml = df.to_html(index=self.pd_display_idx)
if self.pd_replace_crlf == True:
outhtml = self.replaceHTMLCRLF(gridhtml)
else:
outhtml = gridhtml
window_options = "toolbar=no, location=no, directories=no, status=no, menubar=no, scrollbars=yes, resizable=yes, width=1024, height=800, top=0, left=0"
base = """var win = window.open("", "&~&", "*~*");
win.document.body.innerHTML = `%~%`;
"""
JS = base.replace('%~%', outhtml)
JS = JS.replace('&~&', "Current Results")
JS = JS.replace('*~*', window_options)
j = Javascript(JS)
display(j)
pd.set_option('max_colwidth', max_col)
pd.set_option('display.max_rows', max_rows)
def getipurl(self, url):
ts1 = url.split("://")
scheme = ts1[0]
t1 = ts1[1]
ts2 = t1.split(":")
host = ts2[0]
port = ts2[1]
ip = socket.gethostbyname(host)
self.drill_host = host
self.drill_ip = ip
ipurl = "%s://%s:%s" % (scheme, ip, port)
self.drill_headers = {}
#self.drill_headers = {"Host": self.drill_host}
return ipurl
def runQuery(self, query):
if query.find(";") >= 0:
            print("WARNING - Do not type a trailing semicolon on queries; your query will fail (like it probably did here)")
if self.drill_pin_to_ip == True:
verify = False
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
else:
verify = "/etc/ssl/certs/ca-certificates.crt"
if self.drill_connected == True:
url = self.drill_base_url + "/query.json"
payload = {"queryType":"SQL", "query":query}
cur_headers = self.drill_headers
cur_headers["Content-type"] = "application/json"
starttime = int(time.time())
r = self.session.post(url, data=json.dumps(payload), headers=cur_headers, verify=verify)
endtime = int(time.time())
query_time = endtime - starttime
return r, query_time
def authDrill(self):
url = self.drill_base_url + "/j_security_check"
login = {'j_username': self.drill_user, 'j_password': self.drill_pass}
verify = "/etc/ssl/certs/ca-certificates.crt"
if self.drill_pin_to_ip == True:
verify = False
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
else:
verify = "/etc/ssl/certs/ca-certificates.crt"
r = self.session.post(url, data=login, headers=self.drill_headers, verify=verify)
if r.status_code == 200:
if r.text.find("Invalid username/password credentials") >= 0:
raise Exception("Invalid username/password credentials")
elif r.text.find('<li><a href="/logout">Log Out (') >= 0:
pass
else:
raise Exception("Unknown HTTP 200 Code: %s" % r.text)
else:
raise Exception("Status Code: %s - Error" % r.status_code)
return self.session
@line_cell_magic
def drill(self, line, cell=None):
if cell is None:
line = line.replace("\r", "")
if line == "":
print("Help with Drill Functions")
print("%drill - This Help")
print("%drill connect - Connect to your instance of Drill")
print("%drill connect alt - Connect to a different drill cluster or use a different user (will prompt)")
print("%drill status - Show the Connection Status of Drill")
print("%drill disconnect - Disconnect from your instance of Drill")
print("")
print("Run Drill Queries")
print("%%drill")
print("select * from your table")
print("")
                print("Run with two % and a query, it queries a table and returns a df")
print("The df is displayed but also stored in variable called prev_drill")
print("")
elif line.lower() == "status":
self.retConnStatus()
elif line.lower() == "disconnect":
self.disconnectDrill()
elif line.lower() == "connect alt":
self.connectDrillAlt()
elif line.lower() == "connect":
self.connectDrill()
else:
print("I am sorry, I don't know what you want to do, try just %drill for help options")
else:
cell = cell.replace("\r", "")
if self.drill_connected == True:
res, qtime = self.runQuery(cell)
if res == "notconnected":
pass
else:
if res.status_code == 200:
if res.text.find("Invalid username/password credentials.") >= 0:
print("It looks like your Drill Session has expired, please run %drill connect to resolve")
self.disconnectDrill()
self.myip.set_next_input("%drill connect")
else:
try:
jrecs = json.loads(res.text, object_pairs_hook=OrderedDict)
except:
print("Error loading: %s " % res.text)
cols = jrecs['columns']
myrecs = jrecs['rows']
df = pd.read_json(json.dumps(myrecs))
df = df[cols]
self.myip.user_ns['prev_drill'] = df
mycnt = len(df)
print("%s Records in Approx %s seconds" % (mycnt,qtime))
print("")
button = widgets.Button(description="Cur Results")
button.on_click(self.myip.user_ns['drill_edwin_class'].resultsNewWin)
display(button)
if mycnt <= self.pd_display_max:
display(HTML(df.to_html(index=self.pd_display_idx)))
else:
print("Number of results (%s) greater than pd_display_max(%s) - Press button to see results in new window" % (mycnt, self.pd_display_max))
else:
print("Error Returned - Code: %s" % res.status_code)
emsg = json.loads(res.text, object_pairs_hook=OrderedDict)
print("Error Text:\n%s" % emsg['errorMessage'])
else:
print("Drill is not connected: Please see help at %drill - To Connect: %drill connect")
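# A minimal sketch (an assumption, not part of the original file) of how this
# magics class could be registered. The 'drill_edwin_class' key matches the
# user_ns lookup used by the results button callback above.
def load_ipython_extension(ipython):
    """Register the Drill magics via `%load_ext <module name>`."""
    drill_magic = Drill(ipython)
    ipython.register_magics(drill_magic)
    ipython.user_ns['drill_edwin_class'] = drill_magic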
|
from .BaseEncoder import BaseEncoder # noqa
from .BiTransformerCnnEncoder import BiTransformerCnnEncoder # noqa
|
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from neutron.common import exceptions
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import queue as queuelib
from neutron.tests.unit.vmware.nsxlib import base
class TestLogicalQueueLib(base.NsxlibTestCase):
def setUp(self):
super(TestLogicalQueueLib, self).setUp()
self.fake_queue = {
'name': 'fake_queue',
'min': 0, 'max': 256,
'dscp': 0, 'qos_marking': False
}
def test_create_and_get_lqueue(self):
queue_id = queuelib.create_lqueue(
self.fake_cluster, self.fake_queue)
queue_res = nsxlib.do_request(
'GET',
nsxlib._build_uri_path('lqueue', resource_id=queue_id),
cluster=self.fake_cluster)
self.assertEqual(queue_id, queue_res['uuid'])
self.assertEqual('fake_queue', queue_res['display_name'])
def test_create_lqueue_nsx_error_raises(self):
def raise_nsx_exc(*args, **kwargs):
raise api_exc.NsxApiException()
with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc):
self.assertRaises(
exceptions.NeutronException, queuelib.create_lqueue,
self.fake_cluster, self.fake_queue)
def test_delete_lqueue(self):
queue_id = queuelib.create_lqueue(
self.fake_cluster, self.fake_queue)
queuelib.delete_lqueue(self.fake_cluster, queue_id)
self.assertRaises(exceptions.NotFound,
nsxlib.do_request,
'GET',
nsxlib._build_uri_path(
'lqueue', resource_id=queue_id),
cluster=self.fake_cluster)
def test_delete_non_existing_lqueue_raises(self):
self.assertRaises(exceptions.NeutronException,
queuelib.delete_lqueue,
self.fake_cluster, 'whatever')
|
# -*- coding: utf-8 -*-
import unittest
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.tests.base import test_organization, test_author, test_lots
from openprocurement.tender.belowthreshold.tests.bid_blanks import (
# TenderBidDocumentResourceTest
not_found,
# TenderBidderBatchDocumentWithDSResourceTest
create_tender_bid_with_documents,
create_tender_bid_with_document_invalid,
create_tender_bid_with_document,
# Tender2LotBidResourceTest
patch_tender_with_bids_lots_none,
)
from openprocurement.tender.openua.tests.base import (
BaseTenderUAContentWebTest,
test_tender_data,
test_features_tender_ua_data,
test_bids,
)
from openprocurement.tender.openua.tests.bid_blanks import (
# TenderBidResourceTest
create_tender_biddder_invalid,
create_tender_bidder,
patch_tender_bidder,
get_tender_bidder,
delete_tender_bidder,
deleted_bid_is_not_restorable,
deleted_bid_do_not_locks_tender_in_state,
get_tender_tenderers,
bid_Administrator_change,
draft1_bid,
draft2_bids,
bids_invalidation_on_tender_change,
bids_activation_on_tender_documents,
create_tender_bid_no_scale_invalid,
create_tender_bid_with_scale_not_required,
create_tender_bid_no_scale,
# TenderBidFeautreResourceTest
features_bidder,
features_bidder_invalid,
# TenderBidDocumentResourceTest
create_tender_bidder_document,
put_tender_bidder_document,
patch_tender_bidder_document,
create_tender_bidder_document_nopending,
# TenderBidDocumentWithDSResourceTest
create_tender_bidder_document_json,
put_tender_bidder_document_json,
tender_bidder_confidential_document,
)
class TenderBidResourceTestMixin(object):
test_create_tender_biddder_invalid = snitch(create_tender_biddder_invalid)
test_create_tender_bidder = snitch(create_tender_bidder)
test_patch_tender_bidder = snitch(patch_tender_bidder)
test_get_tender_bidder = snitch(get_tender_bidder)
test_delete_tender_bidder = snitch(delete_tender_bidder)
test_deleted_bid_is_not_restorable = snitch(deleted_bid_is_not_restorable)
test_deleted_bid_do_not_locks_tender_in_state = snitch(deleted_bid_do_not_locks_tender_in_state)
test_get_tender_tenderers = snitch(get_tender_tenderers)
test_bid_Administrator_change = snitch(bid_Administrator_change)
test_bids_invalidation_on_tender_change = snitch(bids_invalidation_on_tender_change)
test_bids_activation_on_tender_documents = snitch(bids_activation_on_tender_documents)
test_create_tender_bid_no_scale_invalid = snitch(create_tender_bid_no_scale_invalid)
test_create_tender_bid_with_scale_not_required = snitch(create_tender_bid_with_scale_not_required)
test_create_tender_bid_no_scale = snitch(create_tender_bid_no_scale)
class TenderBidDocumentResourceTestMixin(object):
test_create_tender_bidder_document = snitch(create_tender_bidder_document)
test_put_tender_bidder_document = snitch(put_tender_bidder_document)
test_patch_tender_bidder_document = snitch(patch_tender_bidder_document)
test_create_tender_bidder_document_nopending = snitch(create_tender_bidder_document_nopending)
class TenderBidResourceTest(BaseTenderUAContentWebTest, TenderBidResourceTestMixin):
initial_data = test_tender_data
initial_status = "active.tendering"
test_bids_data = test_bids
author_data = test_author
test_draft1_bid = snitch(draft1_bid)
test_draft2_bids = snitch(draft2_bids)
class Tender2LotBidResourceTest(BaseTenderUAContentWebTest):
initial_data = test_tender_data
test_bids_data = test_bids
initial_lots = 2 * test_lots
initial_status = "active.tendering"
test_patch_tender_with_bids_lots_none = snitch(patch_tender_with_bids_lots_none)
class TenderBidFeaturesResourceTest(BaseTenderUAContentWebTest):
initial_data = test_features_tender_ua_data
initial_status = "active.tendering"
test_features_bidder = snitch(features_bidder)
test_features_bidder_invalid = snitch(features_bidder_invalid)
class TenderBidDocumentResourceTest(BaseTenderUAContentWebTest, TenderBidDocumentResourceTestMixin):
initial_status = "active.tendering"
test_bids_data = test_bids
author_data = test_author
def setUp(self):
super(TenderBidDocumentResourceTest, self).setUp()
# Create bid
response = self.app.post_json(
"/tenders/{}/bids".format(self.tender_id),
{
"data": {
"selfEligible": True,
"selfQualified": True,
"tenderers": [test_organization],
"value": {"amount": 500},
}
},
)
bid = response.json["data"]
self.bid_id = bid["id"]
self.bid_token = response.json["access"]["token"]
test_not_found = snitch(not_found)
class TenderBidDocumentWithDSResourceTestMixin:
docservice = True
test_create_tender_bidder_document_json = snitch(create_tender_bidder_document_json)
test_put_tender_bidder_document_json = snitch(put_tender_bidder_document_json)
test_tender_bidder_confidential_document = snitch(tender_bidder_confidential_document)
class TenderBidDocumentWithDSResourceTest(TenderBidDocumentWithDSResourceTestMixin, TenderBidDocumentResourceTest):
pass
class TenderBidderBatchDocumentWithDSResourceTest(BaseTenderUAContentWebTest):
docservice = True
initial_status = "active.tendering"
test_bids_data = test_bids
bid_data_wo_docs = {
"tenderers": [test_organization],
"value": {"amount": 500},
"selfEligible": True,
"selfQualified": True,
"documents": [],
}
create_tender_bid_with_document_invalid = snitch(create_tender_bid_with_document_invalid)
create_tender_bid_with_document = snitch(create_tender_bid_with_document)
create_tender_bid_with_documents = snitch(create_tender_bid_with_documents)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderBidDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderBidDocumentWithDSResourceTest))
suite.addTest(unittest.makeSuite(TenderBidFeaturesResourceTest))
suite.addTest(unittest.makeSuite(TenderBidResourceTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
|
print("Dictionary describing a person; you will be asked which piece of information to look up")
person = {"name":"Ajinkya","gender":"Male","address":"Toronto","phone": 5488881681 }
key = input("Please enter what information would like to know about: ").lower()
result = person.get(key,"Invalid Input")
print(result)
|
from django import forms
from .models import Order
class OrderForm(forms.ModelForm):
class Meta:
model = Order
fields = ['supplier', 'text', 'department', 'amount']
widgets = {
'supplier': forms.Textarea(attrs={'rows':1, 'cols':100, 'style':'resize:none;'}),
'text': forms.Textarea(attrs={'rows':15, 'cols':100}),
'department': forms.Select(attrs={'style': 'width:183px'})
}
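# Illustrative usage sketch (an assumption, not part of the original app): a
# hypothetical view that validates and saves the form; the URL name and
# template path below are placeholders.
def create_order_view(request):
    from django.shortcuts import redirect, render
    if request.method == 'POST':
        form = OrderForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('order-list')  # placeholder URL name
    else:
        form = OrderForm()
    return render(request, 'orders/order_form.html', {'form': form})  # placeholder template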
|
import re
import requests
import datetime
from bs4 import BeautifulSoup
from python_utils import converters
def get_parsed_page(url):
    # This fixes a "blocked by Cloudflare" error I've encountered
headers = {
"referer": "https://www.hltv.org/stats",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
}
return BeautifulSoup(requests.get(url, headers=headers).text, "lxml")
def top5teams():
home = get_parsed_page("http://hltv.org/")
count = 0
teams = []
for team in home.find_all("div", {"class": ["col-box rank"], }):
count += 1
teamname = team.text[3:]
teams.append(teamname)
return teams
def top30teams():
page = get_parsed_page("http://www.hltv.org/ranking/teams/")
teams = page.find("div", {"class": "ranking"})
teamlist = []
for team in teams.find_all("div", {"class": "ranked-team standard-box"}):
newteam = {'name': team.find('div', {"class": "ranking-header"}).select('.name')[0].text.strip(),
'rank': converters.to_int(team.select('.position')[0].text.strip(), regexp=True),
'rank-points': converters.to_int(team.find('span', {'class': 'points'}).text, regexp=True),
'team-id': converters.to_int(team.find('a', {'class': 'details moreLink'})['href'].split('/')[-1]),
'team-players': []}
for player_div in team.find_all("td", {"class": "player-holder"}):
player = {}
player['name'] = player_div.find('img', {'class': 'playerPicture'})['title']
player['player-id'] = converters.to_int(player_div.select('.pointer')[0]['href'].split("/")[-2])
newteam['team-players'].append(player)
teamlist.append(newteam)
return teamlist
def top_players():
page = get_parsed_page("https://www.hltv.org/stats")
players = page.find_all("div", {"class": "col"})[0]
playersArray = []
for player in players.find_all("div", {"class": "top-x-box standard-box"}):
playerObj = {}
playerObj['country'] = player.find_all('img')[1]['alt'].encode('utf8')
buildName = player.find('img', {'class': 'img'})['alt'].split("'")
playerObj['name'] = buildName[0].rstrip() + buildName[2]
playerObj['nickname'] = player.find('a', {'class': 'name'}).text.encode('utf8')
playerObj['rating'] = player.find('div', {'class': 'rating'}).find('span', {'class': 'bold'}).text.encode('utf8')
playerObj['maps-played'] = player.find('div', {'class': 'average gtSmartphone-only'}).find('span', {'class': 'bold'}).text.encode('utf8')
playersArray.append(playerObj)
return playersArray
def get_players(teamid):
page = get_parsed_page("http://www.hltv.org/?pageid=362&teamid=" + str(teamid))
titlebox = page.find("div", {"class": "bodyshot-team"})
players = []
for player_link in titlebox.find_all("a"):
players.append(player_link['title'])
return players
def get_team_info(teamid):
"""
:param teamid: integer (or string consisting of integers)
:return: dictionary of team
example team id: 5378 (virtus pro)
"""
page = get_parsed_page("http://www.hltv.org/?pageid=179&teamid=" + str(teamid))
team_info = {}
team_info['team-name']=page.find("div", {"class": "context-item"}).text.encode('utf8').decode('utf8')
# print(team_info['team-name'])
current_lineup = _get_current_lineup(page.find_all("div", {"class": "col teammate"}))
team_info['current-lineup'] = current_lineup
# print(team_info['current-lineup'])
historical_players = _get_historical_lineup(page.find_all("div", {"class": "col teammate"}))
team_info['historical-players'] = historical_players
for i in team_info['historical-players']:
for k in i:
try:
i[k] = i[k].decode('utf-8')
except:
pass
# print(team_info['historical-players'])
team_stats_columns = page.find_all("div", {"class": "columns"})
team_stats = {}
for columns in team_stats_columns:
stats = columns.find_all("div", {"class": "col standard-box big-padding"})
for stat in stats:
stat_value = stat.find("div", {"class": "large-strong"}).text.encode('utf8')
stat_title = stat.find("div", {"class": "small-label-below"}).text.encode('utf8')
team_stats[stat_title.decode('utf-8')] = stat_value
team_info['stats'] = team_stats
for i in team_info['stats']:
team_info['stats'][i] = team_info['stats'][i].decode('utf-8')
# print(team_info['stats'])
return team_info
def _get_current_lineup(player_anchors):
"""
helper function for function above
:return: list of players
"""
players = []
for player_anchor in player_anchors[0:5]:
player = {}
buildName = player_anchor.find("img", {"class": "container-width"})["alt"].split('\'')
player['country'] = player_anchor.find("div", {"class": "teammate-info standard-box"}).find("img", {"class": "flag"})["alt"]
player['name'] = buildName[0].rstrip() + buildName[2]
player['nickname'] = player_anchor.find("div", {"class": "teammate-info standard-box"}).find("div", {"class": "text-ellipsis"}).text
player['maps-played'] = int(re.search(r'\d+', player_anchor.find("div", {"class": "teammate-info standard-box"}).find("span").text).group())
players.append(player)
return players
def _get_historical_lineup(player_anchors):
"""
helper function for function above
:return: list of players
"""
players = []
for player_anchor in player_anchors[5::]:
player = {}
buildName = player_anchor.find("img", {"class": "container-width"})["alt"].split('\'')
player['country'] = player_anchor.find("div", {"class": "teammate-info standard-box"}).find("img", {"class": "flag"})["alt"].encode('utf8')
player['name'] = buildName[0].rstrip() + buildName[2]
player['nickname'] = player_anchor.find("div", {"class": "teammate-info standard-box"}).find("div", {"class": "text-ellipsis"}).text.encode('utf8')
player['maps-played'] = int(re.search(r'\d+', player_anchor.find("div", {"class": "teammate-info standard-box"}).find("span").text).group())
players.append(player)
return players
def get_matches():
matches = get_parsed_page("http://www.hltv.org/matches/")
matches_list = []
upcomingmatches = matches.find("div", {"class": "upcoming-matches"})
matchdays = upcomingmatches.find_all("div", {"class": "match-day"})
for match in matchdays:
matchDetails = match.find_all("table", {"class": "table"})
for getMatch in matchDetails:
matchObj = {}
matchObj['date'] = match.find("span", {"class": "standard-headline"}).text.encode('utf8')
matchObj['time'] = getMatch.find("td", {"class": "time"}).text.encode('utf8').lstrip().rstrip()
if (getMatch.find("td", {"class": "placeholder-text-cell"})):
matchObj['event'] = getMatch.find("td", {"class": "placeholder-text-cell"}).text.encode('utf8')
elif (getMatch.find("td", {"class": "event"})):
matchObj['event'] = getMatch.find("td", {"class": "event"}).text.encode('utf8')
else:
matchObj['event'] = None
if (getMatch.find_all("td", {"class": "team-cell"})):
matchObj['team1'] = getMatch.find_all("td", {"class": "team-cell"})[0].text.encode('utf8').lstrip().rstrip()
matchObj['team2'] = getMatch.find_all("td", {"class": "team-cell"})[1].text.encode('utf8').lstrip().rstrip()
matchObj['id1'] = int(str(getMatch.find_all("td", {"class": "team-cell"})[0].find_all("div", {"class": "line-align"})[0].find_all("img")[0]).split("src=")[1].split()[0].split('/')[-1].split('"')[0])
matchObj['id2'] = int(str(getMatch.find_all("td", {"class": "team-cell"})[1].find_all("div", {"class": "line-align"})[0].find_all("img")[0]).split("src=")[1].split()[0].split('/')[-1].split('"')[0])
else:
matchObj['team1'] = None
matchObj['team2'] = None
matchObj['id1'] = None
matchObj['id2'] = None
matches_list.append(matchObj)
return matches_list
def get_results():
results = get_parsed_page("http://www.hltv.org/results/")
results_list = []
pastresults = results.find_all("div", {"class": "results-holder"})
for result in pastresults:
resultDiv = result.find_all("div", {"class": "result-con"})
for res in resultDiv:
getRes = res.find("div", {"class": "result"}).find("table")
resultObj = {}
if (res.parent.find("span", {"class": "standard-headline"})):
resultObj['date'] = res.parent.find("span", {"class": "standard-headline"}).text.encode('utf8')
else:
dt = datetime.date.today()
resultObj['date'] = str(dt.day) + '/' + str(dt.month) + '/' + str(dt.year)
if (res.find("td", {"class": "placeholder-text-cell"})):
resultObj['event'] = res.find("td", {"class": "placeholder-text-cell"}).text.encode('utf8')
elif (res.find("td", {"class": "event"})):
resultObj['event'] = res.find("td", {"class": "event"}).text.encode('utf8')
else:
resultObj['event'] = None
if (res.find_all("td", {"class": "team-cell"})):
resultObj['team1'] = res.find_all("td", {"class": "team-cell"})[0].text.encode('utf8').lstrip().rstrip()
resultObj['team1score'] = converters.to_int(res.find("td", {"class": "result-score"}).find_all("span")[0].text.encode('utf8').lstrip().rstrip())
resultObj['team2'] = res.find_all("td", {"class": "team-cell"})[1].text.encode('utf8').lstrip().rstrip()
resultObj['team2score'] = converters.to_int(res.find("td", {"class": "result-score"}).find_all("span")[1].text.encode('utf8').lstrip().rstrip())
else:
resultObj['team1'] = None
resultObj['team2'] = None
results_list.append(resultObj)
return results_list
def get_results_by_date(start_date, end_date):
# Dates like yyyy-mm-dd (iso)
results_list = []
offset = 0
# Loop through all stats pages
while True:
url = "https://www.hltv.org/stats/matches?startDate="+start_date+"&endDate="+end_date+"&offset="+str(offset)
results = get_parsed_page(url)
# Total amount of results of the query
amount = int(results.find("span", attrs={"class": "pagination-data"}).text.split("of")[1].strip())
# All rows (<tr>s) of the match table
pastresults = results.find("tbody").find_all("tr")
# Parse each <tr> element to a result dictionary
for result in pastresults:
team_cols = result.find_all("td", {"class": "team-col"})
t1 = team_cols[0].find("a").text
t2 = team_cols[1].find("a").text
t1_score = int(team_cols[0].find_all(attrs={"class": "score"})[0].text.strip()[1:-1])
t2_score = int(team_cols[1].find_all(attrs={"class": "score"})[0].text.strip()[1:-1])
map = result.find(attrs={"class": "statsDetail"}).find(attrs={"class": "dynamic-map-name-full"}).text
event = result.find(attrs={"class": "event-col"}).text
date = result.find(attrs={"class": "date-col"}).find("a").find("div").text
result_dict = {"team1": t1, "team2": t2, "team1score": t1_score,
"team2score": t2_score, "date": date, "map": map, "event": event}
# Add this pages results to the result list
results_list.append(result_dict)
# Get the next 50 results (next page) or break
if offset < amount:
offset += 50
else:
break
return results_list
if __name__ == "__main__":
import pprint
pp = pprint.PrettyPrinter()
pp.pprint('top5')
pp.pprint(top5teams())
pp.pprint('top30')
pp.pprint(top30teams())
pp.pprint('top_players')
pp.pprint(top_players())
pp.pprint('get_players')
pp.pprint(get_players('6665'))
pp.pprint('get_team_info')
pp.pprint(get_team_info('6665'))
pp.pprint('get_matches')
pp.pprint(get_matches())
pp.pprint('get_results')
pp.pprint(get_results())
pp.pprint('get_results_by_date')
today_iso = datetime.datetime.today().isoformat().split('T')[0]
pp.pprint(get_results_by_date(today_iso, today_iso))
|
# https://codility.com/programmers/task/perm_missing_elem/
def solution(A):
greater = 0
lenA = len(A)
N = lenA + 1
i = 0
while i < lenA:
if A[i] == i + 1 or A[i] == N:
i += 1
continue
A[A[i]-1], A[i] = A[i],A[A[i]-1]
    for i in range(lenA):
if i + 1 != A[i]:
return i + 1
return N
print(solution([2,1,3,5]))
print(solution([]))
print(solution([1,2,3,4]))
print(solution([4, 2, 3, 5, 1, 6, 8, 9]))
|
from functools import wraps
from flask import session, request, flash, redirect
def auth_required(fn):
@wraps(fn)
def wrap(*args, **kwargs):
print(session)
if 'logged_in' not in session:
flash('You need to login first.', 'danger')
return redirect('/sign-in')
return fn(*args, **kwargs)
return wrap
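if __name__ == '__main__':
    # Illustrative usage sketch (an assumption, not part of the original module):
    # protect a route so unauthenticated users get flashed and redirected to /sign-in.
    from flask import Flask
    app = Flask(__name__)
    app.secret_key = 'dev'  # sessions and flash() require a secret key
    @app.route('/dashboard')
    @auth_required
    def dashboard():
        return 'dashboard'
    app.run(debug=True)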
|
from dataclasses import dataclass
from viewdom import html, VDOM
from viewdom_wired import component
from .plugins import greeting
from .plugins.greeting import Greeting
plugins = (greeting,)
@component(for_=Greeting)
@dataclass
class SiteGreeting:
name: str = 'Site'
def __call__(self) -> VDOM:
return html('<h1>Hello {self.name}</h1>')
|
# -*- coding: utf-8 -*-
from crispy_forms import helper as crispy_helper
from crispy_forms import layout
from crispy_forms import bootstrap
import floppyforms
from .models import Historia
class HistoriaEditForm(floppyforms.ModelForm):
def __init__(self, *args, **kwargs):
super(HistoriaEditForm, self).__init__(*args, **kwargs)
self.helper = crispy_helper.FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-2'
self.helper.field_class = 'col-md-4'
self.helper.help_text_inline = True
self.helper.layout = layout.Layout(
bootstrap.TabHolder(
bootstrap.Tab(u'Información Básica',
'paciente',
'motivo_consulta',
'antecedentes_personales',
'antecedentes_familiares',
'habitos_psicobiologicos'
),
bootstrap.Tab(u'Exámen Físico',
'examen_general',
'examen_piel',
'examen_cabeza',
'examen_ojos',
'examen_nariz',
'examen_boca',
'examen_garganta',
'examen_respiratorio',
'examen_osteomuscular',
'examen_cardiovascular',
'examen_gastrointestinal',
'examen_genitourinario',
'examen_ginecologico',
'examen_nervioso_y_mental',
'examen_epidemiologico'
),
bootstrap.Tab(u'Signos vitales',
bootstrap.AppendedText('temperatura', u'°C'),
bootstrap.AppendedText('pulso', 'ppm'),
'respiracion',
'tension_art_sist',
'tension_art_diast',
'frecuencia_cardiaca',
bootstrap.AppendedText('peso', 'kg'),
bootstrap.AppendedText('talla', 'cm'),
'grasa_corporal'
)
),
layout.HTML('<hr>'),
bootstrap.FormActions(
layout.Submit(
'submit', 'Agregar',
css_class='col-md-offset-2 btn-default btn-success'
)
)
)
class Meta:
model = Historia
|
__version__ = '1.0.4.2'
|
"""ETL step wrapper for delta loading a table based on a date column
"""
from ..database import SqlScript
from ..database import Table
from ..utils.helpers import parse_path
from .upsert import UpsertStep
class DeltaLoadStep(UpsertStep):
"""DeltaLoadStep Step class that creates the table if needed and loads data
"""
def __init__(self, destination, date_column, window=0, **kwargs):
"""Constructor for the DeltaLoadStep class
        Args:
            destination(string): path to the SQL script that defines the
                destination table
            date_column(string): name of column (of type date) to use as the
delta value (i.e., only load the last X days)
window(int): number of days before last loaded day to update
**kwargs(optional): Keyword arguments directly passed to base class
"""
dest = Table(SqlScript(filename=parse_path(destination)))
delta_clause = """
WHERE {date_column} >=
COALESCE(
(SELECT MAX({date_column}) FROM {destination}),
'1800-01-01'::DATE
) - {window}
""".format(date_column=date_column,
destination=dest.full_name,
window=window)
super(DeltaLoadStep, self).__init__(destination=destination,
filter_clause=delta_clause,
**kwargs)
|
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import robomaster
from robomaster import robot
if __name__ == '__main__':
mled_smile1 = '000000000r0000r0r0r00r0r000000000000000000r00r00000rr00000000000'
mled_smile2 = '00rrrr000r0000r0r0r00r0rr000000rr0r00r0rr00rr00r0r0000r000rrrr00'
tl_drone = robot.Drone()
tl_drone.initialize()
    # Display custom patterns
tl_drone.led.set_mled_graph(mled_smile1)
time.sleep(3)
tl_drone.led.set_mled_graph(mled_smile2)
time.sleep(3)
    # Display digits 0-9
for num in range(10):
tl_drone.led.set_mled_char('r', num)
time.sleep(0.5)
    # Display characters A, B, C
tl_drone.led.set_mled_char(color='b', display_char='A')
time.sleep(3)
tl_drone.led.set_mled_char(color='b', display_char='B')
time.sleep(3)
tl_drone.led.set_mled_char(color='b', display_char='C')
time.sleep(3)
    # Clear the display
tl_drone.led.set_mled_char('0')
tl_drone.close()
|
from rest_framework import serializers
from main.models import Sentence
class SentenceSerializer(serializers.ModelSerializer):
spoken_count = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Sentence
fields = ('content_jp', 'content_en', 'score', 'spoken_count', )
def __init__(self, *args, **kwargs):
user = kwargs.pop("user", None)
super().__init__(*args, **kwargs)
self.user = user
self.fields['score'].read_only = True
def get_spoken_count(self, obj):
# return obj.usersentence_set.all().count()
return obj.usersentence_set.filter(text__user=self.user).count()
|
from abc import ABCMeta, abstractmethod
class Classifier(metaclass=ABCMeta):
    ''' An abstract interface for a classifier model. '''
def __init__(self, n_class, x_min, x_max, x_shape, x_dtype, y_dtype):
''' Initialize the abstract Classifier with metadata.
:param n_class: A ``int`` number. Number of class of the classifier.
:param x_min: A ``float`` number. Min value for the classifier's input.
:param x_max: A ``float`` number. Max value for the classifier's input.
:param x_shape: A ``tuple`` of ``int`` numbers. The shape of the classifier's input.
:param x_dtype: A ``tf.DType`` instance. The data type of the classifier's input.
:param y_dtype: A ``tf.DType`` instance. The data type of the classifier's classification result.
'''
self.n_class = n_class
self.x_min, self.x_max, self.x_shape, self.x_dtype = x_min, x_max, x_shape, x_dtype
self.y_dtype = y_dtype
# cache labels output to reuse graph
self._xs_labels_map = dict()
@abstractmethod
def _labels(self, xs):
''' Take an input ``tf.Tensor`` and give the classifier's classification result as a ``tf.Tensor``.
:param xs: A ``tf.Tensor`` instance. Input of the classifier with shape of ``self.x_shape`` and with data type
of ``self.x_dtype``. ``xs`` shall be in the range of [``self.x_min``, ``self.x_max``].
:return: A ``tf.Tensor`` instance with shape of ``(self.n_class,)`` and with data type of ``self.y_dtype``.
Represents the classification result.
'''
def labels(self, xs):
''' A wrapper for ``self._labels()`` to reuse graph.
:param xs: A ``tf.Tensor`` instance. Input of the classifier with shape of ``self.x_shape`` and with data type
of ``self.x_dtype``. ``xs`` shall be in the range of [``self.x_min``, ``self.x_max``].
:return: A ``tf.Tensor`` instance with shape of ``(self.n_class,)`` and with data type of ``self.y_dtype``.
Represents the classification result.
'''
try:
return self._xs_labels_map[xs]
except KeyError:
labels = self._labels(xs)
self._xs_labels_map[xs] = labels
return labels
class ClassifierWithLogits(Classifier, metaclass=ABCMeta):
    ''' An abstract interface for a classifier model which provides (maybe differentiable) logits output. '''
def __init__(self, n_class, x_min, x_max, x_shape, x_dtype, y_dtype):
''' Initialize the abstract ClassifierWithLogits with metadata.
:param n_class: A ``int`` number. Number of class of the classifier.
:param x_min: A ``float`` number. Min value for the classifier's input.
:param x_max: A ``float`` number. Max value for the classifier's input.
:param x_shape: A ``tuple`` of ``int`` numbers. The shape of the classifier's input.
:param x_dtype: A ``tf.DType`` instance. The data type of the classifier's input.
:param y_dtype: A ``tf.DType`` instance. The data type of the classifier's classification result.
'''
super().__init__(n_class, x_min, x_max, x_shape, x_dtype, y_dtype)
# cache logits and labels output to reuse graph
self._xs_logits_labels_map = dict()
@abstractmethod
def _logits_and_labels(self, xs):
''' Take an input ``tf.Tensor`` and give the classifier's logits output as a ``tf.Tensor`` and classification
result as a ``tf.Tensor``.
:param xs: A ``tf.Tensor`` instance. Input of the classifier with shape of ``self.x_shape`` and with data type
of ``self.x_dtype``. ``xs`` shall be in the range of [``self.x_min``, ``self.x_max``].
:return: A tuple of two tensor, which represent the logits and the labels output of the classifier.
'''
def _labels(self, xs):
''' Implementation for the ``Classifier`` interface. '''
_, labels = self._logits_and_labels(xs)
return labels
def logits_and_labels(self, xs):
''' A wrapper for ``self._logits_and_labels()`` to reuse graph.
:param xs: A ``tf.Tensor`` instance. Input of the classifier with shape of ``self.x_shape`` and with data type
of ``self.x_dtype``. ``xs`` shall be in the range of [``self.x_min``, ``self.x_max``].
:return: A tuple of two tensor, which represent the logits and the labels output of the classifier.
'''
try:
logits, labels = self._xs_logits_labels_map[xs]
except KeyError:
logits, labels = self._logits_and_labels(xs)
self._xs_logits_labels_map[xs] = (logits, labels)
return logits, labels
def labels(self, xs):
''' A wrapper for ``self._logits_and_labels()`` to reuse graph which returns only labels output.
:param xs: A ``tf.Tensor`` instance. Input of the classifier with shape of ``self.x_shape`` and with data type
of ``self.x_dtype``. ``xs`` shall be in the range of [``self.x_min``, ``self.x_max``].
:return: A tensor, which represent the labels output of the classifier.
'''
_, labels = self.logits_and_labels(xs)
return labels
def logits(self, xs):
''' A wrapper for ``self._logits_and_labels()`` to reuse graph which returns only logits output.
:param xs: A ``tf.Tensor`` instance. Input of the classifier with shape of ``self.x_shape`` and with data type
of ``self.x_dtype``. ``xs`` shall be in the range of [``self.x_min``, ``self.x_max``].
:return: A tensor, which represent the logits output of the classifier.
'''
logits, _ = self.logits_and_labels(xs)
return logits
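# A minimal sketch (an assumption, not part of the original module) of a concrete
# subclass: a tiny Keras MLP whose architecture, shapes and dtypes are illustrative.
class TinyMLPClassifier(ClassifierWithLogits):
    def __init__(self):
        import tensorflow as tf  # assumed available; the base module does not import it
        super().__init__(n_class=10, x_min=0.0, x_max=1.0,
                         x_shape=(28, 28, 1), x_dtype=tf.float32, y_dtype=tf.int32)
        self._model = tf.keras.Sequential([
            tf.keras.layers.Flatten(input_shape=self.x_shape),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(self.n_class),
        ])
    def _logits_and_labels(self, xs):
        import tensorflow as tf
        logits = self._model(xs)
        labels = tf.argmax(logits, axis=-1, output_type=self.y_dtype)
        return logits, labels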
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://projecteuler.info/problem=5
# 232792560
el = 2520
if __name__ == '__main__':
while True:
st = True
        for d in [11, 13, 14, 16, 17, 18, 19, 20]:
if el % d != 0:
st = False
if not st:
el += 2520
continue
print(el)
exit(0)
exit(1)
|
from django.db import models
from members.models import Member
class Project(models.Model):
"""A project worked on by members and containing bugs"""
POSSIBLE_STATUS = ['ON-GOING', 'FINISHED', 'PAUSED', 'CLOSED']
STATUS_CLASSES = {
'ON-GOING': 'primary',
'FINISHED': 'success',
'PAUSED': 'secondary',
'CLOSED': 'danger',
}
ACTIVE_STATUS = ['ON-GOING']
title = models.CharField(max_length=255)
description = models.TextField(blank=True)
_status = models.CharField(max_length=10, default='ON-GOING')
creationDate = models.DateTimeField(auto_now_add=True)
closingDate = models.DateTimeField(null=True, blank=True, default=None)
members = models.ManyToManyField(
Member, related_name='projects', blank=True
)
supervisors = models.ManyToManyField(
Member, related_name='supervised_projects', blank=True
)
@property
def status(self):
return self._status
def set_status(self, status):
"""Set the status of the project checking for valid status"""
if status not in self.POSSIBLE_STATUS:
possibleVals = str(self.POSSIBLE_STATUS).strip('[]')
raise ValueError(
'Project status must be one of the following: ' + possibleVals
)
self._status = status
@property
def status_tuples(self):
"""A list of tuples containing the status and its bootstrap class"""
return [(s, self.STATUS_CLASSES[s]) for s in self.POSSIBLE_STATUS]
@classmethod
def get_active(cls):
"""Return a queryset with the active projects"""
return cls.objects.filter(_status__in=cls.ACTIVE_STATUS)
@property
def active_bugs(self):
if not self.bugs.exists():
return self.bugs
return self.bugs.filter(_status__in=self.bugs.first().ACTIVE_STATUS)
def __str__(self):
"""Return the string representation of the project object"""
return self.title
__repr__ = __str__
|
from scrapy import Item, Field
class V2exLoginItem(Item):
    # topic title
    title = Field()
    # node (forum section)
    node = Field()
    # author
    auth = Field()
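# Illustrative usage sketch (an assumption, not part of the original module): how a
# spider callback might populate this item; the CSS selectors are placeholders and
# would need to match the actual V2EX markup.
def parse_topic(self, response):
    item = V2exLoginItem()
    item['title'] = response.css('span.item_title a::text').get()
    item['node'] = response.css('a.node::text').get()
    item['auth'] = response.css('span.small strong a::text').get()
    yield item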
|
# -*- coding: utf-8 -*-
import time
import datetime
from itertools import chain, groupby
from flask_admin import expose
from flask_login import current_user
from flask_babel import gettext as _
from application.extensions import admin
import application.models as Models
from flask import redirect, url_for, request, jsonify
from application.controllers.admin import AdminView
from application.utils import redirect_url, groupby, Pagination
class OrderView(AdminView):
_permission = 'order'
@expose('/<tab>/')
@expose('/')
def index(self, tab='all'):
page = request.args.get('page',1)
if tab == 'all':
orders = Models.Order.commodities(is_paid=True,
status__nin=['ABNORMAL', 'CANCELLED', 'REFUNDED'], is_test=False)
elif tab == 'test':
orders = Models.Order.commodities(is_paid=True,
status__nin=['ABNORMAL', 'CANCELLED', 'REFUNDED'], is_test=True)
elif tab == 'transfer':
orders = Models.Order.transfer()
elif tab == 'unpaid':
orders = Models.Order.commodities(is_paid=False)
elif tab == 'irregularity':
orders = Models.Order.commodities(
status__in=['ABNORMAL', 'CANCELLED', 'REFUNDED'])
elif tab == 'payment_abnormal':
orders = Models.Order.objects(is_payment_abnormal=True)
else:
orders = Models.Order.commodities(is_paid=True)
orders = orders.order_by('-paid_date', '-created_at')
data = Pagination(orders, int(page), 10)
return self.render('admin/order/orders.html',
tab=tab,
page=page,
orders=data,
section='orders')
@expose('/search/<page>')
@expose('/search')
def search(self, page=1):
name = request.args.get(u'name')
if name.isdigit():
orders = Models.Order.objects(short_id=int(name))
else:
addrs = Models.Address.objects(receiver=name).distinct('id')
orders = Models.Order.commodities(address__in=addrs)
if len(name) > 15:
orders = Models.Order.objects(id=name)
data = Pagination(orders, int(page), 10)
return self.render('admin/order/orders.html',
name=name,
orders=data,
page=page,
section='search')
@expose('/<order_id>/cancel')
def cancel_order(self, order_id):
order = Models.Order.objects(id=order_id).first_or_404()
reason = request.args.get(u'reason')
order.cancel_order(reason=reason or 'cancelled from content page')
return redirect(url_for('.index'))
@expose('/mall_info', methods=['GET', 'POST'])
def edit_mall_info(self):
        if not request.is_xhr:  # only for AJAX
            return jsonify({'message': 'FAILED'})
if request.method == 'GET':
logistic_id = request.args.get('id')
logistic = Models.Logistic.objects(id=logistic_id).first()
return jsonify({'remark': logistic.detail.extra,
'id': str(logistic.id)
})
if request.method == 'POST':
logistic_id = request.form.get('lid')
logistic = Models.Logistic.objects(id=str(logistic_id)).first()
logistic.detail.extra = request.form.get('remark')
return jsonify({'remark': logistic.detail.extra,
'message': "OK"})
admin.add_view(OrderView(name=_('Order'), category=_('Order'), menu_icon_type="fa", menu_icon_value="bar-chart-o"))
|
# Generated by Django 2.2.7 on 2020-10-01 05:23
import ckeditor_uploader.fields
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0018_auto_20200929_1828'),
]
operations = [
migrations.AlterField(
model_name='menu',
name='status',
field=models.IntegerField(choices=[(1, 'Secondry Menu'), (0, 'Primary Menu')], default=0, verbose_name='Status'),
),
migrations.AlterField(
model_name='task_manegar',
name='created_time',
field=models.DateField(default=datetime.datetime(2020, 10, 1, 8, 53, 31, 852042), null=True, verbose_name='Start date'),
),
migrations.AlterField(
model_name='task_manegar',
name='end_time',
field=models.DateField(default=datetime.datetime(2020, 10, 1, 8, 53, 31, 852042), null=True, verbose_name='Finish date'),
),
migrations.AlterField(
model_name='task_manegar',
name='status',
field=models.IntegerField(choices=[(3, 'Done!'), (1, 'In progress'), (0, 'To Do')], default=0, verbose_name='Status'),
),
migrations.AlterField(
model_name='time',
name='status',
field=models.IntegerField(choices=[(0, 'On Duty'), (1, 'Out of Duty')], default=0, verbose_name='Status'),
),
migrations.CreateModel(
name='Tickets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Title')),
('content', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Content')),
('date', models.DateField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Portable', verbose_name='User')),
],
options={
'verbose_name': 'Ticket',
'verbose_name_plural': 'Tickets',
},
),
]
|
import math
import numpy as np
import os
import datetime
import torch
import torch.optim as optim
import torch.nn.functional as F
# from tqdm import tqdm
from torchvision import transforms, datasets
from net import Net
from itertools import takewhile
import matplotlib.pyplot as plt
# MAX_SAVEPOINTS = 10
CLASSES = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
PRINT_AFTER_X_BATCHES = 50
class Training():
def __init__(self, lr=0.0001, momentum=0.0, savepoint_dir="savepoints", sp_serial=-1, no_cuda=False, batch_size=10, num_workers=2, weight_decay=0.0):
self.batch_size = batch_size
self.num_workers = num_workers
self.sp_serial = sp_serial
# self.savepoint_dir = savepoint_dir
self.net = Net(classes_number=len(CLASSES))
if (not no_cuda) and torch.cuda.is_available():
self.net.cuda()
self.device = "cuda"
print(f"Device :: CUDA {torch.cuda.get_device_name()}")
else:
self.device = "cpu"
print(f"Device :: CPU")
# TODO: dynamic learning rate
# Define optimizer AFTER device is set
self.optimizer = optim.RMSprop(self.net.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
# self.optimizer = optim.Adam(self.net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
self.criterion = torch.nn.CrossEntropyLoss()
self.transforms = transforms.Compose([
transforms.Grayscale(1),
transforms.RandomAffine(0, translate=(.1, .1)),
transforms.ToTensor(),
transforms.Normalize((0,), (1,)),
# TODO: ZCA whitening with: transforms.LinearTransformation()
])
'''
# load savepoints if available
savepoints = os.listdir(self.savepoint_dir) if os.path.isdir(
self.savepoint_dir) else []
if not savepoints == []:
self._loadSavepoint(savepoints)
else:
print("No savepoints found!")
'''
# TODO: Use actual dataset
self.trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=self.transforms)
self.trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
self.testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=self.transforms)
self.testloader = torch.utils.data.DataLoader(self.testset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)
def run(self, epochs=1):
# TODO: Save and load epochs from savepoint
while True:
print("Starting training!")
self.net.train() # switch to train mode
# for each epoch
for epoch in range(epochs):
print(f"Epoch {epoch+1} of {epochs}:")
running_loss = 0.0
# for each batch
for i, data in enumerate(self.trainloader):
inputs, targets = data
# Show first image for testing transforms
# for index, i in enumerate(inputs):
# img = i.numpy()[0]
# plt.imshow(img, cmap="gray")
# plt.title(CLASSES[targets[index]])
# plt.show()
# exit()
if self.device == "cuda":
inputs = inputs.cuda()
targets = targets.cuda()
# Forward pass
outputs = self.net(inputs)
loss = self.criterion(outputs, targets)
if math.isnan(loss.item()):
print(" ############# Loss is NaN #############")
print("Outputs: ")
print(outputs)
print("Loss: ")
print(loss)
exit(-1)
# Backpropagation pass
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# if math.isnan(loss.item()):
# print(loss)
# print(outputs)
# exit()
running_loss += loss.item()
if i % PRINT_AFTER_X_BATCHES == PRINT_AFTER_X_BATCHES-1:
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / PRINT_AFTER_X_BATCHES))
# print(outputs)
running_loss = 0.0
# self._makeSavepoint()
print("Finished training!")
self.evaluate()
def evaluate(self):
self.net.eval()
correct = 0
total = 0
with torch.no_grad():
for data in self.testloader:
                images, labels = data
                # Move the evaluation batch to the same device as the model,
                # otherwise inference fails when training ran on CUDA.
                if self.device == "cuda":
                    images = images.cuda()
                    labels = labels.cuda()
                outputs = self.net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print("Accuracy of the network on %d test images: %d %%" %
(100 * correct / total))
'''
def _loadSavepoint(self, savepoints):
if not os.path.isdir(self.savepoint_dir):
return
target_file = None
ser_files = self._getSavepointList()
if len(ser_files) == 0:
print("No existing savepoints!")
return
if self.sp_serial > -1:
for n, f in ser_files:
if n == self.sp_serial:
target_file = f
else:
self.sp_serial, target_file = ser_files[-1]
print(f"Loading progress from {target_file}!")
self.net.load_state_dict(torch.load(
os.path.join(self.savepoint_dir, target_file)))
self.net.eval()
def _makeSavepoint(self):
if not os.path.isdir(self.savepoint_dir):
os.mkdir(self.savepoint_dir)
target_path = os.path.join(
self.savepoint_dir, self._getNextSavepointPath())
print(f"Saving progress in {target_path}!")
torch.save(self.net.state_dict(), target_path)
self._removeOldSavepoints()
def _getSavepointList(self):
# only look @ .pt and .pth files
path_files = [f for f in os.listdir(self.savepoint_dir) if f[-4:]
== ".pth" or f[-3:] == ".pt"]
# parse serial number
ser_files = [(int(''.join([t for t in takewhile(lambda x: x != '_', f)])), f)
for f in path_files]
# sort in place
ser_files.sort()
return ser_files
def _getNextSavepointPath(self):
sn = self.sp_serial + 1
fn = "%03d_savepoint.pth" % sn
current_files = os.listdir(self.savepoint_dir)
while fn in current_files:
sn = sn + 1
fn = "%03d_savepoint.pth" % sn
self.sp_serial = sn
return fn
def _removeOldSavepoints(self):
files = self._getSavepointList()
# files :: [(sn :: Int, path :: String)] sorted
while len(files) > MAX_SAVEPOINTS:
t = files[0][1]
os.remove(os.path.join(self.savepoint_dir, t))
print(
f"Removing old savepoint: {t}")
files = files[1:]
'''
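# Minimal usage sketch (illustrative, not part of the original module); the
# hyperparameter values below are arbitrary examples:
#
#   if __name__ == "__main__":
#       training = Training(lr=0.0001, batch_size=10, num_workers=2)
#       training.run(epochs=1)   # loops forever: trains for `epochs` epochs,
#                                # evaluates, then starts over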
|
import os
import pandas as pd
import datetime
import numpy as np
def import_OR_record_sheet(OR_fname, output_OR_fname):
full_path = os.path.expanduser(OR_fname)
OR_file = pd.read_excel(full_path)
OR_file['Spectrum-OV'] = 'SPECTRUM-OV-' + OR_file['Spectrum-OV'].astype(str)
OR_file['Surgery Date'] = OR_file['Surgery Date'].apply(
lambda x: x.date() if isinstance(x, datetime.datetime) else x)
OR_file = OR_file[["Notes", "Spectrum-OV", "MRN", "Surgery Type", "Surgery Date", "Attending", "Surgery Location", "Room", "Study Protocol", "Excluded", "Final Path"]]
OR_file.to_excel(output_OR_fname)
def import_IGO_storage_sheet(IGO_fname, elab_fname, output_fname):
IGO_path = os.path.expanduser(IGO_fname)
elab_path = os.path.expanduser(elab_fname)
IGO_file = pd.read_excel(IGO_path, usecols=["IGO ID", "Sample ID"])
elab_file = pd.read_excel(elab_path)
IGO_file.dropna(subset=["Sample ID"], inplace=True)
IGO_file.set_index("Sample ID")
elab_file.set_index("scRNA REX Sample ID")
all_data = pd.merge(IGO_file, elab_file, how='left', left_on="Sample ID", right_on="scRNA REX Sample ID")
all_data["Storage Facility"] = "IGO"
all_data = all_data[
["Patient ID", "MRN", "Surgery Type", "Surgery Date", "Surgeon", "Surgery Location", "Room", "Study Protocol",
"Excluded", "Final Pathology", "Processing Date", "Specimen Site", "Site Details", "Primary Site",
"Tissue Type", "Preservation Method", "Downstream Submission", "Submitted Populations", "Storage Facility", "IGO ID", "Sample ID"]].sort_values(by='Sample ID')
all_data.to_excel(output_fname)
#import_OR_record_sheet("~/Desktop/MSKCC/SPECTRUM/Sample Collection/Spectrum Patient OR Collection Record.xlsx", "elab SPECTRUM Patient OR Collection Record.xlsx")
import_IGO_storage_sheet("~/Desktop/MSKCC/SPECTRUM/Sample Collection/Sample Processing & Storage/IGO 121620 - Spectrum Remainder Aliquots Post-10X.xlsx", "~/Desktop/MSKCC/SPECTRUM/Database Management/ELAB PULL/elabpull wMRN 121620.xlsx", 'elab IGO Storage Aliquots.xlsx')
|
# The contents of this file is free and unencumbered software released into the
# public domain. For more information, please refer to <http://unlicense.org/>
character_query = '''
query ($id: Int, $search: String) {
Page {
characters (id: $id, search: $search) {
name {
full
native
alternative
}
description
image {
large
medium
}
media {
nodes {
title {
romaji
english
native
}
type
format
siteUrl
}
}
siteUrl
id
dateOfBirth {
year
month
day
}
age
favourites
}
}
}
'''
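# Illustrative sketch of how a query like the one above is typically sent to a
# GraphQL endpoint (not part of the original module; the URL and variables are
# assumptions based on the AniList-style schema used here):
#
#   import requests
#   resp = requests.post(
#       "https://graphql.anilist.co",
#       json={"query": character_query, "variables": {"search": "Mikasa"}},
#   )
#   characters = resp.json()["data"]["Page"]["characters"]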
|
# coding: utf-8
from parlaseje.models import *
from sklearn.decomposition import PCA
from numpy import argpartition
all_votes = Vote.objects.all()
all_votes_as_list = list(all_votes)
all_votes_as_vectors = [(vote.votes_for, vote.against, vote.abstain, vote.not_present) for vote in all_votes_as_list]
pca = PCA(n_components=1)
pca.fit(all_votes_as_vectors)
distances = pca.score_samples(all_votes_as_vectors)
number_of_samples = len(all_votes_as_list)/4
ind = argpartition(distances, number_of_samples)[:number_of_samples]
for i in ind: print all_votes_as_vectors[i], all_votes[i].motion
|
import pandas as pd
datatrain = pd.read_csv('/home/edvvin/Desktop/mnist/mnist_train.csv')
datatrain_array = datatrain.values
ytrain = datatrain_array[:,0]
temp = datatrain_array[:,1:]
xtrain = []
for ar in temp:
xic = []
for i in range(28):
xic.append(ar[i*28:(i+1)*28])
xtrain.append([xic])
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
torch.manual_seed(1234)
hl = 100
lr = 0.001
num_epoch = 100
inl = 28*28
outl = 10
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 8,kernel_size=5, stride=1)
self.pool1 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.conv2 = nn.Conv2d(8, 8,kernel_size=3, stride=1)
self.pool2 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.fc2 = nn.Linear(5*5*8,hl)
self.fc3 = nn.Linear(hl, 10)
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool2(x)
x = x.view(x.size()[0], -1)
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
net.to(device)
crit = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
for epoch in range(num_epoch):
X = Variable(torch.Tensor(xtrain).float()).to(device)
Y = Variable(torch.Tensor(ytrain).long()).to(device)
optimizer.zero_grad()
out = net(X)
loss = crit(out, Y)
loss.backward()
optimizer.step()
if (epoch) % 10 == 0:
print ('Epoch [%d/%d] Loss: %.4f'%(epoch+1, num_epoch, loss.data))
datatest = pd.read_csv('/home/edvvin/Desktop/mnist/mnist_test.csv')
datatest_array = datatest.values
temp = datatest_array[:,1:]
xtest = []
for ar in temp:
xic = []
for i in range(28):
xic.append(ar[i*28:(i+1)*28])
xtest.append([xic])
ytest = datatest_array[:,0]
X = Variable(torch.Tensor(xtest).float().to(device))
Y = torch.Tensor(ytest).long().to(device)
out = net(X)
_, predicted = torch.max(out.data, 1)
print('Accuracy of the network %.4f %%' % (100 * torch.sum(Y==predicted) / 10000.0 ))
|
from django.contrib import admin
from django.conf.urls import url
from . import views
from django.urls import path
from django.views.generic import RedirectView
urlpatterns = [
path('',views.index, name = 'index'),
#url(r'^admin/', admin.site.urls),
url(r'^$',views.signIn),
url(r'^postsign/',views.postsign),
url(r'^logout/',views.logout,name="log"),
url(r'^signup/',views.signUp,name='signup'),
url(r'^postsignup/',views.postsignup,name='postsignup'),
url(r'^favicon\.ico$',RedirectView.as_view(url='/static/images/favicon.ico')),
#url(r'^', include('home.urls', namespace='home')),
]
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
JVM_E2E_METRICS = [
'jvm.buffer_pool.direct.capacity',
'jvm.buffer_pool.direct.count',
'jvm.buffer_pool.direct.used',
'jvm.buffer_pool.mapped.capacity',
'jvm.buffer_pool.mapped.count',
'jvm.buffer_pool.mapped.used',
'jvm.cpu_load.process',
'jvm.cpu_load.system',
'jvm.gc.cms.count',
'jvm.gc.eden_size',
'jvm.gc.metaspace_size',
'jvm.gc.old_gen_size',
'jvm.gc.parnew.time',
'jvm.gc.survivor_size',
'jvm.heap_memory',
'jvm.heap_memory_committed',
'jvm.heap_memory_init',
'jvm.heap_memory_max',
'jvm.loaded_classes',
'jvm.non_heap_memory',
'jvm.non_heap_memory_committed',
'jvm.non_heap_memory_init',
'jvm.non_heap_memory_max',
'jvm.os.open_file_descriptors',
'jvm.thread_count',
]
JMX_E2E_METRICS = [
'jmx.gc.major_collection_count',
'jmx.gc.major_collection_time',
'jmx.gc.minor_collection_count',
'jmx.gc.minor_collection_time',
]
JVM_E2E_METRICS_NEW = list(JVM_E2E_METRICS)
JVM_E2E_METRICS_NEW.remove('jvm.gc.cms.count')
JVM_E2E_METRICS_NEW.remove('jvm.gc.parnew.time')
JVM_E2E_METRICS_NEW.extend(m.replace('jmx.', 'jvm.', 1) for m in JMX_E2E_METRICS)
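# i.e. the "new" metric list drops the CMS/ParNew collector metrics and adds the
# jvm.gc.major_collection_* / jvm.gc.minor_collection_* equivalents instead.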
|
#!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Python module for Blender Driver demonstration application.
This code illustrates:
- Basic use of the Blender Game Engine (BGE) KX_GameObject interface, to
change the size of a game object.
- Termination of BGE when any key is pressed.
- Spawning a single thread at the start of BGE execution. The thread is
joined before terminating BGE.
- Use of a thread lock to indicate termination is due.
This application doesn't override the Blender Driver game_tick, which is then a
pass in the base class.
This module can only be used from within the Blender Game Engine."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Module for command line switches.
# https://docs.python.org/3.5/library/argparse.html
# The import isn't needed because this class uses the base class to get an
# object.
# import argparse
#
# Module for levelled logging messages.
# Tutorial is here: https://docs.python.org/3.5/howto/logging.html
# Reference is here: https://docs.python.org/3.5/library/logging.html
from logging import DEBUG, INFO, WARNING, ERROR, log
#
# This module uses the Thread class.
# https://docs.python.org/3.4/library/threading.html#thread-objects
import threading
#
# Module for sleep.
# https://docs.python.org/3.5/library/time.html
import time
#
# Third party modules, in alphabetic order.
#
# Blender library imports, in alphabetic order.
#
# Main Blender Python interface, which is used to get the size of a mesh.
# Import isn't needed because the base class keeps a reference to the interface
# object.
# import bpy
#
# Blender Game Engine KX_GameObject
# Import isn't needed because this class gets an object that has been created
# elsewhere.
# https://www.blender.org/api/blender_python_api_current/bge.types.KX_GameObject.html
#
# Blender Game Engine maths utilities, which can only be imported if running
# from within the Blender Game Engine.
# Import isn't needed because this class gets a Vector from the bpy layer.
# http://www.blender.org/api/blender_python_api_current/mathutils.html
#
# Local imports.
#
# Blender Driver application with threads and locks.
from . import demonstration
# Diagnostic print to show when it's imported. Only printed if all its own
# imports run OK.
print('"'.join(('Application module ', __name__, '.')))
class Application(demonstration.Application):
templates = {
'Cube': {'subtype':'Cube', 'physicsType':'NO_COLLISION'
, 'location': (0, 0, 1)}
}
    # Overridden.
def game_initialise(self):
super().game_initialise()
threading.Thread(
target=self.pulse_object_scale, name="pulse_object_scale" ).start()
def _get_scales(self):
"""Eternal generator for scale vectors. Uses the following properties as
parameters: minScale, changeScale, increments.
"""
minScale = self.arguments.minScale
changeScale = self.arguments.changeScale
increments = self.arguments.increments
cycleDec = 0
scale = 0
yield_ = [None] * 3
while True:
# Set list and indexes
#
# What was decrementing will be unchanging.
yield_[cycleDec] = minScale
#
# Next dimension will be decrementing.
cycleDec = (cycleDec + 1) % 3
#
# Next next dimension will be incrementing.
cycleInc = (cycleDec + 1) % 3
for scale in range(increments):
yield_[cycleDec] = (
minScale
+ (changeScale * (increments - scale) / increments))
yield_[cycleInc] = (
minScale
+ (changeScale * (scale + 1) / increments))
yield yield_
def pulse_object_scale(self):
"""Pulse the scale of a game object for ever. Run as a thread."""
minScale = self.arguments.minScale
changeScale = self.arguments.changeScale
increments = self.arguments.increments
objectName = self.arguments.pulsar
#
# Get the underlying dimensions of the Blender mesh, from the data
# layer. It's a mathutils.Vector instance.
dimensions = self.bpy.data.objects[objectName].dimensions
#
# Get a reference to the game object.
if objectName not in self.gameScene.objects:
self.game_add_object(objectName)
object_ = self.gameScene.objects[objectName]
get_scales = self._get_scales()
while True:
log(DEBUG, "locking ...")
self.mainLock.acquire()
try:
log(DEBUG, "locked.")
if self.terminating():
log(DEBUG, "Stop.")
get_scales.close()
return
scales = next(get_scales)
worldScale = dimensions.copy()
for index, value in enumerate(scales):
worldScale[index] *= value
object_.worldScale = worldScale
finally:
log(DEBUG, "releasing.")
self.mainLock.release()
if self.arguments.sleep is not None:
time.sleep(self.arguments.sleep)
def get_argument_parser(self):
"""Method that returns an ArgumentParser. Overriden."""
parser = super().get_argument_parser()
parser.prog = ".".join((__name__, self.__class__.__name__))
parser.add_argument(
'--increments', type=int, default=40, help=
"Number of increments. Default: 40.")
parser.add_argument(
'--changeScale', type=float, default=2.0,
help="Change of scale. Default: 2.0.")
parser.add_argument(
'--minScale', type=float, default=0.5,
help="Minimum scale. Default: 0.5.")
parser.add_argument(
'--pulsar', default="Cube",
help="Name of the object to pulse.")
parser.add_argument(
'--sleep', type=float, help=
"Sleep after each increment, for a floating point number of"
" seconds. Default is not to sleep.")
return parser
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__':
s = str(input("Put some text: "))
c = 0
if (s[0] == ' ') and (s[len(s) - 1] == ' '):
for i in s:
if i == ' ':
c += 1
        print(f"Number of words in the string = {c - 1}")
else:
for i in s:
if i == ' ':
c += 1
        print(f"Number of words in the string = {c + 1}")
|
#!/usr/bin/env python
#
# Copyright (c) 2016 10X Genomics, Inc. All rights reserved.
#
# Support code for Supernova stages
#
# "Pseudo" Martian API for reporting errors and warnings from C++
# code prior to deployment of a formal adapter.
#
# Code to plot molecule length histogram and the kmer spectrum
import os
import json
import martian
from constants import ALARMS_LOCATION
def check_alert(stage, alert):
keys = ["action", "metric", "compare", "threshold", "message"]
for key in keys:
if not alert.has_key(key):
print key, " is missing in "
print alert
martian.throw("incorrectly formatted alert, see stdout.")
if not ( alert["compare"] == "<" or alert["compare"] == ">" ):
print alert
martian.throw("invalid value for compare in alert")
if not ( type(alert["threshold"]) == int or type(alert["threshold"]) == float ):
martian.throw("%s: invalid type for threshold" % type(alert["threshold"]))
def load_alerts():
alerts_file = os.path.join(ALARMS_LOCATION, "alarms-supernova.json")
json_string = open( alerts_file, "r" ).read()
try:
alerts = json.loads(json_string)
except:
martian.throw("Incorrectly formatted alarms-supernova.json file.")
for stage, alert_list in alerts.iteritems():
for alert in alert_list:
check_alert(stage, alert)
return alerts
def write_stage_alerts(stage, path, alerts_file="alerts.list"):
alerts = load_alerts()
out_file = os.path.join(path, alerts_file)
if not os.path.exists(path):
os.makedirs(path)
out_handle = open(out_file, "w")
keys = ["metric", "threshold", "compare", "action", "message"]
if not alerts.has_key(stage):
martian.throw("No alerts found for stage %s" % stage)
for alert in alerts[stage]:
out_handle.write("#\n")
out_handle.write(alert["metric"]+"\n")
out_handle.write(str(alert["threshold"])+"\n")
out_handle.write(alert["compare"]+"\n")
out_handle.write(alert["action"]+"\n")
out_handle.write(alert["message"]+"\n")
out_handle.close()
class AlertLogger:
def __init__(self, stage):
alerts_all = load_alerts()
self.alerts = alerts_all[stage]
def issue(self, metric, value, format_string=""):
for alert in self.alerts:
## find the right metric
if alert["metric"] == metric:
## should we trigger?
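                ## i.e. trigger when compare is ">" and value >= threshold,
                ## or when compare is "<" and value < threshold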
if (alert["compare"] == ">") ^ (value < alert["threshold"]):
## optional formatting of alert message with format_string or value
if len(format_string) == 0:
format_string = str(value)
message = alert["message"].replace("{}", format_string)
## issue an alert
if alert["action"] == "alarm":
martian.alarm(message)
elif alert["action"] == "exit":
martian.exit(message)
class SupernovaAlarms:
SN_STAGE_ALARMS = "martian_alerts.json"
SN_ROLLUP_ALARMS = "alerts_rollup.txt"
SN_EXIT = u"exit"
SN_ALARM = u"alarm"
SN_ALARM_HEAD = "The following warning(s) were issued prior to encountering an error:"
SN_UNEXPECTED_TEXT = "An unexpected error has occurred."
SN_ALERT_HANDLERS={ SN_ALARM : martian.alarm, \
u"log_info" : martian.log_info, \
u"log_warn" : martian.log_warn, \
u"throw" : martian.throw, \
SN_EXIT : martian.exit }
def __init__(self, base_dir,
alarms_file = SN_STAGE_ALARMS, rollup_file = SN_ROLLUP_ALARMS,
delete = True ):
self._alarms_file=os.path.join(base_dir, alarms_file)
self._rollup_file=os.path.join(base_dir, rollup_file)
self._posted=False
if delete: self.check_delete()
def exit(self, msg=None):
exit_handler=self.SN_ALERT_HANDLERS[self.SN_EXIT]
if msg is None:
full_msg = self.SN_UNEXPECTED_TEXT
else:
full_msg = msg
if os.path.exists(self._rollup_file):
rollup = open(self._rollup_file, "r").read()
full_msg += "\n\n"
full_msg += self.SN_ALARM_HEAD
full_msg += "\n\n"
full_msg += rollup
exit_handler(full_msg)
def post(self):
self._posted=True
## if alarm file does not exist, do nothing
if os.path.exists(self._alarms_file):
with open(self._alarms_file, 'r') as fp:
alerts=json.loads(fp.read())
else:
return
meta_alarm=[]
exit_str=''
for k,v in alerts.iteritems():
if not k in self.SN_ALERT_HANDLERS:
meta_alarm.append("unknown key {} in {} (BUG)".format(k,self._alarms_file))
elif k == self.SN_EXIT:
exit_str=';'.join(v)
else:
handler=self.SN_ALERT_HANDLERS[k]
for post in v:
handler( post )
for alarm in meta_alarm:
martian.alarm(alarm)
if len(exit_str) > 0:
self.exit(exit_str)
def __del__(self):
if not self._posted:
martian.alarm("C++ alarms were not posted, but left in file (BUG).")
else:
self.check_delete()
def check_delete(self, filename = None):
if filename == None:
filename = self._alarms_file
if os.path.isfile( filename ):
os.unlink(filename)
## this function plots a normalized histogram
def plot_norm_pct_hist( plt, values, binsize, start, **plt_args ):
x = start
xvals = []
yvals = []
norm = 0.0
for v in values:
xvals.append(x)
yvals.append(v)
xvals.append(x+binsize)
norm += v
yvals.append(v)
x += binsize
for i in xrange (len(yvals)):
yvals[i] = yvals[i]/norm*100.0
plt.plot( xvals, yvals, **plt_args)
plt.xlim( start, x )
def try_plot_molecule_hist(args):
try:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import json
import numpy as np
from statsmodels.nonparametric.smoothers_lowess import lowess
## final plot name
plot_name = os.path.join( args.parent_dir, "stats",
"molecule_lengths.pdf" )
mol_fn = os.path.join( args.parent_dir, "stats",
"histogram_molecules.json" )
mol_dict = json.loads(open( mol_fn, "r" ).read())
if mol_dict["numbins"] > 0:
xmin = 0 ## min length kb
xmax = 300 ## max length kb
binsize=1 ## bin size of hist in kb
## compute length-weighted histogram
lwhist = []
for x, v in enumerate( mol_dict["vals"] ):
lwhist.append( (x + 0.5)* v )
## truncate
lwhist = lwhist[:xmax]
## normalize
norm = sum(lwhist)
lwhist = np.array([100*x/norm for x in lwhist])
## lowess smoothing
xvalues = np.arange(0, xmax, 1) + 0.5
newhist = lowess(lwhist, xvalues, frac=0.1, return_sorted=False)
## do the plotting
fig, ax = plt.subplots()
## set up axis, grid
ax.grid(True)
plt.locator_params( 'x', nbins=10 )
plt.locator_params( 'y', nbins=10 )
plt.plot ( newhist, **{"lw": 2, "ls": "-", "color": "blue"} )
plt.xlim( xmin, xmax )
plt.ylim( 0, None )
plt.xlabel ( "Inferred molecule length (kb)")
plt.ylabel ( "Percent of total DNA mass (1 kb bins)")
plt.title ("Length-weighted histogram (LOWESS-smoothed)")
plt.savefig( plot_name )
plt.close()
except ImportError, e:
martian.log_info( "Error importing libraries for plotting" )
martian.log_info( str(e) )
except KeyError, e:
martian.log_info( "Invalid key in json while plotting" )
martian.log_info( str(e) )
except IOError, e:
martian.log_info( "Could not find the molecule json for plotting" )
martian.log_info( str(e) )
except Exception as e:
martian.log_info( "Unexpected error while plotting molecule length")
martian.log_info( str(e) )
## take a set of (distinct) numbers and format them into a list of strings
## where we have an integer followed by a suffix
## picks out the representation that has the shortest length
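## e.g. nice_labels([1000, 2000, 3000]) -> (['1K', '2K', '3K'], 1); the second
## value is the index of the chosen suffix, i.e. labels were scaled by 1000**1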
def nice_labels ( numbers ):
suffixes = ['', 'K', 'M', 'G']
suff_len = []
## figure out which suffix gives us the shortest label length
for i, suff in enumerate( suffixes ):
test = [float(y)/(1000.0**i) for y in numbers]
labels = ["%d%s"% (int(y), suff) for y in test]
## make sure that in the new representation there are no
## degenerate cases
if len(set(labels)) == len(labels):
suff_len.append( (sum(map(len, labels)), i) )
## if we fail to find any satisfactory suffixes, just use defaults
if len(suff_len) == 0:
return map(str, numbers), 0
else:
suff_len.sort()
i = suff_len[0][1]
labels = ["%d%s"% (int(float(y)/(1000.0**i)), suffixes[i]) for y in numbers]
return labels, i
def try_plot_kmer_spectrum(args):
try:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import json
## final plot name
plot_name = os.path.join( args.parent_dir, "stats",
"kmer_spectrum.pdf" )
kmer_fn = os.path.join( args.parent_dir, "stats",
"histogram_kmer_count.json" )
data = json.loads(open( kmer_fn, "r" ).read())
## max k-mer multiplicity
MAX = 100
if len( data["vals"] ) == 0:
martian.log_info ("No kmer data to plot.")
return
elif len( data["vals"] ) < MAX:
martian.log_info ("Warning: no kmers with multiplicity >= %d"%MAX )
MAX = len( data["vals"] )
fig, ax = plt.subplots()
#plt.setp(ax.get_yticklabels(), visible=False)
## set mode to 1.0
xvals = range(MAX)
yvals = data["vals"][:MAX]
ax.plot (xvals, yvals, lw = 2.0, color="blue" )
## plot tail
tail_height = float(sum(data["vals"][MAX:]))
_, ymax = plt.ylim()
plt.axvspan (xmin=MAX-1, xmax=MAX, ymin=0, ymax=tail_height/ymax, ls=None, color="blue")
## set up axis, grid
ax.grid(True)
plt.locator_params( 'x', nbins=10 )
plt.locator_params( 'y', nbins=10 )
plt.xlim(0, MAX)
yt = ax.get_yticks()
ylabels, yexp = nice_labels( yt )
plt.yticks ( yt, ylabels, rotation=45 )
plt.xlabel ( "filtered kmer multiplicity" )
plt.ylabel ( "counts" )
plt.savefig (plot_name)
plt.close()
except ImportError, e:
martian.log_info( "Error importing libraries for plotting" )
martian.log_info( str(e) )
except KeyError, e:
martian.log_info( "Invalid key in json while plotting" )
martian.log_info( str(e) )
except IOError, e:
martian.log_info( "Could not find the molecule json for plotting" )
martian.log_info( str(e) )
except Exception as e:
martian.log_info( "Unexpected error while plotting molecule length")
martian.log_info( str(e) )
|
#
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2020 Andrey Pleshakov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from time import sleep, time
from typing import TYPE_CHECKING
from . import config, factory
from .exception import DoRollback
from .parser import PortCollector
if TYPE_CHECKING:
from .protocol import OperationControl
def find_and_save_ports(control: OperationControl) -> None:
ds = factory.data_source()
pc = PortCollector()
dp = factory.data_parser(pc)
pm = factory.persistence_manager()
while not control.interrupted:
dp.extract_data(ds.get_port_data())
if pc.port_data:
pm.save_port_data(pc.port_data, commit=True)
pc.reset()
scan_done_time = time()
control.feedback_pass_done()
while not control.interrupted and time() - scan_done_time < config.port_scan_interval:
sleep(config.port_scan_wake_up_interval)
def save_ports_from_data_file(control: OperationControl) -> None:
sf = config.data_file
if sf is not None:
pc = PortCollector()
dp = factory.data_parser(pc)
pm = factory.persistence_manager()
bs = config.import_batch_size
with open(sf, 'rt') as fo:
with dp.line_stream_consumer() as consume:
for line_no, line in enumerate(fo):
if control.interrupted:
raise DoRollback
consume(line, line_no + 1)
if len(pc.port_data) >= bs:
pm.save_port_data(pc.port_data)
pc.reset()
control.feedback_pass_done()
if pc.port_data:
pm.save_port_data(pc.port_data)
control.feedback_pass_done()
else:
raise RuntimeError('No data file is specified in the config')
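# Illustrative sketch (not part of the original module): both entry points expect
# an OperationControl-like object exposing an ``interrupted`` flag and a
# ``feedback_pass_done()`` callback, e.g. (assuming config.data_file points at an
# input file):
#
#   class _NeverInterrupted:
#       interrupted = False
#       def feedback_pass_done(self):
#           print("import pass finished")
#
#   save_ports_from_data_file(_NeverInterrupted())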
|
from django.views.generic.base import TemplateView
from .models import BannerHome
from apps.products.models import ShopDepartment
from .documents import ProductDocument
class HomePageView(TemplateView):
template_name = "shop/web/home.html"
def get_context_data(self, **kwargs):
context = super(HomePageView, self).get_context_data(**kwargs)
context['banners_home'] = BannerHome.objects.all()
context['shop_departements'] = ShopDepartment.objects.filter(top_shop=True)
return context
class SearchResultView(TemplateView):
template_name = 'shop/web/search_result.html'
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
search = request.GET['search']
print(search, '#######')
if search:
products = ProductDocument.search().query('match', name=search)
else:
products = ''
context['products'] = products
context['search'] = search
return self.render_to_response(context)
|
import logging
import os
pretrained_model_path = './pretrained-model/'
AUDIO_NAME = 'MOCK CALL_Bank Customer Service_139-284.wav' #'Bdb001_interaction_first60s.wav'
AUDIO_PATH = './wav/'
AUDIO_FILE = os.path.join(AUDIO_PATH,AUDIO_NAME)
SAMPLE_RATE = 44100
STT_SAMPLERATE = 16000
FLG_REDUCE_NOISE:bool = True
FLG_SPEECH_ENHANCE:bool = True
FLG_SUPER_ENHANCE_NEW:bool = False
FLG_SUPER_RES:bool = False
label_stop_words = 'nan'
label_checklist = 'checklist.txt'
dbHost = 'localhost'
dbName = 'UOBtests'
dbUser = 'root'
dbPwd = 'password'
sd_global_starttime = 0.0
sttModel = 'malaya-speech'
# 'malaya-speech' 'VOSK'
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Initialization Processes #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# * Automatically turn off FLG_SPEECH_ENHANCE for better results when noise
#   reduction is disabled
if not FLG_REDUCE_NOISE and FLG_SPEECH_ENHANCE:
    logging.warning('Automatically turning off Speech Enhancement since Noise Reduction is off.')
FLG_SPEECH_ENHANCE = False
|
import logging
from os import makedirs, path
from shutil import rmtree
from urllib import urlencode
from dulwich.errors import NotGitRepository
from dulwich.repo import Repo
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application
from magpie.server import make_app
logging.getLogger('tornado').setLevel(logging.ERROR)
APP = None
class BaseTest(AsyncHTTPTestCase):
@classmethod
def setUpClass(cls):
cls.path = path.join('/tmp', 'magpie_testing_git_repo')
if not path.exists(cls.path):
makedirs(cls.path)
try:
cls.repo = Repo(cls.path)
except NotGitRepository:
cls.repo = Repo.init(cls.path)
@classmethod
def tearDownClass(cls):
rmtree(cls.path)
def get_app(self):
# This global nonsense is because I was getting a port in use error when
# running more than one test. I suspect I am missing something / doing
# something wrong in testing, but this works.
# I have seen the port in use error pop up again but it was only when
# there were errors in this method
global APP
if APP is None:
APP = make_app(path.join(path.dirname(__file__),
'configs',
'default.cfg'))
return APP
def get(self, url, allow_errors=False):
self.http_client.fetch(self.get_url(url), self.stop)
res = self.wait()
if not allow_errors:
self.assertEqual(200, int(res.code))
return res
def post(self, url, allow_errors=False, **kwargs):
self.http_client.fetch(self.get_url(url), method='POST',
body=urlencode(kwargs),
callback=self.stop)
res = self.wait()
if not allow_errors:
self.assertEqual(200, int(res.code))
return res
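# Illustrative sketch of a concrete test built on this base class (not part of
# the original module; the URL is an assumption about the app under test):
#
#   class ExampleTest(BaseTest):
#       def test_root(self):
#           res = self.get('/', allow_errors=True)
#           self.assertIn(int(res.code), (200, 404))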
|
#
# Solved Problems in Geostatistics
#
# ------------------------------------------------
# Script for lesson 5.3
# "Variogram Modeling and Volume Variance"
# ------------------------------------------------
import sys
sys.path.append(r'../shared')
from numpy import *
from geo import *
from matplotlib import *
from pylab import *
from scipy import *
from gslib import *
from variogram_routines import *
#---------------------------------------------------
# Problem:
#
# Model the experimental semivariograms from Part 2 of the previous Problem 5.2
# using maximum of two nested structures. All directions must be modeled using
# the same structures and variance contributions for each structure, but each
# structure may have different range parameters.
#
# ----------------------------------------------------
nugget = 0
sill_hor1 = 15
var_range_hor1 = 4000
sill_hor2 = 20
var_range_hor2 = 5000
sill_ver = 11
var_range_ver = 35
def exp_var(sill, nugget, var_range, h_vect):
Gamma = zeros((len(h_vect)), dtype = float32)
for i in xrange(len(h_vect)):
Gamma[i] = (sill - nugget) * (1 - exp(float(-h_vect[i])*3/(var_range)))
return Gamma
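# e.g. exp_var(15, 0, 4000, [0, 2000, 4000]) rises from 0 towards the sill (15);
# at h == var_range the model reaches ~95% of the sill because of the factor 3
# in the exponent (1 - exp(-3) ~= 0.95).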
# Loading sample data from file
dict = load_gslib_file("allwelldata.txt")
x_coord = dict['X']
y_coord = dict['Y']
z_coord = dict['Z']
poro_values = dict['Por']
# Lets make a PointSet
PointSet = {}
PointSet['X'] = x_coord
PointSet['Y'] = y_coord
PointSet['Z'] = z_coord
PointSet['Property'] = poro_values
IndicatorData = []
IndicatorData.append(poro_values)
Params = {'HardData':IndicatorData}
Function = CalcVariogramFunction
#Suggested Parameters for Horizontal Variogram 1:
#Azimuth = 320 (Azimut)
#Dip = 0 (Dip)
#Lag Distance = 550 m (LagWidth, LagSeparation)
#Horizontal Bandwidth = 500 m (R2)
#Vertical Bandwidth = 5 m (R3)
#Number of Lags = 11 (NumLags)
XVariogram, XLagDistance1 = PointSetScanContStyle(TVVariogramSearchTemplate(
LagWidth = 550, LagSeparation = 550, TolDistance = 450, NumLags = 12,
Ellipsoid = TVEllipsoid(R1 = 1, R2 = 500, R3 = 5, Azimut = 320, Dip = 0, Rotation = 0)
), PointSet, Function, Params)
Variogram_hor1 = XVariogram[:, 0]
print "Horizontal XVariogram 1:"
print Variogram_hor1
#Suggested Parameters for Horizontal Variogram 2:
#Azimuth = 230 (Azimut)
#Dip = 0 (Dip)
#Lag Distance = 550 m (LagWidth, LagSeparation)
#Horizontal Bandwidth = 500 m (R2)
#Vertical Bandwidth = 5 m (R3)
#Number of Lags = 11 (NumLags)
XVariogram, XLagDistance2 = PointSetScanContStyle(TVVariogramSearchTemplate(
LagWidth = 550, LagSeparation = 550, TolDistance = 450, NumLags = 12,
Ellipsoid = TVEllipsoid(R1 = 1, R2 = 500, R3 = 5, Azimut = 230, Dip = 0, Rotation = 0)
), PointSet, Function, Params)
Variogram_hor2 = XVariogram[:, 0]
print "Horizontal XVariogram 2:"
print Variogram_hor2
#Calculate Gamma for horizontal semivariogram 1 and 2
Gamma1 = exp_var(sill_hor1, nugget, var_range_hor1, range(min(XLagDistance1), max(XLagDistance1), 1))
print "Gamma for horizontal semivariogram 1: ", Gamma1
Gamma2 = exp_var(sill_hor2, nugget, var_range_hor2, range(min(XLagDistance2), max(XLagDistance2), 1))
print "Gamma for horizontal semivariogram 2: ", Gamma2
#Experimental horizontal semivariogram 1 and 2
figure()
plot(XLagDistance1, Variogram_hor1, 'bo', color = 'blue')
plot(range(min(XLagDistance1), max(XLagDistance1), 1), Gamma1, color = 'blue')
plot(XLagDistance2, Variogram_hor2, 'bo', color = 'green')
plot(range(min(XLagDistance2), max(XLagDistance2), 1), Gamma2, color = 'green')
xlabel("Distance")
ylabel("Gamma")
title("Experimental horizontal semivariogram")
#Suggested Parameters for Vertical Variogram:
#Azimuth = 0 (Azimut)
#Dip = 90 (Dip)
#Lag Distance = 4 m (LagWidth, LagSeparation)
#Horizontal Bandwidth = 0.0 m (R2)
#Vertical Bandwidth = 10 m (R3)
#Number of Lags = 10 (NumLags)
XVariogram, XLagDistance = PointSetScanContStyle(TVVariogramSearchTemplate(
LagWidth = 4, LagSeparation = 4, TolDistance = 4, NumLags = 11,
Ellipsoid = TVEllipsoid(R1 = 1, R2 = 0.1, R3 = 10, Azimut = 0, Dip = 90, Rotation = 0)
), PointSet, Function, Params)
Variogram_ver = XVariogram[:, 0]
print "Vertical Variogram:"
print Variogram_ver
#Calculate Gamma for vertical semivariogram
Gamma = exp_var(sill_ver, nugget, var_range_ver, range(min(XLagDistance), max(XLagDistance), 1))
print "Gamma for vartical semivariogram: ", Gamma
#Variogram modeling results for the vertical direction
figure()
plot(XLagDistance, Variogram_ver, 'bo')
plot(range(min(XLagDistance), max(XLagDistance), 1), Gamma)
xlabel("Distance")
ylabel("Gamma")
title("Variogram modeling results for the vertical direction")
show()
|
import os
import json
import tweepy
import logging
logger = logging.getLogger()
logging.basicConfig(filename='simar_roturas.log', filemode='a', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',level=logging.INFO)
#twitter loggin and api creation
def create_api():
consumer_key = os.getenv("CONSUMER_KEY")
consumer_secret = os.getenv("CONSUMER_SECRET")
access_token = os.getenv("ACCESS_TOKEN")
access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
try:
api.verify_credentials()
except Exception as e:
logger.error("Error creating API", exc_info=True)
raise e
#logger.info("API created")
return api
# create report message and tweet it
def send_report(tipo, elem):
report = ""
freguesia = ""
local = ""
if elem['freguesia']:
freguesia = " na freguesia de %s" % elem['freguesia']
if elem['local']:
local = " no local %s" % elem['local']
interruption_info = "%s%s com data de fim para dia %s às %s" % (freguesia, local, elem['dia'], elem['hora'])
interruption_info = interruption_info.replace(" ", " ")
report = "[%s]%s. #SIMAR_ROTURA" % (tipo.upper(), interruption_info.capitalize())
logging.info(report)
api = create_api()
api.update_status(report)
logging.debug("Tweet Sent")
def search_element_in_array(local, freguesia, array):
for elem in array:
if elem['freguesia'] == freguesia and elem['local'] == local:
return elem
return
def main():
    # check if there is already a processed version of occurrences;
    # if not, copy current occurrences to processed
logging.info("Running 'process_occurrences.py")
    # no 'roturas.json' file - it means the scrapy script had problems running. Do nothing
if not os.path.isfile('roturas.json'):
logging.debug("No 'roturas.json' file found. Exiting")
return
# There is a current file to be processed
else:
# If there is no previous "roturas_processadas.json" file
if not os.path.isfile('roturas_processadas.json'):
logging.debug("No 'roturas_processadas.json' file found.")
with open('roturas.json', 'r') as json_file:
data = json.load(json_file)
data = data[0]
logging.debug("'roturas.json' file found. Processing any entry as 'NEW'.")
# Throws Alert for each entry and in the end saves as processed file (rename file)
for elem in data['roturas']:
if elem:
send_report("nova", elem)
json_file.close()
os.rename('roturas.json', 'roturas_processadas.json')
return
# There is a previous "roturas_processadas.json" file
else:
logging.debug("'roturas_processadas.json' file found. Processing entries...")
processadas_file = open('roturas_processadas.json', 'r')
roturas_file = open('roturas.json', 'r')
processadas_data = json.load(processadas_file)
processadas_data = processadas_data[0]
roturas_data = json.load(roturas_file)
roturas_data = roturas_data[0]
for elem in roturas_data['roturas']:
result = search_element_in_array(elem['local'], elem['freguesia'], processadas_data['roturas'])
#if found a match in already processed
if result:
# if although its same location has an updated resolution time
if not (elem['dia'] == result['dia'] and elem['hora'] == result['hora']):
send_report("actualizada", elem)
# otherwise its new
else:
#new occurrence
send_report("nova", elem)
for processed in processadas_data['roturas']:
result = search_element_in_array(processed['local'], processed['freguesia'], roturas_data['roturas'])
# if not found in recent events then it was solved
if not result:
send_report("resolvida", processed)
# Close fds and rename/remove files
processadas_file.close()
roturas_file.close()
os.remove('roturas_processadas.json')
os.rename('roturas.json', 'roturas_processadas.json')
if __name__ == "__main__":
main()
|
"""Tests the changelog activity."""
import os
import json
from rever import vcsutils
from rever.logger import current_logger
from rever.main import env_main
REVER_XSH = """
$ACTIVITIES = ['authors']
$PROJECT = "WAKKA"
$AUTHORS_FILENAME = "AUTHORS.md"
$AUTHORS_METADATA = "authors.yaml"
$AUTHORS_MAILMAP = "Mailmap"
$AUTHORS_LATEST = "latest.json"
"""
def test_authors(gitrepo):
vcsutils.tag('42.1.0')
files = [('rever.xsh', REVER_XSH),
]
for filename, body in files:
with open(filename, 'w') as f:
f.write(body)
vcsutils.track('.')
vcsutils.commit('initial authors')
env_main(['42.1.1'])
# now see if this worked
files = os.listdir('.')
assert 'AUTHORS.md' in files
assert 'authors.yaml' in files
assert 'Mailmap' in files
assert 'latest.json' in files
# test authors file
with open('AUTHORS.md') as f:
auth = f.read()
assert auth.startswith("All of the people who have made at least one contribution to WAKKA.\n"
"Authors are sorted by number of commits.\n")
# test latest file
with open("latest.json") as f:
latest = json.load(f)
assert isinstance(latest, list)
assert len(latest) == 1
assert isinstance(latest[0], str)
assert '@' in latest[0]
    # ensure that the updates were committed
logger = current_logger()
entries = logger.load()
assert entries[-2]['rev'] != entries[-1]['rev']
SETUP_XSH = """
$PROJECT = 'castlehouse'
$ACTIVITIES = ['authors']
$AUTHORS_FILENAME = "AUTHORS.md"
$AUTHORS_METADATA = "authors.yaml"
$AUTHORS_MAILMAP = "Mailmap"
$AUTHORS_TEMPLATE = "My project is $PROJECT"
"""
def test_changelog_setup(gitrepo):
files = [('rever.xsh', SETUP_XSH),
]
for filename, body in files:
with open(filename, 'w') as f:
f.write(body)
vcsutils.track('.')
vcsutils.commit('initial authors')
env_main(['setup'])
# now see if this worked
files = os.listdir('.')
assert 'AUTHORS.md' in files
assert 'Mailmap' in files
with open('AUTHORS.md') as f:
auth = f.read()
assert 'My project is castlehouse\n' == auth
|
import numpy as np
import pandas as pd
import warnings
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler, QuantileTransformer, LabelEncoder, FunctionTransformer
from sklearn.preprocessing import PowerTransformer
from .functions import calculate_numerical_bins, modify_histogram_edges
class WrapperFunctionTransformer:
"""Wrapper class for FunctionTransformer - defines custom __str__ method.
Attributes:
str_repr (str): text used in str method
transformer (sklearn.preprocessing.FunctionTransformer): FunctionTransformer that is used for fit/transform
"""
def __init__(self, str_repr, func_transformer):
"""Create WrapperFunctionTransformer object.
Args:
str_repr (str): text used in str method
func_transformer (sklearn.preprocessing.FunctionTransformer): FunctionTransformer that is used for
fit/transform
"""
self.str_repr = str_repr
self.transformer = func_transformer
def fit(self, *args, **kwargs):
"""Call fit on transformer attribute."""
self.transformer.fit(*args, **kwargs)
return self
def fit_transform(self, *args, **kwargs):
"""Call fit_transform on transformer attribute."""
return self.transformer.fit_transform(*args, **kwargs)
def get_params(self, *args, **kwargs):
"""Call get_params on transformer attribute."""
return self.transformer.get_params(*args, **kwargs)
def inverse_transform(self, *args, **kwargs):
"""Call inverse_transformer on transformer attribute."""
return self.transformer.inverse_transform(*args, **kwargs)
def set_params(self, **kwargs):
"""Call set_params on transformer attribute."""
self.transformer.set_params(**kwargs)
return self
def transform(self, *args, **kwargs):
"""Call transform on transformer attribute."""
return self.transformer.transform(*args, **kwargs)
def __str__(self):
"""Return str of str_repr attribute.
Returns:
str
"""
return str(self.str_repr)
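# Illustrative usage sketch (not part of the original module); ``X`` is a
# hypothetical feature matrix:
#
#   identity = WrapperFunctionTransformer(
#       "FunctionTransformer(identity)", FunctionTransformer()
#   )
#   identity.fit_transform(X)   # delegates to the wrapped FunctionTransformer
#   str(identity)               # -> "FunctionTransformer(identity)"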
class Transformer:
"""Transformer that transforms input data based on the type of it.
Transformer loosely follows sklearn API and it also defines fit_y, transform_y methods etc. to transform not only
X but y as well.
Default preprocessor for X is a ColumnTransformer that does different transformations on
Categorical and Numerical Features. If the feature is not included in either of those two, it gets added to the
final output unchanged.
Default preprocessor for y is either LabelEncoder for classification or placeholder FunctionTransformer (with
identity function) for regression - in that case Models get wrapped in RegressionWrapper during fitting.
If transformers different from default are needed, then they can be also injected via set_custom_preprocessor
methods.
Note:
Some methods might work differently when custom transformers are in place.
Attributes:
categorical_features (list): list of categorical features names
numerical_features (list): list of numerical features names
target_type (str): feature type of the target
random_state (int): integer for reproducibility on fitting and transformations, defaults to None if not
provided during __init__
classification_pos_label (Any): label that is treated as positive (1) in y, defaults to None if not provided
during __init__
categorical_transformers (list): list of transformers for categorical features
numerical_transformers (list): list of transformers for numerical features
y_transformer (Transformer): transformer for target (y)
preprocessor_X (sklearn.pipeline.pipeline): pipeline made from categorical and numerical transformers
preprocessor_y (Transformer): attribute made for consistency, equals to y_transformer
"""
_default_categorical_transformers = [
SimpleImputer(strategy="most_frequent"),
OneHotEncoder(handle_unknown="ignore")
]
_default_numerical_transformers = [
SimpleImputer(strategy="median"),
QuantileTransformer(output_distribution="normal"),
StandardScaler()
]
_default_transformers_random_state = [QuantileTransformer]
_categorical_pipeline = "categorical"
_numerical_pipeline = "numerical"
_one_hot_encoder_tr_name = "onehotencoder"
_normal_transformers = [QuantileTransformer, PowerTransformer]
_func_trans_classification_pos_label_text = "FunctionTransformer(classification_pos_label='{}')"
_func_trans_regression_wrapper_text = "TransformedTargetRegressor(transformer=QuantileTransformer" \
"(output_distribution='normal')) "
def __init__(self,
categorical_features,
numerical_features,
target_type,
random_state=None,
classification_pos_label=None
):
"""Create Transformer object.
Set default transformers for X and y depending on provided categorical and numerical features lists and
target type.
Args:
categorical_features (list): list of categorical features names
numerical_features (list): list of numerical features names
target_type (str): feature type of the target
random_state (int, optional): integer for reproducibility on fitting and transformations, defaults to None
classification_pos_label (Any, optional): label that is treated as positive (1) in y, defaults to None
"""
self.categorical_features = categorical_features
self.numerical_features = numerical_features
self.target_type = target_type
self.random_state = random_state
self.classification_pos_label = classification_pos_label
self.categorical_transformers = self._check_random_state(self._default_categorical_transformers)
self.numerical_transformers = self._check_random_state(self._default_numerical_transformers)
self.y_transformer = self._create_default_transformer_y()
self.preprocessor_X = self._create_preprocessor_X()
self.preprocessor_y = self._create_preprocessor_y()
def fit(self, X):
"""Fit preprocessor_X with X data.
Args:
X (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): feature space to fit the transformer
Returns:
self
"""
self.preprocessor_X = self.preprocessor_X.fit(X)
return self
def transform(self, X):
"""Transform X with fitted preprocessor_X.
Args:
X (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): feature space to transform with the transformer
Returns:
numpy.ndarray, scipy.csr_matrix: transformed X
"""
transformed = self.preprocessor_X.transform(X)
return transformed
def fit_transform(self, X):
"""Fit data and then transform it with preprocessor_X.
Args:
X (pandas.DataFrame, numpy.ndarray, scipy.csr_matrix): feature space to fit and transform the transformer
Returns:
numpy.ndarray, scipy.csr_matrix: transformed X
"""
self.fit(X)
return self.transform(X)
def fit_y(self, y):
"""Fit preprocessor_y with y data.
Args:
y (pandas.Series, numpy.ndarray): feature space to fit the transformer
Returns:
self
"""
self.preprocessor_y = self.preprocessor_y.fit(y)
return self
def transform_y(self, y):
"""Transform y with fitted preprocessor_y.
Args:
y (pandas.Series, numpy.ndarray): feature space to transform with the transformer
Returns:
numpy.ndarray: transformed y
"""
transformed = self.preprocessor_y.transform(y)
return transformed
def fit_transform_y(self, y):
"""Fit data and then transform it with preprocessor_y.
Args:
y (pandas.Series, numpy.ndarray): feature space to fit the transformer
Returns:
numpy.ndarray: transformed y
"""
self.fit_y(y)
return self.transform_y(y)
def set_custom_preprocessor_X(self, categorical_transformers=None, numerical_transformers=None):
"""Set preprocessors for categorical and numerical features in X to be used later on in the process (e.g. with
fit or transform calls).
Both lists of transformers are optional - only one type of custom transformers can be set, the other one left
will be set to default transformers. If none of the lists are provided, then the method is equal to setting
default transformers.
Args:
categorical_transformers (list, optional): transformers to be used on categorical features, defaults to None
numerical_transformers (list, optional): transformers to be used on numerical features, defaults to None
"""
if categorical_transformers:
self.categorical_transformers = categorical_transformers
if numerical_transformers:
self.numerical_transformers = numerical_transformers
self.preprocessor_X = self._create_preprocessor_X()
def set_custom_preprocessor_y(self, transformer):
"""Set provided transformer as preprocessor for y to be used later on in the process (e.g. with fit or
transform calls).
Args:
transformer (Transformer): transformer that will be used to transform y (target)
"""
self.y_transformer = transformer
self.preprocessor_y = self._create_preprocessor_y()
def y_classes(self):
"""Return classes (labels) present in preprocessor_y.
Returns:
numpy.ndarray: array of classes present in fitted preprocessor_y
Raises:
ValueError: when target_type is 'Numerical'
"""
if self.target_type == "Numerical":
raise ValueError("No classes present in regression problem.")
return self.preprocessor_y.classes_
def transformed_columns(self):
"""Return list of names of transformed columns.
Numerical features list is combined with categorical features list and the output is returned. If
preprocessor_X transformers include 'one hot encoder', then special column names are extracted for every
new feature created. Otherwise, regular categorical features list is used.
Returns:
list: categorical + numerical features list
"""
# checking if there are any categorical_features at all
if len(self.categorical_features) > 0:
cat_transformers = self.preprocessor_X.named_transformers_[self._categorical_pipeline]
try:
onehot = cat_transformers[self._one_hot_encoder_tr_name]
categorical_cols = onehot.get_feature_names(self.categorical_features).tolist()
except KeyError:
categorical_cols = self.categorical_features
else:
categorical_cols = []
output = self.numerical_features + categorical_cols
return output
def transformations(self):
"""Return dictionary of transformers and transformations applied to every feature in X.
Structure of a returned dictionary is 'feature name': 2-element tuple - transformers used to transform the
feature and columns (array) of the result of transformations.
Note:
feature not included in either categorical or numerical features lists (feature with no transformations)
is not included
Returns:
dict: 'feature name': (transformers, transformations) tuple
"""
output = {}
new_cols = self.transformed_columns()
for feature in (self.categorical_features + self.numerical_features):
transformers = self.transformers(feature)
transformed_cols = [col for col in new_cols if col.startswith(feature)]
output[feature] = (transformers, transformed_cols)
return output
def y_transformations(self):
"""Return 1-element list of y_transformer.
Returns:
list: [y_transformer]
"""
return [self.y_transformer]
def transformers(self, feature):
"""Return transformers that are used to transform provided feature name depending on its type (categorical
or numerical), None otherwise.
Args:
feature (str): feature name
Return:
list, None: list of transformers or None
"""
if feature in self.categorical_features:
return self.categorical_transformers
elif feature in self.numerical_features:
return self.numerical_transformers
else:
return None
def normal_transformations_histograms(self, feature_train_data, feature_test_data):
"""Return dict of 'feature name': histogram data for every transformer for every feature in train/test data,
where histograms are calculated after different normalization methods of feature data.
Features are normalized with different methods (QuantileTransformer, Yeo-Johnson, Box-Cox) and histograms
are calculated from the transformed data to visualize which transformation makes the feature 'most' normal.
Note:
Transformers are fit on train data, whereas histograms are calculated from transformed test data.
Args:
feature_train_data (pandas.DataFrame): feature space on which transformers will be trained
feature_test_data (pandas.DataFrame): feature space from which histograms will be created
Returns:
dict: 'feature name': list of tuples (normal transformer, (hist_data, left_edges, right_edges))
"""
output = {}
for feature in feature_train_data.columns:
train_series = feature_train_data[feature].to_numpy().reshape(-1, 1)
test_series = feature_test_data[feature].to_numpy().reshape(-1, 1)
transformations = self.normal_transformations(train_series, test_series)
histogram_data = []
for transformer, transformed_series in transformations:
series = pd.Series(transformed_series.reshape(1, -1)[0])
bins = calculate_numerical_bins(series)
hist, edges = np.histogram(series, density=True, bins=bins)
left_edges, right_edges = modify_histogram_edges(edges)
result = (transformer, (hist, left_edges, right_edges))
histogram_data.append(result)
output[feature] = histogram_data
return output
def normal_transformations(self, single_feature_train_data, single_feature_test_data):
"""Return differently normalized (transformed) feature data depending on the transformer used.
Normalizing Transformers used are QuantileTransformer (with output_distribution='normal'), PowerTransformer for
Yeo-Johnson method and PowerTransformer for Box-Cox method. Transformers are fit on train data and output is
created on test data.
Note:
if feature includes values of 0 or less (in either train or test) then Box-Cox method is not included
in transformations
Args:
single_feature_train_data (pd.Series, numpy.ndarray): numerical feature data on which transformers are
trained
single_feature_test_data (pd.Series, numpy.ndarray): numerical feature data which is transformed by
fitted 'normal' Transformers
Returns:
list: list of tuples - (transformer used, transformed test data)
"""
normal_transformers = [
QuantileTransformer(output_distribution="normal", random_state=self.random_state),
PowerTransformer(method="yeo-johnson")
]
# box-cox works only with positive values, 0s excluded
nmin = np.nanmin
if nmin(single_feature_train_data) > 0 and nmin(single_feature_test_data) > 0:
normal_transformers.append(PowerTransformer(method="box-cox"))
output = []
for transformer in normal_transformers:
pipe = make_pipeline(
SimpleImputer(strategy="median"), # imputing missing values with their median
transformer
)
# Catching warnings, mostly the n_quantiles warning from QuantileTransformer
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pipe.fit(single_feature_train_data)
output.append((transformer, pipe.transform(single_feature_test_data)))
return output
def _create_preprocessor_X(self):
"""Create preprocessor for X features with different Transformers for Categorical and Numerical features.
ColumnTransformer is created with categorical/numerical_features attributes as feature names and categorical/
numerical_transformers attributes as Transformers. Any feature not included in categorical/numerical_features
attributes is left untransformed and concatenated at the end of the output (remainder='passthrough').
Returns:
sklearn.compose.ColumnTransformer: ColumnTransformer for X features
"""
# https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html
# https://scikit-learn.org/stable/modules/compose.html
numerical_features = self.numerical_features
categorical_features = self.categorical_features
numeric_transformer = make_pipeline(*self.numerical_transformers)
categorical_transformer = make_pipeline(*self.categorical_transformers)
col_transformer = ColumnTransformer(
transformers=[
(self._numerical_pipeline, numeric_transformer, numerical_features),
(self._categorical_pipeline, categorical_transformer, categorical_features)
],
remainder="passthrough"
)
return col_transformer
def _create_default_transformer_y(self):
"""Create default transformer for y (target) depending on the target_type attribute.
If the target is Categorical, a regular LabelEncoder is created unless the classification_pos_label attribute
is set, in which case that value is mapped to positive (1) and every other value to 0. If the target
is Numerical then no transformation takes place, as RegressionWrapper takes the Model as an argument, not the features.
Returns:
Any: y (target) Transformer
Raises:
ValueError: if target_type is not in ["Categorical", "Numerical"]
"""
if self.target_type == "Categorical":
if self.classification_pos_label is not None:
func_transformer = FunctionTransformer(lambda x: np.where(x == self.classification_pos_label, 1, 0))
text = self._func_trans_classification_pos_label_text.format(self.classification_pos_label)
# Wrapper for custom text to be shown in Views
transformer = WrapperFunctionTransformer(text, func_transformer)
else:
transformer = LabelEncoder()
elif self.target_type == "Numerical":
# in regression, the model finder wraps its model object in TargetRegressor
func_transformer = FunctionTransformer(func=None) # identity function, x = x
text = self._func_trans_regression_wrapper_text
# Wrapper for custom text to be shown in Views
transformer = WrapperFunctionTransformer(text, func_transformer)
else:
raise ValueError(
"Target type set as {target_type}, should be Categorical or Numerical".format(
target_type=self.target_type
)
)
return transformer
def _create_preprocessor_y(self):
"""Return y_transformer attribute.
Returns:
Any: y_transformer attribute
"""
return self.y_transformer
def _check_random_state(self, transformers):
"""Check provided transformers and if any of them is included in _default_transformers_random_state class
attribute list then add random_state instance attribute to it.
This method shouldn't be used for custom transformers - those should have their random state already defined
during initialization.
Args:
transformers (list): list of transformers
Returns:
list: transformers with random_state added (if appropriate)
"""
for transformer in transformers:
if transformer.__class__ in self._default_transformers_random_state:
transformer.random_state = self.random_state
return transformers
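# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the preprocessing
# layout described in _create_preprocessor_X (per-type pipelines inside a
# ColumnTransformer with remainder='passthrough') and the candidate "normal"
# transformers compared in normal_transformations, built directly from
# scikit-learn parts. The toy DataFrame and the specific transformers chosen
# here are assumptions for demonstration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    from sklearn.compose import ColumnTransformer
    from sklearn.impute import SimpleImputer
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import (OneHotEncoder, PowerTransformer,
                                       QuantileTransformer, StandardScaler)
    df = pd.DataFrame({
        "color": ["red", "blue", "red", "green"],
        "price": [10.0, np.nan, 12.5, 9.0],
    })
    # Numerical pipeline: impute then scale; categorical pipeline: impute then one-hot encode.
    preprocessor = ColumnTransformer(
        transformers=[
            ("numerical", make_pipeline(SimpleImputer(strategy="median"),
                                        StandardScaler()), ["price"]),
            ("categorical", make_pipeline(SimpleImputer(strategy="most_frequent"),
                                          OneHotEncoder(handle_unknown="ignore")), ["color"]),
        ],
        remainder="passthrough",
    )
    print(preprocessor.fit_transform(df))
    # Candidate normalizing transformers; box-cox is only valid for strictly positive data.
    train = df[["price"]].fillna(df["price"].median()).to_numpy()
    for transformer in (QuantileTransformer(output_distribution="normal",
                                            n_quantiles=4, random_state=0),
                        PowerTransformer(method="yeo-johnson"),
                        PowerTransformer(method="box-cox")):
        print(type(transformer).__name__, transformer.fit_transform(train).ravel())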
|
# coding: utf-8
# In[1]:
import pandas as pd
# In[12]:
ufo = pd.read_csv('http://bit.ly/uforeports')
# In[13]:
type(ufo)
# In[14]:
ufo.head()
# In[19]:
type(ufo['City'])
# In[20]:
ufo['City'].head()
# In[23]:
ufo.City.head() # same as ufo['City'].head()
# In[26]:
ufo['Colors Reported'].head() # dot notation does not work here because of the space in 'Colors Reported'
# In[29]:
type(ufo.shape)
# In[30]:
ufo.shape[0] # returns total rows
# In[32]:
ufo.shape[1] # returns total columns
# In[34]:
city_state = ufo.City + '[' + ufo.State + ']'
# In[35]:
city_state.head()
# In[41]:
# create a new series named Location
# should use bracket notation to create a series
ufo['Location'] = ufo.City + ', ' + ufo.State
# In[42]:
ufo.head()
# In[ ]:
# In[ ]:
|
from .Event import Event, EventType, EventCategory
class MouseEvent(Event):
def __init__(self) -> None:
pass
@property
def CategoryFlags(self) -> int:
return EventCategory.Mouse | EventCategory.Input
class MouseButtonPressedEvent(MouseEvent):
__slots__ = "__ButtonCode"
def __init__(self, buttonCode: int) -> None:
self.__ButtonCode = buttonCode
@property
def ButtonCode(self) -> int:
return self.__ButtonCode
@property
def EventType(self) -> int:
return EventType.MouseButtonPressed
@property
def CategoryFlags(self) -> int:
return super().CategoryFlags | EventCategory.MouseButton
def ToString(self) -> str:
return "<MouseButtonPressedEvent: {}>".format(self.__ButtonCode)
class MouseButtonReleasedEvent(MouseEvent):
__slots__ = "__ButtonCode"
def __init__(self, buttonCode: int) -> None:
self.__ButtonCode = buttonCode
@property
def ButtonCode(self) -> int:
return self.__ButtonCode
@property
def EventType(self) -> int:
return EventType.MouseButtonReleased
@property
def CategoryFlags(self) -> int:
return super().CategoryFlags | EventCategory.MouseButton
def ToString(self) -> str:
return "<MouseButtonReleasedEvent: {}>".format(self.__ButtonCode)
class MouseMovedEvent(MouseEvent):
__slots__ = "__OffsetX", "__OffsetY"
def __init__(self, offsetX: int, offsetY: int) -> None:
self.__OffsetX = offsetX
self.__OffsetY = offsetY
@property
def OffsetX(self) -> int:
return self.__OffsetX
@property
def OffsetY(self) -> int:
return self.__OffsetY
@property
def EventType(self) -> int:
return EventType.MouseMoved
def ToString(self) -> str:
return "<MouseMovedEvent: {}, {}>".format(self.__OffsetX, self.__OffsetY)
class MouseScrolledEvent(MouseEvent):
__slots__ = "__OffsetX", "__OffsetY"
def __init__(self, offsetY: int, offsetX: int=0) -> None:
self.__OffsetX = offsetX
self.__OffsetY = offsetY
@property
def OffsetX(self) -> int:
return self.__OffsetX
@property
def OffsetY(self) -> int:
return self.__OffsetY
@property
def EventType(self) -> int:
return EventType.MouseScrolled
def ToString(self) -> str:
return "<MouseScrolledEvent: {}, {}>".format(self.__OffsetX, self.__OffsetY)
|
###############################################################################
# Copyright 2014 OCLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from authliboclc import wskey
import urllib2
import pickle
"""Django view generator"""
def index(request):
"""You must fill in the clientID and secret with your WSKey parameters"""
key = '{clientID}'
secret = '{secret}'
"""Default values for the Sandbox Institution. You may want to change them to your institution's values"""
authenticating_institution_id = '128807'
context_institution_id = '128807'
"""We use the Worldcat Metadata API to test the Access Token"""
services = ['WorldCatMetadataAPI', 'refresh_token']
"""The response object is where we write data to display on the page."""
response = HttpResponse()
response.write("""<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="https://static1.worldcat.org/yui/combo?pure/0.4.2/pure-min.css">
<style>
body {width:800px; margin:auto}
h1 {font-size: 150%}
td {vertical-align:top}
.error {line-height: 140%; background-color: lightcoral; padding: 3px 20px; border-radius: 12px}
pre {font-size: 83%; margin-top:0}
</style>
</head>
<body>
<h1>Authentication Token & Bibliographic Record - Django Example</h1>
""")
"""The redirect URI is calculated here and must match the redirect URI assigned to your WSKey"""
if request.is_secure():
"""You must use SSL to request an Access token"""
redirect_uri = 'https://' + request.get_host() + request.path # https://localhost:8000/auth/
else:
"""This won't work."""
redirect_uri = 'http://' + request.get_host() + request.path # http://localhost:8000/auth/
"""Populate the WSKey object's parameters"""
my_wskey = wskey.Wskey(
key=key,
secret=secret,
options={
'services': services,
'redirect_uri': redirect_uri
}
)
access_token = None
"""If an access_token is stored in the current session load it."""
if request.session.get('access_token', None) != None:
access_token = pickle.loads(request.session.get('access_token', None))
"""If the access_token we loaded is expired, we can't use it."""
if access_token != None and access_token.is_expired():
access_token = None
"""If there is a code parameter on the current URL, load it."""
code = request.GET.get('code', None)
"""If there are error parameters on the current URL, load them."""
error = request.GET.get('error', None)
errorDescription = request.GET.get('error_description', None)
if error != None:
"""If an error was returned, display it."""
response.write('<p class="error">Error: ' + error + '<br>' + errorDescription + '</p>')
elif access_token == None and code == None:
"""Initiate user authentication by executing a redirect to the IDM sign in page."""
login_url = my_wskey.get_login_url(
authenticating_institution_id=authenticating_institution_id,
context_institution_id=context_institution_id
)
response['Location'] = login_url
response.status_code = '303'
elif access_token == None and code != None:
"""Request an access token using the user authentication code returned after the user authenticated"""
"""Then request a bibliographic record"""
access_token = my_wskey.get_access_token_with_auth_code(
code=code,
authenticating_institution_id=authenticating_institution_id,
context_institution_id=context_institution_id
)
if access_token.error_code == None:
request.session['access_token'] = pickle.dumps(access_token)
response.write('<p><strong>Access Token</strong> NOT FOUND in this session, so I requested a new one.</p>')
response.write(format_access_token(access_token=access_token))
if access_token.error_code == None:
response.write(get_bib_record(access_token=access_token, wskey=my_wskey))
elif access_token != None:
"""We already have an Access Token, so display the token and request a Bibliographic Record"""
if access_token.error_code == None:
response.write('<p><strong>Access Token</strong> found in this session, and it is still valid.</p>')
response.write(format_access_token(access_token=access_token))
if access_token.error_code == None:
response.write(get_bib_record(access_token=access_token, wskey=my_wskey))
return response
def format_access_token(access_token):
"""Display all the parameters of the Access Token"""
print(access_token)
ret = '<h2>Access Token</h2>'
ret += '<table class="pure-table">'
if access_token.error_code != None:
ret += '<tr><td>Error Code</td><td>' + str(access_token.error_code) + '</td></tr>'
ret += '<tr><td>Error Message</td><td>' + str(access_token.error_message) + '</td></tr>'
ret += ('<tr><td>Error Url</td><td><pre>' +
str(access_token.error_url).replace('?', '?\n').replace('&', '\n&') + '</pre></td></tr>')
else:
ret += '<tr><td>access_token</td><td>' + str(access_token.access_token_string) + '</td></tr>'
ret += '<tr><td>token_type</td><td>' + str(access_token.type) + '</td></tr>'
ret += '<tr><td>expires_at</td><td>' + str(access_token.expires_at) + '</td></tr>'
ret += '<tr><td>expires_in</td><td>' + str(access_token.expires_in) + '</td></tr>'
if access_token.user != None:
ret += '<tr><td>principalID</td><td>' + str(access_token.user.principal_id) + '</td></tr>'
ret += '<tr><td>principalIDNS</td><td>' + str(access_token.user.principal_idns) + '</td></tr>'
ret += '<tr><td>contextInstitutionId</td><td>' + str(access_token.context_institution_id) + '</td></tr>'
if access_token.refresh_token != None:
ret += '<tr><td>refresh_token</td><td>' + str(access_token.refresh_token.refresh_token) + '</td></tr>'
ret += '<tr><td>refresh_token_expires_at</td><td>' + str(
access_token.refresh_token.expires_at) + '</td></tr>'
ret += '<tr><td>refresh_token_expires_in</td><td>' + str(
access_token.refresh_token.expires_in) + '</td></tr>'
ret += '</table>'
return ret
def get_bib_record(access_token, wskey):
"""Use an Access Token's User Parameter to request a Bibliographic Record"""
request_url = (
'https://worldcat.org/bib/data/823520553?' +
'classificationScheme=LibraryOfCongress' +
'&holdingLibraryCode=MAIN'
)
authorization_header = wskey.get_hmac_signature(
method='GET',
request_url=request_url,
options={
'user': access_token.user
}
)
my_request = urllib2.Request(
url=request_url,
data=None,
headers={'Authorization': authorization_header}
)
try:
xml_result = urllib2.urlopen(my_request).read()
except urllib2.HTTPError, e:
xml_result = str(e)
ret = '<h2>Bibliographic Record</h2>'
ret += '<pre>' + xml_result.replace('<', '&lt;') + '</pre>'
return ret
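# Porting note (illustrative, not part of the original example): this module is
# written for Python 2 (urllib2, "except ..., e" syntax). On Python 3 the HTTP
# call in get_bib_record would use urllib.request/urllib.error instead, e.g.:
#
#   import urllib.request, urllib.error
#   my_request = urllib.request.Request(url=request_url, data=None,
#                                       headers={'Authorization': authorization_header})
#   try:
#       xml_result = urllib.request.urlopen(my_request).read().decode('utf-8')
#   except urllib.error.HTTPError as e:
#       xml_result = str(e)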
|
import json
from django.urls import reverse
from django_dynamic_fixture import G
from guardian.shortcuts import assign_perm
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from accounts.models import Child, DemographicData, User
from studies.models import ConsentRuling, Lab, Response, Study
from studies.permissions import LabPermission, StudyPermission
class ResponseTestCase(APITestCase):
def setUp(self):
self.researcher = G(
User, is_active=True, is_researcher=True, given_name="Researcher 1"
)
self.participant = G(User, is_active=True, given_name="Participant 1")
self.superuser = G(User, is_active=True, is_researcher=True, is_superuser=True)
self.demographics = G(
DemographicData, user=self.participant, languages_spoken_at_home="French"
)
self.participant.save()
self.child = G(Child, user=self.participant, given_name="Sally")
self.child_of_researcher = G(Child, user=self.researcher, given_name="Grace")
self.lab = G(Lab, name="MIT")
self.study = G(Study, creator=self.researcher, lab=self.lab)
self.response = G(Response, child=self.child, study=self.study, completed=False)
self.consented_response = G(
Response,
child=self.child,
study=self.study,
completed=False,
completed_consent_frame=True,
)
self.positive_consent_ruling = G(
ConsentRuling, response=self.consented_response, action="accepted"
)
self.url = reverse("api:response-list", kwargs={"version": "v1"})
self.response_detail_url = f"{self.url}{self.response.uuid}/"
self.consented_response_detail_url = (
f"{self.url}{self.consented_response.uuid}/"
)
self.client = APIClient()
self.data = {
"data": {
"attributes": {
"global_event_timings": [],
"exp_data": {"first_frame": {}, "second_frame": {}},
"sequence": [],
"completed": False,
"completed_consent_frame": False,
},
"relationships": {
"child": {"data": {"type": "children", "id": str(self.child.uuid)}},
"study": {"data": {"type": "studies", "id": str(self.study.uuid)}},
},
"type": "responses",
}
}
self.patch_data = {
"data": {
"attributes": {
"global_event_timings": [],
"exp_data": {"first_frame": {}, "second_frame": {}},
"sequence": ["first_frame", "second_frame"],
"completed": True,
"completed_consent_frame": True,
},
"type": "responses",
"id": str(self.response.uuid),
}
}
# Response List test
def testGetResponseListUnauthenticated(self):
# Must be authenticated to view participants
api_response = self.client.get(
self.url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_401_UNAUTHORIZED)
def testGetResponseOrderingReverseDateModified(self):
assign_perm(
StudyPermission.READ_STUDY_RESPONSE_DATA.prefixed_codename,
self.researcher,
self.study,
)
self.client.force_authenticate(user=self.researcher)
self.response2 = G(
Response,
child=self.child,
study=self.study,
completed=False,
completed_consent_frame=True,
)
self.positive_consent_ruling2 = G(
ConsentRuling, response=self.response2, action="accepted"
)
self.response3 = G(
Response,
child=self.child,
study=self.study,
completed=False,
completed_consent_frame=True,
)
self.positive_consent_ruling3 = G(
ConsentRuling, response=self.response3, action="accepted"
)
api_response = self.client.get(
self.url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data["links"]["meta"]["count"], 3)
self.assertIn(str(self.response3.uuid), api_response.data["results"][0]["url"])
self.assertIn(str(self.response2.uuid), api_response.data["results"][1]["url"])
self.assertIn(
str(self.consented_response.uuid), api_response.data["results"][2]["url"]
)
def testGetResponsesListByOwnChildren(self):
# Participant can view their own responses
self.client.force_authenticate(user=self.participant)
api_response = self.client.get(
self.url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
# Should get both the consented and unconsented.
self.assertEqual(api_response.data["links"]["meta"]["count"], 2)
def testGetResponsesListViewStudyPermissions(self):
# Can view study permissions insufficient to view responses
assign_perm(
StudyPermission.READ_STUDY_DETAILS.prefixed_codename,
self.researcher,
self.study,
)
assign_perm(
StudyPermission.WRITE_STUDY_DETAILS.prefixed_codename,
self.researcher,
self.study,
)
self.lab.admin_group.user_set.add(self.researcher)
self.lab.save()
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(
self.url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data["links"]["meta"]["count"], 0)
def testGetResponsesListViewStudyResponsesPermissions(self):
# With can_view_study_responses permissions, can view study responses
assign_perm(
StudyPermission.READ_STUDY_RESPONSE_DATA.prefixed_codename,
self.researcher,
self.study,
)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(
self.url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data["links"]["meta"]["count"], 1)
def testGetResponsesListLabwideViewStudyResponsesPermissions(self):
assign_perm(
LabPermission.READ_STUDY_RESPONSE_DATA.prefixed_codename,
self.researcher,
self.study.lab,
)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(
self.url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data["links"]["meta"]["count"], 1)
# Response Detail tests
def testGetResponseDetailUnauthenticated(self):
# Can't view response detail unless authenticated
api_response = self.client.get(
self.consented_response_detail_url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_401_UNAUTHORIZED)
def testGetResponseDetailByOwnChildren(self):
# Participant can view their own response detail regardless of consent coding
self.client.force_authenticate(user=self.participant)
api_response = self.client.get(
self.response_detail_url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
def testGetResponseDetailViewStudyPermissions(self):
# Can view study permissions insufficient to view responses
assign_perm(
StudyPermission.READ_STUDY_DETAILS.prefixed_codename,
self.researcher,
self.study,
)
assign_perm(
StudyPermission.WRITE_STUDY_DETAILS.prefixed_codename,
self.researcher,
self.study,
)
self.lab.admin_group.user_set.add(self.researcher)
self.lab.save()
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(
self.consented_response_detail_url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_404_NOT_FOUND)
def testGetResponseDetailViewStudyResponsesPermissions(self):
# With can_view_study_responses permissions, can view study response detail,
# but still can't view non-consented response
assign_perm(
StudyPermission.READ_STUDY_RESPONSE_DATA.prefixed_codename,
self.researcher,
self.study,
)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(
self.response_detail_url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_404_NOT_FOUND)
def testGetResponseDetailViewStudyResponsesPermissionsAfterConsent(self):
# With can_view_study_responses permissions, can view study response detail
assign_perm(
StudyPermission.READ_STUDY_RESPONSE_DATA.prefixed_codename,
self.researcher,
self.study,
)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(
self.consented_response_detail_url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data["completed"], False)
self.assertEqual(api_response.data["completed_consent_frame"], True)
def testGetResponseDetailLabwideViewStudyResponsesPermissionsAfterConsent(self):
# With can_view_study_responses permissions, can view study response detail
assign_perm(
LabPermission.READ_STUDY_RESPONSE_DATA.prefixed_codename,
self.researcher,
self.study.lab,
)
self.client.force_authenticate(user=self.researcher)
api_response = self.client.get(
self.consented_response_detail_url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data["completed"], False)
self.assertEqual(api_response.data["completed_consent_frame"], True)
# POST Responses tests
def testPostResponse(self):
self.client.force_authenticate(user=self.participant)
api_response = self.client.post(
self.url, json.dumps(self.data), content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_201_CREATED)
self.assertEqual(api_response.data["completed"], False)
self.assertEqual(Response.objects.count(), 3)
def testPostResponseWithNotYourChild(self):
self.client.force_authenticate(user=self.participant)
self.data["data"]["relationships"]["child"]["data"]["id"] = str(
self.child_of_researcher.uuid
)
api_response = self.client.post(
self.url, json.dumps(self.data), content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_403_FORBIDDEN)
def testPostResponseNeedDataHeader(self):
self.client.force_authenticate(user=self.participant)
data = {}
api_response = self.client.post(
self.url, json.dumps(data), content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(api_response.data[0]["detail"].code, "parse_error")
self.assertEqual(api_response.data[0]["source"]["pointer"], "/data")
def testPostResponseNeedResponsesType(self):
self.client.force_authenticate(user=self.participant)
self.data["data"]["type"] = "bad"
api_response = self.client.post(
self.url, json.dumps(self.data), content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_409_CONFLICT)
def testPostResponseNeedChildRelationship(self):
self.client.force_authenticate(user=self.participant)
self.data["data"]["relationships"] = {
"study": {"data": {"type": "studies", "id": str(self.study.uuid)}}
}
api_response = self.client.post(
self.url, json.dumps(self.data), content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(api_response.data[0]["detail"].code, "required")
self.assertEqual(
api_response.data[0]["source"]["pointer"], "/data/attributes/child"
)
def testPostResponseNeedStudyRelationship(self):
self.client.force_authenticate(user=self.participant)
self.data["data"]["relationships"] = {
"child": {"data": {"type": "children", "id": str(self.child.uuid)}}
}
api_response = self.client.post(
self.url, json.dumps(self.data), content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(api_response.data[0]["detail"].code, "required")
self.assertEqual(
api_response.data[0]["source"]["pointer"], "/data/attributes/study"
)
def testPostResponseWithEmptyAttributes(self):
self.client.force_authenticate(user=self.participant)
self.data["data"]["attributes"] = {}
api_response = self.client.post(
self.url, json.dumps(self.data), content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_201_CREATED)
# PATCH responses
def testPatchResponseAttributes(self):
self.client.force_authenticate(user=self.participant)
api_response = self.client.patch(
self.response_detail_url,
json.dumps(self.patch_data),
content_type="application/vnd.api+json",
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(api_response.data["sequence"], ["first_frame", "second_frame"])
def testPatchResponseAttributesTopLevelKeysUnderscoredOnly(self):
self.client.force_authenticate(user=self.participant)
data = {
"data": {
"id": str(self.response.uuid),
"attributes": {
"conditions": {
"8-pref-phys-videos": {
"startType": 5,
"showStay": 8,
"whichObjects": [8, 5, 6, 8],
}
},
"global_event_timings": [],
"exp_data": {
"0-0-video-config": {
"eventTimings": [
{
"eventType": "nextFrame",
"timestamp": "2017-10-31T20:09:47.479Z",
}
]
},
"1-1-video-consent": {
"videoId": "e729321f-418f-4728-992c-9364623dbe9b_1-video-consent_bdebd15b-adc7-4377-b2f6-e9f3de70dd19",
"eventTimings": [
{
"eventType": "hasCamAccess",
"timestamp": "2017-10-31T20:09:48.096Z",
"hasCamAccess": "True",
"videoId": "e729321f-418f-4728-992c-9364623dbe9b_1-video-consent_bdebd15b-adc7-4377-b2f6-e9f3de70dd19",
"streamTime": "None",
},
{
"eventType": "videoStreamConnection",
"timestamp": "2017-10-31T20:09:50.534Z",
"status": "NetConnection.Connect.Success",
"videoId": "e729321f-418f-4728-992c-9364623dbe9b_1-video-consent_bdebd15b-adc7-4377-b2f6-e9f3de70dd19",
"streamTime": "None",
},
{
"eventType": "stoppingCapture",
"timestamp": "2017-10-31T20:09:53.051Z",
"videoId": "e729321f-418f-4728-992c-9364623dbe9b_1-video-consent_bdebd15b-adc7-4377-b2f6-e9f3de70dd19",
"streamTime": 2.459,
},
{
"eventType": "nextFrame",
"timestamp": "2017-10-31T20:09:53.651Z",
"videoId": "e729321f-418f-4728-992c-9364623dbe9b_1-video-consent_bdebd15b-adc7-4377-b2f6-e9f3de70dd19",
"streamTime": "None",
},
],
},
"2-2-instructions": {
"confirmationCode": "6YdLL",
"eventTimings": [
{
"eventType": "nextFrame",
"timestamp": "2017-10-31T20:09:56.472Z",
}
],
},
"5-5-mood-survey": {
"rested": "1",
"healthy": "1",
"childHappy": "1",
"active": "1",
"energetic": "1",
"ontopofstuff": "1",
"parentHappy": "1",
"napWakeUp": "11:00",
"usualNapSchedule": "no",
"lastEat": "6:00",
"doingBefore": "s",
"eventTimings": [
{
"eventType": "nextFrame",
"timestamp": "2017-10-31T20:10:17.269Z",
}
],
},
},
"sequence": [
"0-0-video-config",
"1-1-video-consent",
"2-2-instructions",
"5-5-mood-survey",
],
"completed": "False",
"completed_consent_frame": "true",
},
"type": "responses",
}
}
api_response = self.client.patch(
self.response_detail_url,
json.dumps(data),
content_type="application/vnd.api+json",
)
self.assertEqual(api_response.status_code, status.HTTP_200_OK)
self.assertEqual(
api_response.data["sequence"],
[
"0-0-video-config",
"1-1-video-consent",
"2-2-instructions",
"5-5-mood-survey",
],
)
self.assertEqual(
api_response.data["exp_data"]["0-0-video-config"],
{
"eventTimings": [
{"eventType": "nextFrame", "timestamp": "2017-10-31T20:09:47.479Z"}
]
},
)
self.assertEqual(
api_response.data["exp_data"]["5-5-mood-survey"],
{
"rested": "1",
"healthy": "1",
"childHappy": "1",
"active": "1",
"energetic": "1",
"ontopofstuff": "1",
"parentHappy": "1",
"napWakeUp": "11:00",
"usualNapSchedule": "no",
"lastEat": "6:00",
"doingBefore": "s",
"eventTimings": [
{"eventType": "nextFrame", "timestamp": "2017-10-31T20:10:17.269Z"}
],
},
)
# Delete responses
def testDeleteResponse(self):
self.client.force_authenticate(user=self.superuser)
api_response = self.client.delete(
self.response_detail_url, content_type="application/vnd.api+json"
)
self.assertEqual(api_response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
|
# This file makes a python package out of the project.
# This way you can access other modules in the project
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="example-pkg-LoSSeth",
version="0.0.1",
author="Jimmy Bierenbroodspot",
author_email="aph095ba08@gmail.com",
description="A casino game made for a challenge",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/LoSSeth/casino",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.8',
)
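# Illustrative usage (not part of the original file): with this setup.py in the
# project root, the package can be installed for development or built into
# distributable archives. Commands shown as comments:
#
#   pip install -e .          # editable install into the current environment
#   python -m build           # build sdist/wheel (requires the 'build' package)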
|
import logging
import math
from datetime import datetime, timedelta
from custom_components.smappee import DATA_SMAPPEE, DOMAIN
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.util import dt as dt_util
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
SENSOR_PREFIX = 'Smappee'
SENSOR_TYPES = {
'power': ['Power', 'mdi:power-plug', 'W'],
'cosphi': ['Cosphi', 'mdi:power-plug', 'factor'],
}
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup_platform(hass, config, add_devices, discovery_info=None):
""" setup the sensor platform for smappee """
smappee = hass.data[DATA_SMAPPEE]
dev = []
for sensor in SENSOR_TYPES:
dev.append(SmappeeSensor(smappee, sensor))
add_devices(dev)
return True
class SmappeeSensor(Entity):
"""Implementation of a Smappee sensor."""
def __init__(self, smappee, sensor):
"""Initialize the sensor."""
self._smappee = smappee
self._sensor = sensor
self.data = None
self._state = None
self._timestamp = None
self.update()
@property
def name(self):
return "{} {}".format(SENSOR_PREFIX, SENSOR_TYPES[self._sensor][0])
@property
def icon(self):
return SENSOR_TYPES[self._sensor][1]
@property
def state(self):
return self._state
@property
def unit_of_measurement(self):
return SENSOR_TYPES[self._sensor][2]
@property
def device_state_attributes(self):
attr = {}
attr['Last Update'] = self._timestamp
return attr
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
data = self._smappee.loadInstantaneous()
if data:
if SENSOR_TYPES[self._sensor][0] == 'Power':
self._timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self._state = round(float(data["power"]) / 1000, 2)
elif SENSOR_TYPES[self._sensor][0] == 'Cosphi':
self._timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self._state = float(data["cosphi"])
else:
return None
return None
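# Illustrative configuration sketch (not part of the original file): one way this
# platform is typically enabled from configuration.yaml, assuming the custom
# `smappee` component that populates hass.data[DATA_SMAPPEE] is configured
# separately. Depending on that component, the platform may instead be loaded
# via discovery rather than listed explicitly.
#
#   sensor:
#     - platform: smappee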
|