#!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
from collections import OrderedDict
def checkio(data):
data = ["".join(OrderedDict.fromkeys(d)) for d in data]
letters = sorted(set("".join(data)))
ans = ""
while letters:
for letter in letters:
if not any(letter in d[1:] for d in data):
ans += letter
break
data = [d.replace(letter, "") for d in data]
letters.remove(letter)
return ans
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio(["llo","low","lino","itttnosw"]) == "litnosw"
assert checkio(["acb", "bd", "zwa"]) == "zwacbd", \
"Just concatenate it"
assert checkio(["klm", "kadl", "lsm"]) == "kadlsm", \
"Paste in"
assert checkio(["a", "b", "c"]) == "abc", \
"Cant determine the order - use english alphabet"
assert checkio(["aazzss","aa"]) == "azs", \
"Each symbol only once"
assert checkio(["dfg", "frt", "tyg"]) == "dfrtyg", \
"Concatenate and paste in"
|
import logging
import os
from phenoai.manager.weather_station_manager import WeatherStationManager
from phenoai.manager.data_persistance_manager import DataPersistanceManager
from typing import Optional
from settings.constants import VITIGEOSS_CONFIG_FILE, VITIGEOSS_DATA_ROOT, VITIGEOSS_PHENO_PHASES_DIR
from settings.instance import settings
from fastapi import HTTPException
import pandas as pd
import json
logger = logging.getLogger()
def get_places_info(place: Optional[str] = None, variety: Optional[str] = None):
result = {'places': []}
with open(VITIGEOSS_CONFIG_FILE) as f:
config = json.loads(f.read())
places = config['places']
if place:
if place not in places:
raise HTTPException(status_code=404, detail=f'Place {place} not found')
places = [place]
for _place in places:
place_info = {
'name': _place,
'weather-station': config['place_to_station'][_place],
'varieties': config['place_to_varieties'][_place]
}
if variety and variety not in place_info['varieties']:
continue
result['places'].append(place_info)
if variety and len(result['places']) == 0:
raise HTTPException(status_code=404, detail=f'Variety {variety} not found')
return result
def get_pheno_phases_df(place: str, variety: str):
df_path = get_pheno_phases_csv_path(place, variety)
if os.path.exists(df_path):
        return pd.read_csv(df_path, sep=',', on_bad_lines='skip', index_col=0)  # on_bad_lines replaces the error_bad_lines argument deprecated in pandas 1.3 and removed in 2.0
    raise HTTPException(status_code=404, detail='Phenological phases dataframe not found. You must build it first.')
def get_pheno_phases_csv_path(place: str, variety: str):
return os.path.join(VITIGEOSS_DATA_ROOT, VITIGEOSS_PHENO_PHASES_DIR, f'{place}_{variety}_pheno_phases_df.csv')
def get_input_data_df(place: str, variety: str, year: int, dpm: DataPersistanceManager):
try:
return dpm.load_df(place=place, variety=variety, year=year, force_new=False, fail_if_not_found=True).fillna(0.0) # Use df.fillna(0.0) to avoid problems with NaN values on Swagger
except FileNotFoundError:
raise HTTPException(status_code=404, detail=f'Input data not found for place {place}, variety {variety} and year {year}')
def update_input_data_df(place: str, variety: str, year: int, force_new: bool,
dpm: DataPersistanceManager, wsm: WeatherStationManager):
df = dpm.load_df(place=place, variety=variety, year=year, force_new=force_new)
weather_station_missing_rows = df[df[settings.input_data_source] != 'WS']
if weather_station_missing_rows.empty:
return df.fillna(0.0) # Use df.fillna(0.0) to avoid problems with NaN values on Swagger
update_df = wsm.get_wsdata_df(place, weather_station_missing_rows)
if update_df is None:
return df.fillna(0.0) # Use df.fillna(0.0) to avoid problems with NaN values on Swagger
df = df.combine_first(update_df) # Update NaN values of the df with values of the update_df
dpm.save_df(df)
return df.fillna(0.0) # Use df.fillna(0.0) to avoid problems with NaN values on Swagger
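# Illustration (not part of the original module) of the combine_first() call above:
# it fills only the NaN cells of `df` with the values from `update_df` at the same
# positions and leaves existing values untouched, e.g.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"temp": [21.0, None, None]})
#   >>> update_df = pd.DataFrame({"temp": [99.0, 18.5, 17.0]})
#   >>> df.combine_first(update_df)
#      temp
#   0  21.0
#   1  18.5
#   2  17.0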
|
"""
Binary search
Slicing is O(k)
Complexity: O(log(n))
"""
def search_binary(input_array, value):
low = 0
high = len(input_array) - 1
le = len(input_array)
if le == 0:
return False
else:
mid = (low + high) // 2
if input_array[mid] == value:
return True
elif input_array[mid] > value:
return search_binary(input_array[low: mid], value)
elif input_array[mid] < value:
return search_binary(input_array[mid + 1:], value)
    return False  # unreachable for comparable values; kept so the function always returns a bool
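
# A minimal iterative variant (a sketch, not part of the original file): it avoids
# the list copies made by slicing, so the whole search is O(log n), and it always
# returns a plain True/False.
def search_binary_iterative(input_array, value):
    low, high = 0, len(input_array) - 1
    while low <= high:
        mid = (low + high) // 2
        if input_array[mid] == value:
            return True
        elif input_array[mid] < value:
            low = mid + 1   # the value can only be in the right half
        else:
            high = mid - 1  # the value can only be in the left half
    return False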
if __name__ == '__main__':
test_list = [1, 3, 9, 11, 15, 19, 29, 31]
test_val1 = 25
test_val2 = 15
test_val3 = 31
print (search_binary(test_list, test_val1))
print (search_binary(test_list, test_val2))
print (search_binary(test_list, test_val3))
|
import os
import pathlib
import shutil
import airsimdroneracinglab
def mod_client_file():
as_path = pathlib.Path(airsimdroneracinglab.__file__).resolve().parent
dir_path = os.path.dirname(os.path.realpath(__file__))
mod_path = os.path.join(dir_path, 'client.py')
target_path = os.path.join(as_path, 'client.py')
shutil.copyfile(src=mod_path, dst=target_path)
|
#!/usr/bin/python3
import os
from formula import formula
system = os.environ.get("RIT_SYSTEM")
linux_os = os.environ.get("RIT_LINUX_OS")
formula.Run(system, linux_os)
|
# notes: figure out how to import word2vec embeddings
# pre-process -- text -> one-hot -> padding? -> embed -> concat -> lstm
#             -> numeric input
# create a second model w/ an NLP pipeline
from tensorflow.keras import layers  # assumed import; the original notes used `layers` without importing it

vocab_size = 1000  # revisit
model_inputN = layers.Input(shape=(19, 125))  # numeric input: 19 time steps x 125 features
# text input
model_inputT = layers.Input(shape=(19, 200))  # assuming ~200 token ids per time step
embed = layers.Embedding(vocab_size, 64)(model_inputT)  # embedding for text -> (batch, 19, 200, 64)
# One possible way (an assumption, not spelled out in the notes) to make the shapes compatible:
# flatten each time step's embeddings before concatenating with the numeric input on the feature axis.
embed_flat = layers.Reshape((19, 200 * 64))(embed)
concat = layers.Concatenate(axis=-1)([embed_flat, model_inputN])
lstm = layers.LSTM(125, dropout=0.2)(concat)
|
#!/usr/bin/env python
# manager class allows creation of manager object to use as a higher
# level API to connect to and interact with the YANG models on the device
from ncclient import manager
device = {
'host': 'r1.lab.local',
'username': 'wwt',
'password': 'WWTwwt1!',
'port': 830
}
def main():
# # Connect to the device using the manager object
# mgr = manager.connect(
# host=device['host'],
# username=device['username'],
# password=device['password'],
# port=device['port'],
# hostkey_verify=False
# )
# # Display Capabilities advertised by the device(server)
# for capability in mgr.server_capabilities:
# print(capability)
# # Close the netconf session
# mgr.close_session()
    # Refactored to use a context manager.
    # A double asterisk (**) in front of a dict unpacks it into keyword arguments;
    # a single asterisk (*) unpacks an iterable into positional arguments.
    # A context manager is any class that defines __enter__ and __exit__ methods,
    # which is what lets manager.connect(...) be used in a `with` statement.
with manager.connect(**device, hostkey_verify=False) as mgr:
for capability in mgr.server_capabilities:
print(capability)
if __name__ == "__main__":
main()
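
# A minimal sketch (not from the original script) of the context-manager protocol
# referred to in the comments above: any class that defines __enter__ and __exit__
# can be used in a `with` statement, which is what manager.connect(...) provides.
class DemoSession:
    def __enter__(self):
        print("session opened")
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # runs even if the with-block raises, so cleanup is guaranteed
        print("session closed")

# Example usage:
# with DemoSession() as session:
#     print("doing work inside the session")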
|
import pygame
#def drawBall(window):
# ball = pygame.image.load("ball2.png")
# window.blit(ball,(int(ball_pos[x]-10),int(ball_pos[y]-10)))
from INI import *
#import pygame
class Ball:
def __init__(self, window):
self.window = window
#self.ball = pygame.image.load("ball2.png")
self.X = int(ball_pos[x])
self.Y = int(ball_pos[y])
self.radius = 10
self.speed = [0 , 0]
self.goal = [0 , 0]
#self.X = 300
#self.Y = 200
pygame.draw.circle(self.window, black, (self.X, self.Y), self.radius)
def move (self):
self.X = self.X + self.speed[0]
self.Y = self.Y + self.speed[1]
k = 0.9
self.speed[0] = int(k * self.speed[0])
self.speed[1] = int(k * self.speed[1])
print(self.speed)
if (self.X > win_width or self.X < 0):
if (self.Y > 165 and self.Y < 240):
if (self.X < 0):
self.goal = [0 , 1]
else:
self.goal = [1 , 0]
else:
self.speed[0] *= -1
if (self.Y > win_height or self.Y < 0):
self.speed[1] *= -1
pygame.draw.circle(self.window, black, (self.X, self.Y), self.radius)
|
"""
edx-lint
========
A collection of code quality tools:
- A few pylint plugins to check for quality issues pylint misses.
- A command-line tool to generate config files like pylintrc from a master
file (part of edx_lint), and a repo-specific tweaks file.
"""
import setuptools
def load_requirements(*requirements_paths):
"""
Load all requirements from the specified requirements files.
Returns:
        list: Requirement specifier strings parsed from the given files
"""
requirements = set()
for path in requirements_paths:
requirements.update(
line.split('#')[0].strip() for line in open(path).readlines()
if is_requirement(line.strip())
)
return list(requirements)
def is_requirement(line):
"""
Return True if the requirement line is a package requirement.
Returns:
        bool: True if the line is a real requirement, i.e. not blank and not
            a comment, URL, or included requirements/constraints file
"""
return line and not line.startswith(("-r", "#", "-e", "git+", "-c"))
setuptools.setup(
include_package_data=True,
install_requires=load_requirements("requirements/base.in"),
)
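
# Illustrative self-check (not part of the original setup.py) showing how
# is_requirement() classifies typical lines from a requirements file:
assert is_requirement("pylint==2.4.4")                              # a real requirement
assert not is_requirement("# quality tools")                        # comment
assert not is_requirement("-r base.in")                             # included requirements file
assert not is_requirement("-c constraints.txt")                     # constraints file
assert not is_requirement("-e git+https://example.invalid/repo")    # editable/VCS install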
|
#!/usr/bin/env python3
# -*- coding: iso-8859-1 -*-
#pylint: disable=missing-docstring, line-too-long, empty-docstring, too-many-locals, invalid-name, trailing-newlines
#-------------------------------------------------------------------------------
#
# Project: ViRES-Aeolus
# Purpose: Aeolus -- get settings from the "config.ini" file
# registration/deregistration accordingly
# Authors: Christian Schiller
# Copyright(C): 2016 - EOX IT Services GmbH, Vienna, Austria
# Email: christian dot schiller at eox dot at
# Date: 2017-05-22
# License: MIT License (MIT)
#
#-------------------------------------------------------------------------------
# The MIT License (MIT):
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
"""
# Project: ViRES-Aeolus
# Purpose: Aeolus -- get settings from the "config.ini" file
# registration/deregistration accordingly
# Authors: Christian Schiller
# Copyright(C): 2017 - EOX IT Services GmbH, Vienna, Austria
# Email: christian dot schiller at eox dot at
# Date: 2017-05-22
# License: MIT License (MIT)
0.1: - initial version
0.2: - changed to python 3
"""
from __future__ import print_function
import os.path
import ast
import configparser
__version__ = '0.2'
def get_config(conf_file=None):
"""
read the config file provided or use the default in the current dir
"""
# print("I'm in "+sys._getframe().f_code.co_name)
config = {}
cp = configparser.ConfigParser()
if conf_file is None:
cfile = os.path.join(os.path.dirname(__file__), "config.ini")
else:
if conf_file.startswith('/'):
cfile = conf_file
else:
cfile = os.path.join(os.path.dirname(__file__), conf_file)
cp.read(cfile)
for sec in cp.sections():
name = str.lower(sec)
for opt in cp.options(sec):
value = str.strip(cp.get(sec, opt))
try:
if value in ('None', 'False', 'True'):
config[name +'.'+ str.lower(opt)] = ast.literal_eval(value)
elif value.startswith(('[', '{')):
config[name +'.'+ str.lower(opt)] = ast.literal_eval(value)
elif value == '/':
value = '' #ast.literal_eval(value)
config[name +'.'+ str.lower(opt)] = value
elif isinstance(value, str) and value.startswith('/'):
if value.endswith('/'):
value = value[:-1]
config[name +'.'+ str.lower(opt)] = value
elif isinstance(value, str) and value.isalnum():
value = ast.literal_eval(value)
config[name +'.'+ str.lower(opt)] = value
elif isinstance(value, int):
value = ast.literal_eval(value)
config[name +'.'+ str.lower(opt)] = value
else:
config[name +'.'+ str.lower(opt)] = value
except ValueError:
config[name +'.'+ str.lower(opt)] = value
return config
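
# Illustration (an assumption about typical input, not part of the original module)
# of how get_config() flattens an INI file into a dict keyed "section.option":
#
#   [Server]
#   host = /var/www/
#   port = 8080
#   debug = False
#
# would yield roughly:
#
#   {'server.host': '/var/www', 'server.port': 8080, 'server.debug': False}
#
# Section and option names are lower-cased; trailing slashes are stripped from
# path-like values; alphanumeric values and 'None'/'True'/'False' go through
# ast.literal_eval (falling back to the raw string on failure); everything else
# is kept as a plain string.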
|
#
# (C) 2014-2017 Seiji Matsuoka
# Licensed under the MIT License (MIT)
# http://opensource.org/licenses/MIT
#
"""Debug utility"""
import cProfile
import os
import pstats
import sys
from collections import deque
from itertools import chain
def total_size(obj, verbose=False):
"""Approximate memory size"""
seen = set()
def sizeof(o):
if id(o) in seen:
return 0
seen.add(id(o))
s = sys.getsizeof(o, default=0)
if verbose:
print(s, type(o), repr(o))
if isinstance(o, (tuple, list, set, frozenset, deque)):
s += sum(map(sizeof, iter(o)))
elif isinstance(o, dict):
s += sum(map(sizeof, chain.from_iterable(o.items())))
elif "__dict__" in dir(o):
s += sum(map(sizeof, chain.from_iterable(o.__dict__.items())))
return s
return sizeof(obj)
def total_size_str(obj):
"""Formatted approximate memory size"""
s = total_size(obj)
if 1 > s / 1000.0:
return "{} Bytes".format(s)
if 1 > s / 1000000.0:
return "{} KB".format(round(s / 1000.0, 1))
if 1 > s / 1000000000.0:
return "{} MB".format(round(s / 1000000.0, 1))
return "{} GB".format(round(s / 1000000000.0, 1))
def profile(func):
"""Decorator to execute cProfile"""
def _f(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
print("\n<<<---")
res = func(*args, **kwargs)
p = pstats.Stats(pr)
p.strip_dirs().sort_stats('cumtime').print_stats(20)
print("\n--->>>")
return res
return _f
def mute(func):
"""Decorator to mute stdout"""
def _f(*args, **kwargs):
sys.stdout = open(os.devnull, 'w')
res = func(*args, **kwargs)
sys.stdout.close()
sys.stdout = sys.__stdout__
return res
return _f
|
from __future__ import annotations
from typing import List
from logger import logger
from common_utils.base.basic import BasicLoadableHandler, BasicHandler
from .objects import NDDS_Annotation_Object
class NDDS_Annotation_Object_Handler(
BasicLoadableHandler['NDDS_Annotation_Object_Handler', 'NDDS_Annotation_Object'],
BasicHandler['NDDS_Annotation_Object_Handler', 'NDDS_Annotation_Object']
):
def __init__(self, ndds_obj_list: List[NDDS_Annotation_Object]=None):
super().__init__(obj_type=NDDS_Annotation_Object, obj_list=ndds_obj_list)
self.objects = self.obj_list
@classmethod
def from_dict_list(cls, dict_list: List[dict]) -> NDDS_Annotation_Object_Handler:
return NDDS_Annotation_Object_Handler(
ndds_obj_list=[NDDS_Annotation_Object.from_dict(obj_dict) for obj_dict in dict_list]
)
def delete(self, idx_list: List[int], verbose: bool=False, verbose_ref=None):
if len(idx_list) > 0 and verbose and verbose_ref:
logger.info(f'verbose_ref: {verbose_ref}')
for i in idx_list:
if verbose:
if verbose_ref:
logger.info(f'\tDeleted duplicate of {self.objects[i].class_name}')
else:
logger.info(f'Deleted duplicate of {self.objects[i].class_name}')
del self[i]
def find_duplicates(self) -> List[int]:
duplicate_idx_list = []
for i in range(len(self)):
for j in range(i+1, len(self)):
if self[i].class_name == self[j].class_name:
if j not in duplicate_idx_list:
duplicate_idx_list.append(j)
return duplicate_idx_list
def delete_duplicates(self, verbose: bool=False, verbose_ref=None):
self.delete(
idx_list=self.find_duplicates(),
verbose=verbose, verbose_ref=verbose_ref
)
|
import json
from operator import attrgetter
from os.path import expanduser
from pathlib import Path
from typing import List, Tuple
from pydantic import BaseModel, ValidationError
import pytest
from reponews.config import ActivityPrefs, Configuration
from reponews.types import ActivityType, Repository, User
from reponews.util import UserError, get_default_state_file
DATA_DIR = Path(__file__).with_name("data")
def mkrepo(owner: str, name: str) -> Repository:
return Repository(
id="1234",
owner=User(login=owner, url=f"https://github.com/{owner}"),
name=name,
nameWithOwner=f"{owner}/{name}",
url=f"https://github.com/{owner}/{name}",
description="A repository",
descriptionHTML="A repository",
)
@pytest.mark.parametrize(
"tomlfile",
sorted((DATA_DIR / "config").glob("*.toml")),
ids=attrgetter("name"),
)
@pytest.mark.usefixtures("tmp_home")
def test_parse_config(tomlfile: Path) -> None:
config = Configuration.from_toml_file(tomlfile)
with tomlfile.with_suffix(".json").open() as fp:
expected = json.load(fp)
if expected["state_file"] is None:
expected["state_file"] = get_default_state_file()
for key in ("auth_token_file", "state_file"):
if expected[key] is not None:
expected[key] = expanduser(expected[key]).format(config=tomlfile.parent)
assert config.for_json() == expected
@pytest.mark.parametrize(
"tomlfile",
sorted((DATA_DIR / "bad-config").glob("*.toml")),
ids=attrgetter("name"),
)
def test_parse_bad_config(tomlfile: Path) -> None:
with pytest.raises(ValidationError):
Configuration.from_toml_file(tomlfile)
class InclusionCase(BaseModel):
included_owners: List[str]
included_repos: List[Tuple[str, str]]
not_excluded_repos: List[Tuple[str, str]]
excluded_repos: List[Tuple[str, str]]
@pytest.mark.parametrize(
"tomlfile",
sorted((DATA_DIR / "inclusions").glob("*.toml")),
ids=attrgetter("name"),
)
def test_inclusions(tomlfile: Path) -> None:
config = Configuration.from_toml_file(DATA_DIR / "inclusions" / tomlfile)
expected = InclusionCase.parse_file(tomlfile.with_suffix(".json"))
assert config.get_included_repo_owners() == expected.included_owners
assert config.get_included_repos() == expected.included_repos
for owner, name in expected.not_excluded_repos:
assert not config.is_repo_excluded(mkrepo(owner, name))
for owner, name in expected.excluded_repos:
assert config.is_repo_excluded(mkrepo(owner, name))
@pytest.mark.parametrize(
"tomlfile,repo,is_affiliated,prefs",
[
(
"empty.toml",
mkrepo("owner", "repo"),
True,
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=True,
tags=True,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
),
(
"empty.toml",
mkrepo("owner", "repo"),
False,
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=True,
tags=True,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
),
(
"full.toml",
mkrepo("owner", "repo"),
True,
ActivityPrefs(
issues=False,
pull_requests=False,
discussions=True,
releases=True,
tags=True,
released_tags=False,
stars=True,
forks=True,
my_activity=True,
),
),
(
"full.toml",
mkrepo("owner", "repo"),
False,
ActivityPrefs(
issues=False,
pull_requests=False,
discussions=True,
releases=True,
tags=True,
released_tags=False,
stars=True,
forks=True,
my_activity=True,
),
),
(
"repo-activity01.toml",
mkrepo("luser", "repo"),
True,
ActivityPrefs(
issues=False,
pull_requests=False,
discussions=True,
releases=True,
tags=True,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
),
(
"repo-activity01.toml",
mkrepo("luser", "repo"),
False,
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=True,
tags=True,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
),
(
"repo-activity01.toml",
mkrepo("owner", "project"),
False,
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=True,
tags=False,
released_tags=False,
stars=True,
forks=True,
my_activity=True,
),
),
(
"repo-activity01.toml",
mkrepo("owner", "repo"),
False,
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=False,
tags=False,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
),
(
"repo-activity01.toml",
mkrepo("owner", "repo"),
True,
ActivityPrefs(
issues=False,
pull_requests=False,
discussions=True,
releases=False,
tags=False,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
),
(
"repo-activity02.toml",
mkrepo("luser", "repo"),
True,
ActivityPrefs(
issues=True,
pull_requests=False,
discussions=False,
releases=True,
tags=True,
released_tags=False,
stars=False,
forks=False,
my_activity=False,
),
),
(
"repo-activity02.toml",
mkrepo("luser", "repo"),
False,
ActivityPrefs(
issues=False,
pull_requests=True,
discussions=False,
releases=True,
tags=True,
released_tags=False,
stars=False,
forks=False,
my_activity=False,
),
),
(
"repo-activity02.toml",
mkrepo("owner", "project"),
False,
ActivityPrefs(
issues=False,
pull_requests=True,
discussions=False,
releases=True,
tags=False,
released_tags=False,
stars=True,
forks=False,
my_activity=True,
),
),
(
"repo-activity02.toml",
mkrepo("owner", "repo"),
False,
ActivityPrefs(
issues=False,
pull_requests=True,
discussions=False,
releases=False,
tags=False,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
),
(
"repo-activity02.toml",
mkrepo("owner", "repo"),
True,
ActivityPrefs(
issues=True,
pull_requests=False,
discussions=False,
releases=False,
tags=False,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
),
(
"yestags.toml",
mkrepo("owner", "repo"),
True,
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=True,
tags=True,
released_tags=True,
stars=True,
forks=True,
my_activity=False,
),
),
(
"yestags.toml",
mkrepo("owner", "repo"),
False,
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=True,
tags=True,
released_tags=True,
stars=True,
forks=True,
my_activity=False,
),
),
],
)
def test_get_repo_activity_prefs(
tomlfile: str, repo: Repository, is_affiliated: bool, prefs: ActivityPrefs
) -> None:
config = Configuration.from_toml_file(DATA_DIR / "config" / tomlfile)
assert config.get_repo_activity_prefs(repo, is_affiliated) == prefs
@pytest.mark.parametrize(
"prefs,types",
[
(
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=True,
tags=True,
released_tags=False,
stars=True,
forks=True,
my_activity=False,
),
[
ActivityType.ISSUE,
ActivityType.PR,
ActivityType.DISCUSSION,
ActivityType.RELEASE,
ActivityType.TAG,
ActivityType.STAR,
ActivityType.FORK,
],
),
(
ActivityPrefs(
issues=False,
pull_requests=False,
discussions=False,
releases=False,
tags=False,
released_tags=True,
stars=False,
forks=False,
my_activity=True,
),
[],
),
(
ActivityPrefs(
issues=True,
pull_requests=True,
discussions=True,
releases=True,
tags=False,
released_tags=True,
stars=False,
forks=False,
my_activity=True,
),
[
ActivityType.ISSUE,
ActivityType.PR,
ActivityType.DISCUSSION,
ActivityType.RELEASE,
],
),
],
)
def test_get_activity_types(prefs: ActivityPrefs, types: List[ActivityType]) -> None:
assert prefs.get_activity_types() == types
def test_get_auth_token_explicit(
monkeypatch: pytest.MonkeyPatch, tmp_home: Path
) -> None:
monkeypatch.setenv("GITHUB_TOKEN", "QWERTY")
(tmp_home / "token.txt").write_text("abcdef\n")
config = Configuration(
auth_token="123456",
auth_token_file="~/token.txt",
)
assert config.get_auth_token() == "123456"
def test_get_auth_token_file(monkeypatch: pytest.MonkeyPatch, tmp_home: Path) -> None:
monkeypatch.setenv("GITHUB_TOKEN", "QWERTY")
(tmp_home / "token.txt").write_text("abcdef\n")
config = Configuration(auth_token_file="~/token.txt")
assert config.get_auth_token() == "abcdef"
@pytest.mark.usefixtures("tmp_home")
def test_get_auth_token_missing_file(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setenv("GITHUB_TOKEN", "QWERTY")
config = Configuration(auth_token_file="~/token.txt")
with pytest.raises(FileNotFoundError):
config.get_auth_token()
def test_get_auth_token_envvar1(
monkeypatch: pytest.MonkeyPatch, tmp_home: Path
) -> None:
monkeypatch.setenv("GITHUB_TOKEN", "QWERTY")
monkeypatch.setenv("GH_TOKEN", "QR")
(tmp_home / "token.txt").write_text("abcdef\n")
config = Configuration()
assert config.get_auth_token() == "QWERTY"
def test_get_auth_token_envvar1_empty(
monkeypatch: pytest.MonkeyPatch, tmp_home: Path
) -> None:
monkeypatch.setenv("GITHUB_TOKEN", "")
monkeypatch.setenv("GH_TOKEN", "QR")
(tmp_home / "token.txt").write_text("abcdef\n")
config = Configuration()
assert config.get_auth_token() == "QR"
def test_get_auth_token_envvar2(
monkeypatch: pytest.MonkeyPatch, tmp_home: Path
) -> None:
monkeypatch.delenv("GITHUB_TOKEN", raising=False)
monkeypatch.setenv("GH_TOKEN", "QR")
(tmp_home / "token.txt").write_text("abcdef\n")
config = Configuration()
assert config.get_auth_token() == "QR"
def test_get_auth_token_notset(monkeypatch: pytest.MonkeyPatch, tmp_home: Path) -> None:
monkeypatch.delenv("GITHUB_TOKEN", raising=False)
monkeypatch.delenv("GH_TOKEN", raising=False)
(tmp_home / "token.txt").write_text("abcdef\n")
config = Configuration()
with pytest.raises(UserError) as excinfo:
config.get_auth_token()
assert str(excinfo.value) == (
"GitHub OAuth token not set. Specify in config file or via"
" GITHUB_TOKEN or GH_TOKEN environment variable."
)
def test_get_auth_token_envvar2_empty(
monkeypatch: pytest.MonkeyPatch, tmp_home: Path
) -> None:
monkeypatch.delenv("GITHUB_TOKEN", raising=False)
monkeypatch.setenv("GH_TOKEN", "")
(tmp_home / "token.txt").write_text("abcdef\n")
config = Configuration()
with pytest.raises(UserError) as excinfo:
config.get_auth_token()
assert str(excinfo.value) == (
"GitHub OAuth token not set. Specify in config file or via"
" GITHUB_TOKEN or GH_TOKEN environment variable."
)
|
class CachedAttr:
def __init__(self, name, func) -> None:
self.name = name
self.func = func
def __get__(self, instance, owner):
try:
return instance.__dict__[self.name]
except KeyError:
result = self.func()
instance.__dict__[self.name] = result
return result
import time
def longRunFunction():
time.sleep(3)
return "this is a long run result"
class TestClass:
cachedAttr = CachedAttr('cachedAttr', longRunFunction)
tc = TestClass()
print(tc.cachedAttr)
print(tc.cachedAttr)
# this is a long run result
# this is a long run result
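
# How the caching works (explanatory note, not part of the original snippet):
# CachedAttr defines only __get__, so it is a non-data descriptor. The first
# access calls func() and stores the result in the instance's __dict__ under
# self.name; after that, normal attribute lookup finds the cached value in
# __dict__ directly and the descriptor is never invoked again.
#
# For values computed from `self`, the standard library offers the same idea via
# functools.cached_property (Python 3.8+), e.g. (a sketch, not equivalent code):
#
#   from functools import cached_property
#
#   class TestClass2:
#       @cached_property
#       def cachedAttr(self):
#           return longRunFunction()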
|
from datetime import datetime
from .db import db
class Song(db.Model):
__tablename__ = "songs"
id = db.Column(db.INTEGER, primary_key=True)
user_id = db.Column(db.INTEGER, db.ForeignKey("users.id"))
artist = db.Column(db.String(50))
name = db.Column(db.String(50), nullable=False)
url = db.Column(db.String(255), nullable=False)
normalized_data = db.Column(db.TEXT)
cover_image = db.Column(db.String(255))
genre = db.Column(db.String(255))
description = db.Column(db.TEXT)
release_date = db.Column(db.DateTime())
aws_unique_name = db.Column(db.String(255), unique=True)
duration = db.Column(db.Numeric(5, 2))
    uploaded_date = db.Column(db.DateTime, default=datetime.now)  # pass the callable so each insert gets its own timestamp, not the value computed once at import time
user = db.relationship("User", back_populates="song")
comments = db.relationship("Comment", back_populates="song")
likes = db.relationship("Like", back_populates="song")
def to_dict(self):
return {
"id" : self.id,
"user_id": self.user_id,
"artist": self.artist,
"name": self.name,
"url": self.url,
"normalized_data": self.normalized_data,
"cover_image": self.cover_image,
"genre": self.genre,
"description": self.description,
"release_date": self.release_date,
"duration": float(self.duration),
'uploaded_date': self.uploaded_date
}
|
from python.lib.board_state import BoardState
from .constants import (
DEFAULT_ELEVATION_MAP_SHAPE,
DEFAULT_ELEVATION_MAP,
DEFAULT_EXPECTED_OUTPUT_GRID_MAP,
)
def test_BoardState_create_grid_map_from_elevation_array():
elevation_map_2d = DEFAULT_ELEVATION_MAP
shape = DEFAULT_ELEVATION_MAP_SHAPE
grid_map = BoardState.create_grid_map_from_elevation_array(elevation_map_2d, shape)
expected_grid_map = DEFAULT_EXPECTED_OUTPUT_GRID_MAP
for k, v in grid_map.items():
# print(f"{k}: {v} -> {expected_grid_map[k]}")
assert k in expected_grid_map
assert v == expected_grid_map[k]
assert grid_map == expected_grid_map
def test_BoardState_create_position_list_from_2d_mask():
# a cross mask in a 3x4 neighborhood
mask_2d = [
[None, BoardState.FILLED_TILE, None],
[
BoardState.FILLED_TILE,
BoardState.MASK_DEFAULT_PIVOT_VALUE,
BoardState.FILLED_TILE,
],
[None, BoardState.FILLED_TILE, None],
[None, BoardState.FILLED_TILE, None],
]
shape = (3, 4)
# without a grid pivot
grid_map = BoardState.create_2d_position_list_from_2d_mask(mask_2d, shape)
expected_grid_map = {
(-1, 0): BoardState.FILLED_TILE,
(0, -1): BoardState.FILLED_TILE,
(0, 1): BoardState.FILLED_TILE,
(1, 0): BoardState.FILLED_TILE,
(0, 2): BoardState.FILLED_TILE,
}
assert grid_map == expected_grid_map
# with a grid pivot provided
grid_pivot_pos = (1, 1)
grid_map_rel = BoardState.create_2d_position_list_from_2d_mask(
mask_2d, shape, grid_pivot_pos
)
expected_grid_map_relative = {
(0, 1): BoardState.FILLED_TILE,
(1, 0): BoardState.FILLED_TILE,
(2, 1): BoardState.FILLED_TILE,
(1, 2): BoardState.FILLED_TILE,
(1, 3): BoardState.FILLED_TILE,
}
assert grid_map_rel == expected_grid_map_relative
def test_board_state():
span = 8 # the board's square radius
grid = []
for x in range(-(span - 1), span):
for y in range(-1, 2):
for z in range(-(span - 1), span):
grid.append((x, y, z))
board = BoardState(grid)
board.setup()
# Check center tile neighbors
expected_neighbors = BoardState.NEIGHBORING_DIRECTIONS_MASK
CENTER_TILE_POS = (0, 0, 0)
actual_neighbors = board.neighbors(CENTER_TILE_POS)
assert expected_neighbors == actual_neighbors
# Check corner cases
corners = [(-1, 0, -1), (1, 0, 1), (1, 0, -1), (-1, 0, 1)]
corner_positions = [((span - 1) * x, y, (span - 1) * z) for x, y, z in corners]
idx = 0
for corner_pos in corner_positions:
x, y, z = corners[idx]
s = span - 1
corner_neighbors = board.neighbors(corner_pos)
print(
"corner: ",
corners[idx],
corner_pos,
" corner_neighbors: ",
corner_neighbors,
)
assert len(corner_neighbors) == 6
invalid_tiles = [
((s * x) + x, y, s * z),
((s * x), y, (s * z) + z),
((s * x) + x, y + 1, s * z),
((s * x), y + 1, (s * z) + z),
((s * x) + x, y - 1, s * z),
((s * x), y - 1, (s * z) + z),
]
for invalid_tile in invalid_tiles:
assert invalid_tile not in corner_neighbors
idx += 1
# Check pathfinding
diagonal_path = board.astar(corner_positions[0], corner_positions[1])
diagonal_path_list = list(diagonal_path)
expected_diagonal_path = [
(-7, 0, -7),
(-6, 0, -7),
(-5, 0, -7),
(-4, 0, -7),
(-3, 0, -7),
(-2, 0, -7),
(-1, 0, -7),
(0, 0, -7),
(1, 0, -7),
(2, 0, -7),
(3, 0, -7),
(3, 0, -6),
(3, 0, -5),
(3, 0, -4),
(4, 0, -4),
(5, 0, -4),
(6, 0, -4),
(6, 0, -3),
(6, 0, -2),
(6, 0, -1),
(6, 0, 0),
(6, 0, 1),
(6, 0, 2),
(6, 0, 3),
(7, 0, 3),
(7, 0, 4),
(7, 0, 5),
(7, 0, 6),
(7, 0, 7),
]
assert diagonal_path_list == expected_diagonal_path
lateral_path = board.astar(corner_positions[0], corner_positions[2])
lateral_path_list = list(lateral_path)
expected_lateral_path = [
(-7, 0, -7),
(-6, 0, -7),
(-5, 0, -7),
(-4, 0, -7),
(-3, 0, -7),
(-2, 0, -7),
(-1, 0, -7),
(0, 0, -7),
(1, 0, -7),
(2, 0, -7),
(3, 0, -7),
(4, 0, -7),
(5, 0, -7),
(6, 0, -7),
(7, 0, -7),
]
assert lateral_path_list == expected_lateral_path
# Test movable area checking
movable_area_from_center = board.movable_area(CENTER_TILE_POS, distance_budget=2)
print("[")
for pos in movable_area_from_center:
print(f"{pos},")
print("]")
expected_movable_area = [
(0, -1, -1),
(0, -1, -2),
(-1, 1, -1),
(0, -1, 1),
(0, 1, 0),
(-1, 1, 1),
(2, 1, 0),
(1, -1, 1),
(1, -1, -1),
(0, 0, -1),
(0, 0, -2),
(0, 0, 1),
(1, 0, 1),
(1, 1, 0),
(1, 0, -1),
(-1, -1, -1),
(0, -1, 0),
(-1, -1, 1),
(-1, 1, 0),
(-2, 1, 0),
(0, 1, 2),
(1, -1, 0),
(2, -1, 0),
(-1, 0, 1),
(-1, 0, -1),
(0, 0, 0),
(1, 0, 0),
(2, 0, 0),
(-1, -1, 0),
(0, 1, -1),
(-2, -1, 0),
(0, -1, 2),
(0, 1, 1),
(0, 1, -2),
(-1, 0, 0),
(-2, 0, 0),
(0, 0, 2),
(1, 1, -1),
(1, 1, 1),
]
assert movable_area_from_center == expected_movable_area
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import itertools
import json
import logging
import os
import re
from typing import List
from urllib.parse import urljoin
from urllib.parse import urlparse
import aiohttp
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.retry import KazooRetry
from mypy_extensions import TypedDict
from retry import retry
from . import exceptions
from . import framework
from . import log
from . import mesos_file
from . import slave
from . import task
from . import util
from . import zookeeper
from paasta_tools.async_utils import async_ttl_cache
from paasta_tools.utils import get_user_agent
ZOOKEEPER_TIMEOUT = 1
INVALID_PATH = "{0} does not have a valid path. Did you forget /mesos?"
MISSING_MASTER = """unable to connect to a master at {0}.
Try running `mesos config master zk://localhost:2181/mesos`. See the README for
more examples."""
MULTIPLE_SLAVES = "There are multiple slaves with that id. Please choose one: "
logger = logging.getLogger(__name__)
class MesosState(TypedDict):
slaves: List
frameworks: List
orphan_tasks: List
MesosMetrics = TypedDict(
'MesosMetrics',
{
'master/cpus_total': int,
'master/cpus_used': int,
'master/disk_total': int,
'master/disk_used': int,
'master/gpus_total': int,
'master/gpus_used': int,
'master/mem_total': int,
'master/mem_used': int,
'master/tasks_running': int,
'master/tasks_staging': int,
'master/tasks_starting': int,
'master/slaves_active': int,
'master/slaves_inactive': int,
},
)
class MesosMaster:
def __init__(self, config):
self.config = config
def __str__(self):
return "<master: {}>".format(self.key())
def key(self):
return self.config["master"]
@util.CachedProperty(ttl=5)
def host(self):
return "{}://{}".format(
self.config["scheme"],
self.resolve(self.config["master"]),
)
@util.CachedProperty(ttl=5)
def cache_host(self):
host_url = urlparse(self.host)
replaced = host_url._replace(netloc=host_url.hostname + ':5055')
return replaced.geturl()
async def _request(
self,
url: str,
method: str = 'GET',
cached: bool = False,
**kwargs,
) -> aiohttp.ClientResponse:
headers = {'User-Agent': get_user_agent()}
if cached and self.config.get("use_mesos_cache", False):
# TODO: fall back to original host if this fails?
host = self.cache_host
else:
host = self.host
try:
async with aiohttp.ClientSession(
conn_timeout=self.config["response_timeout"],
read_timeout=self.config["response_timeout"],
) as session:
async with session.request(
method=method,
url=urljoin(host, url),
headers=headers,
**kwargs,
) as resp:
# if nobody awaits resp.text() or resp.json() before we exit the session context manager, then the
# http connection gets closed before we read the response; then later calls to resp.text/json will
# fail.
await resp.text()
return resp
except aiohttp.client_exceptions.ClientConnectionError:
raise exceptions.MasterNotAvailableException(
MISSING_MASTER.format(host),
)
except aiohttp.client_exceptions.TooManyRedirects:
raise exceptions.MasterTemporarilyNotAvailableException(
(
"Unable to connect to master at %s, likely due to "
"an ongoing leader election"
) % host,
)
async def fetch(self, url, **kwargs):
return await self._request(url, **kwargs)
async def post(self, url, **kwargs):
return await self._request(url, method='POST', **kwargs)
def _file_resolver(self, cfg):
return self.resolve(open(cfg[6:], "r+").read().strip())
@retry(KazooTimeoutError, tries=5, delay=0.5, logger=logger)
def _zookeeper_resolver(self, cfg):
hosts, path = cfg[5:].split("/", 1)
path = "/" + path
retry = KazooRetry(max_tries=10)
with zookeeper.client(
hosts=hosts,
read_only=True,
connection_retry=retry,
command_retry=retry,
) as zk:
def master_id(key):
return int(key.split("_")[-1])
def get_masters():
return [x for x in zk.get_children(path)
if re.search(r"\d+", x)]
leader = sorted(get_masters(), key=lambda x: master_id(x))
if len(leader) == 0:
raise exceptions.MasterNotAvailableException(
f"cannot find any masters at {cfg}",
)
data, stat = zk.get(os.path.join(path, leader[0]))
if not data:
                raise exceptions.MasterNotAvailableException(
"Cannot retrieve valid MasterInfo data from ZooKeeper",
)
else:
data = data.decode('utf8')
try:
parsed = json.loads(data)
if parsed and "address" in parsed:
ip = parsed["address"].get("ip")
port = parsed["address"].get("port")
if ip and port:
return f"{ip}:{port}"
except ValueError as parse_error:
log.debug(
"[WARN] No JSON content, probably connecting to older "
"Mesos version. Reason: {}".format(parse_error),
)
raise exceptions.MasterNotAvailableException(
"Failed to parse mesos master ip from ZK",
)
@log.duration
def resolve(self, cfg):
"""Resolve the URL to the mesos master.
The value of cfg should be one of:
- host:port
- zk://host1:port1,host2:port2/path
- zk://username:password@host1:port1/path
- file:///path/to/file (where file contains one of the above)
"""
if cfg.startswith("zk:"):
return self._zookeeper_resolver(cfg)
elif cfg.startswith("file:"):
return self._file_resolver(cfg)
else:
return cfg
@async_ttl_cache(ttl=15, cleanup_self=True)
async def state(self) -> MesosState:
return await (await self.fetch("/master/state.json", cached=True)).json()
async def state_summary(self) -> MesosState:
return await (await self.fetch("/master/state-summary")).json()
@async_ttl_cache(ttl=0, cleanup_self=True)
async def slave(self, fltr):
lst = await self.slaves(fltr)
log.debug(f"master.slave({fltr})")
if len(lst) == 0:
raise exceptions.SlaveDoesNotExist(
f"Slave {fltr} no longer exists.",
)
elif len(lst) > 1:
raise exceptions.MultipleSlavesForIDError(
"Multiple slaves matching filter {}. {}".format(
fltr,
",".join([slave.id for slave in lst]),
),
)
return lst[0]
async def slaves(self, fltr=""):
return [
slave.MesosSlave(self.config, x)
for x in (await self.state())['slaves']
if fltr == x['id']
]
async def _task_list(self, active_only=False):
keys = ["tasks"]
if not active_only:
keys.append("completed_tasks")
return itertools.chain(
*[util.merge(x, *keys) for x in await self._framework_list(active_only)],
)
async def task(self, fltr):
lst = await self.tasks(fltr)
if len(lst) == 0:
raise exceptions.TaskNotFoundException(
"Cannot find a task with filter %s" % fltr,
)
elif len(lst) > 1:
raise exceptions.MultipleTasksForIDError(
"Multiple tasks matching filter {}. {}".format(
fltr,
",".join([task.id for task in lst]),
),
)
return lst[0]
async def orphan_tasks(self):
return (await self.state())["orphan_tasks"]
# XXX - need to filter on task state as well as id
async def tasks(self, fltr="", active_only=False):
return [
task.Task(self, x)
for x in await self._task_list(active_only)
if fltr in x['id'] or fnmatch.fnmatch(x['id'], fltr)
]
async def framework(self, fwid):
return list(filter(
lambda x: x.id == fwid,
await self.frameworks(),
))[0]
async def _framework_list(self, active_only=False):
keys = ["frameworks"]
if not active_only:
keys.append("completed_frameworks")
return util.merge(await self._frameworks(), *keys)
@async_ttl_cache(ttl=15, cleanup_self=True)
async def _frameworks(self):
return await (await self.fetch("/master/frameworks", cached=True)).json()
async def frameworks(self, active_only=False):
return [framework.Framework(f)
for f in await self._framework_list(active_only)]
async def teardown(self, framework_id):
return await self.post(
"/master/teardown", data="frameworkId=%s" % framework_id,
)
async def metrics_snapshot(self) -> MesosMetrics:
return await (await self.fetch("/metrics/snapshot")).json()
@property # type: ignore
@util.memoize
def log(self):
return mesos_file.File(self, path="/master/log")
|
s1 = ["s1","s2","s3","s4","s5"]
s2 = s1[2:4]
print(f"Result: {s2}")
|
import json
from os import path
from punica.core.base_project import BaseProject
from punica.exception.punica_exception import PunicaException, PunicaError
class ProjectWithConfig(BaseProject):
def __init__(self, project_dir: str = ''):
super().__init__(project_dir)
pj_config_file_path = path.join(self.project_dir, 'punica.json')
old_pj_config_file_path = path.join(self.project_dir, 'punica-config.json')
if path.exists(pj_config_file_path):
self._pj_config_file_path = pj_config_file_path
elif path.exists(old_pj_config_file_path):
self._pj_config_file_path = old_pj_config_file_path
else:
raise PunicaException(PunicaError.config_file_not_found)
try:
            with open(self._pj_config_file_path, 'r') as f:
self._pj_config = json.load(f)
except FileNotFoundError:
raise PunicaException(PunicaError.config_file_not_found)
@property
def pj_config(self):
return self._pj_config
@property
def default_network(self):
return self.pj_config.get('defaultNet', '')
def get_rpc_address(self, network: str = ''):
try:
if len(network) == 0:
network = self.pj_config['defaultNet']
networks = self.pj_config['networks']
host = networks[network]['host']
port = networks[network]['port']
return f'{host}:{port}'
except KeyError:
raise PunicaException(PunicaError.invalid_network_config)
|
# from ...geometry.Bisector import *
from ...Util import *
from ...graph.Vertex import *
from Event import *
from ...geometry.BisectorRay import *
from ..Breakpoint import *
class CircleEvent(Event):
# def __init__(self, arc):
def __init__(self, arc, causing_event_is_site_type):
self.arc = arc
self.causing_event_is_site_type = causing_event_is_site_type
def getArc(self):
return self.arc
def getCausingEventIsSiteType(self):
return self.causing_event_is_site_type
def setCausingEventIsSiteType(self, causing_event_is_site_type):
self.causing_event_is_site_type = causing_event_is_site_type
"""
def centerArcIsSplitResidueArc(self):
return (self.arc).getIsSplitResidueArc()
"""
# retrieve focus of associated arc
def _getAssociatedLocation(self):
return self.getArc().getFocus()
# append vertices to the vertex list
def handle(self, tree, vertex_list, event_queue, truncated_bisector_edge_dict, sweep_line, initial_y_value):
# print "handling a circle event"
# three phases
# 1. deal with arc tree and potentially
# invalidate future circle events
# 2. deal with features of diagram
# 3. check for newly visible circle events
# phase 1
arc = self.getArc()
left_arc = tree.getArcLeftNeighbor(arc)
right_arc = tree.getArcRightNeighbor(arc)
# print "left arc focus:", left_arc.getFocus()
# print "arc focus:", arc.getFocus()
# print "right arc focus:", right_arc.getFocus()
# left_found_arc, left_found_payload = tree.getArcWithPayload(left_arc)
l_y = sweep_line.getY()
left_found_payload = tree.getArcPayload(left_arc)
invalidated_left_arc_circle_event = False
arc1 = tree.getArcLeftNeighbor(left_arc)
arc2 = left_arc
arc3 = arc
if left_found_payload.hasCircleEvent() == True:
result = CircleEvent.getLargestEmptyCircleLowestExtent(l_y, arc1, arc2, arc3)
location = result
y = location[1]
curr_y = l_y
# print "circle event y and sweep-line y:", y, curr_y
# raise Exception()
# if CircleEvent.axisAlignedComp(y, curr_y) == 0:
if False:
# do not remove circle event
# raise Exception()
pass
else:
# print "removed event's arcs:", arc1.getFocus(), arc2.getFocus(), arc3.getFocus()
# remove circle event
# print "invalidating a circle event for a left neighbor arc with focus:", left_arc.getFocus()
left_circle_event = left_found_payload.getCircleEvent()
# event_queue.remove(left_circle_event)
event_queue.removeEvent(left_circle_event)
left_found_payload.removeCircleEvent()
invalidated_left_arc_circle_event = True
# right_found_arc, right_found_payload = tree.getArcWithPayload(right_arc)
right_found_payload = tree.getArcPayload(right_arc)
invalidated_right_arc_circle_event = False
arc1 = arc
arc2 = right_arc
arc3 = tree.getArcRightNeighbor(right_arc)
if right_found_payload.hasCircleEvent() == True:
result = CircleEvent.getLargestEmptyCircleLowestExtent(l_y, arc1, arc2, arc3)
location = result
y = location[1]
curr_y = l_y
# print "circle event y and sweep-line y:", y, curr_y
# if CircleEvent.axisAlignedComp(y, curr_y) == 0:
if False:
# do not remove circle event
# raise Exception()
pass
else:
# print "removed event's arcs:", arc1.getFocus(), arc2.getFocus(), arc3.getFocus()
# remove circle event
# print "invalidating a circle event for a right neighbor arc with focus:", right_arc.getFocus()
right_circle_event = right_found_payload.getCircleEvent()
event_queue.removeEvent(right_circle_event)
right_found_payload.removeCircleEvent()
invalidated_right_arc_circle_event = True
invalidated_values = [invalidated_left_arc_circle_event, invalidated_right_arc_circle_event]
affirmative_invalidated_values = [x for x in invalidated_values if x == True]
num_affirmative_invalidated_values = len(affirmative_invalidated_values)
# print tree.toFocusList(), tree.toIntervalStringList()
# print "circle event leads to # of circle events being invalidated:", num_affirmative_invalidated_values
# phase 2
# retrieve two edges
# create an edge
# create a vertex
# set three endpoints
left_site = left_arc.getFocus()
center_site = arc.getFocus()
right_site = right_arc.getFocus()
left_edge = truncated_bisector_edge_dict.getTruncatedBisectorEdge(left_site, center_site)
right_edge = truncated_bisector_edge_dict.getTruncatedBisectorEdge(center_site, right_site)
outgoing_edge = truncated_bisector_edge_dict.addTruncatedBisectorEdge(left_site, right_site)
might_occur, lowest_extent_location = CircleEvent.mightLeadToACircleEventOccurringInFuture(left_arc, arc, right_arc, sweep_line.getY())
l_y = sweep_line.getY()
# print "sweep-line y-value:", l_y
# raise Exception()
focus1 = left_arc.getFocus()
focus2 = arc.getFocus()
focus3 = right_arc.getFocus()
# bisector1 = Bisector(focus1, focus2)
bisectorRay1 = CircleEvent.getBisectorRay(l_y, left_arc, arc)
"""
print "bisector ray one:", bisectorRay1.toString()
print "focuses:", focus1, focus2, focus3
"""
# bisector2 = Bisector(focus2, focus3)
bisectorRay2 = CircleEvent.getBisectorRay(l_y, arc, right_arc)
"""
print "bisector ray two:", bisectorRay2.toString()
print tree.toIntervalStringList()
print tree.toFocusList()
"""
# raise Exception()
# print bisectorRay1.doesIntersectWithRay(bisectorRay2)
# print bisectorRay1.intersectWithRay(bisectorRay2)
# print focus1, focus2, focus3
# print bisector1.doesIntersectWithBisector(bisector2)
# print CircleEvent.getLargestEmptyCircleLowestExtent(left_arc, arc, right_arc), sweep_line.getY()
location = CircleEvent.getIntersectionHelper(l_y, left_arc, arc, right_arc)
# truncate components of location
# so as to have outputted features
# with certain amount of precision
x, y = location
"""
truncated_x = tree._truncate(x, tree.getPrecision())
truncated_y = tree._truncate(y, tree.getPrecision())
"""
rounded_x = round_with_precision(x, tree.getPrecision())
rounded_y = round_with_precision(y, tree.getPrecision())
"""
location_with_truncated_components = (truncated_x, truncated_y)
# vertex = Vertex(None, location)
vertex = Vertex(None, location_with_truncated_components)
"""
location_with_rounded_components = (rounded_x, rounded_y)
vertex = Vertex(None, location)
vertex_list.append(vertex)
# print "about to modify existing edges"
"""
left_edge.setIndeterminateEndpoint(location)
right_edge.setIndeterminateEndpoint(location)
outgoing_edge.setIndeterminateEndpoint(location)
"""
truncated_bisector_edge_dict.setIndeterminateEndpoint(left_site, center_site, vertex)
truncated_bisector_edge_dict.setIndeterminateEndpoint(center_site, right_site, vertex)
truncated_bisector_edge_dict.setIndeterminateEndpoint(left_site, right_site, vertex)
# phase 1 transplanted
# remove an arc
tree.removeArc(arc)
# phase 3
might_occur1 = None
might_occur2 = None
if tree.hasArcLeftNeighbor(left_arc):
triple1_left = tree.getArcLeftNeighbor(left_arc)
triple1_center = left_arc
triple1_right = right_arc
# print tree.toString()
might_occur1, lowest_extent_location1 = CircleEvent.mightLeadToACircleEventOccurringInFuture(triple1_left, triple1_center, triple1_right, sweep_line.getY())
# location1 = CircleEvent.getIntersectionHelper(triple1_left, triple1_center, triple1_right)
if might_occur1 == True:
# print "for a circle event, see that we have a possible additional circle event:", triple1_left.getFocus(), triple1_center.getFocus(), triple1_right.getFocus()
event = CircleEvent(triple1_center, True)
y = lowest_extent_location1[1]
priority = -1 * y
# priority = tree._truncate(-1 * y, tree.getPrecision())
# priority = -1 * getLargestEmptyCircleLowestExtent(triple1_left, triple1_center, triple1_right)
event_queue.insert((priority, triple1_center.getIsSplitResidueArc()), event)
# print "prospective circle event involving arcs with focuses:", triple1_left.getFocus(), triple1_center.getFocus(), triple1_right.getFocus()
# found_arc, found_payload = tree.getArcWithPayload(triple1_center)
found_payload = tree.getArcPayload(triple1_center)
found_payload.setCircleEvent(event)
if event.isCircleEvent() == False:
raise Exception("storing as a circle event a site event")
# found_payload.setEventPriority(priority)
if tree.hasArcRightNeighbor(right_arc):
triple2_left = left_arc
triple2_center = right_arc
triple2_right = tree.getArcRightNeighbor(right_arc)
might_occur2, lowest_extent_location2 = CircleEvent.mightLeadToACircleEventOccurringInFuture(triple2_left, triple2_center, triple2_right, sweep_line.getY())
# location2 = CircleEvent.getIntersectionHelper(triple2_left, triple2_center, triple2_right)
if might_occur2 == True:
# print "for a circle event, see that we have a possible additional circle event:", triple2_left.getFocus(), triple2_center.getFocus(), triple2_right.getFocus()
event = CircleEvent(triple2_center, True)
y = lowest_extent_location2[1]
priority = -1 * y
# priority = tree._truncate(-1 * y, tree.getPrecision())
# priority = -1 * getLargestEmptyCircleLowestExtent(triple2_left, triple2_center, triple2_right)
event_queue.insert((priority, triple2_center.getIsSplitResidueArc()), event)
# print "prospective circle event involving arcs with focuses:", triple2_left.getFocus(), triple2_center.getFocus(), triple2_right.getFocus()
# found_arc, found_payload = tree.getArcWithPayload(triple2_center)
# print "focus:", triple2_center.getFocus()
found_payload = tree.getArcPayload(triple2_center)
found_payload.setCircleEvent(event)
if event.isCircleEvent() == False:
raise Exception("storing as a circle event a site event")
# found_payload.setEventPriority(priority)
might_occur_values = [might_occur1, might_occur2]
affirmative_might_occur_values = [x for x in might_occur_values if x == True]
num_affirmative_might_occur_values = len(affirmative_might_occur_values)
# print "circle event leads to # of possible circle events:", num_affirmative_might_occur_values
# if a bisector intersection exists, return an (x, y) tuple
# otherwise, return None
@staticmethod
# def getIntersectionHelper(arc1, arc2, arc3):
def getIntersectionHelper(l_y, arc1, arc2, arc3):
focus1 = arc1.getFocus()
focus2 = arc2.getFocus()
focus3 = arc3.getFocus()
# print "focuses for arcs involved in an intersection:", focus1, focus2, focus3
# print "sweep-line y-value for an intersection:", l_y
# bisector1 = Bisector(focus1, focus2)
bisectorRay1 = CircleEvent.getBisectorRay(l_y, arc1, arc2)
# print focus1, focus2, focus3
# bisector2 = Bisector(focus2, focus3)
bisectorRay2 = CircleEvent.getBisectorRay(l_y, arc2, arc3)
# print "bisector one for intersection:", bisectorRay1.toString()
# print "bisector two for intersection:", bisectorRay2.toString()
# print focus1, focus2, focus3
# if not bisector1.doesIntersectWithBisector(bisector2):
# if not bisectorRay1.intersectWithRay(bisectorRay2):
# print "ray intersects:", bisectorRay1.doesIntersectWithRay(bisectorRay2)
# using this test to disqualify intersections is flawed;
# it ignores case where an arc is inserted that is at bottom
# of largest empty circle for a circle event
# middle_arc_is_growing = (arc1.isDegenerate(l_y) == False) and (arc2.isDegenerate(l_y) == True) and (arc3.isDegenerate(l_y) == False)
"""
print "middle arc is growing:", middle_arc_is_growing
print arc1.getFocus(), arc2.getFocus(), arc3.getFocus(), l_y
print "bisector rays intersect:", bisectorRay1.doesIntersectWithRay(bisectorRay2)
"""
# cover case where middle arc is growing but not degenerate
# resulting pair of arc neighbors would share same focus
"""
if bisectorRay1.doesIntersectWithRay(bisectorRay2) == False and not (arc1.isDegenerate(l_y) == False and arc2.isDegenerate(l_y) == True and arc3.isDegenerate(l_y) == False):
"""
# if (bisectorRay1.doesIntersectWithRay(bisectorRay2) == False or middle_arc_is_growing == True):
if (bisectorRay1.doesIntersectWithRay(bisectorRay2) == False):
# print arc2.isDegenerate(l_y)
# print "returning that we have no intersection"
return None
else:
# location = bisector1.intersectWithBisector(bisector2)
location = bisectorRay1.intersectWithRay(bisectorRay2)
# print "intersection location:", location
return location
@staticmethod
def intersectionExists(l_y, arc1, arc2, arc3):
location = CircleEvent.getIntersectionHelper(l_y, arc1, arc2, arc3)
# print "intersection location:", location
result = location != None
return result
# assume a bisector intersection exists
# return an (x, y) tuple
@staticmethod
def distanceFromBisectorIntersectionToContributingSite(l_y, arc1, arc2, arc3):
# distance from center of a largest empty circle to a site on its boundary
location = CircleEvent.getIntersectionHelper(l_y, arc1, arc2, arc3)
site = arc1.getFocus()
"""
site2 = arc2.getFocus()
site3 = arc3.getFocus()
"""
distance = getDistance(location, site)
# print "largest empty circle arc focuses:", arc1.getFocus(), arc2.getFocus(), arc3.getFocus()
# print "largest empty circle center, site, radius:", location, site, distance
"""
distance2 = getDistance(location, site2)
distance3 = getDistance(location, site3)
print distance, distance2, distance3
"""
return distance
# assume a bisector intersection exists
# return an (x, y) tuple
@staticmethod
def getLargestEmptyCircleLowestExtent(l_y, arc1, arc2, arc3):
location = CircleEvent.getIntersectionHelper(l_y, arc1, arc2, arc3)
distance = CircleEvent.distanceFromBisectorIntersectionToContributingSite(l_y, arc1, arc2, arc3)
# print arc1.getFocus(), arc2.getFocus(), arc3.getFocus()
lowest_extent_x = location[0]
lowest_extent_y = location[1] - distance
lowest_extent = (lowest_extent_x, lowest_extent_y)
# print "values related to largest empty circle:", location, distance, lowest_extent
return lowest_extent
# might occur, given that circle event might be invalidated
# by future, we mean at current sweep line location or lower,
# assuming that sweep line moves from top to bottom
# return a (might_occur, location) tuple
# might_occur is a boolean value
# location is an (x, y) tuple
# if a corresponding circle event
# will occur in the "future",
# or None if a corresponding circle event
# will not occur in the "future"
# location is lowest extent of corresponding largest empty circle
@staticmethod
# def mightLeadToACircleEventOccurringInFuture(arc1, arc2, arc3, l_y):
def mightLeadToACircleEventOccurringInFuture(arc1, arc2, arc3, l_y, tolerance = 0.001):
# result = getIntersectionHelper(arc1, arc2, arc3)
intersection_exists = CircleEvent.intersectionExists(l_y, arc1, arc2, arc3)
# print "intersection exists:", intersection_exists
# if result == None:
if intersection_exists == False:
# no bisector intersection exists
return (False, None)
else:
result = CircleEvent.getLargestEmptyCircleLowestExtent(l_y, arc1, arc2, arc3)
location = result
y = location[1]
# print arc1.getFocus(), arc2.getFocus(), arc3.getFocus()
# print "proposed circle event priority:", -1 * location[1]
# print location, l_y
# print "circle event prediction values:", y, l_y
# if y <= l_y:
# tolerance = tree._getTolerance()
# print y, l_y
y_is_satisfactory = (y <= (l_y + tolerance))
# print y, l_y
if y_is_satisfactory == True:
# print "circle event might occur"
return (True, location)
else:
return (False, None)
def toString(self):
return "circle event"
def isCircleEvent(self):
return True
# x and y are location components, i.e. scalars
# specifically, the location components
# are assumed to be values that are
# axis-aligned measurements
@staticmethod
# def _featureLocationComponentComparator(x, y, tolerance = 0.001):
def axisAlignedComp(x, y, tolerance = 0.001):
if x >= (y + tolerance):
return 1
elif x <= (y - tolerance):
return -1
elif (x <= (y + tolerance)) and (x >= (y - tolerance)):
return 0
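    # Illustrative values (not from the original source): with the default tolerance
    # of 0.001, axisAlignedComp(1.0005, 1.0) == 0 (treated as equal), while
    # axisAlignedComp(1.01, 1.0) == 1 and axisAlignedComp(0.99, 1.0) == -1.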
# retrieve a bisector ray for a given left arc, right arc, and a sweep-line position
# we do not attempt to get bisector rays for leading arcs
@staticmethod
def getBisectorRay(l_y, arc1, arc2):
"""
arc_interval1 = arc_tree._getArcIntervalForArc(arc1)
arc_interval2 = arc_tree._getArcIntervalForArc(arc2)
"""
# generating breakpoints on-the-fly
# breakpoint = Breakpoint(arc1, arc2)
focus1 = arc1.getFocus()
focus2 = arc2.getFocus()
focus_y1 = focus1[1]
focus_y2 = focus2[1]
focus_x1 = focus1[0]
focus_x2 = focus2[0]
arcs = [arc1, arc2]
min_focus_x = min(focus_x1, focus_x2)
left_arc_candidates = [x for x in arcs if (x.getFocus())[0] == min_focus_x]
left_arc = left_arc_candidates[0]
right_arc_candidates = [x for x in arcs if x != left_arc]
right_arc = right_arc_candidates[0]
left_arc_focus = left_arc.getFocus()
right_arc_focus = right_arc.getFocus()
# sweep_line = arc_tree.getSweepLine()
# l_y = sweep_line.getY()
# if focus_y1 == focus_y2 and focus_y1 == l_y:
"""
if CircleEvent.axisAlignedComp(focus_y1, focus_y2) == 0 and CircleEvent.axisAlignedComp(focus_y1, l_y) == 0:
# both arcs are degenerate and are leading arcs
# create a downward-facing vertical bisector ray
# with base of midpoint of line segment
# from arc one focus to arc two focus
# such a bisector ray is clockwise ninety degrees
# from line segment from arc one focus to arc two focus
breakpoint = Breakpoint(arc1, arc2)
breakpoint_location = breakpoint.getLocation(l_y)
# bisectorRay = BisectorRay.construct(focus2, focus1)
# bisectorRay = BisectorRay.construct(focus2, focus1, breakpoint_location)
bisectorRay = BisectorRay.construct(right_arc_focus, left_arc_focus, breakpoint_location)
return bisectorRay
# elif focus_y1 == focus_y2 and focus_y1 != l_y:
"""
if CircleEvent.axisAlignedComp(focus_y1, focus_y2) == 0 and CircleEvent.axisAlignedComp(focus_y1, l_y) != 0:
# create a downward-facing vertical bisector
# with a base of breakpoint corresponding to the two arcs
breakpoint = Breakpoint(arc1, arc2)
breakpoint_location = breakpoint.getLocation(l_y)
# bisectorRay = BisectorRay(focus2, focus1, breakpoint_location)
bisectorRay = BisectorRay(right_arc_focus, left_arc_focus, breakpoint_location)
return bisectorRay
else:
# create a bisector ray that has base of a breakpoint
# corresponding to the two arcs
# use concept of a "funnel" to tell
# which of two possible directions
# the bisector ray should point in
# consider low arc
low_focus_y = min(focus_y1, focus_y2)
arcs = [arc1, arc2]
low_arc_candidates = [x for x in arcs if (x.getFocus())[1] == low_focus_y]
low_arc = low_arc_candidates[0]
low_arc_focus = low_arc.getFocus()
low_arc_focus_x = low_arc_focus[0]
# print arc1, arc2
# print focus1, focus2
breakpoint = Breakpoint(arc1, arc2)
breakpoint_location = breakpoint.getLocation(l_y)
# print breakpoint.getXComponent(l_y)
x, y = breakpoint_location
# print focus1, focus2, breakpoint_location
# have a degenerate case where low arc focus
# shares x-component with breakpoint;
# in this case, we consider high arc focus x
# assume that the two arcs have corresponding focuses
# that do not have x-component values that are the same,
# as any situation possibly leading to this outcome
# would have led arc with higher focus being removed
# upon insertion of the arc with lower focus
# if x < low_arc_focus_x:
if CircleEvent.axisAlignedComp(x, low_arc_focus_x) == -1:
if low_arc == arc1:
# choose bisector ray with negative x component
# bisectorRay = CircleEvent.getBisectorRayWithNegativeXComponent(arc1, arc2, breakpoint_location)
bisectorRay = CircleEvent.getBisectorRayWithNegativeXComponent(left_arc, right_arc, breakpoint_location)
return bisectorRay
elif low_arc == arc2:
# bisectorRay = CircleEvent.getBisectorRayWithNegativeXComponent(arc1, arc2, breakpoint_location)
bisectorRay = CircleEvent.getBisectorRayWithNegativeXComponent(left_arc, right_arc, breakpoint_location)
return bisectorRay
# elif x > low_arc_focus_x:
if CircleEvent.axisAlignedComp(x, low_arc_focus_x) == 1:
if low_arc == arc1:
# choose bisector ray with positive x component
# bisectorRay = CircleEvent.getBisectorRayWithPositiveXComponent(arc1, arc2, breakpoint_location)
bisectorRay = CircleEvent.getBisectorRayWithPositiveXComponent(left_arc, right_arc, breakpoint_location)
return bisectorRay
elif low_arc == arc2:
# bisectorRay = CircleEvent.getBisectorRayWithPositiveXComponent(arc1, arc2, breakpoint_location)
bisectorRay = CircleEvent.getBisectorRayWithPositiveXComponent(left_arc, right_arc, breakpoint_location)
return bisectorRay
# elif x == low_arc_focus_x:
# low arc is degenerate
elif CircleEvent.axisAlignedComp(x, low_arc_focus_x) == 0:
# choose bisector ray with x component
# pointing in direction of high arc focus x
# relative to breakpoint x component
# assume that high arc x is never equal to low arc x
high_arc_candidates = [x for x in arcs if (x.getFocus())[1] != low_focus_y]
high_arc = high_arc_candidates[0]
high_arc_focus = high_arc.getFocus()
high_arc_focus_x = high_arc_focus[0]
# print "high arc focus:", high_arc_focus
# if high_arc_focus_x < low_arc_focus_x:
if CircleEvent.axisAlignedComp(high_arc_focus_x, low_arc_focus_x) == -1:
if low_arc == arc1:
bisectorRay = CircleEvent.getBisectorRayWithPositiveXComponent(arc1, arc2, breakpoint_location)
return bisectorRay
elif low_arc == arc2:
# choose bisector ray with negative x component
bisectorRay = CircleEvent.getBisectorRayWithNegativeXComponent(arc1, arc2, breakpoint_location)
# print "bisector ray:", bisectorRay.toString()
return bisectorRay
# elif high_arc_focus_x > low_arc_focus_x:
elif CircleEvent.axisAlignedComp(high_arc_focus_x, low_arc_focus_x) == 1:
if low_arc == arc1:
# choose bisector ray with positive x component
bisectorRay = CircleEvent.getBisectorRayWithPositiveXComponent(arc1, arc2, breakpoint_location)
# print "bisector ray:", bisectorRay.toString()
return bisectorRay
elif low_arc == arc2:
bisectorRay = CircleEvent.getBisectorRayWithNegativeXComponent(arc1, arc2, breakpoint_location)
return bisectorRay
# elif high_arc_focus_x == low_arc_focus_x:
elif CircleEvent.axisAlignedComp(high_arc_focus_x, low_arc_focus_x) == 0:
if low_arc == arc1:
bisectorRay = CircleEvent.getBisectorRayWithPositiveXComponent(arc1, arc2, breakpoint_location)
return bisectorRay
elif low_arc == arc2:
bisectorRay = CircleEvent.getBisectorRayWithNegativeXComponent(arc1, arc2, breakpoint_location)
return bisectorRay
raise Exception("fell through")
@staticmethod
def getBisectorRayWithNegativeXComponent(arc1, arc2, offset_vector):
bisector_ray_candidates = CircleEvent.getBisectorRayCandidates(arc1, arc2, offset_vector)
satisfactory_bisector_rays = [x for x in bisector_ray_candidates if CircleEvent.bisectorRayHasNegativeXComponent(x) == True]
satisfactory_bisector_ray = satisfactory_bisector_rays[0]
return satisfactory_bisector_ray
@staticmethod
def getBisectorRayWithPositiveXComponent(arc1, arc2, offset_vector):
bisector_ray_candidates = CircleEvent.getBisectorRayCandidates(arc1, arc2, offset_vector)
satisfactory_bisector_rays = [x for x in bisector_ray_candidates if CircleEvent.bisectorRayHasPositiveXComponent(x) == True]
satisfactory_bisector_ray = satisfactory_bisector_rays[0]
return satisfactory_bisector_ray
# retrieve two candidates in the form of a two-tuple
@staticmethod
def getBisectorRayCandidates(arc1, arc2, offset_vector):
focus1 = arc1.getFocus()
focus2 = arc2.getFocus()
# create two bisector rays
# points ninety degrees CCW of line segment from focus1 to focus2
bisectorRay1 = BisectorRay(focus1, focus2, offset_vector)
# points ninety degrees CW of line segment from focus1 to focus2
bisectorRay2 = BisectorRay(focus2, focus1, offset_vector)
result = (bisectorRay1, bisectorRay2)
return result
@staticmethod
def bisectorRayHasPositiveXComponent(bisectorRay):
direction_vector = bisectorRay.getDirectionVector()
x_component = direction_vector[0]
# print "x component:", x_component
is_positive = x_component > 0
return is_positive
@staticmethod
def bisectorRayHasNegativeXComponent(bisectorRay):
direction_vector = bisectorRay.getDirectionVector()
x_component = direction_vector[0]
# print "x component:", x_component
is_negative = x_component < 0
return is_negative
"""
# invalid intersection
focus1 = (90.0, 100.0)
focus2 = (86.0, 98.0)
focus3 = (90.0, 100.0)
bisector1 = Bisector(focus1, focus2)
# print focus1, focus2, focus3
bisector2 = Bisector(focus2, focus3)
# print focus1, focus2, focus3
print bisector1.doesIntersectWithBisector(bisector2)
location = bisector1.intersectWithBisector(bisector2)
print location
"""
"""
from ..SweepLine import *
# from ..arc_tree.ArcTree import *
sweep_line = SweepLine()
"""
"""
arc_tree = ArcTree(3, sweep_line)
sweep_line.setY(364)
arc_tree.insertArc(arc1)
sweep_line.setY(328)
arc_tree.insertArc(arc2)
sweep_line.setY(243)
arc_tree.insertArc(arc3)
sweep_line.setY(155)
print arc_tree.toIntervalStringList()
"""
from ..arc_tree.Arc import *
"""
l_y = 155
arc1 = Arc((141, 364))
arc2 = Arc((255, 328))
arc3 = Arc((128, 243))
"""
"""
l_y = 268
arc1 = Arc((298, 366))
arc2 = Arc((299, 366))
arc3 = Arc((307, 268))
"""
"""
l_y = 203
arc1 = Arc((298, 366))
arc2 = Arc((299, 366))
arc3 = Arc((370, 203))
"""
"""
l_y = 150
arc1 = Arc((100, 200))
arc2 = Arc((200, 400))
arc3 = Arc((300, 200))
"""
"""
l_y = 79.289
arc1 = Arc((400, 100))
arc2 = Arc((400, 200))
arc3 = Arc((500, 200))
"""
"""
l_y = 79.289
arc1 = Arc((300, 200))
arc2 = Arc((400, 100))
arc3 = Arc((400, 200))
"""
"""
l_y = 79.289
arc1 = Arc((300, 200))
arc2 = Arc((400, 200))
arc3 = Arc((400, 100))
"""
"""
l_y = 200
arc1 = Arc((200, 400), 400)
arc2 = Arc((400, 400), 400)
arc3 = Arc((300, 200), 200)
"""
l_y = 228
"""
arc1 = Arc((170, 314))
arc2 = Arc((230, 271))
arc3 = Arc((170, 314))
"""
"""
arc1 = Arc((230, 271))
arc2 = Arc((170, 314))
arc3 = Arc((359, 228))
bisector_ray1 = CircleEvent.getBisectorRay(l_y, arc1, arc2)
bisector_ray2 = CircleEvent.getBisectorRay(l_y, arc2, arc3)
breakpoint1 = Breakpoint(arc1, arc2)
breakpoint_location1 = breakpoint1.getLocation(l_y)
breakpoint2 = Breakpoint(arc2, arc3)
breakpoint_location2 = breakpoint2.getLocation(l_y)
"""
"""
print "breakpoint one location:", breakpoint_location1
print "breakpoint two location:", breakpoint_location2
print "bisector ray one:", bisector_ray1.toString()
print "bisector ray two:", bisector_ray2.toString()
"""
"""
does_intersect = bisector_ray1.doesIntersectWithRay(bisector_ray2)
intersection = bisector_ray1.intersectWithRay(bisector_ray2)
"""
"""
print does_intersect
"""
"""
# raise Exception()
# print intersection
result = CircleEvent.mightLeadToACircleEventOccurringInFuture(arc1, arc2, arc3, l_y)
# print result
"""
|
import numpy as np
from torch import nn
from torchvision import models
class FineTuneModel(nn.Module):
"""Model used to test a ResNet50 with finetuning FC layers"""
def __init__(self, num_classes):
super(FineTuneModel, self).__init__()
# Everything except the last linear layer
original_model = models.resnet50(pretrained=True)
self.features = nn.Sequential(*list(original_model.children())[:-1])
self.classifier = nn.Sequential(
nn.Linear(2048, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, num_classes),
)
def forward(self, x):
f = self.features(x)
f = f.view(f.size(0), -1)
y = self.classifier(f)
return y
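# A minimal shape-check sketch (not part of the original module): the batch below is
# random data purely for illustration, and downloading the pretrained ResNet50 weights
# is assumed to be possible in the environment.
if __name__ == "__main__":
    import torch
    model = FineTuneModel(num_classes=10)
    dummy = torch.randn(2, 3, 224, 224)   # ResNet50 expects 3-channel images
    out = model(dummy)
    print(out.shape)  # expected: torch.Size([2, 10])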
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import unittest
from cfn_policy_validator.application_error import ApplicationError
from cfn_policy_validator.parsers.utils.node_evaluator import NodeEvaluator
from cfn_policy_validator.tests.parsers_tests import mock_node_evaluator_setup
from cfn_policy_validator.tests.utils import load, load_resources, account_config, expected_type_error
class WhenEvaluatingAPolicyWithASelectFunction(unittest.TestCase):
@mock_node_evaluator_setup()
def test_returns_value_at_index_1(self):
template = load_resources({
'ResourceA': {
'Type': 'AWS::Random::Service',
'Properties': {
'PropertyA': {
'Fn::Select': ['1', ['option 0', 'option 1']]
}
}
}
})
node_evaluator = NodeEvaluator(template, account_config, {})
result = node_evaluator.eval(template['Resources']['ResourceA']['Properties']['PropertyA'])
self.assertEqual(result, 'option 1')
class WhenEvaluatingAPolicyWithASelectFunctionThatReturnsAnObject(unittest.TestCase):
@mock_node_evaluator_setup()
def test_returns_value_at_index_0(self):
template = load_resources({
'ResourceA': {
'Type': 'AWS::Random::Service',
'Properties': {
'PropertyA': {
'Fn::Select': ['0', [{'option': '0'}, {'option': '1'}]]
}
}
}
})
node_evaluator = NodeEvaluator(template, account_config, {})
result = node_evaluator.eval(template['Resources']['ResourceA']['Properties']['PropertyA'])
self.assertEqual(result, {'option': '0'})
class WhenEvaluatingAPolicyWithASelectFunctionAndInvalidValue(unittest.TestCase):
@mock_node_evaluator_setup()
def test_raises_an_error(self):
template = load_resources({
'ResourceA': {
'Type': 'AWS::Random::Service',
'Properties': {
'PropertyA': {
'Fn::Select': '0'
}
}
}
})
node_evaluator = NodeEvaluator(template, account_config, {})
with self.assertRaises(ApplicationError) as context:
node_evaluator.eval(template['Resources']['ResourceA']['Properties']['PropertyA'])
self.assertEqual(expected_type_error('Fn::Select', 'array', "'0'"), str(context.exception))
class WhenEvaluatingAPolicyWithASelectFunctionAndAListNotEqualToTwo(unittest.TestCase):
@mock_node_evaluator_setup()
def test_raises_an_error(self):
template = load_resources({
'ResourceA': {
'Type': 'AWS::Random::Service',
'Properties': {
'PropertyA': {
'Fn::Select': ['0', ['option 0'], '3rd']
}
}
}
})
node_evaluator = NodeEvaluator(template, account_config, {})
with self.assertRaises(ApplicationError) as context:
node_evaluator.eval(template['Resources']['ResourceA']['Properties']['PropertyA'])
self.assertEqual("Additional items are not allowed ('3rd' was unexpected), Path: Fn::Select", str(context.exception))
class WhenEvaluatingAPolicyWithASelectFunctionAndAnInvalidFirstValue(unittest.TestCase):
@mock_node_evaluator_setup()
def tests_raises_an_error(self):
template = load({
'Resources': {
'ResourceA': {
'Type': 'AWS::Random::Service',
'Properties': {
'PropertyA': {
'Fn::Select': ['a', ['option 0', 'option 1']]
}
}
}
}
})
node_evaluator = NodeEvaluator(template, account_config, {})
with self.assertRaises(ApplicationError) as context:
node_evaluator.eval(template['Resources']['ResourceA']['Properties']['PropertyA'])
self.assertEqual("The first value for Fn::Select must be an integer. Invalid value: a", str(context.exception))
class WhenEvaluatingAPolicyWithASelectFunctionAndAnInvalidSecondValue(unittest.TestCase):
@mock_node_evaluator_setup()
def tests_raises_an_error(self):
template = load_resources({
'ResourceA': {
'Type': 'AWS::Random::Service',
'Properties': {
'PropertyA': {
'Fn::Select': ['1', 'option 0']
}
}
}
})
node_evaluator = NodeEvaluator(template, account_config, {})
with self.assertRaises(ApplicationError) as context:
node_evaluator.eval(template['Resources']['ResourceA']['Properties']['PropertyA'])
self.assertEqual(expected_type_error('Fn::Select.1', 'array', "'option 0'"), str(context.exception))
class WhenEvaluatingAPolicyWithASelectFunctionAndIndexIsLargerThanList(unittest.TestCase):
@mock_node_evaluator_setup()
def tests_raises_an_error(self):
template = load({
'Resources': {
'ResourceA': {
'Type': 'AWS::Random::Service',
'Properties': {
'PropertyA': {
'Fn::Select': ['2', ['option 0', 'option 1']]
}
}
}
}
})
node_evaluator = NodeEvaluator(template, account_config, {})
with self.assertRaises(ApplicationError) as context:
node_evaluator.eval(template['Resources']['ResourceA']['Properties']['PropertyA'])
self.assertEqual("Fn::Select index is out of bounds of the list. Invalid value: ['2', ['option 0', 'option 1']]", str(context.exception))
class WhenEvaluatingAPolicyWithASelectFunctionAndIndexIsNegative(unittest.TestCase):
@mock_node_evaluator_setup()
def tests_raises_an_error(self):
template = load({
'Resources': {
'ResourceA': {
'Type': 'AWS::Random::Service',
'Properties': {
'PropertyA': {
'Fn::Select': ['-1', ['option 0', 'option 1']]
}
}
}
}
})
node_evaluator = NodeEvaluator(template, account_config, {})
with self.assertRaises(ApplicationError) as context:
node_evaluator.eval(template['Resources']['ResourceA']['Properties']['PropertyA'])
self.assertEqual("Fn::Select index is out of bounds of the list. Invalid value: ['-1', ['option 0', 'option 1']]", str(context.exception))
|
"""
Test an AppSettings without a prefix
"""
from django.test import TestCase, override_settings
from . import app_settings_no_prefix, app_settings_with_prefix
class TestAppSettingsWithoutPrefix(TestCase):
def test_attribute_default(self):
self.assertEqual(app_settings_no_prefix.UNSET_ATTRIBUTE, "default")
def test_attribute_overridden_in_settings(self):
self.assertEqual(app_settings_no_prefix.SET_ATTRIBUTE, "test")
@override_settings(SET_ATTRIBUTE="overridden")
def test_attribute_overridden_in_tests(self):
self.assertEqual(app_settings_no_prefix.SET_ATTRIBUTE, "overridden")
def test_property_default(self):
self.assertEqual(app_settings_no_prefix.UNSET_PROPERTY, "default")
def test_property_overridden(self):
self.assertEqual(app_settings_no_prefix.SET_PROPERTY, "test")
@override_settings(SET_PROPERTY="overridden")
def test_property_overridden_in_tests(self):
self.assertEqual(app_settings_no_prefix.SET_PROPERTY, "overridden")
def test_callable_default(self):
self.assertEqual(app_settings_no_prefix.UNSET_CALLABLE, "unset")
def test_callable_overridden(self):
self.assertEqual(app_settings_no_prefix.SET_CALLABLE, "called test")
@override_settings(SET_CALLABLE="overridden")
def test_callable_overridden_in_tests(self):
self.assertEqual(app_settings_no_prefix.SET_CALLABLE, "called overridden")
def test_undefined(self):
with self.assertRaises(AttributeError):
assert app_settings_no_prefix.UNDEFINED
class TestAppSettingsWithPrefix(TestCase):
def test_attribute_default(self):
self.assertEqual(app_settings_with_prefix.UNSET_ATTRIBUTE, "default")
def test_attribute_overridden(self):
self.assertEqual(app_settings_with_prefix.SET_ATTRIBUTE, "prefixed test")
@override_settings(PREFIX_SET_ATTRIBUTE="prefixed overridden")
def test_attribute_overridden_in_tests(self):
self.assertEqual(app_settings_with_prefix.SET_ATTRIBUTE, "prefixed overridden")
def test_property_default(self):
self.assertEqual(app_settings_with_prefix.UNSET_PROPERTY, "default")
def test_property_overridden(self):
self.assertEqual(app_settings_with_prefix.SET_PROPERTY, "prefixed test")
@override_settings(PREFIX_SET_PROPERTY="prefixed overridden")
def test_property_overridden_in_tests(self):
self.assertEqual(app_settings_with_prefix.SET_PROPERTY, "prefixed overridden")
def test_callable_default(self):
self.assertEqual(app_settings_with_prefix.UNSET_CALLABLE, "unset")
def test_callable_overridden(self):
self.assertEqual(app_settings_with_prefix.SET_CALLABLE, "called prefixed test")
@override_settings(PREFIX_SET_CALLABLE="prefixed overridden")
def test_callable_overridden_in_tests(self):
self.assertEqual(
app_settings_with_prefix.SET_CALLABLE, "called prefixed overridden"
)
def test_using_prefix__raises_exception(self):
with self.assertRaises(AttributeError):
assert app_settings_with_prefix.PREFIX_SET_ATTRIBUTE
def test_undefined(self):
with self.assertRaises(AttributeError):
assert app_settings_with_prefix.UNDEFINED
|
from rest_framework.routers import DefaultRouter
from ..viewsets.dynamic_menu import DynamicMenuViewSet
router = DefaultRouter()
router.register(prefix=r'menu', viewset=DynamicMenuViewSet, basename='menu')
urls = router.urls
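# Hypothetical wiring into a project's URLconf (the 'api/' prefix is an assumption
# for illustration, not part of this app):
# from django.urls import include, path
# urlpatterns = [path('api/', include(urls))]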
|
"""
Python implementation of full-gmm-test.cc
"""
import unittest
import numpy as np
from kaldi.base import math as kaldi_math
from kaldi.matrix import Matrix, Vector
from kaldi.matrix.common import MatrixTransposeType
from kaldi.matrix.functions import vec_mat_vec
from kaldi.matrix.packed import SpMatrix, TpMatrix
from kaldi.gmm import *
from kaldi.gmm._model_test_common import *
def RandPosdefSpMatrix(dim):
"""
Generate random (non-singular) matrix
Arguments:
dim - int
matrix_sqrt - TpMatrix
logdet - float
Outputs:
matrix - SpMatrix
"""
while True:
tmp = Matrix(dim, dim)
tmp.set_randn_()
if tmp.cond() < 100: break
print("Condition number of random matrix large {}, trying again (this is normal)".format(tmp.cond()))
# tmp * tmp^T will give positive definite matrix
matrix = SpMatrix(dim)
matrix.add_mat2_(1.0, tmp, MatrixTransposeType.NO_TRANS, 0.0)
matrix_sqrt = TpMatrix(len(matrix))
matrix_sqrt = matrix_sqrt.cholesky_(matrix)
logdet_out = matrix.log_pos_def_det()
return matrix, matrix_sqrt, logdet_out
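# Reference sketch of the same construction in plain NumPy (illustrative only; the
# tests themselves use the kaldi Matrix/SpMatrix/TpMatrix wrappers above):
def _rand_posdef_numpy(dim):
    a = np.random.randn(dim, dim)
    s = a @ a.T + 1e-6 * np.eye(dim)   # A * A^T is symmetric positive (semi)definite
    sqrt = np.linalg.cholesky(s)       # lower-triangular square root
    _, logdet = np.linalg.slogdet(s)   # log-determinant, as log_pos_def_det() returns
    return s, sqrt, logdet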
def init_rand_diag_gmm(gmm):
num_comp, dim = gmm.num_gauss(), gmm.dim()
weights = Vector([kaldi_math.rand_uniform() for _ in range(num_comp)])
tot_weigth = weights.sum()
for i, m in enumerate(weights):
weights[i] = m / tot_weigth
means = Matrix([[kaldi_math.rand_gauss() for _ in range(dim)] for _ in range(num_comp)])
vars_ = Matrix([[kaldi_math.exp(kaldi_math.rand_gauss()) for _ in range(dim)] for _ in range(num_comp)])
vars_.invert_elements_()
gmm.set_weights(weights)
gmm.set_inv_vars_and_means(vars_, means)
gmm.perturb(0.5 * kaldi_math.rand_uniform())
gmm.compute_gconsts()
class TestFullGmm(unittest.TestCase):
def setUp(self):
np.random.seed(12345)
def testFullGmmEst(self):
fgmm = FullGmm()
dim = 10 + np.random.randint(low = 0, high = 10)
num_comp = 1 + np.random.randint(low = 0, high = 10)
num_frames = 5000
feats = Matrix(num_frames, dim)
init_rand_full(dim, num_comp, fgmm)
fgmm_normal = FullGmmNormal.new_with_other(fgmm)
fgmm_normal.rand(feats)
acc = AccumFullGmm.new_with_full(fgmm, GmmUpdateFlags.ALL)
for t in range(num_frames):
acc.accumulate_from_full(fgmm, feats[t,:], 1.0)
opts = MleFullGmmOptions()
objf_change, count = mle_full_gmm_update(opts, acc, GmmUpdateFlags.ALL, fgmm)
change = objf_change / count
num_params = num_comp * (dim + 1 + (dim * (dim + 1)/2))
predicted_change = 0.5 * num_params / num_frames
print("Objf change per frame was {} vs. predicted {}".format(change, predicted_change))
self.assertTrue(change < 2.0 * predicted_change)
self.assertTrue(change > 0.0)
def testFullGmm(self):
dim = 1 + np.random.randint(low = 0, high = 9)
nMix = 1 + np.random.randint(low = 0, high = 9)
print("Testing NumGauss: {}, Dim: {}".format(nMix, dim))
feat = Vector([kaldi_math.rand_gauss() for _ in range(dim)])
weights = Vector([kaldi_math.rand_uniform() for _ in range(nMix)])
tot_weigth = weights.sum()
for i, m in enumerate(weights):
weights[i] = m / tot_weigth
means = Matrix([[kaldi_math.rand_gauss() for _ in range(dim)] for _ in range(nMix)])
invcovars = [SpMatrix(dim) for _ in range(nMix)]
covars_logdet = []
for _ in range(nMix):
c, matrix_sqrt, logdet_out = RandPosdefSpMatrix(dim)
invcovars[_].copy_from_sp_(c)
invcovars[_].invert_double_()
covars_logdet.append(logdet_out)
# Calculate loglike for feature Vector
def auxLogLike(w, logdet, mean_row, invcovar):
return -0.5 * ( kaldi_math.M_LOG_2PI * dim \
+ logdet \
+ vec_mat_vec(mean_row, invcovar, mean_row) \
+ vec_mat_vec(feat, invcovar, feat)) \
+ vec_mat_vec(mean_row, invcovar, feat) \
+ np.log(w)
loglikes = [auxLogLike(weights[m], covars_logdet[m], means[m, :], invcovars[m]) for m in range(nMix)]
loglike = Vector(loglikes).log_sum_exp()
# new Gmm
gmm = FullGmm(nMix, dim)
gmm.set_weights(weights)
gmm.set_inv_covars_and_means(invcovars, means)
gmm.compute_gconsts()
loglike1, posterior1 = gmm.component_posteriors(feat)
self.assertAlmostEqual(loglike, loglike1, delta = 0.01)
self.assertAlmostEqual(1.0, posterior1.sum(), delta = 0.01)
weights_bak = gmm.weights()
means_bak = gmm.means()
invcovars_bak = gmm.covars()
for i in range(nMix):
invcovars_bak[i].invert_double_()
# Set all params one-by-one to new model
gmm2 = FullGmm(gmm.num_gauss(), gmm.dim())
gmm2.set_weights(weights_bak)
gmm2.set_means(means_bak)
gmm2.inv_covars_ = invcovars_bak
gmm2.compute_gconsts()
loglike_gmm2 = gmm2.log_likelihood(feat)
self.assertAlmostEqual(loglike1, loglike_gmm2, delta = 0.01)
loglikes = gmm2.log_likelihoods(feat)
self.assertAlmostEqual(loglikes.log_sum_exp(), loglike_gmm2)
indices = list(range(gmm2.num_gauss()))
loglikes = gmm2.log_likelihoods_preselect(feat, indices)
self.assertAlmostEqual(loglikes.log_sum_exp(), loglike_gmm2)
# Simple component mean accessor + mutator
gmm3 = FullGmm(gmm.num_gauss(), gmm.dim())
gmm3.set_weights(weights_bak)
means_bak.set_zero_()
for i in range(nMix):
gmm.get_component_mean(i, means_bak[i,:])
gmm3.set_means(means_bak)
gmm3.inv_covars_ = invcovars_bak
gmm3.compute_gconsts()
loglike_gmm3 = gmm3.log_likelihood(feat)
self.assertAlmostEqual(loglike1, loglike_gmm3, delta = 0.01)
gmm4 = FullGmm(gmm.num_gauss(), gmm.dim())
gmm4.set_weights(weights_bak)
invcovars_bak, means_bak = gmm.get_covars_and_means()
for i in range(nMix):
invcovars_bak[i].invert_double_()
gmm4.set_inv_covars_and_means(invcovars_bak, means_bak)
gmm4.compute_gconsts()
loglike_gmm4 = gmm4.log_likelihood(feat)
self.assertAlmostEqual(loglike1, loglike_gmm4, delta = 0.01)
# TODO: I/O tests
# CopyFromFullGmm
gmm4 = FullGmm()
gmm4.copy_from_full(gmm)
loglike5, _ = gmm4.component_posteriors(feat)
self.assertAlmostEqual(loglike, loglike5, delta = 0.01)
# CopyFromDiag
gmm_diag = DiagGmm(nMix, dim)
init_rand_diag_gmm(gmm_diag)
loglike_diag = gmm_diag.log_likelihood(feat)
gmm_full = FullGmm().copy(gmm_diag)
loglike_full = gmm_full.log_likelihood(feat)
gmm_diag2 = DiagGmm().copy(gmm_full)
loglike_diag2 = gmm_diag2.log_likelihood(feat)
self.assertAlmostEqual(loglike_diag, loglike_full, delta = 0.01)
self.assertAlmostEqual(loglike_diag, loglike_diag2, delta = 0.01)
# Split and merge test for 1 component
# TODO: Implement split
# weights1 = Vector([1.0])
# means1 = Matrix(means[0,:])
# invcovars1 = [invcovars[0]]
# gmm1 = FullGmm(1, dim)
# gmm1.set_weights(weights1)
# gmm1.SetInvCovarsAndMeans(invcovars1, means1)
# gmm1.ComputeGconsts()
# gmm2 = FullGmm()
# gmm2.CopyFromFullGmm(gmm1)
# gmm2.Split(2, 0.001)
# gmm2.Merge(1)
# loglike1 = gmm1.LogLikelihood(feat)
# loglike2 = gmm2.LogLikelihood(feat)
# self.assertAlmostEqual(loglike1, loglike2, delta = 0.01)
if __name__ == '__main__':
unittest.main()
|
from typing import Optional, Union, List, Dict
import pandas as pd
import torch
def convert_pandas_to_torch_tensor(
data_batch: pd.DataFrame,
columns: Optional[Union[List[str], List[List[str]]]] = None,
column_dtypes: Optional[Union[torch.dtype, List[torch.dtype]]] = None,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Converts a Pandas dataframe to a torch Tensor or list of torch Tensors.
The format of the return type will match the format of ``columns``. If a
list of columns is provided, the return type will be a single tensor. If
``columns`` is a list of lists, then the return type will be a list of
tensors.
Args:
data_batch (pandas.DataFrame): The pandas dataframe to convert to a
torch tensor.
        columns (Optional[Union[List[str], List[List[str]]]]):
The names of the columns in the dataframe to include in the
torch tensor. If this arg is a List[List[str]], then the return
type will be a List of tensors. This is useful for multi-input
models. If None, then use all columns in the ``data_batch``.
        column_dtypes (Optional[Union[torch.dtype, List[torch.dtype]]]): The
torch dtype to use for the tensor. If set to None,
then automatically infer the dtype.
Returns:
Either a torch tensor of size (N, len(columns)) where N is the
number of rows in the ``data_batch`` Dataframe, or a list of
tensors, where the size of item i is (N, len(columns[i])).
"""
multi_input = columns and (isinstance(columns[0], (list, tuple)))
if not multi_input and column_dtypes and type(column_dtypes) != torch.dtype:
raise TypeError(
"If `columns` is a list of strings, "
"`column_dtypes` must be None or a single `torch.dtype`."
f"Got {type(column_dtypes)} instead."
)
columns = columns if columns else []
def get_tensor_for_columns(columns, dtype):
feature_tensors = []
if columns:
batch = data_batch[columns]
else:
batch = data_batch
for col in batch.columns:
col_vals = batch[col].values
t = torch.as_tensor(col_vals, dtype=dtype)
t = t.view(-1, 1)
feature_tensors.append(t)
return torch.cat(feature_tensors, dim=1)
if multi_input:
if type(column_dtypes) not in [list, tuple]:
column_dtypes = [column_dtypes] * len(columns)
return [
get_tensor_for_columns(columns=subcolumns, dtype=dtype)
for subcolumns, dtype in zip(columns, column_dtypes)
]
else:
return get_tensor_for_columns(columns=columns, dtype=column_dtypes)
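# Minimal usage sketch (the column names "a" and "b" are made up for illustration):
#   df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
#   convert_pandas_to_torch_tensor(df, columns=["a", "b"])        # one (2, 2) tensor
#   convert_pandas_to_torch_tensor(df, columns=[["a"], ["b"]])    # list of two (2, 1) tensors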
def load_torch_model(
saved_model: Union[torch.nn.Module, Dict],
model_definition: Optional[torch.nn.Module] = None,
) -> torch.nn.Module:
"""Loads a PyTorch model from the provided``saved_model``.
If ``saved_model`` is a torch Module, then return it directly. If ``saved_model`` is
a torch state dict, then load it in the ``model_definition`` and return the loaded
model.
"""
if isinstance(saved_model, torch.nn.Module):
return saved_model
elif isinstance(saved_model, dict):
if not model_definition:
raise ValueError(
"Attempting to load torch model from a "
"state_dict, but no `model_definition` was "
"provided."
)
model_definition.load_state_dict(saved_model)
return model_definition
else:
raise ValueError(
f"Saved model is of type {type(saved_model)}. "
f"The model saved in the checkpoint is expected "
f"to be of type `torch.nn.Module`, or a model "
f"state dict of type dict."
)
|
#!/usr/bin/env python
# coding: utf-8
# # Navigation
# In this notebook, you will learn how to use the Unity ML-Agents environment for the first project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893).
# ### 1. Start the Environment
import os
import sys
from os import path
from collections import deque
from datetime import datetime
import torch
import numpy as np
from unityagents import UnityEnvironment
# from matplotlib import pyplot as plt
sys.path.insert(0, os.path.dirname(__file__))
sys.path.insert(0, os.getcwd())
CKPT_DIR = path.join(path.dirname(__file__), 'checkpoints')
from dqn_agent import Agent
env = UnityEnvironment(file_name="p1_navigation/Banana_Linux_NoVis/Banana.x86_64")
# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents.
# Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# Examine the State and Action Spaces
# The simulation contains a single agent that navigates a large environment. At each time step, it has four actions at its disposal:
# - `0` - walk forward
# - `1` - walk backward
# - `2` - turn left
# - `3` - turn right
#
# The state space has `37` dimensions and contains the agent's velocity, along with ray-based perception of objects
# around agent's forward direction. A reward of `+1` is provided for collecting a yellow banana, and a reward
# of `-1` is provided for collecting a blue banana.
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
print('Number of agents:', len(env_info.agents))
action_size = brain.vector_action_space_size
print('Number of actions:', action_size)
state = env_info.vector_observations[0]
print('States look like:', state)
state_size = len(state)
print('States have length:', state_size)
agent = Agent(state_size=state_size, action_size=action_size, seed=0, mode='Double_DQN', use_prioritized_memory=True)
def dqn(n_episodes=2000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
max_t (int): maximum number of timesteps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon """
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=True)[brain_name]
state = env_info.vector_observations[0] # get the current state
score = 0
while 1:
action = agent.get_action(state, eps) # select an action
env_info = env.step(action)[brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
agent.step(state, action, reward, next_state, done) # take step with agent (including learning)
score += reward # update the score
state = next_state # roll over the state to next time step
if done: # exit loop if episode finished
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon for next episode
# Printing & Monitoring
cur_mean = np.mean(scores_window)
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, cur_mean), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}\tepsilon: {:.5f}'.format(i_episode, cur_mean,eps))
# Environment is solved if average score of last 100 episodes >= 13
if cur_mean >= 13.0:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode, cur_mean))
file_basename = path.join(CKPT_DIR, 'solved_{}'.format(datetime.now().strftime('%Y%m%d_%H%M%S')))
torch.save(agent.qnetwork_local.state_dict(), file_basename + '.pt')
np.savez_compressed(file_basename + '.npz', scores=scores)
break
return scores
n_episodes_max = 1000
eps_end = .005
eps_decay = .995
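# Rough sanity check on the schedule (not part of training): with multiplicative decay,
# epsilon reaches eps_end after roughly log(eps_end) / log(eps_decay) episodes,
# i.e. about 1060 episodes for eps_end=.005 and eps_decay=.995. With only 1000 episodes,
# epsilon stays slightly above its floor unless the environment is solved earlier.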
scores = dqn(n_episodes=n_episodes_max, eps_end=eps_end, eps_decay=eps_decay)
n_episodes = len(scores)
print('# episodes: {}'.format(n_episodes))
env.close()
|
import fileinput
import random
import json
from State import State
if __name__ == '__main__':
state = State()
# for line in fileinput.input(bufsize=1):
# changes = json.loads(line)['cells']
# for change in changes:
# state.change(change)
#
# print(state.getJSON())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Zoe Sysinfo - https://github.com/rmed/zoe-sysinfo
#
# Copyright (c) 2015 Rafael Medina García <rafamedgar@gmail.com>
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
sys.path.append('./lib')
import base64
import psutil
import zoe
from datetime import datetime
from zoe.deco import *
HTML_HEADER = """
<html>
<head>
<style>
h2 {
border-bottom: 1px solid #CCCCCC;
margin-top: 20px;
width: 100%;
}
table {
border-collapse: separate;
border-spacing: 5px;
}
</style>
</head><body>"""
HTML_FOOTER = "</body></html>"
@Agent(name="sysinfo")
class Sysinfo:
@Message(tags=["report"])
def complete_report(self, sender, src):
""" Send a complete report to user by mail. """
HTML = "" + HTML_HEADER
# CPU
cpu_html = "<h2>CPU Information</h2><ul>"
cpu_info = self.gather_cpu()
for cpu in cpu_info.keys():
info = cpu_info[cpu]
cpu_html += """
<li>%s<ul>
<li>User: %s %%</li>
<li>System: %s %%</li>
<li>Idle: %s %%</li>
</ul></li>""" % (
cpu.upper(), str(info["user"]),
str(info["system"]), str(info["idle"]))
cpu_html += "</ul>"
# Disks
disk_html = "<h2>Disk Information</h2><ul>"
disk_info = self.gather_disk()
for disk in disk_info.keys():
info = disk_info[disk]
usage = disk_info[disk]["usage"]
disk_html += """
<li>%s<ul>
<li>Mount point: %s</li>
<li>Filesystem: %s</li>
<li>Options: %s</li>
<li>Usage:<ul>
<li>Total: %s</li>
<li>Used: %s</li>
<li>Free: %s</li>
<li>Percentage used: %s %%</li>
</ul></li>
</ul></li>""" % (
disk,
info["mountpoint"],
info["fstype"],
info["opts"],
self.size_fmt(usage["total"]),
self.size_fmt(usage["used"]),
self.size_fmt(usage["free"]),
str(usage["percentage"]))
disk_html += "</ul>"
# Memory
mem_html = "<h2>Memory Information</h2><ul>"
mem_info = self.gather_memory()
for mem_type in mem_info.keys():
info = mem_info[mem_type]
mem_html += """
<li>%s<ul>
<li>Total: %s</li>
<li>Free: %s</li>
<li>Used: %s</li>
<li>Percentage used: %s</li>
</ul></li>""" % (
mem_type.title(),
self.size_fmt(info["total"]),
self.size_fmt(info["free"]),
self.size_fmt(info["used"]),
str(info["percentage"]))
mem_html += "</ul>"
# Processes
proc_html = "<h2>Running processes Information</h2><table>"
proc_html += """<tr>
<th>PID</th>
<th>Name</th>
<th>User</th>
<th>Status</th>
<th>Exec</th>
<th>Resident Memory</th>
<th>Virtual Memory</th></tr>"""
proc_info = self.gather_proc()
for proc in proc_info.keys():
info = proc_info[proc]
proc_html += """<tr>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td></tr>""" % (
str(proc),
str(info["name"]),
str(info["username"]),
str(info["status"]),
str(info["exe"]),
self.size_fmt(info["memory"]["resident"]),
self.size_fmt(info["memory"]["virtual"]))
proc_html += "</table>"
HTML += cpu_html + disk_html + mem_html + proc_html + HTML_FOOTER
attachment = self.attach_html(HTML)
return (self.feedback("Generating report...", sender, src),
self.feedback(attachment, sender, "mail"))
@Message(tags=["cpu"])
def info_cpu(self, sender, src):
""" Send basic information on CPU usage jabber or Telegram. """
cpu_info = self.gather_cpu()
msg = "CPU usage (%s)\n" % self.current_datetime()
for cpu in cpu_info.keys():
info = cpu_info[cpu]
msg += """ %s
--------
User: %s %%
System: %s %%
Idle: %s %%\n\n""" % (
cpu.upper(), str(info["user"]), str(info["system"]),
str(info["idle"]))
return self.feedback(msg, sender, src)
@Message(tags=["disk"])
def info_disk(self, sender, src):
""" Send basic information on disk usage by jabber or Telegram. """
disk_info = self.gather_disk()
msg = "Disk usage (%s)\n" % self.current_datetime()
for disk in disk_info.keys():
info = disk_info[disk]
usage = disk_info[disk]["usage"]
msg += """ %s
--------
Mount point: %s
Filesystem type: %s
Options: %s
Usage:
- Total: %s
- Used: %s
- Free: %s
- Percentage used: %s %%\n\n""" % (
disk, info["mountpoint"], info["fstype"], info["opts"],
self.size_fmt(usage["total"]),
self.size_fmt(usage["used"]),
self.size_fmt(usage["free"]),
str(usage["percentage"]))
return self.feedback(msg, sender, src)
@Message(tags=["mem"])
def info_memory(self, sender, src):
""" Send basic information on memory usage by jabber or Telegram. """
mem_info = self.gather_memory()
msg = "Memory usage (%s)\n" % self.current_datetime()
for mem_type in mem_info.keys():
info = mem_info[mem_type]
msg += """ %s
--------
Total: %s
Free: %s
Used: %s
Percentage used: %s %%\n\n""" % (
mem_type,
self.size_fmt(info["total"]),
self.size_fmt(info["free"]),
self.size_fmt(info["used"]),
str(info["percentage"]))
return self.feedback(msg, sender, src)
def attach_html(self, html):
""" Build the attachment file.
This file is stored in the directory specified in
ZOE_HOME/etc/sysinfo.conf (the directory must exist previously)
"""
now = datetime.today()
filename = "sysinfo_%s_%s_%s_%s_%s_%s.html" % (
str(now.day), str(now.month), str(now.year),
str(now.hour), str(now.minute), str(now.second))
b64 = base64.standard_b64encode(bytes(html, 'utf-8')).decode('utf-8')
return zoe.Attachment(b64, "text/html", filename)
def current_datetime(self):
""" Return the current date and time in human-readable format. """
now = datetime.today()
return now.strftime("%d/%m/%Y - %H:%M:%S")
def feedback(self, data, user, dst):
""" Send feedback to the user
data -- may be text or an attachment for e-mail
user -- user to send the feedback to
dst -- either 'jabber', 'tg' or 'mail'
"""
to_send = {
"dst": "relay",
"relayto": dst,
"to": user
}
if dst == "jabber" or dst == "tg":
to_send["msg"] = data
else:
to_send["html"] = data.str()
to_send["subject"] = "System information report"
return zoe.MessageBuilder(to_send)
def gather_cpu(self):
""" Gather information on system CPU.
Obtains usage percentage for each CPU present for user,
system and idle.
"""
result = {}
cpu_info = psutil.cpu_times_percent(percpu=True)
for index, cpu in enumerate(cpu_info):
result["cpu" + str(index)] = {
"user": cpu.user,
"system": cpu.system,
"idle": cpu.idle
}
return result
def gather_disk(self):
""" Gather information on system disks.
Obtains mounted disk partitions and their usage statistics.
"""
result = {}
partitions = psutil.disk_partitions(all=True)
for partition in partitions:
part_usage = psutil.disk_usage(partition.mountpoint)
result[partition.device] = {
"mountpoint": partition.mountpoint,
"fstype": partition.fstype,
"opts": partition.opts,
"usage": {
"total": part_usage.total,
"used": part_usage.used,
"free": part_usage.free,
"percentage": part_usage.percent
}
}
return result
def gather_memory(self):
""" Gather information on system memory.
Obtains physical RAM and swap statistics.
"""
result = {}
mem_info = psutil.virtual_memory()
swap_info = psutil.swap_memory()
result["ram"] = {
"total": mem_info.total,
"free": mem_info.available,
"used": mem_info.used,
"percentage": mem_info.percent
}
result["swap"] = {
"total": swap_info.total,
"free": swap_info.free,
"used": swap_info.used,
"percentage": swap_info.percent
}
return result
def gather_proc(self):
""" Gather information on running processes.
Obtains pid, name, executable, user running the process,
status of the process and memory usage.
"""
result = {}
for proc in psutil.process_iter():
try:
process = psutil.Process(proc.pid)
proc_data = process.as_dict(
attrs=["name", "exe", "username", "status"])
mem_info = process.memory_info()
proc_data["memory"] = {}
proc_data["memory"]["resident"] = mem_info.rss
proc_data["memory"]["virtual"] = mem_info.vms
result[proc.pid] = proc_data
except psutil.NoSuchProcess:
continue
return result
def size_fmt(self, num):
""" Represents amount of bytes in a better human-readable way.
Obtained from http://stackoverflow.com/a/1094933
"""
for unit in ['B','KiB','MiB','GiB','TiB','PiB','EiB','ZiB']:
if abs(num) < 1024.0:
return "%3.1f %s" % (num, unit)
num /= 1024.0
return "%.1f %s" % (num, 'YiB')
|
import socket
class c:
hp = 50
def client_program(message):
        hp = c.hp  # start from the class-level hit points (a module-level 'hp' does not exist)
host = input('please enter the hostname: \n') # as both code is running on same pc
port = int(input('please enter the port: \n')) # socket server port number
client_socket = socket.socket() # instantiate
client_socket.connect((host, port)) # connect to the server
#message = input(" -> ") # take input
while message.lower().strip() != 'bye':
client_socket.send(message.encode("utf-8")) # send message
data = client_socket.recv(1024).decode("utf-8") # receive response
if 'dmg' in data:
dmg_received = data.split(' ')[-1].strip()
print('damage received:', dmg_received)
hp -= int(dmg_received)
print('hp: ', hp)
data2 = 'enemyhp ' + str(hp)
                client_socket.send(bytes(data2, "utf-8"))  # 'conn' was undefined; reply over the client socket
            elif 'hp' in data and 'ehp' not in data:  # "not 'ehp'" is always False; check membership instead
hp_received = data.split(' ')[-1].strip()
print('hp:', hp_received)
elif 'ehp' in data:
ehp_received = data.split(' ')[-1].strip()
print('enemy hp:', ehp_received)
print('Received from server: ' + data) # show in terminal
#message = ''
#message = input(" -> ") # again take input
client_socket.close() # close the connection
if __name__ == '__main__':
c.client_program('dmg 567')
|
# Copyright (C) 2015-2016 Ammon Smith and Bradley Cai
# Available for use under the terms of the MIT License.
__all__ = [
'depends',
'config_fields',
'run'
]
from do_target import targetobj
from testparser import TestParser
import re
import os
import testobj
depends = [
'build',
]
config_fields = {
'fail-fast': bool,
'show-type': bool,
'show-skipped': bool,
'test-dir': str,
'recursive': bool,
'failed-test-log': (str, type(None)),
'ignore-tests': list,
'regex': dict,
'regex-order': list,
}
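# Illustrative config fragment (key names invented for the example): 'regex' maps a
# name to that expression's settings and 'regex-order' lists the same names in the
# order they should run, e.g. regex = {'output': {...}} with regex-order = ['output'].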
TEST_FILE_REGEX = re.compile(r'.+\.test', re.IGNORECASE)
def run(tracker):
if len(tracker.config['regex'].keys()) != len(tracker.config['regex-order']) or \
not set(tracker.config['regex'].keys()).issubset(tracker.config['regex-order']):
tracker.print_error("Configuration fields 'regex' and 'regex-order' don't match up.")
tracker.print_error("'regex-order' should be an ordered list of the keys in 'regex'.")
tracker.terminate()
directory = 'tests'
if not os.path.isdir(directory):
directory = '../tests'
tracker.print_activity("Switching directory to '%s'" % directory)
os.chdir(directory)
    regex = tracker.run_job(job_get_regular_expression_order, "Getting order of regular expressions")
test_files = tracker.run_job(job_get_test_files, "Finding tests")
tracker.config['ignore-tests'] = set(tracker.config['ignore-tests'])
if not test_files:
tracker.print_error("No *.test files found.")
tracker.terminate()
tests = tracker.run_job(job_parse_tests, "Parsing %d test file%s" %
(len(test_files), plural(len(test_files))), test_files, regex)
passed, skipped, testcount, testsrun = \
tracker.run_job(job_run_tests, "Running tests", tests)
tracker.run_job(job_print_results, "Test results", \
passed, skipped, testcount, testsrun)
### Defined jobs ###
def job_get_regular_expression_order(tracker):
regex = []
counter = 1
for name in tracker.config['regex-order']:
tracker.print_operation(counter, name)
        if name not in tracker.config['regex'].keys():
            tracker.print_error(
                "Configuration error: regex '%s' mentioned in 'regex-order' but not specified in 'regex'."
                % name)
            tracker.terminate()
        regex.append(testobj.TestableRegex(name, tracker.config['regex'][name], tracker))
        counter += 1
return tuple(regex)
def job_get_test_files(tracker):
gen = os.walk('.', followlinks=True)
test_files = []
for dirpath, dirnames, filenames in gen:
for filename in filenames:
if TEST_FILE_REGEX.match(filename):
test_files.append(os.path.join(dirpath, filename))
if not tracker.config['recursive']:
break
return sorted(test_files)
def job_parse_tests(tracker, test_files, regex):
parser = TestParser()
tests = []
if not test_files:
tracker.print_string("(nothing to do)")
for test_file in test_files:
name = os.path.basename(test_file)
if skip_test(test_file, tracker.config['ignore-tests']):
if tracker.config['show-skipped']:
tracker.print_operation('SKIP', name)
tests.append(testobj.SkipTest(name))
continue
tracker.print_operation("ADD", name)
try:
with open(test_file, 'r') as fh:
data = parser.parse(name, fh.readlines())
except IOError as err:
tracker.print_error("Unable to open '%s': %s." % (test_file, err))
tracker.failure()
# Take the collected data and build the test object
test = tracker.run_job(job_build_test, None, data, regex)
if test:
tests.append(test)
return tests
def job_build_test(tracker, data, regex):
if 'Name' not in data.keys():
tracker.print_error("%s: No name specified." % data['filename'])
tracker.failure()
return None
if len(data['Name']) > 1:
tracker.print_error("%s: Multiple names specified." % data['filename'])
tracker.failure()
return None
data['Name'] = data['Name'][0]
if 'Type' not in data.keys():
tracker.print_error("%s: No test type specified." % data['filename'])
tracker.failure()
return None
if len(data['Type']) > 1:
tracker.print_error("%s: Multiple test types specified." % data['filename'])
tracker.failure()
return None
data['Type'] = data['Type'][0]
if 'Input' not in data.keys():
tracker.print_error("%s: No input value specified." % data['filename'])
tracker.failure()
return None
if len(data['Input']) > 1:
tracker.print_error("%s: Multiple input values specified." % data['filename'])
tracker.failure()
return None
data['Input'] = data['Input'][0] + '\n'
if 'EscapeStrings' in data.keys():
if len(data['EscapeStrings']) > 1:
tracker.print_error("%s: Setting 'EscapeStrings' set multiple times." % data['filename'])
tracker.failure()
return None
data['EscapeStrings'] = get_boolean_option(tracker, data['filename'], data['EscapeStrings'][0])
if data['EscapeStrings'] is None:
return None
else:
data['EscapeStrings'] = False
for key in data.keys():
match = testobj.MULTIPLE_REGEX_FIELD_NAME_REGEX.match(key)
if match:
name = match.group(1).lower()
            # ``regex`` is a tuple of TestableRegex objects, so look the entry up by name
            named_regex = next((r for r in regex if r.name == name), None)
            if named_regex is not None:
                named_regex.multiple = get_boolean_option(tracker, data['filename'], data[key])
                if named_regex.multiple is None:
                    return None
for regex_value in regex:
regex_value.group = tracker.config['regex'][regex_value.name].get('group', 0)
try:
return testobj.TEST_BUILDERS[data['Type']](data, regex)
except KeyError:
tracker.print_error("%s: Unknown test type: '%s'." % data['filename'])
tracker.print_notice("Supported test types: %s." % (', '.join(testobj.TEST_BUILDERS.keys())))
tracker.failure()
return None
def job_run_tests(tracker, tests):
passed = 0
skipped = 0
testcount = len(tests)
testsrun = testcount
testobj.Test.set_up_error_log(tracker.config['failed-test-log'])
for test in tests:
result = test.run()
if tracker.config['show-type']:
testinfo = "%s (%s)" % (test.name, test.type)
else:
testinfo = test.name
if result is None:
tracker.print_operation("SKIP", testinfo)
skipped += 1
testsrun -= 1
elif result is True:
tracker.print_operation("PASS", testinfo, targetobj.GREEN)
passed += 1
elif result is False:
tracker.print_operation("FAIL", testinfo, targetobj.RED)
if tracker.config['fail-fast']:
passed = -1
                return passed, skipped, testcount, testsrun
else:
tracker.print_error("Invalid test return value: %s\n" % result)
            tracker.terminate()
return passed, skipped, testcount, testsrun
def job_print_results(tracker, passed, skipped, testcount, testsrun):
if passed < testsrun and tracker.args.usecolor:
label = "%sRESULTS%s" % (tracker.get_color(tracker.RED), tracker.end_color())
else:
label = "RESULTS"
tracker.print_operation(label, "Ran %d test%s." % (testsrun, plural(testsrun)))
if testsrun == 0:
tracker.print_notice("No tests were run.")
return
if tracker.config['fail-fast']:
if passed == -1:
tracker.print_operation(label, "A test failed, so the suite was aborted.")
else:
tracker.print_operation(label, "All tests passed.")
else:
tracker.print_operation(label,
"%d / %d (%.1f%%) of tests passed." %
(passed, testsrun, 100.0 * passed / testsrun))
if passed < testsrun and tracker.config['failed-test-log']:
tracker.print_notice("A report for failed tests was written in '%s'." % \
os.path.abspath(tracker.config['failed-test-log']))
tracker.failure()
### Helper functions ###
def plural(num):
return '' if num == 1 else 's'
def skip_test(test, ignore):
return test in ignore or \
test[:-5] in ignore or \
os.path.basename(test) in ignore or \
os.path.basename(test[:-5]) in ignore
def get_boolean_option(tracker, filename, string):
string = string.lower()
if string in ('true', 'yes', 'enable', 'enabled', 'set', '1'):
return True
elif string in ('false', 'no', 'disable', 'disabled', 'unset', '0'):
return False
else:
tracker.print_error('%s: Unknown boolean value: \'%s\'.' % (filename, string))
tracker.failure()
return None
|
import jsonlines
import json
from nltk.tokenize import sent_tokenize
# def preproc(split):
# data = []
# with jsonlines.open(f'{split}.txt') as reader:
# for obj in reader:
# # print(obj.keys())
# # print()
# # print(obj['metadata'])
# # print("SUMMURY", summury[session_id][f"{obj['metadata']['initial_data_id']}"])
# # print("SPEAKER 1",obj['personas'][0])
# # print("SPEAKER 2",obj['personas'][1])
# # print(obj['metadata'])
# # print(obj['init_personas'])
# dial = {"id":obj['metadata']['initial_data_id'],"meta":[],"dialogue":[]}
# dial["meta"] = {"user":obj['personas'][0],"assistant":obj['personas'][1]}
# temp = []
# for d in obj['dialog']:
# if d['id'] == "Speaker 1":
# temp = [d['text']]
# elif d['id'] == "Speaker 2":
# if len(temp)==0:
# print(obj['metadata']['initial_data_id'])
# else:
# temp.append(d['text'])
# dial['dialogue'].append(temp)
# # # print(f"{d['id']}: {d['text']}")
# # input()
# data.append(dial)
# return data
# for split in ["valid","test"]:
# for session_id in range(2,6):
# data = preproc(f"msc/msc_dialogue/session_{session_id}/{split}")
# with open(f'../msc/session-{session_id}-{split}.json', 'w') as fp:
# json.dump(data, fp, indent=4)
# summury_session = json.load(open("msc/msc_dialogue/sessionlevel_summaries_subsample5.json"))
# summury = json.load(open("msc/msc_dialogue/summaries_subsample5.json"))
def preproc(split):
data = []
with jsonlines.open(f'{split}.txt') as reader:
for obj in reader:
dial = {"id":obj['initial_data_id'],"user_memory":[],"dialogue":[]}
temp = []
for d in obj['dialog']:
# print(f"{d['id']}: {d['text']}")
if d['id'] == "bot_0":
temp = [d['text']]
if 'persona_text' in d:
# print("TLDR: ", d['persona_text'])
dial['user_memory'].append([d['persona_text']])
else:
# print("TLDR: None")
dial['user_memory'].append([])
elif d['id'] == "bot_1":
temp.append(d['text'])
dial['dialogue'].append(temp)
if len(dial["dialogue"]) != len(dial["user_memory"]):
temp.append("")
dial['dialogue'].append(temp)
assert len(dial["dialogue"]) == len(dial["user_memory"])
data.append(dial)
return data
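# Illustrative shape of one parsed record (values invented for the example):
# {"id": "...", "user_memory": [["enjoys hiking"], []],
#  "dialogue": [["bot_0 turn", "bot_1 turn"], ...]}   # one memory list per dialogue pair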
for split in ["valid","test"]:
print(split)
for session_id in range(1,5):
print(session_id)
data = preproc(f"msc/msc_personasummary/session_{session_id}/{split}")
with open(f'../msc/parse-session-{session_id}-{split}.json', 'w') as fp:
json.dump(data, fp, indent=4)
|
from java.util import Map
from geoscript.util import xml
from geoscript.feature.feature import Feature
def writeGML(f, ver=2, format=True, bounds=False, xmldecl=False, nsprefix='gsf'):
"""
Writes a :class:`Feature <geoscript.feature.Feature>` object as GML.
*ver* specifies the gml version to encode. Supported versions include 2, 3,
and 3.2.
*format* specifies whether to format or pretty print the result.
*bounds* specifies whether to include feature bounds in the result.
*xmldecl* specifies whether to include the XML declaration in the result.
*nsprefix* specifies the prefix to be mapped to the namespace :attr:`uri <geoscript.feature.schema.Schema.uri>` for the feature schema.
"""
el = (xml.gml.uri(ver), "_Feature" if ver < 3.2 else "AbstractFeature")
return xml.gml.encode(f._feature, el, ver, format, bounds, xmldecl,
{nsprefix: f.schema.uri})
def readGML(input, ver=2):
"""
Reads a :class:`Feature <geoscript.feature.Feature>` from GML.
*input* is the GML to read specified as a str, file, or some other input
stream.
*ver* specifies the gml version to encode. Supported versions include 2, 3,
and 3.2.
"""
obj = xml.gml.parse(input, ver)
if isinstance(obj, Map):
# turn map into feature
fid = obj.remove('fid')
if not fid:
fid = obj.remove('id')
obj = Feature(dict(obj), fid)
return obj
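# Hypothetical round trip under the API above (requires an existing Feature `f`;
# the variable names are illustrative, not taken from this module's tests):
# gml = writeGML(f, ver=3)
# f2 = readGML(gml, ver=3)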
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
return HttpResponse('This is a bad request. Start with the music route.')
def music(request):
return HttpResponse('blues')
def artist1(request):
return HttpResponse('Muddy Waters')
def artist2(request):
return HttpResponse('Fats Domino')
def artist3(request):
return HttpResponse('B.B. King')
|
# Generated by Django 2.2.5 on 2020-02-27 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('record', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='record',
name='item',
),
migrations.AddField(
model_name='record',
name='quantity',
field=models.PositiveIntegerField(default=1),
),
migrations.AlterField(
model_name='record',
name='time',
field=models.DateTimeField(auto_now_add=True),
),
migrations.DeleteModel(
name='Item',
),
]
|
import sys
sys.path.append("..")
from pyinsights import mlmodels, dataread, sklsetups
|
from database import db
class Naf(db.Model):
__tablename__ = "naf"
id_naf = db.Column(db.Integer, primary_key=True)
code_sous_classe = db.Column(db.String(255))
libelle_sous_classe = db.Column(db.String(255))
code_sous_classe_short = db.Column(db.String(255))
code_classe = db.Column(db.String(255))
libelle_classe = db.Column(db.String(255))
code_classe_short = db.Column(db.String(255))
code_groupe = db.Column(db.String(255))
libelle_groupe = db.Column(db.String(255))
code_groupe_short = db.Column(db.String(255))
code_division = db.Column(db.String(255))
libelle_division = db.Column(db.String(255))
code_section = db.Column(db.String(255))
libelle_section = db.Column(db.String(255))
def __init__(self, code_sous_classe, libelle_sous_classe, code_sous_classe_short, code_classe, libelle_classe, code_classe_short, code_groupe, libelle_groupe, code_groupe_short, code_division, libelle_division, code_section, libelle_section):
self.code_sous_classe = code_sous_classe
self.libelle_sous_classe = libelle_sous_classe
self.code_sous_classe_short = code_sous_classe_short
self.code_classe = code_classe
self.libelle_classe = libelle_classe
self.code_classe_short = code_classe_short
self.code_groupe = code_groupe
self.libelle_groupe = libelle_groupe
self.code_groupe_short = code_groupe_short
self.code_division = code_division
self.libelle_division = libelle_division
self.code_section = code_section
self.libelle_section = libelle_section
def __repr__(self):
return '%s/%s/%s/%s/%s/%s/%s/%s/%s/%s/%s/%s/%s' % (self.code_sous_classe, self.libelle_sous_classe, self.code_sous_classe_short, self.code_classe, self.libelle_classe, self.code_classe_short, self.code_groupe, self.libelle_groupe, self.code_groupe_short, self.code_division, self.libelle_division, self.code_section, self.libelle_section)
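# Hypothetical usage sketch (assumes the Flask-SQLAlchemy session from
# `database.db` is configured; field values are placeholders):
#
#   row = Naf("01.11Z", "sous-classe label", "0111Z",
#             "01.11", "classe label", "0111",
#             "01.1", "groupe label", "011",
#             "01", "division label",
#             "A", "section label")
#   db.session.add(row)
#   db.session.commit()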
|
# TensorBoxPy3 https://github.com/SMH17/TensorBoxPy3
import tensorflow as tf
from train import build_forward
import cv2
import argparse
import random
import json
import os
def create_meta_graph(args, H):
tf.reset_default_graph()
x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3])
if H['use_rezoom']:
pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
grid_area = H['grid_height'] * H['grid_width']
pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2])
if H['reregress']:
pred_boxes = pred_boxes + pred_boxes_deltas
else:
pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
tf.add_to_collection('placeholders', x_in)
tf.add_to_collection('vars', pred_boxes)
tf.add_to_collection('vars', pred_confidences)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.save(sess, args.output)
tf.train.write_graph(sess.graph.as_graph_def(), '', "{}.pb".format(args.output))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--output', required=True)
parser.add_argument('--hypes', required=True)
parser.add_argument('--gpu', default=0)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
with open(args.hypes, 'r') as f:
H = json.load(f)
create_meta_graph(args, H)
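# Hypothetical restore sketch (TF1-style API; assumes the checkpoint prefix
# passed via --output, e.g. "out", so the meta graph file is "out.meta"):
#
#   with tf.Session() as sess:
#       saver = tf.train.import_meta_graph(args.output + '.meta')
#       saver.restore(sess, args.output)
#       x_in = tf.get_collection('placeholders')[0]
#       pred_boxes, pred_confidences = tf.get_collection('vars')[:2]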
|
import torch
from torch import nn
from torch.nn import functional as F
class Encoder(nn.Module):
def __init__(
self, in_channels, channels=(32, 32, 64, 64), n_latent=20, n_hidden=256, size=64
):
super().__init__()
self.size = size
in_ch = in_channels
layers = []
for ch in channels:
layers.append(nn.Conv2d(in_ch, ch, 4, stride=2, padding=1))
layers.append(nn.ReLU())
in_ch = ch
self.conv = nn.Sequential(*layers)
out_size = size // (2 ** len(channels))  # each stride-2 conv halves the spatial size
self.linear = nn.Sequential(
nn.Linear(out_size ** 2 * channels[-1], n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, n_latent * 2),
)
def sample(self, mean, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mean + std * eps
def forward(self, input):
if input.shape[2] != self.size:
input = F.interpolate(
input, size=(self.size, self.size), mode='bilinear', align_corners=False
)
out = self.conv(input)
out = out.view(out.shape[0], -1)
out = self.linear(out)
mean, logvar = out.chunk(2, dim=1)
return self.sample(mean, logvar), mean, logvar
class Decoder(nn.Module):
def __init__(
self, out_channels, channels=(64, 32, 32), n_latent=20, n_hidden=256, size=64
):
super().__init__()
start_size = size // (2 ** (len(channels) + 1))
self.start_size = start_size
self.linear = nn.Sequential(
nn.Linear(n_latent, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, (start_size ** 2) * channels[0]),
nn.ReLU(),
)
layers = []
in_ch = channels[0]
for ch in channels:
layers.append(nn.ConvTranspose2d(in_ch, ch, 4, stride=2, padding=1))
layers.append(nn.ReLU())
in_ch = ch
layers.append(nn.ConvTranspose2d(in_ch, out_channels, 4, stride=2, padding=1))
self.conv = nn.Sequential(*layers)
def forward(self, input):
out = self.linear(input)
out = out.view(out.shape[0], -1, self.start_size, self.start_size)
out = self.conv(out)
return out
class VAE(nn.Module):
def __init__(
self,
in_channels,
enc_channels=(32, 32, 64, 64),
dec_channels=(64, 32, 32),
n_latent=20,
n_hidden=256,
size=64,
):
super().__init__()
self.enc = Encoder(in_channels, enc_channels, n_latent, n_hidden, size)
self.dec = Decoder(in_channels, dec_channels, n_latent, n_hidden, size)
def forward(self, input, sample=True):
latent, mean, logvar = self.enc(input)
if not sample:
latent = mean
out = self.dec(latent)
return out, mean, logvar
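if __name__ == "__main__":
    # Minimal training-step sketch (hyperparameters and the random batch are
    # assumed for illustration; not part of the original model definitions).
    model = VAE(in_channels=3)
    optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    batch = torch.randn(8, 3, 64, 64)
    recon, mean, logvar = model(batch)
    recon_loss = F.mse_loss(recon, batch)
    kl_loss = -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp())
    loss = recon_loss + kl_loss
    optim.zero_grad()
    loss.backward()
    optim.step()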
|
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ChromeOS family boards."""
from cros.factory.device.boards import linux
from cros.factory.device import device_types
from cros.factory.utils import type_utils
class ChromeOSBoard(linux.LinuxBoard):
"""Common interface for ChromeOS boards."""
@device_types.DeviceProperty
def audio(self):
from cros.factory.device.audio import utils as audio_utils
return audio_utils.CreateAudioControl(
self, controller=audio_utils.CONTROLLERS.ALSA)
@device_types.DeviceProperty
def bluetooth(self):
from cros.factory.device.chromeos import bluetooth
return bluetooth.ChromeOSBluetoothManager(self)
@device_types.DeviceProperty
def camera(self):
from cros.factory.device.chromeos import camera
return camera.ChromeOSCamera(self)
@device_types.DeviceProperty
def display(self):
from cros.factory.device.chromeos import display
return display.ChromeOSDisplay(self)
@device_types.DeviceProperty
def fan(self):
from cros.factory.device import fan
return fan.ECToolFanControl(self)
@device_types.DeviceProperty
def power(self):
from cros.factory.device import power
return power.ChromeOSPower(self)
@device_types.DeviceProperty
def wifi(self):
from cros.factory.device import wifi
return wifi.WiFiChromeOS(self)
@device_types.DeviceProperty
def vpd(self):
from cros.factory.device import vpd
return vpd.ChromeOSVitalProductData(self)
@type_utils.Overrides
def GetStartupMessages(self):
res = super(ChromeOSBoard, self).GetStartupMessages()
mosys_log = self.CallOutput(
['mosys', 'eventlog', 'list'], stderr=self.STDOUT)
if mosys_log:
res['mosys_log'] = mosys_log
return res
|
import inspect
import decorateme
from mandos.model.apis.caching_pubchem_api import CachingPubchemApi
from mandos.model.apis.chembl_api import ChemblApi
from mandos.model.apis.chembl_scrape_api import (
CachingChemblScrapeApi,
ChemblScrapeApi,
QueryingChemblScrapeApi,
)
from mandos.model.apis.g2p_api import CachingG2pApi, G2pApi
from mandos.model.apis.hmdb_api import CachingHmdbApi, HmdbApi, QueryingHmdbApi
from mandos.model.apis.pubchem_api import PubchemApi
from mandos.model.apis.pubchem_similarity_api import (
CachingPubchemSimilarityApi,
QueryingPubchemSimilarityApi,
)
from mandos.model.apis.querying_pubchem_api import QueryingPubchemApi
from mandos.model.apis.similarity_api import SimilarityApi
from mandos.model.utils.setup import logger
@decorateme.auto_utils()
class Apis:
Pubchem: PubchemApi = None
Chembl: ChemblApi = None
ChemblScrape: ChemblScrapeApi = None
G2p: G2pApi = None
Hmdb: HmdbApi = None
Similarity: SimilarityApi = None
@classmethod
def set(
cls,
*,
chembl: ChemblApi,
pubchem: PubchemApi,
g2p: G2pApi,
hmdb: HmdbApi,
chembl_scrape: ChemblScrapeApi,
similarity: SimilarityApi,
) -> None:
cls.Chembl = chembl
cls.Pubchem = pubchem
cls.G2p = g2p
cls.Hmdb = hmdb
cls.ChemblScrape = chembl_scrape
cls.Similarity = similarity
logger.debug("Set custom API singletons")
cls.describe()
@classmethod
def set_default(
cls,
*,
pubchem: bool = True,
chembl: bool = True,
hmdb: bool = True,
g2p: bool = True,
scrape: bool = True,
similarity: bool = True,
) -> None:
if chembl:
from chembl_webresource_client.new_client import new_client as _Chembl
cls.Chembl = ChemblApi.wrap(_Chembl)
if pubchem:
cls.Pubchem = CachingPubchemApi(QueryingPubchemApi())
if hmdb:
cls.Hmdb = CachingHmdbApi(QueryingHmdbApi())
if g2p:
cls.G2p = CachingG2pApi()
if scrape:
cls.ChemblScrape = CachingChemblScrapeApi(QueryingChemblScrapeApi())
if similarity:
cls.Similarity = CachingPubchemSimilarityApi(QueryingPubchemSimilarityApi())
logger.debug("Set default singletons")
cls.describe()
@classmethod
def describe(cls) -> None:
for v in [
f" {m} = {getattr(cls, m)}"
for m in dir(cls)
if m[0].isupper() and not inspect.ismethod(getattr(cls, m))
]:
logger.debug(v)
__all__ = ["Apis"]
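# Hypothetical usage sketch (network access and cache locations are assumed;
# only the ChEMBL client is enabled here for illustration):
#
#   Apis.set_default(pubchem=False, chembl=True, hmdb=False, g2p=False,
#                    scrape=False, similarity=False)
#   chembl = Apis.Chembl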
|
"""Provide custom gateway for tests changing apicast parameters."""
import pytest
from weakget import weakget
from testsuite.gateways import gateway
from testsuite.gateways.apicast.template import TemplateApicast
from testsuite.utils import blame
from testsuite.utils import warn_and_skip
@pytest.fixture(scope="module", autouse=True)
def require_openshift(testconfig):
"""These tests require openshift available"""
if not weakget(testconfig)["openshift"]["servers"]["default"] % False:
warn_and_skip("All retry_policy tests skipped due to missing openshift")
@pytest.fixture(scope="module")
def staging_gateway(request):
"""Deploy self-managed template based apicast gateway."""
gw = gateway(kind=TemplateApicast, staging=True, name=blame(request, "gw"))
request.addfinalizer(gw.destroy)
gw.create()
return gw
|
#!/bin/python
from . import halt, Settings, interface
from time import sleep
import random
from threading import Thread
from .evolution_generations import Generations
import datetime
import os
import waitress
import promoterz
from version import VERSION
def launchWebEvolutionaryInfo():
print("WEBSERVER MODE")
webpageTitle = "japonicus evolutionary statistics - v%.2f" % VERSION
webApp, webServer = promoterz.webServer.core.build_server(webpageTitle)
webServerProcess = Thread(
target=waitress.serve,
kwargs={
"app": webServer,
"listen": "0.0.0.0:8182"
}
)
webServerProcess.start()
return webApp
def buildSettingsOptions(optionparser, settingSubsets):
settings = Settings.getSettings(SettingsFiles=settingSubsets)
# PARSE GENCONF & DATASET COMMANDLINE ARGUMENTS;
for settingSubset in settingSubsets:
parser = promoterz.metaPromoterz.generateCommandLineArguments(
optionparser,
settings[settingSubset])
options, args = parser.parse_args()
for settingSubset in settingSubsets:
settings[settingSubset] =\
promoterz.metaPromoterz.applyCommandLineOptionsToSettings(
options,
settings[settingSubset]
)
return settings, options
def loadEvaluationModule():
req = [
"validateSettings",
"showStatistics"
]
pass
class JaponicusSession():
def __init__(self, EvaluationModule, settings, options):
# ADDITIONAL MODES;
markzero_time = datetime.datetime.now()
print()
# show title;
interface.showTitleDisclaimer(settings['backtest'], VERSION)
self.web_server = launchWebEvolutionaryInfo()\
if options.spawn_web else None
sleep(1)
if not EvaluationModule.validateSettings(settings):
exit(1)
# --SELECT STRATEGY;
if options.random_strategy:
Strategy = ""
GekkoStrategyFolder = os.listdir(settings['Global']['gekkoPath'] + '/strategies')
while Strategy + '.js' not in GekkoStrategyFolder:
if Strategy:
print(
    "Strategy %s is described in settings but was not found in the strategies folder." %
    Strategy
)
Strategy = random.choice(list(settings['strategies'].keys()))
print("> %s" % Strategy)
elif options.strategy:
Strategy = options.strategy
elif not options.skeleton:
print("No strategy specified! Use --strat or go --help")
exit(1)
# --LAUNCH GENETIC ALGORITHM;
if options.genetic_algorithm:
japonicusOptions = {
"GenerationMethod": None,
"TargetParameters": None
}
japonicusOptions["GenerationMethod"] =\
'chromosome' if options.chromosome_mode else 'oldschool'
if options.skeleton:
EvaluationMode = 'indicator'
AllIndicators = Settings.getSettings()['indicators']
TargetParameters = Settings.getSettings()['skeletons'][options.skeleton]
for K in AllIndicators.keys():
if type(AllIndicators[K]) != dict:
TargetParameters[K] = AllIndicators[K]
elif AllIndicators[K]['active']:
TargetParameters[K] = AllIndicators[K]
TargetParameters[K]['active'] = (0, 1)
japonicusOptions["TargetParameters"] = TargetParameters
if not TargetParameters:
print("Bad configIndicators!")
exit(1)
else:
EvaluationMode = Strategy
# READ STRATEGY PARAMETER RANGES FROM TOML;
try:
TOMLData = promoterz.TOMLutils.preprocessTOMLFile(
"strategy_parameters/%s.toml" % Strategy
)
except FileNotFoundError:
print("Failure to find strategy parameter rules for " +
"%s at ./strategy_parameters" % Strategy)
gekkoParameterPath = "%s/config/strategies/%s.toml" %\
(settings['Global']['gekkoPath'], Strategy)
print("Trying to locate strategy parameters at %s" %
gekkoParameterPath)
TOMLData = promoterz.TOMLutils.preprocessTOMLFile(
gekkoParameterPath)
japonicusOptions["TargetParameters"] =\
promoterz.TOMLutils.TOMLToParameters(TOMLData)
# RUN ONE EQUAL INSTANCE PER REPEATER NUMBER SETTINGS,
# SEQUENTIALLY...
for s in range(options.repeater):
Generations(
EvaluationModule,
japonicusOptions,
EvaluationMode,
settings,
options,
web=self.web_server
)
deltatime = datetime.datetime.now() - markzero_time
print("Run took %i seconds." % deltatime.seconds)
if options.spawn_web:
print('Statistics info server still runs...')
|
from contextlib import contextmanager
import logging
LOG = logging.getLogger(__name__)
try:
    import _winreg as wr
except Exception as e:
    LOG.error("Can't load win reg to register commands: %s" % e)
@contextmanager
def closing(root, path, style, value=None):
open_key = None
try:
open_key = wr.OpenKey(root, path, 0, style)
yield open_key
except:
reg = wr.CreateKey(root, path)
if value:
wr.SetValueEx(reg, None, 0, wr.REG_SZ, value)
wr.CloseKey(reg)
open_key = wr.OpenKey(root, path, 0, style)
yield open_key
finally:
if open_key is not None:
wr.CloseKey(open_key)
def register_extension(ext=None, name="Open with python", command="python %1", shift_required=False):
"""
register_extension allows you to have python do registry editing to add commands to the context menus in Windows Explorer.
:param str|None ext: String extension that you want to be affected by this change. None if you want it to affect all.
:param str name: User facing display name for this.
:param str command: The command that you want to run when the menu item is clicked. %1 is the object that you have selected being passed in.
:param bool shift_required: Does this command require shift to be held to show up?
:return: True if it succeeded.
:rtype: bool
"""
regPath = ext
value = None
if ext is None:
    regPath = "*"
else:
    # e.g. ".txt" -> "Txt_pyReg"
    value = ext[1].upper() + ext[2:] + "_pyReg"
with closing(wr.HKEY_CLASSES_ROOT, regPath, wr.KEY_READ, value=value) as key:
val = wr.QueryValue(key, None)
with closing(wr.HKEY_CLASSES_ROOT, val, wr.KEY_READ) as key:
with closing(key, "shell", wr.KEY_READ) as key_shell:
with closing(key_shell, name, wr.KEY_READ) as key_title:
if shift_required:
wr.SetValueEx(key_title, "Extended", 0, wr.REG_SZ, "")
with closing(key_title, "command", wr.KEY_SET_VALUE) as key_command:
wr.SetValueEx(key_command, None, 0, wr.REG_SZ, command)
return True
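# Hypothetical usage sketch (requires a Windows interpreter with permission to
# write to HKEY_CLASSES_ROOT; extension and command are illustrative):
#
#   register_extension(ext=".txt", name="Open with python",
#                      command='python "%1"', shift_required=False)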
|
# -*- coding: utf-8 -*-
__author__ = 'Charles Keith Brown'
__email__ = 'charles.k.brown@gmail.com'
__version__ = '0.1.1'
|
import time
import datetime
from train_v2 import Train
from validation import Validation
import parameters as param
from build_dataset import BuildDataset
from testing import Testing
class Main:
def __init__(self):
# print('Mode: ', param.mode)
self.run(mode=param.mode, train_restored_saved_model=param.train_restored_saved_model)
#@staticmethod
def run(self, mode, train_restored_saved_model):
start_time = time.time()
print('Mode: ', mode)
print('Model_id: ', param.model_id)
# building dataset
if mode in (param.mode_list[1], param.mode_list[2], param.mode_list[3], param.mode_list[6]):
BuildDataset(mode=param.mode)
# Training
elif mode == param.mode_list[4]:
Train()
# Testing
elif mode == param.mode_list[5]:
Testing(testing_mode='simple')
elif mode == param.mode_list[7]:
Testing(testing_mode='simple_duc')
else:
print('Please specify the mode')
print('\nTime: {}\t({:.3f}sec)'.format((datetime.timedelta(seconds=time.time() - start_time)), time.time() - start_time))
if __name__ == "__main__":
Main()
|
# -*- coding: utf-8 -*-
"""
Calculation of the dry deposition velocity of aerosol particles on the surface
of a vegetation.
In CFD models, the deposition of the aerosol may be included in the concentration
equation by an additional term,
dc/dt = - LAD ud c,
where c is the mass concentration, LAD is one-sided leaf area density,
and ud is the deposition velocity as calculated here.
This package serves to calculate ud, given the state of the atmosphere, the
properties of the vegetation, and the properties of the particles.
"""
from math import exp, pi, sqrt, log
import numpy as np
# Constants
KB = 1.3806488e-23 # Boltzmann constant
LAMBD = 0.066*1e-6 # Mean free path in air
GRAV = 9.81 # Gravitational constant
def slip_correction_factor(dp):
"""Slip correction factor for gravitational settling of particle with
diameter 'dp'"""
return 1. + 2*LAMBD/dp*(1.257 + 0.4*exp(-1.1*dp/(2*LAMBD)))
def browndif_coeff(environ, particle):
"""Coefficient of Brownian diffusion"""
T = environ["T"]
mua = environ["mu"]
dp = particle["d"]
Cc = slip_correction_factor(dp)
DB = KB*T*Cc/(3*pi*mua*dp)
return DB
def settling_velocity(environ, particle):
"""Return settling velocity of the particle in the air."""
rhop = particle["rho"]
dp = particle["d"]
mua = environ["mu"]
Cc = slip_correction_factor(dp)
return (rhop*(dp**2)*GRAV*Cc)/(18*mua)
class DepVelModel():
"""Base class for deposition velocity models"""
@classmethod
def get_components(cls):
"""Return list of names of deposition velocity components"""
return []
def get(self, comp):
"""Return value of the specified component"""
return getattr(self, comp)()
class PetroffModel(DepVelModel):
"""Vegetation model following (Petroff et al., 2008) for conifer trees and
(Petroff et al., 2009) for broadleaves.
In both cases, Dirac leaf size distribution is assumed.
Parameters:
environ: 'mu', 'rho', 'u', 'uf'
particle: 'd', 'rho'
vegetation: 'de', 'leafType', 'leafDist'
"""
def __init__(self, environ, particle, vegetation):
self.mua = environ["mu"]
self.rhoa = environ["rho"]
self.nua = self.mua/self.rhoa
self.u = environ["u"]
self.uf = environ["uf"]
self._set_veget_params(vegetation)
self.dp = particle["d"]
self.rhop = particle["rho"]
# Schmidt number
self.Sc = self.nua/browndif_coeff(environ, particle)
# Settling velocity
self.us = settling_velocity(environ, particle)
@classmethod
def get_components(cls):
return ["total", "brownian_diffusion", "sedimentation", "interception",
"impaction", "turbulent_impaction"]
def _set_veget_params(self, vegetation):
self.de = vegetation["de"]
self.leaf_type = vegetation["leafType"]
leaf_dist = vegetation["leafDist"]
dist_needle = {"horizontal": (2./pi**2, 1./pi),
"plagiophile": (0.27, 0.22),
"vertical": (1./pi, 0.),
"uniform": (0.27, 2./pi**2)}
dist_broadleaf = {"horizontal": (0., 0.5),
"plagiophile": (0.216, 0.340),
"vertical": (1./pi, 0.),
"uniform": (2/pi**2, 1./pi)}
if self.leaf_type == "needle":
self.kx, self.kz = dist_needle[leaf_dist]
self.Cb = 0.467
self.beta = 0.6
elif self.leaf_type == "broadleaf":
self.kx, self.kz = dist_broadleaf[leaf_dist]
self.Cb = 0.664
self.beta = 0.47
else:
raise ValueError("Unknown leafType %s" % self.leaf_type)
def brownian_diffusion(self):
"""Brownian diffusion"""
Re = self.u*self.de/self.nua
# See the article for constants
nb = 0.5
return 2 * self.u*self.Cb*(self.Sc**(-2./3.))*(Re**(nb - 1))
def sedimentation(self):
"""Sedimentation"""
return 2 * self.kz*self.us
def interception(self):
"""Interception"""
if self.leaf_type == "needle":
return 2 * (2*self.u*self.kx*(self.dp/self.de))
elif self.leaf_type == "broadleaf":
return 2 * (self.u*self.kx/2.0*(self.dp/self.de)
*(2.0 + log(4.0*self.de/self.dp)))
else:
raise ValueError("Unknown leafType %s" % self.leaf_type)
def impaction(self):
"""Impaction"""
Cc = slip_correction_factor(self.dp)
tauP = self.rhop*Cc*(self.dp**2)/(18.0*self.mua)
St = self.u*tauP/self.de
IIm = (St/(St + self.beta))**2
vIm = self.u*self.kx*IIm
return 2 * vIm
def turbulent_impaction(self):
"""Turbulent impaction"""
kIT1 = 3.5e-4
kIT2 = 0.18
Cc = slip_correction_factor(self.dp)
tauP = self.rhop*Cc*(self.dp**2)/(18.0*self.mua)
tauPPlus = tauP * self.uf**2 / self.nua
if tauPPlus <= 20:
return 2 * self.uf * kIT1 * tauPPlus**2
else:
return 2 * self.uf * kIT2
def total(self):
"""Total deposition velocity"""
return (self.brownian_diffusion()
+ self.sedimentation()
+ self.interception()
+ self.impaction()
+ self.turbulent_impaction())
class ApprxPetroffModel(DepVelModel):
"""Approximated Petroff model described in (Sip, 2016).
Parameters:
environ: 'mu', 'rho', 'u', 'uf'
particle: 'd', 'rho'
vegetation: 'de', 'leafType', 'leafDist'
"""
def __init__(self, environ, particle, vegetation):
self.mua = environ["mu"]
self.rhoa = environ["rho"]
self.nua = self.mua/self.rhoa
self.u = environ["u"]
self.uf = environ["uf"]
self.set_veget_params(vegetation)
self.dp = particle["d"]
self.rhop = particle["rho"]
# Settling velocity
self.us = settling_velocity(environ, particle)
self.environ = environ
self.particle = particle
sti = np.array([1e-2, 1e-1, 1e0, 1e1, 1e2])
ei = (sti/(sti + self.beta))**2
self.iim_fun = lambda x: np.interp(x, sti, ei, left=0.0, right=1.0)
@classmethod
def get_components(cls):
return ["total", "brownian_diffusion", "sedimentation", "interception",
"impaction", "turbulent_impaction"]
def set_veget_params(self, vegetation):
self.de = vegetation["de"]
self.leaf_type = vegetation["leafType"]
leaf_dist = vegetation["leafDist"]
dist_needle = {"horizontal": (2./pi**2, 1./pi),
"plagiophile": (0.27, 0.22),
"vertical": (1./pi, 0.),
"uniform": (0.27, 2./pi**2)}
dist_broadleaf = {"horizontal": (0., 0.5),
"plagiophile": (0.216, 0.340),
"vertical": (1./pi, 0.),
"uniform": (2/pi**2, 1./pi)}
if self.leaf_type == "needle":
self.kx, self.kz = dist_needle[leaf_dist]
self.Cb = 0.467
self.beta = 0.6
elif self.leaf_type == "broadleaf":
self.kx, self.kz = dist_broadleaf[leaf_dist]
self.Cb = 0.664
self.beta = 0.47
else:
raise ValueError("Unknown leafType %s" % self.leaf_type)
def brownian_diffusion(self):
"""Brownian diffusion"""
Re = self.u*self.de/self.nua
nb = 0.5
Cc = 3.34*LAMBD/self.dp
DB = (KB*self.environ["T"]*Cc)/(3*pi*self.mua*self.dp)
Sc = self.nua/DB
return 2*self.u*self.Cb*(Sc**(-2./3.))*(Re**(nb - 1))
def sedimentation(self):
"""Sedimentation"""
return 2*self.kz*self.us
def interception(self):
"""Interception"""
if self.leaf_type == "needle":
return 2*(2*self.u*self.kx*(self.dp/self.de))
elif self.leaf_type == "broadleaf":
a = 4.57
b = -0.078
return 2*(self.u*self.kx/2.0*(self.dp/self.de)
*(2.0 + log(4.0*self.de) + a*self.dp**b))
else:
raise ValueError("Unknown leafType %s" % self.leaf_type)
def impaction(self):
"""Impaction"""
Cc = 1.0
tauP = self.rhop*Cc*(self.dp**2)/(18.0*self.mua)
St = self.u*tauP/self.de
IIm = self.iim_fun(St)
vIm = self.u*self.kx*IIm
return 2*vIm
def turbulent_impaction(self):
"""Turbulent impaction"""
kIT1 = 3.5e-4
kIT2 = 0.18
Cc = slip_correction_factor(self.dp)
tauP = self.rhop*Cc*(self.dp**2)/(18.0*self.mua)
tauPPlus = tauP * self.uf**2 / self.nua
if tauPPlus <= 20:
return 2 * self.uf * kIT1 * tauPPlus**2
else:
return 2 * self.uf * kIT2
def total(self):
"""Total deposition velocity"""
return (self.brownian_diffusion()
+ self.sedimentation()
+ self.interception()
+ self.impaction()
+ self.turbulent_impaction())
class RaupachModel(DepVelModel):
"""Deposition velocity model used in (Raupach et al, 2001).
Incorporates only impaction and Brownian diffusion.
Parameters:
environ: 'mu', 'rho', 'u'
particle: 'd', 'rho'
vegetation: 'de', 'frontalToTotal'
"""
def __init__(self, environ, particle, vegetation):
self.mua = environ["mu"]
self.rhoa = environ["rho"]
self.nua = self.mua/self.rhoa
self.u = environ["u"]
self.de = vegetation["de"]
self.frontal_to_total = vegetation["frontalToTotal"]
self.dp = particle["d"]
self.rhop = particle["rho"]
# Schmidt number
self.Sc = self.nua/browndif_coeff(environ, particle)
@classmethod
def get_components(cls):
return ['brownian_diffusion', 'impaction', 'total']
def brownian_diffusion(self):
"""Brownian diffusion"""
Cpol = 1.32/2  # Pohlhausen coefficient
Re = self.u*self.de/self.nua
gpb = 2*self.u*Cpol*(Re**-0.5)*(self.Sc**(-2./3.))
return gpb
def impaction(self):
"""Impaction"""
St = ((self.dp**2)*self.rhop*self.u)/(18*self.mua*self.de)
p = 0.8
q = 2.0
E = (St/(St + p))**q
return 2*self.u*self.frontal_to_total*E
def total(self):
"""Total deposition velocity"""
return self.brownian_diffusion() + self.impaction()
class BruseModel(DepVelModel):
"""Vegetation model as described in (Bruse, 2007).
Incorporates sedimentation, impaction and Brownian diffusion."""
def __init__(self, environ, particle, vegetation):
self.mua = environ["mu"]
self.rhoa = environ["rho"]
self.nua = self.mua/self.rhoa
self.u = environ["u"]
self.set_veget_params(vegetation)
self.dp = particle["d"]
self.rhop = particle["rho"]
# Schmidt number
self.Sc = self.nua/browndif_coeff(environ, particle)
# Settling velocity
self.us = settling_velocity(environ, particle)
self.ra = self.A*sqrt(self.de/max(self.u, 0.05))
self.ustar = sqrt(self.u/self.ra)
def set_veget_params(self, vegetation):
self.de = vegetation["de"]
if vegetation["leafType"] == "broadleaf":
self.A = 87.0
else:
self.A = 200.0
@classmethod
def get_components(cls):
return ["total", "brownian_diffusion", "sedimentation", "impaction"]
def sedimentation(self):
"""Sedimentation"""
return self.us
def impaction(self):
"""Impaction"""
St = (self.us*(self.ustar**2))/(GRAV*self.nua)
return self.ustar*(10.0**(-3.0/St))
def brownian_diffusion(self):
"""Brownian diffusion"""
return self.ustar*(self.Sc**(-2.0/3.0))
def total(self):
"""Total deposition velocity"""
rb = 1.0/(self.brownian_diffusion() + self.impaction())
return 1.0/(self.ra + rb + self.ra*rb*self.us) + self.us
# Available models
MODELS = {
"Petroff": PetroffModel,
"ApprxPetroff": ApprxPetroffModel,
"Raupach": RaupachModel,
"Bruse": BruseModel
}
def calc(model, environ, particle, vegetation, component="total"):
"""Calculate deposition using given model.
All models are initialized by three dicts, describing the atmospheric
environment, particle, and vegetation. Used nomenclature is given below.
All quantities are in SI units.
Not all parameters are used by every model; check the documentation of each
model for the required ones.
Args:
environ (dict): Description of the atmospheric environment.
Dict may include:
'mu': Dynamic viscosity.
'rho': Air density.
'T': Air temperature.
'u': Wind speed.
'uf': Friction velocity.
particle (dict): Description of the particle.
Dict may include:
'd': Diameter of the particle.
'rho': Density of the particle.
vegetation (dict): Description of the vegetation.
Dict may include:
'de': Vegetation element (leaf, needle) diameter.
'leafType': Either 'needle' or 'broadleaf'
'leafDist': Distribution of the leaf orientation. One of
'horizontal', 'plagiophile', 'vertical', 'uniform'.
'frontalToTotal': Fraction of frontal area to total
(two sided) leaf area.
component (str): Component of the deposition velocity associated with a
physical process to get, or 'total' for the total deposition velocity.
For each model, :py:meth:`DepVelModel.get_components()` returns
list of the components ('total' included).
"""
if model not in MODELS:
raise ValueError("Model " + model + " not implemented.")
return MODELS[model](environ, particle, vegetation).get(component)
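if __name__ == "__main__":
    # Illustrative sketch with assumed values (not taken from the cited
    # references): total deposition velocity of a 1 um particle on broadleaf
    # vegetation, computed with the Petroff model.
    environ = {"mu": 1.81e-5, "rho": 1.2, "T": 293.15, "u": 1.0, "uf": 0.1}
    particle = {"d": 1e-6, "rho": 1000.0}
    vegetation = {"de": 0.05, "leafType": "broadleaf", "leafDist": "uniform"}
    ud = calc("Petroff", environ, particle, vegetation)
    print("Total deposition velocity: %.3e m/s" % ud)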
|
import time
x = 0
while x <= 10:
    print(x)
    x += 1
    time.sleep(0.5)
print("I will throw an error now, let's see if you can catch it!")
i = 12
print('err ' + i)  # intentionally raises TypeError: cannot concatenate str and int
|
import io
import sys
import wordcloud
import numpy as np
from matplotlib import pyplot as plt
def custom_word_clouds(file_contents):
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
# feel free to edit this list
uninteresting_words = ["the", "a", "to", "if", "is", "it", "of", "and", "or", "an", "as", "i", "me", "my", "thus", \
"we", "our", "ours", "you", "your", "yours", "he", "she", "him", "his", "her", "hers", "its", "they", "them", \
"their", "what", "which", "who", "whom", "this", "that", "am", "are", "was", "were", "be", "been", "being", \
"have", "has", "had", "do", "does", "did", "but", "at", "by", "with", "from", "here", "when", "where", "how", \
"all", "any", "both", "each", "few", "more", "some", "such", "no", "nor", "too", "very", "can", "will", "just"]
frequencies = {}
for word in file_contents.split():
    # strip punctuation and lowercase the word before counting
    for letter in punctuations:
        word = word.replace(letter, '')
    word = word.lower()
    # disregard empty tokens and the words in our uninteresting_words list
    if not word or word in uninteresting_words:
        continue
    frequencies[word] = frequencies.get(word, 0) + 1
cloud = wordcloud.WordCloud()
cloud.generate_from_frequencies(frequencies)
return cloud.to_array()
# location of our txt file
with open('sample.txt') as f:
    file_contents = f.read()
# creating the image
myimage = custom_word_clouds(file_contents)
plt.imshow(myimage, interpolation='nearest')
plt.axis('off')
plt.show()
|
import numpy as np
import json
A = np.load("basic_features.npy")
b = np.load("basic_param.npy")
print(A)
print(b)
data = {"features": A.tolist(), "param": b.tolist()}
with open("simple_rep.json", "w") as outf:
json.dump(data, outf)
|
import unittest
from time import sleep
from core.client import DealTapAut
from pages.homepage import Home
from pages.login import Login
class TestLogin(unittest.TestCase):
def setUp(self):
self.dealtap_aut = DealTapAut()
self.dealtap_aut.launch()
self.home_page = Home()
self.home_page.click_login_from_homepage()
def tearDown(self):
    # close the browser launched in setUp instead of creating a new client
    self.dealtap_aut.close_browser()
def test_invalid_email_as_username(self):
login_page = Login()
login_page.set_login_page_username('john.doe')
login_page.set_login_page_password('abc123')
actual_invalid_email_text = login_page.get_invalid_email_text()
expected_invalid_email_text = "Email is invalid."
self.assertEqual(actual_invalid_email_text, expected_invalid_email_text)
def test_failed_login_attempt(self):
login_page = Login()
login_page.set_login_page_username('john.doe@test.ca')
login_page.set_login_page_password('abc123')
sleep(3)
login_page.click_on_login_button()
actual_login_failed_text = login_page.get_login_failed_text()
expected_login_failed_text = "Login failed. Incorrect username or password."
login_page.click_on_login_failed_okay_button()
self.assertEqual(actual_login_failed_text, expected_login_failed_text)
def test_successful_login_attempt(self):
login_page = Login()
login_page.set_login_page_username('mazlanislam@outlook.com')
login_page.set_login_page_password('Password1234')
sleep(3)
login_page.click_on_login_button()
actual_login_passed_text = login_page.get_login_passed_text()
expected_login_passed_text = "Dashboard - Home"
self.assertEqual(actual_login_passed_text, expected_login_passed_text)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
'''A testing script to evaluate action flows without accidentally running them at startup.'''
import sys
import time
import board
import busio
from random import randint
import adafruit_tca9548a
import adafruit_tcs34725
import adafruit_hcsr04
from adafruit_bus_device.i2c_device import I2CDevice
import modules.motor_smbus as motor_lib
from modules.robot import Robot
import modules.laser as laser_lib
if __name__ == "__main__":
with busio.I2C(board.SCL, board.SDA) as bus:
robot = Robot(bus)
start_ts = time.time()
LOST = False
while time.time() < start_ts + 30.0:
try:
if not LOST:
robot.leave_start() #leaves you at intersection
robot.do_motor_burst(motor_lib.FORWARD_CMD) #enter intersection
robot.do_align() #aligns to far side
robot.do_turn(angle=motor_lib.RIGHT_90_DIR)
robot.do_motor_burst(motor_lib.TRAN_R_CMD)
robot.check()
robot.fire(cmd=laser_lib.LASER_FIRE_CMD)
robot.do_align()
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.RIGHT_180_DIR)
robot.do_fwd_deflect_edge(stop_purple=0)
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
# robot.do_align()
# robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.RIGHT_90_DIR)
robot.do_fwd_deflect_edge(stop_purple=0)
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
# robot.do_align()
# robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.check()
robot.fire(cmd=laser_lib.LASER_FIRE_CMD)
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.RIGHT_90_DIR)
robot.check()
robot.fire(cmd=laser_lib.LASER_FIRE_CMD)
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.LEFT_180_DIR)
robot.do_align()
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.LEFT_90_DIR)
robot.do_fwd_deflect_edge(stop_purple=0)
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.LEFT_90_DIR)
robot.check()
robot.fire(cmd=laser_lib.LASER_FIRE_CMD)
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.LEFT_180_DIR)
robot.do_align()
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.RIGHT_90_DIR)
robot.do_fwd_deflect_edge(stop_purple=1)
robot.do_motor_burst(motor_lib.FORWARD_CMD)
robot.do_align()
robot.do_turn(angle=motor_lib.RIGHT_90_DIR)
robot.do_fwd_deflect_edge(stop_purple=1)
robot.do_motor_burst(motor_lib.FORWARD_CMD)
robot.do_fwd_deflect_edge(stop_purple=1)
robot.do_motor_burst(motor_lib.FORWARD_CMD)
robot.do_fwd_deflect_edge(stop_purple=1)
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
robot.do_turn(angle=motor_lib.LEFT_90_DIR)
robot.do_fwd_deflect_edge(stop_purple=0)
else:
choice = randint(0, 5)
if choice == 0:
time.sleep(0.5)
elif choice == 1:
robot.do_align_check(timeout=1.0)
robot.do_motor_burst(motor_lib.BACKWARD_CMD)
elif choice == 2:
robot.do_turn(angle=motor_lib.RIGHT_90_DIR)
elif choice == 3:
robot.follow(timeout=5.0)
robot.fire(cmd=laser_lib.LASER_FIRE_CMD)
except KeyboardInterrupt:
print('!Interrupt')
break
except IOError:
print('!IOerror')
LOST = True
robot.stop()
|
from datetime import datetime
import json
def getPersonal(date):
personal = []
with open('./lib/data/events.json', 'r') as f:
file = json.load(f)
for key in file['events'].keys():
if file['events'][key]['repeat'] is not None:
if type(file['events'][key]['repeat']) == dict:
rep = checkRepeat(date, key, cus=file['events'][key]['repeat'])
else:
rep = checkRepeat(date, key, reptype=file['events'][key]['repeat'])
if rep:
personal.append(file['events'][key]['name'])
if key == str(date):
personal.append(file['events'][key]['name'])
return list(set(personal))
def checkRepeat(curr, eve, reptype=None, cus=None):
date1 = datetime.strptime(curr, "%Y-%m-%d")
date2 = datetime.strptime(eve, "%Y-%m-%d")
if reptype:
if reptype.lower() == 'day':
if date1 < date2:
return False
else:
return True
elif reptype.lower() == 'week':
if date1 < date2:
return False
else:
if (date1 - date2).days % 7 == 0:
return True
elif reptype.lower() == 'month':
if date1 < date2:
return False
elif date1.day == date2.day:
    return True
elif reptype.lower() == 'year':
if date1 < date2:
return False
elif date1.strftime('%m-%d') == date2.strftime('%m-%d'):
    return True
elif cus:
if cus['end'] is not None and date1 > datetime.strptime(cus['end'], "%Y-%m-%d"):
return False
if cus['every'].lower() == "week":
    if int((date1 - date2).days // 7) % int(cus['num']) == 0:
        # when specific weekdays are listed, the current day must be one of them
        if cus['days'] is not None:
            return date1.strftime('%A') in cus['days']
        return True
    else:
        return False
if cus['every'].lower() == 'year':
if date1 < date2:
return False
elif date1.year - date2.year == int(cus['num']) and date1.strftime('%m-%d') == date2.strftime('%m-%d'):
return True
if cus['every'].lower() == 'day':
if int(date1.day) % int(cus['num']) == 0:
return True
else:
return False
if cus['every'].lower() == 'month':
if date1.day == date2.day and (date1.month - date2.month) % int(cus['num']) == 0:
return True
else:
return False
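# Illustrative shape of ./lib/data/events.json as read by getPersonal()
# (inferred from the lookups above; names and dates are examples):
#
#   {"events": {
#       "2020-01-15": {"name": "Dentist", "repeat": null},
#       "2020-01-06": {"name": "Standup", "repeat": "week"},
#       "2020-02-03": {"name": "Gym", "repeat": {"every": "week", "num": 2,
#                       "days": ["Monday", "Wednesday"], "end": "2020-12-31"}}
#   }}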
|
import argparse
import json
import sys
import os
import sh
from sh import docker
parentdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.sys.path.insert(0, parentdir)
import time
# import multiprocessing
def reset(package: str):
container_name = package + "_build"
image_name = package + "_image"
docker.rm("-f", container_name, _ok_code=[0, 1])
docker.rmi(image_name, _ok_code=[0, 1])
def run(package: str, configuration_dir: str, binary_name: str, qemu: bool, minimize: bool, timeout: float,
fuzz_duration: float):
if os.path.exists(configuration_dir):
print("Skipping {0}. Directory already exists".format(configuration_dir))
return
reset(package)
print("Starting qemu={0},minimize={1},Fuzzing timeout={2}".format(qemu, minimize, timeout))
start = time.time()
container_name = package + "_build" # +"_"+str(uuid.uuid4())[:8]
image_name = package + "_image" # +"_"+str(uuid.uuid4())[:8]
timecommand = sh.Command("time") # type: sh.Command
docker_args = ["--name", container_name, "--entrypoint", "python", "pacmanfuzzer",
"/inputinferer/configfinder/builder_wrapper.py", "-p", package]
if qemu:
docker_args.append("-Q")
with timecommand(_with=True) as timeprocess:
print("Building")
build_process = docker.run(docker_args, _ok_code=[0, 1, 2], _out=sys.stdout,
_err=sys.stderr) # type: sh.RunningCommand
if not qemu and build_process.exit_code == 2:
print("WITHOUT QEMU: Failed")
return
with timecommand(_with=True):
docker.commit([container_name, image_name], _out=sys.stdout, _err=sys.stderr)
docker_args = ["--rm", "--cap-add=SYS_PTRACE", "-v", configuration_dir + ":/results", "--entrypoint", "python",
image_name, "/inputinferer/configfinder/config_finder_for_pacman_package.py", "-p", package, "-v",
"/results/"]
if qemu:
docker_args.append("-Q")
with timecommand(_with=True):
print("Finding the input vector")
input_process = docker.run(docker_args, _out=sys.stdout, _err=sys.stderr)
print(input_process.cmd)
with open(os.path.join(configuration_dir, package + "/" + binary_name + ".json")) as binary_name_fp:
config_dict = json.load(binary_name_fp)[0]
seeds = config_dict["file_type"]
parameter = config_dict["parameter"]
binary_path = config_dict["binary_path"]
if not os.path.exists(os.path.join(configuration_dir, package + "/" + binary_name)):
os.mkdir(os.path.join(configuration_dir, package + "/" + binary_name))
if minimize:
docker_args = ["--rm", "--cap-add=SYS_PTRACE", "-v", configuration_dir + ":/results", "--entrypoint", "python",
image_name, "/inputinferer/configfinder/controller.py", "minimize", "-p", package, "-v",
"/results", "-s", seeds,
"--parameter=" + parameter, "-b", binary_path, "-afile", binary_name + ".afl_config"]
if qemu:
docker_args.append("-Q")
with timecommand(_with=True):
print("Minimizing")
docker.run(docker_args, _out=sys.stdout, _err=sys.stderr)
with open(os.path.join(configuration_dir, package + "/" + binary_name + ".afl_config")) as afl_config_fp:
seeds = json.load(afl_config_fp)["min_seeds_dir"]
docker_args = ["--rm", "--cap-add=SYS_PTRACE", "-v", configuration_dir + ":/results", "--entrypoint", "python",
image_name, "/inputinferer/configfinder/controller.py", "evalfuzz"]
if fuzz_duration:
docker_args += ["-ft", fuzz_duration]
if timeout:
docker_args += ["-t", timeout]
docker_args += ["-p", package, "-v", "/results", "-s", seeds, "--parameter=" + parameter, "-b", binary_path,
"-afile", binary_name + ".afl_config"]
if qemu:
docker_args.append("-Q")
with timecommand(_with=True):
print("Fuzzing")
docker.run(docker_args, _out=sys.stdout, _err=sys.stderr)
print("Done")
end = time.time()
print("Time elapsed: ", str(end - start))
def main(package: str, configuration_dir: str, binary_name: str, timeout: float, fuzz_duration: float):
# p = mp.Pool(mp.cpu_count()-2)
# a1 = p.map_async(lambda x: run(package,x,binary_name,timeout=timeout,qemu=False,minimize=False),[os.path.join(os.getcwd(), configuration_dir + "/" + package + "noqemu/" + "run" + str(i)) for i in range(10)])
# a2 = p.map_async(lambda x: run(package, x, binary_name, timeout=timeout, qemu=False, minimize=True),[os.path.join(os.getcwd(), configuration_dir + "/" + package + "noqemuminimize/" + "run" + str(i)) for i in range(10)])
# a3 = p.map_async(lambda x: run(package, x, binary_name, timeout=timeout, qemu=True, minimize=False),[os.path.join(os.getcwd(), configuration_dir + "/" + package + "qemu/" + "run" + str(i)) for i in range(10)])
# a4 = p.map_async(lambda x: run(package, x, binary_name, timeout=timeout, qemu=True, minimize=True),[os.path.join(os.getcwd(), configuration_dir + "/" + package + "qemuminimize/" + "run" + str(i)) for i in range(10)])
# a1.get()
# a2.get()
# a3.get()
# a4.get()
for i in range(10):
run(package, os.path.join(os.getcwd(), configuration_dir + "/" + package + "noqemu/" + "run" + str(i)),
binary_name,
timeout=timeout, qemu=False, minimize=False, fuzz_duration=fuzz_duration)
run(package, os.path.join(os.getcwd(), configuration_dir + "/" + package + "noqemuminimize/" + "run" + str(i)),
binary_name, timeout=timeout, qemu=False, minimize=True, fuzz_duration=fuzz_duration)
run(package, os.path.join(os.getcwd(), configuration_dir + "/" + package + "qemu/" + "run" + str(i)),
binary_name, timeout=timeout, qemu=True, minimize=False, fuzz_duration=fuzz_duration)
run(package, os.path.join(os.getcwd(), configuration_dir + "/" + package + "qemuminimize/" + "run" + str(i)),
binary_name, timeout=timeout, qemu=True, minimize=True, fuzz_duration=fuzz_duration)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Examine a package.')
parser.add_argument("-p", "--package", required=True, type=str,
help="The package to be examined. Must be an apt package.")
parser.add_argument("-cd", "--configuration_dir", required=True, type=str, help="Where to store the results?")
parser.add_argument("-ft", "--fuzzer_timeout", required=False, type=float,
help="The timeout for afl (the whole fuzzer process)",
default=None) # Default timeout: None ( take the one from config)
parser.add_argument("-t", "--timeout", required=False, type=float,
help="The timeout for afl (per run)",
default=None) # Default timeout: None ( take the one from config)
parser.add_argument("-b", "--binary_path", required=True, type=str,
help="The name of ther binary.")
# Either fuzz projects or binaries
arguments = parser.parse_args()
main(arguments.package, arguments.configuration_dir, arguments.binary_path, timeout=arguments.timeout,
fuzz_duration=arguments.fuzzer_timeout)
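# Example invocation (the script name and package/binary names are illustrative):
#   python run_package_eval.py -p tcpdump -cd results -b tcpdump -t 1000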
|
import logging
from flask import jsonify, request, make_response, abort
from app import app
from app.errors import MyError
import app.utils_routes as utils
@app.errorhandler(400)
def bad_request(error):
    return make_response(jsonify({'error': 'Bad request'}), 400)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found this url'}), 404)
type_request = {
0: utils.register,
1: utils.login,
2: utils.create_room,
3: utils.change_room,
4: utils.get_user_rooms,
5: utils.send_request_to_room,
6: utils.turn_off_alarm,
7: utils.search_rooms,
8: utils.check_alarm,
9: utils.is_request_in_room,
10: utils.get_room,
}
@app.route('/alarm_api', methods=['POST'])
def alarm_api():
try:
client_request = request.get_json()
if isinstance(client_request, dict):
ans = type_request[client_request['requestType']](client_request)
ans['requestType'] = client_request['requestType']
return jsonify(ans)
except MyError as e:
return jsonify({
'error': str(e),
'for egor': 'check the field name'
})
except Exception as e:
logging.exception(e)
abort(400)
@app.route('/')
def about():
return 'Api by MBkkt for Alarm by alamasm'
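# Hypothetical client sketch (the payload fields expected by each handler are
# defined in app/utils_routes.py, which is not shown; 'requestType' selects the
# handler, other fields here are assumptions):
#
#   import requests
#   resp = requests.post('http://localhost:5000/alarm_api',
#                        json={'requestType': 7, 'query': 'kitchen'})
#   print(resp.json())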
|
'''
Mapping objects for translating between internal and external
representations.
'''
class FieldException(Exception):
pass
class EncoderException(Exception):
pass
class DecoderException(Exception):
pass
class FieldMap(object):
name = None
field_name = None
value = None
def __init__(self, name, field_name=None, value=None):
    self.remote_name = name
    self.field_name = field_name
    self.value = value
def __get__(self, instance, owner):
return self.value
def __set__(self, instance, value):
self.value = value
class Encoder(object):
def encode(self, instance):
''' generate remote representation from local instance
'''
raise EncoderException("Not implemented!")
class Decoder(object):
def decode(self, instance):
''' generate local representation from remote instance
'''
raise DecoderException("Not implemented!")
class TransformBase(Encoder, Decoder):
''' Base class for transforming objects between local
representation and remote.
The naming convention is to "decode" remote representations
into local and "encode" local representations into remote.
'''
def read(self, instance):
''' Convenience wrapper for decode '''
return self.decode(instance)
def write(self, instance):
''' Convenience wrapper for encode '''
return self.encode(instance)
def get_value(self, field, instance):
''' Returns dict key value or class instance field value '''
class ModelTransform(TransformBase):
''' Extend this class to implement transforms between Django model
instances and remote representation .
'''
model = None
uri = None
def __init__(self, model, uri=None):
self.model = model
self.uri = uri
def get_field_value(field, instance, default=None):
''' Returns dict key value or class instance field value
This is effectively a convenience wrapper to check whether
instance is a dict or object to determine whether to use
dict.get or getattr
'''
if not field:
raise FieldException('None not allowed for field name')
try:
if isinstance(instance, dict):
value = instance.get(field, default) if default else instance.get(field)
elif isinstance(instance, object):
value = getattr(instance, field, default) if default else getattr(instance, field)
except Exception as e:
    raise FieldException('Instance has no field %s' % field, e)
return value
|
class Solution:
def expand(self, s, i):
left = i // 2 - (i % 2 == 0)
right = i // 2 + 1
while(left >= 0 and right < len(s) and s[left] == s[right]):
left -= 1
right += 1
return right - left - 1
def get_p(self, s, i, r):
pivot = i // 2
return s[pivot - r + (i % 2 == 1): pivot + r + 1]
def longestPalindrome(self, s):
n = 2 * len(s) - 1
ls = [0] * n
for i in range(n):
ls[i] = self.expand(s, i)
i_max = ls.index(max(ls))
r = ls[i_max] // 2
return self.get_p(s, i_max, r)
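if __name__ == "__main__":
    # Quick sanity checks of the expand-around-center approach (2n - 1 centers).
    assert Solution().longestPalindrome("babad") in ("bab", "aba")
    assert Solution().longestPalindrome("cbbd") == "bb"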
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
'''An example of handling errors with s3 functions.
Disclaimer: this is only created to demonstrate unit testing'''
import boto3
import os
import json
# configure aws authentication
BUCKET_NAME = os.environ['S3_BUCKET']
SSM_S3_ACCESS_PARAM = os.environ['S3_ACCESS_PARAM']
SSM_S3_SECRET_PARAM = os.environ['S3_SECRET_PARAM']
# get s3 credentials from ssm
ssm_client = boto3.client('ssm')
ACCESS_KEY = ssm_client.get_parameter(Name=SSM_S3_ACCESS_PARAM, WithDecryption=True)['Parameter']['Value']
SECRET_KEY = ssm_client.get_parameter(Name=SSM_S3_SECRET_PARAM, WithDecryption=True)['Parameter']['Value']
# NOTE: do not hardcode these values
s3_client = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
def my_put_object(key, body):
# encodes the body and puts it in the bucket
body = body.encode('utf-8')
s3_client.put_object(Bucket=BUCKET_NAME, Key=key, Body=body)
def my_get_object(key):
# return body of object in s3. return false if exception raised.
try:
response = s3_client.get_object(Bucket=BUCKET_NAME, Key=key)
body = json.loads(response['Body'].read())
except Exception as e:
print(e)
return False
return body
def my_list_objects_v2():
# return list of objects in s3. return false if exception raised.
try:
response = s3_client.list_objects_v2(Bucket=BUCKET_NAME)
except Exception as e:
print(e)
return False
# if there is nothing in the bucket, raise an exception
if 'Contents' not in response:
raise Exception('Bucket empty')
return response['Contents']
|
from flask import Flask, render_template, Response
import cv2
import eyesight
import eyesight.services as services
def construct_eyesight_pipeline():
# First, get camera
camera = services.PiCamera()
# camera = services.VideoFileReader()
# Define services
detector = services.ObjectDetector(camera)
segmentator = services.SemanticSegmentator(camera)
tracker = services.OpticalFlowLucasKanade(camera)
contour_extrapolator = services.SegmentationExtrapolator(
segmentator=segmentator, tracker=tracker, orig_size=(640, 480))
# Output services
composer = services.DetectronDraw(
image_stream=camera,
# detector=detector,
# segmentator=segmentator,
tracker=tracker,
contours=contour_extrapolator,
)
return services.PerformanceBar(composer)
# Flask App
app = Flask(__name__)
# Define and start eyesight pipeline
service = construct_eyesight_pipeline()
manager = eyesight.ServiceManager(service)
manager.start()
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen(camera):
"""Video streaming generator function."""
while True:
frame = cv2.cvtColor(camera.query()[1], cv2.COLOR_RGB2BGR)
frame = cv2.imencode('.jpg', frame)[1].tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(service),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True)
|
# Copyright 2021 Matt Chaput and Marcos Pontes. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains classes for scoring (and sorting) search results.
author: matt chaput and marcos pontes
"""
from __future__ import division
from collections import defaultdict
from math import log, pi, sqrt
from functools import lru_cache
from typing import Union
import whoosh.query
from whoosh.compat import iteritems
from whoosh.minterm import exist_minterm, get_minterm, get_minterm_match
from whoosh.weighting_schema import IDF, TF
# Base classes
class WeightingModel(object):
"""Abstract base class for scoring models. A WeightingModel object provides
a method, ``scorer``, which returns an instance of
:class:`whoosh.scoring.Scorer`.
Basically, WeightingModel objects store the configuration information for
the model (for example, the values of B and K1 in the BM25F model), and
then creates a scorer instance based on additional run-time information
(the searcher, the fieldname, and term text) to do the actual scoring.
"""
use_final = False
DOC_FREQUENCY_CACHE = 100
def idf(self, searcher, fieldname, text, idf_schema: IDF = IDF.default):
"""Returns the inverse document frequency of the given term.
"""
parent = searcher.get_parent()
return idf_schema(parent, fieldname, text)
@lru_cache(maxsize=DOC_FREQUENCY_CACHE)
def tf(self, searcher, fieldname, docnum, tf_schema: TF = TF.frequency):
    """Returns the term frequency of every term in the given document's field.
"""
all_tf = {}
matcher = searcher.vector_as("weight", docnum, fieldname)
for term, freq in matcher:
max_weight = searcher.term_info(fieldname, term).max_weight()
all_tf[term] = tf_schema(freq, max_weight)
return all_tf
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
"""Returns an instance of :class:`whoosh.scoring.Scorer` configured
for the given searcher, fieldname, and term text.
"""
raise NotImplementedError(self.__class__.__name__)
def final(self, searcher, docnum, score):
"""Returns a final score for each document. You can use this method
in subclasses to apply document-level adjustments to the score, for
example using the value of stored field to influence the score
(although that would be slow).
WeightingModel sub-classes that use ``final()`` should have the
attribute ``use_final`` set to ``True``.
:param searcher: :class:`whoosh.searching.Searcher` for the index.
:param docnum: the doc number of the document being scored.
:param score: the document's accumulated term score.
:rtype: float
"""
return score
class BaseScorer(object):
"""Base class for "scorer" implementations. A scorer provides a method for
scoring a document, and sometimes methods for rating the "quality" of a
document and a matcher's current "block", to implement quality-based
optimizations.
Scorer objects are created by WeightingModel objects. Basically,
WeightingModel objects store the configuration information for the model
(for example, the values of B and K1 in the BM25F model), and then creates
a scorer instance.
"""
def supports_block_quality(self):
"""Returns True if this class supports quality optimizations.
"""
return False
def score(self, matcher):
"""Returns a score for the current document of the matcher.
"""
raise NotImplementedError(self.__class__.__name__)
def max_quality(self):
"""Returns the *maximum limit* on the possible score the matcher can
give. This can be an estimate and not necessarily the actual maximum
score possible, but it must never be less than the actual maximum
score.
"""
raise NotImplementedError(self.__class__.__name__)
def block_quality(self, matcher):
"""Returns the *maximum limit* on the possible score the matcher can
give **in its current "block"** (whatever concept of "block" the
backend might use). This can be an estimate and not necessarily the
actual maximum score possible, but it must never be less than the
actual maximum score.
If this score is less than the minimum score
required to make the "top N" results, then we can tell the matcher to
skip ahead to another block with better "quality".
"""
raise NotImplementedError(self.__class__.__name__)
# Scorer that just returns term weight
class WeightScorer(BaseScorer):
"""A scorer that simply returns the weight as the score. This is useful
for more complex weighting models to return when they are asked for a
scorer for fields that aren't scorable (don't store field lengths).
"""
def __init__(self, maxweight):
self._maxweight = maxweight
def supports_block_quality(self):
return True
def score(self, matcher):
return matcher.weight()
def max_quality(self):
return self._maxweight
def block_quality(self, matcher):
return matcher.block_max_weight()
@classmethod
def for_(cls, searcher, fieldname, text):
ti = searcher.term_info(fieldname, text)
return cls(ti.max_weight())
# Base scorer for models that only use weight and field length
class WeightLengthScorer(BaseScorer):
"""Base class for scorers where the only per-document variables are term
weight and field length.
Subclasses should override the ``_score(weight, length)`` method to return
the score for a document with the given weight and length, and call the
``setup()`` method at the end of the initializer to set up common
attributes.
"""
def setup(self, searcher, fieldname, text):
"""Initializes the scorer and then does the busy work of
adding the ``dfl()`` function and maximum quality attribute.
This method assumes the initializers of WeightLengthScorer subclasses
always take ``searcher, fieldname, text`` as the first three
arguments. Any additional arguments given to this method are passed
through to the initializer.
Note: this method calls ``self._score()``, so you should only call it
in the initializer after setting up whatever attributes ``_score()``
depends on::
class MyScorer(WeightLengthScorer):
def __init__(self, searcher, fieldname, text, parm=1.0):
self.parm = parm
self.setup(searcher, fieldname, text)
def _score(self, weight, length):
return (weight / (length + 1)) * self.parm
"""
ti = searcher.term_info(fieldname, text)
if not searcher.schema[fieldname].scorable:
return WeightScorer(ti.max_weight())
self.dfl = lambda docid: searcher.doc_field_length(docid, fieldname, 1)
self._maxquality = self._score(ti.max_weight(), ti.min_length())
def supports_block_quality(self):
return True
def score(self, matcher):
return self._score(matcher.weight(), self.dfl(matcher.id()))
def max_quality(self):
return self._maxquality
def block_quality(self, matcher):
return self._score(matcher.block_max_weight(),
matcher.block_min_length())
def _score(self, weight, length):
# Override this method with the actual scoring function
raise NotImplementedError(self.__class__.__name__)
# WeightingModel implementations
# Debugging model
class DebugModel(WeightingModel):
def __init__(self):
self.log = []
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
return DebugScorer(searcher, fieldname, text, self.log)
class DebugScorer(BaseScorer):
def __init__(self, searcher, fieldname, text, log):
ti = searcher.term_info(fieldname, text)
self._maxweight = ti.max_weight()
self.searcher = searcher
self.fieldname = fieldname
self.text = text
self.log = log
def supports_block_quality(self):
return True
def score(self, matcher):
fieldname, text = self.fieldname, self.text
docid = matcher.id()
w = matcher.weight()
length = self.searcher.doc_field_length(docid, fieldname)
self.log.append((fieldname, text, docid, w, length))
return w
def max_quality(self):
return self._maxweight
def block_quality(self, matcher):
return matcher.block_max_weight()
# BM25F Model
def bm25(idf, tf, fl, avgfl, B, K1):
# idf - inverse document frequency
# tf - term frequency in the current document
# fl - field length in the current document
# avgfl - average field length across documents in collection
    # B, K1 - free parameters
return idf * ((tf * (K1 + 1)) / (tf + K1 * ((1 - B) + B * fl / avgfl)))
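# Illustrative sanity check of the formula above (made-up numbers, not taken from any index):
# with idf=2.0, tf=3, fl=100, avgfl=120, B=0.75 and K1=1.2 the denominator is
# 3 + 1.2 * ((1 - 0.75) + 0.75 * 100 / 120) = 4.05, so the score is 2.0 * (3 * 2.2) / 4.05 ~= 3.26.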
class BM25F(WeightingModel):
"""Implements the BM25F scoring algorithm.
"""
def __init__(self, B=0.75, K1=1.2, **kwargs):
"""
>>> from whoosh import scoring
>>> # Set a custom B value for the "content" field
>>> w = scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)
:param B: free parameter, see the BM25 literature. Keyword arguments of
the form ``fieldname_B`` (for example, ``body_B``) set field-
specific values for B.
:param K1: free parameter, see the BM25 literature.
"""
self.B = B
self.K1 = K1
self._field_B = {}
for k, v in iteritems(kwargs):
if k.endswith("_B"):
fieldname = k[:-2]
self._field_B[fieldname] = v
def supports_block_quality(self):
return True
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
if not searcher.schema[fieldname].scorable:
return WeightScorer.for_(searcher, fieldname, text)
if fieldname in self._field_B:
B = self._field_B[fieldname]
else:
B = self.B
return BM25FScorer(searcher, fieldname, text, B, self.K1, qf=qf)
class BM25FScorer(WeightLengthScorer):
def __init__(self, searcher, fieldname, text, B, K1, qf=1):
# IDF and average field length are global statistics, so get them from
# the top-level searcher
parent = searcher.get_parent() # Returns self if no parent
self.idf = parent.idf(fieldname, text)
self.avgfl = parent.avg_field_length(fieldname) or 1
self.B = B
self.K1 = K1
self.qf = qf
self.setup(searcher, fieldname, text)
def _score(self, weight, length):
s = bm25(self.idf, weight, length, self.avgfl, self.B, self.K1)
return s
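# Minimal usage sketch (hypothetical index object ``ix`` and parsed query ``q``; the field-specific
# ``content_B`` keyword follows the pattern documented in the BM25F docstring above):
#
#   from whoosh import scoring
#   with ix.searcher(weighting=scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)) as s:
#       results = s.search(q)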
# DFree model
def dfree(tf, cf, qf, dl, fl):
# tf - term frequency in current document
# cf - term frequency in collection
# qf - term frequency in query
# dl - field length in current document
# fl - total field length across all documents in collection
prior = tf / dl
post = (tf + 1.0) / (dl + 1.0)
invpriorcol = fl / cf
norm = tf * log(post / prior)
return qf * norm * (tf * (log(prior * invpriorcol))
+ (tf + 1.0) * (log(post * invpriorcol))
+ 0.5 * log(post / prior))
class DFree(WeightingModel):
"""Implements the DFree scoring model from Terrier.
See http://terrier.org/
"""
def supports_block_quality(self):
return True
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
if not searcher.schema[fieldname].scorable:
return WeightScorer.for_(searcher, fieldname, text)
return DFreeScorer(searcher, fieldname, text, qf=qf)
class DFreeScorer(WeightLengthScorer):
def __init__(self, searcher, fieldname, text, qf=1):
# Total term weight and total field length are global statistics, so
# get them from the top-level searcher
parent = searcher.get_parent() # Returns self if no parent
self.cf = parent.frequency(fieldname, text)
self.fl = parent.field_length(fieldname)
self.qf = qf
self.setup(searcher, fieldname, text)
def _score(self, weight, length):
return dfree(weight, self.cf, self.qf, length, self.fl)
# PL2 model
rec_log2_of_e = 1.0 / log(2)
def pl2(tf, cf, qf, dc, fl, avgfl, c):
# tf - term frequency in the current document
# cf - term frequency in the collection
# qf - term frequency in the query
# dc - doc count
# fl - field length in the current document
# avgfl - average field length across all documents
    # c - free parameter
TF = tf * log(1.0 + (c * avgfl) / fl)
norm = 1.0 / (TF + 1.0)
f = cf / dc
return norm * qf * (TF * log(1.0 / f)
+ f * rec_log2_of_e
+ 0.5 * log(2 * pi * TF)
+ TF * (log(TF) - rec_log2_of_e))
class PL2(WeightingModel):
"""Implements the PL2 scoring model from Terrier.
See http://terrier.org/
"""
def __init__(self, c=1.0):
self.c = c
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
if not searcher.schema[fieldname].scorable:
return WeightScorer.for_(searcher, fieldname, text)
return PL2Scorer(searcher, fieldname, text, self.c, qf=qf)
class PL2Scorer(WeightLengthScorer):
def __init__(self, searcher, fieldname, text, c, qf=1):
# Total term weight, document count, and average field length are
# global statistics, so get them from the top-level searcher
parent = searcher.get_parent() # Returns self if no parent
self.cf = parent.frequency(fieldname, text)
self.dc = parent.doc_count_all()
self.avgfl = parent.avg_field_length(fieldname) or 1
self.c = c
self.qf = qf
self.setup(searcher, fieldname, text)
def _score(self, weight, length):
return pl2(weight, self.cf, self.qf, self.dc, length, self.avgfl,
self.c)
class Boolean(WeightingModel):
def supports_block_quality(self):
return True
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
ql = 1.0
if query_context:
ql = len(query_context.subqueries)
return BooleanScorer(ql)
class BooleanScorer(BaseScorer):
def __init__(self, ql):
self.ql = ql
def max_quality(self):
return 1.0
def block_quality(self, matcher):
return 1.0
def score(self, matcher):
return 1.0 / self.ql if matcher.weight() > 0.0 else 0.0
class Probabilistic(WeightingModel):
def supports_block_quality(self):
return True
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
N = searcher.doc_count()
ni = searcher.doc_frequency(fieldname, text)
return ProbabilisticScorer(N, ni)
class ProbabilisticScorer(BaseScorer):
def __init__(self, N, ni):
self.N = N
self.ni = ni
def score(self, matcher):
return log((self.N + 0.5) / (self.ni + 0.5))
def max_quality(self):
return 1.0
def block_quality(self, matcher):
return 1.0
class Frequency(WeightingModel):
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
maxweight = searcher.term_info(fieldname, text).max_weight()
return WeightScorer(maxweight)
class TF_IDF(WeightingModel):
"""
    Using the classic VSM approach (with vector=True for the queried field), the first query is
    noticeably slower because all normalization terms must be computed. Subsequent queries are as
    fast as the original whoosh approach.
"""
tf_normalize = True
def __init__(self, *, tf: Union[TF, str] = TF.frequency, idf: Union[IDF, str] = IDF.default):
"""
>>> from whoosh import scoring
>>> # You can set IDF and TF schemas
>>> w = scoring.TF_IDF(tf = TF.frequency, idf=IDF.inverse_frequency)
:param idf: free parameter, indicates the idf schema. See the Vector Space Model literature.
:param tf: free parameter, indicates the tf schema. See the Vector Space Model literature.
"""
self.idf_table = dict()
self.tf_table = defaultdict(dict)
self.query_terms = set()
self._idf_schema = IDF.get(idf) if isinstance(idf, str) else idf
self._tf_schema = TF.get(tf) if isinstance(tf, str) else tf
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
# IDF is a global statistic, so get it from the top-level searcher
self.extract_idf_table(searcher, fieldname, query_context)
# Global context is needed for TF
self.query_terms = set()
self.all_terms(query_context)
return TFIDFScorer(self, fieldname, text, searcher, query_context, self._tf_schema)
def extract_idf_table(self, searcher, fieldname, query_context):
if query_context:
if isinstance(query_context, whoosh.query.compound.CompoundQuery):
for subquery in query_context:
self.extract_idf_table(searcher, fieldname, subquery)
elif isinstance(query_context, whoosh.query.terms.Term):
self.idf_table[query_context.text] = searcher.idf(fieldname, query_context.text,
self._idf_schema)
def all_terms(self, context):
if context:
if isinstance(context, whoosh.query.compound.CompoundQuery):
for subquery in context:
self.all_terms(subquery)
elif isinstance(context, whoosh.query.terms.Term):
self.query_terms.add(context.text)
def apply_tf_normalization(self, score, docnum):
norm_tf = 0.0
for text, f in self.tf_table[docnum].items():
if text in self.query_terms:
norm_tf += (f * self.idf_table.get(text, 1)) ** 2
return score / sqrt(norm_tf) if norm_tf != 0 else 0.0
def tf(self, searcher, fieldname, docnum, tf_schema: TF = TF.frequency, context=None):
        # Slow: use this only as a last resort.
all_tf = super(TF_IDF, self).tf(searcher, fieldname, docnum, tf_schema)
if context and not self.query_terms:
self.all_terms(context)
filtered_terms = {term: freq for term, freq in all_tf.items() if term in self.query_terms}
return filtered_terms
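# Minimal usage sketch (hypothetical index ``ix`` and parsed query ``q``; ``TF`` and ``IDF`` are the
# schema enums referenced in the constructor signature above):
#
#   w = TF_IDF(tf=TF.frequency, idf=IDF.inverse_frequency)
#   with ix.searcher(weighting=w) as s:
#       results = s.search(q)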
class TFIDFScorer(BaseScorer):
"""
    Basic formulation of TF-IDF similarity based on the Lucene score function.
"""
def __init__(self, weighting, fieldname, text, searcher, query_context, tf_schema):
self._fieldname = fieldname
self._searcher = searcher
self._weighting = weighting
self._text = text.decode(encoding="utf-8")
self._context = query_context
self.tf_schema = tf_schema
self.idf_table = weighting.idf_table
self.norm_idf = sqrt(sum([v ** 2 for k, v in self.idf_table.items() if k in weighting.query_terms]))
def supports_block_quality(self):
return True
def score(self, matcher):
if self._searcher.has_vector(matcher.id(), self._fieldname):
return self._approach(matcher)
else:
return matcher.weight() * self.idf_table.get(self._text, 1) # whoosh default definition
def _approach(self, matcher):
tf_term = self._tf_statistics(matcher)
idf_term, norm_idf = self._idf_statistics()
return tf_term * (idf_term ** 2) / norm_idf
def _idf_statistics(self):
idf = self.idf_table.get(self._text, 1)
return idf, self.norm_idf
def _tf_statistics(self, matcher):
maxweight = self._searcher.term_info(self._fieldname, self._text).max_weight()
tf_term = self.tf_schema(matcher.weight(), maxweight)
self._weighting.tf_table[matcher.id()][self._text] = tf_term
return tf_term
def max_quality(self):
idf = self.idf_table[self._text] # idf global statistics
max_weight = self._searcher.term_info(self._fieldname, self._text).max_weight()
return max_weight * idf
def block_quality(self, matcher):
idf = self.idf_table[self._text] # idf global statistics
return matcher.block_max_weight() * idf
class BeliefNetwork(TF_IDF):
tf_normalize = True
def __init__(self, tf: Union[TF, str] = TF.frequency, idf: Union[IDF, str] = IDF.default):
"""
>>> from whoosh import scoring
>>> # You can set IDF and TF schemas
>>> w = scoring.BeliefNetwork(tf = TF.frequency, idf=IDF.inverse_frequency)
:param idf: free parameter, indicates the idf schema. See the Vector Space Model literature.
:param tf: free parameter, indicates the tf schema. See the Vector Space Model literature.
"""
super().__init__(tf=tf, idf=idf)
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
# IDF is a global statistic, so get it from the top-level searcher
self.extract_idf_table(searcher, fieldname, query_context)
# Global context is needed for TF
self.query_terms = set()
self.all_terms(query_context)
return BeliefNetworkScorer(self, fieldname, text, searcher, query_context,
self._tf_schema, qf)
class BeliefNetworkScorer(TFIDFScorer):
"""
Basic formulation of BeliefNetwork Similarity.
"""
def __init__(self, weighting, fieldname, text, searcher, query_context, tf_schema, qf=1):
super().__init__(weighting, fieldname, text, searcher, query_context, tf_schema)
self.qf = qf
self.t = len(query_context.subqueries)
def _approach(self, matcher):
tf_term = self._tf_statistics(matcher)
idf_term, norm_idf = self._idf_statistics()
p_dj_k = tf_term * idf_term
p_q_k = self.qf * idf_term / norm_idf if norm_idf != 0 else 0.0
p_k = (1 / 2) ** self.t
return p_dj_k * p_q_k * p_k
class ExtendedBoolean(TF_IDF):
tf_normalize = False
def __init__(self, p=3):
"""
>>> from whoosh import scoring
>>> # You can set p value
>>> w = scoring.ExtendedBoolean(p=3)
"""
super().__init__(tf=TF.normalized_frequency, idf=IDF.inverse_frequency)
self.p = p
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
# IDF is a global statistic, so get it from the top-level searcher
self.extract_idf_table(searcher, fieldname, query_context)
# Global context is needed for TF
self.query_terms = set()
self.all_terms(query_context)
return ExtendedBooleanScorer(self, fieldname, text, searcher, query_context, self._tf_schema,
self.p)
@staticmethod
def apply_function(score, p, qry_type):
from math import pow
if qry_type == 'AND':
return 1 - pow(score, 1 / p if p != 0 else 1)
return pow(score, 1 / p if p != 0 else 1)
class ExtendedBooleanScorer(TFIDFScorer):
"""
Basic formulation of ExtendedBoolean Similarity.
"""
def __init__(self, weighting, fieldname, text, searcher, query_context, tf_schema, p):
super().__init__(weighting, fieldname, text, searcher, query_context, tf_schema)
self.p = p
self.qry_type = 'OR' if isinstance(query_context, whoosh.query.compound.Or) else 'AND'
self.t = len(query_context.subqueries)
def _approach(self, matcher):
tf_term = self._tf_statistics(matcher)
idf_term, _ = self._idf_statistics()
wij = tf_term * idf_term
if self.qry_type == 'AND':
return (1 - wij) ** self.p / self.t
return wij ** self.p / self.t
class GeneralizedVSM(TF_IDF):
tf_normalize = True
def __init__(self, *, mdb: str = None, tf: Union[TF, str] = TF.frequency, idf: Union[IDF, str] = IDF.default):
"""
>>> from whoosh import scoring
>>> # You can set IDF and TF schemas
>>> w = scoring.GeneralizedVSM(tf = TF.frequency, idf=IDF.inverse_frequency)
:param idf: free parameter, indicates the idf schema. See the Vector Space Model literature.
:param tf: free parameter, indicates the tf schema. See the Vector Space Model literature.
:param mdb: Minterm db base path. Example: 'mycol', 'mycol/body', etc.
"""
super().__init__(tf=tf, idf=idf)
self.mdb = mdb
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
# IDF is a global statistic, so get it from the top-level searcher
self.extract_idf_table(searcher, fieldname, query_context)
self.query_terms = set()
self.all_terms(query_context)
return GeneralizedVSMScorer(self, fieldname, text, searcher, query_context, self._tf_schema, self._idf_schema,
self.mdb)
class GeneralizedVSMScorer(TFIDFScorer):
"""
    Basic formulation of Generalized VSM similarity.
"""
def __init__(self, weighting, fieldname, text, searcher, query_context, tf_schema, idf_schema, mdb):
super().__init__(weighting, fieldname, text, searcher, query_context, tf_schema)
self.idf_schema = idf_schema
self.mdb = mdb
def _approach(self, matcher):
if exist_minterm():
tf_term = self._tf_statistics(matcher)
idf_term, norm_idf = self._idf_statistics()
try:
minterm = get_minterm(self.mdb, self._text)
match_index = get_minterm_match(self.mdb, matcher.id())
return (tf_term * (idf_term ** 2) / norm_idf if norm_idf != 0 else 0.0) * \
minterm[match_index]
except KeyError:
return tf_term * (idf_term ** 2) / norm_idf if norm_idf != 0 else 0.0
else:
return matcher.weight() * self.idf_table.get(self._text, 1) # whoosh default definition
# Utility models
class Weighting(WeightingModel):
"""This class provides backwards-compatibility with the old weighting
class architecture, so any existing custom scorers don't need to be
rewritten.
"""
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
return self.CompatibilityScorer(searcher, fieldname, text, self.score)
def score(self, searcher, fieldname, text, docnum, weight):
raise NotImplementedError
class CompatibilityScorer(BaseScorer):
def __init__(self, searcher, fieldname, text, scoremethod):
self.searcher = searcher
self.fieldname = fieldname
self.text = text
self.scoremethod = scoremethod
def score(self, matcher):
return self.scoremethod(self.searcher, self.fieldname, self.text,
matcher.id(), matcher.weight())
class FunctionWeighting(WeightingModel):
"""Uses a supplied function to do the scoring. For simple scoring functions
and experiments this may be simpler to use than writing a full weighting
model class and scorer class.
The function should accept the arguments
``searcher, fieldname, text, matcher``.
For example, the following function will score documents based on the
earliest position of the query term in the document::
def pos_score_fn(searcher, fieldname, text, matcher):
poses = matcher.value_as("positions")
return 1.0 / (poses[0] + 1)
pos_weighting = scoring.FunctionWeighting(pos_score_fn)
with myindex.searcher(weighting=pos_weighting) as s:
results = s.search(q)
Note that the searcher passed to the function may be a per-segment searcher
for performance reasons. If you want to get global statistics inside the
function, you should use ``searcher.get_parent()`` to get the top-level
searcher. (However, if you are using global statistics, you should probably
write a real model/scorer combo so you can cache them on the object.)
"""
def __init__(self, fn):
self.fn = fn
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
return self.FunctionScorer(self.fn, searcher, fieldname, text, qf=qf)
class FunctionScorer(BaseScorer):
def __init__(self, fn, searcher, fieldname, text, qf=1):
self.fn = fn
self.searcher = searcher
self.fieldname = fieldname
self.text = text
self.qf = qf
def score(self, matcher):
return self.fn(self.searcher, self.fieldname, self.text, matcher)
class MultiWeighting(WeightingModel):
"""Chooses from multiple scoring algorithms based on the field.
"""
def __init__(self, default, **weightings):
"""The only non-keyword argument specifies the default
:class:`Weighting` instance to use. Keyword arguments specify
Weighting instances for specific fields.
For example, to use ``BM25`` for most fields, but ``Frequency`` for
the ``id`` field and ``TF_IDF`` for the ``keys`` field::
mw = MultiWeighting(BM25(), id=Frequency(), keys=TF_IDF())
:param default: the Weighting instance to use for fields not
specified in the keyword arguments.
"""
self.default = default
# Store weighting functions by field name
self.weightings = weightings
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
w = self.weightings.get(fieldname, self.default)
return w.scorer(searcher, fieldname, text, qf=qf)
class ReverseWeighting(WeightingModel):
"""Wraps a weighting object and subtracts the wrapped model's scores from
0, essentially reversing the weighting model.
"""
def __init__(self, weighting):
self.weighting = weighting
def scorer(self, searcher, fieldname, text, qf=1, query_context=None):
subscorer = self.weighting.scorer(searcher, fieldname, text, qf=qf)
return ReverseWeighting.ReverseScorer(subscorer)
class ReverseScorer(BaseScorer):
def __init__(self, subscorer):
self.subscorer = subscorer
def supports_block_quality(self):
return self.subscorer.supports_block_quality()
def score(self, matcher):
return 0 - self.subscorer.score(matcher)
def max_quality(self):
return 0 - self.subscorer.max_quality()
def block_quality(self, matcher):
return 0 - self.subscorer.block_quality(matcher)
|
from dataclasses import dataclass
@dataclass(frozen=True)
class Price:
"""
A generic (and minimalist) representation of the current price
"""
last: float
change: float
change_pct: float
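    # Example (made-up quote): Price(last=101.25, change=1.25, change_pct=1.25) is immutable
    # because of frozen=True, so assigning to an attribute raises dataclasses.FrozenInstanceError.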
|
import sys
def getbit(i, n):
return (i >> n) & 1
def setbit(i, n, val=1):
if val == 1:
return i | (1 << n)
return i & (~(1 << n))
def bitcmp(i1, i2, n):
for i in range(0, n):
b2 = getbit(i2, i)
b1 = getbit(i1, i)
if b1 != b2:
return b1 - b2
return 0
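# A few concrete values for the bit helpers above (easy to verify by hand):
#   getbit(0b1010, 1) == 1 and getbit(0b1010, 2) == 0
#   setbit(0, 3) == 0b1000 and setbit(0b1111, 1, 0) == 0b1101
#   bitcmp(5, 13, 3) == 0 because 5 (0b101) and 13 (0b1101) agree in their three lowest bits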
def fac_check(i, i1, i2, bits):
p = i1 * i2
if p > i:
return -3
if p == i:
if i1 == 1 or i2 == 1:
return -4
return 10
return bitcmp(i, p, bits)
def factorize_run(i, i1, i2, n):
for b in range(0, 4):
i1 = setbit(i1, n, getbit(b, 0))
i2 = setbit(i2, n, getbit(b, 1))
rc = fac_check(i, i1, i2, n + 1)
if rc == 10:
return factorize(i1) + factorize(i2)
if rc == 0:
f = factorize_run(i, i1, i2, n + 1)
if f != [i]:
return f
return [i]
def factorize(i):
l = factorize_run(i, 0, 0, 0)
l.sort()
return l
fpr = int(sys.argv[1])
print("Factorizing " + str(fpr) + " ...")
rc = factorize(fpr)
print(rc)
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
pconfig = Path(__file__).parents[1]
#aa = os.path.abspath(os.path.join(yourpath, os.pardir))
traindatapathForlogistic = "%s/data/raw/Social_Network_Ads.csv"%pconfig
if __name__ == "__main__":
    print("The logistic regression training dataset is at", traindatapathForlogistic)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Jupyter ipython notebook utilities.
#
# This file is part of the pybgl project.
# https://github.com/nokia/pybgl
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
from pybgl.html import html
from pybgl.graphviz import dotstr_to_html
def in_ipynb() -> bool:
"""
Tests whether the code is running inside a Jupyter Notebook.
Returns:
True iff the code is running inside a Jupyter Notebook.
"""
try:
return str(type(get_ipython())) == "<class 'ipykernel.zmqshell.ZMQInteractiveShell'>"
except NameError:
return False
def background_template_html(background_color = None):
return "<div style='background-color:%s'>%%s</div>" % background_color if background_color else "%s"
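# For example, background_template_html("#ffffff") returns
# "<div style='background-color:#ffffff'>%s</div>", while passing None yields the plain "%s".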
def ipynb_display_graph(g, background_color = None, **kwargs):
"""
Display a Graph in a Jupyter Notebook.
"""
template_html = background_template_html(background_color)
html(template_html % dotstr_to_html(g.to_dot(**kwargs)))
def display_svg(svg, filename_svg, background_color = None):
with open(filename_svg, "w") as f:
print(svg, file = f)
html("<a href='%s' target='_blank'>View</a>" % filename_svg)
template_html = background_template_html(background_color)
html(template_html % svg)
def display_body(body_html, filename_html, **kwargs):
return display_svg(
"<html><head><meta charset='UTF-8'></head><body>%s</body></html>" % body_html,
filename_html,
**kwargs
)
def ipynb_get_background_color():
"""
Retrieves the HTML background color of the Jupyter notebook.
Returns:
The `str` containing the background color of the notebook, `None` otherwise.
"""
if not in_ipynb():
return None
    # <<< Must be in an independent Jupyter cell to publish bgcolor
from IPython.display import Javascript, display
display(Javascript(
"""
var bgcolor = getComputedStyle(document.querySelector('.notebook_app #notebook')).backgroundColor;
IPython.notebook.kernel.execute("bgcolor = '" + bgcolor + "'")
"""
))
# >>>
return bgcolor
def ipynb_get_foreground_color():
"""
Retrieves the HTML foreground color of the Jupyter notebook.
Returns:
        The `str` containing the foreground color of the notebook, `None` otherwise.
"""
if not in_ipynb():
return None
    # <<< Must be in an independent Jupyter cell to publish fgcolor
from IPython.display import Javascript, display
display(Javascript(
"""
var fgcolor = getComputedStyle(document.querySelector('.notebook_app div.output_area pre')).color;
IPython.notebook.kernel.execute("fgcolor = '" + fgcolor + "'")
"""
))
# >>>
return fgcolor
|
#!/usr/bin/env python3
import can
import logging
import os
import signal
import socketcan
import socketcanopen
logging.basicConfig(level=logging.DEBUG)
CAN_INTERFACE = "vcan0"
can_bus = can.Bus(CAN_INTERFACE, bustype="socketcan")
node_id = 0x02
canopen_od = socketcanopen.ObjectDictionary.from_eds(os.path.dirname(os.path.relpath(__file__)) + '/node.eds', node_id)
node = socketcanopen.Node(can_bus, node_id, canopen_od)
signal.pause() # Run forever
|
from video import *
import os
import shutil
import sys
header_color='#ffaa00'
slide("title.png",48,[(50,"Creature Control in",header_color),
(50,"a Fluid Environment",header_color),
" ",
(36,"paperid 0143"),
(36,"SIGGRAPH 2009")])
slide("wind_blocks.png",36,
[(54,"Block Examples",header_color),
" ",
"Two blocks connected with",
"a single joint optimizing",
"various objective functions",
"(simple fluids)"])
slide("fluid_blocks.png",36,
[(54,"Block Examples",header_color),
" ",
"Two blocks connected with",
"a single joint optimizing",
"various objective functions",
"(Navier-Stokes fluids)"])
slide("human-back-effort.png",36,
[(54,"Human",header_color),
" ",
"A human with three controlled",
"joints optimizes effort",
"on the lower back"])
slide("human-effort.png",36,
[(54,"Human",header_color),
" ",
"A human with three controlled",
"joints optimizes effort",
"on the lower back and arms"])
slide("human-wind-back-effort.png",36,
[(54,"Human",header_color),
" ",
"A human with three controlled",
"joints optimizes effort",
"on the lower back"])
slide("human-wind-effort.png",36,
[(54,"Human",header_color),
" ",
"A human with three controlled",
"joints optimizes effort",
"on the lower back and arms"])
slide("human-wind-drag.png",36,
[(54,"Human",header_color),
" ",
"A human with three controlled",
"joints optimizes effort",
"on the lower back",
"and drag on the arms"])
slide("bird-block.png",36,
[(54,"Driven Flyer",header_color),
" ",
"A driven flyer with two",
"controlled joints alternates",
"maximizing and minimizing",
"drag"])
slide("bird-mesh.png",36,
[(54,"Driven Flyer",header_color),
" ",
"A driven flyer with two",
"controlled joints alternates",
"maximizing and minimizing",
"drag (with flesh)"])
slide("octosquid-close.png",36,
[(54,"Swimmer",header_color),
" ",
"A swimmer with five controlled",
"joints alternates maximizing",
"and minimizing drag, close"])
slide("octosquid-farther.png",36,
[(54,"Swimmer",header_color),
" ",
"A swimmer with five controlled",
"joints alternates maximizing",
"and minimizing drag, far"])
slide("half.png",36,
["half speed"])
slide("end.png",54,["The End"])
final_render_base="/solver/vol0/swimming4/final_video"
video_dir="swimming_video_tmp"
if not os.path.exists(video_dir):
os.mkdir(video_dir)
#if os.path.isdir(video_dir):
# print "%s already exists... delete? (y/n) "%video_dir,
# c=sys.stdin.readline()
# if c=="y\n": shutil.rmtree(video_dir)
# else: sys.exit(1)
shutil.rmtree(video_dir)
video=VIDEO(video_dir)
using_lengths=True
testing_titles=True
skip_interval=1
def caption_length(time=3.5):
if not using_lengths and testing_titles:
return 1
else:
return int(video.fps*time)
video.add_frame("title.png",caption_length(4./skip_interval))
# blocks demos
video.add_frame("wind_blocks.png",caption_length(8./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming2/final_input_videos/wind_final_render",step=skip_interval)
video.add_frame("fluid_blocks.png",caption_length(8./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming2/final_input_videos/fluids_final_render",step=skip_interval)
#humans
video.add_frame("human-back-effort.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming2/final_input_videos/human_no_smoke_back_render",0,274,step=skip_interval)
video.add_frame("human-effort.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming2/final_input_videos/human_no_smoke_min_effort_col_render",0,274,step=skip_interval)
video.add_frame("human-wind-back-effort.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming2/final_input_videos/wind_back_final_render",124,399,step=skip_interval)
video.add_frame("human-wind-effort.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming2/final_input_videos/wind_effort_final_render",124,399,step=skip_interval)
video.add_frame("human-wind-drag.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming2/final_input_videos/wind_drag_final_render",124,399,step=skip_interval)
video.add_frame("bird-block.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming9/render_backup/bird/bird_bones_mike/bird_flying_bones_Smoke_Postprocess_Test_3_Resolution_15_final_render",step=skip_interval)
video.add_frame("bird-mesh.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming9/render_backup/bird/bird_mesh_mike/bird_flying_mesh_Smoke_Postprocess_Test_2_Resolution_15_final_high_render",step=2*skip_interval)
#video.add_frame("half.png",caption_length(2./skip_interval))
#video.add_directory("/solver/vol0/swimming9/render_backup/bird/bird_mesh_mike/bird_flying_mesh_Smoke_Postprocess_Test_2_Resolution_15_final_high_render",step=skip_interval,duplicate=2)
video.add_frame("octosquid-close.png",caption_length(6./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming9/render_backup/octosquid/octosquid_mike_2/octosquid_final_close_render",step=2*skip_interval)
#video.add_frame("half.png",caption_length(2./skip_interval))
#video.add_directory("/solver/vol0/swimming9/render_backup/octosquid/octosquid_mike_2/octosquid_final_close_render",step=skip_interval,duplicate=2)
video.add_frame("octosquid-farther.png",caption_length(6./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol0/swimming9/render_backup/octosquid/octosquid_mike/octosquid_final_far_render",step=2*skip_interval)
#video.add_frame("half.png",caption_length(2./skip_interval))
#video.add_directory("/solver/vol0/swimming9/render_backup/octosquid/octosquid_mike/octosquid_final_far_render",step=skip_interval,duplicate=2)
video.add_frame("end.png",caption_length(2./skip_interval))
video.make_movie('swimming')
|
"""
Import as:
import dev_scripts.git.git_hooks.utils as dsgghout
"""
# NOTE: This file should depend only on Python standard libraries.
import compileall
import inspect
import logging
import os
import re
import string
import subprocess
import sys
from typing import Any, List, Optional, Tuple
_LOG = logging.getLogger(__name__)
# TODO(gp): Check these hooks
# https://github.com/pre-commit/pre-commit-hooks/tree/master/pre_commit_hooks
# https://github.com/pre-commit/pre-commit-hooks/blob/master/pre_commit_hooks/check_ast.py
# https://github.com/pre-commit/pre-commit-hooks/blob/master/pre_commit_hooks/check_added_large_files.py
# https://github.com/pre-commit/pre-commit-hooks/blob/master/pre_commit_hooks/check_merge_conflict.py
# https://code-maven.com/enforcing-commit-message-format-in-git
# TODO(gp): Add a check for "Do not commit" or "Do not merge".
# The path to the git-binary:
_GIT_BINARY_PATH = "git"
# Start copy-paste from helpers/printing.py
_COLOR_MAP = {
"blue": 94,
"green": 92,
"white": 0,
"purple": 95,
"red": 91,
"yellow": 33,
    # Blue.
"DEBUG": 34,
# Cyan.
"INFO": 36,
# Yellow.
"WARNING": 33,
# Red.
"ERROR": 31,
# White on red background.
"CRITICAL": 41,
}
def color_highlight(text: str, color: str) -> str:
"""
Return a colored string.
"""
prefix = "\033["
suffix = "\033[0m"
assert color in _COLOR_MAP
color_code = _COLOR_MAP[color]
txt = f"{prefix}{color_code}m{text}{suffix}"
return txt
# End copy-paste.
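# For example, color_highlight("failed", "red") returns "\033[91mfailed\033[0m", which most
# terminals render as red text; the trailing "\033[0m" resets the color.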
# Start copy-paste from helpers/introspection.py
def get_function_name(count: int = 0) -> str:
"""
Return the name of the function calling this function, i.e., the name of
the function calling `get_function_name()`.
"""
ptr = inspect.currentframe()
# count=0 corresponds to the calling function, so we need to add an extra
# step walking the call stack.
count += 1
for _ in range(count):
assert ptr is not None
ptr = ptr.f_back
func_name = ptr.f_code.co_name # type: ignore
return func_name
# End copy-paste.
# Start copy-paste from helpers/system_interaction.py
def _system_to_string(
cmd: str, abort_on_error: bool = True, verbose: bool = False
) -> Tuple[int, str]:
assert isinstance(cmd, str), "Type of '%s' is %s" % (str(cmd), type(cmd))
if verbose:
print(f"> {cmd}")
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
with subprocess.Popen(
cmd, shell=True, executable="/bin/bash", stdout=stdout, stderr=stderr
) as p:
output = ""
while True:
line = p.stdout.readline().decode("utf-8") # type: ignore
if not line:
break
# print((line.rstrip("\n")))
output += line
p.stdout.close() # type: ignore
rc = p.wait()
if abort_on_error and rc != 0:
msg = (
"cmd='%s' failed with rc='%s'" % (cmd, rc)
) + "\nOutput of the failing command is:\n%s" % output
_LOG.error(msg)
sys.exit(-1)
return rc, output
# End copy-paste.
# #############################################################################
# Utils.
# #############################################################################
def _get_files() -> List[str]:
"""
Get all the files to process.
"""
# Check all files staged and modified, i.e., skipping only un-tracked files.
# TODO(gp): In reality we should check only staged files.
# > git status --porcelain -uno
# M dev_scripts/git/git_hooks/pre-commit.py
cmd = f"{_GIT_BINARY_PATH} status --porcelain --untracked-files=no"
rc, txt = _system_to_string(cmd)
_ = rc
file_list: List[str] = txt.splitlines()
# Remove the Git codes (in the first 3 characters) leaving only the file name.
file_list = [file_name[3:] for file_name in file_list]
return file_list
def _report() -> str:
func_name = get_function_name(count=1)
print("\n" + color_highlight(f"##### {func_name} ######", "purple"))
return func_name
def _handle_error(func_name: str, error: bool, abort_on_error: bool) -> None:
"""
Abort depending on the error code `error` and on the desired behavior
`abort_on_error`.
"""
if error:
print("\n" + color_highlight(f"'{func_name}' failed", "red"))
if abort_on_error:
sys.exit(-1)
else:
print(color_highlight(f"'{func_name}' passed", "green"))
# #############################################################################
# check_master
# #############################################################################
def check_master(abort_on_error: bool = True) -> None:
"""
Check if we are committing directly to master, instead of a branch.
"""
func_name = _report()
# Print some information.
verbose = True
cmd = "git rev-parse --abbrev-ref HEAD"
rc, branch_name = _system_to_string(cmd, verbose=verbose)
_ = rc
branch_name = branch_name.lstrip().rstrip()
print(f"Branch is '{branch_name}'")
if branch_name == "master":
        msg = (
            "You shouldn't commit directly to `master`: please open a PR and merge it instead"
        )
_LOG.error(msg)
error = True
else:
error = False
# Handle error.
_handle_error(func_name, error, abort_on_error)
# #############################################################################
# check_author
# #############################################################################
def check_author(abort_on_error: bool = True) -> None:
"""
    Ensure that the committer uses a Gmail account and not a corporate account.
Extremely custom but effective invariant.
"""
func_name = _report()
# Print some information.
verbose = True
var = "user.name"
cmd = f"{_GIT_BINARY_PATH} config {var}"
_system_to_string(cmd, verbose=verbose)
cmd = f"{_GIT_BINARY_PATH} config --show-origin {var}"
_system_to_string(cmd, verbose=verbose)
#
var = "user.email"
cmd = f"{_GIT_BINARY_PATH} config {var}"
rc, user_email = _system_to_string(cmd, verbose=verbose)
_ = rc
user_email = user_email.lstrip().rstrip()
cmd = f"{_GIT_BINARY_PATH} config --show-origin {var}"
_system_to_string(cmd, verbose=verbose)
    print(f"user_email='{user_email}'")
# Check.
error = False
if not user_email.endswith("@gmail.com"):
_LOG.error("user_email='%s' is incorrect", user_email)
error = True
# Handle error.
_handle_error(func_name, error, abort_on_error)
# #############################################################################
# check_file_size
# #############################################################################
# Start copy-paste From helpers/introspection.py
def _sizeof_fmt(num: float) -> str:
"""
Return a human-readable string for a filesize (e.g., "3.5 MB").
"""
# From http://stackoverflow.com/questions/1094841
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
assert 0, "Invalid num='%s'" % num
# End copy-paste.
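# For example, _sizeof_fmt(512) returns "512.0 bytes" and _sizeof_fmt(3.5 * 1024 * 1024)
# returns "3.5 MB".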
# The maximum file-size in KB for a file (not notebook) to be committed.
_MAX_FILE_SIZE_IN_KB = 512
def check_file_size(
abort_on_error: bool = True, file_list: Optional[List[str]] = None
) -> None:
"""
    Ensure that files (other than notebooks) are not larger than a certain size.
    The goal is to avoid checking in a 100 MB file.
"""
func_name = _report()
print(f"max file size={_MAX_FILE_SIZE_IN_KB} KB")
if file_list is None:
file_list = _get_files()
_LOG.info("Files:\n%s", "\n".join(file_list))
# Check all files:
error = False
for file_name in file_list:
if not os.path.exists(file_name):
_LOG.warning("'%s' doesn't exist", file_name)
continue
_LOG.info(file_name)
stat = os.stat(file_name)
size = stat.st_size
size_as_str = _sizeof_fmt(stat.st_size)
_LOG.debug("%s: %s", file_name, size_as_str)
if not file_name.endswith(".ipynb"):
if size > _MAX_FILE_SIZE_IN_KB * 1024:
                # File is too big, abort the commit.
msg = (
f"Filename '{file_name}' is too big to be committed"
+ f": {size_as_str} > {_MAX_FILE_SIZE_IN_KB} KB"
)
_LOG.error(msg)
error = True
# Handle error.
_handle_error(func_name, error, abort_on_error)
# #############################################################################
# check_words
# #############################################################################
_CAESAR_STEP = 7
def caesar(text: str, step: int) -> str:
def shift(alphabet: str) -> str:
return alphabet[step:] + alphabet[:step]
alphabets = (string.ascii_lowercase, string.ascii_uppercase, string.digits)
shifted_alphabets = tuple(map(shift, alphabets))
joined_alphabets = "".join(alphabets)
joined_shifted_alphabets = "".join(shifted_alphabets)
table = str.maketrans(joined_alphabets, joined_shifted_alphabets)
return text.translate(table)
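# For example, with the module-wide step of 7, caesar("abc123", _CAESAR_STEP) == "hij890",
# and caesar(caesar(text, _CAESAR_STEP), -_CAESAR_STEP) recovers the original text.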
def _get_regex(decaesarify: bool) -> Any:
# Prepare the regex.
words = "ln lnpk sptl sltvuhkl slt jyfwav"
if decaesarify:
words = caesar(words, -_CAESAR_STEP)
words_as_regex = "(" + "|".join(words.split()) + ")"
regex = fr"""
(?<![^\W_]) # The preceding char should not be a letter or digit char.
{words_as_regex}
(?![^\W_]) # The next char cannot be a letter or digit.
"""
# regex = re.compile(r"\b(%s)\b" % "|".join(words.split()))
# _LOG.debug("regex=%s", regex)
regex = re.compile(regex, re.IGNORECASE | re.VERBOSE)
# _LOG.debug("regex=%s", regex)
return regex
def _check_words_in_text(
file_name: str, lines: List[str], decaesarify: bool = True
) -> List[str]:
"""
Look for words in the content `lines` of `file_name`.
:return: violations in cfile format
"""
regex = _get_regex(decaesarify)
# Search for violations.
violations = []
for i, line in enumerate(lines):
_LOG.debug("%s: %s", i + 1, line)
m = regex.search(line)
if m:
# Remove some false positive.
if file_name.endswith(".ipynb") and "image/png" in line:
continue
if file_name.endswith(".html") and '<td class="ms' in line:
continue
if file_name.endswith("git.py") and "return _is_repo" in line:
continue
if file_name.endswith("ack") and "compressed" in line:
continue
if file_name.endswith("helpers/git.py") and "def is_" in line:
continue
# Found a violation.
val = m.group(1)
_LOG.debug(" -> found '%s'", val)
val = caesar(val, _CAESAR_STEP)
violation = f"{file_name}:{i+1}: Found '{val}'"
violations.append(violation)
return violations
def _check_words_files(file_list: List[str]) -> bool:
"""
Look for words in the passed files.
:return: error
"""
_LOG.debug("Processing %d files", len(file_list))
# Scan all the files.
violations = []
for file_name in file_list:
if any(file_name.endswith(ext) for ext in "jpg png zip pkl gz".split()):
_LOG.warning("Skipping '%s'", file_name)
continue
if not os.path.exists(file_name):
_LOG.warning("Skipping '%s' since it doesn't exist", file_name)
continue
if os.path.isdir(file_name):
_LOG.warning("Skipping '%s' since it is a dir", file_name)
continue
_LOG.info(file_name)
with open(file_name) as f:
lines = f.readlines()
violations.extend(_check_words_in_text(file_name, lines))
#
error = False
if violations:
file_content = "\n".join(map(str, violations))
_LOG.error("There are %d violations:\n%s", len(violations), file_content)
# Write file.
file_name = "cfile"
with open(file_name, "w") as f:
f.write(file_content)
_LOG.warning("Saved cfile in '%s'", file_name)
error = True
return error
def check_words(
abort_on_error: bool = True, file_list: Optional[List[str]] = None
) -> None:
"""
Check that certain words are not used in the staged files.
"""
func_name = _report()
# Get the files.
if file_list is None:
file_list = _get_files()
_LOG.info("Files:\n%s", "\n".join(file_list))
#
error = _check_words_files(file_list)
# Handle error.
_handle_error(func_name, error, abort_on_error)
# #############################################################################
# Python compile
# #############################################################################
def _check_python_compile(file_list: List[str]) -> bool:
"""
Run `compileall.compile_file()` on the files.
:return: error
"""
_LOG.debug("Processing %d files", len(file_list))
# Scan all the files.
violations = []
for file_name in file_list:
success = compileall.compile_file(file_name, force=True, quiet=0)
_LOG.debug("%s -> success=%s", file_name, success)
if not success:
_LOG.error("file_name='%s' doesn't compile correctly", file_name)
violations.append(file_name)
_LOG.debug("violations=%s", len(violations))
error = len(violations) > 0
return error
def check_python_compile(
abort_on_error: bool = True, file_list: Optional[List[str]] = None
) -> None:
"""
Check that code can be compiled. This is not as thorough as executing it.
"""
func_name = _report()
# Get the files.
if file_list is None:
file_list = _get_files()
_LOG.info("Files:\n%s", "\n".join(file_list))
# Keep only the python files.
file_list = [f for f in file_list if f.endswith(".py")]
_LOG.info("Python files:\n%s", "\n".join(file_list))
#
error = _check_python_compile(file_list)
# Handle error.
_handle_error(func_name, error, abort_on_error)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphrelatedPerson(Model):
"""relatedPerson.
:param display_name:
:type display_name: str
:param relationship: Possible values include: 'manager', 'colleague',
'directReport', 'dotLineReport', 'assistant', 'dotLineManager',
'alternateContact', 'friend', 'spouse', 'sibling', 'child', 'parent',
'sponsor', 'emergencyContact', 'other', 'unknownFutureValue'
:type relationship: str or ~users.models.enum
:param user_principal_name:
:type user_principal_name: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'relationship': {'key': 'relationship', 'type': 'str'},
'user_principal_name': {'key': 'userPrincipalName', 'type': 'str'},
}
def __init__(self, display_name=None, relationship=None, user_principal_name=None):
super(MicrosoftgraphrelatedPerson, self).__init__()
self.display_name = display_name
self.relationship = relationship
self.user_principal_name = user_principal_name
|
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
import torch.utils.data as data_utils
from torch.autograd import Variable
from torchvision import models, datasets
from deeprobust.image.attack.deepfool import DeepFool
import deeprobust.image.netmodels.resnet as resnet
'''
CIFAR10
'''
# load model
model = resnet.ResNet18().to('cuda')
print("Load network")
"""
Change the model directory here
"""
model.load_state_dict(torch.load("./trained_models/CIFAR10_ResNet18_epoch_20.pt"))
model.eval()
# load dataset
testloader = torch.utils.data.DataLoader(
datasets.CIFAR10('image/data', train = False, download = True,
transform = transforms.Compose([transforms.ToTensor()])),
batch_size = 1, shuffle = True)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# choose attack example
X, Y = next(iter(testloader))
X = X.to('cuda').float()
# run deepfool attack
adversary = DeepFool(model)
AdvExArray = adversary.generate(X, Y).float()
# predict
pred = model(AdvExArray).cpu().detach()
# print and save result
print('===== RESULT =====')
print("true label:", classes[Y.item()])
print("predict_adv:", classes[np.argmax(pred)])
AdvExArray = AdvExArray.cpu().detach().numpy()
AdvExArray = AdvExArray.swapaxes(1,3).swapaxes(1,2)[0]
plt.imshow(AdvExArray, vmin = 0, vmax = 255)
plt.savefig('./adversary_examples/cifar_advexample_deepfool.png')
|
name = "autoclf"
__all__ = ["classification", "datasets", "encoding", "getargs"]
|
from rest_framework import routers
from .api import NetworkLocationViewSet
from .api import NetworkSearchViewSet
router = routers.SimpleRouter()
router.register(r"networklocation", NetworkLocationViewSet, base_name="networklocation")
router.register(r"networksearch", NetworkSearchViewSet, base_name="networksearch")
urlpatterns = router.urls
|
# Generated by Django 2.1.15 on 2022-02-13 17:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20220213_1703'),
]
operations = [
migrations.AlterField(
model_name='studyfile',
name='size_kb',
field=models.PositiveIntegerField(),
),
]
|
from ._threads import _Thread
from .utils import filter
from .videobulk import _VideoBulk
from ._http import _get_playlist_data
from ._rgxs import _PlaylistPatterns as rgx
from typing import List, Optional, Dict, Any
class Playlist:
__HEAD = 'https://www.youtube.com/playlist?list='
def __init__(self, playlist_id: str):
"""
:param str playlist_id: the _id of the playlist
"""
if 'youtube.com' in playlist_id:
self.id = playlist_id.split('list=')[-1]
else:
self.id = playlist_id
self.__playlist_data = _get_playlist_data(self.id)
def __repr__(self):
return f'<Playlist {self.url}>'
@property
def name(self) -> Optional[str]:
"""
:return: the name of the playlist
"""
names = rgx.name.findall(self.__playlist_data)
return names[0] if names else None
@property
def url(self) -> Optional[str]:
"""
:return: url of the playlist
"""
return f'https://www.youtube.com/playlist?list={self.id}'
@property
def video_count(self) -> Optional[str]:
"""
:return: total number of videos in that playlist
"""
video_count = rgx.video_count.findall(self.__playlist_data)
return video_count[0] if video_count else None
@property
def videos(self) -> _VideoBulk:
"""
:return: list of < video objects > for each video in the playlist (consider limit)
"""
videos = rgx.video_id.findall(self.__playlist_data)
return _VideoBulk(filter(iterable=videos))
@property
def thumbnail(self) -> Optional[str]:
"""
:return: url of the thumbnail of the playlist
"""
thumbnails = rgx.thumbnail.findall(self.__playlist_data)
return thumbnails[0] if thumbnails else None
@property
def info(self) -> Dict[str, Any]:
"""
:return: a dict containing playlist info
"""
def _get_data(pattern):
data = pattern.findall(self.__playlist_data)
return data[0] if data else None
patterns = [rgx.name, rgx.video_count, rgx.thumbnail]
data = _Thread.run(_get_data, patterns)
return {
'name': data[0],
'video_count': data[1],
            'videos': filter(rgx.video_id.findall(self.__playlist_data)),
'url': self.__HEAD + self.id,
'thumbnail': data[2]
}
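# Minimal usage sketch (hypothetical playlist id):
#   pl = Playlist('PLxxxxxxxxxxxxxxxx')
#   print(pl.name, pl.video_count, pl.url)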
|
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from models import MNBDocumentClassifier
if __name__ == "__main__":
# setup x_lab, y_lab
x_lab = 'sentence'
y_lab = 'class'
# load training data
with open('data/rotten_imdb/plot.tok.gt9.5000', encoding="ISO-8859-1") as csv:
df_subj = pd.read_csv(csv, delimiter='\n', header=None,
names=['sentence'])
df_subj['class'] = 'subj'
with open('data/rotten_imdb/quote.tok.gt9.5000', encoding="ISO-8859-1") as csv:
df_obj = pd.read_csv(csv, delimiter='\n', header=None,
names=['sentence'])
df_obj['class'] = 'obj'
df_train = df_subj.append(df_obj)
# create model
model = MNBDocumentClassifier()
# TODO: Add grid search for model parameter values
# TODO: Add option to pass parameter values to MNBDocumentClassifier
# get cross validated error of model
kf = KFold(n_splits=10)
scores = []
for train_i, test_i in kf.split(df_train):
# get split
train = df_train.iloc[train_i]
test = df_train.iloc[test_i]
# fit model
model.fit(train[x_lab], train[y_lab])
# get estimated accuracy score
y_pred = model.predict(test[x_lab])
score = accuracy_score(test[y_lab], y_pred)
scores.append(score)
# print accuracy metrics
print(f'K-Fold CV scores:\n')
for i, score in enumerate(scores):
print(f'{i}:\t{round(score, 3)}')
print(f'\nMean score:\t{sum(scores) / len(scores)}')
|
from objects_superclass import Task
from objects_superclass import Resource
class ImagingMissions(Task):
im_count = 0
tabu_freq = 0
def __init__(self, resource_reqd, tabu_tenure, name,size, lat, longi, payload_reqd, imaging_time, memory_reqd, look_angle_reqd, priority, repition):
self.resource_reqd = resource_reqd
self.tabu_tenure = tabu_tenure
self.name = name
self.size = size
self.lat = lat
self.longi = longi
self.payload_reqd = payload_reqd
self.imaging_time = imaging_time
self.memory_reqd = memory_reqd
self.look_angle_reqd = look_angle_reqd
self.priority = priority
self.repition =repition
ImagingMissions.im_count += 1
def __repr__(self):
return "0"
def get_name(self):
return self.name
def get_size(self):
return self.size
def get_pos(self):
return [float(self.lat),float(self.longi)]
def get_payload_reqd(self):
return self.payload_reqd
def get_imaging_time(self):
return float(self.imaging_time)
def get_memory_reqd(self):
return float(self.memory_reqd)
def get_look_angle_reqd(self):
return float(self.look_angle_reqd)
def get_priority(self):
return float(self.priority)
def get_repition(self):
return float(self.repition)
class Satellite(Resource):
sat_count = 0
def __init__(self,name,attitude,ave_angular_speed,payload,memory,max_memory,lat,longi,roll,pitch,yaw,altitude):
self.name = name
self.attitude = attitude
self.ave_angular_speed = ave_angular_speed
self.payload = payload
self.memory = memory
self.max_memory = max_memory
self.lat = lat
self.longi = longi
self.roll = roll
self.pitch = pitch
self.yaw = yaw
self.altitude = altitude
Satellite.sat_count += 1
def get_name(self):
return self.name
def get_attitude(self):
return self.attitude
def get_ave_angular_speed(self):
return float(self.ave_angular_speed)
def get_payload(self):
return self.payload
def get_memory(self):
return float(self.memory)
def get_max_memory(self):
return float(self.max_memory)
def get_orbit_params(self):
return [float(self.lat),float(self.longi)]
def get_sat_attitude(self):
return [float(self.roll),float(self.pitch), float(self.yaw)]
def get_sat_altitude(self):
return float(self.altitude)
class GroundStation(Resource):
gs_count = 0
def __init__(self, name, lat, longi):
self.name = name
self.lat = lat
self.longi = longi
def get_name(self):
return self.name
def get_pos(self):
return [float(self.lat),float(self.longi)]
class ImagingOpp(ImagingMissions):
im_opp_count = 0
tabu_freq = 0
time_taken = 0
def __init__(self, resource_reqd, tabu_tenure, name, size, lat,longi, payload_reqd, imaging_time, memory_reqd, look_angle_reqd, name1, satellite, ATW,priority, score, repition):
self.resource_reqd = resource_reqd
self.tabu_tenure = tabu_tenure
self.name = name
self.size = size
self.lat = lat
self.longi = longi
self.payload_reqd = payload_reqd
self.imaging_time = imaging_time
self.memory_reqd = memory_reqd
self.look_angle_reqd = look_angle_reqd
self.priority = priority
self.name1 = name1
self.satellite = satellite
self.ATW = ATW
self.score = score
self.repition =repition
ImagingOpp.im_opp_count += 1
def __repr__(self):
return "1"
def get_name1(self):
return self.name1
def get_satellite(self):
return self.satellite
def get_ATW(self):
return self.ATW
def get_score(self):
return self.score
class nullImagingOpp(ImagingMissions):
im_opp_count = 0
tabu_freq = 0
def __init__(self, resource_reqd, tabu_tenure, name, size, lat,longi, payload_reqd, imaging_time, memory_reqd, look_angle_reqd, name1, satellite, ATW,priority = 1, score = 0, repition = 0):
self.resource_reqd = resource_reqd #1
self.tabu_tenure = tabu_tenure #2
self.name = name #3
self.size = size #4
self.lat = lat #5
self.longi = longi #6
self.payload_reqd = payload_reqd #7
self.imaging_time = imaging_time #8
self.memory_reqd = memory_reqd #9
self.look_angle_reqd = look_angle_reqd #10
self.priority = priority #11
self.name1 = name1 #12
self.satellite = satellite #13
self.ATW = ATW #14
self.score = score #15
self.repition =repition #16
ImagingOpp.im_opp_count += 1
def __repr__(self):
return "0"
def get_name1(self):
return self.name1
def get_satellite(self):
return self.satellite
def get_ATW(self):
return self.ATW
def get_score(self):
return self.score
class Downlink(Task):
dl_count = 0
tabu_freq = 0
def __init__(self, name, name1, resource_reqd, tabu_tenure, groundstation, lat, longi, downlink_time, satellite, ATW, score):
self.name = name
self.name1 = name1
self.resource_reqd = resource_reqd
self.tabu_tenure = tabu_tenure
self.groundstation = groundstation
self.lat = lat
self.longi = longi
self.downlink_time = downlink_time
self.satellite = satellite
self.ATW = ATW
self.score = score
Downlink.dl_count+= 1
def get_name(self):
return self.name
def get_name1(self):
return self.name1
def get_groundstation(self):
return self.groundstation
def get_resource_reqd(self):
return self.resource_reqd
def get_pos(self):
return [float(self.lat), float(self.longi)]
def get_downlink_time(self):
return self.downlink_time
def get_satellite(self):
return self.satellite
def get_ATW(self):
return self.ATW
def get_score(self):
return self.score
#Testing
# test_IM = ImagingMissions(['satellite'],1,'1',[1,2],'EO',5,1,45,1)
# print(test_IM)
# print(test_IM.im_count)
# print("-------------------------------------------------------------------------")
# test_sat = Satellite('T1',[0.1,0.1,0.1],3.75,'EO',8,[1,1])
# print(test_sat)
# print(test_sat.sat_count)
# print(test_sat.get_name())
# print('-------------------------------------------------------------------------')
# test_IMopp = ImagingOpp(['satellite'],1,'1',[1,2],'EO',5,1,45,1,'1',test_sat,[100,120])
# print(test_IMopp)
# print(test_IMopp.im_opp_count)
|
from behave import *
from kelpy import cronjob
from kubernetes import config, client
from jinja2 import Environment, FileSystemLoader
@when("the CronJob called {cronjob_name} is created")
@given("the CronJob called {cronjob_name} exists")
def step_impl(context, cronjob_name):
cronjob_namespace = "default"
env = Environment(
loader=FileSystemLoader("./templates"), trim_blocks=True, lstrip_blocks=True
)
cronjob_tmpl = env.get_template("cronjob.j2")
cronjob_spec_file = cronjob_tmpl.render(
cronjob_name=cronjob_name, cronjob_namespace=cronjob_namespace
)
context.create_resp = cronjob.create(
context.k8s_v1_batch_client, cronjob_spec_file, cronjob_namespace
)
@when("the user attempts to retrieve the CronJob {cronjob_name}")
def step_impl(context, cronjob_name):
context.get_resp = cronjob.get(context.k8s_v1_batch_client, cronjob_name)
@given("a CronJob called {cronjob_name} does not exist")
def step_impl(context, cronjob_name):
cronjob.delete(context.k8s_v1_batch_client, cronjob_name)
@then("None is returned instead of the CronJob")
def step_impl(context):
assert context.get_resp is None, "Did not return None"
@then("Results for the CronJob {cronjob_name} are returned")
def step_impl(context, cronjob_name):
assert context.get_resp, "Should've returned a valid response"
@then("a valid CronJob called {cronjob_name} can be found")
def step_impl(context, cronjob_name):
assert context.create_resp, f"Should've found {cronjob_name} CronJob"
|
from measure_mate.settings.base import *
DEBUG = os.environ.get('DJANGO_DEBUG', True)
CSP_CONNECT_SRC += (
"ws://localhost:*", "ws://127.0.0.1:*",
"http://localhost:*", "http://127.0.0.1:*"
)
INSTALLED_APPS += (
'django_extensions',
)
|
# Import all python files within this folder
from os.path import dirname, basename, isdir, realpath
from commands import getstatusoutput as bash
from nrutils import verbose,__pathsfile__
import glob
# # list all py files within this directory
# modules = [ basename(f)[:-3] for f in glob.glob(dirname(__file__)+"/*.py") if not ('__init__.py' in f) ]
#
# # Dynamically import all modules within this folder (namespace preserving)
# for module in modules:
# if verbose: print ' .%s' % module
# exec 'import %s' % module
# List all packages (directories containing an __init__.py) within the directory that contains this file
this_file = realpath(__file__)
if this_file[-1] == 'c': this_file = this_file[:-1]
cmd = 'find %s -maxdepth 2 -name "__init__.py"' % dirname(this_file)
status, output = bash(cmd)
# make a list of all packages within the directory which contains this file
dir_list = output.split(chr(10))
internal_packages = [ basename(dirname(p)) for p in dir_list if not (p == this_file) ]
#
__all__ = internal_packages
# Import all modules from each package
# if verbose: print '\t%s:\n' % __name__
for p in internal_packages:
if verbose: print ' .%s: ' % p
exec 'import %s' % p
# exec 'from %s import *' % p
|
from datetime import datetime
import logging
from pathlib import Path
class LoggingMeta(type):
_is_logging = True
_log_location = Path.home() / ".crucyble" / "{}.log".format(datetime.now().isoformat())
def no_log(self):
self._is_logging = False
@property
def is_logging(self):
return self._is_logging
@classmethod
def log_to(cls, log_location: Path):
cls.log_location = log_location
@property
def log_location(self):
if not self._log_location.parent.exists():
self._log_location.parent.mkdir(exist_ok=True)
return self._log_location
@log_location.setter
def log_location(self, new_location):
if not isinstance(new_location, Path):
raise ValueError("GloVe.log_location must be a valid Path object! Got {} instead.".format(type(new_location)))
self._log_location = new_location
@property
def log_location_char(self):
return str(self.log_location).encode("utf-8")
def logging_callback(logger, log_path: Path):
with open(log_path, 'r') as f:
for line in f:
line = line.rstrip()
if line != "":
logger.info(line)
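# Hedged usage sketch (added for illustration, not part of the original module).
# A class that uses LoggingMeta as its metaclass exposes class-level logging
# attributes; the class name `GloVe` below is only an assumption taken from the
# error message in the log_location setter.
#
# class GloVe(metaclass=LoggingMeta):
#     pass
#
# GloVe.log_location = Path.home() / "crucyble_example.log"  # routed through the property setter
# print(GloVe.is_logging, GloVe.log_location_char)
# logging_callback(logging.getLogger(__name__), GloVe.log_location)  # assumes the log file exists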
|
from typing import Callable, Coroutine
from fastapi import Depends, Request
from hibiapi.api.wallpaper import (
Config,
EndpointsType,
NetRequest,
WallpaperCategoryType,
WallpaperEndpoint,
WallpaperOrderType,
)
from hibiapi.utils.routing import SlashRouter, exclude_params
__mount__, __config__ = "wallpaper", Config
router = SlashRouter(tags=["Wallpaper"])
WallpaperAPIRoot = NetRequest()
async def request_client():
async with WallpaperAPIRoot as client:
yield WallpaperEndpoint(client)
@router.get("/", summary="爱壁纸 API 兼容实现", deprecated=True)
async def _match_all(
request: Request,
type: EndpointsType = EndpointsType.wallpaper,
endpoint: WallpaperEndpoint = Depends(request_client),
):
func: Callable[..., Coroutine] = getattr(endpoint, type)
return await func(**exclude_params(func, request.query_params))
@router.get(EndpointsType.wallpaper)
async def wallpaper(
category: WallpaperCategoryType,
limit: int = 20,
skip: int = 0,
adult: bool = True,
order: WallpaperOrderType = WallpaperOrderType.hot,
endpoint: WallpaperEndpoint = Depends(request_client),
):
"""
## Name: `wallpaper`
> 横版壁纸
---
### Required:
- ***WallpaperCategoryType*** **`category`
- Description: 壁纸分类
---
### Optional:
- ***int*** `limit` = `20`
- Description: 指定返回结果数量, 范围[20, 49]
- ***int*** `skip` = `0`
- Description: 跳过的壁纸数
- ***bool*** `adult` = `true`
- Description: NSFW开关, 没太大效果
- ***OrderType*** `order` = `OrderType.hot`
- Description: 搜索结果排序
"""
return await endpoint.wallpaper(
category=category,
limit=limit,
skip=skip,
adult=adult,
order=order,
)
@router.get(EndpointsType.vertical)
async def vertical(
category: WallpaperCategoryType,
limit: int = 20,
skip: int = 0,
adult: bool = True,
order: WallpaperOrderType = WallpaperOrderType.hot,
endpoint: WallpaperEndpoint = Depends(request_client),
):
"""
## Name: `vertical`
> 竖版壁纸
---
### Required:
- ***WallpaperCategoryType*** **`category`
- Description: 壁纸分类
---
### Optional:
- ***int*** `limit` = `20`
- Description: 指定返回结果数量, 范围[20, 49]
- ***int*** `skip` = `0`
- Description: 跳过的壁纸数
- ***bool*** `adult` = `true`
- Description: NSFW开关, 没太大效果
- ***OrderType*** `order` = `OrderType.hot`
- Description: 搜索结果排序
"""
return await endpoint.vertical(
category=category,
limit=limit,
skip=skip,
adult=adult,
order=order,
)
|
class TextEditor:
_content = ''
def type(self, words):
self._content = self._content + ' ' + words
def get_content(self):
return self._content.strip()
editor = TextEditor()
editor.type('This is the first sentence')
editor.type('This is the second.')
print(editor.get_content())
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internationalization helpers."""
import gettext
_DEFAULT_LANGUAGE_CODE = 'en-US'
_LOCALE_DOMAIN = 'voice-recognizer'
_language_code = _DEFAULT_LANGUAGE_CODE
_locale_dir = None
def set_locale_dir(locale_dir):
"""Sets the directory that contains the language bundles.
This is only required if you call set_language_code with gettext_install=True.
"""
global _locale_dir
if not locale_dir:
raise ValueError('locale_dir must be valid')
_locale_dir = locale_dir
def set_language_code(code, gettext_install=False):
"""Set the BCP-47 language code that the speech systems should use.
Args:
        gettext_install: if True, gettext's _() will be installed as a builtin.
As this has global effect, it should only be done by applications.
"""
global _language_code
_language_code = code.replace('_', '-')
if gettext_install:
if not _locale_dir:
raise ValueError('locale_dir is not set. Please call set_locale_dir().')
language_id = code.replace('-', '_')
t = gettext.translation(_LOCALE_DOMAIN, _locale_dir, [language_id], fallback=True)
t.install()
def get_language_code():
"""Returns the BCP-47 language code that the speech systems should use.
We don't use the system locale because the Assistant API only supports
en-US at launch, so that should be used by default in all environments.
"""
return _language_code
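# Hedged usage sketch (added for illustration, not part of the original module).
# The locale directory below is an assumption; gettext falls back gracefully if
# no matching translation bundle is found.
if __name__ == '__main__':
    set_locale_dir('/usr/share/locale')              # required before gettext_install=True
    set_language_code('en_GB', gettext_install=True)
    print(get_language_code())                       # -> 'en-GB'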
|
# Write a program that reads two numbers and shows one of these messages on screen:
# - the first value is larger
# - the second value is larger
# - there is no larger value, the two are equal
n1 = float(input('Digite o 1° número: '))
n2 = float(input('Digite o 2° número: '))
if n1 > n2:
print('{} é maior que {}.'.format(n1, n2))
elif n1 < n2:
print('{} é maior que {}.'.format(n2, n1))
else:
print('{} é igual a {}.'.format(n1, n2))
|
from osgeo import ogr, osr
databaseServer = "localhost"
databaseName = "te_data"
databaseUser = "postgres"
databasePW = "123456"
connString = "PG: host=%s dbname=%s user=%s password=%s" % (databaseServer,databaseName,databaseUser,databasePW)
def GetPGLayer(lyr_name):
    """Read the point layer `lyr_name`, connect its points into a single linestring
    and write the result to a new layer called `line_test` in the same database."""
    conn = ogr.Open(connString)
    lyr = conn.GetLayer(lyr_name)
# feature_count = lyr.GetFeatureCount()
# create long line:
line = ogr.Geometry(ogr.wkbLineString)
for f in lyr:
geom = f.GetGeometryRef()
line.AddPoint(geom.GetX(), geom.GetY())
# get current layer srs
srs = lyr.GetSpatialRef()
# out layer name
out_layer_name = "line_test"
# Create a layer
conn.CreateLayer(out_layer_name, geom_type=ogr.wkbLineString, srs=srs, options=['OVERWRITE=YES'])
# Get Created layer
lyr = conn.GetLayer( out_layer_name )
# Add an ID field
# idField = ogr.FieldDefn("id", ogr.OFTInteger)
# lyr.CreateField(idField)
# Create the feature and set values
featureDefn = lyr.GetLayerDefn()
feature = ogr.Feature(featureDefn)
feature.SetGeometry(line)
# feature.SetField("id", 1)
# Start transaction
lyr.StartTransaction()
lyr.CreateFeature(feature)
# Commit Transaction
lyr.CommitTransaction()
feature = None
# Delete Layer
# conn.DeleteLayer(lyr_name)
# Close connection
conn = None
# lyr_name = 'pharmacies'
# lyr = GetPGLayer(lyr_name)
|
def isPalindrome(string):
# Write your code here.
    i = 0
    while i < len(string) // 2:
        if string[i] != string[-i - 1]:
            return False
        i += 1
return True
string = "abcdefghhgfedcba"
print(isPalindrome(string))
string = "abcdefgghgfedcba"
print(isPalindrome(string))
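# Hedged alternative (added for comparison, not in the original): for plain strings
# the same check can be expressed with slicing.
def isPalindromeSlice(string):
    return string == string[::-1]

print(isPalindromeSlice("abcdefghhgfedcba"))  # True
print(isPalindromeSlice("abcdefgghgfedcba"))  # False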
|
from ..registry import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module
class FasterRCNN(TwoStageDetector):
def __init__(self,
backbone,
rpn_head,
bbox_roi_extractor,
bbox_head,
train_cfg,
test_cfg,
neck=None,
shared_head=None,
pretrained=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
shared_head=shared_head,
rpn_head=rpn_head,
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
|
#!python3
import subprocess
from typing import Tuple, List, Dict, Union
from magLabUtilities.parametrictestutilities.testmanager import TestCase
class GeometryGenerator:
@staticmethod
def fromScript(scriptFP:str, overrideParameterList:List[Tuple[str,Union[str,int,float]]]):
cmdString = ''
cmdList = []
for overrideParameter in overrideParameterList:
if isinstance(overrideParameter[1], str):
cmdString += r'%s = \'%s\'\n' % (overrideParameter[0], overrideParameter[1])
cmdList.append('%s = \'%s\'' % (overrideParameter[0], overrideParameter[1]))
elif isinstance(overrideParameter[1], int):
cmdString += r'%s = %d\n' % (overrideParameter[0], overrideParameter[1])
cmdList.append('%s = %d' % (overrideParameter[0], overrideParameter[1]))
elif isinstance(overrideParameter[1], float):
cmdString += r'%s = %f\n' % (overrideParameter[0], overrideParameter[1])
cmdList.append('%s = %f' % (overrideParameter[0], overrideParameter[1]))
subprocess.run(['py', '-2', scriptFP, *cmdList])
@staticmethod
def fromTestCase(testCase:TestCase, processParameterList:List[str], scriptFP:str):
overrideParameterList = []
overrideParameterList.append(('unvFD', testCase.caseFD))
for processParameter in processParameterList:
coord = testCase.coordDict[processParameter]
overrideParameterList.append((coord.name, coord.value))
GeometryGenerator.fromScript(scriptFP, overrideParameterList)
|
__author__ = 'Qiao Zhang'
from tianyancha.tianyancha import Tianyancha
from tianyancha.tianyancha import WriterJson
|
"""
Unit tests for the cluster renderer
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
from .. import clusterRenderer
from ... import utils
from ....filtering import clusters
from ....filtering.filters import clusterFilter
from ....system import lattice
class TestClusterRenderer(unittest.TestCase):
"""
Test the cluster renderer
"""
def setUp(self):
"""
Called before each test
"""
# lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("He", [0, 0, 0], 0)
self.lattice.addAtom("He", [0, 0, 8], 0)
self.lattice.addAtom("He", [3, 0, 0], 0)
self.lattice.addAtom("He", [0, 11, 22], 0)
self.lattice.addAtom("He", [4, 7, 1], 0)
self.lattice.addAtom("He", [1.5, 3, 0], 0)
self.lattice.addAtom("He", [0, 4, 30], 0)
self.lattice.addAtom("He", [1.5, 1.5, 3], 0)
self.lattice.addAtom("He", [99, 99, 99], 0)
# reference
self.ref = lattice.Lattice()
self.ref.addAtom("He", [0, 0, 0], 0)
self.ref.addAtom("He", [0, 0, 8], 0)
self.ref.addAtom("He", [3, 0, 0], 0)
self.ref.addAtom("He", [0, 11, 22], 0)
self.ref.addAtom("He", [4, 7, 1], 0)
self.ref.addAtom("He", [1.5, 3, 0], 0)
self.ref.addAtom("He", [0, 4, 30], 0)
self.ref.addAtom("He", [1.5, 1.5, 3], 0)
self.ref.addAtom("He", [99, 99, 99], 0)
# cluster list
cluster = clusters.AtomCluster(self.lattice)
cluster.addAtom(0)
cluster.addAtom(2)
cluster.addAtom(5)
cluster.addAtom(7)
self.atomClusters = [cluster]
cluster = clusters.DefectCluster(self.lattice, self.ref)
cluster.addInterstitial(2)
cluster.addInterstitial(5)
cluster.addVacancy(0)
cluster.addVacancy(7)
self.defectClusters = [cluster]
# settings
self.settings = clusterFilter.ClusterFilterSettings()
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.ref = None
self.atomClusters = None
self.defectClusters = None
self.settings = None
def test_clusterRendererAtoms(self):
"""
Cluster renderer (atoms)
"""
# run the renderer
renderer = clusterRenderer.ClusterRenderer()
renderer.render(self.atomClusters, self.settings, refState=None)
# check result is correct type
self.assertIsInstance(renderer.getActor(), utils.ActorObject)
def test_clusterRendererDefects(self):
"""
Cluster renderer (defects)
"""
# run the renderer
renderer = clusterRenderer.ClusterRenderer()
renderer.render(self.defectClusters, self.settings, refState=self.ref)
# check result is correct type
self.assertIsInstance(renderer.getActor(), utils.ActorObject)
|
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gdata.base.service
import gdata.service
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata.base
# Demonstrates queries to the itemtypes feed for specified locale.
gb_client = gdata.base.service.GBaseService()
locale = raw_input('Please enter locale (ex: en_US): ')
q = gdata.base.service.BaseQuery()
q.feed = '/base/feeds/itemtypes/%s' % locale
print q.ToUri()
feed = gb_client.QueryItemTypesFeed(q.ToUri())
print feed.title.text
for entry in feed.entry:
print '\t' + entry.title.text
for attr in entry.attributes.attribute:
print '\t\tAttr name:%s, type:%s' % (attr.name, attr.type)
|
import os
def read_file(file_path, split=False):
"""Read the contents of a file from disk.
Args:
file_name (string): full path to file on disk
"""
data = None
if os.path.isfile(file_path):
with open(file_path, 'r') as fs:
data = fs.read().strip('\n')
if split:
data = data.split('\n')
return data
def repo_location():
return os.path.expanduser('~/.dotfiles')
VERSION = read_file(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'VERSION'))
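# Hedged usage sketch (added for illustration); 'packages.txt' is an assumed file
# name used only to demonstrate the split option.
if __name__ == '__main__':
    packages = read_file(os.path.join(repo_location(), 'packages.txt'), split=True)
    print(VERSION, packages)  # packages is None when the file does not exist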
|
"""
Runs a series of maintenance operations on the collection of entry files, updating the table of content files for
each category as well as creating a statistics file.
Counts the number of records each sub-folder and updates the overview.
Sorts the entries in the contents files of each sub folder alphabetically.
This script runs with Python 3, it could also with Python 2 with some minor tweaks probably.
"""
import re
import urllib.request
import http.client
import datetime
import json
import textwrap
from utils.utils import *
TOC = '_toc.md'
def get_category_paths():
"""
Returns all sub folders of the games path.
"""
return [os.path.join(games_path, x) for x in os.listdir(games_path) if os.path.isdir(os.path.join(games_path, x))]
def get_entry_paths(category_path):
"""
Returns all files of a category path, except for '_toc.md'.
"""
return [os.path.join(category_path, x) for x in os.listdir(category_path) if x != TOC and os.path.isfile(os.path.join(category_path, x))]
def extract_overview_for_toc(file):
"""
Parses a file for some interesting fields and concatenates the content.
To be displayed after the game name in the category TOCs.
"""
info = infos[file]
output = []
if 'code language' in info:
output.extend(info['code language'])
if 'code license' in info:
output.extend(info['code license'])
# state
if 'state' in info:
output.extend(info['state'])
output = ", ".join(output)
return output
def update_readme():
"""
Recounts entries in sub categories and writes them to the readme.
Also updates the _toc files in the categories directories.
Note: The Readme must have a specific structure at the beginning, starting with "# Open Source Games" and ending
on "A collection.."
Needs to be performed regularly.
"""
print('update readme file')
# read readme
readme_text = read_text(readme_file)
# compile regex for identifying the building blocks
regex = re.compile(r"(.*?)(\[comment\]: # \(start.*?end of autogenerated content\))(.*)", re.DOTALL)
# apply regex
matches = regex.findall(readme_text)
assert len(matches) == 1
matches = matches[0]
start = matches[0]
end = matches[2]
# get sub folders
category_paths = get_category_paths()
# assemble paths
toc_paths = [os.path.join(path, TOC) for path in category_paths]
# get titles (discarding first two ("# ") and last ("\n") characters)
category_titles = [read_first_line(path)[2:-1] for path in toc_paths]
# get number of files (minus 1 for the already existing TOC file) in each sub folder
n_entries = [len(os.listdir(path)) - 1 for path in category_paths]
# combine titles, category names, numbers in one list
info = zip(category_titles, [os.path.basename(path) for path in category_paths], n_entries)
# sort according to sub category title (should be unique)
info = sorted(info, key=lambda x:x[0])
# assemble output
update = ['- **[{}](games/{}/{})** ({})\n'.format(entry[0], entry[1], TOC, entry[2]) for entry in info]
update = "{} entries\n".format(sum(n_entries)) + "".join(update)
# insert new text in the middle
text = start + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "\n[comment]: # (end of autogenerated content)" + end
# write to readme
write_text(readme_file, text)
def update_category_tocs():
"""
Lists all entries in all sub folders and generates the list in the toc file.
Needs to be performed regularly.
"""
# get category paths
category_paths = get_category_paths()
# for each category
for category_path in category_paths:
print('generate toc for {}'.format(os.path.basename(category_path)))
# read toc header line
toc_file = os.path.join(category_path, TOC)
toc_header = read_first_line(toc_file) # stays as is
# get paths of all entries in this category
entry_paths = get_entry_paths(category_path)
# get titles (discarding first two ("# ") and last ("\n") characters)
titles = [read_first_line(path)[2:-1] for path in entry_paths]
# get more interesting info
more = [extract_overview_for_toc(path) for path in entry_paths]
# combine name, file name and more info
info = zip(titles, [os.path.basename(path) for path in entry_paths], more)
# sort according to entry title (should be unique)
info = sorted(info, key=lambda x:x[0])
# assemble output
update = ['- **[{}]({})** ({})\n'.format(*entry) for entry in info]
update = "".join(update)
# combine with toc header
text = toc_header + '\n' + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "\n[comment]: # (end of autogenerated content)"
# write to toc file
with open(toc_file, mode='w', encoding='utf-8') as f:
f.write(text)
def check_validity_external_links():
"""
    Checks all external links it can find for validity. Prints those with non-OK HTTP responses. Only needs to be run
    from time to time.
"""
# regex for finding urls (can be in <> or in () or a whitespace
regex = re.compile(r"[\s\n]<(http.+?)>|\]\((http.+?)\)|[\s\n](http[^\s\n,]+)")
# count
number_checked_links = 0
# get category paths
category_paths = get_category_paths()
# for each category
for category_path in category_paths:
print('check links for {}'.format(os.path.basename(category_path)))
# get entry paths
entry_paths = get_entry_paths(category_path)
# for each entry
for entry_path in entry_paths:
# read entry
content = read_text(entry_path)
# apply regex
matches = regex.findall(content)
# for each match
for match in matches:
# for each possible clause
for url in match:
# if there was something
if url:
try:
# without a special header, frequent 403 responses occur
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)'})
urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
print("{}: {} - {}".format(os.path.basename(entry_path), url, e.code))
except urllib.error.URLError as e:
print("{}: {} - {}".format(os.path.basename(entry_path), url, e.reason))
except http.client.RemoteDisconnected:
print("{}: {} - disconnected without response".format(os.path.basename(entry_path), url))
number_checked_links += 1
if number_checked_links % 50 == 0:
print("{} links checked".format(number_checked_links))
print("{} links checked".format(number_checked_links))
def check_template_leftovers():
"""
Checks for template leftovers.
Should be run only occasionally.
"""
# load template and get all lines
text = read_text(os.path.join(games_path, 'template.md'))
text = text.split('\n')
check_strings = [x for x in text if x and not x.startswith('##')]
# get category paths
category_paths = get_category_paths()
# for each category
for category_path in category_paths:
# get paths of all entries in this category
entry_paths = get_entry_paths(category_path)
for entry_path in entry_paths:
# read it line by line
content = read_text(entry_path)
for check_string in check_strings:
if content.find(check_string) >= 0:
print('{}: found {}'.format(os.path.basename(entry_path), check_string))
def parse_entry(content):
"""
Returns a dictionary of the features of the content
"""
info = {}
# read title
regex = re.compile(r"^# (.*)") # start of content, starting with "# " and then everything until the end of line
matches = regex.findall(content)
assert len(matches) == 1
assert matches[0]
info['title'] = matches[0]
# read description
regex = re.compile(r"^.*\n\n_(.*)_\n") # third line from top, everything between underscores
matches = regex.findall(content)
assert len(matches) == 1, info['title']
assert matches[0]
info['description'] = matches[0]
# first read all field names
regex = re.compile(r"^- (.*?): ", re.MULTILINE) # start of each line having "- ", then everything until a colon, then ": "
fields = regex.findall(content)
# check that essential fields are there
essential_fields = ['Home', 'State', 'Code repository', 'Code language']
for field in essential_fields:
if field not in fields:
print('Error: Essential field "{}" missing in entry "{}"'.format(field, info['title']))
return info # so that the remaining entries can also be parsed
# check that all fields are valid fields and are existing in that order
valid_fields = ('Home', 'Media', 'State', 'Play', 'Download', 'Platform', 'Keywords', 'Code repository', 'Code language', 'Code license', 'Code dependencies', 'Assets license', 'Build system', 'Build instructions')
index = 0
for field in fields:
while index < len(valid_fields) and field != valid_fields[index]:
index += 1
if index == len(valid_fields):
print('Error: Field "{}" in entry "{}" either misspelled or in wrong order'.format(field, info['title']))
return info # so that the remaining entries can also be parsed
# iterate over found fields
for field in fields:
regex = re.compile(r"- {}: (.*)".format(field))
matches = regex.findall(content)
assert len(matches) == 1 # every field should only be present once
v = matches[0]
# first store as is
info[field.lower()+'-raw'] = v
# remove parenthesis
v = re.sub(r'\([^)]*\)', '', v)
# split on ','
v = v.split(',')
# strip
v = [x.strip() for x in v]
# remove all being false (empty)
v = [x for x in v if x]
# if entry is of structure <..> remove <>
        v = [x[1:-1] if x[0] == '<' and x[-1] == '>' else x for x in v]
# empty fields will not be stored
if not v:
continue
# store in info
info[field.lower()] = v
# state (essential field) must contain either beta or mature but not both, but at least one
v = info['state']
for t in v:
if t != 'beta' and t != 'mature' and not t.startswith('inactive since '):
            print('Error: Unknown state tag "{}" in entry "{}"'.format(t, info['title']))
return info # so that the rest can run through
    if ('beta' in v) == ('mature' in v):
print('Error: State must be one of <"beta", "mature"> in entry "{}"'.format(info['title']))
return info # so that the rest can run through
# extract inactive year
phrase = 'inactive since '
inactive_year = [x[len(phrase):] for x in v if x.startswith(phrase)]
assert len(inactive_year) <= 1
if inactive_year:
info['inactive'] = inactive_year[0]
# urls in home, download, play and code repositories must start with http or https (or git) and should not contain space
for field in ['home', 'download', 'play', 'code repository']:
if field in info:
for url in info[field]:
if not (url.startswith('http://') or url.startswith('https://') or url.startswith('git://')):
print('URL "{}" in entry "{}" does not start with http'.format(url, info['title']))
if ' ' in url:
print('URL "{}" in entry "{}" contains a space'.format(url, info['title']))
# github repositories should end on .git
if 'code repository' in info:
for repo in info['code repository']:
if repo.startswith('https://github.com/') and not repo.endswith('.git'):
print('Github repo {} in entry "{}" should end on .git.'.format(repo, info['title']))
# check valid platform tags
valid_platforms = ('Android', 'Windows', 'Linux', 'macOS', 'Browser')
if 'platform' in info:
for platform in info['platform']:
if platform not in valid_platforms:
print('Error: invalid platform tag "{}" in entry "{}"'.format(platform, info['title']))
return info # so that the rest can run through
return info
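# Hedged example (added for illustration; the entry text below is an assumption,
# not a real entry from the collection):
#   content = ("# My Game\n\n_A small demo game._\n"
#              "- Home: https://example.org\n- State: mature\n"
#              "- Code repository: https://github.com/example/my-game.git\n"
#              "- Code language: C++\n- Code license: MIT\n")
#   parse_entry(content)
#   # -> {'title': 'My Game', 'description': 'A small demo game.',
#   #     'state': ['mature'], 'code language': ['C++'], 'code license': ['MIT'], ...}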
def assemble_infos():
"""
Parses all entries and assembles interesting infos about them.
"""
# get category paths
category_paths = get_category_paths()
# a database of all important infos about the entries
infos = {}
# for each category
for category_path in category_paths:
# get paths of all entries in this category
entry_paths = get_entry_paths(category_path)
# get titles (discarding first two ("# ") and last ("\n") characters)
category = read_first_line(os.path.join(category_path, TOC))[2:-1]
for entry_path in entry_paths:
# read entry
content = read_text(entry_path)
# parse entry
info = parse_entry(content)
# add category
info['category'] = category
# add file information
info['file'] = os.path.basename(entry_path)[:-3] # [:-3] to cut off the .md
info['path'] = os.path.basename(category_path) + '/' + os.path.basename(entry_path)
# add to list
infos[entry_path] = info
return infos
def generate_statistics():
"""
Generates the statistics page.
Should be done every time the entries change.
"""
# for this function replace infos with infos.values
infois = infos.values()
# start the page
statistics_path = os.path.join(games_path, 'statistics.md')
statistics = '[comment]: # (autogenerated content, do not edit)\n# Statistics\n\n'
# total number
number_entries = len(infois)
rel = lambda x: x / number_entries * 100 # conversion to percent
statistics += 'analyzed {} entries on {}\n\n'.format(number_entries, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# State (beta, mature, inactive)
statistics += '## State\n\n'
number_state_beta = sum(1 for x in infois if 'beta' in x['state'])
number_state_mature = sum(1 for x in infois if 'mature' in x['state'])
number_inactive = sum(1 for x in infois if 'inactive' in x)
statistics += '- mature: {} ({:.1f}%)\n- beta: {} ({:.1f}%)\n- inactive: {} ({:.1f}%)\n\n'.format(number_state_mature, rel(number_state_mature), number_state_beta, rel(number_state_beta), number_inactive, rel(number_inactive))
if number_inactive > 0:
entries_inactive = [(x['title'], x['inactive']) for x in infois if 'inactive' in x]
entries_inactive.sort(key=lambda x: x[0]) # first sort by name
entries_inactive.sort(key=lambda x: x[1], reverse=True) # then sort by inactive year (more recently first)
entries_inactive = ['{} ({})'.format(*x) for x in entries_inactive]
statistics += '##### Inactive State\n\n' + ', '.join(entries_inactive) + '\n\n'
# Language
statistics += '## Code Languages\n\n'
field = 'code language'
# those without language tag
# TODO the language tag is now an essential field, this cannot happen anymore
# number_no_language = sum(1 for x in infois if field not in x)
# if number_no_language > 0:
# statistics += 'Without language tag: {} ({:.1f}%)\n\n'.format(number_no_language, rel(number_no_language))
# entries_no_language = [x['title'] for x in infois if field not in x]
# entries_no_language.sort()
# statistics += ', '.join(entries_no_language) + '\n\n'
# get all languages together
languages = []
for info in infois:
if field in info:
languages.extend(info[field])
unique_languages = set(languages)
unique_languages = [(l, languages.count(l) / len(languages)) for l in unique_languages]
unique_languages.sort(key=lambda x: x[0]) # first sort by name
unique_languages.sort(key=lambda x: x[1], reverse=True) # then sort by occurrence (highest occurrence first)
unique_languages = ['- {} ({:.1f}%)\n'.format(x[0], x[1]*100) for x in unique_languages]
statistics += '##### Language frequency\n\n' + ''.join(unique_languages) + '\n'
# Licenses
statistics += '## Code licenses\n\n'
field = 'code license'
# those without license
number_no_license = sum(1 for x in infois if field not in x)
if number_no_license > 0:
statistics += 'Without license tag: {} ({:.1f}%)\n\n'.format(number_no_license, rel(number_no_license))
entries_no_license = [x['title'] for x in infois if field not in x]
entries_no_license.sort()
statistics += ', '.join(entries_no_license) + '\n\n'
# get all licenses together
licenses = []
for info in infois:
if field in info:
licenses.extend(info[field])
unique_licenses = set(licenses)
unique_licenses = [(l, licenses.count(l) / len(licenses)) for l in unique_licenses]
unique_licenses.sort(key=lambda x: x[0]) # first sort by name
unique_licenses.sort(key=lambda x: -x[1]) # then sort by occurrence (highest occurrence first)
unique_licenses = ['- {} ({:.1f}%)\n'.format(x[0], x[1]*100) for x in unique_licenses]
statistics += '##### Licenses frequency\n\n' + ''.join(unique_licenses) + '\n'
# Keywords
statistics += '## Keywords\n\n'
field = 'keywords'
# get all keywords together
keywords = []
for info in infois:
if field in info:
keywords.extend(info[field])
unique_keywords = set(keywords)
unique_keywords = [(l, keywords.count(l) / len(keywords)) for l in unique_keywords]
unique_keywords.sort(key=lambda x: x[0]) # first sort by name
unique_keywords.sort(key=lambda x: -x[1]) # then sort by occurrence (highest occurrence first)
unique_keywords = ['- {} ({:.1f}%)'.format(x[0], x[1]*100) for x in unique_keywords]
statistics += '##### Keywords frequency\n\n' + '\n'.join(unique_keywords) + '\n\n'
# no download or play field
statistics += '## Entries without download or play fields\n\n'
entries = []
for info in infois:
if 'download' not in info and 'play' not in info:
entries.append(info['title'])
entries.sort()
statistics += '{}: '.format(len(entries)) + ', '.join(entries) + '\n\n'
# code hosted not on github, gitlab, bitbucket, launchpad, sourceforge
popular_code_repositories = ('github.com', 'gitlab.com', 'bitbucket.org', 'code.sf.net', 'code.launchpad.net')
statistics += '## Entries with a code repository not on a popular site\n\n'
entries = []
field = 'code repository'
for info in infois:
if field in info:
popular = False
for repo in info[field]:
for popular_repo in popular_code_repositories:
if popular_repo in repo:
popular = True
break
# if there were repositories, but none popular, add them to the list
if not popular:
entries.append(info['title'])
# print(info[field])
entries.sort()
statistics += '{}: '.format(len(entries)) + ', '.join(entries) + '\n\n'
# Build systems:
statistics += '## Build systems\n\n'
field = 'build system'
# get all build systems together
build_systems = []
for info in infois:
if field in info:
build_systems.extend(info[field])
statistics += 'Build systems information available for {:.1f}% of all projects.\n\n'.format(len(build_systems) / len(infois) * 100)
unique_build_systems = set(build_systems)
unique_build_systems = [(l, build_systems.count(l) / len(build_systems)) for l in unique_build_systems]
unique_build_systems.sort(key=lambda x: x[0]) # first sort by name
unique_build_systems.sort(key=lambda x: -x[1]) # then sort by occurrence (highest occurrence first)
unique_build_systems = ['- {} ({:.1f}%)'.format(x[0], x[1]*100) for x in unique_build_systems]
statistics += '##### Build systems frequency ({})\n\n'.format(len(build_systems)) + '\n'.join(unique_build_systems) + '\n\n'
# C, C++ projects without build system information
c_cpp_project_without_build_system = []
for info in infois:
if field not in info and ('C' in info['code language'] or 'C++' in info['code language']):
c_cpp_project_without_build_system.append(info['title'])
c_cpp_project_without_build_system.sort()
statistics += '##### C and C++ projects without build system information ({})\n\n'.format(len(c_cpp_project_without_build_system)) + ', '.join(c_cpp_project_without_build_system) + '\n\n'
# C, C++ projects with build system information but without CMake as build system
c_cpp_project_not_cmake = []
for info in infois:
        if field in info and 'CMake' not in info[field] and ('C' in info['code language'] or 'C++' in info['code language']):
c_cpp_project_not_cmake.append(info['title'])
c_cpp_project_not_cmake.sort()
statistics += '##### C and C++ projects with a build system different from CMake ({})\n\n'.format(len(c_cpp_project_not_cmake)) + ', '.join(c_cpp_project_not_cmake) + '\n\n'
# Platform
statistics += '## Platform\n\n'
field = 'platform'
# get all platforms together
platforms = []
for info in infois:
if field in info:
platforms.extend(info[field])
statistics += 'Platform information available for {:.1f}% of all projects.\n\n'.format(len(platforms) / len(infois) * 100)
unique_platforms = set(platforms)
unique_platforms = [(l, platforms.count(l) / len(platforms)) for l in unique_platforms]
unique_platforms.sort(key=lambda x: x[0]) # first sort by name
unique_platforms.sort(key=lambda x: -x[1]) # then sort by occurrence (highest occurrence first)
unique_platforms = ['- {} ({:.1f}%)'.format(x[0], x[1]*100) for x in unique_platforms]
statistics += '##### Platforms frequency\n\n' + '\n'.join(unique_platforms) + '\n\n'
with open(statistics_path, mode='w', encoding='utf-8') as f:
f.write(statistics)
def export_json():
"""
Parses all entries, collects interesting info and stores it in a json file suitable for displaying
with a dynamic table in a browser.
"""
# make database out of it
db = {'headings': ['Game', 'Description', 'Download', 'Category', 'State', 'Keywords', 'Source']}
entries = []
for info in infos.values():
# game & description
entry = ['{} (<a href="{}">home</a>, <a href="{}">entry</a>)'.format(info['title'], info['home'][0],
r'https://github.com/Trilarion/opensourcegames/blob/master/games/' + info['path']),
textwrap.shorten(info['description'], width=60, placeholder='..')]
# download
field = 'download'
if field in info and info[field]:
entry.append('<a href="{}">Link</a>'.format(info[field][0]))
else:
entry.append('')
# category
entry.append(info['category'])
# state (field state is essential)
entry.append('{} / {}'.format(info['state'][0], 'inactive since {}'.format(info['inactive']) if 'inactive' in info else 'active'))
# keywords
field = 'keywords'
if field in info and info[field]:
entry.append(', '.join(info[field]))
else:
entry.append('')
# source
text = []
field = 'code repository'
if field in info and info[field]:
text.append('<a href="{}">Source</a>'.format(info[field][0]))
field = 'code language'
if field in info and info[field]:
text.append(', '.join(info[field]))
field = 'code license'
if field in info and info[field]:
text.append(info[field][0])
entry.append(' - '.join(text))
# append to entries
entries.append(entry)
# sort entries by game name
entries.sort(key=lambda x: x[0])
db['data'] = entries
# output
json_path = os.path.join(games_path, os.path.pardir, 'docs', 'data.json')
text = json.dumps(db, indent=1)
write_text(json_path, text)
def git_repo(repo):
"""
Tests if a repo is a git repo, then returns the repo url, possibly modifying it slightly.
"""
# generic (https://*.git) or (http://*.git) ending on git
if (repo.startswith('https://') or repo.startswith('http://')) and repo.endswith('.git'):
return repo
# for all others we just check if they start with the typical urls of git services
services = ['https://git.tuxfamily.org/', 'http://git.pond.sub.org/', 'https://gitorious.org/', 'https://git.code.sf.net/p/']
for service in services:
if repo.startswith(service):
return repo
# the rest is ignored
return None
def svn_repo(repo):
"""
"""
if repo.startswith('https://svn.code.sf.net/p/') and repo.endswith('/code/'):
return repo
if repo.startswith('http://svn.uktrainsim.com/svn/'):
return repo
    if repo == 'https://rpg.hamsterrepublic.com/source/wip':
return repo
# not svn
return None
def hg_repo(repo):
"""
"""
if repo.startswith('https://bitbucket.org/') and not repo.endswith('.git'):
return repo
if repo.startswith('http://hg.'):
return repo
# not hg
return None
def bzr_repo(repo):
if repo.startswith('https://code.launchpad.net/'):
return repo
# not bzr
return None
def update_primary_code_repositories():
primary_repos = {'git':[],'svn':[],'hg':[],'bzr':[]}
unconsumed_entries = []
# for every entry filter those that are known git repositories (add additional repositories)
for info in infos.values():
field = 'code repository-raw'
# if field 'Code repository' is available
if field in info:
consumed = False
repos = info[field]
if repos:
# split at comma
repos = repos.split(',')
# keep the first and all others containing "(+)"
additional_repos = [x for x in repos[1:] if "(+)" in x]
repos = repos[0:1]
repos.extend(additional_repos)
for repo in repos:
# remove parenthesis and strip of white spaces
repo = re.sub(r'\([^)]*\)', '', repo)
repo = repo.strip()
url = git_repo(repo)
if url:
primary_repos['git'].append(url)
consumed = True
continue
url = svn_repo(repo)
if url:
primary_repos['svn'].append(url)
consumed = True
continue
url = hg_repo(repo)
if url:
primary_repos['hg'].append(url)
consumed=True
continue
url = bzr_repo(repo)
if url:
primary_repos['bzr'].append(url)
consumed=True
continue
if not consumed:
unconsumed_entries.append([info['title'], info[field]])
# print output
#if info['code repository']:
# print('Entry "{}" unconsumed repo: {}'.format(info['title'], info[field]))
#if not info['code repository']:
# print('Entry "{}" unconsumed repo: {}'.format(info['title'], info[field]))
# sort them alphabetically (and remove duplicates)
for k, v in primary_repos.items():
primary_repos[k] = sorted(set(v))
# write them to tools/git
json_path = os.path.join(games_path, os.path.pardir, 'tools', 'archives.json')
text = json.dumps(primary_repos, indent=1)
write_text(json_path, text)
if __name__ == "__main__":
# paths
games_path = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'games'))
readme_file = os.path.realpath(os.path.join(games_path, os.pardir, 'README.md'))
# assemble info
infos = assemble_infos()
# recount and write to readme
update_readme()
# generate list in toc files
update_category_tocs()
# generate report
generate_statistics()
# update database for html table
export_json()
# check for unfilled template lines
check_template_leftovers()
# check external links (only rarely)
# check_validity_external_links()
# collect list of primary code repositories
update_primary_code_repositories()
|
# Filename: config.py
"""
Globe Indexer Configuration Module
"""
# Original data set
DATA_SET_URL = 'http://download.geonames.org/export/dump/cities1000.zip'
# Download file
DATA_CHUNK_SIZE = 1024
# For proximity search limit
DEFAULT_PROXIMITY_LIMIT = 5
# The radius of earth in kilometers
EARTH_RADIUS = 6371
# New line character
NEW_LINE = '\n'
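# Hedged sketch (added for illustration, not part of the original configuration):
# one way EARTH_RADIUS could be used for the proximity search, via the standard
# haversine great-circle distance. The function name is an assumption.
import math

def great_circle_distance_km(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometers between two (lat, lon) points in degrees."""
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    return 2 * EARTH_RADIUS * math.asin(math.sqrt(a))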
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for cros."""
from __future__ import print_function
import sys
from chromite.lib import commandline
from chromite.lib import cros_test_lib
from chromite.scripts import cros
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class RunScriptTest(cros_test_lib.MockTempDirTestCase):
"""Test the main functionality."""
def setUp(self):
self.PatchObject(cros, '_RunSubCommand', autospec=True)
def testDefaultLogLevel(self):
"""Test that the default log level is set to notice."""
arg_parser = self.PatchObject(commandline, 'ArgumentParser',
return_value=commandline.ArgumentParser())
cros.GetOptions()
arg_parser.assert_called_with(caching=True, default_log_level='notice')
def testSubcommand(self):
"""Test parser when given a subcommand."""
parser = cros.GetOptions('lint')
opts = parser.parse_args(['lint'])
self.assertEqual(opts.subcommand, 'lint')
|
#!/usr/bin/env python3
import socket, time
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 10047 # Port to listen on (non-privileged ports are > 1023)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
print("Waiting for connection...")
conn, addr = s.accept()
with conn:
print('Connected by', addr)
        while True:
            data = conn.recv(1024)
            if not data:
                break  # client disconnected
            print("Got msg:", data.decode('utf-8'))
            time.sleep(1)
            msg = "Das msg"
            conn.sendall(bytes(msg, 'utf-8'))  # reply on the accepted connection, not the listening socket
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Nets for predicting 2D lines."""
import tensorflow as tf
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.util import line_util
from ldif.util import net_util
# pylint: enable=g-bad-import-order
def lstm_lines_transcoder(inputs, model_config):
"""A model that uses an LSTM to predict a sequence of lines."""
batch_size = model_config.hparams.bs
height = model_config.hparams.h
width = model_config.hparams.w
conv_layer_depths = [16, 32, 64, 128, 128]
number_of_lines = model_config.hparams.sc # Was 4!
number_of_layers = model_config.hparams.cc
line_embedding_size = 5
encoded_image_length = 1024
# embedding_size = line_embedding_size * number_of_lines
lstm_size = model_config.hparams.hlw
reencode_partial_outputs = True
encoded_image, _, _ = net_util.encode(
inputs,
model_config,
conv_layer_depths=conv_layer_depths,
fc_layer_depths=[encoded_image_length],
name='Encoder')
if reencode_partial_outputs:
encoded_partial_output, _, _ = net_util.encode(
tf.ones_like(inputs),
model_config,
conv_layer_depths=conv_layer_depths,
fc_layer_depths=[encoded_image_length],
name='OutputReencoder')
else:
encoded_partial_output = None
lstm = tf.contrib.rnn.MultiRNNCell([
tf.contrib.rnn.BasicLSTMCell(lstm_size) for _ in range(number_of_layers)
])
state = lstm.zero_state(batch_size, tf.float32)
partial_images = []
for i in range(number_of_lines):
if reencode_partial_outputs:
embedding_in = tf.concat([encoded_image, encoded_partial_output], axis=-1)
else:
embedding_in = encoded_image
output, state = lstm(embedding_in, state)
line_parameters = tf.contrib.layers.fully_connected(
inputs=output,
num_outputs=line_embedding_size,
activation_fn=None,
normalizer_fn=None)
current_image = line_util.network_line_parameters_to_line(
line_parameters, height, width)
current_image = 2.0 * current_image - 1.0
if partial_images:
current_image = tf.minimum(partial_images[-1], current_image)
partial_images.append(current_image)
if reencode_partial_outputs and i < number_of_lines - 1:
encoded_partial_output, _, _ = net_util.encode(
current_image,
model_config,
conv_layer_depths=conv_layer_depths,
fc_layer_depths=[encoded_image_length],
name='OutputReencoder')
return partial_images
def bag_of_lines_transcoder(inputs, model_config):
"""A model that outputs K lines all at once to replicate a target shape."""
batch_size = model_config.hparams.bs
height = model_config.hparams.h
width = model_config.hparams.w
conv_layer_depths = [16, 32, 64, 128, 128]
number_of_lines = 4
line_embedding_size = 5
embedding_size = line_embedding_size * number_of_lines
z, _, _ = net_util.encode(
inputs,
model_config,
conv_layer_depths=conv_layer_depths,
fc_layer_depths=[embedding_size],
name='Encoder')
z = tf.reshape(z, [batch_size, number_of_lines, line_embedding_size])
# Code is not batched:
batch_items = []
with tf.name_scope('Decoder'):
for bi in range(batch_size):
line_images = []
for i in range(number_of_lines):
line_parameters = z[bi, i, :]
line_image = line_util.network_line_parameters_to_line(
line_parameters, height, width)
line_images.append(line_image)
batch_items.append(line_util.union_of_line_drawings(line_images))
decoded_batch = tf.stack(batch_items, axis=0)
# Lines are in the range [0,1], so rescale to [-1, 1].
return 2.0 * decoded_batch - 1.0
|
import sys
import os
import yaml
class SingleCrawlerManifestManager(object):
"""
"""
required_files = ["spider_manifest.json", "spider_manifest.py"]
def __init__(self, config_path=None):
print("Setting ETI path as: {}".format(config_path))
self.config_path = config_path
def import_files(self):
# print("self.manifest_path", self.config_path)
        self.spider_config = yaml.safe_load(open("{}/spider_manifest.yml".format(self.config_path)))
sys.path.append(self.config_path)
import spider_transformations
self.cti_transformations_module = spider_transformations
# print("cti_manifest is {}".format(self.spider_config))
# print("cti_transformations_module is {}".format(self.cti_transformations_module))
def validate_cti_path_and_files(self):
errors = []
try:
files_in_path = os.listdir(self.config_path)
except Exception as e:
errors.append("No such path exist {}".format(self.config_path))
files_in_path = []
        if not errors:
for required_file in self.required_files:
if required_file not in files_in_path:
errors.append("{} file not in the path {}".format(required_file, self.config_path))
return errors
def import_cti_transformations(self):
for tranformation in self.spider_config.get("transformations", []):
method_to_call = getattr(self.cti_transformations_module, tranformation.get("transformation_fn"))
tranformation['transformation_fn'] = method_to_call
def get_manifest(self):
errors = self.validate_cti_path_and_files()
if len(errors) > 0:
return None, errors
self.import_files()
self.import_cti_transformations()
return self.spider_config, errors
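# Hedged usage sketch (added for illustration, not part of the original module).
# The config path is an assumption; get_manifest() returns (None, errors) when
# validation fails, so this runs safely even without a real config directory.
if __name__ == "__main__":
    manager = SingleCrawlerManifestManager(config_path="/path/to/spider/config")
    manifest, errors = manager.get_manifest()
    print(manifest, errors)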
|
L = [100]
L.append(200)
print L
|
#!/usr/bin/env python
"""
Determines the frequencies of residue pair contacts in molecular
dynamics simulations. Given one or more MDContact outputs, this
script determines the frequency of each unique interaction of the
form (itype, residue 1, residue2), weighted by number of frames,
across all inputs.
The inputs are one or more MDContact output file paths as well as an
output path. The user may also specify a subset of interaction types
to compute frequencies for. The user may additionally provide a label
file to convert residue labellings (typically for the use of aligning
sequences for performing frequency comparisons with other
trajectories).
The output is a single tsv file with each row indicating residue
id 1, residue id 2, and contact frequency.
"""
from __future__ import division
from collections import defaultdict
import sys
import argparse
def atomid_to_resid(atom):
return atom[0:atom.rfind(":")]
# return ':'.join(atom.split(':')[1:3])
def gen_counts(input_lines, interaction_types, residuelabels=None):
"""
Parse each line in `input_lines` as a line from MDContacts and return interaction-counts for each residue pair. If
`residuelabels` is defined it is used to modify residue identifiers and to filter out residues not indicated.
For example:
inputs = [
"# total_frames: 3",
"\t".join(["0", "hbbb", "A:ALA:1:N", "A:ARG:4:O"]),
"\t".join(["0", "vdw", "A:ALA:1:CB", "A:ARG:4:CA"]),
"\t".join(["1", "vdw", "A:ALA:1:N", "A:CYS:5:CA"]),
"\t".join(["2", "hbbb", "A:THR:2:N", "A:CYS:5:O"]),
"\t".join(["2", "hbss", "A:ALA:1:N", "A:CYS:5:O"])
]
labels = {"A:ALA:1": "A1", "A:ARG:4": "R4", "A:CYS:5": "C5"}
# Only consider hbbb and vdw, filter away THR, and map to single-letter labels
gen_counts(inputs, ["hbbb", "vdw"], labels)
# Returns: { ("A1", "R4"): 1, ("A1", "C5"): 1 }
Parameters
----------
input_lines: Iterable[str]
Interactions formatted as MDContacts output, e.g. ["0\thbbb\tA:ALA:1:N\tA:ARG:4:H", ...]
interaction_types: list of str
Which interaction types to consider
residuelabels: dict of (str: str)
Remaps and filters residuelabels, e.g. {"A:ARG:4": "R4"}
Returns
-------
(int, dict of (str, str): int)
Total frame-count and mapping of residue-residue interactions to frame-count
"""
# Maps residue pairs to set of frames in which they're present
rescontact_frames = defaultdict(set)
total_frames = 0
for line in input_lines:
line = line.strip()
if "total_frames" in line:
tokens = line.split(" ")
total_frames = int(tokens[1][tokens[1].find(":")+1:])
if len(line) == 0 or line[0] == "#":
continue
tokens = line.split("\t")
# Check that the interaction type is specified
itype = tokens[1]
if itype not in interaction_types:
continue
frame = int(tokens[0])
if frame + 1 > total_frames:
total_frames = frame + 1
res1 = atomid_to_resid(tokens[2])
res2 = atomid_to_resid(tokens[3])
# Change residue id according to `residuelabels` or skip if any of the residues are not present
if residuelabels is not None:
if res1 not in residuelabels or res2 not in residuelabels:
continue
res1 = residuelabels[res1]
res2 = residuelabels[res2]
# Ensure lexicographical order of residue names
if res2 < res1:
res1, res2 = res2, res1
rescontact_frames[(res1, res2)].add(frame)
    # Instead of returning the list of frames for each interaction, only return the number of frames
rescontact_counts = {(res1, res2): len(frames) for (res1, res2), frames in rescontact_frames.items()}
return total_frames, rescontact_counts
def parse_labelfile(label_file):
"""
Parses a label-file and returns a dictionary with the residue label mappings. Unless prepended with a comment-
indicator (#), each line is assumed to have a valid residue identifier (e.g. "A:ALA:1") and a label which the
residue should be mapped to (e.g. "A1").
Example:
parse_labelfile(["A:ALA:1\tA1")
# Returns {"A:ALA:1": "A1"}
Parameters
----------
label_file: Iterable[str]
Lines with tab-separated residue identifier and label
Returns
-------
dict of str: str
Mapping from residue-id in contact-file to label of any format
"""
ret = {}
for line in label_file:
line = line.strip()
# Ignore line if empty or comment
if line[0] == "#" or len(line) == 0:
continue
tokens = line.split("\t")
ret[tokens[0]] = tokens[1]
return ret
def gen_frequencies(count_list):
"""
Take a list of residue contact counts (see output of `gen_counts`) and compute total counts and frequencies.
Example:
clist = [
(4, {("A1", "R4"): 4, ("A1", "C5"): 3}), # First simulation has 4 frames and two contacts
(3, {("A1", "R4"): 2}) # Second simulation has 3 frames and one contact
]
gen_frequencies(clist)
# Returns: (7, {("A1", "R4"): (6, 0.857), ("A1", "C5"): (3, 0.429)})
Parameters
----------
count_list: list of (int, dict of (str, str): int)
List with individual frame counts and dictionaries mapping residue pairs to frame-counts
Return
------
(int, dict of (str, str): (int, float))
Total framecount and mapping of residue ID pairs to the number of frames in which they contact and the frequency
"""
rescontact_count = defaultdict(int)
total_frames = 0
for frames, rescount_dict in count_list:
total_frames += frames
for (res1, res2), count in rescount_dict.items():
rescontact_count[(res1, res2)] += count
respair_freqs = {respair: (count, float(count) / total_frames) for respair, count in rescontact_count.items()}
return total_frames, respair_freqs
def main():
# Parse command line arguments
class MyParser(argparse.ArgumentParser):
def error(self, message):
# Prints full program help when error occurs
self.print_help(sys.stderr)
sys.stderr.write('\nError: %s\n' % message)
sys.exit(2)
parser = MyParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--input_files',
type=argparse.FileType('r'),
required=True,
nargs='+',
metavar='FILE.tsv',
help="Path to one or more contact-file outputs")
parser.add_argument('--label_file',
type=argparse.FileType('r'),
required=False,
metavar='FILE.tsv',
help="A label file for standardizing residue names between different proteins")
parser.add_argument('--output_file',
type=argparse.FileType('w'),
required=True,
metavar='FILE.tsv',
help="Path to output file")
parser.add_argument('--itypes',
required=False,
default="all",
type=str,
nargs="+",
metavar="ITYPE",
help='Include only these interaction types in frequency computation. Valid choices are: \n'
'* all (default), \n'
'* sb (salt-bridges), \n'
'* pc (pi-cation), \n'
'* ps (pi-stacking), \n'
'* ts (t-stacking), \n'
'* vdw (van der Waals), \n'
'* hbbb, hbsb, hbss, (hydrogen bonds with specific backbone/side-chain profile)\n'
'* wb, wb2 (water-bridges and extended water-bridges) \n'
'* hls, hlb (ligand-sidechain and ligand-backbone hydrogen bonds), \n'
'* lwb, lwb2 (ligand water-bridges and extended water-bridges)')
# results, unknown = parser.parse_known_args()
args = parser.parse_args()
# Update itypes if "all" is specified
if "all" in args.itypes:
args.itypes = ["sb", "pc", "ps", "ts", "vdw", "hb", "lhb", "hbbb", "hbsb",
"hbss", "wb", "wb2", "hls", "hlb", "lwb", "lwb2"]
output_file = args.output_file
input_files = args.input_files
itypes = args.itypes
labels = parse_labelfile(args.label_file) if args.label_file else None
counts = [gen_counts(input_file, itypes, labels) for input_file in input_files]
total_frames, frequencies = gen_frequencies(counts)
output_file.write('#\ttotal_frames:%d\tinteraction_types:%s\n' % (total_frames, ','.join(itypes)))
    output_file.write('#\tColumns:\tresidue_1\tresidue_2\tcontact_frequency\n')
for (res1, res2), (count, frequency) in frequencies.items():
output_file.write('\t'.join([res1, res2, "%.3f" % frequency]) + "\n")
if __name__ == '__main__':
main()
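# Hedged usage sketch (added for illustration; the file names are assumptions):
#   counts = [gen_counts(open(p), ["hbbb", "vdw"]) for p in ("sim1_contacts.tsv", "sim2_contacts.tsv")]
#   total_frames, freqs = gen_frequencies(counts)
#   # freqs maps residue pairs to (frame_count, frequency) across both simulations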
|
#import MySQLdb
import psycopg2
import os
DATABASE_URL = os.environ['DATABASE_URL']
db = psycopg2.connect(DATABASE_URL, sslmode='require')
#db = psycopg2.connect(host = "localhost", user = "postgres", password = "test", database = "forensics")
cursor = db.cursor()
cursor.execute("SET SCHEMA '{}'".format('forensics'))
sqlcmd2 = "SET search_path TO forensics;"
cursor.execute(sqlcmd2)
print('Switched')
sql_cmd = "SELECT * FROM forensics.papers;"
#sql_cmd = "select year, count(*) from papers group by year"
cursor.execute(sql_cmd)
resp = cursor.fetchall()
print(resp)
kind_list = []
for row in resp:
    print(row)
    print(row[0])
    kind_list.append(row[0])
print(kind_list)
|