hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
984f83c6fc04501148f422fce34dbcf040931285 | 898 | py | Python | tests/conftest.py | arkturix/internet-heroku-aut | 7ad32faaacb820ca3952543c05035cca2e744cb8 | [
"MIT"
] | null | null | null | tests/conftest.py | arkturix/internet-heroku-aut | 7ad32faaacb820ca3952543c05035cca2e744cb8 | [
"MIT"
] | null | null | null | tests/conftest.py | arkturix/internet-heroku-aut | 7ad32faaacb820ca3952543c05035cca2e744cb8 | [
"MIT"
] | null | null | null | import pytest
from herokuapp_internet.login_page import LoginPage
from herokuapp_internet.disappearing_elements_page import DisappearingElementsPage
import logging
logger = logging.getLogger(__name__)
def pytest_addoption(parser):
    """Register the --headless command-line flag for the browser fixtures.

    The value is stored as a string ("false" by default); the
    headless_option fixture converts it to a boolean.
    """
    parser.addoption(
        "--headless", action="store", default="false", help="Run browser tests in headless mode. Default: false"
    )
@pytest.fixture(scope='session')
def headless_option(request):
    """Translate the --headless string option into a boolean.

    Any value other than the literal string "false" enables headless mode.
    """
    return request.config.getoption("--headless") != "false"
@pytest.fixture(scope="module")
def login_page(headless_option):
    """Yield a LoginPage browser driver; quit the browser after the module's tests."""
    login = LoginPage(headless=headless_option)
    yield login
    login.quit()
@pytest.fixture(scope="module")
def de_page(headless_option):
    """Yield a DisappearingElementsPage browser driver; quit it on teardown."""
    page = DisappearingElementsPage(headless=headless_option)
    yield page
    page.quit()
| 24.27027 | 112 | 0.75167 | 0 | 0 | 279 | 0.31069 | 514 | 0.572383 | 0 | 0 | 122 | 0.135857 |
98501d1d0a0e678ce5a1a02156e3060dda0fe5b8 | 2,286 | py | Python | dodo.py | spandanb/textwalker | 0ebabf5a1cf9142b7ca031d7070ba7cf1d118e34 | [
"MIT"
] | 2 | 2021-05-07T23:41:32.000Z | 2021-05-08T15:52:08.000Z | dodo.py | spandanb/textwalker | 0ebabf5a1cf9142b7ca031d7070ba7cf1d118e34 | [
"MIT"
] | null | null | null | dodo.py | spandanb/textwalker | 0ebabf5a1cf9142b7ca031d7070ba7cf1d118e34 | [
"MIT"
] | null | null | null | """
doit docs: https://pydoit.org/cmd_run.html
"""
import pdoc
import os
import os.path
def generate_docs(docs_dir: str):
    """Render HTML API docs for the textwalker package with pdoc.

    Writes ``index.html`` for the top-level package and one
    ``<module>.html`` per submodule into *docs_dir*.

    Args:
        docs_dir: directory to write the generated HTML files into;
            created (including any missing parents) if it does not exist.
    """
    if not os.path.exists(docs_dir):
        print(f'{docs_dir} does not exist; creating dir')
        # makedirs handles nested paths; a plain mkdir would fail on them
        os.makedirs(docs_dir, exist_ok=True)
    mod_names = ["textwalker", "textwalker.textwalker", "textwalker.pattern_parser", "textwalker.utils"]
    context = pdoc.Context()
    modules = [pdoc.Module(mod, context=context) for mod in mod_names]
    pdoc.link_inheritance(context)
    for module in modules:
        if module.name == "textwalker":
            # the package root becomes the landing page
            filepath = os.path.join(docs_dir, 'index.html')
        else:
            _pkg, modname = module.name.split('.')
            filepath = os.path.join(docs_dir, f'{modname}.html')
        with open(filepath, 'w', encoding='utf-8') as fp:
            fp.write(module.html())
        print(f'wrote docs for module {module.name} to {filepath}')
def task_run_tests():
    """doit task: run the pytest suite."""
    return {
        'actions': ['pytest textwalker'],
        'verbosity': 2,
    }
def task_run_tests_with_codecov():
    """doit task: run the pytest suite with coverage reporting."""
    return {
        'actions': ['pytest --cov=textwalker'],
        'verbosity': 2,
    }
def task_run_flake8():
    """doit task: lint the package with flake8."""
    return {
        'actions': ['flake8 textwalker'],
        'verbosity': 2,
    }
def task_run_black():
    """doit task: format the package with black."""
    return {
        'actions': ['black textwalker'],
        'verbosity': 2,
    }
def task_run_pdoc_cli():
    """doit task: build HTML docs by shelling out to the pdoc3 CLI.

    pdoc3: https://pdoc3.github.io/pdoc/doc/pdoc/#programmatic-usage
    """
    return {
        'actions': ['pdoc3 --html --force textwalker -o docs'],
        'verbosity': 2,
    }
def task_run_pdoc():
    """doit task: build HTML docs in-process via generate_docs.

    pdoc3: https://pdoc3.github.io/pdoc/doc/pdoc/#programmatic-usage
    """
    return {
        'actions': [(generate_docs, ('docs',))],
        'verbosity': 2,
    }
| 22.194175 | 104 | 0.575241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,030 | 0.450569 |
98507d75380fc2a9e718b46b8379ee3c6dd67ff6 | 2,329 | py | Python | ComplexNetworkSim/statistics.py | Juliet-Chunli/cnss | 534c7e0b0338e831ec20b5002d1fdf1cc0879a2c | [
"BSD-2-Clause"
] | 14 | 2015-04-24T07:34:36.000Z | 2021-08-02T09:50:49.000Z | ComplexNetworkSim/statistics.py | Juliet-Chunli/cnss | 534c7e0b0338e831ec20b5002d1fdf1cc0879a2c | [
"BSD-2-Clause"
] | 1 | 2015-06-30T01:46:00.000Z | 2015-07-03T18:45:23.000Z | ComplexNetworkSim/statistics.py | Juliet-Chunli/cnss | 534c7e0b0338e831ec20b5002d1fdf1cc0879a2c | [
"BSD-2-Clause"
] | 8 | 2015-03-26T20:59:26.000Z | 2020-07-29T09:02:26.000Z | '''
Module for basic averaging of system states across multiple trials.
Used in plotting.
@author: Joe Schaul <joe.schaul@gmail.com>
'''
class TrialState(object):
    """Per-trial record of simulation times, states and per-state counts."""

    def __init__(self, trial_id, times, systemStates, uniqueStates, stateCounterForStateX):
        self.trial_id = trial_id  # trial index; -1 marks the cross-trial average
        self.times = times  # time points of the trial
        self.systemStates = systemStates  # state snapshots per time point (None for averages)
        self.uniqueStates = uniqueStates  # set of distinct node states observed
        self.stateCounterForStateX = stateCounterForStateX  # state -> counts per time point


class TrialStats(object):
    """Aggregate per-state counts over several trials and average them."""

    def __init__(self, allTrialStateTuples, allTrialTopologyTuples):
        self.stateTuples = allTrialStateTuples  # per trial: list of (time, state-list)
        self.topoTuples = allTrialTopologyTuples  # per trial: topology data (unused here)
        self.trialstates = []
        self.trialAverage = None
        self.calculateAllStateCounts()
        self.calculateAverageStateCount()

    def calculateAllStateCounts(self):
        """Build a TrialState (with per-state count series) for every trial."""
        # 'reduce' is not a builtin on Python 3; import it explicitly.
        from functools import reduce

        for trial in range(len(self.stateTuples)):
            times = [t for (t, s) in self.stateTuples[trial]]
            systemStates = [s for (t, s) in self.stateTuples[trial]]
            # union of all node states seen at any time point of this trial
            uniqueStates = reduce(lambda x, y: set(y).union(set(x)), systemStates)

            stateCounterDict = {}
            for x in uniqueStates:
                stateXCounts = [state.count(x) for state in systemStates]
                stateCounterDict[x] = stateXCounts

            # store info about this trial
            self.trialstates.append(TrialState(trial, times, systemStates, uniqueStates, stateCounterDict))

    def calculateAverageStateCount(self):
        """Average the per-state count series across all recorded trials.

        The result is stored as a synthetic TrialState with trial_id -1.
        """
        times = self.trialstates[0].times
        uniqueStates = self.trialstates[0].uniqueStates
        for trial in self.trialstates:
            try:
                uniqueStates = set(trial.uniqueStates).union(set(uniqueStates))
            except TypeError:
                # a trial whose uniqueStates is not iterable contributes nothing
                pass

        stateCounterDict = {}
        # NOTE: relies on `trial` still being bound to the last trial of the
        # loop above; its length sets the zero-filled fallback series.
        dummy = [0 for x in trial.systemStates]
        for x in uniqueStates:
            array = [trial.stateCounterForStateX.get(x, dummy) for trial in self.trialstates]
            averages = [sum(value) / len(self.trialstates) for value in zip(*array)]
            stateCounterDict[x] = averages
        self.trialAverage = TrialState(-1, times, None, uniqueStates, stateCounterDict)
| 37.564516 | 108 | 0.613568 | 2,145 | 0.920996 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.073422 |
9850f21cd3ef993acf8dcfbfcc45ff349ee32df7 | 636 | py | Python | lhotse/bin/modes/recipes/vctk.py | m-wiesner/lhotse | a9c26ba39bc56d27130ff8600c1796db08038c48 | [
"Apache-2.0"
] | null | null | null | lhotse/bin/modes/recipes/vctk.py | m-wiesner/lhotse | a9c26ba39bc56d27130ff8600c1796db08038c48 | [
"Apache-2.0"
] | null | null | null | lhotse/bin/modes/recipes/vctk.py | m-wiesner/lhotse | a9c26ba39bc56d27130ff8600c1796db08038c48 | [
"Apache-2.0"
] | null | null | null | import click
from lhotse.bin.modes import obtain, prepare
from lhotse.recipes import download_vctk, prepare_vctk
from lhotse.utils import Pathlike
__all__ = ['vctk']
# Registered on the `prepare` click group. The `vctk` definition further
# below shadows this name at module level, but click has already captured
# this callback when the decorator ran, so the CLI command still works.
@prepare.command()
@click.argument('corpus_dir', type=click.Path(exists=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
def vctk(
    corpus_dir: Pathlike,
    output_dir: Pathlike
):
    """VCTK data preparation."""
    prepare_vctk(corpus_dir, output_dir=output_dir)
# NOTE: this redefines the module-level name `vctk` (the prepare command
# above). That is harmless for the CLI because click stores each callback
# when its decorator runs, but only this function is importable as `vctk`.
@obtain.command()
@click.argument('target_dir', type=click.Path())
def vctk(
    target_dir: Pathlike
):
    """VCTK download."""
    download_vctk(target_dir)
| 22.714286 | 74 | 0.718553 | 0 | 0 | 0 | 0 | 462 | 0.726415 | 0 | 0 | 90 | 0.141509 |
98514167071fff282b20e3764c9ba865aff2249e | 1,627 | py | Python | external/fv3fit/setup.py | jacnugent/fv3net | 84958651bdd17784fdab98f87ad0d65414c03368 | [
"MIT"
] | null | null | null | external/fv3fit/setup.py | jacnugent/fv3net | 84958651bdd17784fdab98f87ad0d65414c03368 | [
"MIT"
] | null | null | null | external/fv3fit/setup.py | jacnugent/fv3net | 84958651bdd17784fdab98f87ad0d65414c03368 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
# Runtime dependencies (loosely pinned to minimum supported versions).
requirements = [
    "xarray>=0.14",
    "numpy>=1.11",
    "scikit-learn>=0.22",
    "fsspec>=0.6.2",
    "pyyaml>=5.1.2",
    "tensorflow>=2.2.0",
    "tensorflow-addons>=0.11.2",
    "typing_extensions>=3.7.4.3",
    "dacite>=1.6.0",
    "wandb>=0.12.1",
    # fv3fit also depends on fv3gfs-util>=0.6.0, but pip-compile does not work
    # for packages not hosted on pypi.
]
# No build-time requirements beyond setuptools itself.
setup_requirements = []
# Requirements for running the test suite.
test_requirements = ["pytest"]
setup(
    author="Vulcan Technologies LLC",
    author_email="jeremym@vulcan.com",
    python_requires=">=3.6.9",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    description="FV3Fit is used to train machine learning models.",
    install_requires=requirements,
    # sibling packages resolved from the monorepo checkout
    dependency_links=["../loaders/", "../vcm/"],
    extras_require={},
    license="BSD license",
    long_description="FV3Fit is used to train machine learning models.",
    include_package_data=True,
    keywords="fv3fit",
    name="fv3fit",
    packages=find_packages(include=["fv3fit", "fv3fit.*"]),
    setup_requires=setup_requirements,
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/VulcanClimateModeling/fv3fit",
    version="0.1.0",
    zip_safe=False,
)
| 28.54386 | 78 | 0.631838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 904 | 0.555624 |
98534ca864e3b4a1ef0ef0e2df72a864ede4128f | 1,447 | py | Python | dupa/__init__.py | kr1surb4n/dupa | 676577c7148e28eb8d1577bf2703029e6ba3a926 | [
"MIT"
] | null | null | null | dupa/__init__.py | kr1surb4n/dupa | 676577c7148e28eb8d1577bf2703029e6ba3a926 | [
"MIT"
] | null | null | null | dupa/__init__.py | kr1surb4n/dupa | 676577c7148e28eb8d1577bf2703029e6ba3a926 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Dupa
Set of tools handy during working, debuging and testing
the code."""
__version__ = '0.0.1'
__author__ = 'Kris Urbanski <kris@whereibend.space>'
import time
from functools import wraps
from dupa.fixturize import fixturize
def debug(func):
    """Print the decorated function's call signature and its return value."""
    @wraps(func)
    def traced(*args, **kwargs):
        pieces = [repr(a) for a in args]
        pieces += [f"{key}={val!r}" for key, val in kwargs.items()]
        signature = ", ".join(pieces)
        print(f"Signature:\n{func.__name__}({signature})")
        result = func(*args, **kwargs)
        print(f"{func.__name__} RETURN:\n{result!r}\n")
        return result
    return traced
def print_wrap(func):
    """Announce entry to and exit from the decorated function."""
    @wraps(func)
    def announced(*args, **kwargs):
        print("DUPA Start: %s" % func.__name__)
        result = func(*args, **kwargs)
        print("DUPA End: %s" % func.__name__)
        return result
    return announced
# Running counter for anonymous dupa() breadcrumbs.
DUPA_COUNTER = 1
def dupa(marker=None):
    """Print a "DUPA" breadcrumb: the marker if given, else a running count."""
    global DUPA_COUNTER
    if marker:
        print(f"DUPA {marker}")
        return None
    print(f"DUPA {DUPA_COUNTER}")
    DUPA_COUNTER += 1
def fart(marker=None):
    """Decorator factory: emit a dupa() breadcrumb before each call."""
    def decorate(func):
        """Wrap *func* so each invocation first prints a breadcrumb."""
        @wraps(func)
        def wrapped(*args, **kwargs):
            dupa(marker)
            return func(*args, **kwargs)
        return wrapped
    return decorate
| 20.671429 | 63 | 0.601935 | 0 | 0 | 0 | 0 | 704 | 0.486524 | 0 | 0 | 399 | 0.275743 |
9853ddeb20cc00b452e1f159e3fbc7ede42048e7 | 2,272 | py | Python | spa/templates.py | fergalmoran/dss | 684fb4030e33212c3ecde774ca86cb74a1ffc8ac | [
"BSD-2-Clause"
] | null | null | null | spa/templates.py | fergalmoran/dss | 684fb4030e33212c3ecde774ca86cb74a1ffc8ac | [
"BSD-2-Clause"
] | 3 | 2020-02-11T21:55:44.000Z | 2021-06-10T17:35:37.000Z | spa/templates.py | fergalmoran/dss | 684fb4030e33212c3ecde774ca86cb74a1ffc8ac | [
"BSD-2-Clause"
] | null | null | null | from django.contrib.sites.models import Site
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from htmlmin.decorators import not_minified_response
from dss import localsettings
from spa.forms import UserForm
__author__ = 'fergalm'
@not_minified_response
def get_template(request, template_name):
    """Render a view template from the views/ directory."""
    template = 'views/%s.html' % template_name
    return render_to_response(template, context_instance=RequestContext(request))
@not_minified_response
def get_template_ex(request, template_name):
    """Render a view template with an empty UserForm in its context."""
    context = RequestContext(request, {'form': UserForm()})
    return render_to_response('views/%s.html' % template_name, context_instance=context)
@not_minified_response
def get_embed_codes_dialog(request, slug):
    """Render the embed-codes dialog for the mix identified by *slug*."""
    domain = Site.objects.get_current().domain
    payload = {'embed_code': 'http://%s/embed/mix/%s' % (domain, slug)}
    return render_to_response(
        'views/dlg/EmbedCodes.html',
        payload,
        context_instance=RequestContext(request))
@not_minified_response
def get_dialog(request, dialog_name, **kwargs):
    """Render a dialog template; extra keyword arguments are accepted but unused."""
    template = 'views/dlg/%s.html' % dialog_name
    return render_to_response(template, context_instance=RequestContext(request))
def get_javascript(request, template_name):
    """Render a javascript template with the current user's settings.

    Updates the shared localsettings.JS_SETTINGS dict in place (preserving
    the original module-level side effect) and renders it with a
    text/javascript mimetype.

    NOTE(review): the original looked up is_anonymous()/get_profile() once
    per key; this computes them once, assuming get_profile() is stable
    within a request — standard for Django profiles, but confirm.
    """
    user = request.user
    if user.is_anonymous():
        settings = {
            'CURRENT_USER_ID': -1,
            'CURRENT_USER_NAME': -1,
            'CURRENT_USER_URL': -1,
            'CURRENT_USER_SLUG': -1,
            'CURRENT_USER_CANHOMEPAGE': False,
            'AVATAR_IMAGE': "",
        }
    else:
        profile = user.get_profile()
        settings = {
            'CURRENT_USER_ID': profile.id,
            'CURRENT_USER_NAME': profile.get_nice_name(),
            'CURRENT_USER_URL': profile.get_profile_url(),
            'CURRENT_USER_SLUG': profile.slug,
            'CURRENT_USER_CANHOMEPAGE': user.has_perm('spa.mix_add_homepage') or user.is_staff,
            'AVATAR_IMAGE': profile.get_small_profile_image(),
        }
    localsettings.JS_SETTINGS.update(settings)
    return render_to_response(
        'javascript/%s.js' % template_name,
        localsettings.JS_SETTINGS,
        context_instance=RequestContext(request),
        mimetype="text/javascript")
| 39.172414 | 154 | 0.716109 | 0 | 0 | 0 | 0 | 941 | 0.414173 | 0 | 0 | 299 | 0.131602 |
9856146cbc1b1623eb88d71962e3c10eca29c3be | 1,207 | py | Python | service/imagepost_views.py | sandipsahajoy/Distributed-Social-Networking | 7cc56c2759955e2293791bb7009821666b24944b | [
"Apache-2.0"
] | 5 | 2022-01-14T10:32:43.000Z | 2022-02-28T00:30:10.000Z | service/imagepost_views.py | sandipsahajoy/Distributed-Social-Networking | 7cc56c2759955e2293791bb7009821666b24944b | [
"Apache-2.0"
] | 47 | 2022-02-10T03:17:26.000Z | 2022-02-23T01:22:33.000Z | service/imagepost_views.py | sandipsahajoy/Distributed-Social-Networking | 7cc56c2759955e2293791bb7009821666b24944b | [
"Apache-2.0"
] | 2 | 2022-03-08T22:48:24.000Z | 2022-03-22T03:05:49.000Z | from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from drf_yasg.utils import swagger_auto_schema
from service.serializers import PostSerializer, ImagePostSerializer
from .models import Author, Post
@swagger_auto_schema(method='get', operation_description="Return image from a post if exists")
@api_view(['GET'])
def imagepost(request, author_pk, post_pk):
    """Return the image attached to a post, or a 404 response if absent."""
    if request.method != 'GET':
        # api_view already restricts methods; mirror the original fall-through.
        return None
    try:
        author = Author.objects.get(pk=author_pk)
        post = author.post_set.get(pk=post_pk)
    except Author.DoesNotExist:
        return Response('Author doesn\'t exist', status=status.HTTP_404_NOT_FOUND)
    except Post.DoesNotExist:
        return Response('Post doesn\'t exist', status=status.HTTP_404_NOT_FOUND)
    image_url = PostSerializer(post, many=False).data["imageSource"]
    if image_url is None:
        return Response('Image doesn\'t exist', status=status.HTTP_404_NOT_FOUND)
    return Response(ImagePostSerializer(image_url).data)
9856bd361d77e2facedd153b43e10e2a8e45b702 | 4,894 | py | Python | pixivpy_async/bapi.py | Kyle2142/pixivpy-async | 639ddf46fbeeaa2aade059e4f5eac80bd450ee52 | [
"Unlicense"
] | null | null | null | pixivpy_async/bapi.py | Kyle2142/pixivpy-async | 639ddf46fbeeaa2aade059e4f5eac80bd450ee52 | [
"Unlicense"
] | null | null | null | pixivpy_async/bapi.py | Kyle2142/pixivpy-async | 639ddf46fbeeaa2aade059e4f5eac80bd450ee52 | [
"Unlicense"
] | null | null | null | # -*- coding:utf-8 -*-
import hashlib
import os
from datetime import datetime
from .error import *
from .utils import Utils
from .api import API
from .net import Net
class BasePixivAPI(Net, Utils):
    """Low-level Pixiv API client: OAuth login, authenticated requests, downloads."""
    def __init__(self, **requests_kwargs):
        """Initialise credentials and networking; kwargs are passed to Net."""
        # Extra HTTP headers merged into every request (e.g. Accept-Language).
        self.additional_headers = {}
        # Hard-coded official app credentials used for the OAuth flows.
        self.client_id = 'MOBrBDS8blbauoSck0ZfDbtuzpyT'
        self.client_secret = 'lsACyCD94FhDUtGTXi3QzcFE2uU1hqtDaKeqrdwj'
        # Secret salt for the X-Client-Hash header computed in login().
        self.hash_secret = '28c1fdd170a5204386cb1313c7077b34f83e4aaf4aa829ce78c231e05b0bae2c'
        self.access_token = None
        self.user_id = 0
        self.refresh_token = None
        self.api = API()
        super().__init__(**requests_kwargs)
    async def requests_(
            self,
            method: str,
            url: str,
            headers: dict = None,
            params: dict = None,
            data: dict = None,
            auth: bool = True
    ):
        """Perform an (optionally authenticated) API request and parse the JSON.

        Raises:
            NoTokenError: if auth is requested but no access token is set.
        """
        if auth:
            if self.access_token is None:
                raise NoTokenError
            headers = self.set_headers(headers, self.access_token)
        return await self.requests_call(method=method, url=url, headers=headers, params=params, data=data)
    def set_api_proxy(self, proxy_hosts="http://app-api.pixivlite.com"):
        """Set proxy hosts: eg pixivlite.com"""
        self.api = API(proxy_hosts)
    def set_auth(self, access_token, refresh_token=None):
        """Install previously obtained OAuth tokens on this client."""
        self.access_token = access_token
        self.refresh_token = refresh_token
    def set_additional_headers(self, headers):
        """Replace the extra headers merged into every request."""
        self.additional_headers = headers
    def set_client(self, client_id, client_secret):
        """Override the OAuth client credentials."""
        self.client_id = client_id
        self.client_secret = client_secret
    def set_accept_language(self, language):
        """Set the Accept-Language header for subsequent requests."""
        self.additional_headers['Accept-Language'] = language
    async def requests_call(self, method, url, headers=None, params=None, data=None):
        """Issue a request (defaulting missing dicts) and return parsed JSON."""
        data = data if data else dict()
        params = params if params else dict()
        headers = headers if headers else dict()
        w = await self.req(method=method, url=url, headers=headers, params=params, data=data)
        return self.parse_json(w)
    async def req(self, method, url, headers=None, data=None, params=None):
        """Dispatch to the Net helper matching the HTTP method.

        Raises:
            MethodError: for methods other than GET/POST/DELETE.
        """
        # NOTE(review): mutates the caller-supplied headers dict in place.
        headers.update(self.additional_headers)
        if method == 'GET':
            return await self.fetch(url, headers, params)
        elif method == 'POST':
            return await self.post(url, data, headers, params)
        elif method == 'DELETE':
            return await self.delete(url, headers, params)
        else:
            raise MethodError(method)
    async def login(self, username=None, password=None, refresh_token=None):
        """Login with password, or use the refresh_token to acquire a new bearer token"""
        url = 'https://oauth.secure.pixiv.net/auth/token'
        # Client time plus an MD5 of (time + secret salt) — required headers.
        local_time = datetime.utcnow().strftime( '%Y-%m-%dT%H:%M:%S+00:00' )
        headers = {
            'User-Agent': 'PixivAndroidApp/5.0.64 (Android 6.0)',
            'X-Client-Time': local_time,
            'X-Client-Hash': hashlib.md5((local_time + self.hash_secret).encode('utf-8')).hexdigest(),
        }
        data = {
            'get_secure_url': 1,
            'client_id': self.client_id,
            'client_secret': self.client_secret,
        }
        # Prefer the password grant when full credentials were supplied;
        # otherwise fall back to any refresh token (argument or stored).
        if (username is not None) and (password is not None):
            data['grant_type'] = 'password'
            data['username'] = username
            data['password'] = password
        elif (refresh_token is not None) or (self.refresh_token is not None):
            data['grant_type'] = 'refresh_token'
            data['refresh_token'] = refresh_token or self.refresh_token
        else:
            raise NoLoginError
        # return auth/token response
        return await self.auth_req(url, headers, data)
    async def auth_req(self, url, headers, data):
        """POST the auth request and store the returned tokens on success.

        Raises:
            AuthCredentialsError: when a password grant is rejected.
            AuthTokenError: when a refresh-token grant is rejected.
            TokenError: when the success response cannot be parsed.
        """
        r, status, code = await self.auth(url, headers, data)
        if not status:
            if data['grant_type'] == 'password':
                raise AuthCredentialsError(code, r)
            else:
                raise AuthTokenError(code, r)
        token = None
        try:
            token = self.parse_json(r)
            self.access_token = token.response.access_token
            self.user_id = token.response.user.id
            self.refresh_token = token.response.refresh_token
        except Exception as e:
            raise TokenError(token, e)
        return token
    async def download(self, url, prefix='', path=os.path.curdir,
                       name=None, replace=False, referer='https://app-api.pixiv.net/'):
        """Download an image to disk unless it already exists (or replace=True).

        The Referer header is passed to the fetch; the file name defaults to
        the URL's basename, optionally prefixed.
        """
        name = prefix + name if name else prefix + os.path.basename(url)
        img_path = os.path.join(path, name)
        if not os.path.exists(img_path) or replace:
            e = await self.down(url, referer)
            with open(img_path, 'wb') as out_file:
                out_file.write(e)
98573eee3b7071227d0b56c5a935ae363ee0a8cb | 10,057 | py | Python | examples/ConsumptionSaving/example_ConsPortfolioModel.py | HsinYiHung/HARK_HY | 086c46af5bd037fe1ced6906c6ea917ed58b134f | [
"Apache-2.0"
] | null | null | null | examples/ConsumptionSaving/example_ConsPortfolioModel.py | HsinYiHung/HARK_HY | 086c46af5bd037fe1ced6906c6ea917ed58b134f | [
"Apache-2.0"
] | null | null | null | examples/ConsumptionSaving/example_ConsPortfolioModel.py | HsinYiHung/HARK_HY | 086c46af5bd037fe1ced6906c6ea917ed58b134f | [
"Apache-2.0"
] | null | null | null | # %%
'''
Example implementations of HARK.ConsumptionSaving.ConsPortfolioModel
'''
from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio
from HARK.ConsumptionSaving.ConsIndShockModel import init_lifecycle
from HARK.utilities import plotFuncs
from copy import copy
from time import time
import numpy as np
import matplotlib.pyplot as plt
# %%
# Make and solve an example portfolio choice consumer type
print('Now solving an example portfolio choice problem; this might take a moment...')
MyType = PortfolioConsumerType()
MyType.cycles = 0
t0 = time()
MyType.solve()
t1 = time()
MyType.cFunc = [MyType.solution[t].cFuncAdj for t in range(MyType.T_cycle)]
MyType.ShareFunc = [MyType.solution[t].ShareFuncAdj for t in range(MyType.T_cycle)]
print('Solving an infinite horizon portfolio choice problem took ' + str(t1-t0) + ' seconds.')
# %%
# Plot the consumption and risky-share functions
print('Consumption function over market resources:')
plotFuncs(MyType.cFunc[0], 0., 20.)
print('Risky asset share as a function of market resources:')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plotFuncs([MyType.ShareFunc[0]
# ,lambda m: RiskyShareMertSamLogNormal(MyType.RiskPrem,MyType.CRRA,MyType.RiskyVar)*np.ones_like(m)
,lambda m: MyType.ShareLimit*np.ones_like(m)
] , 0., 200.)
# %%
# Now simulate this consumer type
MyType.track_vars = ['cNrmNow', 'ShareNow', 'aNrmNow', 't_age']
MyType.T_sim = 100
MyType.initializeSim()
MyType.simulate()
# %%
print('\n\n\n')
print('For derivation of the numerical limiting portfolio share')
print('as market resources approach infinity, see')
print('http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/Portfolio-CRRA/')
# %%
""
# Make another example type, but this one optimizes risky portfolio share only
# on the discrete grid of values implicitly chosen by RiskyCount, using explicit
# value maximization.
init_discrete_share = init_portfolio.copy()
init_discrete_share['DiscreteShareBool'] = True
init_discrete_share['vFuncBool'] = True # Have to actually construct value function for this to work
# %%
# Make and solve a discrete portfolio choice consumer type
print('Now solving a discrete choice portfolio problem; this might take a minute...')
DiscreteType = PortfolioConsumerType(**init_discrete_share)
DiscreteType.cycles = 0
t0 = time()
DiscreteType.solve()
t1 = time()
DiscreteType.cFunc = [DiscreteType.solution[t].cFuncAdj for t in range(DiscreteType.T_cycle)]
DiscreteType.ShareFunc = [DiscreteType.solution[t].ShareFuncAdj for t in range(DiscreteType.T_cycle)]
print('Solving an infinite horizon discrete portfolio choice problem took ' + str(t1-t0) + ' seconds.')
# %%
# Plot the consumption and risky-share functions
print('Consumption function over market resources:')
plotFuncs(DiscreteType.cFunc[0], 0., 50.)
print('Risky asset share as a function of market resources:')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plotFuncs([DiscreteType.ShareFunc[0]
,lambda m: DiscreteType.ShareLimit*np.ones_like(m)
] , 0., 200.)
# %%
print('\n\n\n')
# %%
""
# Make another example type, but this one can only update their risky portfolio
# share in any particular period with 15% probability.
init_sticky_share = init_portfolio.copy()
init_sticky_share['AdjustPrb'] = 0.15
# %%
# Make and solve a discrete portfolio choice consumer type
print('Now solving a portfolio choice problem with "sticky" portfolio shares; this might take a moment...')
StickyType = PortfolioConsumerType(**init_sticky_share)
StickyType.cycles = 0
t0 = time()
StickyType.solve()
t1 = time()
StickyType.cFuncAdj = [StickyType.solution[t].cFuncAdj for t in range(StickyType.T_cycle)]
StickyType.cFuncFxd = [StickyType.solution[t].cFuncFxd for t in range(StickyType.T_cycle)]
StickyType.ShareFunc = [StickyType.solution[t].ShareFuncAdj for t in range(StickyType.T_cycle)]
print('Solving an infinite horizon sticky portfolio choice problem took ' + str(t1-t0) + ' seconds.')
# %%
# Plot the consumption and risky-share functions
print('Consumption function over market resources when the agent can adjust his portfolio:')
plotFuncs(StickyType.cFuncAdj[0], 0., 50.)
# %%
print("Consumption function over market resources when the agent CAN'T adjust, by current share:")
M = np.linspace(0., 50., 200)
for s in np.linspace(0.,1.,21):
C = StickyType.cFuncFxd[0](M, s*np.ones_like(M))
plt.plot(M,C)
plt.xlim(0.,50.)
plt.ylim(0.,None)
plt.show()
# %%
print('Risky asset share function over market resources (when possible to adjust):')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
plotFuncs([StickyType.ShareFunc[0]
,lambda m: StickyType.ShareLimit*np.ones_like(m)
] , 0., 200.)
# %%
""
# Make another example type, but this one has *age-varying* perceptions of risky asset returns.
# Begin by making a lifecycle dictionary, but adjusted for the portfolio choice model.
init_age_varying_risk_perceptions = copy(init_lifecycle)
init_age_varying_risk_perceptions['RiskyCount'] = init_portfolio['RiskyCount']
init_age_varying_risk_perceptions['ShareCount'] = init_portfolio['ShareCount']
init_age_varying_risk_perceptions['aXtraMax'] = init_portfolio['aXtraMax']
init_age_varying_risk_perceptions['aXtraCount'] = init_portfolio['aXtraCount']
init_age_varying_risk_perceptions['aXtraNestFac'] = init_portfolio['aXtraNestFac']
init_age_varying_risk_perceptions['BoroCnstArt'] = init_portfolio['BoroCnstArt']
init_age_varying_risk_perceptions['CRRA'] = init_portfolio['CRRA']
init_age_varying_risk_perceptions['DiscFac'] = init_portfolio['DiscFac']
# %%
init_age_varying_risk_perceptions['RiskyAvg'] = 10*[1.08]
init_age_varying_risk_perceptions['RiskyStd'] = [0.20,0.21,0.22,0.23,0.24,0.25,0.26,0.27,0.28,0.29]
init_age_varying_risk_perceptions['RiskyAvgTrue'] = 1.08
init_age_varying_risk_perceptions['RiskyStdTrue'] = 0.20
AgeVaryingRiskPercType = PortfolioConsumerType(**init_age_varying_risk_perceptions)
AgeVaryingRiskPercType.cycles = 1
# %%
# Solve the agent type with age-varying risk perceptions
print('Now solving a portfolio choice problem with age-varying risk perceptions...')
t0 = time()
AgeVaryingRiskPercType.solve()
AgeVaryingRiskPercType.cFunc = [AgeVaryingRiskPercType.solution[t].cFuncAdj for t in range(AgeVaryingRiskPercType.T_cycle)]
AgeVaryingRiskPercType.ShareFunc = [AgeVaryingRiskPercType.solution[t].ShareFuncAdj for t in range(AgeVaryingRiskPercType.T_cycle)]
t1 = time()
print('Solving a ' + str(AgeVaryingRiskPercType.T_cycle) + ' period portfolio choice problem with age-varying risk perceptions took ' + str(t1-t0) + ' seconds.')
# %%
# Plot the consumption and risky-share functions
print('Consumption function over market resources in each lifecycle period:')
plotFuncs(AgeVaryingRiskPercType.cFunc, 0., 20.)
print('Risky asset share function over market resources in each lifecycle period:')
plotFuncs(AgeVaryingRiskPercType.ShareFunc, 0., 200.)
# %% [markdown]
# The code below tests the mathematical limits of the model.
# Create a grid of market resources for the plots
mMin = 0 # Minimum ratio of assets to income to plot
mMax = 5*1e2 # Maximum ratio of assets to income to plot
mPts = 1000 # Number of points to plot
eevalgrid = np.linspace(0,mMax,mPts) # range of values of assets for the plot
# Number of points that will be used to approximate the risky distribution
risky_count_grid = [5,200]
# Plot by ages (time periods) at which to plot. We will use the default
# life-cycle calibration, which has 10 periods.
ages = [2, 4, 6, 8]
# Create a function to compute the Merton-Samuelson limiting portfolio share.
def RiskyShareMertSamLogNormal(RiskPrem, CRRA, RiskyVar):
    """Merton-Samuelson limiting risky portfolio share.

    Computes (risk premium) / (CRRA * return variance), the analytical
    limiting share used for comparison in the plots below.

    Args:
        RiskPrem: expected excess return of the risky asset.
        CRRA: coefficient of relative risk aversion.
        RiskyVar: variance of the risky return.

    Returns:
        The limiting optimal risky share (not clipped to [0, 1]).
    """
    return RiskPrem / (CRRA * RiskyVar)
# %% Calibration and solution
# For each discretization size, solve the lifecycle portfolio problem and
# plot the risky share against both the numerical and analytical limits.
for rcount in risky_count_grid:
    # Create a new dictionary and replace the number of points that
    # approximate the risky return distribution
    # Create new dictionary copying the default
    merton_dict = init_lifecycle.copy()
    merton_dict['RiskyCount'] = rcount
    # Create and solve agent
    agent = PortfolioConsumerType(**merton_dict)
    agent.solve()
    # Compute the analytical Merton-Samuelson limiting portfolio share
    RiskyVar = agent.RiskyStd**2
    RiskPrem = agent.RiskyAvg - agent.Rfree
    MS_limit = RiskyShareMertSamLogNormal(RiskPrem,
                                          agent.CRRA,
                                          RiskyVar)
    # Now compute the limiting share numerically, using the approximated
    # distribution
    agent.updateShareLimit()
    NU_limit = agent.ShareLimit
    # Plot the risky share policy at selected ages, plus both limits.
    plt.figure()
    for a in ages:
        plt.plot(eevalgrid,
                 agent.solution[a]\
                 .ShareFuncAdj(eevalgrid),
                 label = 't = %i' %(a))
    plt.axhline(NU_limit, c='k', ls='-.', label = 'Exact limit as $m\\rightarrow \\infty$.')
    plt.axhline(MS_limit, c='k', ls='--', label = 'M&S Limit without returns discretization.')
    plt.ylim(0,1.05)
    plt.xlim(eevalgrid[0],eevalgrid[-1])
    plt.legend()
    plt.title('Risky Portfolio Share by Age\n Risky distribution with {points} equiprobable points'.format(points = rcount))
    plt.xlabel('Wealth (m)')
    plt.ioff()
    plt.draw()
# %%
# %%
| 40.389558 | 161 | 0.74038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,862 | 0.483444 |
985927c5dcddc08c61effceeb19e8c46ce9d7c79 | 1,648 | py | Python | tests/test_order.py | mikedh/lobby | d8449bd2a0dbeafc7d7689b37e9d3ca65eed89e4 | [
"MIT"
] | 1 | 2020-12-09T01:16:01.000Z | 2020-12-09T01:16:01.000Z | tests/test_order.py | ncantrell/lobby | d8449bd2a0dbeafc7d7689b37e9d3ca65eed89e4 | [
"MIT"
] | null | null | null | tests/test_order.py | ncantrell/lobby | d8449bd2a0dbeafc7d7689b37e9d3ca65eed89e4 | [
"MIT"
] | 1 | 2020-10-24T23:53:58.000Z | 2020-10-24T23:53:58.000Z | import lobby
def test_book():
    """Feed a fixed batch of resting limit orders into a fresh order book."""
    # Create a LOB object
    lob = lobby.OrderBook()

    # (qty, price, tid) triples in submission order: asks first, then bids.
    ask_specs = [(5, 101, 100), (5, 103, 101), (5, 101, 102), (5, 101, 103)]
    bid_specs = [(5, 99, 100), (5, 98, 101), (5, 99, 102), (5, 97, 103)]

    some_orders = [
        {'type': 'limit',
         'side': side,
         'qty': qty,
         'price': price,
         'tid': tid}
        for side, specs in (('ask', ask_specs), ('bid', bid_specs))
        for qty, price, tid in specs
    ]

    # Add orders to the book one by one (asks at 101+ and bids at 99- never
    # cross, so the orders simply rest).
    for order in some_orders:
        trades, id_num = lob.processOrder(order, False, False)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test_book()
| 28.912281 | 61 | 0.283981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 442 | 0.268204 |
98594e8f28f9b9c4731a1d8dca33c92082e2d4cf | 2,451 | py | Python | examples/main_simulation_lemon_graph.py | KaterynaMelnyk/GraphKKE | 4651f1a5e75e23ad0b84403151f7000ab8f292eb | [
"MIT"
] | 1 | 2021-07-23T08:47:05.000Z | 2021-07-23T08:47:05.000Z | examples/main_simulation_lemon_graph.py | k-melnyk/graphKKE | 4651f1a5e75e23ad0b84403151f7000ab8f292eb | [
"MIT"
] | null | null | null | examples/main_simulation_lemon_graph.py | k-melnyk/graphKKE | 4651f1a5e75e23ad0b84403151f7000ab8f292eb | [
"MIT"
] | null | null | null | import os
import argparse
import numpy as np
import scipy
import imageio
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import graphkke.generate_graphs.graph_generation as graph_generation
import graphkke.generate_graphs.generate_SDE as generate_SDE
def _str_to_bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is broken: ``bool('False')`` is True, because any
    non-empty string is truthy. Accepts the usual spellings of "true".
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', 't', 'yes', 'y', '1')


parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str,
                    default='/home/katerynam/work/data/artificial/test/')
parser.add_argument('--n_graphs', type=int,
                    default=500)
parser.add_argument('--n_nodes', type=int,
                    default=300)
parser.add_argument('--radius', type=float,
                    default=0.6)
parser.add_argument('--n_wells', type=int,
                    default=3)
# out_state holds a fraction (default 0.1), so it must be parsed as float;
# the original type=int would reject a command-line value such as '0.1'.
parser.add_argument('--out_state', type=float,
                    default=0.1)
parser.add_argument('--if_plot', type=_str_to_bool,
                    default=True)
parser.add_argument('--seed', type=int,
                    default=7)

args = parser.parse_args()
def randb(n, b):
    """Draw n uniform random samples from the interval [b[0], b[1]].

    Returns an array of shape (1, n). Uses numpy directly: ``scipy.rand`` was
    only a deprecated alias of ``numpy.random.rand`` and has been removed
    from modern SciPy releases, so the original call breaks on current SciPy.
    """
    return b[0] + (b[1] - b[0]) * np.random.rand(1, n)
def rand(n, bounds, boxes):
    """Sample n points with one coordinate per row.

    Row i is drawn uniformly from bounds[i, :]; the number of rows is taken
    from ``boxes.size`` (boxes is otherwise unused here).
    """
    dim = boxes.size
    samples = np.zeros([dim, n])
    for axis in range(dim):
        samples[axis, :] = randb(n, bounds[axis, :])
    return samples
if __name__ == '__main__':
    # Simulate a 2-D trajectory of the LemonSlice SDE system with the
    # requested number of wells, then cluster the points to label the states.
    lm = generate_SDE.LemonSlice2D([0.9, 0.9], args.n_graphs, 2, args.n_wells)
    # One random starting point drawn uniformly from [-0.5, 0.5]^2.
    x = rand(1, np.asarray([[-0.5, 0.5], [-0.5, 0.5]]), np.asarray([10, 10]))
    sde_traj = np.asarray(lm.sim_determ_system(x[:, 0]))

    k_means = KMeans(n_clusters=args.n_wells).fit(sde_traj)
    graph_states = k_means.labels_

    # Alternative: reload a previously generated trajectory instead.
    # sde_traj = np.load(args.input_dir + 'traj.npy')
    # graph_states = np.load(args.input_dir + 'graph_states.npy')

    plt.scatter(sde_traj[:, 0], sde_traj[:, 1], c=graph_states)
    plt.show()

    # Build one graph per trajectory point; also collect rendered frames.
    sim_graph = graph_generation.LemonGraph(args.radius, args.n_graphs, args.n_nodes,
                                            graph_states)
    graphs, images, node_points = sim_graph.create_adj_matrix(sde_traj, args.out_state, args.if_plot)
    # Dump each frame as a PNG and stitch them into an animation.
    for i, image in enumerate(images):
        imageio.imwrite(args.input_dir + f'/traj_{i}.png', image)
    imageio.mimsave(args.input_dir + '/anim.gif', images, fps=2)

    # Persist everything needed to rebuild the dataset later.
    np.save(os.path.join(args.input_dir + 'traj.npy'), sde_traj)
    np.save(os.path.join(args.input_dir + 'graphs.npy'), graphs)
    np.save(os.path.join(args.input_dir + 'graph_states.npy'), graph_states)
    np.save(os.path.join(args.input_dir + 'node_points.npy'), node_points)
| 32.68 | 101 | 0.641779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.137495 |
98598ee67261e34d645c03e2285132218a35b61d | 741 | py | Python | tests/io/open_plus.py | peterson79/pycom-micropython-sigfox | 3f93fc2c02567c96f18cff4af9125db8fd7a6fb4 | [
"MIT"
] | 37 | 2017-12-07T15:49:29.000Z | 2022-03-16T16:01:38.000Z | tests/io/open_plus.py | peterson79/pycom-micropython-sigfox | 3f93fc2c02567c96f18cff4af9125db8fd7a6fb4 | [
"MIT"
] | 17 | 2016-06-20T09:06:14.000Z | 2016-08-21T10:09:39.000Z | tests/io/open_plus.py | peterson79/pycom-micropython-sigfox | 3f93fc2c02567c96f18cff4af9125db8fd7a6fb4 | [
"MIT"
] | 22 | 2016-08-01T01:35:30.000Z | 2022-03-22T18:12:23.000Z | import sys
# Micropython/CPython compatibility: prefer the lightweight uos module.
try:
    import uos as os
except ImportError:
    import os

# Skip on ports whose os module cannot delete files.
if not hasattr(os, "unlink"):
    print("SKIP")
    sys.exit()

# cleanup in case testfile exists
try:
    os.unlink("testfile")
except OSError:
    pass

# "r+b" must not create a missing file; opening it should raise OSError.
try:
    f = open("testfile", "r+b")
    print("Unexpectedly opened non-existing file")
except OSError:
    print("Expected OSError")
    pass

# "w+b" creates the file; read back what was just written.
f = open("testfile", "w+b")
f.write(b"1234567890")
f.seek(0)
print(f.read())
f.close()

# Open with truncation
f = open("testfile", "w+b")
f.write(b"abcdefg")
f.seek(0)
print(f.read())
f.close()

# Open without truncation
f = open("testfile", "r+b")
f.write(b"1234")
f.seek(0)
print(f.read())
f.close()

# cleanup
try:
    os.unlink("testfile")
except OSError:
    pass
| 15.122449 | 50 | 0.643725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.364372 |
985b201040312e7c31425d9070fec5f6579d56c8 | 1,198 | py | Python | scraper/spiders/ytch.py | IDex/youtube-live-alert2 | 59d100ee33c4fa5ef2b097ee1c3014d341f414a1 | [
"MIT"
] | null | null | null | scraper/spiders/ytch.py | IDex/youtube-live-alert2 | 59d100ee33c4fa5ef2b097ee1c3014d341f414a1 | [
"MIT"
] | null | null | null | scraper/spiders/ytch.py | IDex/youtube-live-alert2 | 59d100ee33c4fa5ef2b097ee1c3014d341f414a1 | [
"MIT"
] | null | null | null | import json
import re
import datetime
import scrapy
import yaml
import munch
import pathlib
from appdirs import user_config_dir
# Resolve config.yml located three directories above this spider module.
# glob() returns an iterator, so [0] on the list raises IndexError at import
# time if the file is missing.
config_path = list(pathlib.Path(__file__).parent.parent.parent.resolve().glob('config.yml'))[0]
class YtchSpider(scrapy.Spider):
    """Spider that scrapes live-stream details for channels listed in config.yml."""

    name = "ytch"
    # Channel URLs are read from config.yml at class-definition (import) time.
    start_urls = list(
        munch.Munch.fromDict(
            yaml.safe_load(
                (
                    (
                        config_path
                    ).read_text()
                )
            )
        ).channels.values()
    )
    # Scrapy then fetches each channel's "/live" page.
    start_urls = [x + "/live" for x in start_urls]

    def parse(self, response):
        """Parse live stream details if channel is streaming."""
        # Capture the key/value pairs of the embedded videoDetails JSON object;
        # values may be quoted strings, arrays, or the literals true/false.
        res = [
            re.findall(r'(".*?"):(".*?"|\[.*?\]|true|false)', x)
            for x in re.findall(r'videoDetails":{(.*?)}', response.text)
        ]
        if not res:
            # No videoDetails block found: channel not live, yield nothing.
            return
        res = res[0]
        # Rebuild a JSON object from the captured pairs. The last pair is
        # dropped -- presumably it is truncated by the non-greedy {...} match
        # above; TODO confirm against a real page.
        res = "{%s}" % ",".join([":".join(x) for x in res][:-1])
        res = json.loads(res)
        res["keywords"] = ", ".join(res["keywords"])
        # NOTE(review): naive local timestamp -- confirm whether UTC is expected.
        res["scraped_time"] = datetime.datetime.now()
        res = dict(table="youtube_streams", value=res, key="videoId")
        return res
| 27.860465 | 95 | 0.522538 | 970 | 0.809683 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.18197 |
985d8dba6a28036853c43ac631423cf9f7e8d275 | 1,031 | py | Python | pih2o/controls/pump.py | anxuae/piH2O | 4b6330ef51865cd3f028c81d9dcc19de0d7a1acd | [
"MIT"
] | 8 | 2018-05-24T21:38:18.000Z | 2021-11-14T19:54:09.000Z | pih2o/controls/pump.py | anxuae/piH2O | 4b6330ef51865cd3f028c81d9dcc19de0d7a1acd | [
"MIT"
] | null | null | null | pih2o/controls/pump.py | anxuae/piH2O | 4b6330ef51865cd3f028c81d9dcc19de0d7a1acd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Pih2o pump / electro-valve management.
"""
import threading
from RPi import GPIO
from pih2o.utils import LOGGER
class Pump(object):
    """Drive the watering pump / electro-valve relay wired to one GPIO pin.

    The pin is configured as an output and driven LOW (pump off) at
    construction time. A ``threading.Event`` tracks the on/off state so
    ``is_running`` can be queried safely from other threads.
    """

    def __init__(self, pin):
        """Initialize the pump on the given physical GPIO pin, switched off."""
        self._running = threading.Event()
        self.pin = pin
        GPIO.setup(pin, GPIO.OUT)
        GPIO.output(self.pin, GPIO.LOW)

    def is_running(self):
        """
        Return True if the pump is started.
        """
        return self._running.is_set()

    def start(self):
        """
        Start the pump.

        Raises:
            IOError: if the pump is already running.
        """
        if self.is_running():
            # Avoid starting several times to prevent concurrent access
            raise IOError("Watering is already started")
        LOGGER.info("Starting pump (physical pin %s)", self.pin)
        GPIO.output(self.pin, GPIO.HIGH)
        self._running.set()

    def stop(self):
        """
        Stop the pump (idempotent: safe to call when already stopped).
        """
        # Fixed typo in the log message ("Stoping" -> "Stopping").
        LOGGER.info("Stopping pump (physical pin %s)", self.pin)
        GPIO.output(self.pin, GPIO.LOW)
        self._running.clear()
| 22.911111 | 70 | 0.57129 | 887 | 0.86033 | 0 | 0 | 0 | 0 | 0 | 0 | 356 | 0.345296 |
985e9fbe858937265cb958daa577a1fdbc91ad59 | 1,193 | py | Python | driloader/config/config_base.py | lucasmello/Driloader | 09790187eee7550e9fc93eb3479124a63d225e85 | [
"MIT"
] | 4 | 2017-07-16T20:48:45.000Z | 2017-10-05T21:45:24.000Z | driloader/config/config_base.py | lucasmello/Driloader | 09790187eee7550e9fc93eb3479124a63d225e85 | [
"MIT"
] | 16 | 2017-05-31T17:08:43.000Z | 2021-04-19T17:34:31.000Z | driloader/config/config_base.py | lucasmello/Driloader | 09790187eee7550e9fc93eb3479124a63d225e85 | [
"MIT"
] | null | null | null | """
Responsible to return the abstract browser configs.
"""
from abc import ABC, abstractmethod
class BrowserConfigBase(ABC):
    """Abstract interface every browser configuration class implements.

    Concrete subclasses supply the URLs and file names needed to locate,
    download and unpack a given browser driver.
    """

    @abstractmethod
    def base_url(self):
        """Return the root URL driver downloads are served from."""
        raise NotImplementedError

    @abstractmethod
    def zipped_file_name(self, replace_version=''):
        """Return the name of the downloadable driver archive.

        @param replace_version: value substituted for a '{version}'
            placeholder in browsers.ini, needed when versions are dynamic
            (e.g. geckodriver).
        """
        raise NotImplementedError

    @abstractmethod
    def unzipped_file_name(self):
        """Return the driver file name after the archive is extracted."""
        raise NotImplementedError

    @abstractmethod
    def latest_release_url(self):
        """Return the URL that reports the latest driver release."""
        raise NotImplementedError

    @abstractmethod
    def index_url(self):
        """Return the URL of the driver download index page."""
        raise NotImplementedError

    @abstractmethod
    def versions_url(self):
        """Return the URL listing all available driver versions."""
        raise NotImplementedError
| 21.690909 | 72 | 0.606035 | 1,094 | 0.917016 | 0 | 0 | 941 | 0.788768 | 0 | 0 | 540 | 0.45264 |
985fbf3ad25006c87636cc7cd72711de296dda57 | 939 | py | Python | visualize-candle.py | frozenrainyoo/deep_learning.study | 43c8338c7e7cc67cbb6eec61a2666c2487e4bd0e | [
"Unlicense"
] | null | null | null | visualize-candle.py | frozenrainyoo/deep_learning.study | 43c8338c7e7cc67cbb6eec61a2666c2487e4bd0e | [
"Unlicense"
] | null | null | null | visualize-candle.py | frozenrainyoo/deep_learning.study | 43c8338c7e7cc67cbb6eec61a2666c2487e4bd0e | [
"Unlicense"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_finance import candlestick_ohlc
# import matplotlib as mpl then mpl.use('TkAgg')
import pandas as pd
import numpy as np
from datetime import datetime
# Load daily BitMEX OHLCV data and normalize the column names.
df = pd.read_csv('BitMEX-OHLCV-1d.csv')
df.columns = ['date', 'open', 'high', 'low', 'close', 'volume']

# Two stacked panels sharing the x axis: price candles (3x height) on top,
# volume bars below; the top panel's x labels are hidden.
chart_figure = plt.figure(figsize=(10, 5))
chart_figure.set_facecolor('w')
chart_gridspec = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axes = []
axes.append(plt.subplot(chart_gridspec[0]))
axes.append(plt.subplot(chart_gridspec[1], sharex=axes[0]))
axes[0].get_xaxis().set_visible(False)

# candlestick_ohlc expects rows of (x, open, high, low, close); x is the
# integer bar index rather than a date.
x = np.arange(len(df.index))
# NOTE(review): astype(int) truncates fractional prices -- confirm this is
# intended for the instrument being plotted.
ohlc = df[['open', 'high', 'low', 'close']].astype(int).values
dohlc = np.hstack((np.reshape(x, (-1, 1)), ohlc))

# Up candles red, down candles blue; volume as black bars below.
candlestick_ohlc(axes[0], dohlc, width=0.5, colorup='r', colordown='b')
axes[1].bar(x, df.volume, color='k', width=0.6, align='center')
plt.tight_layout()
plt.show()
| 31.3 | 71 | 0.715655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.160809 |
98617c5497bd8dec26edb8b7e9fa627e1c216a59 | 6,006 | py | Python | vmware_nsx/tests/unit/common_plugin/test_housekeeper.py | salv-orlando/vmware-nsx | 6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd | [
"Apache-2.0"
] | null | null | null | vmware_nsx/tests/unit/common_plugin/test_housekeeper.py | salv-orlando/vmware-nsx | 6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd | [
"Apache-2.0"
] | null | null | null | vmware_nsx/tests/unit/common_plugin/test_housekeeper.py | salv-orlando/vmware-nsx | 6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron.tests import base
from neutron_lib import exceptions as n_exc
from vmware_nsx.plugins.common.housekeeper import base_job
from vmware_nsx.plugins.common.housekeeper import housekeeper
class TestJob1(base_job.BaseJob):
    """Minimal housekeeping job used as a test double below.

    The original explicit ``__init__`` only delegated to
    ``BaseJob.__init__`` with the same two arguments, so it is simply
    inherited instead (useless-super-delegation).
    """

    def get_name(self):
        """Return the unique job identifier."""
        return 'test_job1'

    def get_project_plugin(self, plugin):
        """Return a dummy project plugin placeholder."""
        return 'Dummy'

    def get_description(self):
        """Return a human-readable job description."""
        return 'test'

    def run(self, context, readonly=False):
        """No-op job body; tests patch this method with mocks."""
        pass
class TestJob2(TestJob1):
    """Second test job; identical to TestJob1 except for the reported name."""

    def get_name(self):
        return 'test_job2'
class TestHousekeeper(base.BaseTestCase):
    """Exercises NsxHousekeeper job dispatch with a read/write housekeeper."""

    def setUp(self):
        # Two registered jobs; only test_job1 is declared readonly-only.
        self.jobs = ['test_job1', 'test_job2']
        self.readonly_jobs = ['test_job1']
        self.readonly = False
        self.housekeeper = housekeeper.NsxHousekeeper(
            hk_ns='stevedore.test.extension',
            hk_jobs=self.jobs,
            hk_readonly=self.readonly,
            hk_readonly_jobs=self.readonly_jobs)
        self.job1 = TestJob1(self.readonly, self.readonly_jobs)
        self.job2 = TestJob2(self.readonly, self.readonly_jobs)
        # Replace the stevedore-loaded jobs with local test doubles.
        self.housekeeper.jobs = {'test_job1': self.job1,
                                 'test_job2': self.job2}
        self.context = mock.Mock()
        self.context.session = mock.Mock()
        super(TestHousekeeper, self).setUp()

    def test_run_job_readonly(self):
        """Each job can be run individually in readonly mode."""
        with mock.patch.object(self.job1, 'run') as run1,\
                mock.patch.object(self.job2, 'run') as run2:
            self.housekeeper.run(self.context, 'test_job1', readonly=True)
            run1.assert_called_with(mock.ANY, readonly=True)

            self.housekeeper.run(self.context, 'test_job2', readonly=True)
            run2.assert_called_with(mock.ANY, readonly=True)

    def test_run_job_readwrite(self):
        """Readwrite runs are rejected for readonly-only jobs."""
        with mock.patch.object(self.job1, 'run') as run1,\
                mock.patch.object(self.job2, 'run') as run2:
            # job1 is configured as a readonly job so this should fail
            self.assertRaises(
                n_exc.ObjectNotFound,
                self.housekeeper.run, self.context, 'test_job1',
                readonly=False)
            self.assertFalse(run1.called)

            # job2 should run
            self.housekeeper.run(self.context, 'test_job2', readonly=False)
            run2.assert_called_with(mock.ANY, readonly=False)

    def test_run_all_readonly(self):
        """Running 'all' in readonly mode invokes every job."""
        with mock.patch.object(self.job1, 'run') as run1,\
                mock.patch.object(self.job2, 'run') as run2:
            self.housekeeper.run(self.context, 'all', readonly=True)
            run1.assert_called_with(mock.ANY, readonly=True)
            run2.assert_called_with(mock.ANY, readonly=True)

    def test_run_all_readwrite(self):
        """Running 'all' in readwrite mode skips readonly-only jobs."""
        with mock.patch.object(self.job1, 'run') as run1,\
                mock.patch.object(self.job2, 'run') as run2:
            self.housekeeper.run(self.context, 'all', readonly=False)
            # job1 is configured as a readonly job so it was not called
            self.assertFalse(run1.called)
            # job2 should run
            run2.assert_called_with(mock.ANY, readonly=False)
class TestHousekeeperReadOnly(TestHousekeeper):
    """Same scenarios as TestHousekeeper, with the global readonly flag set."""

    def setUp(self):
        super(TestHousekeeperReadOnly, self).setUp()
        # Force the housekeeper into globally read-only operation.
        self.housekeeper.global_readonly = True

    def test_run_job_readonly(self):
        """Readonly runs of individual jobs are still permitted."""
        with mock.patch.object(self.job1, 'run') as run1,\
                mock.patch.object(self.job2, 'run') as run2:
            self.housekeeper.run(self.context, 'test_job1', readonly=True)
            run1.assert_called_with(mock.ANY, readonly=True)

            self.housekeeper.run(self.context, 'test_job2', readonly=True)
            run2.assert_called_with(mock.ANY, readonly=True)

    def test_run_job_readwrite(self):
        """No job may run readwrite while globally read-only."""
        with mock.patch.object(self.job1, 'run') as run1,\
                mock.patch.object(self.job2, 'run') as run2:
            # job1 is configured as a readonly job so this should fail
            self.assertRaises(
                n_exc.ObjectNotFound,
                self.housekeeper.run, self.context, 'test_job1',
                readonly=False)
            self.assertFalse(run1.called)

            # global readonly flag so job2 should also fail
            self.assertRaises(
                n_exc.ObjectNotFound,
                self.housekeeper.run, self.context, 'test_job2',
                readonly=False)
            self.assertFalse(run2.called)

    def test_run_all_readonly(self):
        """Running 'all' readonly still invokes every job."""
        with mock.patch.object(self.job1, 'run') as run1,\
                mock.patch.object(self.job2, 'run') as run2:
            self.housekeeper.run(self.context, 'all', readonly=True)
            run1.assert_called_with(mock.ANY, readonly=True)
            run2.assert_called_with(mock.ANY, readonly=True)

    def test_run_all_readwrite(self):
        """Running 'all' readwrite is rejected outright while read-only."""
        with mock.patch.object(self.job1, 'run') as run1,\
                mock.patch.object(self.job2, 'run') as run2:
            # global readonly flag so 'all' should fail
            self.assertRaises(
                n_exc.ObjectNotFound,
                self.housekeeper.run, self.context, 'all',
                readonly=False)
            self.assertFalse(run1.called)
            self.assertFalse(run2.called)
| 38.012658 | 78 | 0.641026 | 5,143 | 0.85631 | 0 | 0 | 0 | 0 | 0 | 0 | 1,215 | 0.202298 |
986246b1b1aaab1a1a109f6cc6235d6d3aee9c4d | 1,829 | py | Python | tests/integration/fields/follow_reference/test_complex_type.py | guglielmoseminara/mongoengine-goodjson | 00470409fc44e927bc75789b6852cf48215f9d04 | [
"MIT"
] | null | null | null | tests/integration/fields/follow_reference/test_complex_type.py | guglielmoseminara/mongoengine-goodjson | 00470409fc44e927bc75789b6852cf48215f9d04 | [
"MIT"
] | null | null | null | tests/integration/fields/follow_reference/test_complex_type.py | guglielmoseminara/mongoengine-goodjson | 00470409fc44e927bc75789b6852cf48215f9d04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
"""Complex Type Tests."""
import json
import mongoengine_goodjson as gj
import mongoengine as db
from ...fixtures.base import Dictable
from ....con_base import DBConBase
class FollowReferenceFieldLimitRecursionComlexTypeTest(DBConBase):
    """Follow reference field limit recursion with complex type test."""
    # NOTE(review): "Comlex" in the class name is a typo for "Complex";
    # left as-is because renaming would change the test's identity.

    def setUp(self):
        """Setup."""
        # Embedded doc that refers back to its parent and to sibling docs.
        class SubDocument(Dictable, gj.EmbeddedDocument):
            parent = gj.FollowReferenceField("MainDocument")
            ref_list = db.ListField(gj.FollowReferenceField("MainDocument"))

        # Top-level doc with self-references and an embedded SubDocument.
        class MainDocument(Dictable, gj.Document):
            name = db.StringField()
            ref_list = db.ListField(gj.FollowReferenceField("self"))
            subdoc = db.EmbeddedDocumentField(SubDocument)

        self.main_doc_cls = MainDocument
        self.sub_doc_cls = SubDocument
        self.main_docs = []
        for counter in range(4):
            main_doc = MainDocument(name=("Test {}").format(counter))
            main_doc.save()
            self.main_docs.append(main_doc)
        # Negative indices wrap around the 4-element list, so every document
        # ends up in a cycle of references with its neighbours -- this is
        # what exercises the recursion limit.
        for (index, doc) in enumerate(self.main_docs):
            doc.subdoc = SubDocument(
                parent=doc, ref_list=[
                    doc, self.main_docs[index - 1],
                    self.main_docs[index - 2]
                ]
            )
            doc.ref_list.extend([
                doc, self.main_docs[index - 1], self.main_docs[index - 3]
            ])
            doc.save()
        self.expected_data = [doc.to_dict() for doc in self.main_docs]
        self.maxDiff = None

    def test_to_json(self):
        """The serialized json should be equal to the expected data."""
        result = [json.loads(item.to_json()) for item in self.main_docs]
        self.assertEqual(self.expected_data, result)
| 32.087719 | 76 | 0.612903 | 1,615 | 0.882996 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.1345 |
9862a740ff21dde6fd66f39e14e99ce972ea2db9 | 995 | py | Python | src/mine/invest/calc_stock_cost.py | AldrichYang/HelloPython3 | de3d3f3cf293980a7e11aaa488a2621035efc599 | [
"Apache-2.0"
] | null | null | null | src/mine/invest/calc_stock_cost.py | AldrichYang/HelloPython3 | de3d3f3cf293980a7e11aaa488a2621035efc599 | [
"Apache-2.0"
] | null | null | null | src/mine/invest/calc_stock_cost.py | AldrichYang/HelloPython3 | de3d3f3cf293980a7e11aaa488a2621035efc599 | [
"Apache-2.0"
] | null | null | null | # 现在单位成本价,现在数量
current_unit_cost = 78.1
current_amount = 1300
# 计算补仓后成本价
def calc_stock_new_cost(add_buy_amount,add_buy_unit_cost):
# 补仓买入成本
buy_stock_cost = add_buy_amount*add_buy_unit_cost
# 补仓后总投入股票成本 = 现数量 * 现成本单价 + 新数量 * 新成本单价
new_stock_cost = current_amount * current_unit_cost + buy_stock_cost
# 补仓后总股票数量 = 现数量 + 新数量
new_stock_amount = current_amount + add_buy_amount
# 补仓后新成本价 = 补仓后总投入股票成本 / 补仓后总股票数量
new_stock_unit_cost = new_stock_cost/new_stock_amount
# 补仓后新市值 = 新成本单价 * 总股票数量
new_stock_value = add_buy_unit_cost * new_stock_amount
# 补仓后跌幅 = (补仓后新市值-补仓后总投入股票成本)/补仓后总投入股票成本
value_diff_cost = new_stock_value-new_stock_cost
stock_rate = value_diff_cost/new_stock_cost*100
print("本次补仓买入成本: %.2f, 总买入成本: %.2f, 新成本单价: %.2f" % (buy_stock_cost,new_stock_cost, new_stock_unit_cost))
print("新市值: %.2f, 新涨跌幅: %.2f, 新盈亏额: %.2f " % (new_stock_value, stock_rate, value_diff_cost))
# 2021.07.28: precompute the post-top-up average cost for a planned purchase.
calc_stock_new_cost(2000,53.3)
9862a74b10166312c570b45f508f9467f0daa8cf | 16,112 | py | Python | appengine/findit/libs/test_results/webkit_layout_test_results.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/findit/libs/test_results/webkit_layout_test_results.py | asdfghjjklllllaaa/infra | 8f63af54e46194cd29291813f2790ff6e986804d | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/findit/libs/test_results/webkit_layout_test_results.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is for webkit-layout-tests-related operations."""
import re
from libs import test_name_util
from libs.test_results.base_test_results import BaseTestResults
from libs.test_results.classified_test_results import ClassifiedTestResults
# PASSING_STATUSES, FAILING_STATUSES and SKIPPING_STATUSES are copied from
# https://chromium.googlesource.com/chromium/tools/build/+/80940a89cc82f08cca98eb220d9c4b39a6000451/scripts/slave/recipe_modules/test_utils/util.py
PASSING_STATUSES = (
# PASS - The test ran as expected.
'PASS',
# REBASELINE, NEEDSREBASELINE, NEEDSMANUALREBASELINE - Layout test
# specific. Considers all *BASELINE results non-failures.
'REBASELINE',
'NEEDSREBASELINE',
'NEEDSMANUALREBASELINE',
)
FAILING_STATUSES = (
# FAIL - The test did not run as expected.
'FAIL',
# CRASH - The test runner crashed during the test.
'CRASH',
# TIMEOUT - The test hung (did not complete) and was aborted.
'TIMEOUT',
# MISSING - Layout test specific. The test completed but we could not
# find an expected baseline to compare against.
'MISSING',
# LEAK - Layout test specific. Memory leaks were detected during the
# test execution.
'LEAK',
# TEXT, AUDIO, IMAGE, IMAGE+TEXT - Layout test specific, deprecated.
# The test is expected to produce a failure for only some parts.
# Normally you will see "FAIL" instead.
'TEXT',
'AUDIO',
'IMAGE',
'IMAGE+TEXT',
)
SKIPPING_STATUSES = (
# SKIP - The test was not run.
'SKIP',
'WONTFIX')
# These statuses should not appear in actual results, rather they should only
# appear in expects.
NON_TEST_OUTCOME_EXPECTATIONS = ('REBASELINE', 'SKIP', 'SLOW', 'WONTFIX')
_BASE_FILE_PATH = 'third_party/blink/web_tests'
_VIRTUAL_TEST_NAME_PATTERN = re.compile(r'^virtual/[^/]+/(.*)$')
class WebkitLayoutTestResults(BaseTestResults):
def __init__(self, raw_test_results_json, partial_result=False):
super(WebkitLayoutTestResults, self).__init__(raw_test_results_json,
partial_result)
self.test_results_json = WebkitLayoutTestResults.FlattenTestResults(
raw_test_results_json)
def DoesTestExist(self, test_name):
"""Checks if can find the test name in test_results if result is valid.
Returns:
True if test_results_json is valid and the test exists in
test_results_json, False otherwise.
"""
return bool(
self.test_results_json and
(self.test_results_json.get('tests') or {}).get(test_name))
@property
def contains_all_tests(self):
"""
True if the test result is merged results for all shards; False if it's a
partial result.
"""
return not self.partial_result
def IsTestEnabled(self, test_name):
"""Returns True if the test is enabled, False otherwise.
A test can be skipped by setting the expected result to SKIP or WONTFIX.
But the actual result for a skipped test will only be SKIP but not WONTFIX.
"""
if not self.DoesTestExist(test_name):
return False
test_result = self.test_results_json['tests'][test_name]
return not any(s in test_result['expected'] for s in SKIPPING_STATUSES)
def GetFailedTestsInformation(self):
"""Parses the json data to get all reliable failures' information.
Currently this method will only get:
- failed tests in a test step on waterfall from output.json, not include
flakes (tests that eventually passed during retry).
TODO(crbug/836994): parse other test results to get failed tests info.
Returns:
failed_test_log: Logs for failed tests, currently empty string.
reliable_failed_tests: reliable failed tests, and the base name for each
test - For webkit_layout_test base name should be the same as test name.
"""
if not self.test_results_json or not self.test_results_json.get('tests'):
return {}, {}
failed_test_log = {}
reliable_failed_tests = {}
for test_name, test_result in self.test_results_json['tests'].iteritems():
if test_result.get('actual'): # pragma: no branch.
actuals = test_result['actual'].split(' ')
expects = test_result['expected'].split(' ')
if all(
result in FAILING_STATUSES and
not self.ResultWasExpected(result, expects)
for result in set(actuals)): # pragma: no branch.
# A relibale failure is found when all test results are failing
# statuses.
# For the case where test failed with different statuses, we still
# treat it as a reliable failure to be consistent with other tools.
reliable_failed_tests[test_name] = test_name
failed_test_log[test_name] = ''
return failed_test_log, reliable_failed_tests
def IsTestResultUseful(self):
"""Checks if the log contains useful information."""
return bool(
self.test_results_json and
self.test_results_json.get('num_failures_by_type') and
self.test_results_json.get('tests') and all(
isinstance(i, dict) and i.get('actual') and i.get('expected')
for i in self.test_results_json['tests'].itervalues()))
def GetTestLocation(self, test_name):
"""Gets test location for a specific test.
Test file path is constructed from test_name based on some heuristic rule:
1. For test_name in the format like 'virtual/a/bb/ccc.html', file path
should be: 'third_party/blink/web_tests/bb/ccc.html'
2. For other test names, file path should like
'third_party/blink/web_tests/%s' % test_name
# TODO(crbug/806002): Handle below cases.
There are other cases which has NOT been covered:
1. Baseline files: for example, for a test a/bb/ccc.html, it's
possible to find a file like
'third_party/blink/web_tests/a/bb/ccc_expected.txt'. Such files should
also be considered to add to test locations, but not covered right now.
2. Derived tests: for example, for a file named external/wpt/foo.any.js,
there will be two tests generated from it, external/wpt/foo.window.html
and external/wpt/foo.worker.html.
There will be no line number info for webkit_layout_tests because typically
a file is a test.
Note: Since the test location is gotten from heuristic, it will not be as
reliable as gtest (which is from test results log): file might not exist.
Returns:
(dict, str): A dict containing test location info and error string if any.
"""
if not self.DoesTestExist(test_name):
return None, 'test_location not found for %s.' % test_name
test_name = test_name_util.RemoveSuffixFromWebkitLayoutTestName(
test_name_util.RemoveVirtualLayersFromWebkitLayoutTestName(test_name))
return {
'line': None,
'file': '%s/%s' % (_BASE_FILE_PATH, test_name),
}, None
def GetClassifiedTestResults(self):
"""Parses webkit_layout_test results, counts and classifies test results by:
* status_group: passes/failures/skips/unknowns,
* status: actual result status.
Also counts number of expected and unexpected results for each test:
if the status is included in expects or can be considered as expected, it
is expected; otherwise it's unexpected.
Returns:
(ClassifiedTestResults) An object with information for each test:
* total_run: total number of runs,
* num_expected_results: total number of runs with expected results,
* num_unexpected_results: total number of runs with unexpected results,
* results: classified test results in 4 groups: passes, failures, skips
and unknowns. There's another 'notruns' group for gtests, but not
meaningful for webkit_layout_test, so it will always be empty here.
"""
if not self.IsTestResultUseful():
return {}
test_results = ClassifiedTestResults()
for test_name, test_result in self.test_results_json['tests'].iteritems():
actuals = test_result['actual'].split(' ')
expects = test_result['expected'].split(' ')
test_results[test_name].total_run = len(actuals)
for actual in actuals:
if self.ResultWasExpected(actual, expects):
test_results[test_name].num_expected_results += 1
else:
test_results[test_name].num_unexpected_results += 1
if actual in PASSING_STATUSES:
test_results[test_name].results.passes[actual] += 1
elif actual in FAILING_STATUSES:
test_results[test_name].results.failures[actual] += 1
elif actual in SKIPPING_STATUSES:
test_results[test_name].results.skips[actual] += 1
else:
test_results[test_name].results.unknowns[actual] += 1
return test_results
@staticmethod
def IsTestResultsInExpectedFormat(test_results_json):
"""Checks if the log can be parsed by this parser.
Args:
test_results_json (dict): It should be in one of below formats:
{
"tests": {
"mojom_tests": {
"parse": {
"ast_unittest": {
"ASTTest": {
"testNodeBase": {
"expected": "PASS",
"actual": "PASS",
"artifacts": {
"screenshot": ["screenshots/page.png"],
}
}
}
}
}
}
},
"interrupted": false,
"path_delimiter": ".",
"version": 3,
"seconds_since_epoch": 1406662283.764424,
"num_failures_by_type": {
"FAIL": 0,
"PASS": 1
},
"artifact_types": {
"screenshot": "image/png"
}
}
Or
{
"tests": {
"mojom_tests/parse/ast_unittest/ASTTest/testNodeBase": {
"expected": "PASS",
"actual": "PASS",
"artifacts": {
"screenshot": ["screenshots/page.png"],
}
}
},
"interrupted": false,
"path_delimiter": ".",
"version": 3,
"seconds_since_epoch": 1406662283.764424,
"num_failures_by_type": {
"FAIL": 0,
"PASS": 1
},
"artifact_types": {
"screenshot": "image/png"
}
}
"""
if (not isinstance(test_results_json, dict) or
not isinstance(test_results_json.get('tests'), dict)):
return False
flattened = WebkitLayoutTestResults.FlattenTestResults(test_results_json)
return all(
isinstance(i, dict) and i.get('actual') and i.get('expected')
for i in flattened['tests'].itervalues())
@staticmethod
def _GetPathDelimiter(test_results_json):
"""Gets path delimiter, default to '/'."""
return test_results_json.get('path_delimiter') or '/'
@staticmethod
def FlattenTestResults(test_results_json):
"""Flatten test_results_json['tests'] from a trie to a one level dict
and generate new format test_results_json."""
if not test_results_json or not test_results_json.get('tests'):
return test_results_json
sample_key = test_results_json['tests'].keys()[0]
path_delimiter = WebkitLayoutTestResults._GetPathDelimiter(
test_results_json)
if path_delimiter in sample_key:
# This should not happen in raw data, assuming the test results log is
# already flattened.
return test_results_json
# Checks if the sub_test_results_json is a leaf node.
# Checks if can find actual and expected keys in dict since they are
# required fields in per-test results.
def is_a_leaf(sub_test_results_json):
return (sub_test_results_json.get('actual') and
sub_test_results_json.get('expected'))
flattened = {}
def flatten(tests, parent_key=''):
for k, v in tests.items():
new_key = parent_key + path_delimiter + k if parent_key else k
if isinstance(v, dict):
if not is_a_leaf(v):
flatten(v, new_key)
else:
flattened[new_key] = v
new_results = {}
for k, v in test_results_json.iteritems():
if k == 'tests':
flatten(v)
new_results[k] = flattened
else:
new_results[k] = v
return new_results
@staticmethod
def GetMergedTestResults(shard_results):
"""Merges the shards into one and returns the flatten version.
Args:
shard_results (list): A list of dicts with individual shard results.
Returns:
A dict with
- all tests in shards
- constants across all shards
- accumulated values for some keys
"""
if len(shard_results) == 1:
return WebkitLayoutTestResults.FlattenTestResults(shard_results[0])
def MergeAddable(key, merged_value, shard_value):
if (merged_value and not isinstance(merged_value, type(shard_value))):
raise Exception('Different value types for key %s when merging '
'json test results.' % key)
if isinstance(shard_value, int):
merged_value = shard_value + (merged_value or 0)
elif isinstance(shard_value, dict):
merged_value = merged_value or {}
for sub_key, sub_value in shard_value.iteritems():
merged_value[sub_key] = MergeAddable(
sub_key, merged_value.get(sub_key), sub_value)
else:
raise Exception('Value for key %s is not addable.' % key)
return merged_value
merged_results = {}
def MergeShards(shard_result):
matching = [
'builder_name', 'build_number', 'chromium_revision', 'path_delimiter'
]
addable = [
'fixable', 'num_flaky', 'num_passes', 'num_regressions', 'skipped',
'skips', 'num_failures_by_type'
]
for key, value in shard_result.iteritems():
if key == 'interrupted':
# If any shard is interrupted, mark the whole thing as interrupted.
merged_results[key] = value or merged_results.get(key, False)
elif key in matching:
# These keys are constants which should be the same across all shards.
if key in merged_results and merged_results[key] != value:
raise Exception('Different values for key %s when merging '
'json test results: %s vs %s.' %
(key, merged_results.get(key), value))
merged_results[key] = value
elif key in addable:
# These keys are accumulated sums we want to add together.
merged_results[key] = MergeAddable(key, merged_results.get(key),
value)
elif key == 'tests':
merged_results[key] = merged_results.get(key) or {}
merged_results[key].update(value)
for shard_result in shard_results:
MergeShards(WebkitLayoutTestResults.FlattenTestResults(shard_result))
return merged_results
@staticmethod
def ResultWasExpected(result, expected_results):
# pylint: disable=line-too-long
"""Returns whether the result can be treated as an expected result.
Reference: https://chromium.googlesource.com/chromium/src/+/519d9521d16d9d3af3036daf4d1d5f4398f4396a/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py#970
Args:
result: actual result of a test execution
expected_results: list of results listed in test_expectations
"""
if not set(expected_results) - set(NON_TEST_OUTCOME_EXPECTATIONS):
expected_results = set(['PASS'])
if result in expected_results:
return True
if result in ('PASS', 'TEXT', 'IMAGE', 'IMAGE+TEXT', 'AUDIO',
'MISSING') and 'NEEDSMANUALREBASELINE' in expected_results:
return True
if result in ('TEXT', 'IMAGE', 'IMAGE+TEXT',
'AUDIO') and 'FAIL' in expected_results:
return True
if result == 'MISSING' and 'REBASELINE' in expected_results:
return True
if result == 'SKIP':
return True
return False
| 36.618182 | 178 | 0.660191 | 14,101 | 0.875186 | 0 | 0 | 7,329 | 0.454878 | 0 | 0 | 8,528 | 0.529295 |
9865a2e19bb04e676f8b6abab3559b06ec09af1e | 3,477 | py | Python | recvCases/views.py | BattleJudge/recvCase | b7e55cd3c40603fe2c0086066421b269a0664f1e | [
"MIT"
] | null | null | null | recvCases/views.py | BattleJudge/recvCase | b7e55cd3c40603fe2c0086066421b269a0664f1e | [
"MIT"
] | null | null | null | recvCases/views.py | BattleJudge/recvCase | b7e55cd3c40603fe2c0086066421b269a0664f1e | [
"MIT"
] | null | null | null | import os
import zipfile
import hashlib
import logging
import json
from django.conf import settings
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import (AllowAny, IsAuthenticated, )
from .serializers import TestCaseSerializer
from .utils import (rand_str, filter_name_list)
from .conf import ErrorMsg
# NOTE(review): getLogger("") configures the root logger, so this handler
# also receives records from third-party libraries — confirm this is intended.
logger = logging.getLogger("")
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler("recvCases/log/log.txt")
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
class UploadCases(APIView):
    """Receives a zip of test cases, unpacks it and stores it on disk."""
    permission_classes = [AllowAny,]

    def post(self, request):
        """Handles the upload of a test-case zip for a problem.

        Expects ``file`` (the zip upload) and ``problem_id`` in the request
        data. Responds with ``code`` 0 and per-test-case info on success, or
        ``code`` -1 and an error message on failure.
        """
        resp_data = {'code': 0, 'msg': 'success', 'data': {}}
        req_serializer = TestCaseSerializer(data=request.data)
        if not req_serializer.is_valid():
            resp_data['code'] = -1
            resp_data['msg'] = 'Request data error'
            return Response(data=resp_data)
        req_data = request.data
        file = req_data['file']
        # Spool the upload to a uniquely named temp file so that concurrent
        # uploads cannot clash.
        zip_file = f"/tmp/{rand_str()}.zip"
        with open(zip_file, "wb") as f:
            for chunk in file:
                f.write(chunk)
        info, ret_str = self.process_zip(zip_file, req_data['problem_id'])
        os.remove(zip_file)
        if ret_str != "OK":
            resp_data['msg'] = ErrorMsg[ret_str]
            resp_data['code'] = -1
            return Response(data=resp_data)
        resp_data['data'] = info  # info is a dict keyed by test case index
        resp_data['data']['problem_id'] = req_data['problem_id']
        return Response(data=resp_data)

    def process_zip(self, uploaded_zip_file, problem_id, dir=""):
        """Extracts and normalizes test cases from an uploaded zip file.

        Args:
            uploaded_zip_file: Path to the uploaded zip on local disk.
            problem_id: Problem identifier; also used as the directory name
                under settings.TEST_CASE_DIR.
            dir: Optional sub-directory prefix inside the zip. (Name kept for
                backward compatibility even though it shadows the builtin.)

        Returns:
            A (info, status) tuple: ``info`` maps test-case indices to size
            and md5 metadata, ``status`` is "OK" or an error key for ErrorMsg.
        """
        try:
            zip_file = zipfile.ZipFile(uploaded_zip_file, "r")
        except zipfile.BadZipFile:
            logger.info(f'{problem_id}: The uploaded test case zip file is bad.')
            return {}, "BadZipFile"
        name_list = zip_file.namelist()
        test_case_list = filter_name_list(name_list, dir=dir)
        if not test_case_list:
            logger.info(f'{problem_id}: The uploaded test case zip file is empty.')
            return {}, "EmptyZipFile"
        test_case_id = problem_id
        test_case_dir = os.path.join(settings.TEST_CASE_DIR, test_case_id)
        # exist_ok replaces the previous blanket try/except around makedirs:
        # an already-existing directory is fine, anything else should surface.
        os.makedirs(test_case_dir, exist_ok=True)
        os.chmod(test_case_dir, 0o710)
        size_cache = {}
        md5_cache = {}
        # Normalize CRLF line endings in the .in/.out files from the zip
        # and write them to disk.
        for item in test_case_list:
            with open(os.path.join(test_case_dir, item), "wb") as f:
                content = zip_file.read(f"{dir}{item}").replace(b"\r\n", b"\n")
                size_cache[item] = len(content)
                if item.endswith(".out"):
                    md5_cache[item] = hashlib.md5(content.rstrip()).hexdigest()
                f.write(content)
        # The spj field is kept for compatibility with the judge server;
        # uploading spj test cases is not supported.
        test_case_info = {"spj": False, "test_cases": {}}
        info = {}
        # ["1.in", "1.out", "2.in", "2.out"] => [("1.in", "1.out"), ("2.in", "2.out")]
        test_case_list = zip(*[test_case_list[i::2] for i in range(2)])
        # One iteration per (input, output) pair.
        for index, item in enumerate(test_case_list):
            data = {"stripped_output_md5": md5_cache[item[1]],
                    "input_size": size_cache[item[0]],
                    "output_size": size_cache[item[1]],
                    "input_name": item[0],
                    "output_name": item[1]}
            info[str(index + 1)] = data
            test_case_info["test_cases"][str(index + 1)] = data
        # Write the info file describing all extracted test cases.
        with open(os.path.join(test_case_dir, "info"), "w", encoding="utf-8") as f:
            f.write(json.dumps(test_case_info, indent=4))
        # Restrict permissions on the extracted in/out/info files.
        for item in os.listdir(test_case_dir):
            os.chmod(os.path.join(test_case_dir, item), 0o640)
        logger.info(f'{problem_id}: Test cases upload success.')
        return info, "OK"
9866792ed7574a448f6b711a55f93e86a444f71b | 4,309 | py | Python | client/python/tests/test_multi_packet.py | SaschaZ/ff-proxy | 4f692d2edf6533843be00b87d2eb31a2a690b9ba | [
"MIT"
] | 1 | 2020-04-22T18:49:16.000Z | 2020-04-22T18:49:16.000Z | client/python/tests/test_multi_packet.py | SaschaZ/ff-proxy | 4f692d2edf6533843be00b87d2eb31a2a690b9ba | [
"MIT"
] | null | null | null | client/python/tests/test_multi_packet.py | SaschaZ/ff-proxy | 4f692d2edf6533843be00b87d2eb31a2a690b9ba | [
"MIT"
] | null | null | null | from ff_client import FfClient, FfConfig, FfRequest
import unittest
import logging
class TestFfClientMutiPacket(unittest.TestCase):
    """Verifies that a request larger than one MTU is split into two packets."""

    # Header layout: version(2) + request id(8) + total length(4) +
    # chunk offset(4) + chunk length(2) + EOL option type(1) + EOL length(2).
    HEADER_LEN = 23

    @staticmethod
    def _read_be(buff, offset, num_bytes):
        """Reads a big-endian unsigned integer of num_bytes from buff."""
        value = 0
        for i in range(num_bytes):
            value = (value << 8) | buff[offset + i]
        return value

    def _assert_packet_header(self, buff, total_length, chunk_offset,
                              chunk_length):
        """Checks the fixed-size packet header.

        Returns:
            (request_id, payload_offset) — the decoded request id and the
            offset at which the payload starts.
        """
        ptr = 0
        # Request version
        self.assertEqual(FfRequest.Version.V1, self._read_be(buff, ptr, 2))
        ptr += 2
        # Request ID (random, must be non-zero)
        request_id = self._read_be(buff, ptr, 8)
        self.assertNotEqual(0, request_id)
        ptr += 8
        # Total length
        self.assertEqual(total_length, self._read_be(buff, ptr, 4))
        ptr += 4
        # Chunk offset
        self.assertEqual(chunk_offset, self._read_be(buff, ptr, 4))
        ptr += 4
        # Chunk length
        self.assertEqual(chunk_length, self._read_be(buff, ptr, 2))
        ptr += 2
        # EOL option type
        self.assertEqual(FfRequest.Option.Type.EOL, buff[ptr])
        ptr += 1
        # EOL option length (a 2-byte big-endian field; the old hand-rolled
        # check shifted the first byte by 16 instead of 8, which only passed
        # because the value is zero).
        self.assertEqual(0, self._read_be(buff, ptr, 2))
        ptr += 2
        self.assertEqual(self.HEADER_LEN, ptr)
        return request_id, ptr

    def test_create_request_packets(self):
        client = FfClient(FfConfig(ip_address='127.0.0.1',
                                   port=8080,
                                   log_level=logging.DEBUG))
        http_request = "POST / HTTP/1.1\nHost: google.com.au\n\n".ljust(
            2000, '0')
        packets = client.create_request_packets(http_request, https=False)
        self.assertEqual(2, len(packets))
        payload = bytearray(http_request.encode('utf8'))

        # -- Packet 1 --
        packet1_buff = packets[0].payload
        packet1_len = packets[0].length
        self.assertEqual(1300, packet1_len)
        packet1_request_id, ptr = self._assert_packet_header(
            packet1_buff,
            total_length=len(http_request),
            chunk_offset=0,
            chunk_length=1277)
        self.assertEqual(payload[:1277], packet1_buff[ptr:packet1_len])

        # -- Packet 2 --
        packet2_buff = packets[1].payload
        packet2_len = packets[1].length
        self.assertEqual(746, packet2_len)
        packet2_request_id, ptr = self._assert_packet_header(
            packet2_buff,
            total_length=len(http_request),
            chunk_offset=1277,
            chunk_length=len(http_request) - 1277)
        self.assertEqual(payload[1277:], packet2_buff[ptr:packet2_len])

        # Both chunks must carry the same request ID.
        self.assertEqual(packet1_request_id, packet2_request_id)
| 29.114865 | 76 | 0.508239 | 4,223 | 0.980042 | 0 | 0 | 0 | 0 | 0 | 0 | 346 | 0.080297 |
9866d3b08c24b3c2f1e959650162041d580a0cb0 | 22,429 | py | Python | model_card_toolkit/model_card.py | Saiprasad16/model-card-toolkit | ad25e9db786ced922510940988d79c71dd09dbb6 | [
"Apache-2.0"
] | 1 | 2021-05-10T10:53:11.000Z | 2021-05-10T10:53:11.000Z | model_card_toolkit/model_card.py | Saiprasad16/model-card-toolkit | ad25e9db786ced922510940988d79c71dd09dbb6 | [
"Apache-2.0"
] | null | null | null | model_card_toolkit/model_card.py | Saiprasad16/model-card-toolkit | ad25e9db786ced922510940988d79c71dd09dbb6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Card Data Class.
The Model Card (MC) is the document designed for transparent reporting of AI
model provenance, usage, and ethics-informed evaluation. The model card can be
presented by different formats (e.g. HTML, PDF, Markdown). The properties of
the Model Card (MC) are defined by a json schema. The ModelCard class in the
ModelCardsToolkit serves as an API to read and write MC properties by the users.
"""
import abc
import json as json_lib
from typing import Any, Dict, List, Optional, Text
import dataclasses
from model_card_toolkit.proto import model_card_pb2
from model_card_toolkit.utils import validation
from google.protobuf import descriptor
from google.protobuf import message
_SCHEMA_VERSION_STRING = "schema_version"
# TODO(b/181702622): Think about a smart and clean way to control the required
# field.
class BaseModelCardField(abc.ABC):
  """Model card field base class.
  This is an abstract class. All the model card fields should inherit this class
  and override the _proto_type property to the corresponding proto type. This
  abstract class provides methods `copy_from_proto`, `merge_from_proto` and
  `to_proto` to convert the class from and to proto. The child class does not
  need to override this unless it needs some special process.
  """
  @property
  @abc.abstractmethod
  def _proto_type(self):
    """The proto type. Child class should overwrite this."""
  def to_proto(self) -> message.Message:
    """Convert this class object to the proto.

    Walks this object's instance attributes and copies each non-empty value
    into the matching field of a freshly constructed _proto_type message.

    Raises:
      ValueError: If an attribute has no matching field on the proto.
    """
    proto = self._proto_type()
    for field_name, field_value in self.__dict__.items():
      if not hasattr(proto, field_name):
        raise ValueError("%s has no such field named '%s'." %
                         (type(proto), field_name))
      # Falsy values (None, empty string/list) are simply left unset.
      if not field_value:
        continue
      field_descriptor = proto.DESCRIPTOR.fields_by_name[field_name]
      # Process Message type: nested fields are themselves
      # BaseModelCardFields and are converted recursively.
      if field_descriptor.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
        if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          for nested_message in field_value:
            getattr(proto, field_name).add().CopyFrom(nested_message.to_proto()) # pylint: disable=protected-access
        else:
          getattr(proto, field_name).CopyFrom(field_value.to_proto()) # pylint: disable=protected-access
      # Process Non-Message type
      else:
        if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          getattr(proto, field_name).extend(field_value)
        else:
          setattr(proto, field_name, field_value)
    return proto
  def _from_proto(self, proto: message.Message) -> "BaseModelCardField":
    """Convert proto to this class object (in place; returns self).

    Raises:
      TypeError: If `proto` is not an instance of this field's _proto_type.
      ValueError: If the proto has a field this object does not declare.
    """
    if not isinstance(proto, self._proto_type):
      raise TypeError("%s is expected. However %s is provided." %
                      (self._proto_type, type(proto)))
    for field_descriptor in proto.DESCRIPTOR.fields:
      field_name = field_descriptor.name
      if not hasattr(self, field_name):
        raise ValueError("%s has no such field named '%s.'" %
                         (self, field_name))
      # Process Message type.
      if field_descriptor.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
        if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          # Clean the list first.
          setattr(self, field_name, [])
          for p in getattr(proto, field_name):
            # To get the type hint of a list is not easy: List[X].__args__[0]
            # recovers the element class X so it can be instantiated.
            field = self.__annotations__[field_name].__args__[0]() # pytype: disable=attribute-error
            field._from_proto(p) # pylint: disable=protected-access
            getattr(self, field_name).append(field)
        elif proto.HasField(field_name):
          getattr(self, field_name)._from_proto(getattr(proto, field_name)) # pylint: disable=protected-access
      # Process Non-Message type
      else:
        if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          setattr(self, field_name, getattr(proto, field_name)[:])
        elif proto.HasField(field_name):
          setattr(self, field_name, getattr(proto, field_name))
    return self
  def merge_from_proto(self, proto: message.Message) -> "BaseModelCardField":
    """Merges the contents of the model card proto into current object."""
    current = self.to_proto()
    current.MergeFrom(proto)
    self.clear()
    return self._from_proto(current)
  def copy_from_proto(self, proto: message.Message) -> "BaseModelCardField":
    """Copies the contents of the model card proto into current object."""
    self.clear()
    return self._from_proto(proto)
  def to_json(self) -> Text:
    """Convert this class object to json."""
    return json_lib.dumps(self.to_dict(), indent=2)
  def to_dict(self) -> Dict[Text, Any]:
    """Convert your model card to a python dictionary."""
    # Recursively drop falsy properties (None, empty strings/lists/dicts)
    # so missing values are omitted from the output.
    ignore_none = lambda properties: {k: v for k, v in properties if v}
    return dataclasses.asdict(self, dict_factory=ignore_none)
  def clear(self):
    """Clear the subfields of this BaseModelCardField.

    Nested fields are cleared recursively; lists are reset to empty and
    scalar fields to None.
    """
    for field_name, field_value in self.__dict__.items():
      if isinstance(field_value, BaseModelCardField):
        field_value.clear()
      elif isinstance(field_value, list):
        setattr(self, field_name, [])
      else:
        setattr(self, field_name, None)
  @classmethod
  def _get_type(cls, obj: Any):
    # Equivalent to the builtin type(); provides an indirection usable inside
    # class-body annotations (see PerformanceMetric's _proto_type).
    return type(obj)
@dataclasses.dataclass
class Owner(BaseModelCardField):
  """The information about owners of a model.
  Attributes:
    name: The name of the model owner.
    contact: The contact information for the model owner or owners. These could
      be individual email addresses, a team mailing list expressly, or a
      monitored feedback form.
  """
  name: Optional[Text] = None
  contact: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Owner)] = model_card_pb2.Owner
@dataclasses.dataclass
class Version(BaseModelCardField):
  """The information about versions of a model.
  If there are multiple versions of the model, or there may be in the future,
  it's useful for your audience to know which version of the model is
  discussed
  in the Model Card. If there are previous versions of this model, briefly
  describe how this version is different. If no more than one version of the
  model will be released, this field may be omitted.
  Attributes:
    name: The name of the version.
    date: The date this version was released.
    diff: The changes from the previous version.
  """
  name: Optional[Text] = None
  date: Optional[Text] = None
  diff: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Version)] = model_card_pb2.Version
@dataclasses.dataclass
class License(BaseModelCardField):
  """The license information for a model.
  Attributes:
    identifier: A standard SPDX license identifier (https://spdx.org/licenses/),
      or "proprietary" for an unlicensed Module.
    custom_text: The text of a custom license.
  """
  identifier: Optional[Text] = None
  custom_text: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.License)] = model_card_pb2.License
@dataclasses.dataclass
class Reference(BaseModelCardField):
  """Reference for a model.
  Attributes:
    reference: A reference to a resource.
  """
  reference: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Reference)] = model_card_pb2.Reference
@dataclasses.dataclass
class Citation(BaseModelCardField):
  """A citation for a model.
  Attributes:
    style: The citation style, such as MLA, APA, Chicago, or IEEE.
    citation: the citation.
  """
  style: Optional[Text] = None
  citation: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Citation)] = model_card_pb2.Citation
@dataclasses.dataclass
class ModelDetails(BaseModelCardField):
  """This section provides a general, high-level description of the model.
  Attributes:
    name: The name of the model.
    overview: A description of the model card.
    documentation: A more thorough description of the model and its usage.
    owners: The individuals or teams who own the model.
    version: The version of the model.
    licenses: The license information for the model. If the model is licensed
      for use by others, include the license type. If the model is not licensed
      for future use, you may state that here as well.
    references: Provide any additional links the reader may need. You can link
      to foundational research, technical documentation, or other materials that
      may be useful to your audience.
    citations: How should the model be cited? If the model is based on published
      academic research, cite the research.
  """
  name: Optional[Text] = None
  overview: Optional[Text] = None
  documentation: Optional[Text] = None
  owners: List[Owner] = dataclasses.field(default_factory=list)
  version: Optional[Version] = dataclasses.field(default_factory=Version)
  licenses: List[License] = dataclasses.field(default_factory=list)
  references: List[Reference] = dataclasses.field(default_factory=list)
  citations: List[Citation] = dataclasses.field(default_factory=list)
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.ModelDetails)] = model_card_pb2.ModelDetails
@dataclasses.dataclass
class Graphic(BaseModelCardField):
  """A named inline plot.
  Attributes:
    name: The name of the graphic.
    image: The image string encoded as a base64 string.
  """
  name: Optional[Text] = None
  image: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Graphic)] = model_card_pb2.Graphic
@dataclasses.dataclass
class GraphicsCollection(BaseModelCardField):
  """A collection of graphics.
  Each ```graphic``` in the ```collection``` field has both a ```name``` and
  an ```image```. For instance, you might want to display a graph showing the
  number of examples belonging to each class in your training dataset:
  ```python
  model_card.model_parameters.data.train.graphics.collection = [
    {'name': 'Training Set Size', 'image': training_set_size_barchart},
  ]
  ```
  Then, provide a description of the graph:
  ```python
  model_card.model_parameters.data.train.graphics.description = (
    'This graph displays the number of examples belonging to each class ',
    'in the training dataset. ')
  ```
  Attributes:
    description: The description of graphics.
    collection: A collection of graphics.
  """
  description: Optional[Text] = None
  collection: List[Graphic] = dataclasses.field(default_factory=list)
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.GraphicsCollection)] = model_card_pb2.GraphicsCollection
@dataclasses.dataclass
class SensitiveData(BaseModelCardField):
  """Sensitive data, such as PII (personally-identifiable information).
  Attributes:
    sensitive_data: A description of any sensitive data that may be present in a
      dataset. Be sure to note PII information such as names, addresses, phone
      numbers, etc. Preferably, such info should be scrubbed from a dataset if
      possible. Note that even non-identifying information, such as zip code,
      age, race, and gender, can be used to identify individuals when
      aggregated. Please describe any such fields here.
  """
  sensitive_data: List[Text] = dataclasses.field(default_factory=list)
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.SensitiveData)] = model_card_pb2.SensitiveData
@dataclasses.dataclass
class Dataset(BaseModelCardField):
  """Provide some information about a dataset used to generate a model.
  Attributes:
    name: The name of the dataset.
    description: The description of dataset.
    link: A link to the dataset.
    sensitive: Does this dataset contain human or other sensitive data?
    graphics: Visualizations of the dataset.
  """
  name: Optional[Text] = None
  description: Optional[Text] = None
  link: Optional[Text] = None
  sensitive: Optional[SensitiveData] = dataclasses.field(
      default_factory=SensitiveData)
  graphics: GraphicsCollection = dataclasses.field(
      default_factory=GraphicsCollection)
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Dataset)] = model_card_pb2.Dataset
@dataclasses.dataclass
class ModelParameters(BaseModelCardField):
  """Parameters for construction of the model.
  Attributes:
    model_architecture: specifies the architecture of your model.
    data: specifies the datasets used to train and evaluate your model.
    input_format: describes the data format for inputs to your model.
    output_format: describes the data format for outputs from your model.
  """
  model_architecture: Optional[Text] = None
  data: List[Dataset] = dataclasses.field(default_factory=list)
  input_format: Optional[Text] = None
  output_format: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.ModelParameters)] = model_card_pb2.ModelParameters
@dataclasses.dataclass
class PerformanceMetric(BaseModelCardField):
  """The details of the performance metric.
  Attributes:
    type: What performance metric are you reporting on?
    value: What is the value of this performance metric?
    slice: What slice of your data was this metric computed on?
  """
  # TODO(b/179415408): add fields (name, value, confidence_interval, threshold,
  # slice) after gathering requirements (potential clients: Jigsaw)
  # The following fields are EXPERIMENTAL and introduced for migration purpose.
  # NOTE: 'type' and 'slice' shadow builtins, but they are part of the public
  # schema and cannot be renamed without breaking callers.
  type: Optional[Text] = None
  value: Optional[Text] = None
  slice: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict()).
  # _get_type is equivalent to the builtin type() (defined on
  # BaseModelCardField) and maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[BaseModelCardField._get_type(
      model_card_pb2.PerformanceMetric)] = model_card_pb2.PerformanceMetric
@dataclasses.dataclass
class QuantitativeAnalysis(BaseModelCardField):
  """The quantitative analysis of a model.
  Identify relevant performance metrics and display values. Let's say you're
  interested in displaying the accuracy and false positive rate (FPR) of a
  cat vs. dog classification model. Assuming you have already computed both
  metrics, both overall and per-class, you can specify metrics like so:
  ```python
  model_card.quantitative_analysis.performance_metrics = [
    {'type': 'accuracy', 'value': computed_accuracy},
    {'type': 'accuracy', 'value': cat_accuracy, 'slice': 'cat'},
    {'type': 'accuracy', 'value': dog_accuracy, 'slice': 'dog'},
    {'type': 'fpr', 'value': computed_fpr},
    {'type': 'fpr', 'value': cat_fpr, 'slice': 'cat'},
    {'type': 'fpr', 'value': dog_fpr, 'slice': 'dog'},
  ]
  ```
  Attributes:
    performance_metrics: The performance metrics being reported.
    graphics: A collection of visualizations of model performance.
  """
  performance_metrics: List[PerformanceMetric] = dataclasses.field(
      default_factory=list)
  graphics: GraphicsCollection = dataclasses.field(
      default_factory=GraphicsCollection)
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(model_card_pb2.QuantitativeAnalysis
                                       )] = model_card_pb2.QuantitativeAnalysis
@dataclasses.dataclass
class User(BaseModelCardField):
  """A type of user for a model.
  Attributes:
    description: A description of a user.
  """
  description: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.User)] = model_card_pb2.User
@dataclasses.dataclass
class UseCase(BaseModelCardField):
  """A type of use case for a model.
  Attributes:
    description: A description of a use case.
  """
  description: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.UseCase)] = model_card_pb2.UseCase
@dataclasses.dataclass
class Limitation(BaseModelCardField):
  """A limitation a model.
  Attributes:
    description: A description of the limitation.
  """
  description: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Limitation)] = model_card_pb2.Limitation
@dataclasses.dataclass
class Tradeoff(BaseModelCardField):
  """A tradeoff for a model.
  Attributes:
    description: A description of the tradeoff.
  """
  description: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Tradeoff)] = model_card_pb2.Tradeoff
@dataclasses.dataclass
class Risk(BaseModelCardField):
  """Information about risks involved when using the model.
  Attributes:
    name: The name of the risk.
    mitigation_strategy: A mitigation strategy that you've implemented, or one
      that you suggest to users.
  """
  name: Optional[Text] = None
  mitigation_strategy: Optional[Text] = None
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Risk)] = model_card_pb2.Risk
@dataclasses.dataclass
class Considerations(BaseModelCardField):
  """Considerations related to model construction, training, and application.
  The considerations section includes qualitative information about your model,
  including some analysis of its risks and limitations. As such, this section
  usually requires careful consideration, and conversations with many relevant
  stakeholders, including other model developers, dataset producers, and
  downstream users likely to interact with your model, or be affected by its
  outputs.
  Attributes:
    users: Who are the intended users of the model? This may include
      researchers, developers, and/or clients. You might also include
      information about the downstream users you expect to interact with your
      model.
    use_cases: What are the intended use cases of the model? What use cases are
      out-of-scope?
    limitations: What are the known limitations of the model? This may include
      technical limitations, or conditions that may degrade model performance.
    tradeoffs: What are the known accuracy/performance tradeoffs for the model?
    ethical_considerations: What are the ethical risks involved in application
      of this model? For each risk, you may also provide a mitigation strategy
      that you've implemented, or one that you suggest to users.
  """
  users: List[User] = dataclasses.field(default_factory=list)
  use_cases: List[UseCase] = dataclasses.field(default_factory=list)
  limitations: List[Limitation] = dataclasses.field(default_factory=list)
  tradeoffs: List[Tradeoff] = dataclasses.field(default_factory=list)
  ethical_considerations: List[Risk] = dataclasses.field(default_factory=list)
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Considerations)] = model_card_pb2.Considerations
@dataclasses.dataclass
class ModelCard(BaseModelCardField):
  """Fields used to generate the Model Card.
  Attributes:
    model_details: Descriptive metadata for the model.
    model_parameters: Technical metadata for the model.
    quantitative_analysis: Quantitative analysis of model performance.
    considerations: Any considerations related to model construction, training,
      and application.
  """
  model_details: ModelDetails = dataclasses.field(default_factory=ModelDetails)
  model_parameters: ModelParameters = dataclasses.field(
      default_factory=ModelParameters)
  quantitative_analysis: QuantitativeAnalysis = dataclasses.field(
      default_factory=QuantitativeAnalysis)
  considerations: Considerations = dataclasses.field(
      default_factory=Considerations)
  # InitVar keeps _proto_type out of the dataclass fields (and asdict());
  # the class attribute maps this dataclass to its proto message type.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.ModelCard)] = model_card_pb2.ModelCard
  def to_json(self) -> Text:
    """Write ModelCard to JSON, stamped with the latest schema version."""
    model_card_dict = self.to_dict()
    model_card_dict[
        _SCHEMA_VERSION_STRING] = validation.get_latest_schema_version()
    return json_lib.dumps(model_card_dict, indent=2)
  def _from_json(self, json_dict: Dict[Text, Any]) -> "ModelCard":
    """Read ModelCard from JSON.
    If ModelCard fields have already been set, this function will overwrite any
    existing values.
    WARNING: This method's interface may change in the future, do not use for
    critical workflows.
    Args:
      json_dict: A JSON dict from which to populate fields in the model card
        schema.
    Returns:
      self
    Raises:
      JSONDecodeError: If `json_dict` is not a valid JSON string.
      ValidationError: If `json_dict` does not follow the model card JSON
        schema.
      ValueError: If `json_dict` contains a value not in the class or schema
        definition.
    """
    def _populate_from_json(json_dict: Dict[Text, Any],
                            field: BaseModelCardField) -> BaseModelCardField:
      # Recursively copies json_dict values onto the matching attributes of
      # `field`, instantiating nested model card field objects as needed.
      for subfield_key in json_dict:
        if subfield_key.startswith(_SCHEMA_VERSION_STRING):
          continue
        elif not hasattr(field, subfield_key):
          raise ValueError(
              "BaseModelCardField %s has no such field named '%s.'" %
              (field, subfield_key))
        elif isinstance(json_dict[subfield_key], dict):
          subfield_value = _populate_from_json(
              json_dict[subfield_key], getattr(field, subfield_key))
        elif isinstance(json_dict[subfield_key], list):
          subfield_value = []
          for item in json_dict[subfield_key]:
            if isinstance(item, dict):
              # Instantiate the list's element type from the type annotation,
              # e.g. List[Owner] -> Owner().
              new_object = field.__annotations__[subfield_key].__args__[0]() # pytype: disable=attribute-error
              subfield_value.append(_populate_from_json(item, new_object))
            else: # if primitive
              subfield_value.append(item)
        else:
          subfield_value = json_dict[subfield_key]
        setattr(field, subfield_key, subfield_value)
      return field
    # Validate against the model card JSON schema before mutating self.
    validation.validate_json_schema(json_dict)
    self.clear()
    _populate_from_json(json_dict, self)
    return self
| 36.175806 | 116 | 0.727139 | 20,501 | 0.913795 | 0 | 0 | 16,485 | 0.734789 | 0 | 0 | 11,179 | 0.498284 |
98684fbbb86d8c3abdb991d0ab578c4633a3f36f | 3,356 | py | Python | nodes/ros_random_search.py | ARQ-CRISP/bopt_grasp_quality | 219372e6644005651e166ed3091c5410385c7d30 | [
"MIT"
] | null | null | null | nodes/ros_random_search.py | ARQ-CRISP/bopt_grasp_quality | 219372e6644005651e166ed3091c5410385c7d30 | [
"MIT"
] | null | null | null | nodes/ros_random_search.py | ARQ-CRISP/bopt_grasp_quality | 219372e6644005651e166ed3091c5410385c7d30 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division, print_function
import numpy as np
import rospy
from rospkg.rospack import RosPack
from copy import deepcopy
from tf2_ros import TransformListener, Buffer
from bopt_grasp_quality.srv import bopt, boptResponse
from bayesian_optimization import Random_Explorer
from bayesian_optimization.opt_nodes import RS_Node
# from math import nan
from geometry_msgs.msg import PoseStamped, Pose, Transform
def TF2Pose(TF_msg):
new_pose = PoseStamped()
new_pose.header = TF_msg.header
new_pose.pose.position.x = TF_msg.transform.translation.x
new_pose.pose.position.y = TF_msg.transform.translation.y
new_pose.pose.position.z = TF_msg.transform.translation.z
new_pose.pose.orientation.x = TF_msg.transform.rotation.x
new_pose.pose.orientation.y = TF_msg.transform.rotation.y
new_pose.pose.orientation.z = TF_msg.transform.rotation.z
new_pose.pose.orientation.w = TF_msg.transform.rotation.w
return new_pose
if __name__ == "__main__":
rospy.init_node('ros_bo')
# lb_y = rospy.get_param('~lb_x', -.2)
# ub_y = rospy.get_param('~ub_x', .2)
lb_x = [float(xx) for xx in rospy.get_param('~lb_x', [-.2, 0., -.2])]
ub_x = [float(xx) for xx in rospy.get_param('~ub_x', [.2, 0., .2])]
ee_link = rospy.get_param('~ee_link', 'hand_root')
base_link = rospy.get_param('~base_link', 'world')
service_name = rospy.get_param('~commander_service', 'bayes_optimization')
n_iter = rospy.get_param('~search_iters', 20)
resolution = rospy.get_param('~resolution', .001)
tf_buffer = Buffer(rospy.Duration(50))
tf_listener = TransformListener(tf_buffer)
rospy.loginfo(rospy.get_name().split('/')[1] + ': Initialization....')
rospy.loginfo(rospy.get_name().split('/')[1] + ': Getting current pose....')
rospy.sleep(0.5)
try:
ARM_TF = tf_buffer.lookup_transform(base_link, ee_link, rospy.Time().now(), rospy.Duration(0.1))
current_pose = TF2Pose(ARM_TF)
except Exception as e:
rospy.logerr('error in finding the arm...')
rospy.logerr('Starting at (0, 0, 0), (0, 0, 0, 1)')
current_pose = PoseStamped()
current_pose.pose.orientation.w = 1.
pose = [
[current_pose.pose.position.x, current_pose.pose.position.y, current_pose.pose.position.z],
[current_pose.pose.orientation.x, current_pose.pose.orientation.y, current_pose.pose.orientation.z, current_pose.pose.orientation.w]]
rospy.loginfo(
rospy.get_name().split('/')[1] + ': starting at: ({:.3f}, {:.3f}, {:.3f})-({:.3f}, {:.3f}, {:.3f}, {:.3f})'.format(*pose[0] + pose[1])
)
n = len(lb_x)
init_pos = np.array([
current_pose.pose.position.x,
current_pose.pose.position.y,
current_pose.pose.position.z])
assert(len(lb_x) == len(ub_x))
params = {
Random_Explorer.PARAMS.iters :n_iter,
Random_Explorer.PARAMS.init_pos : init_pos,
Random_Explorer.PARAMS.sampling : [resolution] * n}
# lb = current_pose.pose.position.y + lb_x * np.ones((n,))
# ub = current_pose.pose.position.y + ub_x * np.ones((n,))
lb = init_pos[np.arange(len(lb_x))] + lb_x - 1e-10
ub = init_pos[np.arange(len(ub_x))] + ub_x
RS_Node(n, params, lb=lb, ub=ub, init_pose=current_pose.pose, service_name=service_name)
| 40.433735 | 142 | 0.676103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 573 | 0.170739 |
986961f64f65f1a7c4186940df49bdfa9b732036 | 2,356 | py | Python | modules/channel_opting/channel_message_manager.py | DenverCoder1/jct-discord-bot | e4b1df71d96a4127be27e40c9aa41dbb50c3379c | [
"MIT"
] | 10 | 2021-01-06T13:25:13.000Z | 2022-03-01T13:16:46.000Z | modules/channel_opting/channel_message_manager.py | DenverCoder1/jct-discord-bot | e4b1df71d96a4127be27e40c9aa41dbb50c3379c | [
"MIT"
] | 124 | 2021-01-06T13:29:55.000Z | 2022-03-25T14:18:59.000Z | modules/channel_opting/channel_message_manager.py | DenverCoder1/jct-discord-bot | e4b1df71d96a4127be27e40c9aa41dbb50c3379c | [
"MIT"
] | 3 | 2021-01-08T10:27:34.000Z | 2021-03-04T14:49:36.000Z | from typing import List, Optional
from utils.embedder import build_embed
from utils.utils import one
from database.channel_message import ChannelMessage
import discord
class ChannelMessageManager:
def __init__(self, host_channel: discord.TextChannel, emoji: str):
self.__host_channel = host_channel
self.__emoji = emoji
async def process_reaction(
self,
reaction: discord.RawReactionActionEvent,
channel_messages: List[ChannelMessage],
):
if reaction.emoji.name == self.__emoji:
try:
channel_message = one(
cm
for cm in channel_messages
if cm.message_id == reaction.message_id
)
allowed: Optional[bool]
if reaction.event_type == "REACTION_ADD":
member = reaction.member
allowed = False
else:
member = discord.utils.get(
self.__host_channel.members, id=reaction.user_id
)
allowed = None
if not member or member.bot:
return
channel = channel_message.referenced_channel
assert channel is not None
await channel.set_permissions(member, view_channel=allowed)
except StopIteration:
pass
async def create_channel_message(
self, channel_messages: List[ChannelMessage], channel: discord.abc.GuildChannel
):
if not isinstance(channel, discord.TextChannel):
return
message = await self.__send_channel_message(channel)
channel_messages.append(
await ChannelMessage.add_to_database(
message.id, channel.id, self.__host_channel.id
)
)
async def delete_channel_message(
self, channel_messages: List[ChannelMessage], channel: discord.abc.GuildChannel
):
if not isinstance(channel, discord.TextChannel):
return
try:
channel_message = one(
cm for cm in channel_messages if cm.referenced_channel_id == channel.id
)
await (await channel_message.message).delete()
await channel_message.delete_from_database()
channel_messages.remove(channel_message)
except StopIteration:
pass
async def __send_channel_message(
self, channel: discord.TextChannel
) -> discord.Message:
embed = build_embed(
title=channel.name.replace("-", " ").title(),
footer=f"Click {self.__emoji} to opt out of this channel.",
description=channel.mention,
colour=discord.Colour.dark_purple(),
)
message = await self.__host_channel.send(embed=embed)
await message.add_reaction(self.__emoji)
return message
| 29.08642 | 81 | 0.744482 | 2,185 | 0.927419 | 0 | 0 | 0 | 0 | 2,017 | 0.856112 | 71 | 0.030136 |
986a40624bae1a159be9cd68b43440b563e81ee0 | 208 | py | Python | test.py | Timokasse/rediscache | e5bef0da973bdf53efaaea99b0ed9b41bb331ade | [
"Apache-2.0"
] | null | null | null | test.py | Timokasse/rediscache | e5bef0da973bdf53efaaea99b0ed9b41bb331ade | [
"Apache-2.0"
] | null | null | null | test.py | Timokasse/rediscache | e5bef0da973bdf53efaaea99b0ed9b41bb331ade | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from time import sleep
from rediscache import rediscache
import time, redis
@rediscache(1, 2)
def getTestValue():
return (5, 'toto')
if __name__ == '__main__':
myfunction()
| 13.866667 | 33 | 0.697115 | 0 | 0 | 0 | 0 | 60 | 0.288462 | 0 | 0 | 37 | 0.177885 |
986bbe481dae578c33047776f9bf717531d791dd | 588 | py | Python | backend/api/migrations/0005_education.py | EmileSchneider/cityrepo | fc9d84016342f7cc83231aab9853b89c37d542f9 | [
"MIT"
] | null | null | null | backend/api/migrations/0005_education.py | EmileSchneider/cityrepo | fc9d84016342f7cc83231aab9853b89c37d542f9 | [
"MIT"
] | null | null | null | backend/api/migrations/0005_education.py | EmileSchneider/cityrepo | fc9d84016342f7cc83231aab9853b89c37d542f9 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-12-11 14:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20201211_1411'),
]
operations = [
migrations.CreateModel(
name='Education',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('institution', models.CharField(max_length=200)),
('description', models.CharField(max_length=2000)),
],
),
]
| 26.727273 | 114 | 0.585034 | 495 | 0.841837 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.207483 |
986c69808538d451d52888704f858b32d31572cc | 1,246 | py | Python | data/example_dataset/carla/image_file_index/make_image_file_index.py | lukschwalb/bisenetv2-tensorflow | d0bc7406118891fc93e3b9e3aeee4e9dbc9e4cd0 | [
"MIT"
] | null | null | null | data/example_dataset/carla/image_file_index/make_image_file_index.py | lukschwalb/bisenetv2-tensorflow | d0bc7406118891fc93e3b9e3aeee4e9dbc9e4cd0 | [
"MIT"
] | null | null | null | data/example_dataset/carla/image_file_index/make_image_file_index.py | lukschwalb/bisenetv2-tensorflow | d0bc7406118891fc93e3b9e3aeee4e9dbc9e4cd0 | [
"MIT"
] | null | null | null | import os
import os.path as ops
import glob
import random
import tqdm
SOURCE_IMAGE_DIR = '/home/luk/datasets/carla/04-10-town02-ss/gt_images'
SOURCE_LABEL_DIR = '/home/luk/datasets/carla/04-10-town02-ss/gt_annotation'
DST_IMAGE_INDEX_FILE_OUTPUT_DIR = '.'
unique_ids = []
for dir_name in os.listdir(SOURCE_IMAGE_DIR):
image_file_index = []
source_dir = ops.join(SOURCE_IMAGE_DIR, dir_name)
source_image_paths = glob.glob('{:s}/**/*.png'.format(source_dir), recursive=True)
for source_image in tqdm.tqdm(source_image_paths):
image_name = ops.split(source_image)[1]
image_id = image_name.split('.')[0]
label_image_name = '{:s}.png'.format(image_id)
label_image_dir = ops.join(SOURCE_LABEL_DIR, dir_name)
label_image_path = ops.join(label_image_dir, label_image_name)
assert ops.exists(label_image_path), '{:s} not exist'.format(label_image_path)
image_file_index.append('{:s} {:s}'.format(source_image, label_image_path))
random.shuffle(image_file_index)
output_file_path = ops.join(DST_IMAGE_INDEX_FILE_OUTPUT_DIR, '{:s}.txt'.format(dir_name))
with open(output_file_path, 'w') as file:
file.write('\n'.join(image_file_index))
print('Complete')
| 32.789474 | 93 | 0.719904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.154896 |
986cf0c2c336b0be083773fcf498596f4ecc8d23 | 216 | py | Python | Chicago Data Clean/categories_chicago.py | minxstm/Bootcamp_Project_1 | 1f5d15faab605f7e5b678b6eca802eca04351189 | [
"MIT"
] | null | null | null | Chicago Data Clean/categories_chicago.py | minxstm/Bootcamp_Project_1 | 1f5d15faab605f7e5b678b6eca802eca04351189 | [
"MIT"
] | null | null | null | Chicago Data Clean/categories_chicago.py | minxstm/Bootcamp_Project_1 | 1f5d15faab605f7e5b678b6eca802eca04351189 | [
"MIT"
] | null | null | null | import pandas as pd
chicago_df=pd.read_csv("Chicago_Crime_2015-2017.csv")
#print(chicago_df.head())
chicago_vc = chicago_df["Primary Type"].value_counts()
pd.DataFrame(chicago_vc).to_csv("crime_types_chicago_1.csv")
| 36 | 60 | 0.805556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.439815 |
986d1878579f7b790140714073e46cd0014bd988 | 293 | py | Python | Day 3/part1.py | jonomango/advent-of-code-2020 | 86a20a5c48019ae1adacc7b36481e2df0f0069db | [
"MIT"
] | null | null | null | Day 3/part1.py | jonomango/advent-of-code-2020 | 86a20a5c48019ae1adacc7b36481e2df0f0069db | [
"MIT"
] | null | null | null | Day 3/part1.py | jonomango/advent-of-code-2020 | 86a20a5c48019ae1adacc7b36481e2df0f0069db | [
"MIT"
] | null | null | null | trees = []
with open("input.txt", "r") as f:
for line in f.readlines():
trees.append(line[:-1])
# curr pos
x, y = 0, 0
count = 0
while True:
x += 3
y += 1
if y >= len(trees):
break
if trees[y][x % len(trees[y])] == '#':
count += 1
print(count) | 13.318182 | 41 | 0.477816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.095563 |
986dcd1400b08fbde9b3bc935b159961a94ee6ae | 3,193 | py | Python | happenings/tests.py | doismellburning/tango-happenings | f6fc937cc4339812c1e208f328ee223ddc21103a | [
"MIT"
] | null | null | null | happenings/tests.py | doismellburning/tango-happenings | f6fc937cc4339812c1e208f328ee223ddc21103a | [
"MIT"
] | 1 | 2018-03-01T17:14:00.000Z | 2018-03-01T17:56:40.000Z | happenings/tests.py | doismellburning/tango-happenings | f6fc937cc4339812c1e208f328ee223ddc21103a | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
from .models import Event
UserModel = get_user_model()
class TestHappeningsGeneralViews(TestCase):
fixtures = ['events.json', 'users.json']
def setUp(self):
self.event = Event.objects.get(id=1)
self.user = UserModel.objects.all()[0]
def test_index(self):
"""
Test index
"""
resp = self.client.get(reverse('events_index'))
self.assertEqual(resp.status_code, 200)
self.assertTrue('object_list' in resp.context)
def test_events_by_region(self):
"""
Test events_by_region
"""
resp = self.client.get(reverse('events_by_region', args=['Pacific']))
self.assertEqual(resp.status_code, 200)
self.assertTrue('object_list' in resp.context)
self.assertTrue('region' in resp.context)
def test_event_detail(self):
"""
Test for valid event detail.
"""
resp = self.client.get(reverse('event_detail', args=[self.event.slug]))
self.assertEqual(resp.status_code, 200)
self.assertTrue('object' in resp.context)
self.assertTrue('key' in resp.context)
self.assertEquals(self.event.id, resp.context['object'].id)
if self.event.ended:
self.assertFalse('schedule/">Schedule</a>' in resp.content)
def test_event_creation(self):
"""
Test for valid event creation.
"""
self.client.login(username=self.user.username, password='test')
response = self.client.get(reverse('add_event'))
self.assertEqual(response.status_code, 200)
self.assertTrue('form' in response.context)
new_event = {
"featured": True,
"has_playlist": False,
"submitted_by": 1,
"add_date": "2013-08-05",
"slug": "new-test-event",
"start_date": "2013-08-10",
"approved": True,
"info": "This is a new test event.",
"name": "New Test Event",
"region": "Pacific",
}
response = self.client.post(reverse('add_event'))
self.assertEqual(response.status_code, 200)
def test_event_editing(self):
"""
Test for valid event editing.
"""
response = self.client.get(reverse('edit-event', args=[self.event.slug]))
self.assertEqual(response.status_code, 200)
self.assertTrue('object' in response.context)
self.assertTrue('form' in response.context)
def test_ical_creation(self):
response = self.client.get(reverse('event_ical', args=[self.event.slug]))
self.assertEqual(response.status_code, 200)
self.assertTrue(response['Content-Type'].startswith('text/calendar'))
self.assertEquals(response['Filename'], 'filename.ics')
self.assertEquals(response['Content-Disposition'], 'attachment; filename=filename.ics')
response_list = response.content.split('\r\n')
self.assertEquals(response_list[0], 'BEGIN:VCALENDAR')
self.assertEquals(response_list[9], 'SUMMARY:Test Event')
| 36.284091 | 95 | 0.625744 | 3,008 | 0.942061 | 0 | 0 | 0 | 0 | 0 | 0 | 810 | 0.25368 |
986ee4d9a3c5ac634d9bf7328c6b4976f021aaac | 12,618 | py | Python | bopflow/models/yolonet.py | parejadan/bopflow | 183a0e0ae4c76265c1614402c59b3a54d328e097 | [
"Apache-2.0"
] | null | null | null | bopflow/models/yolonet.py | parejadan/bopflow | 183a0e0ae4c76265c1614402c59b3a54d328e097 | [
"Apache-2.0"
] | 6 | 2020-11-13T18:49:14.000Z | 2022-02-10T02:18:36.000Z | bopflow/models/yolonet.py | parejadan/bopflow | 183a0e0ae4c76265c1614402c59b3a54d328e097 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.losses import binary_crossentropy, sparse_categorical_crossentropy
from bopflow.models.darknet import darknet_conv_upsampling, darknet_conv, darknet
from bopflow.models.utils import (
DOutput,
DLabel,
reshape_lambda,
reduce_max_lambda,
boxes_lambda,
nms_lambda,
)
from bopflow.const import (
YOLO_MAX_BOXES,
YOLO_IOU_THRESHOLD,
YOLO_SCORE_THRESHOLD,
DEFAULT_IMAGE_SIZE,
COCO_DEFAULT_CLASSES,
)
from bopflow import LOGGER
def yolo_conv(filters: int, name=None):
def _yolo_conv(x_in):
if isinstance(x_in, tuple):
x, inputs = darknet_conv_upsampling(
x_in=x_in, filters=filters, size=1, up_sampling=2
)
else:
x = inputs = Input(x_in.shape[1:])
x = darknet_conv(x=x, filters=filters, size=1)
x = darknet_conv(x=x, filters=filters * 2, size=3)
x = darknet_conv(x=x, filters=filters, size=1)
x = darknet_conv(x=x, filters=filters * 2, size=3)
x = darknet_conv(x=x, filters=filters, size=1)
return Model(inputs, x, name=name)(x_in)
return _yolo_conv
def yolo_output(filters: int, anchors, num_classes, name=None):
def _yolo_output(x_in):
x = inputs = Input(x_in.shape[1:])
x = darknet_conv(x=x, filters=filters * 2, size=3)
x = darknet_conv(
x=x, filters=anchors * (num_classes + 5), size=1, batch_norm=False
)
x = reshape_lambda(anchors=anchors, num_classes=num_classes + 5)(x)
return tf.keras.Model(inputs, x, name=name)(x_in)
return _yolo_output
def yolo_boxes(pred, anchors, num_classes):
# pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...num_classes))
grid_size = tf.shape(pred)[1]
box_xy, box_wh, objectness, class_probs = tf.split(
pred, (2, 2, 1, num_classes), axis=-1
)
box_xy = tf.sigmoid(box_xy)
objectness = tf.sigmoid(objectness)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss
# !!! grid[x][y] == (y, x)
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, num_classes):
boxes, conf, types = [], [], []
for o in outputs:
boxes.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
conf.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
types.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
bbox = tf.concat(boxes, axis=1)
confidence = tf.concat(conf, axis=1)
class_probs = tf.concat(types, axis=1)
scores = confidence * class_probs
boxes, scores, num_classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
scores=tf.reshape(scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
max_output_size_per_class=YOLO_MAX_BOXES,
max_total_size=YOLO_MAX_BOXES,
iou_threshold=YOLO_IOU_THRESHOLD,
score_threshold=YOLO_SCORE_THRESHOLD,
)
return boxes, scores, num_classes, valid_detections
def yolo_loss(anchors, num_classes=80, ignore_thresh=0.5):
def _yolo_loss(y_true, y_pred):
# 1. transform all pred outputs
# y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(
y_pred, anchors, num_classes
)
pred_xy = pred_xywh[..., 0:2]
pred_wh = pred_xywh[..., 2:4]
# 2. transform all true outputs
# y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))
true_box, true_obj, true_class_idx = tf.split(y_true, (4, 1, 1), axis=-1)
true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
true_wh = true_box[..., 2:4] - true_box[..., 0:2]
# give higher weights to small boxes
box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
# 3. inverting the pred box equations
grid_size = tf.shape(y_true)[1]
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
true_xy = true_xy * tf.cast(grid_size, tf.float32) - tf.cast(grid, tf.float32)
true_wh = tf.math.log(true_wh / anchors)
true_wh = tf.where(tf.math.is_inf(true_wh), tf.zeros_like(true_wh), true_wh)
# 4. calculate all masks
obj_mask = tf.squeeze(true_obj, -1)
# ignore false positive when iou is over threshold
best_iou = tf.map_fn(
reduce_max_lambda, (pred_box, true_box, obj_mask), tf.float32
)
ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
# 5. calculate all losses
xy_loss = (
obj_mask
* box_loss_scale
* tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
)
wh_loss = (
obj_mask
* box_loss_scale
* tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
)
obj_loss = binary_crossentropy(true_obj, pred_obj)
obj_loss = obj_mask * obj_loss + (1 - obj_mask) * ignore_mask * obj_loss
# TODO: use binary_crossentropy instead
class_loss = obj_mask * sparse_categorical_crossentropy(
true_class_idx, pred_class
)
# 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)
xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
return xy_loss + wh_loss + obj_loss + class_loss
return _yolo_loss
class BaseNet:
def __init__(self, labels_mapping: dict):
self.labels_mapping = labels_mapping
self.model = None
def load_saved_model(self, saved_model: str):
loaded = tf.saved_model.load(saved_model)
self.model = loaded.signatures["serving_default"]
return self.model
def load_weights(self, weights_path):
LOGGER.info(f"Loading weights from {weights_path}")
return self.model.load_weights(weights_path)
@property
def layer_names(self):
return [layer.name for layer in self.model.layers]
def get_label_name(self, target_id):
for label_name, label_id in self.labels_mapping.items():
if label_id == target_id:
return label_name
def evaluate(self, image):
"""
Returns
=======
[
DOutput(
box=BBox(),
confidence_score: float, # detection confidence score in (0.5, 1.0)
label=DLabel(),
),
...
]
"""
detections = []
boxes, scores, label_ids, detection_count = self.model(image)
boxes = boxes[0]
scores = scores[0]
label_ids = label_ids[0]
detection_count = detection_count[0].numpy()
for i in range(detection_count):
label_id = int(label_ids[i].numpy())
detections.append(
DOutput(
box=boxes[i].numpy(),
score=scores[i].numpy(),
label=DLabel(
number=label_id, name=self.get_label_name(target_id=label_id)
),
)
)
return detections
class YoloNet(BaseNet):
def __init__(
self,
channels: int,
anchors: np.array,
masks: np.array,
num_classes: int,
labels_mapping: dict,
size=None,
training=False,
):
super().__init__(labels_mapping=labels_mapping)
self.channels = channels if channels else 3
self.num_classes = num_classes if num_classes else 80
self.size = size
self.training = training
if not anchors:
self.anchors = (
np.array(
[
(10, 13),
(16, 30),
(33, 23),
(30, 61),
(62, 45),
(59, 119),
(116, 90),
(156, 198),
(373, 326),
],
np.float32,
)
/ DEFAULT_IMAGE_SIZE
)
else:
self.anchors = anchors
if not masks:
self.masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
else:
self.masks = masks
self._conv_creator = yolo_conv
self.set_model()
def get_input(self):
return Input([self.size, self.size, self.channels], name="input")
def get_conv(self, x: tf.Tensor, x_prev: tf.Tensor, filters: int, mask_index: int):
x_ins = (x, x_prev) if isinstance(x_prev, tf.Tensor) else x
conv_tensor = self._conv_creator(
filters=filters, name=f"yolo_conv_{mask_index}"
)(x_ins)
output_layer = yolo_output(
filters=filters,
anchors=len(self.masks[mask_index]),
num_classes=self.num_classes,
name=f"yolo_output_{mask_index}",
)(conv_tensor)
return conv_tensor, output_layer
def get_lambda_boxes(self, output_layer, mask_index: int):
lambda_instance = boxes_lambda(
box_func=yolo_boxes,
anchors=self.anchors[self.masks[mask_index]],
num_classes=self.num_classes,
lambda_name=f"yolo_boxes_{mask_index}",
)
return lambda_instance(output_layer)
def get_output(self, boxes: tuple):
lambda_instance = nms_lambda(
nms_func=yolo_nms,
anchors=self.anchors,
masks=self.masks,
num_classes=self.num_classes,
lambda_name="yolo_nms",
)
return lambda_instance(boxes)
def set_model(self):
x = inputs = self.get_input()
x_36, x_61, dark_tensor = darknet(name="yolo_darknet")(x)
conv_0, output_0 = self.get_conv(
x=dark_tensor, x_prev=None, filters=512, mask_index=0
)
conv_1, output_1 = self.get_conv(
x=conv_0, x_prev=x_61, filters=256, mask_index=1
)
conv_2, output_2 = self.get_conv(
x=conv_1, x_prev=x_36, filters=128, mask_index=2
)
if self.training:
self.model = Model(inputs, (output_0, output_1, output_2), name="yolov3")
else:
boxes_0 = self.get_lambda_boxes(output_layer=output_0, mask_index=0)
boxes_1 = self.get_lambda_boxes(output_layer=output_1, mask_index=1)
boxes_2 = self.get_lambda_boxes(output_layer=output_2, mask_index=2)
outputs = self.get_output(boxes=(boxes_0[:3], boxes_1[:3], boxes_2[:3]))
self.model = Model(inputs, outputs, name="yolov3")
return self.model
def yolo_v3(
size=None,
channels=3,
anchors=None,
masks=None,
num_classes=80,
labels_mapping=None,
training=False,
just_model=True,
):
network = YoloNet(
channels=channels,
anchors=anchors,
masks=masks,
num_classes=num_classes,
labels_mapping=labels_mapping,
size=size,
training=training,
)
return network.model if just_model else network
def default_detector(weights_path: str, labels_mapping=COCO_DEFAULT_CLASSES):
"""
Loads COCO model from a weights.tf resource
"""
detector = yolo_v3(
num_classes=len(labels_mapping), labels_mapping=labels_mapping, just_model=False
)
detector.load_weights(weights_path).expect_partial()
return detector
def default_model(saved_model: str, labels_mapping=COCO_DEFAULT_CLASSES):
"""
Loads COCO model from a saved_model.pb resource
"""
detector = BaseNet(labels_mapping=labels_mapping)
detector.load_saved_model(saved_model)
return detector
| 32.689119 | 89 | 0.593042 | 5,287 | 0.419005 | 0 | 0 | 95 | 0.007529 | 0 | 0 | 1,180 | 0.093517 |
986f08850a9f3156a8739237ba8493743a09c545 | 1,904 | py | Python | vespa/simulation/auto_gui/experiment_list.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | null | null | null | vespa/simulation/auto_gui/experiment_list.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | 4 | 2021-04-17T13:58:31.000Z | 2022-01-20T14:19:57.000Z | vespa/simulation/auto_gui/experiment_list.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | 3 | 2021-06-05T16:34:57.000Z | 2022-01-19T16:13:22.000Z | # -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.9.3 on Wed Sep 11 13:50:00 2019
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: MyDialog.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.SetSize((473, 300))
self.ListExperiments = wx.ListBox(self, wx.ID_ANY, choices=[], style=0)
self.ButtonCopy = wx.Button(self, wx.ID_ANY, "Copy List to Clipboard")
self.ButtonClose = wx.Button(self, wx.ID_CLOSE, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.on_copy, self.ButtonCopy)
self.Bind(wx.EVT_BUTTON, self.on_close, self.ButtonClose)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyDialog.__set_properties
self.SetTitle("dialog_1")
self.SetSize((473, 300))
self.ButtonClose.SetDefault()
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyDialog.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(self.ListExperiments, 1, wx.ALL | wx.EXPAND, 10)
sizer_2.Add(self.ButtonCopy, 0, 0, 0)
sizer_2.Add((20, 20), 1, 0, 0)
sizer_2.Add(self.ButtonClose, 0, 0, 0)
sizer_1.Add(sizer_2, 0, wx.ALL | wx.EXPAND, 10)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def on_copy(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler 'on_copy' not implemented!")
event.Skip()
def on_close(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler 'on_close' not implemented!")
event.Skip()
# end of class MyDialog
| 31.213115 | 79 | 0.629202 | 1,693 | 0.889181 | 0 | 0 | 0 | 0 | 0 | 0 | 542 | 0.284664 |
9873743ca2a564b0129fc4f8eb4dc2d65eeae81c | 1,227 | py | Python | S4/S4 Library/simulation/familiars/familiar_handlers.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/familiars/familiar_handlers.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/familiars/familiar_handlers.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from gsi_handlers.sim_handlers import _get_sim_info_by_id
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema
familiar_schema = GsiGridSchema(label='Familiars', sim_specific=True)
familiar_schema.add_field('familiar_name', label='Name')
familiar_schema.add_field('familiar_type', label='Type')
familiar_schema.add_field('familiar_active', label='Active')
@GsiHandler('familiar_view', familiar_schema)
def generate_sim_skill_view_data(sim_id:int=None):
familiar_data = []
cur_sim_info = _get_sim_info_by_id(sim_id)
if cur_sim_info is not None:
familiar_tracker = cur_sim_info.familiar_tracker
if familiar_tracker is not None:
active_familiar_id = familiar_tracker.active_familiar_id
for familiar_info in familiar_tracker:
if active_familiar_id is None:
familiar_active = False
else:
familiar_active = familiar_info.uid == active_familiar_id
entry = {'familiar_name': familiar_info.raw_name, 'familiar_type': str(familiar_info.familiar_type), 'familiar_active': str(familiar_active)}
familiar_data.append(entry)
return familiar_data
| 49.08 | 157 | 0.730236 | 0 | 0 | 0 | 0 | 835 | 0.680522 | 0 | 0 | 140 | 0.114099 |
98737ef22d1fe7939a250bbf74abbab55ff11614 | 368 | py | Python | for python/data/mramesh/pframe.py | aerolalit/Auto-Testing-Python-Programs | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | 4 | 2019-10-03T21:16:51.000Z | 2019-10-04T01:28:08.000Z | for python/data/mramesh/pframe.py | aerolalit/Auto-Testing | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | null | null | null | for python/data/mramesh/pframe.py | aerolalit/Auto-Testing | dd49ab266c9f0fd8e34278f68f8af017711942e3 | [
"MIT"
] | null | null | null | #35011
#a3_p10.py
#Miruthula Ramesh
#mramesh@jacobs-university.de
n = int(input("Enter the width"))
w = int(input("Enter the length"))
c = input("Enter a character")
space=" "
def print_frame(n, w):
for i in range(n):
if i == 0 or i == n-1:
print(w*c)
else:
print(c + space*(w-2) + c)
print_frame(n,w)
| 21.647059 | 42 | 0.540761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.334239 |
98746ffc4cea4687e7a965dcec7581058c45625e | 3,430 | py | Python | detectors/hamlog.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | null | null | null | detectors/hamlog.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | 1 | 2019-07-03T13:32:38.000Z | 2019-07-03T13:32:38.000Z | detectors/hamlog.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | [
"MIT"
] | 1 | 2019-05-15T16:01:31.000Z | 2019-05-15T16:01:31.000Z | # Copyright (C) 2012 Dr. Ralf Schlatterbeck Open Source Consulting.
# Reichergasse 131, A-3411 Weidling.
# Web: http://www.runtux.com Email: office@runtux.com
# All rights reserved
# ****************************************************************************
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ****************************************************************************
#
#++
# Name
# doc
#
# Purpose
# Detectors for hamlog
#--
from roundup.exceptions import Reject
from roundup.cgi.TranslationService import get_translation
from hamlib import fix_qsl_status
import common
def check_qso_empty (db, cl, nodeid, old_values) :
""" Retire qsl if qso Link is removed """
if 'qso' in old_values and not cl.get (nodeid, 'qso') :
cl.retire (nodeid)
# end def check_qso_empty
def check_dupe_qsl_type (db, cl, nodeid, new_values) :
common.require_attributes (_, cl, nodeid, new_values, 'qsl_type', 'qso')
type = new_values ['qsl_type']
qso = new_values ['qso']
qsl = db.qsl.filter (None, dict (qso = qso, qsl_type = type))
qn = db.qsl_type.get (type, 'name')
if qsl :
raise Reject, _ ('Duplicate QSL type "%s" for QSO' % qn)
# end def check_dupe_qsl_type
def check_owner_has_qsos (db, cl, nodeid, new_values) :
    """ When calls are removed from a user: refuse the change if a removed
        call still owns QSOs, otherwise retire the now-unowned ham_call.
    """
    if 'call' not in new_values :
        return
    oldcalls = set (cl.get (nodeid, 'call'))
    newcalls = set (new_values ['call'])
    deleted = oldcalls - newcalls
    if not deleted :
        return
    for call in deleted :
        qsos = db.qso.filter (None, dict (owner = call))
        if qsos :
            name = db.ham_call.get (call, 'name')
            # python-3 compatible raise (was ``raise Reject, msg``);
            # also fixes the "Cant't" typo in the message
            raise Reject \
                (_ ('Can\'t delete "%(name)s" Call has QSOs') % locals ())
        else :
            db.ham_call.retire (call)
# end def check_owner_has_qsos
def fix_stati_qsl (db, cl, nodeid, old_values) :
    """ Recompute QSL status for the QSO this QSL links to """
    qso_id = cl.get (nodeid, 'qso')
    fix_qsl_status (db, qso_id)
# end def fix_stati_qsl
def fix_stati_qso (db, cl, nodeid, old_values) :
    """ Recompute QSL stati on QSO creation or when wont_qsl_via changed """
    attr = 'wont_qsl_via'
    if not old_values :
        # creation: no previous values exist yet
        fix_qsl_status (db, nodeid)
        return
    if attr in old_values and old_values [attr] != cl.get (nodeid, attr) :
        fix_qsl_status (db, nodeid)
# end def fix_stati_qso
def init (db) :
    """ Register hamlog auditors/reactors; no-op for trackers without qso """
    if 'qso' not in db.classes :
        return
    global _
    translation = get_translation \
        (db.config.TRACKER_LANGUAGE, db.config.TRACKER_HOME)
    _ = translation.gettext
    # (register-method, event, detector) -- order preserved from the
    # original registration sequence
    registrations = \
        ( (db.qsl.react,  'set',    check_qso_empty)
        , (db.qsl.audit,  'create', check_dupe_qsl_type)
        , (db.qsl.react,  'create', fix_stati_qsl)
        , (db.qsl.react,  'set',    fix_stati_qsl)
        , (db.qso.react,  'create', fix_stati_qso)
        , (db.qso.react,  'set',    fix_stati_qso)
        , (db.user.audit, 'set',    check_owner_has_qsos)
        )
    for register, event, detector in registrations :
        register (event, detector)
# end def init
### __END__
| 35.360825 | 78 | 0.62828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,507 | 0.439359 |
9874db724f940ec221da9bfba43331479dc0fde4 | 8,091 | py | Python | grgrlib/core.py | gboehl/grgrlib | 745281875148afc8a3442fdd6ddbf16db9ca49db | [
"MIT"
] | null | null | null | grgrlib/core.py | gboehl/grgrlib | 745281875148afc8a3442fdd6ddbf16db9ca49db | [
"MIT"
] | null | null | null | grgrlib/core.py | gboehl/grgrlib | 745281875148afc8a3442fdd6ddbf16db9ca49db | [
"MIT"
] | 4 | 2018-11-17T08:43:16.000Z | 2022-01-25T13:20:03.000Z | #!/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg as nl
import scipy.linalg as sl
import scipy.stats as ss
import time
aca = np.ascontiguousarray
def nul(n):
    """Return an n-by-n array of zeros."""
    shape = (n, n)
    return np.zeros(shape)
def iuc(x, y):
    """
    Check whether each generalized EV pair (x, y) lies inside the unit
    circle, i.e. |x / y| < 1. Pairs with y == 0 (including (0, 0)) count
    as outside. Kept for legacy reasons.
    """
    finite = y != 0
    result = np.zeros_like(x, dtype=bool)  # y == 0 entries stay False
    ratio = np.abs(x[finite] / y[finite])
    result[finite] = ratio < 1.0
    return result
def ouc(x, y):
    """
    Check whether each generalized EV pair (x, y) lies outside the unit
    circle, i.e. |x / y| > 1. Pairs with y == 0 (including (0, 0)) count
    as outside. Stolen from scipy and inverted; kept for legacy reasons.
    """
    result = np.ones_like(x, dtype=bool)  # y == 0 entries stay True
    finite = y != 0
    result[finite] = np.abs(x[finite] / y[finite]) > 1.0
    return result
def klein(A, B=None, nstates=None, verbose=False, force=False):
    """Solve the linear RE system via Klein's QZ method.

    Parameters
    ----------
    A : (n, n) ndarray
        Lead matrix of the system.
    B : (n, n) ndarray, optional
        Lag matrix; defaults to the identity.
    nstates : int, optional
        Number of predetermined states. If omitted, taken as the number of
        generalized EVs outside the unit circle.
    verbose : bool or int
        Print diagnostics; values > 1 additionally print the generalized EVs.
    force : bool
        Continue (with a warning) even if the Blanchard-Kahn count check fails.

    Returns
    -------
    omg, lam : ndarrays
        Observable mapping (controls on states) and state transition matrix.

    Raises
    ------
    ValueError
        On numerically inaccurate QZ output, or a failed B-K check when
        ``force`` is False.
    """
    st = time.time()
    if B is None:
        B = np.eye(A.shape[0])
    # generalized Schur (QZ) decomposition, explosive roots ordered last
    SS, TT, alp, bet, Q, Z = sl.ordqz(A, B, sort="ouc")
    if np.any(np.isclose(alp, bet)):
        mess = " Warning: unit root detected!"
    else:
        mess = ""
    # check for precision
    if not fast0(Q @ SS @ Z.T - A, 2):
        raise ValueError("Numerical errors in QZ")
    if verbose > 1:
        # compute ratios alp/bet for display; bet == 0 maps to +/- inf
        out = np.empty_like(alp)
        nonzero = bet != 0
        out[~nonzero] = np.inf * np.abs(alp[~nonzero])
        out[nonzero] = alp[nonzero] / bet[nonzero]
        print(
            "[RE solver:]".ljust(15, " ") + " Generalized EVs:\n", np.sort(np.abs(out))
        )
    # check for Blanchard-Kahn
    out = ouc(alp, bet)
    if not nstates:
        nstates = sum(out)
    else:
        if not nstates == sum(out):
            mess = (
                "B-K condition not satisfied: %s states but %s Evs inside the unit circle."
                % (nstates, sum(out))
                + mess
            )
            if not force:
                raise ValueError(mess)
            elif verbose:
                print(mess)
    # partition the ordered QZ factors at the state/control boundary
    S11 = SS[:nstates, :nstates]
    T11 = TT[:nstates, :nstates]
    Z11 = Z[:nstates, :nstates]
    Z21 = Z[nstates:, :nstates]
    # changed from sl to nl because of stability:
    omg = Z21 @ nl.inv(Z11)
    lam = Z11 @ nl.inv(S11) @ T11 @ nl.inv(Z11)
    if verbose:
        print(
            "[RE solver:]".ljust(15, " ")
            + " Done in %s. Determinant of `Z11` is %1.2e. There are %s EVs o.u.c. (of %s)."
            % (np.round((time.time() - st), 5), nl.det(Z11), sum(out), len(out))
            + mess
        )
    return omg, lam
# def re_bk(A, B=None, d_endo=None, verbose=False, force=False):
# """
# Klein's method
# """
# # TODO: rename this
# print('[RE solver:]'.ljust(15, ' ') +
# ' `re_bk` is depreciated. Use `klein` instead.')
# if B is None:
# B = np.eye(A.shape[0])
# MM, PP, alp, bet, Q, Z = sl.ordqz(A, B, sort='iuc')
# if not fast0(Q @ MM @ Z.T - A, 2):
# raise ValueError('Numerical errors in QZ')
# if verbose > 1:
# print('[RE solver:]'.ljust(15, ' ') +
# ' Pairs of `alp` and `bet`:\n', np.vstack((alp, bet)).T)
# out = ouc(alp, bet)
# if not d_endo:
# d_endo = sum(out)
# else:
# if sum(out) > d_endo:
# mess = 'B-K condition not satisfied: %s EVs outside the unit circle for %s forward looking variables.' % (
# sum(out), d_endo)
# elif sum(out) < d_endo:
# mess = 'B-K condition not satisfied: %s EVs outside the unit circle for %s forward looking variables.' % (
# sum(out), d_endo)
# else:
# mess = ''
# if mess and not force:
# raise ValueError(mess)
# elif mess and verbose:
# print(mess)
# Z21 = Z.T[-d_endo:, :d_endo]
# Z22 = Z.T[-d_endo:, d_endo:]
# if verbose:
# print('[RE solver:]'.ljust(
# 15, ' ')+' Determinant of `Z21` is %1.2e. There are %s EVs o.u.c.' % (nl.det(Z21), sum(out)))
# return -nl.inv(Z21) @ Z22
def lti(AA, BB, CC, dimp, dimq, tol=1e-6, check=False, verbose=False):
    """Standard linear time iteration.

    Iterates ``g <- -(BB + AA @ g)^{-1} CC`` to a fixed point and splits the
    converged ``g`` into the observable mapping and the state transition.

    Parameters
    ----------
    AA, BB, CC : (dimq+dimp, dimq+dimp) ndarrays
        System matrices.
    dimp : int
        Number of forward-looking (control) variables.
    dimq : int
        Number of predetermined (state) variables.
    tol : float
        Convergence tolerance on the max-abs change of ``g``.
    check : bool
        Unused; kept for interface compatibility (the original held a dead
        ``if check: pass`` branch).
    verbose : bool
        Print the number of iterations performed.

    Returns
    -------
    omg, lam : ndarrays
        Control block ``g[dimq:, :dimq]`` and state block ``g[:dimq, :dimq]``.
    """
    g = np.eye(dimq + dimp)
    norm = tol + 1
    icnt = 0
    while norm > tol:
        gn = g
        g = -nl.solve(BB + AA @ g, CC)
        norm = np.max(np.abs(gn - g))
        icnt += 1
    if verbose:
        print(icnt)
    omg = g[dimq:, :dimq]
    lam = g[:dimq, :dimq]
    return omg, lam
def speed_kills(A, B, dimp, dimq, selector=None, tol=1e-6, check=False,
                verbose=False, max_iter=1000):
    """Improved linear time iteration on a triangularized system.

    Parameters
    ----------
    A, B : (dimq+dimp, dimq+dimp) ndarrays
        System matrices. ``A`` is QR-decomposed; both are transformed in
        place (local copies, the caller's arrays are not reused afterwards).
    dimp : int
        Number of forward-looking (control) variables.
    dimq : int
        Number of predetermined (state) variables.
    selector : array-like of bool/int, optional
        If given, convergence is measured only on the selected entries of g.
    tol : float
        Convergence tolerance.
    check : bool
        Unused; kept for interface compatibility.
    verbose : bool
        Print the number of iterations performed.
    max_iter : int
        Iteration cap. New parameter (appended, default keeps old call
        signatures working): the original referenced an undefined
        ``max_iter`` name and crashed with a NameError.

    Returns
    -------
    (g, T) : tuple of ndarrays
        Converged decision-rule block and the implied transition matrix.

    Raises
    ------
    Exception
        If no fixed point is reached within ``max_iter`` iterations.
    """
    q, A = nl.qr(A)
    B = q.T @ B
    # normalize the control block so its B-part becomes the identity
    B11i = nl.inv(B[dimq:, dimq:])
    A[dimq:] = B11i @ A[dimq:]
    B[dimq:] = B11i @ B[dimq:]
    A[:dimq] -= B[:dimq, dimq:] @ A[dimq:]
    B[:dimq, :dimq] -= B[:dimq, dimq:] @ B[dimq:, :dimq]
    B[:dimq, dimq:] = 0
    B[dimq:, dimq:] = np.eye(dimp)
    A1 = A[:dimq, :dimq]
    A3 = A[dimq:, dimq:]
    A2 = A[:dimq, dimq:]
    B1 = B[:dimq, :dimq]
    B2 = B[dimq:, :dimq]
    g = -B2
    norm = tol + 1
    icnt = 0
    while norm > tol:
        gn = g
        g = A3 @ g @ nl.solve(A1 + A2 @ g, B1) - B2
        if selector is not None:
            norm = np.max(np.abs(gn - g)[selector])
        else:
            norm = np.max(np.abs(gn - g))
        icnt += 1
        # guard against a non-converging iteration (was unreachable/broken
        # before because max_iter did not exist)
        if icnt >= max_iter:
            raise Exception("iteration did not converge")
    if verbose:
        print(icnt)
    return g, -nl.inv(A[:dimq, :dimq] + A2 @ g) @ B1
def fast0(A, mode=-1, tol=1e-08):
    """Test entries of ``A`` for being (numerically) zero.

    Parameters
    ----------
    A : ndarray
        Array to test.
    mode : int
        -1 -> elementwise boolean array (default);
         0 -> reduce over axis 0 (per-column all-zero);
         1 -> reduce over axis 1 (per-row all-zero);
        anything else -> single bool, True iff the whole array is near zero.
    tol : float
        Absolute tolerance.
    """
    con = abs(A) < tol
    if mode == -1:
        return con
    elif mode == 0:
        return con.all(axis=0)
    elif mode == 1:
        return con.all(axis=1)
    else:
        return con.all()
def map2arr(iterator, return_np_array=True, check_nones=True):
    """Stack the results coming out of ``map`` into a tuple of arrays.

    If the mapped function returns tuples, each tuple slot is collected
    into its own list/array and a tuple of those is returned; otherwise a
    single list/array is returned. ``None`` results are skipped when
    ``check_nones`` is set. Note this does not work if the single result
    of interest is itself a tuple.

    Parameters
    ----------
    iterator : iter
        The iterator returned from ``map``.

    Returns
    -------
    numpy array (optional: list)
    """
    collected = ()
    tuple_mode = None  # None: nothing seen yet; True: tuple results; False: scalars
    for item in iterator:
        if check_nones and item is None:
            continue
        if tuple_mode is None:
            if isinstance(item, tuple):
                tuple_mode = True
                collected = tuple([element] for element in item)
            else:
                tuple_mode = False
                collected = [item]
        elif tuple_mode:
            for position, element in enumerate(item):
                collected[position].append(element)
        else:
            collected.append(item)
    if return_np_array:
        if tuple_mode:
            return tuple(np.array(bucket) for bucket in collected)
        return np.array(collected)
    return collected
def napper(cond, interval=0.1):
    """Block until ``cond()`` is truthy, printing the elapsed time.

    Parameters
    ----------
    cond : callable
        Zero-argument predicate; polled every ``interval`` seconds.
    interval : float
        Sleep time between polls, in seconds.
    """
    import time

    start_time = time.time()
    # elt must exist even if cond() is already true on entry -- the
    # original raised NameError at the final print in that case
    elt = 0.0
    while not cond():
        elt = round(time.time() - start_time, 3)
        print("Zzzz... " + str(elt) + "s", end="\r", flush=True)
        time.sleep(interval)
    print("Zzzz... " + str(elt) + "s.")
def timeprint(s, round_to=5, full=False):
    """Format a duration of *s* seconds as a human-readable string.

    Below one minute the value is rounded to *round_to* decimals; above,
    it is broken into (hours,) minutes and whole seconds. ``full`` picks
    the verbose wording over the compact "1h2m3s" form.
    """
    if s < 60:
        secs = np.round(s, round_to)
        suffix = " seconds" if full else "s"
        return str(secs) + suffix
    minutes, seconds = divmod(s, 60)
    if minutes < 60:
        if full:
            return "%s minutes, %s seconds" % (int(minutes), int(seconds))
        return "%sm%ss" % (int(minutes), int(seconds))
    hours, minutes = divmod(minutes, 60)
    if full:
        return "%s hours, %s minutes, %s seconds" % (int(hours), int(minutes), int(seconds))
    return "%sh%sm%ss" % (int(hours), int(minutes), int(seconds))
def shuffle(a, axis=-1):
    """Shuffle an array along a single axis.

    All slices along *axis* are permuted as whole units; the shape is
    preserved. Uses numpy's global RNG (not reproducible without seeding).
    """
    original_shape = a.shape
    rows = a.reshape(-1, a.shape[axis])
    np.random.shuffle(rows)
    return rows.reshape(original_shape)
def print_dict(d):
    """Print every key/value pair of *d* as "key: value"; returns 0."""
    for key, value in d.items():
        print(str(key) + ":", value)
    return 0
def sabs(x, eps=1e-10):
    """Smooth absolute value: sqrt(x**2 + eps), differentiable at 0."""
    smoothed_square = x ** 2 + eps
    return np.sqrt(smoothed_square)
# aliases
map2list = map2arr  # historical name for the same stacking helper
indof = np.searchsorted  # "index of": insertion points in a sorted array
| 22.412742 | 253 | 0.53096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,689 | 0.332345 |
98793af1d2a1730aeaf0a67933baaa6757991887 | 1,357 | py | Python | Largest_product_in_series.py | tuyenta/Project-Euler-Solutions | 7480f39351e71afaf9285a5730ab5dc1c8adb0c8 | [
"MIT"
] | null | null | null | Largest_product_in_series.py | tuyenta/Project-Euler-Solutions | 7480f39351e71afaf9285a5730ab5dc1c8adb0c8 | [
"MIT"
] | null | null | null | Largest_product_in_series.py | tuyenta/Project-Euler-Solutions | 7480f39351e71afaf9285a5730ab5dc1c8adb0c8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 10:36:17 2019
@author: tuyenta
"""
s = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
largestProduct = 0
# Scan every window of 13 adjacent digits. The last window starts at
# index len(s) - 13, so the exclusive range bound must be len(s) - 12;
# the original range(0, len(s) - 13) skipped the final window.
for i in range(0, len(s) - 12):
    product = 1
    for digit in s[i:i + 13]:
        product *= int(digit)
    if product > largestProduct:
        largestProduct = product
print (largestProduct)
987a1d628a455fb7547d351f18993c8d80a36f30 | 6,649 | py | Python | django-app/main/views.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | 9 | 2019-07-07T02:57:50.000Z | 2022-01-07T10:03:30.000Z | django-app/main/views.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | null | null | null | django-app/main/views.py | honchardev/crypto-sentiment-app | 176a6ed61246490c42d2a2b7af4d45f67e3c7499 | [
"MIT"
] | null | null | null | import json
import time
from datetime import datetime
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.http import HttpResponse, JsonResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from ratelimit.decorators import ratelimit
from .preprocessors.findata_preprocessor import (MarketDataCleaner,
PriceDataCleaner)
from .scrapers.scrape_fin import (FinScraperPerformer, MarketDataScraper,
PriceDataScraper)
from .scrapers.scrape_twitter import TwitterScraperPerformer
# Module-level scraper singletons shared by all request handlers below;
# instantiated once at import time.
twitter_scraper_performer = TwitterScraperPerformer()
fin_scraper_performer = FinScraperPerformer()
marketdata_scraper = MarketDataScraper()
pricedata_scraper = PriceDataScraper()
def index(req):
    """Render the landing page."""
    return render(req, template_name='index.html')
def currencies(req):
    """Render the currencies page with its nav tab marked active."""
    return render(req, 'currencies.html',
                  context={'currencies_page_active': True})
def market(req):
    """Render the market page with its nav tab marked active."""
    return render(req, 'market.html',
                  context={'market_page_active': True})
def news(req):
    """Render the news page with its nav tab marked active."""
    return render(req, 'news.html',
                  context={'news_page_active': True})
@login_required
def dev(req):
    """Render the developer page listing all registered users (login only)."""
    context = {
        'dev_page_active': True,
        'users': User.objects.all(),
    }
    return render(req, 'dev.html', context=context)
@login_required
def signup(req):
    """Superuser-only user registration.

    GET renders an empty form; POST validates it and, on success, creates
    the user and reports the new username back to the template.
    """
    if not req.user.is_superuser:
        return redirect('index')
    context = {}
    if req.method == 'POST':
        form = UserCreationForm(req.POST)
        created = form.is_valid()
        context['status'] = created
        if created:
            form.save()
            context['new_user_username'] = form.cleaned_data.get('username')
    else:
        form = UserCreationForm()
    context['form'] = form
    return render(req, 'signup.html', context=context)
@ratelimit(key='ip', rate='10/m')
def api_index(req):
    """API root: liveness/version greeting."""
    return JsonResponse({
        'status': 'OK',
        'v': 0.01,
        'time': timezone.now(),
        'msg': 'Welcome to sentpredapp API!',
    })
@ratelimit(key='ip', rate='10/m')
def api_get_market_last(req):
    """Return today's scraped global market data as JSON (GET only)."""
    if req.method != 'GET':
        return JsonResponse({'status': 'FAIL'})
    today_data = list(marketdata_scraper.scrape_today_data())
    return JsonResponse({
        'status': 'OK',
        'data': [entry.jsonify() for entry in today_data],
    })
@ratelimit(key='ip', rate='10/m')
def api_get_price_last(req):
    """Return the currently scraped full price data as JSON (GET only)."""
    if req.method != 'GET':
        return JsonResponse({'status': 'FAIL'})
    return JsonResponse({
        'status': 'OK',
        'data': pricedata_scraper.scrape_current_full_data(),
    })
@ratelimit(key='ip', rate='10/m')
def api_get_tweets_last(req):
    """Return the 20 most recent stored tweets as JSON (GET only)."""
    if req.method != 'GET':
        return JsonResponse({'status': 'FAIL'})
    latest = twitter_scraper_performer.get_last_tweets(20)
    return JsonResponse({
        'status': 'OK',
        'data': [tweet.jsonify() for tweet in latest],
    })
@ratelimit(key='ip', rate='10/m')
def api_get_reddit_last(req):
    """Placeholder endpoint for the latest reddit data (not implemented)."""
    # todo: return actual reddit data once a scraper is available
    status = 'OK' if req.method == 'GET' else 'FAIL'
    return JsonResponse({'status': status})
@ratelimit(key='ip', rate='10/m')
def api_get_news_last(req):
    """Placeholder endpoint for the latest news data (not implemented)."""
    # todo: return actual news data once a scraper is available
    status = 'OK' if req.method == 'GET' else 'FAIL'
    return JsonResponse({'status': status})
@ratelimit(key='ip', rate='10/m')
@csrf_exempt
def api_update_last_data(req):
    """Trigger a fresh scrape for the data source named by POST btn_id.

    Returns the newly scraped data, serialized; unknown button ids yield
    {'status': 'FAIL'}, the not-yet-implemented ones an empty object.
    """
    if req.method != 'POST':
        return JsonResponse({'status': 'FAIL'})
    button_id = req.POST['btn_id']
    resp_data = {}
    if button_id == "updTwitterLast":
        resp_data = twitter_scraper_performer.scrape_last_updated_data()
        resp_data['data'] = [tweet.jsonify() for tweet in resp_data['data']]
    elif button_id in ("updRedditLast", "updNewsLast"):
        pass  # scrapers not wired up yet; respond with an empty object
    elif button_id == "updFinGlobalLast":
        resp_data = fin_scraper_performer.scrape_market_data()
        resp_data['data'] = [entry.jsonify() for entry in resp_data['data']]
    elif button_id == "updFinCurrenciesLast":
        resp_data = fin_scraper_performer.scrape_price_data()
        resp_data['data'] = [entry.jsonify() for entry in resp_data['data']]
    else:
        resp_data['status'] = 'FAIL'
    return JsonResponse(resp_data)
@ratelimit(key='ip', rate='10/m')
@csrf_exempt
def api_update_range_data(req):
    """Scrape a date range (POST: btn_id, from_time, to_time as m/d/Y)."""
    if req.method != 'POST':
        return JsonResponse({'status': 'FAIL'})
    params = req.POST
    print('\n{0}\n'.format(params))
    if not params['btn_id'] or not params['from_time'] or not params['to_time']:
        return JsonResponse({'status': 'FAIL'})
    from_date = datetime.strptime(params['from_time'], "%m/%d/%Y")
    to_date = datetime.strptime(params['to_time'], "%m/%d/%Y")
    button_id = params['btn_id']
    resp_data = {}
    if button_id == "updTwitterRange":
        resp_data = twitter_scraper_performer.scrape_date_range_data(from_date, to_date)
        resp_data['data'] = [tweet.jsonify() for tweet in resp_data['data']]
        # todo: clean twitter data
    elif button_id in ("updRedditRange", "updNewsRange"):
        pass  # scrapers not wired up yet; respond with an empty object
    else:
        resp_data['status'] = 'FAIL'
    return JsonResponse(resp_data)
| 30.222727 | 105 | 0.628666 | 0 | 0 | 0 | 0 | 5,097 | 0.766581 | 0 | 0 | 934 | 0.140472 |
987ad6ec30c9c05b606f50c7a12db0da2ad93e50 | 12,104 | py | Python | bluesky_browser/viewer/figures.py | EliotGann/bluesky-browser | e86e259c21d6dbeb781f32de8485f706b3b17bdc | [
"BSD-3-Clause"
] | null | null | null | bluesky_browser/viewer/figures.py | EliotGann/bluesky-browser | e86e259c21d6dbeb781f32de8485f706b3b17bdc | [
"BSD-3-Clause"
] | null | null | null | bluesky_browser/viewer/figures.py | EliotGann/bluesky-browser | e86e259c21d6dbeb781f32de8485f706b3b17bdc | [
"BSD-3-Clause"
] | null | null | null | import collections
import logging
from event_model import DocumentRouter, RunRouter
import numpy
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import matplotlib
from qtpy.QtWidgets import ( # noqa
QLabel,
QWidget,
QVBoxLayout,
)
from traitlets.traitlets import Bool, List, Set
from traitlets.config import Configurable
from .hints import hinted_fields, guess_dimensions # noqa
from .image import LatestFrameImageManager
from ..utils import load_config
matplotlib.use('Qt5Agg')  # must set before importing matplotlib.pyplot
import matplotlib.pyplot as plt  # noqa
# Shared module-level logger for the viewer components below.
log = logging.getLogger('bluesky_browser')
class LinePlotManager(Configurable):
    """
    Manage the line plots for one FigureManager.
    """
    omit_single_point_plot = Bool(True, config=True)

    def __init__(self, fig_manager, dimensions):
        self.update_config(load_config())
        self.fig_manager = fig_manager
        self.start_doc = None
        self.dimensions = dimensions
        self.dim_streams = set(stream for _, stream in self.dimensions)
        if len(self.dim_streams) > 1:
            raise NotImplementedError

    def __call__(self, name, start_doc):
        """RunRouter factory entry point: remember the start doc."""
        self.start_doc = start_doc
        return [], [self.subfactory]

    def subfactory(self, name, descriptor_doc):
        """Build one Line callback per plottable field for this stream."""
        if self.omit_single_point_plot and self.start_doc.get('num_points') == 1:
            return []
        if len(self.dimensions) > 1:
            return []  # This is a job for Grid.
        fields = set(hinted_fields(descriptor_doc))
        # Filter out the fields with a data type or shape that we cannot
        # represent in a line plot.
        for field in list(fields):
            dtype = descriptor_doc['data_keys'][field]['dtype']
            if dtype not in ('number', 'integer'):
                fields.discard(field)
            ndim = len(descriptor_doc['data_keys'][field]['shape'] or [])
            if ndim != 0:
                fields.discard(field)
        callbacks = []
        dim_stream, = self.dim_streams  # TODO Handle multiple dim_streams.
        if descriptor_doc.get('name') == dim_stream:
            dimension, = self.dimensions
            x_keys, stream_name = dimension
            fields -= set(x_keys)
            assert stream_name == dim_stream  # TODO Handle multiple dim_streams.
            for x_key in x_keys:
                figure_label = f'Scalars v {x_key}'
                fig = self.fig_manager.get_figure(
                    ('line', x_key, tuple(fields)), figure_label, len(fields), sharex=True)
                for y_key, ax in zip(fields, fig.axes):
                    log.debug('plot %s against %s', y_key, x_key)
                    ylabel = y_key
                    y_units = descriptor_doc['data_keys'][y_key].get('units')
                    if y_units:
                        ylabel += f' [{y_units}]'
                    # BUG FIX: the fully built label (with units) was
                    # discarded and the bare key used instead.
                    ax.set_ylabel(ylabel)
                    # Set xlabel only on lowest axes, outside for loop below.

                    def func(event_page, y_key=y_key, x_key=x_key):
                        """
                        Extract x points and y points to plot out of an EventPage.

                        This will be passed to LineWithPeaks.
                        """
                        # BUG FIX: x_key is now bound as a default too.
                        # Previously it was a late-binding closure over the
                        # loop variable, so with multiple x_keys every func
                        # would have used the *last* one.
                        y_data = event_page['data'][y_key]
                        if x_key == 'time':
                            t0 = self.start_doc['time']
                            x_data = numpy.asarray(event_page['time']) - t0
                        elif x_key == 'seq_num':
                            x_data = event_page['seq_num']
                        else:
                            x_data = event_page['data'][x_key]
                        return x_data, y_data

                    line = Line(func, ax=ax)
                    callbacks.append(line)
                if fields:
                    # Set the xlabel on the bottom-most axis.
                    if x_key == 'time':
                        xlabel = x_key
                        x_units = 's'
                    elif x_key == 'seq_num':
                        xlabel = 'sequence number'
                        x_units = None
                    else:
                        xlabel = x_key
                        x_units = descriptor_doc['data_keys'][x_key].get('units')
                    if x_units:
                        xlabel += f' [{x_units}]'
                    # BUG FIX: was ax.set_xlabel(x_key) -- units were lost.
                    ax.set_xlabel(xlabel)
                    fig.tight_layout()
        # TODO Plot other streams against time.
        for callback in callbacks:
            callback('start', self.start_doc)
            callback('descriptor', descriptor_doc)
        return callbacks
class Line(DocumentRouter):
    """
    Draw a matplotlib Line Artist and update it for each Event.

    Parameters
    ----------
    func : callable
        This must accept an EventPage and return two lists of floats
        (x points and y points). The two lists must contain an equal number of
        items, but that number is arbitrary. That is, a given document may add
        one new point to the plot, no new points, or multiple new points.
    label_template : string
        This string will be formatted with the RunStart document. Any missing
        values will be filled with '?'. If the keyword argument 'label' is
        given, this argument will be ignored.
    ax : matplotlib Axes, optional
        If None, a new Figure and Axes are created.
    **kwargs
        Passed through to :meth:`Axes.plot` to style Line object.
    """
    def __init__(self, func, *, label_template='{scan_id} [{uid:.8}]', ax=None, **kwargs):
        self.func = func
        if ax is None:
            import matplotlib.pyplot as plt
            _, ax = plt.subplots()
        self.ax = ax
        self.line, = ax.plot([], [], **kwargs)
        self.x_data = []
        self.y_data = []
        self.label_template = label_template
        self.label = kwargs.get('label')

    def start(self, doc):
        """Label the line from the RunStart doc (missing keys become '?')."""
        if self.label is None:
            d = collections.defaultdict(lambda: '?')
            d.update(**doc)
            label = self.label_template.format_map(d)
        else:
            label = self.label
        if label:
            self.line.set_label(label)
            self.ax.legend(loc='best')

    def event_page(self, doc):
        """Extract new points via ``func`` and extend the line."""
        x, y = self.func(doc)
        self._update(x, y)

    def _update(self, x, y):
        """
        Takes in new x and y points and redraws plot if they are not empty.
        """
        if not len(x) == len(y):
            # BUG FIX: the message was a plain string, so "{len(x)}" was
            # printed literally; now an actual f-string.
            raise ValueError(f"User function is expected to provide the same "
                             f"number of x and y points. Got {len(x)} x points "
                             f"and {len(y)} y points.")
        if not len(x):
            # No new data. Short-circuit.
            # BUG FIX: was ``if not x:`` which raises "truth value is
            # ambiguous" when func returns a multi-element numpy array.
            return
        self.x_data.extend(x)
        self.y_data.extend(y)
        self.line.set_data(self.x_data, self.y_data)
        self.ax.relim(visible_only=True)
        self.ax.autoscale_view(tight=True)
        self.ax.figure.canvas.draw_idle()
class Grid(DocumentRouter):
    """
    Draw a matplotlib AxesImage Artist and update it for each Event.

    The purposes of this callback is to create (on initialization) of a
    matplotlib grid image and then update it with new data for every `event`.

    NOTE: Some important parameters are fed in through **kwargs like `extent`
    which defines the axes min and max and `origin` which defines if the grid
    co-ordinates start in the bottom left or top left of the plot. For more
    info see https://matplotlib.org/tutorials/intermediate/imshow_extent.html
    or https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.imshow.html#matplotlib.axes.Axes.imshow

    Parameters
    ----------
    func : callable
        This must accept a BulkEvent and return three lists of floats (x
        grid co-ordinates, y grid co-ordinates and grid position intensity
        values). The three lists must contain an equal number of items, but
        that number is arbitrary. That is, a given document may add one new
        point, no new points or multiple new points to the plot.
    shape : tuple
        The (row, col) shape of the grid.
    ax : matplotlib Axes, optional.
        if ``None``, a new Figure and Axes are created.
    **kwargs
        Passed through to :meth:`Axes.imshow` to style the AxesImage object.
    """
    def __init__(self, func, shape, *, ax=None, **kwargs):
        self.func = func
        self.shape = shape
        if ax is None:
            _, ax = plt.subplots()
        self.ax = ax
        # start with an all-NaN grid so untouched cells render as blanks
        self.grid_data = numpy.full(self.shape, numpy.nan)
        # BUG FIX: Axes.imshow returns a single AxesImage (not a sequence),
        # so the original ``self.image, = ax.imshow(...)`` tuple-unpacking
        # raised TypeError.
        self.image = ax.imshow(self.grid_data, **kwargs)

    def event_page(self, doc):
        '''
        Takes in a bulk_events document and updates grid_data with the values
        returned from self.func(doc)

        Parameters
        ----------
        doc : dict
            The bulk event dictionary that contains the 'data' and 'timestamps'
            associated with the bulk event.

        Returns
        -------
        x_coords, y_coords, I_vals : Lists
            These are lists of x co-ordinate, y co-ordinate and intensity
            values arising from the bulk event.
        '''
        x_coords, y_coords, I_vals = self.func(doc)
        self._update(x_coords, y_coords, I_vals)

    def _update(self, x_coords, y_coords, I_vals):
        '''
        Updates self.grid_data with the values from the lists x_coords,
        y_coords, I_vals.

        Parameters
        ----------
        x_coords, y_coords, I_vals : Lists
            These are lists of x co-ordinate, y co-ordinate and intensity
            values arising from the event. The length of all three lists must
            be the same.
        '''
        if not len(x_coords) == len(y_coords) == len(I_vals):
            raise ValueError("User function is expected to provide the same "
                             "number of x, y and I points. Got {0} x points, "
                             "{1} y points and {2} I values."
                             "".format(len(x_coords), len(y_coords),
                                       len(I_vals)))
        if not x_coords:
            # No new data, Short-circuit.
            return
        # Update grid_data and the plot.
        self.grid_data[x_coords, y_coords] = I_vals
        self.image.set_array(self.grid_data)
class FigureManager(Configurable):
    """
    For a given Viewer, encapsulate the matplotlib Figures and associated tabs.
    """
    factories = List([
        LinePlotManager,
        LatestFrameImageManager],
        config=True)
    enabled = Bool(True, config=True)
    exclude_streams = Set([], config=True)

    def __init__(self, add_tab):
        self.update_config(load_config())
        self.add_tab = add_tab
        self._figures = {}

    def get_figure(self, key, label, *args, **kwargs):
        """Return the Figure registered under *key*, creating it on first use."""
        if key in self._figures:
            return self._figures[key]
        return self._add_figure(key, label, *args, **kwargs)

    def _add_figure(self, key, label, *args, **kwargs):
        """Create a Figure inside a fresh Qt tab and register it under *key*."""
        tab = QWidget()
        figure, _ = plt.subplots(*args, **kwargs)
        canvas = FigureCanvas(figure)
        canvas.setMinimumWidth(640)
        canvas.setParent(tab)
        toolbar = NavigationToolbar(canvas, tab)
        title = QLabel(label)
        title.setMaximumHeight(20)
        layout = QVBoxLayout()
        layout.addWidget(title)
        layout.addWidget(canvas)
        layout.addWidget(toolbar)
        tab.setLayout(layout)
        self.add_tab(tab, label)
        self._figures[key] = figure
        return figure

    def __call__(self, name, start_doc):
        """RunRouter factory: fan the run out to each configured sub-factory."""
        if not self.enabled:
            return [], []
        dimensions = start_doc.get('hints', {}).get('dimensions', guess_dimensions(start_doc))
        router = RunRouter(
            [factory(self, dimensions) for factory in self.factories])
        router('start', start_doc)
        return [router], []
| 37.015291 | 102 | 0.574686 | 11,373 | 0.939607 | 0 | 0 | 0 | 0 | 0 | 0 | 4,515 | 0.373017 |
987b85fcc75895ef8fd5e121355ef5a571c2a852 | 586 | py | Python | gpytorch/priors/__init__.py | bdecost/gpytorch | a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a | [
"MIT"
] | null | null | null | gpytorch/priors/__init__.py | bdecost/gpytorch | a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a | [
"MIT"
] | null | null | null | gpytorch/priors/__init__.py | bdecost/gpytorch | a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a | [
"MIT"
] | 1 | 2018-11-15T10:03:40.000Z | 2018-11-15T10:03:40.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .gamma_prior import GammaPrior
from .multivariate_normal_prior import MultivariateNormalPrior
from .normal_prior import NormalPrior
from .smoothed_box_prior import SmoothedBoxPrior
from .wishart_prior import InverseWishartPrior, WishartPrior
from .lkj_prior import LKJCovariancePrior
# ``__all__`` must hold *names* (strings), not the objects themselves:
# ``from gpytorch.priors import *`` looks each entry up by string and
# raises TypeError on non-string items.
__all__ = [
    "GammaPrior",
    "InverseWishartPrior",
    "MultivariateNormalPrior",
    "NormalPrior",
    "SmoothedBoxPrior",
    "WishartPrior",
    "LKJCovariancePrior",
]
| 36.625 | 81 | 0.863481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
987d237bdddb2b7462cf1bdbb5b65fbc8aca3063 | 2,660 | py | Python | fuelweb_ui_test/tests/preconditions.py | Miroslav-Anashkin/fuel-main | 533d1722016da9774fb406f86d35942e5870ed73 | [
"Apache-2.0"
] | null | null | null | fuelweb_ui_test/tests/preconditions.py | Miroslav-Anashkin/fuel-main | 533d1722016da9774fb406f86d35942e5870ed73 | [
"Apache-2.0"
] | null | null | null | fuelweb_ui_test/tests/preconditions.py | Miroslav-Anashkin/fuel-main | 533d1722016da9774fb406f86d35942e5870ed73 | [
"Apache-2.0"
] | 1 | 2021-10-31T05:34:01.000Z | 2021-10-31T05:34:01.000Z | import time
from pageobjects.environments import Environments, Wizard, DeployChangesPopup
from pageobjects.header import TaskResultAlert
from pageobjects.nodes import Nodes, RolesPanel
from settings import OPENSTACK_CENTOS, OPENSTACK_RELEASE_CENTOS
from tests.base import BaseTestCase
class Environment:
    """UI preconditions: create OpenStack environments and deploy nodes."""

    @staticmethod
    def _create_cluster(name, release, mode_attr, network_attr=None):
        """Drive the creation wizard shared by all three public factories.

        mode_attr is the Wizard attribute for the deployment mode radio
        button; network_attr, if given, selects a network segmentation
        option on the network step.
        """
        BaseTestCase.get_home()
        Environments().create_cluster_box.click()
        with Wizard() as w:
            w.name.send_keys(name)
            w.release.select_by_visible_text(release)
            w.next.click()
            getattr(w, mode_attr).click()
            if network_attr is None:
                # skip straight through the remaining wizard pages
                for i in range(5):
                    w.next.click()
            else:
                # advance to the network page, pick the option, then finish
                for i in range(2):
                    w.next.click()
                getattr(w, network_attr).click()
                for i in range(3):
                    w.next.click()
            w.create.click()
            w.wait_until_exists()

    @staticmethod
    def simple_flat(name=OPENSTACK_CENTOS,
                    release=OPENSTACK_RELEASE_CENTOS):
        """Create a multinode environment with default (flat) networking."""
        Environment._create_cluster(name, release, 'mode_multinode')

    @staticmethod
    def ha_flat(name=OPENSTACK_CENTOS,
                release=OPENSTACK_RELEASE_CENTOS):
        """Create an HA (compact) environment with default networking."""
        Environment._create_cluster(name, release, 'mode_ha_compact')

    @staticmethod
    def simple_neutron_gre(name=OPENSTACK_CENTOS,
                           release=OPENSTACK_RELEASE_CENTOS):
        """Create a multinode environment with Neutron GRE networking."""
        Environment._create_cluster(name, release, 'mode_multinode',
                                    network_attr='network_neutron_gre')

    @staticmethod
    def deploy_nodes(controllers=0, computes=0, cinders=0, cephs=0):
        """Assign roles to discovered nodes and start deployment."""
        def add(role, amount):
            # assign `role` to the first `amount` discovered nodes
            if amount < 1:
                return
            Nodes().add_nodes.click()
            time.sleep(1)
            for i in range(amount):
                Nodes().nodes_discovered[i].checkbox.click()
            getattr(RolesPanel(), role).click()
            Nodes().apply_changes.click()
            time.sleep(1)

        add('controller', controllers)
        add('compute', computes)
        add('cinder', cinders)
        add('ceph_osd', cephs)
        time.sleep(1)
        Nodes().deploy_changes.click()
        DeployChangesPopup().deploy.click()
        TaskResultAlert().close.click()
| 32.439024 | 77 | 0.58609 | 2,372 | 0.891729 | 0 | 0 | 2,330 | 0.87594 | 0 | 0 | 39 | 0.014662 |
987dfbf265d5a98d22ad6b1285c6bfb0af90398e | 3,080 | py | Python | web_client_external_partner_feedback.py | UKPLab/emnlp2019-NeuralWeb | 9f94eec6bff92396c0401b6981bcb46f8d5e1f80 | [
"Apache-2.0"
] | 3 | 2019-07-10T08:12:46.000Z | 2020-02-05T08:36:45.000Z | web_client_external_partner_feedback.py | UKPLab/emnlp2019-NeuralWeb | 9f94eec6bff92396c0401b6981bcb46f8d5e1f80 | [
"Apache-2.0"
] | 6 | 2020-01-28T22:49:43.000Z | 2022-02-10T00:11:52.000Z | web_client_external_partner_feedback.py | UKPLab/emnlp2019-NeuralWeb | 9f94eec6bff92396c0401b6981bcb46f8d5e1f80 | [
"Apache-2.0"
] | 1 | 2021-02-16T01:57:07.000Z | 2021-02-16T01:57:07.000Z | import base64
import datetime
import http.client
import json
import sys
from pyblake2 import blake2b
from flask import Flask, request
from flask import render_template
from util.config import host_config
app = Flask(__name__)
AUTH_SIZE = 16  # BLAKE2b digest size in bytes for the request MAC
# NOTE(review): credentials committed in source; in any real deployment
# these should come from the environment or a secrets store.
API_KEY = 'aaeaa04350f3485eb3074dd3a2c4429a' # use the provided one
SECRET_KEY = '2a4309a8a2c54e539e5cb57e3a4816d9' # use the provided one; keep this one well protected
def generate(msg):
    """Return the keyed BLAKE2b MAC (hex digest) of *msg*.

    Used as the 'proof' field, demonstrating knowledge of SECRET_KEY.
    """
    mac = blake2b(digest_size=AUTH_SIZE, key=SECRET_KEY.encode('utf-8'))
    mac.update(msg.encode('utf-8'))
    return mac.hexdigest()
def send_response(data):
    """POST *data* as JSON to the flair feedback endpoint; return raw body."""
    host = '0.0.0.0'
    port = '12892'
    path = '/api/external/v1/feedback/flair'
    headers = {"Content-type": "application/json", "charset": "utf-8"}
    connection = http.client.HTTPConnection(host, port)
    connection.request("POST", path, json.dumps(data), headers)
    response = connection.getresponse()
    print(response.status, response.reason)
    body = response.read()
    print(body)
    return body
def encode_request_json(text):
    """Build the signed request payload expected by the feedback API."""
    payload = {
        'text': base64.b64encode(text.encode('utf-8')).decode('utf-8'),
        'api-key': API_KEY,
        'user': '2IFQKT0_1_1',
        'case': '1',
        'proof': generate(text),
    }
    print(payload)
    return payload
import json
def encode_response_json(response):
    """Flatten the tagger JSON *response* (a JSON string) into a template context dict.

    ``response["content"]`` / ``response["reasoning"]`` each hold a list whose
    first entry is a sequence of up to five fields (index, position, word,
    token string, comment). List-valued fields are joined into a
    space-separated string of their elements.
    """
    response = json.loads(response)
    data = {}
    # NOTE(review): "token_string_c" appears in both key lists; a 4th reasoning
    # field would overwrite the content value. Looks like a typo for
    # "token_string_r" -- confirm against the main.html template before renaming.
    keys_content = ["index_c", "position_c", "word_c", "token_string_c", "comment_c"]
    keys_reasoning = ["index_r", "position_r", "word_r", "token_string_c", "comment_r"]

    def _flatten(value):
        # Join list fields element-wise. The original joined str(list)
        # character-by-character, producing garbage like "[ ' a ' , ...".
        if isinstance(value, list):
            return " ".join(str(item) for item in value)
        return value

    for keys, entries in ((keys_content, response["content"]),
                          (keys_reasoning, response["reasoning"])):
        if len(entries) > 0:
            # min() guards against entries longer than the key list (IndexError).
            for i in range(min(len(entries[0]), len(keys))):
                data[keys[i]] = _flatten(entries[0][i])
    print(data)
    return data
@app.route('/predict', methods=['GET'])
def api_feedback_with_model():
    """GET /predict?text=... -- forward text to the feedback API and render main.html."""
    mytext = request.args.get('text', '')
    data=encode_request_json(mytext)
    tag_result=send_response(data)
    mydata=encode_response_json(tag_result)
    return render_template('main.html', **mydata)
# ### Host Server ####
if __name__ == '__main__':
    # Optional single CLI arg selects the host_config environment; default 'prod'.
    env = sys.argv[1] if len(sys.argv) == 2 else 'prod'
    print('{0} App will be served in port:{1}....'.format(datetime.datetime.now(), host_config[env]['port']))
    print('{0} Loading models...'.format(datetime.datetime.now()))
    #preload_models()
    print('{0} Models loaded....'.format(datetime.datetime.now()))
    print('{0} Serving in port:{1}....'.format(datetime.datetime.now(), host_config[env]['port']))
    #app.run(host=host_config[env]['host'], port=host_config[env]['port'], threaded=True)
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger -- not
    # safe for production; the commented-out line above uses host_config instead.
    app.run(debug=True, host='0.0.0.0')
    print('started')
| 29.333333 | 152 | 0.669156 | 0 | 0 | 0 | 0 | 279 | 0.090584 | 0 | 0 | 860 | 0.279221 |
987ebf3bb40f294fbfe683c9dee6246ece40b947 | 937 | py | Python | globus_contents_manager/scripts/spawn_tokens.py | NickolausDS/globus-contents-manager | 40ad5e8ef97686feff4ae36ff0f71b0c600c3e83 | [
"Apache-2.0"
] | null | null | null | globus_contents_manager/scripts/spawn_tokens.py | NickolausDS/globus-contents-manager | 40ad5e8ef97686feff4ae36ff0f71b0c600c3e83 | [
"Apache-2.0"
] | null | null | null | globus_contents_manager/scripts/spawn_tokens.py | NickolausDS/globus-contents-manager | 40ad5e8ef97686feff4ae36ff0f71b0c600c3e83 | [
"Apache-2.0"
] | null | null | null | import os
import json
from fair_research_login import NativeClient
CLIENT_ID = 'e54de045-d346-42ef-9fbc-5d466f4a00c6'
APP_NAME = 'My App'
SCOPES = 'openid email profile urn:globus:auth:scope:transfer.api.globus.org:all urn:globus:auth:scope:search.api.globus.org:all'
CONFIG_FILE = 'tokens-data.json'
tokens = None
# try to load tokens from local file (native app config)
client = NativeClient(client_id=CLIENT_ID, app_name=APP_NAME)
try:
tokens = client.load_tokens(requested_scopes=SCOPES)
except:
pass
if not tokens:
# if no tokens, need to start Native App authentication process to get tokens
tokens = client.login(requested_scopes=SCOPES,
refresh_tokens=False)
try:
# save the tokens
client.save_tokens(tokens)
# create environment variable
os.environ['GLOBUS_DATA'] = json.dumps(tokens, indent=4, sort_keys=True)
except:
pass
| 29.28125 | 129 | 0.709712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.401281 |
9881d8d0fdd5338a351035a1a6b03ec773499908 | 1,996 | py | Python | new_listings_scraper.py | hokkiefrank/gateio-crypto-trading-bot-binance-announcements-new-coins | 86d7ed0dd612fe85f2cfc314a966d32912ae73ec | [
"MIT"
] | null | null | null | new_listings_scraper.py | hokkiefrank/gateio-crypto-trading-bot-binance-announcements-new-coins | 86d7ed0dd612fe85f2cfc314a966d32912ae73ec | [
"MIT"
] | null | null | null | new_listings_scraper.py | hokkiefrank/gateio-crypto-trading-bot-binance-announcements-new-coins | 86d7ed0dd612fe85f2cfc314a966d32912ae73ec | [
"MIT"
] | null | null | null | import requests
import os.path, json
import time
from store_order import *
from load_config import *
def get_last_coin():
    """
    Scrapes new listings page for and returns new Symbol when appropriate
    """
    # Binance CMS endpoint for the "New Listings" announcement category.
    latest_announcement = requests.get("https://www.binance.com/bapi/composite/v1/public/cms/article/catalog/list/query?catalogId=48&pageNo=1&pageSize=15")
    latest_announcement = latest_announcement.json()
    # Title of the most recent announcement only.
    latest_announcement = latest_announcement['data']['articles'][0]['title']
    # Binance makes several annoucements, irrevelant ones will be ignored
    exclusions = ['Futures', 'Margin', 'adds', 'Adds']
    for item in exclusions:
        if item in latest_announcement:
            return None
    # (index, char) pairs of the title.
    enum = [item for item in enumerate(latest_announcement)]
    #Identify symbols in a string by using this janky, yet functional line
    # Keeps each uppercase char whose following char is uppercase, ' ' or ')'.
    # NOTE(review): enum[enum.index(item)+1] raises IndexError if the title
    # ends with an uppercase character -- assumes titles always end with ')'.
    uppers = ''.join(item[1] for item in enum if item[1].isupper() and (enum[enum.index(item)+1][1].isupper() or enum[enum.index(item)+1][1]==' ' or enum[enum.index(item)+1][1]==')') )
    return uppers
def store_new_listing(listing):
    """
    Only store a new listing if different from existing value
    """
    path = 'new_listing.json'
    if not os.path.isfile(path):
        # First run: create the file with the current listing.
        created = store_order(path, listing)
        print("[SCRAPER-Thread]File does not exist, creating file")
        return created
    existing = load_order(path)
    if listing in existing:
        # Already recorded -- nothing to update.
        return existing
    store_order(path, listing)
    print("[SCRAPER-Thread]New listing detected, updating file")
    return listing
def search_and_update():
    """
    Pretty much our main func
    """
    # Poll the announcements endpoint forever; intended to run in its own thread.
    while True:
        latest_coin = get_last_coin()
        if latest_coin:
            store_new_listing(latest_coin)
        # NOTE(review): message says "every 1 minute" but the sleep is 10 seconds.
        print("[SCRAPER-Thread]Checking for coin announcements every 1 minute (in a separate thread)")
        time.sleep(10)
| 32.721311 | 184 | 0.662826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 777 | 0.389279 |
98821ed78144f8e01e3dd237b4ed382e65a64488 | 797 | py | Python | setup.py | poliquin/pyfixwidth | a41f25b788f5bd78465f51b42258922709dce9bc | [
"MIT"
] | 6 | 2020-02-13T22:20:15.000Z | 2021-10-12T02:30:51.000Z | setup.py | poliquin/pyfixwidth | a41f25b788f5bd78465f51b42258922709dce9bc | [
"MIT"
] | null | null | null | setup.py | poliquin/pyfixwidth | a41f25b788f5bd78465f51b42258922709dce9bc | [
"MIT"
] | 1 | 2021-06-16T21:21:38.000Z | 2021-06-16T21:21:38.000Z | # -*- coding: utf8 -*-
from distutils.core import setup
setup(
name='pyfixwidth',
packages=['fixwidth'],
version='0.1.1',
description="Read fixed width data files",
author='Chris Poliquin',
author_email='chrispoliquin@gmail.com',
url='https://github.com/poliquin/pyfixwidth',
keywords=['data', 'fixed width', 'parse', 'parser'],
classifiers=[
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Utilities'
],
long_description="""\
Read fixed width data files
---------------------------
Python 3 module for reading fixed width data files and converting the field
contents to appropriate Python types.
"""
)
| 28.464286 | 75 | 0.624843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 531 | 0.666248 |
9882eef9c8c7e6195e084b2763326602869d2ca1 | 1,274 | py | Python | examples/oucru/oucru-full/test_invert_dicts.py | bahp/datablend | f0b69a012af6ea7cedc9210d46b3047d8e0bf504 | [
"FTL",
"RSA-MD",
"CECILL-B"
] | null | null | null | examples/oucru/oucru-full/test_invert_dicts.py | bahp/datablend | f0b69a012af6ea7cedc9210d46b3047d8e0bf504 | [
"FTL",
"RSA-MD",
"CECILL-B"
] | 1 | 2021-05-26T13:09:08.000Z | 2021-05-26T13:09:08.000Z | examples/oucru/oucru-full/test_invert_dicts.py | bahp/datablend | f0b69a012af6ea7cedc9210d46b3047d8e0bf504 | [
"FTL",
"RSA-MD",
"CECILL-B"
] | null | null | null | # Libraries
import ast
import collections
import pandas as pd
# -----------------
# Methods
# -----------------
def invert(d):
    """Swap keys and values of a dict; non-dict inputs pass through unchanged.

    If several keys share a value, only one of them survives the inversion.
    """
    if not isinstance(d, dict):
        return d
    return dict((value, key) for key, value in d.items())
def str2eval(x):
    """Parse a Python-literal string with ast.literal_eval; None/NaN maps to None."""
    return None if pd.isnull(x) else ast.literal_eval(x)
def sortkeys(d):
    """Return an OrderedDict of *d* sorted by key; non-dict inputs pass through."""
    if not isinstance(d, dict):
        return d
    ordered = collections.OrderedDict()
    for key in sorted(d):
        ordered[key] = d[key]
    return ordered
# Site codes; each has its own fixed-config workbook.
codes = ['06dx', '13dx', '32dx', '42dx', 'md']
path = "../oucru-{0}/resources/outputs/"
path += "templates/ccfgs_{1}_data_fixed.xlsx"
# Loop
for c in codes:
    # Create path
    path_tmp = path.format(c, c)
    # Read excel (all sheets -> dict of DataFrames)
    sheets = pd.read_excel(path_tmp, sheet_name=None)
    # Loop: parse then invert each 'to_replace' mapping.
    # NOTE(review): attribute-style assignment (df.to_replace = ...) only
    # updates an existing column; pandas warns if the name is new -- confirm
    # every sheet has a 'to_replace' column.
    for sheet, df in sheets.items():
        df.to_replace = df.to_replace.apply(str2eval)
        df.to_replace = df.to_replace.apply(invert)
        #df.to_replace = df.to_replace.apply(sortkeys)
    # Create fullpath
    fullpath = path_tmp.replace('.xlsx', '_inverted.xlsx')
    # Creating Excel Writer Object from Pandas
    writer = pd.ExcelWriter(fullpath, engine='xlsxwriter')
    # Save each frame
    for sheet, frame in sheets.items():
        frame.to_excel(writer, sheet_name=sheet, index=False)
    # critical last step
    # NOTE(review): ExcelWriter.save() is deprecated in newer pandas (use close()).
    writer.save()
9884063271053768538df189e1b6b1a2f033e411 | 3,424 | py | Python | twitter_likes.py | reinhartP/twitter-likes-media-downloader | d49857d9ef36776d47e076b88253d4dc5da7ebe2 | [
"MIT"
] | 1 | 2020-04-09T01:09:12.000Z | 2020-04-09T01:09:12.000Z | twitter_likes.py | reinhartP/twitter-likes-media-downloader | d49857d9ef36776d47e076b88253d4dc5da7ebe2 | [
"MIT"
] | null | null | null | twitter_likes.py | reinhartP/twitter-likes-media-downloader | d49857d9ef36776d47e076b88253d4dc5da7ebe2 | [
"MIT"
] | null | null | null | import argparse
import twitter
import os
import json
from likes import Likes
import sys
import time
class Downloader:
    """CLI driver that downloads media from a Twitter user's liked tweets."""

    def __init__(self):
        # Directory containing this script; config and DB files live beside it.
        self._current_path = os.path.dirname(os.path.realpath(__file__))

    def downloadLikes(self, api, screen_name, force_redownload):
        """Fetch liked tweets of *screen_name* and download their media."""
        liked_tweets = Likes(
            api, screen_name, self._current_path, force_redownload)
        liked_tweets.createTable()
        liked_tweets.download()

    def generateConfig(self):
        """Write a skeleton config.json (overwriting any existing one) and exit."""
        base = {
            "consumer_key": "",
            "consumer_secret": "",
            "access_token_key": "",
            "access_token_secret": "",
        }
        with open(os.path.join(self._current_path, "config.json"), "w", encoding="utf-8") as f:
            json.dump(base, f, ensure_ascii=False, indent=4)
        print("Config generated at config.json")
        sys.exit()

    def main(self):
        """Parse CLI arguments, build the API client and run the download loop."""
        parser = argparse.ArgumentParser(
            description="Download media from liked tweets of a specified user."
        )
        parser.add_argument(
            "-u", "--user", help="Twitter username, @twitter would just be twitter"
        )
        parser.add_argument(
            "--images",
            help="Download only images, downloads videos and images by default",
            action="store_true",
        )
        parser.add_argument(
            "--videos",
            help="Download only videos, downloads videos and images by default",
            action="store_true",
        )
        parser.add_argument(
            "-g",
            "--generate-config",
            help="Generates skeleton config file(config.json), will overwrite existing config if exists",
            action="store_true",
        )
        parser.add_argument(
            "-c",
            "--config",
            help="JSON file containing API keys. Default(config.json) is used if not specified",
        )
        parser.add_argument(
            "-f", "--force", help="Redownloads all media", action="store_true"
        )
        parser.add_argument(
            "-l", "--loop", help="Run forever", action="store_true"
        )
        args = parser.parse_args()
        if args.generate_config:
            self.generateConfig()
        if not args.user:
            print("No user specified, exiting")
            sys.exit()
        config_name = "config.json"
        if args.config:
            config_name = args.config
        try:
            # Load API credentials and build the client.
            with open(os.path.join(self._current_path, config_name), "r", encoding="utf-8") as f:
                config = json.load(f)
                api = twitter.Api(
                    consumer_key=config["consumer_key"],
                    consumer_secret=config["consumer_secret"],
                    access_token_key=config["access_token_key"],
                    access_token_secret=config["access_token_secret"],
                    sleep_on_rate_limit=True,
                    tweet_mode="extended",
                )
        except FileNotFoundError:
            raise
        except json.decoder.JSONDecodeError:
            raise
        print(args.loop)
        # Download once; with --loop, repeat every 30 minutes.
        while True:
            self.downloadLikes(api, args.user, args.force)
            if not args.loop:
                break
            print(
                f"[{time.strftime('%m/%d/%Y %H:%M:%S', time.localtime())}] Running again in 30 minutes")
            time.sleep(30*60)
# Only run when executed as a script; the original ran on import too.
if __name__ == "__main__":
    downloader = Downloader()
    downloader.main()
| 31.703704 | 105 | 0.56104 | 3,275 | 0.956484 | 0 | 0 | 0 | 0 | 0 | 0 | 936 | 0.273364 |
9885bedc72d4f736abfc8e9d034f0b9e67c7f3ad | 701 | py | Python | test.py | lusing/algo | 0f870715fa87f29df77d2a338ca40c76de887b8f | [
"BSD-2-Clause"
] | null | null | null | test.py | lusing/algo | 0f870715fa87f29df77d2a338ca40c76de887b8f | [
"BSD-2-Clause"
] | null | null | null | test.py | lusing/algo | 0f870715fa87f29df77d2a338ca40c76de887b8f | [
"BSD-2-Clause"
] | null | null | null | import gym
import numpy as np
# Create the FrozenLake environment and show its spaces.
env = gym.make('FrozenLake-v0')
#env = env.unwrapped
print(env.observation_space)
print(env.action_space)
def play_policy(env, policy, render=True):
    """Run one episode following the stochastic *policy*; return the total reward.

    *policy* maps an observation to a probability vector over the env's actions.
    """
    episode_reward = 0
    obs = env.reset()
    done = False
    while not done:
        if render:
            env.render()
        action = np.random.choice(env.action_space.n, p=policy[obs])
        obs, reward, done, _ = env.step(action)
        episode_reward += reward
    return episode_reward
# Uniform random policy: equal probability for every action in every state.
random_policy = np.ones((env.unwrapped.nS, env.unwrapped.nA)) / env.unwrapped.nA
# Average return over 100 episodes (rendering each step).
episode_reward = [play_policy(env,random_policy) for _ in range(100)]
print(np.mean(episode_reward))
| 26.961538 | 80 | 0.676177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.049929 |
9885d62567b0049d499341f31cbcc14bbd3eeb04 | 1,616 | py | Python | tests/test_pyspy.py | gjoseph92/scheduler-profiling | 826ed2bc65b7126789ac0b828a4116d746e85975 | [
"MIT"
] | null | null | null | tests/test_pyspy.py | gjoseph92/scheduler-profiling | 826ed2bc65b7126789ac0b828a4116d746e85975 | [
"MIT"
] | null | null | null | tests/test_pyspy.py | gjoseph92/scheduler-profiling | 826ed2bc65b7126789ac0b828a4116d746e85975 | [
"MIT"
] | null | null | null | import json
import pathlib
import platform
import dask
import distributed
import pytest
import scheduler_profilers
pytest_plugins = ["docker_compose"]
def core_test(client: distributed.Client, tmp_path: pathlib.Path) -> None:
    """Profile a shuffle on *client* and verify scheduler + worker profiles exist."""
    df = dask.datasets.timeseries().persist()
    scheduler_prof_path = tmp_path / "profile.json"
    worker_prof_dir = tmp_path / "workers"
    # Profile the scheduler and all workers while a shuffle-heavy graph runs.
    with scheduler_profilers.pyspy_on_scheduler(
        scheduler_prof_path, client=client
    ), scheduler_profilers.pyspy(worker_prof_dir, client=client):
        df.set_index("id").size.compute(client=client)
    with open(scheduler_prof_path) as f:
        # Check the file is valid JSON
        profile = json.load(f)
        assert profile
    assert worker_prof_dir.exists()
    # One profile file per worker.
    assert len(list(worker_prof_dir.glob("*.json"))) == len(
        client.scheduler_info()["workers"]
    )
    for p in worker_prof_dir.glob("*.json"):
        with open(p) as f:
            # Check the file is valid JSON
            profile = json.load(f)
            assert profile
@pytest.mark.skipif(
    platform.system() != "Linux", reason="py-spy always requires root on macOS"
)
def test_local(tmp_path):
    """Run the core profiling test against a local in-process cluster."""
    client = distributed.Client(set_as_default=False)
    core_test(client, tmp_path)
    client.shutdown()
    client.close()
def test_prctl_on_docker(module_scoped_container_getter, tmp_path):
    """Run the core profiling test against a docker-compose scheduler."""
    network_info = module_scoped_container_getter.get("scheduler").network_info[0]
    client = distributed.Client(
        f"tcp://{network_info.hostname}:{network_info.host_port}", set_as_default=False
    )
    core_test(client, tmp_path)
| 28.350877 | 87 | 0.701114 | 0 | 0 | 0 | 0 | 255 | 0.157797 | 0 | 0 | 241 | 0.149134 |
9887beb0001dc94ee9fbc4630bc1eeda2bccd16f | 1,889 | py | Python | setup.py | starofrainnight/ncstyler | d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb | [
"MIT"
] | null | null | null | setup.py | starofrainnight/ncstyler | d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb | [
"MIT"
] | null | null | null | setup.py | starofrainnight/ncstyler | d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from pydgutils_bootstrap import use_pydgutils
use_pydgutils()

import pydgutils
from setuptools import setup, find_packages

try:
    # pip < 10 exposed parse_requirements publicly; newer pip moved it.
    from pip.req import parse_requirements
except ImportError:
    # Narrowed from a bare `except:`; private API, may break with future pip.
    from pip._internal.req import parse_requirements
package_name = 'ncstyler'

# Convert source to v2.x if we are using python 2.x.
source_dir = pydgutils.process()

# Exclude the original source package, only accept the preprocessed package!
our_packages = find_packages(where=source_dir)

# parse_requirements() returns generator of pip.req.InstallRequirement objects
# NOTE(review): `ir.req` was removed from pip's InstallRequirement in pip>=20.1
# (use `ir.requirement`); this only works with older pip versions.
requirements = parse_requirements("./requirements.txt", session=False)
requirements = [str(ir.req) for ir in requirements]

# README + changelog form the long description shown on PyPI.
long_description = (
    open("README.rst", "r").read()
    + "\n" +
    open("CHANGES.rst", "r").read()
)

setup(
    name=package_name,
    version="0.1.8",
    author="Hong-She Liang",
    author_email="starofrainnight@gmail.com",
    url="https://github.com/starofrainnight/%s" % package_name,
    description="Name Conventions Styler, a styler just target to naming conventions of source codes",
    long_description=long_description,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Software Development :: Libraries",
    ],
    install_requires=requirements,
    package_dir={"": source_dir},
    packages=our_packages,
    entry_points={
        'console_scripts': ['ncstyler=ncstyler.console:main'],
    },
    zip_safe=False, # Unpack the egg downloaded_file during installation.
)
| 33.140351 | 103 | 0.680783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 902 | 0.477501 |
98887097037d0611c0184d4d2ad833e081fa6d4a | 7,521 | py | Python | code/model.py | philippmarcus/CarND-Behavioral-Cloning-P3 | e5dc637dd211d4cd148f311dc36eb973bd80bf68 | [
"MIT"
] | null | null | null | code/model.py | philippmarcus/CarND-Behavioral-Cloning-P3 | e5dc637dd211d4cd148f311dc36eb973bd80bf68 | [
"MIT"
] | null | null | null | code/model.py | philippmarcus/CarND-Behavioral-Cloning-P3 | e5dc637dd211d4cd148f311dc36eb973bd80bf68 | [
"MIT"
] | null | null | null | import csv
import cv2
import numpy as np
import copy
from sklearn.utils import shuffle
"""
Data generator and augmentation methods. The generator calls
get_flipped_copies and get_color_inverted_copies when augmentation mode
is activated.

Color-inverted copies are added so the algorithm also works on streets that
are brighter than their environment.
"""
def get_flipped_copies(_X, _y):
    """Return horizontally mirrored copies of the images with negated steering angles."""
    flipped_images = [cv2.flip(image, 1) for image in _X]
    flipped_angles = [-angle for angle in _y]
    return np.array(flipped_images), flipped_angles
def get_color_inverted_copies(_X, _y):
    """Return colour-inverted (bitwise NOT per channel) copies; angles are unchanged."""
    inverted_images = [cv2.bitwise_not(image) for image in _X]
    return np.array(inverted_images), copy.copy(_y)
def generator(samples, batch_size=32, augmentation=True):
    """Yield shuffled (images, angles) batches forever, optionally augmented.

    Each sample row contributes its center, left and right camera images,
    with a steering-angle correction per camera. With augmentation the batch
    grows by flipped and colour-inverted copies, so the effective batch size
    is divided by 3 up front to compensate.
    """
    # add color inversed and flipped images later...
    batch_size = int(batch_size/3)
    num_samples = len(samples)
    # angle correction factors for center, left, right camera
    measurement_correction = {0:0., 1:0.2, 2:-0.2}
    while 1: # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            measurements = []
            for batch_sample in batch_samples:
                # For each parsed line, read in center, left, right image and angle
                for i in range(3):
                    source_path = batch_sample[i]
                    # Last element is the recording name appended at load time.
                    lap = batch_sample[-1]
                    filename = source_path.split("/")[-1]
                    current_path = 'data/{}/IMG/'.format(lap) + filename
                    img = cv2.imread(current_path)
                    assert img is not None
                    images.append(img)
                    measurement = float(batch_sample[3]) + measurement_correction[i]
                    measurements.append(measurement)
            # Augmentation: Add color inverted and horizontally flipped versions
            if augmentation:
                X_mirrored, y_mirrored = get_flipped_copies(images, measurements)
                images = np.concatenate((images, X_mirrored))
                measurements = np.concatenate((measurements, y_mirrored))
                # Inverting after mirroring also inverts the mirrored copies (4x total).
                X_clr_inv, y_clr_inv = get_color_inverted_copies(images, measurements)
                images = np.concatenate((images, X_clr_inv))
                measurements = np.concatenate((measurements, y_clr_inv))
            images = np.array(images)
            measurements = np.array(measurements)
            yield shuffle(images, measurements)
"""
Read in the created sample files. A recording N needs to be placed
in a subfolder of data/lapN/
The generator for validation does not use data augmentation. Three data
sets were created. Two on track 1 and one on track 2.
"""
laps=["lap1", "lap2", "lap3"]
samples = []
for lap in laps:
with open("data/{}/driving_log.csv".format(lap)) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
line.append(lap)
samples.append(line)
shuffle(samples)
print("Collected {} raw samples".format(len(samples)))
# Set our batch size
batch_size=200
# Compile and train the model using the generator function
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
# Create the generators for training and test
train_generator = generator(train_samples, batch_size=batch_size, augmentation=True)
validation_generator = generator(validation_samples, batch_size=batch_size, augmentation=False)
"""
The Keras model based on the plaidml backend to run it on the GPU of a MacBook Pro.
"""
import os
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, MaxPooling2D, Input, Activation, Dropout
from keras.layers.normalization import BatchNormalization
# Set to True to continue training on the stored model
CONTINUE_TRAINING = False
if CONTINUE_TRAINING:
# Continue training on the imported model
model = load_model("data/model.h5")
else:
# Define the model and train from the scretch
# Model based on: https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
model = Sequential()
#Preprocessing
model.add(Cropping2D(cropping=((70,25), (0,0)), input_shape=(160,320,3)))
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(65,320,3)))
# Network Model
# Convolutional layers
model.add(Conv2D(24, (5, 5), activation="elu", strides=(2, 2), padding="valid"))
model.add(Conv2D(36, (5, 5), activation="relu", strides=(2, 2), padding="valid"))
model.add(Conv2D(48, (5, 5), activation="elu", strides=(2, 2), padding="valid"))
model.add(Conv2D(64, (3, 3), activation="relu", strides=(1, 1), padding="valid"))
model.add(Conv2D(64, (3, 3), activation="elu", strides=(1, 1), padding="valid"))
model.add(Flatten())
# Dense layers, all with dropout
model.add(Dense(1064, kernel_initializer="he_normal"))
model.add(Dropout(0.5))
model.add(Dense(100, kernel_initializer="he_normal"))
model.add(Dropout(0.5))
model.add(Dense(50, kernel_initializer="he_normal"))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Dense(1))
# Use adam optimizer for adaptive learning rate
model.compile(loss="mse", optimizer="adam")
from keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)
from keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=3,
verbose=0, mode='auto')
# Perform the training on the loaded or defined model
history_object = model.fit_generator(train_generator,\
steps_per_epoch=np.ceil(len(train_samples)/batch_size),\
validation_data=validation_generator,\
validation_steps=np.ceil(len(validation_samples)/batch_size),\
epochs=150,\
verbose=1,
callbacks = [es]
)
model.save("data/model.h5")
"""
Plot the loss on training and validation data set
against the epochs.
"""
import matplotlib.pyplot as plt
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('learning_curve.png') | 40.654054 | 129 | 0.622258 | 0 | 0 | 2,356 | 0.313256 | 0 | 0 | 0 | 0 | 2,135 | 0.283872 |
9888a42f23f0d00ae83842fcbe258a139ecdf9f3 | 689 | py | Python | abc/abc243/d/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | 2 | 2022-01-22T07:56:58.000Z | 2022-01-24T00:29:37.000Z | abc/abc243/d/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | abc/abc243/d/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
N, X = na()
S = ns()
up = 0
tmp_S = ""
for c in S[::-1]:
if c == 'U':
up += 1
if c == 'L':
if up:
up -= 1
else:
tmp_S += c
if c == 'R':
if up:
up -= 1
else:
tmp_S += c
tmp_S = 'U' * up + tmp_S[::-1]
S = tmp_S
for c in S:
if c == 'L':
X = 2 * X
if c == 'R':
X = 2 * X + 1
if c == 'U':
X //= 2
print(X) | 17.225 | 53 | 0.441219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.040639 |
98897ab6c0c2e516144b6f2d4b7b401c420c7e80 | 1,580 | py | Python | test/fcos/test_map.py | aclex/detection-experiments | f1cbf115573a8d509553335c5904ebc2e11511d2 | [
"Apache-2.0"
] | 5 | 2020-06-08T08:21:03.000Z | 2021-03-03T21:54:06.000Z | test/fcos/test_map.py | aclex/detection-experiments | f1cbf115573a8d509553335c5904ebc2e11511d2 | [
"Apache-2.0"
] | 3 | 2021-02-06T20:21:02.000Z | 2021-06-06T18:46:27.000Z | test/fcos/test_map.py | aclex/detection-experiments | f1cbf115573a8d509553335c5904ebc2e11511d2 | [
"Apache-2.0"
] | 2 | 2020-06-08T08:21:05.000Z | 2021-02-06T11:44:04.000Z | import pytest
import math
import torch
from detector.fcos.map import Mapper
from test.fcos.level_map_fixtures import (
image_size,
strides,
sample,
targets,
expected_level_map_sizes,
expected_joint_map_8x8
)
@pytest.fixture
def expected_level_thresholds(expected_level_map_sizes, image_size):
    """Per-level (min, max) object-size thresholds scaled by each level's pixel size."""
    pixel_sizes = [1 / float(e) for e in expected_level_map_sizes]
    level_thresholds = ((1, 4), (2, 4), (2, 4), (2, 4))
    return tuple(
        (l[0] * p, l[1] * p) for l, p in zip(level_thresholds, pixel_sizes))
@pytest.fixture
def expected_area():
    """Normalized area of the 195x199 test box on a 256x256 image."""
    return 195 * 199 / (256 ** 2)
@pytest.fixture
def mapper(strides, image_size):
    """Mapper under test, configured for 3 classes."""
    m = Mapper(strides, image_size, num_classes=3)
    return m
def test_level_thresholds_calculation(strides, expected_level_thresholds):
    """Static threshold computation matches the fixture for a 256px image."""
    result = Mapper._calc_level_thresholds(strides, 256)
    assert result == expected_level_thresholds
def test_level_map_sizes_calculation(strides, expected_level_map_sizes):
    """Static level-map size computation matches the fixture for a 256px image."""
    result = Mapper._calc_level_map_sizes(strides, 256)
    assert result == expected_level_map_sizes
def test_area_calculation(sample, expected_area):
    """Box area of the first sample target matches the expected normalized area."""
    result = Mapper._calc_area(sample[0][0])
    assert result == expected_area
def test_map_sample(sample, mapper, expected_joint_map_8x8):
    """Mapping one sample produces the expected 8x8 joint map (index 2)."""
    maps = mapper._map_sample(*sample)
    assert maps is not None
    result_joint_map_8x8 = maps[2]
    assert result_joint_map_8x8.shape == expected_joint_map_8x8.shape
    assert torch.allclose(result_joint_map_8x8, expected_joint_map_8x8)
def test_map_forward(targets, mapper, strides):
    """forward() yields one level map per configured stride."""
    levels = mapper.forward(targets)
    assert len(levels) == len(strides)
| 21.643836 | 74 | 0.777215 | 0 | 0 | 0 | 0 | 462 | 0.292405 | 0 | 0 | 0 | 0 |
988b95133b50696e063c31455c67480729ea1f10 | 770 | py | Python | setup.py | BoaVaga/boavaga_server | 7d25a68832d3b9f4f5666d0a3d55c99025498511 | [
"MIT"
] | null | null | null | setup.py | BoaVaga/boavaga_server | 7d25a68832d3b9f4f5666d0a3d55c99025498511 | [
"MIT"
] | null | null | null | setup.py | BoaVaga/boavaga_server | 7d25a68832d3b9f4f5666d0a3d55c99025498511 | [
"MIT"
] | null | null | null | import sqlalchemy
from sqlalchemy.ext.compiler import compiles
import sys
from src.container import create_container
from src.models.base import Base
from src.models import *
@compiles(sqlalchemy.LargeBinary, 'mysql')
def compile_binary_mysql(element, compiler, **kw):
    """Render LargeBinary as fixed-size BINARY(n) on MySQL when a positive length is set."""
    length = element.length
    if not (isinstance(length, int) and length > 0):
        # No usable length -> fall back to the default BLOB rendering.
        return compiler.visit_BLOB(element, **kw)
    return f'BINARY({length})'
def main():
    """Create all DB tables from the config file given as the first CLI argument."""
    if len(sys.argv) == 1:
        print('USAGE: python setup.py <CONFIG_PATH>')
        # NOTE(review): exits with status 0 on a usage error -- consider exit(1).
        exit(0)
    config_path = sys.argv[1]
    container = create_container(config_path)
    db_engine = container.db_engine()
    # Create every table registered on the declarative Base.
    Base.metadata.create_all(db_engine.engine)
    print('Ok')
if __name__ == '__main__':
    main()
988d858cdf7f3362f594136b2442041c83f7fa93 | 4,586 | py | Python | cli/src/pcluster/cli/middleware.py | enrico-usai/cfncluster | acf083776c301d4f2a03ce5cd6fc79f9b88c74e0 | [
"Apache-2.0"
] | 415 | 2018-11-13T15:02:15.000Z | 2022-03-31T15:26:06.000Z | cli/src/pcluster/cli/middleware.py | enrico-usai/cfncluster | acf083776c301d4f2a03ce5cd6fc79f9b88c74e0 | [
"Apache-2.0"
] | 2,522 | 2018-11-13T16:16:27.000Z | 2022-03-31T13:57:10.000Z | cli/src/pcluster/cli/middleware.py | yuleiwan/aws-parallelcluster | aad2a3019ef4ad08d702f5acf41b152b3f7a0b46 | [
"Apache-2.0"
] | 164 | 2018-11-14T22:47:46.000Z | 2022-03-22T11:33:22.000Z | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines middleware functions for command line operations.

Middleware lets custom logic run before or after an operation: a function is
registered under the operation's name and invoked with the wrapped operation
as its first argument, receiving the operation's **kwargs.
import logging
import argparse
import boto3
import jmespath
from botocore.exceptions import WaiterError
import pcluster.cli.model
from pcluster.cli.exceptions import APIOperationException, ParameterException
LOGGER = logging.getLogger(__name__)
def _cluster_status(cluster_name):
    """Fetch the current cluster description via the describe_cluster controller."""
    target = ("pcluster.api.controllers."
              "cluster_operations_controller.describe_cluster")
    return pcluster.cli.model.call(target, cluster_name=cluster_name)
def add_additional_args(parser_map):
    """Add any additional arguments to parsers for individual operations.

    NOTE: these additional arguments will also need to be removed before
    calling the underlying function for the situation where they are not a part
    of the specification.
    """
    for operation in ("create-cluster", "delete-cluster", "update-cluster"):
        parser_map[operation].add_argument(
            "--wait", action="store_true", help=argparse.SUPPRESS
        )
def middleware_hooks():
    """Return a map from operation names to their middleware functions."""
    hooks = {
        "create-cluster": create_cluster,
        "delete-cluster": delete_cluster,
        "update-cluster": update_cluster,
    }
    return hooks
def queryable(func):
    """Decorator: pop a 'query' kwarg and apply it as a JMESPath filter to the result."""
    def wrapper(dest_func, _body, kwargs):
        query = kwargs.pop("query", None)
        result = func(dest_func, _body, kwargs)
        if not query:
            return result
        try:
            return jmespath.search(query, result)
        except jmespath.exceptions.ParseError:
            raise ParameterException({"message": "Invalid query string.", "query": query})

    return wrapper
@queryable
def update_cluster(func, _body, kwargs):
    """Run the update operation; with --wait, block on the CloudFormation waiter."""
    wait = kwargs.pop("wait", False)
    ret = func(**kwargs)
    if wait and not kwargs.get("dryrun"):
        cloud_formation = boto3.client("cloudformation")
        waiter = cloud_formation.get_waiter("stack_update_complete")
        try:
            waiter.wait(StackName=kwargs["cluster_name"])
        except WaiterError as e:
            LOGGER.error("Failed when waiting for cluster update with error: %s", e)
            raise APIOperationException(_cluster_status(kwargs["cluster_name"]))
        # Report the post-update status rather than the initial API response.
        ret = _cluster_status(kwargs["cluster_name"])
    return ret
@queryable
def create_cluster(func, body, kwargs):
    """Run the create operation; with --wait, block on the CloudFormation waiter."""
    wait = kwargs.pop("wait", False)
    ret = func(**kwargs)
    if wait and not kwargs.get("dryrun"):
        cloud_formation = boto3.client("cloudformation")
        waiter = cloud_formation.get_waiter("stack_create_complete")
        try:
            waiter.wait(StackName=body["clusterName"])
        except WaiterError as e:
            LOGGER.error("Failed when waiting for cluster creation with error: %s", e)
            raise APIOperationException(_cluster_status(body["clusterName"]))
        # Report the post-creation status rather than the initial API response.
        ret = _cluster_status(body["clusterName"])
    return ret
@queryable
def delete_cluster(func, _body, kwargs):
    """Middleware for ``delete-cluster``: optionally wait for completion.

    With ``wait`` set, blocks on CloudFormation's ``stack_delete_complete``
    waiter (note: no dry-run check here, unlike create/update) and returns
    a plain success message instead of the API response.
    """
    wait = kwargs.pop("wait", False)
    ret = func(**kwargs)
    if wait:
        cloud_formation = boto3.client("cloudformation")
        waiter = cloud_formation.get_waiter("stack_delete_complete")
        try:
            waiter.wait(StackName=kwargs["cluster_name"])
        except WaiterError as e:
            LOGGER.error("Failed when waiting for cluster deletion with error: %s", e)
            # Report failure with a fixed message, without querying status.
            raise APIOperationException({"message": f"Failed when deleting cluster '{kwargs['cluster_name']}'."})
        return {"message": f"Successfully deleted cluster '{kwargs['cluster_name']}'."}
    else:
        return ret
| 37.900826 | 113 | 0.711949 | 0 | 0 | 0 | 0 | 1,906 | 0.415613 | 0 | 0 | 2,109 | 0.459878 |
988f5d1f8daa9ec72e11862df13ec07c4e300748 | 3,696 | py | Python | src/pyrin/audit/api.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/audit/api.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/audit/api.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
audit api module.
"""
import pyrin.audit.services as audit_services
from pyrin.api.router.decorators import api
# Route options for the audit endpoint come from the application's
# audit configuration.
audit_config = audit_services.get_audit_configurations()
# the audit route is always registered with caching disabled
audit_config.update(no_cache=True)
# 'enabled' only controls whether the route is registered at all; it is
# not an option of the @api decorator, so it is popped out first.
is_enabled = audit_config.pop('enabled', False)
# note the strict identity check: only the literal True enables the route
if is_enabled is True:
    @api(**audit_config)
    def inspect(**options):
        """
        inspects all registered packages and gets inspection data.
        ---
        parameters:
          - name: application
            type: boolean
            description: specifies that application info must be included
          - name: packages
            type: boolean
            description: specifies that loaded packages info must be included
          - name: framework
            type: boolean
            description: specifies that framework info must be included
          - name: python
            type: boolean
            description: specifies that python info must be included
          - name: os
            type: boolean
            description: specifies that operating system info must be included
          - name: hardware
            type: boolean
            description: specifies that hardware info must be included
          - name: database
            type: boolean
            description: specifies that database info must be included
          - name: caching
            type: boolean
            description: specifies that caching info must be included
          - name: celery
            type: boolean
            description: specifies that celery info must be included
          - name: traceback
            type: boolean
            description: specifies that on failure, it must include the traceback of errors
        responses:
          200:
            description: all packages are working normally
            schema:
              properties:
                application:
                  type: object
                  description: application info
                packages:
                  type: object
                  description: loaded packages info
                framework:
                  type: object
                  description: framework info
                python:
                  type: object
                  description: python info
                platform:
                  type: object
                  description: platform info
                database:
                  type: object
                  description: database info
                caching:
                  type: object
                  description: caching info
                celery:
                  type: object
                  description: celery info
          500:
            description: some packages have errors
            schema:
              properties:
                application:
                  type: object
                  description: application info
                packages:
                  type: object
                  description: loaded packages info
                framework:
                  type: object
                  description: framework info
                python:
                  type: object
                  description: python info
                platform:
                  type: object
                  description: platform info
                database:
                  type: object
                  description: database info
                caching:
                  type: object
                  description: caching info
                celery:
                  type: object
                  description: celery info
        """
        # delegate the actual inspection to the audit services layer
        return audit_services.inspect(**options)
| 33 | 91 | 0.513528 | 0 | 0 | 0 | 0 | 3,383 | 0.915314 | 0 | 0 | 3,333 | 0.901786 |
988f6916ac656773613c7bb7507c1cac7e708d2f | 222 | py | Python | python/stage_2/2839.py | smartx-jshan/Coding_Practice | bc7d485e7992031e55df62483818b721ad7d1d4f | [
"Apache-2.0"
] | null | null | null | python/stage_2/2839.py | smartx-jshan/Coding_Practice | bc7d485e7992031e55df62483818b721ad7d1d4f | [
"Apache-2.0"
] | null | null | null | python/stage_2/2839.py | smartx-jshan/Coding_Practice | bc7d485e7992031e55df62483818b721ad7d1d4f | [
"Apache-2.0"
] | null | null | null | a = int(input())
sum = 0
while True:
if ( a == 0):
print (int(sum))
break
if ( a <= 2):
print (-1)
break
if (a%5 != 0):
a = a - 3
sum = sum + 1
else:
sum = sum + int(a/5)
a = 0
| 10.571429 | 24 | 0.400901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
98907675d26bfe65790edfc2bde7b8179aee4ad8 | 5,793 | py | Python | tests/test_losses/test_mesh_losses.py | nightfuryyy/mmpose | 910d9e31dd9d46e3329be1b7567e6309d70ab64c | [
"Apache-2.0"
] | 1,775 | 2020-07-10T01:20:01.000Z | 2022-03-31T16:31:50.000Z | tests/test_losses/test_mesh_losses.py | KHB1698/mmpose | 93c3a742c540dfb4ca515ad545cef705a07d90b4 | [
"Apache-2.0"
] | 1,021 | 2020-07-11T11:40:24.000Z | 2022-03-31T14:32:26.000Z | tests/test_losses/test_mesh_losses.py | KHB1698/mmpose | 93c3a742c540dfb4ca515ad545cef705a07d90b4 | [
"Apache-2.0"
] | 477 | 2020-07-11T11:27:51.000Z | 2022-03-31T09:42:25.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from numpy.testing import assert_almost_equal
from mmpose.models import build_loss
from mmpose.models.utils.geometry import batch_rodrigues
def test_mesh_loss():
    """test mesh loss."""
    loss_cfg = dict(
        type='MeshLoss',
        joints_2d_loss_weight=1,
        joints_3d_loss_weight=1,
        vertex_loss_weight=1,
        smpl_pose_loss_weight=1,
        smpl_beta_loss_weight=1,
        img_res=256,
        focal_length=5000)
    loss = build_loss(loss_cfg)
    # Case 1: predictions identical to the ground truth -> every loss term
    # must be exactly zero.
    smpl_pose = torch.zeros([1, 72], dtype=torch.float32)
    smpl_rotmat = batch_rodrigues(smpl_pose.view(-1, 3)).view(-1, 24, 3, 3)
    smpl_beta = torch.zeros([1, 10], dtype=torch.float32)
    camera = torch.tensor([[1, 0, 0]], dtype=torch.float32)
    vertices = torch.rand([1, 6890, 3], dtype=torch.float32)
    joints_3d = torch.ones([1, 24, 3], dtype=torch.float32)
    # project the 3D joints with the same camera so the 2D gt matches the
    # prediction exactly; (256 - 1) / 2 recentres to image coordinates
    joints_2d = loss.project_points(joints_3d, camera) + (256 - 1) / 2
    fake_pred = {}
    fake_pred['pose'] = smpl_rotmat
    fake_pred['beta'] = smpl_beta
    fake_pred['camera'] = camera
    fake_pred['vertices'] = vertices
    fake_pred['joints_3d'] = joints_3d
    fake_gt = {}
    fake_gt['pose'] = smpl_pose
    fake_gt['beta'] = smpl_beta
    fake_gt['vertices'] = vertices
    fake_gt['has_smpl'] = torch.ones(1, dtype=torch.float32)
    fake_gt['joints_3d'] = joints_3d
    fake_gt['joints_3d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
    fake_gt['joints_2d'] = joints_2d
    fake_gt['joints_2d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
    losses = loss(fake_pred, fake_gt)
    assert torch.allclose(losses['vertex_loss'], torch.tensor(0.))
    assert torch.allclose(losses['smpl_pose_loss'], torch.tensor(0.))
    assert torch.allclose(losses['smpl_beta_loss'], torch.tensor(0.))
    assert torch.allclose(losses['joints_3d_loss'], torch.tensor(0.))
    assert torch.allclose(losses['joints_2d_loss'], torch.tensor(0.))
    # Case 2: offset the predictions/ground truth by known amounts -> each
    # loss term takes a closed-form value.
    fake_pred = {}
    fake_pred['pose'] = smpl_rotmat + 1
    fake_pred['beta'] = smpl_beta + 1
    fake_pred['camera'] = camera
    fake_pred['vertices'] = vertices + 1
    fake_pred['joints_3d'] = joints_3d.clone()
    # move only the first of the 24 ground-truth joints by 1
    joints_3d_t = joints_3d.clone()
    joints_3d_t[:, 0] = joints_3d_t[:, 0] + 1
    fake_gt = {}
    fake_gt['pose'] = smpl_pose
    fake_gt['beta'] = smpl_beta
    fake_gt['vertices'] = vertices
    fake_gt['has_smpl'] = torch.ones(1, dtype=torch.float32)
    fake_gt['joints_3d'] = joints_3d_t
    fake_gt['joints_3d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
    fake_gt['joints_2d'] = joints_2d + (256 - 1) / 2
    fake_gt['joints_2d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
    losses = loss(fake_pred, fake_gt)
    assert torch.allclose(losses['vertex_loss'], torch.tensor(1.))
    assert torch.allclose(losses['smpl_pose_loss'], torch.tensor(1.))
    assert torch.allclose(losses['smpl_beta_loss'], torch.tensor(1.))
    # unit offset on 1 of 24 joints gives 0.5/24 -- presumably a smooth-L1
    # (0.5 at unit error) averaged over joints; TODO confirm against MeshLoss
    assert torch.allclose(losses['joints_3d_loss'], torch.tensor(0.5 / 24))
    assert torch.allclose(losses['joints_2d_loss'], torch.tensor(0.5))
def test_gan_loss():
    """test gan loss.

    Exercises every supported ``gan_type`` (vanilla/lsgan/wgan/hinge) with
    the same four (target_is_real, is_disc) combinations; the expected loss
    values are unchanged from the previous copy-pasted version, the blocks
    are merely folded into one data-driven loop.
    """
    # an unknown gan_type must be rejected at build time
    with pytest.raises(NotImplementedError):
        loss_cfg = dict(
            type='GANLoss',
            gan_type='test',
            real_label_val=1.0,
            fake_label_val=0.0,
            loss_weight=1)
        _ = build_loss(loss_cfg)
    input_1 = torch.ones(1, 1)
    input_2 = torch.ones(1, 3, 6, 6) * 2
    # every configuration is probed with the same four calls, in this order
    flag_combos = [(True, False), (False, False), (True, True), (False, True)]
    # (gan_type, input tensor, expected losses for the four combos above)
    cases = [
        ('vanilla', input_1, [0.6265233, 2.6265232, 0.3132616, 1.3132616]),
        ('lsgan', input_2, [2.0, 8.0, 1.0, 4.0]),
        ('wgan', input_2, [-4.0, 4.0, -2.0, 2.0]),
        ('hinge', input_2, [-4.0, -4.0, 0.0, 3.0]),
    ]
    for gan_type, gan_input, expected in cases:
        loss_cfg = dict(
            type='GANLoss',
            gan_type=gan_type,
            real_label_val=1.0,
            fake_label_val=0.0,
            loss_weight=2.0)
        gan_loss = build_loss(loss_cfg)
        for (target_is_real, is_disc), expected_loss in zip(flag_combos, expected):
            loss = gan_loss(gan_input, target_is_real, is_disc=is_disc)
            assert_almost_equal(loss.item(), expected_loss)
| 35.323171 | 78 | 0.655619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 627 | 0.108234 |
9891a82e9057c30b7703a78123e0f01f73c0248f | 6,932 | py | Python | seqs/IntegerHeap.py | vincentdavis/special-sequences | b7b7f8c2bd2f655baeb7b2139ddf007615bffd67 | [
"MIT"
] | 1 | 2020-04-15T10:46:57.000Z | 2020-04-15T10:46:57.000Z | seqs/IntegerHeap.py | vincentdavis/special-sequences | b7b7f8c2bd2f655baeb7b2139ddf007615bffd67 | [
"MIT"
] | 1 | 2016-09-14T03:57:25.000Z | 2016-09-14T03:57:25.000Z | seqs/IntegerHeap.py | vincentdavis/special-sequences | b7b7f8c2bd2f655baeb7b2139ddf007615bffd67 | [
"MIT"
] | null | null | null | """IntegerHeap.py
Priority queues of integer keys based on van Emde Boas trees.
Only the keys are stored; caller is responsible for keeping
track of any data associated with the keys in a separate dictionary.
We use a version of vEB trees in which all accesses to subtrees
are performed indirectly through a hash table and the data structures
for the subtrees are only created when they are nonempty. As a
consequence, the data structure takes only linear space
(linear in the number of keys stored in the heap) while still preserving
the O(log log U) time per operation of vEB trees. For better performance,
we switch to bitvectors for sufficiently small integer sizes.
Usage:
Q = BitVectorHeap() # Bit-vector based heap for integers
Q = FlatHeap(i) # Flat heap for 2^i-bit integers
Q = LinearHeap() # Set-based heap with linear-time min operation
Q = IntegerHeap(i) # Choose between BVH and FH depending on i
Q.add(x) # Include x among the values in the heap
Q.remove(x) # Remove x from the values in the heap
Q.min() # Return the minimum value in the heap
if Q # True if Q is nonempty, false if empty
Because the min operation in LinearHeap is a Python primitive rather than
a sequence of interpreted Python instructions, it is actually quite fast;
testing indicates that, for 32-bit keys, FlatHeap(5) beats LinearHeap only
for heaps of 250 or more items. This breakeven point would likely be
different for different numbers of bits per word or when runtime optimizers
such as psyco are in use.
D. Eppstein, January 2010
"""
def IntegerHeap(i):
    """Return an integer heap for 2^i-bit integers.

    Small word sizes (i <= 3, i.e. keys of at most 8 bits) are served by a
    BitVectorHeap, whose bit vector stays small (at most 256 bits) and
    cheap compared with the overhead of a FlatHeap; larger word sizes get
    the recursive FlatHeap structure.  Timing tests behind the original
    implementation found i <= 3 slightly faster than i <= 2 and much
    faster than i <= 4.
    """
    return BitVectorHeap() if i <= 3 else FlatHeap(i)
Log2Table = {}  # memo: power of two -> its exponent (kept for compatibility)


def Log2(b):
    """Return log_2(b), where b must be a positive power of two.

    Uses int.bit_length() for an O(1) answer instead of the original scheme
    of growing Log2Table one power at a time -- which also looped forever
    whenever b was *not* a power of two; that case now raises ValueError.
    """
    i = b.bit_length() - 1
    if b <= 0 or (1 << i) != b:
        raise ValueError("Log2: %r is not a positive power of two" % (b,))
    # keep the module-level memo table populated for any external users
    Log2Table[b] = i
    return i
# ======================================================================
# BitVectorHeap
# ======================================================================
class BitVectorHeap(object):
    """Maintain the minimum of a set of integers using bitvector operations.

    The set is stored as a single arbitrary-precision integer in which bit
    x is set exactly when x is in the heap.
    """

    def __init__(self):
        """Create a new, empty BitVectorHeap."""
        self._S = 0

    def __nonzero__(self):  # Python 2 truth protocol
        """True if this heap is nonempty, false if empty."""
        return self._S != 0

    def __bool__(self):  # Python 3 truth protocol
        """True if this heap is nonempty, false if empty."""
        return self._S != 0

    def add(self, x):
        """Include x among the values in the heap."""
        self._S |= 1 << x

    def remove(self, x):
        """Remove x from the values in the heap.

        Bug fix: the original used ``self._S &= ~1 << x``, which parses as
        ``(~1) << x`` (unary ~ binds tighter than <<) and therefore cleared
        every bit from 0 through x, silently dropping all values <= x.
        """
        self._S &= ~(1 << x)

    def min(self):
        """Return the minimum value in the heap."""
        if not self._S:
            raise ValueError("BitVectorHeap is empty")
        # S & -S isolates the lowest set bit; bit_length gives its index
        # directly, with no need for the module-level Log2 helper.
        return (self._S & -self._S).bit_length() - 1
# ======================================================================
# FlatHeap
# ======================================================================
class FlatHeap(object):
    """Maintain the minimum of a set of 2^i-bit integer values.

    A van Emde Boas style structure: each stored value is split into a high
    and a low halfword; the high halfwords live in one recursive heap
    (_HQ) and, for each high halfword present, the matching low halfwords
    live in a heap of their own (_LQ[high]).  The overall minimum is cached
    separately in _min and is *not* stored in the sub-heaps.
    """
    def __init__(self, i):
        """Create a new FlatHeap for 2^i-bit integers."""
        self._min = None  # cached minimum; None <=> heap is empty
        self._order = i  # word-size exponent: values have 2^i bits
        self._shift = 1 << (i - 1)  # number of bits in a halfword
        self._max = (1 << (1 << i)) - 1  # largest representable value
        self._HQ = IntegerHeap(i - 1)  # Heap of high halfwords
        self._LQ = {}  # Map high half to heaps of low halfwords
    def _rangecheck(self, x):
        """Make sure x is a number we can include in this FlatHeap."""
        if x < 0 or x > self._max:
            raise ValueError("FlatHeap: {0!s} out of range".format(repr(x)))
    def __nonzero__(self):
        """True if this heap is nonempty, false if empty."""
        return self._min is not None
    def __bool__(self):
        """True if this heap is nonempty, false if empty."""
        return self._min is not None
    def min(self):
        """Return the minimum value in the heap."""
        if self._min is None:
            raise ValueError("FlatHeap is empty")
        return self._min
    def add(self, x):
        """Include x among the values in the heap."""
        self._rangecheck(x)
        if self._min is None or self._min == x:
            # adding to an empty heap is easy
            self._min = x
            return
        if x < self._min:
            # swap to make sure the value we're adding is non-minimal
            x, self._min = self._min, x
        H = x >> self._shift  # split into high and low halfwords
        L = x - (H << self._shift)
        if H not in self._LQ:
            # first value with this high halfword: register it in both maps
            self._HQ.add(H)
            self._LQ[H] = IntegerHeap(self._order - 1)
        self._LQ[H].add(L)
    def remove(self, x):
        """Remove x from the values in the heap."""
        self._rangecheck(x)
        if self._min == x:
            # Removing minimum, move next value into place
            # and prepare to remove that next value from secondary heaps
            if not self._HQ:
                self._min = None
                return
            H = self._HQ.min()
            L = self._LQ[H].min()
            # promote the smallest stored value to _min; fall through below
            # to delete it from the sub-heaps
            x = self._min = (H << self._shift) + L
        else:
            H = x >> self._shift  # split into high and low halfwords
            L = x - (H << self._shift)
            if H not in self._LQ:
                return  # ignore removal when not in heap
        self._LQ[H].remove(L)
        if not self._LQ[H]:
            # last low halfword for this high halfword: drop both entries
            del self._LQ[H]
            self._HQ.remove(H)
# ======================================================================
# LinearHeap
# ======================================================================
class LinearHeap(object):
    """Maintain the minimum of a set of integers using a built-in set.

    All operations delegate to the set; min() is a linear-time scan.
    """

    def __init__(self):
        """Create a new, empty LinearHeap."""
        self._S = set()

    def __nonzero__(self):  # Python 2 truth protocol
        """True if this heap is nonempty, false if empty."""
        return bool(self._S)

    def __bool__(self):  # Python 3 truth protocol
        """True if this heap is nonempty, false if empty."""
        return bool(self._S)

    def add(self, x):
        """Include x among the values in the heap."""
        self._S.add(x)

    def remove(self, x):
        """Remove x from the values in the heap."""
        # set.remove (not discard): removing a missing value raises KeyError,
        # matching the original behaviour
        self._S.remove(x)

    def min(self):
        """Return the minimum value in the heap."""
        return min(self._S)
| 34.147783 | 79 | 0.568523 | 4,042 | 0.583093 | 0 | 0 | 0 | 0 | 0 | 0 | 4,142 | 0.597519 |
9893aa6d0245de7a42d6065da71ab428fbb28e3b | 3,238 | py | Python | core/client/client.py | spiritotaku/fedlearn-algo | 842700d43e7f033a7b6a32d0845cb6d9db5b866a | [
"Apache-2.0"
] | 1 | 2021-07-20T23:44:28.000Z | 2021-07-20T23:44:28.000Z | core/client/client.py | kfliubo/fedlearn-algo | 3440bb10a8680319bcad1b9c9677874bab35550a | [
"Apache-2.0"
] | null | null | null | core/client/client.py | kfliubo/fedlearn-algo | 3440bb10a8680319bcad1b9c9677874bab35550a | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is the class template theABC JDT client
from core.entity.common.message import RequestMessage, ResponseMessage
from core.grpc_comm.grpc_converter import grpc_msg_to_common_msg, common_msg_to_grpc_msg
from core.proto.transmission_pb2 import ReqResMessage
from core.proto.transmission_pb2_grpc import TransmissionServicer
from abc import abstractmethod
from typing import Dict
import pickle
class ClientError(ValueError):
    """Raised by Client.process_request when a request's phase id has no
    registered handler in dict_functions."""
    pass
class Client(TransmissionServicer):
    """
    Basic client class.

    A concrete client registers its per-phase handlers in ``dict_functions``
    (mapping phase id -> handler).  Incoming gRPC messages are converted to
    common messages, dispatched to the matching handler in
    ``process_request``, and the handler's response converted back in
    ``comm``.
    """
    @property
    def dict_functions(self):
        """
        Dictionary of functions that store the training function mapping as
        <phase_id: training_function>.
        """
        return self._dict_functions

    @dict_functions.setter
    def dict_functions(self, value):
        if not isinstance(value, dict):
            raise ValueError("Function mapping must be a dictionary!")
        self._dict_functions = value

    @abstractmethod
    def train_init(self) -> None:
        """
        Training initialization function

        Returns
        -------
        None
        """

    @abstractmethod
    def inference_init(self) -> None:
        """
        Inference initialization function

        Returns
        -------
        None
        """

    def load_model(self, model_path: str) -> Dict:
        """
        Load a pickled model from disk.

        SECURITY NOTE: ``pickle.load`` can execute arbitrary code while
        deserializing; only load model files from trusted sources.

        Parameters
        ----------
        model_path: str

        Returns
        -------
        model: dict
        """
        # 'with' guarantees the handle is closed even if unpickling raises
        # (the original left the file open on error).
        with open(model_path, 'rb') as f:
            return pickle.load(f)

    def save_model(self, model_path: str, model: Dict) -> None:
        """
        Pickle a model to disk.

        Parameters
        ----------
        model_path: str
        model: dict

        Returns
        -------
        None
        """
        with open(model_path, 'wb') as f:
            pickle.dump(model, f)

    def process_request(self, request: RequestMessage) -> ResponseMessage:
        """
        Dispatch a request to the handler registered for its phase id.

        Parameters
        ----------
        request: RequestMessage

        Returns
        -------
        response: ResponseMessage

        Raises
        ------
        ClientError
            If no handler is registered for the request's phase id.
        """
        symbol = request.phase_id
        if symbol not in self.dict_functions:
            # Bug fix: the original passed 'symbol' as a second exception
            # argument, so '%s' was never interpolated into the message.
            raise ClientError("Function %s is not implemented." % symbol)
        response = self.dict_functions[symbol](request)
        return response

    def comm(self, grpc_request: ReqResMessage, context) -> ReqResMessage:
        """gRPC entry point: convert the message, dispatch it, convert back."""
        common_req_msg = grpc_msg_to_common_msg(grpc_request)
        common_res_msg = self.process_request(common_req_msg)
        return common_msg_to_grpc_msg(common_res_msg)
9894e55664da246d5300969b91941e5dc7ab68d5 | 9,369 | py | Python | L1Trigger/GlobalTriggerAnalyzer/test/L1GtPackUnpackAnalyzer_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | L1Trigger/GlobalTriggerAnalyzer/test/L1GtPackUnpackAnalyzer_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | L1Trigger/GlobalTriggerAnalyzer/test/L1GtPackUnpackAnalyzer_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | from __future__ import print_function
#
# cfg file to pack (DigiToRaw) a GT DAQ record, unpack (RawToDigi) it back
# and compare the two set of digis
#
# V M Ghete 2009-04-06
import FWCore.ParameterSet.Config as cms
# process
process = cms.Process('TestGtPackUnpackAnalyzer')
###################### user choices ######################
# choose the type of sample used (True for RelVal, False for data)
useRelValSample = True
#useRelValSample=False
# actual GlobalTag must be appropriate for the sample use
if useRelValSample == True :
    useGlobalTag = 'IDEAL_V12'
    #useGlobalTag='STARTUP_V9'
else :
    useGlobalTag = 'CRAFT_ALL_V12'
# change to True to use local files
# the type of file should match the choice of useRelValSample and useGlobalTag
useLocalFiles = False
###################### end user choices ###################
# number of events to be processed and source file
process.maxEvents = cms.untracked.PSet(
    input=cms.untracked.int32(10)
)
# readFiles/secFiles are handed to the PoolSource here and then filled
# in place (via .extend) by the sample-selection code below
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
process.source = cms.Source ('PoolSource', fileNames=readFiles, secondaryFileNames=secFiles)
# type of sample used (True for RelVal, False for data)
if useRelValSample == True :
    if useGlobalTag.count('IDEAL') :
        #/RelValTTbar/CMSSW_2_2_4_IDEAL_V11_v1/GEN-SIM-DIGI-RAW-HLTDEBUG
        dataset = cms.untracked.vstring('RelValTTbar_CMSSW_2_2_4_IDEAL_V11_v1')
        readFiles.extend([
            '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V11_v1/0000/02697009-5CF3-DD11-A862-001D09F2423B.root',
            '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V11_v1/0000/064657A8-59F3-DD11-ACA5-000423D991F0.root',
            '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V11_v1/0000/0817F6DE-5BF3-DD11-880D-0019DB29C5FC.root',
            '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_V11_v1/0000/0899697C-5AF3-DD11-9D21-001617DBD472.root'
        ]);
        secFiles.extend([
        ])
    elif useGlobalTag.count('STARTUP') :
        #/RelValTTbar/CMSSW_2_2_4_STARTUP_V8_v1/GEN-SIM-DIGI-RAW-HLTDEBUG
        dataset = cms.untracked.vstring('RelValTTbar_CMSSW_2_2_4_STARTUP_V8_v1')
        readFiles.extend([
            '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V8_v1/0000/069AA022-5BF3-DD11-9A56-001617E30D12.root',
            '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V8_v1/0000/08DA99A6-5AF3-DD11-AAC1-001D09F24493.root',
            '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V8_v1/0000/0A725E15-5BF3-DD11-8B4B-000423D99CEE.root',
            '/store/relval/CMSSW_2_2_4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/STARTUP_V8_v1/0000/0AF5B676-5AF3-DD11-A22F-001617DBCF1E.root'
        ]);
        secFiles.extend([
        ])
    else :
        print('Error: Global Tag ', useGlobalTag, ' not defined.')
else :
    # data
    dataset = '/Cosmics/Commissioning09-v1/RAW'
    print(' Running on set: '+ dataset)
    readFiles.extend( [
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/00BD9A1F-B908-DE11-8B2C-000423D94A04.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/025E8B48-B608-DE11-A0EE-00161757BF42.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/027AA271-D208-DE11-9A7F-001617DBD5AC.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/04281D2F-D108-DE11-9A27-000423D944DC.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/065B0C1C-C008-DE11-A32B-001617E30F48.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/08B1054B-BD08-DE11-AF8B-001617C3B78C.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/0C055C33-D108-DE11-B678-001617C3B73A.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/0E480977-D208-DE11-BA78-001617C3B6E2.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/0E79251B-B908-DE11-83FF-000423D99CEE.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/101B8CA0-B508-DE11-B614-000423D99160.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/12C62C71-BF08-DE11-A48C-000423D99614.root',
        '/store/data/Commissioning09/Cosmics/RAW/v1/000/076/966/16A77E08-B008-DE11-9121-000423D8F63C.root'
    ]);
    secFiles.extend([
    ])
if useLocalFiles :
    # Bug fix: the original rebound the local name 'readFiles' to a plain
    # string here.  process.source already holds the vstring object created
    # above, so that rebinding never reached the source and the
    # useLocalFiles switch silently did nothing.  Assign to the source's
    # fileNames parameter instead (file name kept verbatim, including the
    # doubled '.root.root' extension).
    process.source.fileNames = cms.untracked.vstring(
        'file:/afs/cern.ch/user/g/ghete/scratch0/CmsswTestFiles/testGt_PackUnpackAnalyzer_source.root.root')
# load and configure modules via Global Tag
# https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions
process.load('Configuration.StandardSequences.Geometry_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = useGlobalTag+'::All'
# remove FakeConditions when GTag is OK
process.load('L1Trigger.Configuration.L1Trigger_FakeConditions_cff')
#
# pack....... (DigiToRaw: build a raw GT DAQ record from the digis,
# cf. the file header)
#
process.load("EventFilter.L1GlobalTriggerRawToDigi.l1GtPack_cfi")
# input tag for GT readout collection:
# input tag for GMT readout collection:
# source = hardware record
# for RelVal the packer reads the simulated digis; for data it reads the
# digis produced by a previous unpacking step
if useRelValSample == True :
    daqGtInputTagPack = 'simGtDigis'
    muGmtInputTagPack = 'simGmtDigis'
else :
    daqGtInputTagPack = 'l1GtUnpack'
    muGmtInputTagPack = 'l1GtUnpack'
process.l1GtPack.DaqGtInputTag = daqGtInputTagPack
process.l1GtPack.MuGmtInputTag = muGmtInputTagPack
# mask for active boards (actually 16 bits)
# if bit is zero, the corresponding board will not be packed
# default: no board masked: ActiveBoardsMask = 0xFFFF
# no board masked (default)
#process.l1GtPack.ActiveBoardsMask = 0xFFFF
# GTFE only in the record
#process.l1GtPack.ActiveBoardsMask = 0x0000
# GTFE + FDL
#process.l1GtPack.ActiveBoardsMask = 0x0001
# GTFE + GMT
#process.l1GtPack.ActiveBoardsMask = 0x0100
# GTFE + FDL + GMT
#process.l1GtPack.ActiveBoardsMask = 0x0101
# set it to verbose
process.l1GtPack.Verbosity = cms.untracked.int32(1)
#
# unpack....... (RawToDigi: recover digis from the record just packed)
#
import EventFilter.L1GlobalTriggerRawToDigi.l1GtUnpack_cfi
# clone the standard unpacker module so its cfi defaults stay untouched
process.gtPackedUnpack = EventFilter.L1GlobalTriggerRawToDigi.l1GtUnpack_cfi.l1GtUnpack.clone()
# input tag for GT and GMT readout collections in the packed data:
process.gtPackedUnpack.DaqGtInputTag = 'l1GtPack'
# Active Boards Mask
# no board masked (default)
#process.gtPackedUnpack.ActiveBoardsMask = 0xFFFF
# GTFE only in the record
#process.gtPackedUnpack.ActiveBoardsMask = 0x0000
# GTFE + FDL
#process.gtPackedUnpack.ActiveBoardsMask = 0x0001
# GTFE + GMT
#process.gtPackedUnpack.ActiveBoardsMask = 0x0100
# GTFE + FDL + GMT
#process.gtPackedUnpack.ActiveBoardsMask = 0x0101
# BxInEvent to be unpacked
# all available BxInEvent (default)
#process.gtPackedUnpack.UnpackBxInEvent = -1
# BxInEvent = 0 (L1A)
#process.gtPackedUnpack.UnpackBxInEvent = 1
# 3 BxInEvent (F, 0, 1)
#process.gtPackedUnpack.UnpackBxInEvent = 3
#
# compare the initial and final digis .......
#
process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtPackUnpackAnalyzer_cfi")
# input tag for the initial GT DAQ record: must match the pack label
# input tag for the initial GMT readout collection: must match the pack label
process.l1GtPackUnpackAnalyzer.InitialDaqGtInputTag = daqGtInputTagPack
process.l1GtPackUnpackAnalyzer.InitialMuGmtInputTag = muGmtInputTagPack
# input tag for the final GT DAQ and GMT records: must match the unpack label
# GT unpacker: gtPackedUnpack (cloned unpacker from L1GtPackUnpackAnalyzer.cfg)
#process.l1GtPackUnpackAnalyzer.FinalGtGmtInputTag = 'gtPackedUnpack'
# path to be run
# NOTE(review): both branches currently schedule the identical path; the
# data branch presumably needs an initial raw-data unpacking step first
# (see the FIXME below) -- confirm before relying on the data mode
if useRelValSample == True :
    process.p = cms.Path(process.l1GtPack*process.gtPackedUnpack*process.l1GtPackUnpackAnalyzer)
else :
    process.p = cms.Path(process.l1GtPack*process.gtPackedUnpack*process.l1GtPackUnpackAnalyzer)
# FIXME unpack first raw data
# Message Logger: per-module debug output routed to a dedicated destination
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.debugModules = [ 'l1GtPack', 'l1GtUnpack', 'l1GtPackUnpackAnalyzer']
process.MessageLogger.destinations = ['L1GtPackUnpackAnalyzer']
process.MessageLogger.L1GtPackUnpackAnalyzer = cms.untracked.PSet(
    threshold=cms.untracked.string('DEBUG'),
    #threshold = cms.untracked.string('INFO'),
    #threshold = cms.untracked.string('ERROR'),
    DEBUG=cms.untracked.PSet(
        limit=cms.untracked.int32(-1)
    ),
    INFO=cms.untracked.PSet(
        limit=cms.untracked.int32(-1)
    ),
    WARNING=cms.untracked.PSet(
        limit=cms.untracked.int32(-1)
    ),
    ERROR=cms.untracked.PSet(
        limit=cms.untracked.int32(-1)
    ),
    default = cms.untracked.PSet(
        limit=cms.untracked.int32(-1)
    )
)
# summary
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
)
# output
process.outputL1GtPackUnpack = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('L1GtPackUnpackAnalyzer.root'),
    # keep only emulated data, packed data, unpacked data in the ROOT file
    outputCommands = cms.untracked.vstring('drop *',
        'keep *_simGtDigis_*_*',
        'keep *_simGmtDigis_*_*',
        'keep *_l1GtPack_*_*',
        'keep *_l1GtPackedUnpack_*_*')
)
process.outpath = cms.EndPath(process.outputL1GtPackUnpack)
| 35.488636 | 139 | 0.732629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,048 | 0.645533 |
9896f98d8a2acdc2a185331ee790f1c4273955b0 | 39,103 | py | Python | ranges/RangeDict.py | wikti/ranges | 133376861d5595e040f0f18fd17de73a4ac2c3c2 | [
"MIT"
] | null | null | null | ranges/RangeDict.py | wikti/ranges | 133376861d5595e040f0f18fd17de73a4ac2c3c2 | [
"MIT"
] | null | null | null | ranges/RangeDict.py | wikti/ranges | 133376861d5595e040f0f18fd17de73a4ac2c3c2 | [
"MIT"
] | 1 | 2022-03-29T16:16:15.000Z | 2022-03-29T16:16:15.000Z | from operator import is_
from ._helper import _UnhashableFriendlyDict, _LinkedList, _is_iterable_non_string, Rangelike
from .Range import Range
from .RangeSet import RangeSet
from typing import Iterable, Union, Any, TypeVar, List, Tuple, Dict, Tuple
T = TypeVar('T', bound=Any)
V = TypeVar('V', bound=Any)
class RangeDict:
"""
A class representing a dict-like structure where continuous ranges
correspond to certain values. For any item given to lookup, the
value obtained from a RangeDict will be the one corresponding to
the first range into which the given item fits. Otherwise, RangeDict
provides a similar interface to python's built-in dict.
A RangeDict can be constructed in one of four ways:
>>> # Empty
>>> a = RangeDict()
>>> # From an existing RangeDict object
>>> b = RangeDict(a)
>>> # From a dict that maps Ranges to values
>>> c = RangeDict({
... Range('a', 'h'): "First third of the lowercase alphabet",
... Range('h', 'p'): "Second third of the lowercase alphabet",
... Range('p', '{'): "Final third of the lowercase alphabet",
... })
>>> print(c['brian']) # First third of the lowercase alphabet
>>> print(c['king arthur']) # Second third of the lowercase alphabet
>>> print(c['python']) # Final third of the lowercase alphabet
>>> # From an iterable of 2-tuples, like a regular dict
>>> d = RangeDict([
... (Range('A', 'H'), "First third of the uppercase alphabet"),
... (Range('H', 'P'), "Second third of the uppercase alphabet"),
... (Range('P', '['), "Final third of the uppercase alphabet"),
... ])
A RangeDict cannot be constructed from an arbitrary number of positional
arguments or keyword arguments.
RangeDicts are mutable, so new range correspondences can be added
at any time, with Ranges or RangeSets acting like the keys in a
normal dict/hashtable. New keys must be of type Range or RangeSet,
or they must be able to be coerced into a RangeSet. Given
keys are also copied before they are added to a RangeDict.
Adding a new range that overlaps with an existing range will
make it so that the value returned for any given number will be
the one corresponding to the most recently-added range in which
it was found (Ranges are compared by `start`, `include_start`, `end`,
and `include_end` in that priority order). Order of insertion is
important.
The RangeDict constructor, and the `.update()` method, insert elements
in order from the iterable they came from. As of python 3.7+, dicts
retain the insertion order of their arguments, and iterate in that
order - this is respected by this data structure. Other iterables,
like lists and tuples, have order built-in. Be careful about using
sets as arguments, since they have no guaranteed order.
Be very careful about adding a range from -infinity to +infinity.
If defined using the normal Range constructor without any start/end
arguments, then that Range will by default accept any value (see
Range's documentation for more info). However, the first non-infinite
Range added to the RangeDict will overwrite part of the infinite Range,
and turn it into a Range of that type only. As a result, other types
that the infinite Range may have accepted before, will no longer work:
>>> e = RangeDict({Range(include_end=True): "inquisition"})
>>> print(e) # {{[-inf, inf)}: inquisition}
>>> print(e.get(None)) # inquisition
>>> print(e.get(3)) # inquisition
>>> print(e.get("holy")) # inquisition
>>> print(e.get("spanish")) # inquisition
>>>
>>> e[Range("a", "m")] = "grail"
>>>
>>> print(e) # {{[-inf, a), [m, inf)}: inquisition, {[a, m)}: grail}
>>> print(e.get("spanish")) # inquisition
>>> print(e.get("holy")) # grail
>>> print(e.get(3)) # KeyError
>>> print(e.get(None)) # KeyError
In general, unless something has gone wrong, the RangeDict will not
include any empty ranges. Values will disappear if there are not
any keys that map to them. Adding an empty Range to the RangeDict
will not trigger an error, but will have no effect.
By default, the range set will determine value uniqueness by equality
(`==`), not by identity (`is`), and multiple rangekeys pointing to the
same value will be compressed into a single RangeSet pointed at a
single value. This is mainly meaningful for values that are mutable,
such as `list`s or `set`s.
If using assignment operators besides the generic `=` (`+=`, `|=`, etc.)
on such values, be warned that the change will reflect upon the entire
rangeset.
>>> # [{3}] == [{3}] is True, so the two ranges are made to point to the same object
>>> f = RangeDict({Range(1, 2): {3}, Range(4, 5): {3}})
>>> print(f) # {{[1, 2), [4, 5)}: {3}}
>>>
>>> # f[1] returns the {3}. When |= is used, this object changes to {3, 4}
>>> f[Range(1, 2)] |= {4}
>>> # since the entire rangeset is pointing at the same object, the entire range changes
>>> print(f) # {{[1, 2), [4, 5)}: {3, 4}}
This is because `dict[value] = newvalue` calls `dict.__setitem__()`, whereas
`dict[value] += item` instead calls `dict[value].__iadd__()` instead.
To make the RangeDict use identity comparison instead, construct it with the
keyword argument `identity=True`, which should help:
>>> # `{3} is {3}` is False, so the two ranges don't coalesce
>>> g = RangeDict({Range(1, 2): {3}, Range(4, 5): {3}}, identity=True)
>>> print(g) # {{[1, 2)}: {3}, {[4, 5)}: {3}}
To avoid the problem entirely, you can also simply not mutate mutable values
that multiple rangekeys may refer to, substituting non-mutative operations:
>>> h = RangeDict({Range(1, 2): {3}, Range(4, 5): {3}})
>>> print(h) # {{[1, 2), [4, 5)}: {3}}
>>> h[Range(1, 2)] = h[Range(1, 2)] | {4}
>>> print(h) # {{[4, 5)}: {3}, {[1, 2)}: {3, 4}}
"""
# sentinel for checking whether an arg was passed, where anything is valid including None
_sentinel = object()
    def __init__(self, iterable: Union['RangeDict', Dict[Rangelike, V], Iterable[Tuple[Rangelike, V]]] = _sentinel,
                 *, identity=False):
        """
        Initialize a new RangeDict from the given iterable. The given iterable
        may be either a RangeDict (in which case, a copy will be created),
        a regular dict with all keys able to be converted to Ranges, or an
        iterable of 2-tuples (range, value).
        If the argument `identity=True` is given, the RangeDict will use `is` instead
        of `==` when it compares multiple rangekeys with the same associated value to
        possibly merge them.
        :param iterable: Optionally, an iterable from which to source keys - either a RangeDict, a regular dict
                  with Rangelike objects as keys, or an iterable of (range, value) tuples.
        :param identity: optionally, a toggle to use identity instead of equality when determining key-value
                  similarity. By default, uses equality, but will use identity instead if True is passed.
        """
        # Internally, RangeDict has two data structures
        # _values is a dict {value: [rangeset, ...], ..., '_sentinel': [(value: [rangeset, ...]), ...]}
        #   The sentinel allows the RangeDict to accommodate unhashable types.
        # _ranges is a list-of-lists, [[(intrangeset1, value1), (intrangeset2, value2), ...],
        #                              [(strrangeset1, value1), (strrangeset2, value2), ...],
        #                              ...]
        #   where each inner list is a list of (RangeSet, corresponding_value) tuples.
        #   Each inner list corresponds to a different, mutually-incomparable, type of Range.
        # We use _values to cross-reference with while adding new ranges, to avoid having to search the entire
        #   _ranges for the value we want to point to.
        # Meanwhile, _ranges is a list-of-lists instead of just a list, so that we can accommodate ranges of
        #   different types (e.g. a RangeSet of ints and a RangeSet of strings) pointing to the same values.
        self._values = _UnhashableFriendlyDict()
        if identity:
            # switch the value-similarity comparison from == to `is`
            self._values._operator = is_
        if iterable is RangeDict._sentinel:
            # no source given: start empty
            self._rangesets = _LinkedList()
        elif isinstance(iterable, RangeDict):
            # copy-construct: shallow-copy the rangeset lists so the copies are independent
            self._values.update({val: rngsets[:] for val, rngsets in iterable._values.items()})
            self._rangesets = _LinkedList([rngset.copy() for rngset in iterable._rangesets])
        elif isinstance(iterable, dict):
            self._rangesets = _LinkedList()
            for rng, val in iterable.items():
                if _is_iterable_non_string(rng):
                    # a key like [Range(1, 2), Range('a', 'b')] maps several ranges to one value
                    for r in rng:
                        self.add(r, val)
                else:
                    self.add(rng, val)
        else:
            try:
                assert(_is_iterable_non_string(iterable))  # creative method of avoiding code reuse!
                self._rangesets = _LinkedList()
                for rng, val in iterable:
                    # this should not produce an IndexError. It produces a TypeError instead.
                    # (or a ValueError in case of too many to unpack. Which is fine because it screens for 3-tuples)
                    if _is_iterable_non_string(rng):
                        # this allows constructing with e.g. rng=[Range(1, 2), Range('a', 'b')], which makes sense
                        for r in rng:
                            self.add(r, val)
                    else:
                        self.add(rng, val)
            except (TypeError, ValueError, AssertionError):
                raise ValueError("Expected a dict, RangeDict, or iterable of 2-tuples")
        # seed the sentinel entry; it maps to an empty list, so popempty() removes it again immediately
        self._values[RangeDict._sentinel] = []
        self.popempty()
    def add(self, rng: Rangelike, value: V) -> None:
        """
        Add the single given Range/RangeSet to correspond to the given value.
        If the given Range overlaps with a Range that is already contained
        within this RangeDict, then the new range takes precedence.
        To add multiple Ranges of the same type, pack them into a RangeSet
        and pass that.
        To add a list of multiple Ranges of different types, use `.update()`
        instead. Using this method instead will produce a `TypeError`.
        If an empty Range is given, then this method does nothing.
        :param rng: Rangekey to add
        :param value: value to add corresponding to the given Rangekey
        :raises TypeError: if rng cannot be converted to a RangeSet
        """
        # copy the range and get it into an easy-to-work-with form
        try:
            rng = RangeSet(rng)
        except TypeError:
            raise TypeError("argument 'rng' for .add() must be able to be converted to a RangeSet")
        if rng.isempty():
            return
        # first, remove this range from any existing range
        short_circuit = False
        for rngsetlist in self._rangesets:
            # rngsetlist is a tuple (_LinkedList(ranges), value)
            for rngset in rngsetlist:
                # rngset is a (RangeSet, value) tuple; element 0 is the RangeSet to carve out of
                try:
                    rngset[0].discard(rng)
                    short_circuit = True  # (naively) assume only one type of rngset will be compatible
                except TypeError:
                    pass
            if short_circuit:
                # discarding may have emptied some rangesets; purge them before re-inserting
                self.popempty()
                break
        # then, add it back in depending on whether it shares an existing value or not.
        if value in self._values:
            # duplicate value. More than one range must map to it.
            existing_rangesets = self._values[value]
            # existing_rangesets is a list (not _LinkedList) of RangeSets that correspond to value.
            # if there's already a whole RangeSet pointing to value, then simply add to that RangeSet
            for rngset in existing_rangesets:
                try:
                    # ...once we find the RangeSet of the right type
                    rngset.add(rng)
                    # And then bubble it into place in whichever _LinkedList would have contained it.
                    # This is one empty list traversal for every non-modified _LinkedList, and one gnomesort
                    # for the one we really want. A little time loss but not that much. Especially not
                    # any extra timeloss for single-typed RangeDicts.
                    self._sort_ranges()
                    # And short-circuit, since we've already dealt with the complications and don't need to
                    # do any further modification of _values or _rangesets
                    return
                except TypeError:
                    pass
            # if we didn't find a RangeSet of the right type, then we must add rng as a new RangeSet of its own type.
            # add a reference in _values
            self._values[value].append(rng)
        else:
            # new value. This is easy, we just need to add a value for it:
            self._values[value] = [rng]
        # Now that we've added our new RangeSet into _values, we need to make sure it's accounted for in _rangesets
        # we will first try to insert it into all our existing rangesets
        for rngsetlist in self._rangesets:
            # rngsetlist is a _LinkedList of (RangeSet, value) tuples
            # [(rangeset0, value0), (rangeset1, value1), ...]
            try:
                # "try" == "assess comparability with the rest of the RangeSets in this _LinkedList".
                # This is checked via trying to execute a dummy comparison with the first RangeSet in this category,
                # and seeing if it throws a TypeError.
                # Though it's kinda silly, this is probably the best way to handle this. See:
                # https://stackoverflow.com/q/57717100/2648811
                _ = rng < rngsetlist[0][0]
                # If it doesn't raise an error, then it's comparable and we're good.
                # Add it, bubble it to sorted order via .gnomesort(), and return.
                rngsetlist.append((rng, value))
                rngsetlist.gnomesort()
                return
            except TypeError:
                pass
        # if no existing rangeset accepted it, then we need to add one.
        # singleton _LinkedList containing just (rng, value), appended to self._rangesets
        self._rangesets.append(_LinkedList(((rng, value),)))
def update(self, iterable: Union['RangeDict', Dict[Rangelike, V], Iterable[Tuple[Rangelike, V]]]) -> None:
"""
Adds the contents of the given iterable (either another RangeDict, a
`dict` mapping Range-like objects to values, or a list of 2-tuples
`(range-like, value)`) to this RangeDict.
:param iterable: An iterable containing keys and values to add to this RangeDict
"""
# coerce to RangeDict and add that
if not isinstance(iterable, RangeDict):
iterable = RangeDict(iterable)
for value, rangesets in iterable._values.items():
for rngset in rangesets:
self.add(rngset, value)
    def getitem(self, item: T) -> Tuple[List[RangeSet], RangeSet, Range, V]:
        """
        Returns both the value corresponding to the given item, the Range
        containing it, and the set of other contiguous ranges that would
        have also yielded the same value, as a 4-tuple
        `([RangeSet1, Rangeset2, ...], RangeSet, Range, value)`.
        In reverse order, that is
          - the value corresponding to item
          - the single continuous range directly containing the item
          - the RangeSet directly containing the item and corresponding
             to the value
          - a list of all RangeSets (of various non-mutually-comparable
             types) that all correspond to the value. Most of the time,
             this will be a single-element list, if only one type of Range
             is used in the RangeDict. Otherwise, if ranges of multiple
             types (e.g. int ranges, string ranges) correspond to the same
             value, this list will contain all of them.
        Using `.get()`, `.getrange()`, `.getrangeset()`, or
        `.getrangesets()` to isolate just one of those return values is
        usually easier. This method is mainly used internally.
        Raises a `KeyError` if the desired item is not found.
        :param item: item to search for
        :return: a 4-tuple (keys with same value, containing RangeSet, containing Range, value)
        """
        for rngsets in self._rangesets:
            # rngsets is a _LinkedList of (RangeSet, value) tuples
            for rngset, value in rngsets:
                try:
                    # getrange() raises IndexError when item is not in this rangeset
                    rng = rngset.getrange(item)
                    return self._values[value], rngset, rng, value
                except IndexError:
                    # try RangeSets of the same type, corresponding to other values
                    continue
                except TypeError:
                    # try RangeSets of a different type
                    break
        raise KeyError(f"'{item}' was not found in any range")
def getrangesets(self, item: T) -> List[RangeSet]:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns a list of all RangeSets in this RangeDict that
correspond to that value.
Most of the time, this will be a single-element list, if only one
type of Range is used in the RangeDict. Otherwise, if ranges of
multiple types (e.g. int ranges, string ranges) correspond to the
same value, this list will contain all of them.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: all RangeSets in this RangeDict that correspond to the same value as the given item
"""
return self.getitem(item)[0]
def getrangeset(self, item: T) -> RangeSet:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns the RangeSet containing the given item that
corresponds to that value.
To find other RangeSets of other types that correspond to the same
value, use `.getrangesets()` instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: the RangeSet key containing the given item
"""
return self.getitem(item)[1]
def getrange(self, item: T) -> Range:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns the single contiguous range containing the given item
that corresponds to that value.
To find the RangeSet of all Ranges that correspond to that item,
use `.getrangeset()` instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: the Range most directly containing the given item
"""
return self.getitem(item)[2]
def get(self, item: T, default: Any = _sentinel) -> Union[V, Any]:
"""
Returns the value corresponding to the given item, based on
the most recently-added Range containing it.
The `default` argument is optional.
Like Python's built-in `dict`, if `default` is given, returns that if
`item` is not found.
Otherwise, raises a `KeyError`.
:param item: item to search for
:param default: optionally, a value to return, if item is not found
(if not provided, raises a KeyError if not found)
:return: the value corrsponding to the item, or default if item is not found
"""
try:
return self.getitem(item)[3]
except KeyError:
if default is not RangeDict._sentinel:
return default
raise
    def getoverlapitems(self, rng: Rangelike) -> List[Tuple[List[RangeSet], RangeSet, V]]:
        """
        Returns a list of 3-tuples
            [([RangeSet1, ...], RangeSet, value), ...]
        corresponding to every distinct rangekey of this RangeDict that
        overlaps the given range.
        In reverse order, for each tuple, that is
          - the value corresponding to the rangeset
          - the RangeSet corresponding to the value that intersects the given range
          - a list of all RangeSets (of various non-mutually-comparable
             types) that all correspond to the value. Most of the time,
             this will be a single-element list, if only one type of Range
             is used in the RangeDict. Otherwise, if ranges of multiple
             types (e.g. int ranges, string ranges) correspond to the same
             value, this list will contain all of them.
        Using `.getoverlap()`, `.getoverlapranges()`, or
        `.getoverlaprangesets()`
        to isolate just one of those return values is
        usually easier. This method is mainly used internally.
        :param rng: Rangelike to search for
        :return: a list of 3-tuples (Rangekeys with same value, containing RangeSet, value)
        """
        ret = []
        for rngsets in self._rangesets:
            # rngsets is a _LinkedList of (RangeSet, value) tuples
            for rngset, value in rngsets:
                try:
                    # a non-empty (truthy) intersection means this rangekey overlaps rng
                    if rngset.intersection(rng):
                        ret.append((self._values[value], rngset, value))
                except TypeError:
                    # incomparable type: move on to the next group of rangesets
                    break
            # do NOT except ValueError - if `rng` is not rangelike, then error should be thrown.
        return ret
def getoverlap(self, rng: Rangelike) -> List[V]:
"""
Returns a list of values corresponding to every distinct
rangekey of this RangeDict that overlaps the given range.
:param rng: Rangelike to search for
:return: a list of values corresponding to each rangekey intersected by rng
"""
return [t[2] for t in self.getoverlapitems(rng)]
def getoverlapranges(self, rng: Rangelike) -> List[RangeSet]:
"""
Returns a list of all rangekeys in this RangeDict that intersect with
the given range.
:param rng: Rangelike to search for
:return: a list of all RangeSet rangekeys intersected by rng
"""
return [t[1] for t in self.getoverlapitems(rng)]
def getoverlaprangesets(self, rng: Rangelike) -> List[List[RangeSet]]:
"""
Returns a list of RangeSets corresponding to the same value as every
rangekey that intersects the given range.
:param rng: Rangelike to search for
:return: a list lists of rangesets that correspond to the same values as every rangekey intersected by rng
"""
return [t[0] for t in self.getoverlapitems(rng)]
def getvalue(self, value: V) -> List[RangeSet]:
"""
Returns the list of RangeSets corresponding to the given value.
Raises a `KeyError` if the given value is not corresponded to by
any RangeSets in this RangeDict.
:param value: value to search for
:return: a list of rangekeys that correspond to the given value
"""
try:
return self._values[value]
except KeyError:
raise KeyError(f"value '{value}' is not present in this RangeDict")
def set(self, item: T, new_value: V) -> V:
"""
Changes the value corresponding to the given `item` to the given
`new_value`, such that all ranges corresponding to the old value
now correspond to the `new_value` instead.
Returns the original, overwritten value.
If the given item is not found, raises a `KeyError`.
:param item: item to search for
:param new_value: value to set for all rangekeys sharing the same value as item corresponds to
:return: the previous value those rangekeys corresponded to
"""
try:
old_value = self.get(item)
except KeyError:
raise KeyError(f"Item '{item}' is not in any Range in this RangeDict")
self.setvalue(old_value, new_value)
return old_value
def setvalue(self, old_value: V, new_value: V) -> None:
"""
Changes all ranges corresponding to the given `old_value` to correspond
to the given `new_value` instead.
Raises a `KeyError` if the given `old_value` isn't found.
:param old_value: value to change for all keys that correspond to it
:param new_value: value to replace it with
"""
try:
rangesets = list(self._values[old_value])
except KeyError:
raise KeyError(f"Value '{old_value}' is not in this RangeDict")
for rngset in rangesets:
self.add(rngset, new_value)
    def popitem(self, item: T) -> Tuple[List[RangeSet], RangeSet, Range, V]:
        """
        Returns the value corresponding to the given item, the Range containing
        it, and the set of other contiguous ranges that would have also yielded
        the same value, as a 4-tuple
        `([RangeSet1, Rangeset2, ...], RangeSet, Range, value)`.
        In reverse order, that is
          - the value corresponding to item
          - the single continuous range directly containing the item
          - the RangeSet directly containing the item and corresponding to the
             value
          - a list of all RangeSets (of various non-mutually-comparable types)
             that all correspond to the value. Most of the time, this will be a
             single-element list, if only one type of Range is used in the
             RangeDict. Otherwise, if ranges of multiple types (e.g. int ranges,
             string ranges) correspond to the same value, this list will contain
             all of them.
        Also removes all of the above from this RangeDict.
        While this method is used a lot internally, it's usually easier to
        simply use `.pop()`, `.poprange()`, `.poprangeset()`, or
        `.poprangesets()` to get the single item of interest.
        Raises a KeyError if the desired item is not found.
        :param item: item to search for
        :return: a 4-tuple (keys with same value, containing RangeSet, containing Range, value)
        """
        # search for item linked list-style
        for rngsetlist in self._rangesets:
            # rngsetlist is a _LinkedList of (RangeSet, value) tuples
            cur = rngsetlist.first
            while cur:
                try:
                    # cur.value is a (RangeSet, value) tuple; getrange() raises IndexError on a miss
                    rng = cur.value[0].getrange(item)
                    # hit: unlink this node and drop its value (and all that value's rangesets)
                    rngsetlist.pop_node(cur)
                    rngsets = self._values.pop(cur.value[1])
                    self.popempty()
                    return rngsets, cur.value[0], rng, cur.value[1]
                except IndexError:
                    # try the next range correspondence
                    cur = cur.next
                    continue
                except TypeError:
                    # try ranges of a different type
                    break
        raise KeyError(f"'{item}' was not found in any range")
def poprangesets(self, item: T) -> List[RangeSet]:
"""
Finds the value to which the given item corresponds, and returns the
list of RangeSets that correspond to that value (see
`.getrangesets()`).
Also removes the value, and all RangeSets from this RangeDict. To
remove just one range and leave the rest intact, use `.remove()`
instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: all RangeSets in this RangeDict that correspond to the same value as the given item
"""
return self.popitem(item)[0]
def poprangeset(self, item: T) -> RangeSet:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns the RangeSet containing the given item that
corresponds to that value.
Also removes the value and all ranges that correspond to it from this
RangeDict. To remove just one range and leave the rest intact, use
`.remove()` instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: the RangeSet key containing the given item
"""
return self.popitem(item)[1]
def poprange(self, item: T) -> Range:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns the single contiguous range containing the given item
that corresponds to that value.
Also removes the value and all ranges that correspond to it from this
RangeDict. To remove just one range and leave the rest intact, use
`.remove()` instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: the Range containing the given item
"""
return self.popitem(item)[2]
def pop(self, item: T, default: Any = _sentinel) -> Union[V, Any]:
"""
Returns the value corresponding to the most recently-added range that
contains the given item. Also removes the returned value and all
ranges corresponding to it from this RangeDict.
The argument `default` is optional, just like in python's built-in
`dict.pop()`, if default is given, then if the item is not found,
returns that instead.
Otherwise, raises a `KeyError`.
:param item: item to search for
:param default: optionally, a value to return, if item is not found
(if not provided, raises a KeyError if not found)
:return: the value corrsponding to the item, or default if item is not found
"""
try:
return self.popitem(item)[3]
except KeyError:
if default != RangeDict._sentinel:
return default
raise
def popvalue(self, value: V) -> List[RangeSet]:
"""
Removes all ranges corresponding to the given value from this RangeDict,
as well as the value itself. Returns a list of all the RangeSets of
various types that corresponded to the given value.
:param value: value to purge
:return: all RangeSets in this RangeDict that correspond to the given value
"""
# find a RangeSet corresponding to the value, which we can use as a key
sample_item = self._values[value][0]
# use that RangeSet to do the regular pop() function
return self.popitem(sample_item)[0]
    def popempty(self) -> None:
        """
        Removes all empty ranges from this RangeDict, as well as all values
        that have no corresponding ranges. The RangeDict calls this method on
        itself after most operations that modify it, so calling it manually,
        while possible, will usually do nothing.
        """
        # We start by traversing _ranges and removing all empty things.
        rngsetlistnode = self._rangesets.first
        while rngsetlistnode:
            # rngsetlistnode is a Node(_LinkedList((RangeSet, value)))
            rngsetnode = rngsetlistnode.value.first
            # First, empty all RangeSets
            while rngsetnode:
                # rngsetnode is a Node((RangeSet, value))
                rngset = rngsetnode.value[0]
                # popempty() on the RangeSet in rngsetnode
                rngset.popempty()
                # if the RangeSet is empty, then remove it.
                if rngset.isempty():
                    rngsetlistnode.value.pop_node(rngsetnode)
                    # also remove this RangeSet from .values()
                    self._values[rngsetnode.value[1]].remove(rngset)
                # deletion while traversing is fine in a linked list only
                # (the popped node still holds its .next pointer)
                rngsetnode = rngsetnode.next
            # Next, check for an empty list of RangeSets
            if len(rngsetlistnode.value) == 0:
                self._rangesets.pop_node(rngsetlistnode)
                # in this case, there are no RangeSets to pop, so we can leave ._values alone
            # and finally, advance to the next list of RangeSets
            rngsetlistnode = rngsetlistnode.next
        # Once we've removed all RangeSets, we then remove all values with no corresponding Range-like objects
        # (list() snapshots the keys since we mutate the dict while iterating)
        for value in list(self._values.keys()):
            if not self._values[value]:
                self._values.pop(value)
def remove(self, rng: Rangelike):
"""
Removes the given Range or RangeSet from this RangeDict, leaving behind
'empty space'.
Afterwards, empty ranges, and values with no remaining corresponding
ranges, will be automatically removed.
:param rng: Range to remove as rangekeys from this dict
"""
# no mutation unless the operation is successful
rng = RangeSet(rng)
temp = self.copy()
# do the removal on the copy
for rngsetlist in temp._rangesets:
for rngset, value in rngsetlist:
try:
rngset.discard(rng)
except TypeError:
break
temp.popempty()
self._rangesets, self._values = temp._rangesets, temp._values
def isempty(self) -> bool:
"""
:return: `True` if this RangeDict contains no values, and `False` otherwise.
"""
return not self._values
def ranges(self) -> List[RangeSet]:
"""
Returns a list of RangeSets that correspond to some value in this
RangeDict, ordered as follows:
All Rangesets of comparable types are grouped together, with
order corresponding to the order in which the first RangeSet of
the given type was added to this RangeDict (earliest first).
Within each such group, RangeSets are ordered in increasing order
of their lower bounds.
This function is analagous to Python's built-in `dict.keys()`
:return: a list of RangeSet keys in this RangeDict
"""
return [rngset for rngsetlist in self._rangesets for rngset, value in rngsetlist]
def values(self) -> List[V]:
"""
Returns a list of values that are corresponded to by some RangeSet in
this RangeDict, ordered by how recently they were added (via .`add()`
or `.update()`) or set (via `.set()` or `.setvalue()`), with the
oldest values being listed first.
This function is synonymous to Python's built-in `dict.values()`
:return: a list of values contained in this RangeDict
"""
return list(self._values.keys())
def items(self) -> List[Tuple[Any, Any]]:
"""
:return: a list of 2-tuples `(list of ranges corresponding to value, value)`, ordered
by time-of-insertion of the values (see `.values()` for more detail)
"""
return [(rngsets, value) for value, rngsets in self._values.items()]
def clear(self) -> None:
"""
Removes all items from this RangeDict, including all of the Ranges
that serve as keys, and the values to which they correspond.
"""
self._rangesets = _LinkedList()
self._values = {}
def copy(self) -> 'RangeDict':
"""
:return: a shallow copy of this RangeDict
"""
return RangeDict(self)
    def _sort_ranges(self) -> None:
        """ Helper method to gnomesort all _LinkedLists-of-RangeSets. """
        # gnomesort is near-linear here because each list is already almost sorted
        for linkedlist in self._rangesets:
            linkedlist.gnomesort()
    def __setitem__(self, key: Rangelike, value: V):
        """
        Equivalent to :func:`~RangeDict.add`: `d[rng] = value` maps the given
        range-like key to the given value.
        """
        self.add(key, value)
    def __getitem__(self, item: T):
        """
        Equivalent to :func:`~RangeDict.get`. If `item` is a range, then this will only
        return a corresponding value if `item` is completely contained by one
        of this RangeDict's rangekeys. To get values corresponding to all
        overlapping ranges, use `.getoverlap(item)` instead.

        :raises KeyError: if the item is not contained in any rangekey
        """
        return self.get(item)
def __contains__(self, item: T):
"""
:return: True if the given item corresponds to any single value in this RangeDict, False otherwise
"""
sentinel2 = object()
return not (self.get(item, sentinel2) is sentinel2)
# return any(item in rngset for rngsetlist in self._rangesets for (rngset, value) in rngsetlist)
    def __len__(self) -> int:
        """
        Returns the number of values, not the number of unique Ranges,
        since determining how to count Ranges is Hard

        :return: the number of unique values contained in this RangeDict
        """
        return len(self._values)
    def __eq__(self, other: 'RangeDict') -> bool:
        """
        Tests whether this RangeDict is equal to the given RangeDict (has the same keys and values).
        Note that this always tests equality for values, not identity, regardless of whether this
        RangeDict was constructed in 'strict' mode.

        :param other: RangeDict to compare against
        :return: True if this RangeDict is equal to the given RangeDict, False otherwise
        """
        # Actually comparing two LinkedLists together is hard, and all relevant information should be in _values anyway
        # Ordering is the big challenge here - you can't order the nested LinkedLists.
        # But what's important for equality between RangeDicts is that they have the same key-value pairs, which is
        # properly checked just by comparing _values
        return isinstance(other, RangeDict) and self._values == other._values  # and self._rangesets == other._rangesets
def __ne__(self, other: 'RangeDict') -> bool:
"""
:param other: RangeDict to compare against
:return: False if this RangeDict is equal to the given RangeDict, True otherwise
"""
return not self.__eq__(other)
def __bool__(self) -> bool:
"""
:return: False if this RangeDict is empty, True otherwise
"""
return not self.isempty()
    def __str__(self):
        # nested f-strings, whee
        # format: {{rng, rng, ...}: value, {rng, ...}: value, ...}
        # one {...} group per value, merging all of its rangesets' ranges
        return f"""{{{
        ', '.join(
            f"{{{', '.join(str(rng) for rngset in rngsets for rng in rngset)}}}: {value}"
            for value, rngsets in self._values.items()
        )
        }}}"""
    def __repr__(self):
        # same layout as __str__ but with repr() of ranges/values and explicit
        # RangeDict{...}/RangeSet{...} labels
        return f"""RangeDict{{{
        ', '.join(
            f"RangeSet{{{', '.join(repr(rng) for rngset in rngsets for rng in rngset)}}}: {repr(value)}"
            for value, rngsets in self._values.items()
        )
        }}}"""
| 46.440618 | 120 | 0.616577 | 38,793 | 0.992072 | 0 | 0 | 0 | 0 | 0 | 0 | 28,102 | 0.718666 |
98985142e7c8b8249e12ffcb7bdd9280a12f0e9c | 428 | py | Python | cases/urls.py | testyourcodenow/core | 05865b02ff7e60ffd3b30652161b3523046b9696 | [
"MIT"
] | 1 | 2020-05-10T06:40:58.000Z | 2020-05-10T06:40:58.000Z | cases/urls.py | testyourcodenow/core | 05865b02ff7e60ffd3b30652161b3523046b9696 | [
"MIT"
] | 25 | 2020-05-03T08:10:38.000Z | 2021-09-22T18:59:29.000Z | cases/urls.py | testyourcodenow/core | 05865b02ff7e60ffd3b30652161b3523046b9696 | [
"MIT"
] | 10 | 2020-05-03T08:25:56.000Z | 2020-06-03T06:49:34.000Z | from django.urls import path
from cases.api.get_visuals_data import UpdateVisualsData
from cases.api.kenyan_cases import KenyanCaseList
from cases.api.visuals import VisualList
# URL routes for the cases app.
urlpatterns = [
    path('kenyan/all', KenyanCaseList.as_view(), name='Historical data'),
    # NOTE(review): this route reuses the name 'Historical data' from the
    # route above; duplicate URL names make reverse() lookups ambiguous —
    # confirm whether a distinct name was intended.
    path('history/', VisualList.as_view(), name='Historical data'),
    path('update/history', UpdateVisualsData.as_view(), name='Update Historical data'),
]
| 32.923077 | 87 | 0.766355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.224299 |
9898e2238b703d003f63b12f69d4c95669f7ebc5 | 1,198 | py | Python | tests/test_cloudfront_distribution.py | aexeagmbh/cfn-lint-rules | a7f39f3bab6d7e1aca28ba8bb8afec9965652ab4 | [
"MIT"
] | 1 | 2020-11-15T19:09:41.000Z | 2020-11-15T19:09:41.000Z | tests/test_cloudfront_distribution.py | aexeagmbh/cfn-lint-rules | a7f39f3bab6d7e1aca28ba8bb8afec9965652ab4 | [
"MIT"
] | 6 | 2019-03-21T15:13:14.000Z | 2021-06-02T09:32:27.000Z | tests/test_cloudfront_distribution.py | aexeagmbh/cfn-lint-rules | a7f39f3bab6d7e1aca28ba8bb8afec9965652ab4 | [
"MIT"
] | null | null | null | from typing import List
import pytest
from cfn_lint_ax.rules import (
CloudfrontDistributionComment,
CloudfrontDistributionLogging,
)
from tests.utils import BAD_TEMPLATE_FIXTURES_PATH, ExpectedError, assert_all_matches
# Each parametrize case pairs a bad-template fixture file with the list of
# (line number, rule class, message) errors it is expected to produce.
@pytest.mark.parametrize(
    "filename,expected_errors",
    [
        (
            "cloudfront_distribution_without_logging_configuration.yaml",
            [
                (
                    6,
                    CloudfrontDistributionLogging,
                    "Property Resources/Distribution/Properties/DistributionConfig/Logging is missing",
                ),
            ],
        ),
        (
            "cloudfront_distribution_without_comment.yaml",
            [
                (
                    6,
                    CloudfrontDistributionComment,
                    "Property Resources/Distribution/Properties/DistributionConfig/Comment is missing",
                ),
            ],
        ),
    ],
)
def test_bad_cloudfront_distribution_config(
    filename: str, expected_errors: List[ExpectedError]
) -> None:
    """Lint a known-bad fixture template and check the reported rule matches."""
    filename = (BAD_TEMPLATE_FIXTURES_PATH / filename).as_posix()
    assert_all_matches(filename, expected_errors)
| 27.860465 | 103 | 0.603506 | 0 | 0 | 0 | 0 | 965 | 0.805509 | 0 | 0 | 296 | 0.247078 |
9899186911f7d1599b3dc4fbf817f141da8c06f6 | 3,661 | py | Python | holdit/records.py | caltechlibrary/holdit | 474165764e3514303dfd118d1beb0b6570fb6e13 | [
"BSD-3-Clause"
] | 2 | 2019-01-31T21:47:13.000Z | 2020-11-18T04:28:58.000Z | holdit/records.py | caltechlibrary/holdit | 474165764e3514303dfd118d1beb0b6570fb6e13 | [
"BSD-3-Clause"
] | 4 | 2018-10-04T17:56:48.000Z | 2019-01-10T03:20:13.000Z | holdit/records.py | caltechlibrary/holdit | 474165764e3514303dfd118d1beb0b6570fb6e13 | [
"BSD-3-Clause"
] | null | null | null | '''
records.py: base record class for holding data
Authors
-------
Michael Hucka <mhucka@caltech.edu> -- Caltech Library
Copyright
---------
Copyright (c) 2018 by the California Institute of Technology. This code is
open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
import holdit
from holdit.debug import log
# Class definitions.
# .............................................................................
# The particular set of fields in this object came from the TIND holds page
# contents and a few additional fields kept in the tracking spreadsheet by
# the Caltech Library circulation staff.
class HoldRecord(object):
    '''Base class for records describing a hold request.

    Every field is initialized to the empty string; values are filled in
    later from the TIND holds page and the circulation staff's tracking
    spreadsheet.  All fields are stored as strings, including the dates
    and the counts.
    '''

    def __init__(self):
        # Who asked for the item.
        self.requester_name = self.requester_type = self.requester_url = ''
        # The item being requested.
        self.item_title = self.item_details_url = self.item_record_url = ''
        self.item_call_number = self.item_barcode = ''
        self.item_location_name = self.item_location_code = ''
        self.item_loan_status = self.item_loan_url = ''
        # Relevant dates, kept as strings.
        self.date_requested = self.date_due = self.date_last_notice_sent = ''
        # Counters, also kept as strings.
        self.overdue_notices_count = self.holds_count = ''
# Utility functions.
# .............................................................................
def records_diff(known_records, new_records):
    '''Returns the records from 'new_records' missing from 'known_records'.

    Two records are considered the same request when same_request() says so,
    i.e. they agree on item barcode, request date and requester name.

    :param known_records: iterable of previously-seen hold records
    :param new_records: iterable of candidate hold records
    :return: list of records from new_records not found in known_records
    '''
    if __debug__: log('Diffing known records with new records')
    # any() short-circuits on the first match, instead of materializing the
    # full list of matches for every candidate as the previous version did.
    diffs = [candidate for candidate in new_records
             if not any(same_request(record, candidate) for record in known_records)]
    if __debug__: log('Found {} different records', len(diffs))
    return diffs
def same_request(record1, record2):
    '''Return True when two hold records describe the same request.

    Identity is based on the item barcode, the request date and the
    requester's name, compared in that order.
    '''
    keys = ('item_barcode', 'date_requested', 'requester_name')
    return all(getattr(record1, key) == getattr(record2, key) for key in keys)
def records_filter(method = 'all'):
    '''Returns a predicate suitable for Python's filter().

    The returned callable takes a TindRecord and returns True or False,
    depending on whether the record should be included in the output.
    '''
    # FIXME. It seemed like it might be useful to provide filtering features
    # in the future, but this is currently a no-op: every record passes,
    # regardless of the value of 'method'.
    def _accept_all(record):
        return True
    return _accept_all
# Debugging aids.
def print_records(records_list, specific = None):
    '''Print a human-readable summary of each record in 'records_list'.

    The 'specific' parameter is currently unused; it is kept for
    backward compatibility with existing callers.
    '''
    template = ('title: {}\nbarcode: {}\nlocation: {}\ndate requested: {}\n'
                'requester name: {}\nstatus in TIND: {}\n\n')
    for record in records_list:
        fields = (record.item_title,
                  record.item_barcode,
                  record.item_location_code,
                  record.date_requested,
                  record.requester_name,
                  record.item_loan_status)
        print(template.format(*fields))
def find_record(barcode, records_list):
    '''Return the first record in 'records_list' whose item_barcode equals
    'barcode', or None when no record matches.'''
    matches = (record for record in records_list
               if record.item_barcode == barcode)
    return next(matches, None)
| 35.201923 | 116 | 0.594373 | 1,040 | 0.284075 | 0 | 0 | 0 | 0 | 0 | 0 | 1,633 | 0.446053 |
989a09a83b041b1f75c98b22e68018d962171a76 | 12,080 | py | Python | pyroSAR/gamma/srtm.py | ibaris/pyroSAR | 04924500c61674a68e9dc56c1f71b7dd195c480a | [
"MIT"
] | 1 | 2020-03-09T10:33:06.000Z | 2020-03-09T10:33:06.000Z | pyroSAR/gamma/srtm.py | ibaris/pyroSAR | 04924500c61674a68e9dc56c1f71b7dd195c480a | [
"MIT"
] | null | null | null | pyroSAR/gamma/srtm.py | ibaris/pyroSAR | 04924500c61674a68e9dc56c1f71b7dd195c480a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
##############################################################
# preparation of srtm data for use in gamma
# module of software pyroSAR
# John Truckenbrodt 2014-18
##############################################################
"""
The following tasks are performed by executing this script:
-reading of a parameter file dem.par
--see object par for necessary values; file is automatically created by starting the script via the GUI
-if necessary, creation of output and logfile directories
-generation of a DEM parameter file for each .hgt (SRTM) file in the working directory or its subdirectories
--the corresponding GAMMA command is create_dem_par, which is interactive. the list variables dempar and dempar2 are piped to the command line for automation
-if multiple files are found, mosaicing is performed
-replacement and interpolation of missing values
-transformation from equiangular (EQA) to UTM projection using a SLC parameter file
"""
import sys
if sys.version_info >= (3, 0):
from urllib.request import urlopen
else:
from urllib2 import urlopen
import os
import re
import shutil
import zipfile as zf
from spatialist.envi import HDRobject, hdr
from spatialist import raster
from . import ISPPar, process, UTM, slc_corners
import pyroSAR
from pyroSAR.ancillary import finder, run
def fill(dem, dem_out, logpath=None, replace=False):
    """
    Replace nodata values in a GAMMA DEM and interpolate the resulting gaps.

    Runs the GAMMA programs replace_values (twice) and interp_ad: pixels of
    value 0 are first moved to 1, then -32768 is mapped to 0, and the
    zero-valued gaps are interpolated.  The input's parameter file is copied
    for the output and an ENVI header is written for it.

    :param dem: input DEM file name (a matching '<dem>.par' must exist)
    :param dem_out: output file name of the filled DEM
    :param logpath: directory for GAMMA logfiles (None: no logfile)
    :param replace: delete the input DEM and its sidecar files afterwards?
    """
    width = ISPPar(dem + '.par').width
    path_dem = os.path.dirname(dem_out)
    rpl_flg = 0
    # GAMMA data-format code passed to replace_values/interp_ad; presumably
    # matches the DEM's pixel type — confirm against the GAMMA documentation.
    dtype = 4
    # replace values: first move true zeros out of the way (0 -> 1) ...
    value = 0
    new_value = 1
    process(['replace_values', dem, value, new_value, dem + '_temp', width, rpl_flg, dtype], path_dem, logpath)
    # ... then map the nodata marker -32768 to 0
    value = -32768
    new_value = 0
    process(['replace_values', dem + '_temp', value, new_value, dem + '_temp2', width, rpl_flg, dtype], path_dem, logpath)
    # interpolate missing values
    r_max = 9
    np_min = 40
    np_max = 81
    w_mode = 2
    process(['interp_ad', dem + '_temp2', dem_out, width, r_max, np_min, np_max, w_mode, dtype], path_dem, logpath)
    # remove temporary files
    os.remove(dem+'_temp')
    os.remove(dem+'_temp2')
    # duplicate parameter file for newly created dem
    shutil.copy(dem+'.par', dem_out+'.par')
    # create ENVI header file
    hdr(dem_out+'.par')
    if replace:
        # remove the input DEM together with any existing sidecar files
        for item in [dem+x for x in ['', '.par', '.hdr', '.aux.xml'] if os.path.isfile(dem+x)]:
            os.remove(item)
def transform(infile, outfile, posting=90):
    """
    Transform an SRTM DEM from EQA (equiangular) to UTM projection.

    The UTM zone and false northing are derived from the input's parameter
    file; the actual reprojection is done by the GAMMA program dem_trans.

    :param infile: input DEM file name (a matching '<infile>.par' must exist)
    :param outfile: output DEM file name; existing output files are removed
    :param posting: target pixel spacing in meters
    """
    # read DEM parameter file
    par = ISPPar(infile + '.par')
    # transform corner coordinate to UTM
    utm = UTM(infile + '.par')
    # remove any pre-existing output so create_dem_par starts clean
    for item in [outfile, outfile+'.par']:
        if os.path.isfile(item):
            os.remove(item)
    # determine false northing from parameter file coordinates
    # (10,000,000 m on the southern hemisphere, 0 otherwise)
    falsenorthing = 10000000. if par.corner_lat < 0 else 0
    # create new DEM parameter file with UTM projection details
    inlist = ['UTM', 'WGS84', 1, utm.zone, falsenorthing, os.path.basename(outfile), '', '', '', '', '', '-{0} {0}'.format(posting), '']
    process(['create_dem_par', outfile + '.par'], inlist=inlist)
    # transform dem
    process(['dem_trans', infile + '.par', infile, outfile + '.par', outfile, '-', '-', '-', 1])
    hdr(outfile+'.par')
def dempar(dem, logpath=None):
    """
    Create a GAMMA parameter text file ('<dem>.par') for a DEM file.

    Currently only EQA and UTM projections with the WGS84 ellipsoid are
    supported; other projections/ellipsoids raise ValueError, unsupported
    pixel types raise IOError.

    :param dem: the DEM file to describe
    :param logpath: directory for GAMMA logfiles (None: no logfile)
    """
    rast = raster.Raster(dem)
    # determine data type (GDAL name -> GAMMA name)
    dtypes = {'Int16': 'INTEGER*2', 'UInt16': 'INTEGER*2', 'Float32': 'REAL*4'}
    if rast.dtype not in dtypes:
        raise IOError('data type not supported')
    else:
        dtype = dtypes[rast.dtype]
    # format pixel posting and top left coordinate
    posting = str(rast.geo['yres'])+' '+str(rast.geo['xres'])
    latlon = str(rast.geo['ymax'])+' '+str(rast.geo['xmin'])
    # evaluate projection
    projections = {'longlat': 'EQA', 'utm': 'UTM'}
    if rast.proj4args['proj'] not in projections:
        raise ValueError('projection not supported (yet)')
    else:
        projection = projections[rast.proj4args['proj']]
    # get ellipsoid
    ellipsoid = rast.proj4args['ellps'] if 'ellps' in rast.proj4args else rast.proj4args['datum']
    if ellipsoid != 'WGS84':
        raise ValueError('ellipsoid not supported (yet)')
    # create list for GAMMA command input; UTM additionally needs the zone
    # and the false northing (10,000,000 m on the southern hemisphere)
    if projection == 'UTM':
        zone = rast.proj4args['zone']
        falsenorthing = 10000000. if rast.geo['ymin'] < 0 else 0
        parlist = [projection, ellipsoid, 1, zone, falsenorthing, os.path.basename(dem), dtype, 0, 1, rast.cols, rast.rows, posting, latlon]
    else:
        parlist = [projection, ellipsoid, 1, os.path.basename(dem), dtype, 0, 1, rast.cols, rast.rows, posting, latlon]
    # execute GAMMA command
    process(['create_dem_par', os.path.splitext(dem)[0] + '.par'], os.path.dirname(dem), logpath, inlist=parlist)
def swap(data, outname):
    """
    Byte-swap an ENVI raster from little to big endian (as required by GAMMA)
    using the GAMMA program swap_bytes, and write a matching ENVI header with
    byte order 1 for the result.

    :param data: input raster file name (must be in ENVI format)
    :param outname: output file name of the byte-swapped raster
    """
    rast = raster.Raster(data)
    dtype = rast.dtype
    if rast.format != 'ENVI':
        raise IOError('only ENVI format supported')
    # bytes per sample for each supported pixel type
    dtype_lookup = {'Int16': 2, 'CInt16': 2, 'Int32': 4, 'Float32': 4, 'CFloat32': 4, 'Float64': 8}
    if dtype not in dtype_lookup:
        raise IOError('data type {} not supported'.format(dtype))
    process(['swap_bytes', data, outname, str(dtype_lookup[dtype])])
    # duplicate the ENVI header and mark the copy as big endian
    header = HDRobject(data+'.hdr')
    header.byte_order = 1
    hdr(header, outname+'.hdr')
def mosaic(demlist, outname, byteorder=1, gammapar=True):
    """
    Mosaic multiple DEMs into a single ENVI raster using gdalwarp.

    :param demlist: list of input DEM file names (at least two)
    :param outname: output file name of the mosaic
    :param byteorder: 1: byte-swap the result to big endian (GAMMA
        convention); any other value leaves it untouched
    :param gammapar: also create a GAMMA DEM parameter file via dempar?
    """
    if len(demlist) < 2:
        raise IOError('length of demlist < 2')
    # take the nodata value of the first input for both source and target
    nodata = str(raster.Raster(demlist[0]).nodata)
    run(['gdalwarp', '-q', '-of', 'ENVI', '-srcnodata', nodata, '-dstnodata', nodata, demlist, outname])
    if byteorder == 1:
        # swap to big endian into a temporary '_swap' file, then move it
        # (and its header) into place of the little-endian original
        swap(outname, outname+'_swap')
        for item in [outname, outname+'.hdr', outname+'.aux.xml']:
            os.remove(item)
        os.rename(outname+'_swap', outname)
        os.rename(outname+'_swap.hdr', outname+'.hdr')
    if gammapar:
        dempar(outname)
def hgt(parfiles):
    """
    Concatenate hgt file names overlapping with multiple SAR scenes.

    Input is a list of GAMMA SAR scene parameter files (or pyroSAR.ID
    objects).  The corner coordinates are read and floored to the next lower
    integer latitude/longitude.  hgt files come in 1-degree equiangular
    format named e.g. N16W094.hgt (pattern [NS][0-9]{2}[EW][0-9]{3}.hgt).
    For the northern and eastern hemisphere the respective absolute latitude
    and longitude values are smaller than the lower left coordinate of the
    SAR image; west and south coordinates are negative, hence the nearest
    lower left integer absolute value is larger.

    :param parfiles: list of pyroSAR.ID objects and/or '.par' file names
    :return: list of hgt tile file names covering all scenes
    """
    lat = []
    lon = []
    for parfile in parfiles:
        if isinstance(parfile, pyroSAR.ID):
            corners = parfile.getCorners()
        elif parfile.endswith('.par'):
            corners = slc_corners(parfile)
        # NOTE(review): if an entry is neither a pyroSAR.ID nor a '.par'
        # file, 'corners' is unbound (first iteration: NameError) or stale
        # from the previous iteration — confirm inputs are always validated
        # upstream.
        lat += [int(float(corners[x]) // 1) for x in ['ymin', 'ymax']]
        lon += [int(float(corners[x]) // 1) for x in ['xmin', 'xmax']]
    # add missing lat/lon values (and add an extra buffer of one degree)
    lat = range(min(lat), max(lat)+1)
    lon = range(min(lon), max(lon)+1)
    # convert coordinates to string with leading zeros and hemisphere identification letter
    lat = [str(x).zfill(2+len(str(x))-len(str(x).strip('-'))) for x in lat]
    lat = [x.replace('-', 'S') if '-' in x else 'N'+x for x in lat]
    lon = [str(x).zfill(3+len(str(x))-len(str(x).strip('-'))) for x in lon]
    lon = [x.replace('-', 'W') if '-' in x else 'E'+x for x in lon]
    # concatenate all formatted latitudes and longitudes with each other as final product
    return [x+y+'.hgt' for x in lat for y in lon]
def makeSRTM(scenes, srtmdir, outname):
    """
    Create a DEM from SRTM tiles.

    Input is a list of pyroSAR.ID objects from which coordinates are read to
    determine the required DEM extent.  Mosaics SRTM DEM tiles (gdalbuildvrt
    + gdal_translate), converts them to GAMMA format and subtracts the
    offset to the WGS84 ellipsoid (srtm2dem), for DEMs downloaded from USGS
    http://gdex.cr.usgs.gov or CGIAR http://srtm.csi.cgiar.org.

    :param scenes: list of pyroSAR.ID objects defining the required extent
    :param srtmdir: directory searched (via finder) for the SRTM hgt tiles
    :param outname: output file name of the final DEM ('.par' and ENVI
        header files are written alongside)
    """
    # intermediate products live in a temporary directory next to the output
    tempdir = outname+'___temp'
    os.makedirs(tempdir)
    hgt_options = hgt(scenes)
    hgt_files = finder(srtmdir, hgt_options)
    # todo: check if really needed
    nodatas = [str(int(raster.Raster(x).nodata)) for x in hgt_files]
    srtm_vrt = os.path.join(tempdir, 'srtm.vrt')
    srtm_temp = srtm_vrt.replace('.vrt', '_tmp')
    srtm_final = srtm_vrt.replace('.vrt', '')
    # build a virtual mosaic, render it to ENVI, then convert to GAMMA format
    run(['gdalbuildvrt', '-overwrite', '-srcnodata', ' '.join(nodatas), srtm_vrt, hgt_files])
    run(['gdal_translate', '-of', 'ENVI', '-a_nodata', -32768, srtm_vrt, srtm_temp])
    process(['srtm2dem', srtm_temp, srtm_final, srtm_final + '.par', 2, '-'], outdir=tempdir)
    shutil.move(srtm_final, outname)
    shutil.move(srtm_final+'.par', outname+'.par')
    hdr(outname+'.par')
    shutil.rmtree(tempdir)
def hgt_collect(parfiles, outdir, demdir=None, arcsec=3):
    """
    Automatic downloading and unpacking of SRTM hgt tiles.

    The tile names required to cover all given scenes are derived via hgt().
    Tiles are first looked up in 'demdir' (if given) and 'outdir'; any that
    are still missing are searched for on the USGS servers, downloaded into
    'outdir' and unzipped.

    :param parfiles: SAR scenes (pyroSAR.ID objects or GAMMA '.par' files)
    :param outdir: directory for (and searched for) downloaded tiles
    :param demdir: optional additional directory searched for existing tiles
    :param arcsec: tile resolution in arc seconds, either 1 or 3
    :return: list of local file names of all relevant hgt tiles
    """
    # concatenate required hgt tile names
    target_ids = hgt(parfiles)
    targets = []
    pattern = '[NS][0-9]{2}[EW][0-9]{3}'
    # if an additional dem directory has been defined, check this directory for required hgt tiles
    if demdir is not None:
        targets.extend(finder(demdir, target_ids))
    # check for additional potentially existing hgt tiles in the defined output directory
    extras = [os.path.join(outdir, x) for x in target_ids
              if os.path.isfile(os.path.join(outdir, x)) and not re.search(x, '\n'.join(targets))]
    targets.extend(extras)
    print('found {} relevant SRTM tiles...'.format(len(targets)))
    # search server for all required tiles, which were not found in the local directories
    if len(targets) < len(target_ids):
        print('searching for additional SRTM tiles on the server...')
        onlines = []
        if arcsec == 1:
            remotes = ['http://e4ftl01.cr.usgs.gov/SRTM/SRTMGL1.003/2000.02.11/']
            remotepattern = pattern+'.SRTMGL1.hgt.zip'
        elif arcsec == 3:
            server = 'http://dds.cr.usgs.gov/srtm/version2_1/SRTM3/'
            remotes = [os.path.join(server, x) for x in ['Africa', 'Australia', 'Eurasia', 'Islands', 'North_America', 'South_America']]
            remotepattern = pattern+'[.]hgt.zip'
        else:
            raise ValueError('argument arcsec must be of value 1 or 3')
        for remote in remotes:
            # BUGFIX: under Python 3 urlopen(...).read() returns bytes, and
            # re.findall with a str pattern on bytes raises TypeError, so the
            # listing must be decoded first (a no-op semantically on Python 2)
            response = urlopen(remote).read().decode('utf-8', errors='ignore')
            items = sorted(set(re.findall(remotepattern, response)))
            for item in items:
                outname = re.findall(pattern, item)[0]+'.hgt'
                if outname in target_ids and outname not in [os.path.basename(x) for x in targets]:
                    onlines.append(os.path.join(remote, item))
        # if additional tiles have been found online, download and unzip them to the local directory
        if len(onlines) > 0:
            print('downloading {} SRTM tiles...'.format(len(onlines)))
            for candidate in onlines:
                localname = os.path.join(outdir, re.findall(pattern, candidate)[0]+'.hgt')
                infile = urlopen(candidate)
                with open(localname+'.zip', 'wb') as outfile:
                    outfile.write(infile.read())
                infile.close()
                with zf.ZipFile(localname+'.zip', 'r') as z:
                    z.extractall(outdir)
                os.remove(localname+'.zip')
                targets.append(localname)
    return targets
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this module, so running
    # the file as a script raises NameError — confirm the intended entry point.
    main()
| 38.107256 | 170 | 0.643543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,219 | 0.432036 |
989cb6547e6be414dacae50d087b530a3693e1f5 | 631 | py | Python | youtrack/test_create_issue.py | JiSoft/python_test_api | e162f84ff67d7848d55ba4d682876fac7f297f9b | [
"MIT"
] | null | null | null | youtrack/test_create_issue.py | JiSoft/python_test_api | e162f84ff67d7848d55ba4d682876fac7f297f9b | [
"MIT"
] | null | null | null | youtrack/test_create_issue.py | JiSoft/python_test_api | e162f84ff67d7848d55ba4d682876fac7f297f9b | [
"MIT"
] | null | null | null | import unittest
from my_test_api import TestAPI
class TestCreateIssue(TestAPI):
    """Integration test: create a YouTrack issue and fetch it back."""

    def test_create_issue(self):
        """PUT a new issue, then GET it by the ID from the Location header."""
        params = {
            'project': 'API',
            'summary': 'test issue by robots',
            'description': 'You are mine ! ',
        }
        response = self.put('/issue/', params)
        # The Location header of the creation response points at the new
        # issue; its ID is the last path component.
        issue_id = response.headers['Location'].split('/')[-1]
        print('Created item ID is ', issue_id)
        # assertEquals is a long-deprecated alias removed in Python 3.12;
        # use assertEqual instead.
        self.assertEqual(response.status_code, 201)
        response = self.get('/issue/' + issue_id)
        self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main() | 26.291667 | 62 | 0.600634 | 533 | 0.844691 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.217116 |
989d14ba8bad9846c10db51fb0c7bf4b880dcf12 | 1,422 | py | Python | test/test_add_contact_to_group.py | havrylyshyn/python_training | 2b1e1a3dd3a2b86ce1068fe52e233dee42b07580 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact_to_group.py | havrylyshyn/python_training | 2b1e1a3dd3a2b86ce1068fe52e233dee42b07580 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact_to_group.py | havrylyshyn/python_training | 2b1e1a3dd3a2b86ce1068fe52e233dee42b07580 | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
from model.group import Group
import random
def test_add_contact_to_group(app, db):
    """Add a random contact to a random group and verify membership via the DB.

    'app' and 'db' are pytest fixtures — presumably the application wrapper
    and a direct database helper; verify against conftest.py.
    """
    # Make sure there is at least one contact and one group to work with.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="contact", lastname="forGroup", address="UA, Kyiv, KPI", homephone="0123456789", email="test@mail.com"))
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="groupForContact", header="header", footer="footer"))
    # Pick an arbitrary contact/group pair and link them.
    contact = random.choice(db.get_contact_list())
    group = random.choice(db.get_group_list())
    app.contact.add_contact_to_group(contact.id, group.id)
    assert object_in_list(contact, db.get_contacts_from_group(group))
    # assert db.get_contacts_from_group(group).__contains__(contact)
def test_add_contact_to_group_2(app, db, orm):
    """Same scenario as test_add_contact_to_group, but membership is checked
    through the 'orm' fixture instead of the object_in_list() helper.
    """
    # Make sure there is at least one contact and one group to work with.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="contact", lastname="forGroup", address="UA, Kyiv, KPI", homephone="0123456789", email="test@mail.com"))
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="groupForContact", header="header", footer="footer"))
    # Pick an arbitrary contact/group pair and link them.
    contact = random.choice(db.get_contact_list())
    group = random.choice(db.get_group_list())
    app.contact.add_contact_to_group(contact.id, group.id)
    assert contact in orm.get_contacts_in_group(group)
def object_in_list(object, list):
    """Return True if 'object' is an element of 'list', False otherwise.

    NOTE: the parameter names shadow the built-ins 'object' and 'list'; they
    are kept unchanged for backward compatibility with existing callers.
    """
    # 'in' already yields a bool — the previous if/else branching was redundant.
    return object in list
| 41.823529 | 149 | 0.716596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.179325 |
989e5e0ff860fef2127fee5afea5afc3f6a62b14 | 35 | py | Python | deadtrees/network/extra/resunetplusplus/__init__.py | cwerner/deadtrees | 15ddfec58c4a40f22f9c1e2424fb535df4d29b03 | [
"Apache-2.0"
] | 1 | 2021-11-15T09:26:24.000Z | 2021-11-15T09:26:24.000Z | deadtrees/network/extra/resunetplusplus/__init__.py | cwerner/deadtrees | 15ddfec58c4a40f22f9c1e2424fb535df4d29b03 | [
"Apache-2.0"
] | 43 | 2021-04-19T14:55:05.000Z | 2022-03-29T13:34:16.000Z | deadtrees/network/extra/resunetplusplus/__init__.py | cwerner/deadtrees | 15ddfec58c4a40f22f9c1e2424fb535df4d29b03 | [
"Apache-2.0"
] | null | null | null | from .model import ResUnetPlusPlus
| 17.5 | 34 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
989f321254ef2c700cd7d5e4e5510eef2ae556a7 | 1,416 | py | Python | datacamp/case_collections/study_crimes.py | anilgeorge04/learn-ds | f1a9c638e29270d4d72fc3aed0af3ccea8c53350 | [
"MIT"
] | null | null | null | datacamp/case_collections/study_crimes.py | anilgeorge04/learn-ds | f1a9c638e29270d4d72fc3aed0af3ccea8c53350 | [
"MIT"
] | null | null | null | datacamp/case_collections/study_crimes.py | anilgeorge04/learn-ds | f1a9c638e29270d4d72fc3aed0af3ccea8c53350 | [
"MIT"
] | null | null | null | import csv
from collections import Counter
from collections import defaultdict
from datetime import datetime
# Make dictionary with district as key
# Create a dictionary that defaults to a list: crimes_by_district
crimes_by_district = defaultdict(list)
# Read the CSV inside a context manager so the file handle is closed when
# parsing is done (the original open() call was never closed).
with open('crime_sampler.csv', 'r') as csvfile:
    # Loop over a DictReader of the CSV file
    for row in csv.DictReader(csvfile):
        # Pop the district from each row: district
        district = row.pop('District')
        # Append the rest of the data to the list for proper district in crimes_by_district
        crimes_by_district[district].append(row)
# Number of arrests in each City District for each year
# Loop over the crimes_by_district using expansion as district and crimes
for district, crimes in crimes_by_district.items():
    # Print the district
    print(district)
    # Create an empty Counter object: year_count
    year_count = Counter()
    # Loop over the crimes:
    for crime in crimes:
        # If there was an arrest
        if crime['Arrest'] == 'true':
            # Convert the Date to a datetime and get the year
            year = datetime.strptime(crime['Date'], '%m/%d/%Y %I:%M:%S %p').year
            # Increment the Counter for the year
            year_count[year] += 1
    # Print the counter
    print(year_count)
# Insight: Looks like most arrests took place in the 11th district
| 30.12766 | 87 | 0.694915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 781 | 0.551554 |
98a149a17f0d2633d0a5d99ecb5f2a1417468a75 | 4,016 | py | Python | python/nsc/nsc_instcal_combine_breakup_idstr.py | dnidever/noaosourcecatalog | bdd22e53da3ebb6e6c79d8cbe9e375562b09cfeb | [
"MIT"
] | 4 | 2017-05-23T20:57:33.000Z | 2018-01-30T22:51:42.000Z | python/nsc/nsc_instcal_combine_breakup_idstr.py | dnidever/noaosourcecatalog | bdd22e53da3ebb6e6c79d8cbe9e375562b09cfeb | [
"MIT"
] | null | null | null | python/nsc/nsc_instcal_combine_breakup_idstr.py | dnidever/noaosourcecatalog | bdd22e53da3ebb6e6c79d8cbe9e375562b09cfeb | [
"MIT"
] | 1 | 2021-07-15T03:06:22.000Z | 2021-07-15T03:06:22.000Z | #!/usr/bin/env python
# Break up idstr file into separate measid/objectid lists per exposure on /data0
import os
import sys
import numpy as np
import time
from dlnpyutils import utils as dln, db
from astropy.io import fits
import sqlite3
import socket
from argparse import ArgumentParser
def breakup_idstr(dbfile):
""" Break-up idstr file into separate measid/objectid lists per exposure on /data0."""
t00 = time.time()
outdir = '/data0/dnidever/nsc/instcal/v3/idstr/'
# Load the exposures table
expcat = fits.getdata('/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz',1)
# Make sure it's a list
if type(dbfile) is str: dbfile=[dbfile]
print('Breaking up '+str(len(dbfile))+' database files')
# Loop over files
for i,dbfile1 in enumerate(dbfile):
print(str(i+1)+' '+dbfile1)
if os.path.exists(dbfile1):
t0 = time.time()
dbbase1 = os.path.basename(dbfile1)[0:-9] # remove _idstr.db ending
# Get existing index names for this database
d = sqlite3.connect(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = d.cursor()
cmd = 'select measid,exposure,objectid from idstr'
t1 = time.time()
data = cur.execute(cmd).fetchall()
print(' '+str(len(data))+' rows read in %5.1f sec. ' % (time.time()-t1))
# Break up data into lists
measid,exposure,objectid = list(zip(*data))
measid = np.array(measid)
objectid = np.array(objectid)
exposure = np.array(exposure)
eindex = dln.create_index(exposure)
# Match exposures to exposure catalog
ind1,ind2 = dln.match(expcat['EXPOSURE'],eindex['value'])
# Loop over exposures and write output files
nexp = len(eindex['value'])
print(' '+str(nexp)+' exposures')
measid_maxlen = np.max(dln.strlen(measid))
objectid_maxlen = np.max(dln.strlen(objectid))
df = np.dtype([('measid',np.str,measid_maxlen+1),('objectid',np.str,objectid_maxlen+1)])
# Loop over the exposures and write out the files
for k in range(nexp):
if nexp>100:
if k % 100 == 0: print(' '+str(k+1))
ind = eindex['index'][eindex['lo'][k]:eindex['hi'][k]+1]
cat = np.zeros(len(ind),dtype=df)
cat['measid'] = measid[ind]
cat['objectid'] = objectid[ind]
instcode = expcat['INSTRUMENT'][ind1[k]]
dateobs = expcat['DATEOBS'][ind1[k]]
night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10]
if os.path.exists(outdir+instcode+'/'+night+'/'+eindex['value'][k]) is False:
# Sometimes this crashes because another process is making the directory at the same time
try:
os.makedirs(outdir+instcode+'/'+night+'/'+eindex['value'][k])
except:
pass
outfile = outdir+instcode+'/'+night+'/'+eindex['value'][k]+'/'+eindex['value'][k]+'__'+dbbase1+'.npy'
np.save(outfile,cat)
print(' dt = %6.1f sec. ' % (time.time()-t0))
else:
print(' '+dbfile1+' NOT FOUND')
print('dt = %6.1f sec.' % (time.time()-t00))
if __name__ == "__main__":
parser = ArgumentParser(description='Break up idstr into separate lists per exposure.')
parser.add_argument('dbfile', type=str, nargs=1, help='Database filename')
args = parser.parse_args()
hostname = socket.gethostname()
host = hostname.split('.')[0]
dbfile = args.dbfile[0]
# Input is a list
if dbfile[0]=='@':
listfile = dbfile[1:]
if os.path.exists(listfile):
dbfile = dln.readlines(listfile)
else:
print(listfile+' NOT FOUND')
sys.exit()
breakup_idstr(dbfile)
| 39.372549 | 117 | 0.574701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,135 | 0.28262 |
98a2ca2296e875523ce2f68c78e6507e53f436a6 | 721 | py | Python | Chapter10/fabfile_operations.py | frankethp/Hands-On-Enterprise-Automation-with-Python | 4d20dc5fda2265a2c3666770b8ad53e63c7ae07c | [
"MIT"
] | 51 | 2018-07-02T04:03:07.000Z | 2022-03-08T07:20:29.000Z | Chapter10/fabfile_operations.py | MindaugasVaitkus2/Hands-On-Enterprise-Automation-with-Python | 39471804525701e634bd35046d8db3c0bca51dd6 | [
"MIT"
] | 1 | 2018-08-06T10:13:15.000Z | 2020-10-08T12:27:17.000Z | Chapter10/fabfile_operations.py | MindaugasVaitkus2/Hands-On-Enterprise-Automation-with-Python | 39471804525701e634bd35046d8db3c0bca51dd6 | [
"MIT"
] | 43 | 2018-07-24T08:50:41.000Z | 2022-03-18T21:45:40.000Z | #!/usr/bin/python
__author__ = "Bassim Aly"
__EMAIL__ = "basim.alyy@gmail.com"
from fabric.api import *
env.hosts = [
'10.10.10.140', # ubuntu machine
'10.10.10.193', # CentOS machine
]
env.user = "root"
env.password = "access123"
def run_ops():
output = run("hostname")
def get_ops():
try:
get("/var/log/messages", "/root/")
except:
pass
def put_ops():
try:
put("/root/VeryImportantFile.txt", "/root/")
except:
pass
def sudo_ops():
sudo("whoami") # it should print the root even if you use another account
def prompt_ops():
prompt("please supply release name", default="7.4.1708")
def reboot_ops():
reboot(wait=60, use_sudo=True)
| 16.386364 | 78 | 0.614424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 306 | 0.424411 |
98a44edac65fd22c66ab9f5074e13352309bd948 | 1,882 | py | Python | WebApp/application.py | Ezetowers/AppEngine_EventsManagement | 23e496dee161fbe62596f466d3e83e9a88c2f2b4 | [
"MIT"
] | null | null | null | WebApp/application.py | Ezetowers/AppEngine_EventsManagement | 23e496dee161fbe62596f466d3e83e9a88c2f2b4 | [
"MIT"
] | null | null | null | WebApp/application.py | Ezetowers/AppEngine_EventsManagement | 23e496dee161fbe62596f466d3e83e9a88c2f2b4 | [
"MIT"
] | null | null | null | import os
from Model.Model import *
from Handlers.AddGuest import AddGuest
from Handlers.QueryGuest import QueryGuest
from Handlers.EventsCreation import EventsCreation
from Handlers.EventRemoval import EventRemoval
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class MainPage(webapp2.RequestHandler):
def get(self):
events_query = Event.query(ancestor=events_key())
events = events_query.fetch()
guest_list = []
for event in events:
guest_query = Guest.query(ancestor=event_guests_key(event.name))
guests_by_event = guest_query.fetch()
guest_list.append((event.name, guests_by_event))
template_values = {
'events': events,
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class AdminPage(webapp2.RequestHandler):
def get(self):
events_query = Event.query(ancestor=events_key())
events = events_query.fetch()
guest_list = []
for event in events:
guest_query = Guest.query(ancestor=event_guests_key(event.name))
guests_by_event = guest_query.fetch()
guest_list.append((event.name, guests_by_event))
template_values = {
'events': events,
'guests_list': guest_list
}
template = JINJA_ENVIRONMENT.get_template('admin.html')
self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([
('/', MainPage),
('/admin', AdminPage),
('/event_creation', EventsCreation),
('/event_removal', EventRemoval),
('/add_guest', AddGuest),
('/query_guest', QueryGuest),
], debug=True)
| 29.40625 | 76 | 0.670563 | 1,223 | 0.649841 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.077577 |
98a6da5934125aa84cc4de8ab3349dd673edbfc9 | 530 | py | Python | leap/leap_test.py | shozi91/xpython | f3eccb61910f9950a98c5524efcfd8784fbc4289 | [
"MIT"
] | null | null | null | leap/leap_test.py | shozi91/xpython | f3eccb61910f9950a98c5524efcfd8784fbc4289 | [
"MIT"
] | null | null | null | leap/leap_test.py | shozi91/xpython | f3eccb61910f9950a98c5524efcfd8784fbc4289 | [
"MIT"
] | 1 | 2020-06-10T23:33:20.000Z | 2020-06-10T23:33:20.000Z | import unittest
from year import is_leap_year
class YearTest(unittest.TestCase):
def test_leap_year(self):
self.assertTrue(is_leap_year(1996))
def test_non_leap_year(self):
self.assertFalse(is_leap_year(1997))
def test_non_leap_even_year(self):
self.assertFalse(is_leap_year(1998))
def test_century(self):
self.assertFalse(is_leap_year(1900))
def test_exceptional_century(self):
self.assertTrue(is_leap_year(2400))
if __name__ == '__main__':
unittest.main()
| 22.083333 | 44 | 0.713208 | 432 | 0.815094 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.018868 |
98abe48e2e82e8030955b56dce5a86874efde1ce | 1,934 | py | Python | Examples/batch_data_reduction.py | keflavich/TurbuStat | a6fac4c0d10473a74c62cce4a9c6a30773a955b1 | [
"MIT"
] | null | null | null | Examples/batch_data_reduction.py | keflavich/TurbuStat | a6fac4c0d10473a74c62cce4a9c6a30773a955b1 | [
"MIT"
] | null | null | null | Examples/batch_data_reduction.py | keflavich/TurbuStat | a6fac4c0d10473a74c62cce4a9c6a30773a955b1 | [
"MIT"
] | null | null | null | # Licensed under an MIT open source license - see LICENSE
'''
Runs data_reduc on all data cubes in the file.
Creates a folder for each data cube and its products
Run from folder containing data cubes
'''
from turbustat.data_reduction import *
from astropy.io.fits import getdata
import os
import sys
import errno
import shutil
from datetime import datetime
# Usage: python batch_data_reduction.py <folder> <noise>
# Runs data reduction on every FITS cube in <folder>, creating one
# sub-directory per cube for its products and logging progress.
folder = sys.argv[1]
noise = sys.argv[2]
if str(noise) == "None":
    noise = None
# NOTE(review): `noise` is parsed above but never used -- property_arrays
# below is called with a hard-coded rms_noise=0.001. Confirm whether the
# CLI value should be forwarded instead.
os.chdir(folder)
## Read files in the folder
data_cubes = [x for x in os.listdir(".") if os.path.isfile(x) and x[-4:] == "fits"]
print(data_cubes)
logfile = open("".join([folder[:-1], "_reductionlog", ".txt"]), "w+")
for fitscube in data_cubes:
    filestr = "Reducing %s \n" % (fitscube)
    print(filestr)
    print(str(datetime.now()))
    logfile.write(filestr)
    logfile.write("".join([str(datetime.now()), "\n"]))
    try:
        # One output directory per cube, named after the file minus ".fits".
        os.makedirs(fitscube[:-5])
    except OSError:
        # Directory already exists; reuse it.
        pass
    shutil.move(fitscube, fitscube[:-5])
    os.chdir(fitscube[:-5])
    cube, header = getdata(fitscube, header=True)
    # Copy the first plane into the last column/row plane (presumably to
    # patch NaN borders in the simulated cubes -- TODO confirm).
    cube[:, :, cube.shape[2] - 1] = cube[:, :, 0]
    cube[:, cube.shape[1] - 1, :] = cube[:, 0, :]
    reduction = property_arrays(
        (cube, header), rms_noise=0.001, kernel_size=10,
        save_name=fitscube[:-5]
    )
    reduction.return_all()
    ## Clean up: drop references before loading the next (large) cube.
    cube, header = None, None
    reduction = None
    os.chdir("..")
print("Done!\n ")
print(str(datetime.now()))
logfile.write("Done!")
logfile.write("".join([str(datetime.now()), "\n"]))
logfile.close()
| 24.794872 | 115 | 0.639607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 714 | 0.369183 |
98b33ea8451d3967d4ce2088f2eba80859167c6d | 44,549 | py | Python | tests/test_dataset_tensor_backend.py | evendrow/deepsnap | 8d5762bf4a2ef6910ad602895685cac892207ba8 | [
"MIT"
] | null | null | null | tests/test_dataset_tensor_backend.py | evendrow/deepsnap | 8d5762bf4a2ef6910ad602895685cac892207ba8 | [
"MIT"
] | null | null | null | tests/test_dataset_tensor_backend.py | evendrow/deepsnap | 8d5762bf4a2ef6910ad602895685cac892207ba8 | [
"MIT"
] | null | null | null | import copy
import random
import torch
import unittest
from torch_geometric.datasets import TUDataset, Planetoid
from copy import deepcopy
from deepsnap.graph import Graph
from deepsnap.hetero_graph import HeteroGraph
from deepsnap.dataset import GraphDataset, Generator, EnsembleGenerator
from tests.utils import (
pyg_to_dicts,
simple_networkx_graph,
simple_networkx_small_graph,
simple_networkx_graph_alphabet,
simple_networkx_multigraph,
generate_dense_hete_dataset,
generate_simple_small_hete_graph,
gen_graph
)
class TestDatasetTensorBackend(unittest.TestCase):
def test_dataset_basic(self):
_, x, y, edge_x, edge_y, edge_index, graph_x, graph_y = (
simple_networkx_graph()
)
G = Graph(
node_feature=x, node_label=y, edge_index=edge_index,
edge_feature=edge_x, edge_label=edge_y,
graph_feature=graph_x, graph_label=graph_y, directed=True
)
H = deepcopy(G)
dataset = GraphDataset([G, H])
self.assertEqual(len(dataset), 2)
def test_dataset_property(self):
_, x, y, edge_x, edge_y, edge_index, graph_x, graph_y = (
simple_networkx_graph()
)
G = Graph(
node_feature=x, node_label=y, edge_index=edge_index,
edge_feature=edge_x, edge_label=edge_y,
graph_feature=graph_x, graph_label=graph_y, directed=True
)
H = deepcopy(G)
H.graph_label = torch.tensor([1])
graphs = [G, H]
dataset = GraphDataset(graphs)
self.assertEqual(dataset.num_node_labels, 5)
self.assertEqual(dataset.num_node_features, 2)
self.assertEqual(dataset.num_edge_labels, 4)
self.assertEqual(dataset.num_edge_features, 2)
self.assertEqual(dataset.num_graph_labels, 1)
self.assertEqual(dataset.num_graph_features, 2)
self.assertEqual(dataset.num_labels, 5) # node task
dataset = GraphDataset(graphs, task="edge")
self.assertEqual(dataset.num_labels, 4)
dataset = GraphDataset(graphs, task="link_pred")
self.assertEqual(dataset.num_labels, 5)
dataset = GraphDataset(graphs, task="graph")
self.assertEqual(dataset.num_labels, 1)
    def test_dataset_hetero_graph_split(self):
        """Split one dense heterogeneous graph for node / edge / link_pred
        tasks and check the per-type split sizes, including type-restricted
        splits, disjoint training, and the "approximate" edge split mode.
        """
        G = generate_dense_hete_dataset()
        hete = HeteroGraph(G)
        # Rebuild through the tensor-backend constructor (no networkx graph).
        hete = HeteroGraph(
            node_feature=hete.node_feature,
            node_label=hete.node_label,
            edge_feature=hete.edge_feature,
            edge_label=hete.edge_label,
            edge_index=hete.edge_index,
            directed=True
        )
        # node: every node type is split 80/10/10.
        dataset = GraphDataset([hete], task="node")
        split_res = dataset.split()
        for node_type in hete.node_label_index:
            num_nodes = int(len(hete.node_label_index[node_type]))
            node_0 = int(num_nodes * 0.8)
            node_1 = int(num_nodes * 0.1)
            node_2 = num_nodes - node_0 - node_1
            self.assertEqual(
                len(split_res[0][0].node_label_index[node_type]),
                node_0,
            )
            self.assertEqual(
                len(split_res[1][0].node_label_index[node_type]),
                node_1,
            )
            self.assertEqual(
                len(split_res[2][0].node_label_index[node_type]),
                node_2,
            )
        # node with specified split type: non-listed types keep all nodes.
        dataset = GraphDataset([hete], task="node")
        node_split_types = ["n1"]
        split_res = dataset.split(split_types=node_split_types)
        for node_type in hete.node_label_index:
            if node_type in node_split_types:
                num_nodes = int(len(hete.node_label_index[node_type]))
                node_0 = int(num_nodes * 0.8)
                node_1 = int(num_nodes * 0.1)
                node_2 = num_nodes - node_0 - node_1
                self.assertEqual(
                    len(split_res[0][0].node_label_index[node_type]),
                    node_0,
                )
                self.assertEqual(
                    len(split_res[1][0].node_label_index[node_type]),
                    node_1,
                )
                self.assertEqual(
                    len(split_res[2][0].node_label_index[node_type]),
                    node_2,
                )
            else:
                num_nodes = int(len(hete.node_label_index[node_type]))
                self.assertEqual(
                    len(split_res[0][0].node_label_index[node_type]),
                    num_nodes,
                )
                self.assertEqual(
                    len(split_res[1][0].node_label_index[node_type]),
                    num_nodes,
                )
                self.assertEqual(
                    len(split_res[2][0].node_label_index[node_type]),
                    num_nodes,
                )
        # node with specified split type (string mode)
        dataset = GraphDataset([hete], task="node")
        node_split_types = "n1"
        split_res = dataset.split(split_types=node_split_types)
        for node_type in hete.node_label_index:
            # NOTE: `in` on a string is a substring test on the type name.
            if node_type in node_split_types:
                num_nodes = int(len(hete.node_label_index[node_type]))
                node_0 = int(num_nodes * 0.8)
                node_1 = int(num_nodes * 0.1)
                node_2 = num_nodes - node_0 - node_1
                self.assertEqual(
                    len(split_res[0][0].node_label_index[node_type]),
                    node_0,
                )
                self.assertEqual(
                    len(split_res[1][0].node_label_index[node_type]),
                    node_1,
                )
                self.assertEqual(
                    len(split_res[2][0].node_label_index[node_type]),
                    node_2,
                )
            else:
                num_nodes = int(len(hete.node_label_index[node_type]))
                self.assertEqual(
                    len(split_res[0][0].node_label_index[node_type]),
                    num_nodes,
                )
                self.assertEqual(
                    len(split_res[1][0].node_label_index[node_type]),
                    num_nodes,
                )
                self.assertEqual(
                    len(split_res[2][0].node_label_index[node_type]),
                    num_nodes,
                )
        # edge: every message type is split 80/10/10.
        dataset = GraphDataset([hete], task="edge")
        split_res = dataset.split()
        for edge_type in hete.edge_label_index:
            num_edges = hete.edge_label_index[edge_type].shape[1]
            edge_0 = int(num_edges * 0.8)
            edge_1 = int(num_edges * 0.1)
            edge_2 = num_edges - edge_0 - edge_1
            self.assertEqual(
                split_res[0][0].edge_label_index[edge_type].shape[1],
                edge_0,
            )
            self.assertEqual(
                split_res[1][0].edge_label_index[edge_type].shape[1],
                edge_1,
            )
            self.assertEqual(
                split_res[2][0].edge_label_index[edge_type].shape[1],
                edge_2,
            )
        # edge with specified split type
        dataset = GraphDataset([hete], task="edge")
        edge_split_types = [("n1", "e1", "n1"), ("n1", "e2", "n2")]
        split_res = dataset.split(split_types=edge_split_types)
        for edge_type in hete.edge_label_index:
            if edge_type in edge_split_types:
                num_edges = hete.edge_label_index[edge_type].shape[1]
                edge_0 = int(num_edges * 0.8)
                edge_1 = int(num_edges * 0.1)
                edge_2 = num_edges - edge_0 - edge_1
                self.assertEqual(
                    split_res[0][0].edge_label_index[edge_type].shape[1],
                    edge_0,
                )
                self.assertEqual(
                    split_res[1][0].edge_label_index[edge_type].shape[1],
                    edge_1,
                )
                self.assertEqual(
                    split_res[2][0].edge_label_index[edge_type].shape[1],
                    edge_2,
                )
            else:
                num_edges = hete.edge_label_index[edge_type].shape[1]
                self.assertEqual(
                    split_res[0][0].edge_label_index[edge_type].shape[1],
                    num_edges,
                )
                self.assertEqual(
                    split_res[1][0].edge_label_index[edge_type].shape[1],
                    num_edges,
                )
                self.assertEqual(
                    split_res[2][0].edge_label_index[edge_type].shape[1],
                    num_edges,
                )
        # link_pred: factor 2 accounts for the sampled negative edges.
        dataset = GraphDataset([hete], task="link_pred")
        split_res = dataset.split(transductive=True)
        for edge_type in hete.edge_label_index:
            num_edges = hete.edge_label_index[edge_type].shape[1]
            edge_0 = 2 * int(0.8 * num_edges)
            edge_1 = 2 * int(0.1 * num_edges)
            edge_2 = 2 * (
                num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
            )
            self.assertEqual(
                split_res[0][0].edge_label_index[edge_type].shape[1],
                edge_0
            )
            self.assertEqual(
                split_res[1][0].edge_label_index[edge_type].shape[1],
                edge_1
            )
            self.assertEqual(
                split_res[2][0].edge_label_index[edge_type].shape[1],
                edge_2
            )
        # link_pred with specified split type
        dataset = GraphDataset([hete], task="link_pred")
        link_split_types = [("n1", "e1", "n1"), ("n1", "e2", "n2")]
        split_res = dataset.split(
            transductive=True,
            split_types=link_split_types
        )
        for edge_type in hete.edge_label_index:
            if edge_type in link_split_types:
                num_edges = hete.edge_label_index[edge_type].shape[1]
                edge_0 = 2 * int(0.8 * num_edges)
                edge_1 = 2 * int(0.1 * num_edges)
                edge_2 = 2 * (
                    num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
                )
                self.assertEqual(
                    split_res[0][0].edge_label_index[edge_type].shape[1],
                    edge_0
                )
                self.assertEqual(
                    split_res[1][0].edge_label_index[edge_type].shape[1],
                    edge_1
                )
                self.assertEqual(
                    split_res[2][0].edge_label_index[edge_type].shape[1],
                    edge_2
                )
            else:
                num_edges = hete.edge_label_index[edge_type].shape[1]
                self.assertEqual(
                    split_res[0][0].edge_label_index[edge_type].shape[1],
                    num_edges
                )
                self.assertEqual(
                    split_res[1][0].edge_label_index[edge_type].shape[1],
                    num_edges
                )
                self.assertEqual(
                    split_res[2][0].edge_label_index[edge_type].shape[1],
                    num_edges
                )
        # link_pred + disjoint: training supervision excludes message edges.
        dataset = GraphDataset(
            [hete],
            task="link_pred",
            edge_train_mode="disjoint",
            edge_message_ratio=0.5,
        )
        split_res = dataset.split(
            transductive=True,
            split_ratio=[0.6, 0.2, 0.2],
        )
        for edge_type in hete.edge_label_index:
            num_edges = hete.edge_label_index[edge_type].shape[1]
            edge_0 = int(0.6 * num_edges)
            # Half of the train edges become message edges; the rest are
            # supervision edges (doubled by negative sampling).
            edge_0 = 2 * (edge_0 - int(0.5 * edge_0))
            edge_1 = 2 * int(0.2 * num_edges)
            edge_2 = 2 * (
                num_edges - int(0.6 * num_edges) - int(0.2 * num_edges)
            )
            self.assertEqual(
                split_res[0][0].edge_label_index[edge_type].shape[1],
                edge_0,
            )
            self.assertEqual(
                split_res[1][0].edge_label_index[edge_type].shape[1],
                edge_1,
            )
            self.assertEqual(
                split_res[2][0].edge_label_index[edge_type].shape[1],
                edge_2,
            )
        # link pred with edge_split_mode set to "approximate":
        # ratios are checked on the edge totals summed over all types.
        dataset = GraphDataset(
            [hete],
            task="link_pred",
            edge_split_mode="approximate"
        )
        split_res = dataset.split(transductive=True)
        hete_link_train_edge_num = 0
        hete_link_test_edge_num = 0
        hete_link_val_edge_num = 0
        num_edges = 0
        for edge_type in hete.edge_label_index:
            num_edges += hete.edge_label_index[edge_type].shape[1]
            if edge_type in split_res[0][0].edge_label_index:
                hete_link_train_edge_num += (
                    split_res[0][0].edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[1][0].edge_label_index:
                hete_link_test_edge_num += (
                    split_res[1][0].edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[2][0].edge_label_index:
                hete_link_val_edge_num += (
                    split_res[2][0].edge_label_index[edge_type].shape[1]
                )
        # num_edges_reduced = num_edges - 3
        edge_0 = 2 * int(0.8 * num_edges)
        edge_1 = 2 * int(0.1 * num_edges)
        edge_2 = 2 * (
            num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
        )
        self.assertEqual(
            hete_link_train_edge_num,
            edge_0
        )
        self.assertEqual(
            hete_link_test_edge_num,
            edge_1
        )
        self.assertEqual(
            hete_link_val_edge_num,
            edge_2
        )
        # link pred with specified types and edge_split_mode set to
        # "approximate": non-listed types contribute whole to every split.
        dataset = GraphDataset(
            [hete],
            task="link_pred",
            edge_split_mode="approximate",
        )
        link_split_types = [("n1", "e1", "n1"), ("n1", "e2", "n2")]
        split_res = dataset.split(
            transductive=True,
            split_types=link_split_types,
        )
        hete_link_train_edge_num = 0
        hete_link_test_edge_num = 0
        hete_link_val_edge_num = 0
        num_split_type_edges = 0
        num_non_split_type_edges = 0
        for edge_type in hete.edge_label_index:
            if edge_type in link_split_types:
                num_split_type_edges += (
                    hete.edge_label_index[edge_type].shape[1]
                )
            else:
                num_non_split_type_edges += (
                    hete.edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[0][0].edge_label_index:
                hete_link_train_edge_num += (
                    split_res[0][0].edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[1][0].edge_label_index:
                hete_link_test_edge_num += (
                    split_res[1][0].edge_label_index[edge_type].shape[1]
                )
            if edge_type in split_res[2][0].edge_label_index:
                hete_link_val_edge_num += (
                    split_res[2][0].edge_label_index[edge_type].shape[1]
                )
        # num_edges_reduced = num_split_type_edges - 3
        num_edges = num_split_type_edges
        edge_0 = 2 * int(0.8 * num_edges) + num_non_split_type_edges
        edge_1 = 2 * int(0.1 * num_edges) + num_non_split_type_edges
        edge_2 = 2 * (
            num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
        ) + num_non_split_type_edges
        self.assertEqual(hete_link_train_edge_num, edge_0)
        self.assertEqual(hete_link_test_edge_num, edge_1)
        self.assertEqual(hete_link_val_edge_num, edge_2)
    def test_dataset_split(self):
        """Check default 80/10/10 split sizes for inductive (graph-level)
        and transductive (node / link_pred) splits on homogeneous graphs,
        including disjoint training and a custom negative sampling ratio.
        """
        # inductively split with graph task
        pyg_dataset = TUDataset("./enzymes", "ENZYMES")
        ds = pyg_to_dicts(pyg_dataset)
        graphs = [Graph(**item) for item in ds]
        dataset = GraphDataset(graphs, task="graph")
        split_res = dataset.split(transductive=False)
        num_graphs = len(dataset)
        num_train = int(0.8 * num_graphs)
        num_val = int(0.1 * num_graphs)
        num_test = num_graphs - num_train - num_val
        self.assertEqual(num_train, len(split_res[0]))
        self.assertEqual(num_val, len(split_res[1]))
        self.assertEqual(num_test, len(split_res[2]))
        # inductively split with link_pred task
        # and default (`all`) edge_train_mode
        pyg_dataset = TUDataset("./enzymes", "ENZYMES")
        ds = pyg_to_dicts(pyg_dataset)
        graphs = [Graph(**item) for item in ds]
        dataset = GraphDataset(graphs, task="link_pred")
        split_res = dataset.split(transductive=False)
        num_graphs = len(dataset)
        num_train = int(0.8 * num_graphs)
        num_val = int(0.1 * num_graphs)
        num_test = num_graphs - num_train - num_val
        self.assertEqual(num_train, len(split_res[0]))
        self.assertEqual(num_val, len(split_res[1]))
        self.assertEqual(num_test, len(split_res[2]))
        # inductively split with link_pred task and `disjoint` edge_train_mode
        pyg_dataset = TUDataset("./enzymes", "ENZYMES")
        ds = pyg_to_dicts(pyg_dataset)
        graphs = [Graph(**item) for item in ds]
        dataset = GraphDataset(
            graphs,
            task="link_pred",
            edge_train_mode="disjoint",
        )
        split_res = dataset.split(transductive=False)
        num_graphs = len(dataset)
        num_train = int(0.8 * num_graphs)
        num_val = int(0.1 * num_graphs)
        num_test = num_graphs - num_train - num_val
        self.assertEqual(num_train, len(split_res[0]))
        self.assertEqual(num_val, len(split_res[1]))
        self.assertEqual(num_test, len(split_res[2]))
        # transductively split with node task
        pyg_dataset = Planetoid("./cora", "Cora")
        ds = pyg_to_dicts(pyg_dataset, task="cora")
        graphs = [Graph(**item) for item in ds]
        dataset = GraphDataset(graphs, task="node")
        num_nodes = dataset.num_nodes[0]
        num_edges = dataset.num_edges[0]
        node_0 = int(0.8 * num_nodes)
        node_1 = int(0.1 * num_nodes)
        node_2 = num_nodes - node_0 - node_1
        split_res = dataset.split()
        self.assertEqual(
            len(split_res[0][0].node_label_index),
            node_0
        )
        self.assertEqual(
            len(split_res[1][0].node_label_index),
            node_1
        )
        self.assertEqual(
            len(split_res[2][0].node_label_index),
            node_2
        )
        # transductively split with link_pred task
        # and default (`all`) edge_train_mode
        # (one factor 2: undirected edges; the other: sampled negatives)
        dataset = GraphDataset(graphs, task="link_pred")
        edge_0 = 2 * 2 * int(0.8 * num_edges)
        edge_1 = 2 * 2 * int(0.1 * num_edges)
        edge_2 = 2 * 2 * (
            num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
        )
        split_res = dataset.split()
        self.assertEqual(
            split_res[0][0].edge_label_index.shape[1],
            edge_0
        )
        self.assertEqual(
            split_res[1][0].edge_label_index.shape[1],
            edge_1
        )
        self.assertEqual(
            split_res[2][0].edge_label_index.shape[1],
            edge_2
        )
        # transductively split with link_pred task, `split` edge_train_mode
        # and 0.5 edge_message_ratio
        dataset = GraphDataset(
            graphs,
            task="link_pred",
            edge_train_mode="disjoint",
            edge_message_ratio=0.5,
        )
        split_res = dataset.split()
        edge_0 = 2 * int(0.8 * num_edges)
        # train supervision excludes the 50% used as message edges
        edge_0 = 2 * (edge_0 - int(0.5 * edge_0))
        edge_1 = 2 * 2 * int(0.1 * num_edges)
        edge_2 = 2 * 2 * (
            num_edges - int(0.8 * num_edges) - int(0.1 * num_edges)
        )
        self.assertEqual(
            split_res[0][0].edge_label_index.shape[1],
            edge_0,
        )
        self.assertEqual(split_res[1][0].edge_label_index.shape[1], edge_1)
        self.assertEqual(split_res[2][0].edge_label_index.shape[1], edge_2)
        # transductively split with link_pred task
        # and specified edge_negative_sampling_ratio
        # (2 negatives per positive -> factor (2 + 1))
        dataset = GraphDataset(
            graphs,
            task="link_pred",
            edge_negative_sampling_ratio=2
        )
        split_res = dataset.split()
        edge_0 = (2 + 1) * (2 * int(0.8 * num_edges))
        edge_1 = (2 + 1) * (2 * int(0.1 * num_edges))
        edge_2 = (2 + 1) * (
            2 * (num_edges - int(0.8 * num_edges) - int(0.1 * num_edges))
        )
        self.assertEqual(split_res[0][0].edge_label_index.shape[1], edge_0)
        self.assertEqual(split_res[1][0].edge_label_index.shape[1], edge_1)
        self.assertEqual(split_res[2][0].edge_label_index.shape[1], edge_2)
    def test_dataset_split_custom(self):
        """Exercise user-supplied (custom) splits: pre-assigned node /
        edge label indices, custom positive/negative link-pred edges,
        per-graph `custom["general_splits"]`, and `custom_split_graphs`
        for inductive graph-level splitting.
        """
        # transductive split with node task (self defined dataset)
        G, x, y, edge_x, edge_y, edge_index, graph_x, graph_y = (
            simple_networkx_graph()
        )
        Graph.add_edge_attr(G, "edge_feature", edge_x)
        Graph.add_edge_attr(G, "edge_label", edge_y)
        Graph.add_node_attr(G, "node_feature", x)
        Graph.add_node_attr(G, "node_label", y)
        Graph.add_graph_attr(G, "graph_feature", graph_x)
        Graph.add_graph_attr(G, "graph_label", graph_y)
        num_nodes = len(list(G.nodes))
        # Deterministic 30/30/40 node partition by node order.
        nodes_train = torch.tensor(list(G.nodes)[: int(0.3 * num_nodes)])
        nodes_val = torch.tensor(
            list(G.nodes)[int(0.3 * num_nodes): int(0.6 * num_nodes)]
        )
        nodes_test = torch.tensor(list(G.nodes)[int(0.6 * num_nodes):])
        graph_train = Graph(
            node_feature=x, node_label=y, edge_index=edge_index,
            node_label_index=nodes_train, directed=True
        )
        graph_val = Graph(
            node_feature=x, node_label=y, edge_index=edge_index,
            node_label_index=nodes_val, directed=True
        )
        graph_test = Graph(
            node_feature=x, node_label=y, edge_index=edge_index,
            node_label_index=nodes_test, directed=True
        )
        graphs_train = [graph_train]
        graphs_val = [graph_val]
        graphs_test = [graph_test]
        dataset_train, dataset_val, dataset_test = (
            GraphDataset(graphs_train, task='node'),
            GraphDataset(graphs_val, task='node'),
            GraphDataset(graphs_test, task='node')
        )
        self.assertEqual(
            dataset_train[0].node_label_index.tolist(),
            list(range(int(0.3 * num_nodes)))
        )
        self.assertEqual(
            dataset_val[0].node_label_index.tolist(),
            list(range(int(0.3 * num_nodes), int(0.6 * num_nodes)))
        )
        self.assertEqual(
            dataset_test[0].node_label_index.tolist(),
            list(range(int(0.6 * num_nodes), num_nodes))
        )
        # transductive split with link_pred task (train/val split)
        edges = list(G.edges)
        num_edges = len(edges)
        edges_train = edges[: int(0.7 * num_edges)]
        edges_val = edges[int(0.7 * num_edges):]
        link_size_list = [len(edges_train), len(edges_val)]
        # generate pseudo pos and neg edges, they may overlap here
        train_pos = torch.LongTensor(edges_train).permute(1, 0)
        val_pos = torch.LongTensor(edges_val).permute(1, 0)
        val_neg = torch.randint(high=10, size=val_pos.shape, dtype=torch.int64)
        # doubled to match edge_negative_sampling_ratio=2 below
        val_neg_double = torch.cat((val_neg, val_neg), dim=1)
        num_train = len(edges_train)
        num_val = len(edges_val)
        graph_train = Graph(
            node_feature=x, edge_index=edge_index,
            edge_feature=edge_x, directed=True,
            edge_label_index=train_pos
        )
        graph_val = Graph(
            node_feature=x, edge_index=edge_index,
            edge_feature=edge_x, directed=True,
            edge_label_index=val_pos,
            negative_edge=val_neg_double
        )
        graphs_train = [graph_train]
        graphs_val = [graph_val]
        dataset_train, dataset_val = (
            GraphDataset(
                graphs_train, task='link_pred', resample_negatives=True
            ),
            GraphDataset(
                graphs_val, task='link_pred', edge_negative_sampling_ratio=2
            )
        )
        self.assertEqual(
            dataset_train[0].edge_label_index.shape[1],
            2 * link_size_list[0]
        )
        self.assertEqual(
            dataset_train[0].edge_label.shape[0],
            2 * link_size_list[0]
        )
        self.assertEqual(
            dataset_val[0].edge_label_index.shape[1],
            val_pos.shape[1] + val_neg_double.shape[1]
        )
        self.assertEqual(
            dataset_val[0].edge_label.shape[0],
            val_pos.shape[1] + val_neg_double.shape[1]
        )
        # positives come first in edge_label_index, negatives after
        self.assertTrue(
            torch.equal(
                dataset_train[0].edge_label_index[:, :num_train],
                train_pos
            )
        )
        self.assertTrue(
            torch.equal(
                dataset_val[0].edge_label_index[:, :num_val],
                val_pos
            )
        )
        self.assertTrue(
            torch.equal(
                dataset_val[0].edge_label_index[:, num_val:],
                val_neg_double
            )
        )
        dataset_train.resample_negatives = False
        self.assertTrue(
            torch.equal(
                dataset_train[0].edge_label_index,
                dataset_train[0].edge_label_index
            )
        )
        # transductive split with link_pred task with edge label
        edge_label_train = torch.LongTensor([1, 2, 3, 2, 1, 1, 2, 3, 2, 0, 0])
        edge_label_val = torch.LongTensor([1, 2, 3, 2, 1, 0])
        graph_train = Graph(
            node_feature=x,
            edge_index=edge_index,
            directed=True,
            edge_label_index=train_pos,
            edge_label=edge_label_train
        )
        graph_val = Graph(
            node_feature=x,
            edge_index=edge_index,
            directed=True,
            edge_label_index=val_pos,
            negative_edge=val_neg,
            edge_label=edge_label_val
        )
        graphs_train = [graph_train]
        graphs_val = [graph_val]
        dataset_train, dataset_val = (
            GraphDataset(graphs_train, task='link_pred'),
            GraphDataset(graphs_val, task='link_pred')
        )
        self.assertTrue(
            torch.equal(
                dataset_train[0].edge_label_index,
                dataset_train[0].edge_label_index
            )
        )
        # user supplied labels are preserved on the positive edges
        self.assertTrue(
            torch.equal(
                dataset_train[0].edge_label[:num_train],
                edge_label_train
            )
        )
        self.assertTrue(
            torch.equal(
                dataset_val[0].edge_label[:num_val],
                edge_label_val
            )
        )
        # Multiple graph tensor backend link prediction (inductive)
        pyg_dataset = Planetoid('./cora', 'Cora')
        x = pyg_dataset[0].x
        y = pyg_dataset[0].y
        edge_index = pyg_dataset[0].edge_index
        # Canonicalize to one direction then append reversed edges so the
        # edge_index holds both directions of each undirected edge.
        row, col = edge_index
        mask = row < col
        row, col = row[mask], col[mask]
        edge_index = torch.stack([row, col], dim=0)
        edge_index = torch.cat(
            [edge_index, torch.flip(edge_index, [0])], dim=1
        )
        graphs = [
            Graph(
                node_feature=x, node_label=y,
                edge_index=edge_index, directed=False
            )
        ]
        graphs = [copy.deepcopy(graphs[0]) for _ in range(10)]
        edge_label_index = graphs[0].edge_label_index
        dataset = GraphDataset(
            graphs,
            task='link_pred',
            edge_message_ratio=0.6,
            edge_train_mode="all"
        )
        datasets = {}
        datasets['train'], datasets['val'], datasets['test'] = dataset.split(
            transductive=False, split_ratio=[0.85, 0.05, 0.1]
        )
        # training positives must keep the original edge_label_index prefix
        edge_label_index_split = (
            datasets['train'][0].edge_label_index[
                :, 0:edge_label_index.shape[1]
            ]
        )
        self.assertTrue(
            torch.equal(
                edge_label_index,
                edge_label_index_split
            )
        )
        # transductive split with node task (pytorch geometric dataset)
        pyg_dataset = Planetoid("./cora", "Cora")
        ds = pyg_to_dicts(pyg_dataset, task="cora")
        graphs = [Graph(**item) for item in ds]
        split_ratio = [0.3, 0.3, 0.4]
        node_size_list = [0 for i in range(len(split_ratio))]
        for graph in graphs:
            custom_splits = [[] for i in range(len(split_ratio))]
            split_offset = 0
            num_nodes = graph.num_nodes
            shuffled_node_indices = torch.randperm(graph.num_nodes)
            for i, split_ratio_i in enumerate(split_ratio):
                if i != len(split_ratio) - 1:
                    num_split_i = int(split_ratio_i * num_nodes)
                    nodes_split_i = (
                        shuffled_node_indices[
                            split_offset: split_offset + num_split_i
                        ]
                    )
                    split_offset += num_split_i
                else:
                    # last split takes the remainder
                    nodes_split_i = shuffled_node_indices[split_offset:]
                custom_splits[i] = nodes_split_i
                node_size_list[i] += len(nodes_split_i)
            graph.custom = {
                "general_splits": custom_splits
            }
        node_feature = graphs[0].node_feature
        edge_index = graphs[0].edge_index
        directed = graphs[0].directed
        graph_train = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            node_label_index=graphs[0].custom["general_splits"][0]
        )
        graph_val = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            node_label_index=graphs[0].custom["general_splits"][1]
        )
        graph_test = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            node_label_index=graphs[0].custom["general_splits"][2]
        )
        train_dataset = GraphDataset([graph_train], task="node")
        val_dataset = GraphDataset([graph_val], task="node")
        test_dataset = GraphDataset([graph_test], task="node")
        self.assertEqual(
            len(train_dataset[0].node_label_index),
            node_size_list[0]
        )
        self.assertEqual(
            len(val_dataset[0].node_label_index),
            node_size_list[1]
        )
        self.assertEqual(
            len(test_dataset[0].node_label_index),
            node_size_list[2]
        )
        # transductive split with edge task
        pyg_dataset = Planetoid("./cora", "Cora")
        graphs_g = GraphDataset.pyg_to_graphs(pyg_dataset)
        ds = pyg_to_dicts(pyg_dataset, task="cora")
        graphs = [Graph(**item) for item in ds]
        split_ratio = [0.3, 0.3, 0.4]
        edge_size_list = [0 for i in range(len(split_ratio))]
        for i, graph in enumerate(graphs):
            custom_splits = [[] for i in range(len(split_ratio))]
            split_offset = 0
            edges = list(graphs_g[i].G.edges)
            num_edges = graph.num_edges
            random.shuffle(edges)
            for i, split_ratio_i in enumerate(split_ratio):
                if i != len(split_ratio) - 1:
                    num_split_i = int(split_ratio_i * num_edges)
                    edges_split_i = (
                        edges[split_offset: split_offset + num_split_i]
                    )
                    split_offset += num_split_i
                else:
                    edges_split_i = edges[split_offset:]
                custom_splits[i] = edges_split_i
                edge_size_list[i] += len(edges_split_i)
            graph.custom = {
                "general_splits": custom_splits
            }
        node_feature = graphs[0].node_feature
        edge_index = graphs[0].edge_index
        directed = graphs[0].directed
        # each index is duplicated: both directions of the undirected edge
        train_index = torch.tensor(
            graphs[0].custom["general_splits"][0]
        ).permute(1, 0)
        train_index = torch.cat((train_index, train_index), dim=1)
        val_index = torch.tensor(
            graphs[0].custom["general_splits"][1]
        ).permute(1, 0)
        val_index = torch.cat((val_index, val_index), dim=1)
        test_index = torch.tensor(
            graphs[0].custom["general_splits"][2]
        ).permute(1, 0)
        test_index = torch.cat((test_index, test_index), dim=1)
        graph_train = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            edge_label_index=train_index
        )
        graph_val = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            edge_label_index=val_index
        )
        graph_test = Graph(
            node_feature=node_feature,
            edge_index=edge_index,
            directed=directed,
            edge_label_index=test_index
        )
        train_dataset = GraphDataset([graph_train], task="edge")
        val_dataset = GraphDataset([graph_val], task="edge")
        test_dataset = GraphDataset([graph_test], task="edge")
        self.assertEqual(
            train_dataset[0].edge_label_index.shape[1],
            2 * edge_size_list[0]
        )
        self.assertEqual(
            val_dataset[0].edge_label_index.shape[1],
            2 * edge_size_list[1]
        )
        self.assertEqual(
            test_dataset[0].edge_label_index.shape[1],
            2 * edge_size_list[2]
        )
        # inductive split with graph task
        pyg_dataset = TUDataset("./enzymes", "ENZYMES")
        ds = pyg_to_dicts(pyg_dataset)
        graphs = [Graph(**item) for item in ds]
        num_graphs = len(graphs)
        split_ratio = [0.3, 0.3, 0.4]
        graph_size_list = []
        split_offset = 0
        custom_split_graphs = []
        for i, split_ratio_i in enumerate(split_ratio):
            if i != len(split_ratio) - 1:
                num_split_i = int(split_ratio_i * num_graphs)
                custom_split_graphs.append(
                    graphs[split_offset: split_offset + num_split_i]
                )
                split_offset += num_split_i
                graph_size_list.append(num_split_i)
            else:
                custom_split_graphs.append(graphs[split_offset:])
                graph_size_list.append(len(graphs[split_offset:]))
        dataset = GraphDataset(
            graphs, task="graph",
            custom_split_graphs=custom_split_graphs
        )
        split_res = dataset.split(transductive=False)
        self.assertEqual(graph_size_list[0], len(split_res[0]))
        self.assertEqual(graph_size_list[1], len(split_res[1]))
        self.assertEqual(graph_size_list[2], len(split_res[2]))
def test_filter(self):
pyg_dataset = TUDataset("./enzymes", "ENZYMES")
ds = pyg_to_dicts(pyg_dataset)
graphs = [Graph(**item) for item in ds]
dataset = GraphDataset(graphs, task="graph")
thresh = 90
orig_dataset_size = len(dataset)
num_graphs_large = 0
for graph in dataset:
if graph.num_nodes >= thresh:
num_graphs_large += 1
dataset = dataset.filter(
lambda graph: graph.num_nodes < thresh, deep_copy=False
)
filtered_dataset_size = len(dataset)
self.assertEqual(
orig_dataset_size - filtered_dataset_size,
num_graphs_large,
)
def test_resample_disjoint_heterogeneous(self):
G = generate_dense_hete_dataset()
hete = HeteroGraph(G)
hete = HeteroGraph(
node_feature=hete.node_feature,
node_label=hete.node_label,
edge_feature=hete.edge_feature,
edge_label=hete.edge_label,
edge_index=hete.edge_index,
directed=True
)
graphs = [hete]
dataset = GraphDataset(
graphs,
task="link_pred",
edge_train_mode="disjoint",
edge_message_ratio=0.8,
resample_disjoint=True,
resample_disjoint_period=1
)
dataset_train, _, _ = dataset.split(split_ratio=[0.5, 0.2, 0.3])
graph_train_first = dataset_train[0]
graph_train_second = dataset_train[0]
for message_type in graph_train_first.edge_index:
self.assertEqual(
graph_train_first.edge_label_index[message_type].shape[1],
graph_train_second.edge_label_index[message_type].shape[1]
)
self.assertEqual(
graph_train_first.edge_label[message_type].shape,
graph_train_second.edge_label[message_type].shape
)
def test_resample_disjoint(self):
pyg_dataset = Planetoid("./cora", "Cora")
graphs = GraphDataset.pyg_to_graphs(pyg_dataset)
graph = graphs[0]
graph = Graph(
node_label=graph.node_label,
node_feature=graph.node_feature,
edge_index=graph.edge_index,
edge_feature=graph.edge_feature,
directed=False
)
graphs = [graph]
dataset = GraphDataset(
graphs,
task="link_pred",
edge_train_mode="disjoint",
edge_message_ratio=0.8,
resample_disjoint=True,
resample_disjoint_period=1
)
dataset_train, _, _ = dataset.split(split_ratio=[0.5, 0.2, 0.3])
graph_train_first = dataset_train[0]
graph_train_second = dataset_train[0]
self.assertEqual(
graph_train_first.edge_label_index.shape[1],
graph_train_second.edge_label_index.shape[1]
)
self.assertTrue(
torch.equal(
graph_train_first.edge_label,
graph_train_second.edge_label
)
)
    def test_secure_split_heterogeneous(self):
        """On a tiny heterogeneous graph every train/val/test partition
        must be non-empty: each of the first two splits is sized
        1 + int(ratio * (n - 3)), guaranteeing at least one item per split.
        """
        G = generate_simple_small_hete_graph()
        graph = HeteroGraph(G)
        # Rebuild through the tensor-backend constructor.
        graph = HeteroGraph(
            node_label=graph.node_label,
            edge_index=graph.edge_index,
            edge_label=graph.edge_label,
            directed=True
        )
        graphs = [graph]
        # node task
        dataset = GraphDataset(graphs, task="node")
        split_res = dataset.split()
        for node_type in graph.node_label_index:
            num_nodes = graph.node_label_index[node_type].shape[0]
            num_nodes_reduced = num_nodes - 3
            node_0 = 1 + int(num_nodes_reduced * 0.8)
            node_1 = 1 + int(num_nodes_reduced * 0.1)
            node_2 = num_nodes - node_0 - node_1
            node_size = [node_0, node_1, node_2]
            for i in range(3):
                self.assertEqual(
                    split_res[i][0].node_label_index[node_type].shape[0],
                    node_size[i]
                )
                self.assertEqual(
                    split_res[i][0].node_label[node_type].shape[0],
                    node_size[i]
                )
        # edge task
        dataset = GraphDataset(graphs, task="edge")
        split_res = dataset.split()
        for message_type in graph.edge_label_index:
            num_edges = graph.edge_label_index[message_type].shape[1]
            num_edges_reduced = num_edges - 3
            edge_0 = 1 + int(num_edges_reduced * 0.8)
            edge_1 = 1 + int(num_edges_reduced * 0.1)
            edge_2 = num_edges - edge_0 - edge_1
            edge_size = [edge_0, edge_1, edge_2]
            for i in range(3):
                self.assertEqual(
                    split_res[i][0].edge_label_index[message_type].shape[1],
                    edge_size[i]
                )
                self.assertEqual(
                    split_res[i][0].edge_label[message_type].shape[0],
                    edge_size[i]
                )
        # link_pred task: factor 2 accounts for sampled negative edges.
        dataset = GraphDataset(graphs, task="link_pred")
        split_res = dataset.split()
        for message_type in graph.edge_label_index:
            num_edges = graph.edge_label_index[message_type].shape[1]
            num_edges_reduced = num_edges - 3
            edge_0 = 2 * (1 + int(num_edges_reduced * 0.8))
            edge_1 = 2 * (1 + int(num_edges_reduced * 0.1))
            edge_2 = 2 * num_edges - edge_0 - edge_1
            edge_size = [edge_0, edge_1, edge_2]
            for i in range(3):
                self.assertEqual(
                    split_res[i][0].edge_label_index[message_type].shape[1],
                    edge_size[i]
                )
                self.assertEqual(
                    split_res[i][0].edge_label[message_type].shape[0],
                    edge_size[i]
                )
    def test_secure_split(self):
        """Secure split on a homogeneous graph for all four task kinds.

        Size rule (mirrored by the arithmetic below): each split keeps at
        least 1 item and the remaining ``n - 3`` items are divided 80/10/10,
        with the test split taking whatever remains.
        """
        G = simple_networkx_small_graph()
        graph = Graph(G)
        # Rebuild the graph from its own tensors so only labels/edges remain.
        graph = Graph(
            node_label=graph.node_label,
            edge_index=graph.edge_index,
            edge_label=graph.edge_label,
            directed=True
        )
        graphs = [graph]
        # node task
        dataset = GraphDataset(graphs, task="node")
        num_nodes = dataset.num_nodes[0]
        num_nodes_reduced = num_nodes - 3
        node_0 = 1 + int(0.8 * num_nodes_reduced)
        node_1 = 1 + int(0.1 * num_nodes_reduced)
        node_2 = num_nodes - node_0 - node_1
        node_size = [node_0, node_1, node_2]
        split_res = dataset.split()
        for i in range(3):
            self.assertEqual(
                split_res[i][0].node_label_index.shape[0],
                node_size[i]
            )
            self.assertEqual(
                split_res[i][0].node_label.shape[0],
                node_size[i]
            )
        # edge task
        dataset = GraphDataset(graphs, task="edge")
        num_edges = dataset.num_edges[0]
        num_edges_reduced = num_edges - 3
        edge_0 = 1 + int(0.8 * num_edges_reduced)
        edge_1 = 1 + int(0.1 * num_edges_reduced)
        edge_2 = num_edges - edge_0 - edge_1
        edge_size = [edge_0, edge_1, edge_2]
        split_res = dataset.split()
        for i in range(3):
            self.assertEqual(
                split_res[i][0].edge_label_index.shape[1],
                edge_size[i]
            )
            self.assertEqual(
                split_res[i][0].edge_label.shape[0],
                edge_size[i]
            )
        # link_pred task: counts doubled — presumably positive plus negative
        # sampled edges; TODO confirm against GraphDataset.split.
        dataset = GraphDataset(graphs, task="link_pred")
        num_edges = dataset.num_edges[0]
        num_edges_reduced = num_edges - 3
        edge_0 = 2 * (1 + int(0.8 * num_edges_reduced))
        edge_1 = 2 * (1 + int(0.1 * num_edges_reduced))
        edge_2 = 2 * num_edges - edge_0 - edge_1
        edge_size = [edge_0, edge_1, edge_2]
        split_res = dataset.split()
        for i in range(3):
            self.assertEqual(
                split_res[i][0].edge_label_index.shape[1],
                edge_size[i]
            )
            self.assertEqual(
                split_res[i][0].edge_label.shape[0],
                edge_size[i]
            )
        # graph task: inductive (transductive=False) split over 5 copies of
        # the same graph; same 1 + 80/10/10 rule applied to graph counts.
        graphs = [deepcopy(graph) for _ in range(5)]
        dataset = GraphDataset(graphs, task="link_pred")
        num_graphs = len(dataset)
        num_graphs_reduced = num_graphs - 3
        num_train = 1 + int(num_graphs_reduced * 0.8)
        num_val = 1 + int(num_graphs_reduced * 0.1)
        num_test = num_graphs - num_train - num_val
        split_res = dataset.split(transductive=False)
        self.assertEqual(num_train, len(split_res[0]))
        self.assertEqual(num_val, len(split_res[1]))
        self.assertEqual(num_test, len(split_res[2]))
# Run the suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 35.667734 | 79 | 0.548946 | 43,948 | 0.986509 | 0 | 0 | 0 | 0 | 0 | 0 | 2,355 | 0.052863 |
98b505993d7e054879c6dcdc1e0b5fdc721d797f | 5,425 | py | Python | kube-socialNetwork/scripts/init_social_graph.py | Romero027/DeathStarBench | 185b61851b7a89277c0c2c1845e18776a9dd7201 | [
"Apache-2.0"
] | null | null | null | kube-socialNetwork/scripts/init_social_graph.py | Romero027/DeathStarBench | 185b61851b7a89277c0c2c1845e18776a9dd7201 | [
"Apache-2.0"
] | null | null | null | kube-socialNetwork/scripts/init_social_graph.py | Romero027/DeathStarBench | 185b61851b7a89277c0c2c1845e18776a9dd7201 | [
"Apache-2.0"
] | null | null | null | import aiohttp
import asyncio
import os
import string
import random
import argparse
async def upload_follow(session, addr, user_0, user_1):
    """POST a single follow relationship (user_0 follows user_1).

    Returns the raw response body as text.
    """
    form = {
        'user_name': 'username_' + user_0,
        'followee_name': 'username_' + user_1,
    }
    async with session.post(addr + '/wrk2-api/user/follow', data=form) as resp:
        body = await resp.text()
        return body
async def upload_register(session, addr, user):
    """Register one synthetic user (names derived from the user id string).

    Returns the raw response body as text.
    """
    payload = {
        'first_name': 'first_name_' + user,
        'last_name': 'last_name_' + user,
        'username': 'username_' + user,
        'password': 'password_' + user,
        'user_id': user,
    }
    print(payload)
    async with session.post(addr + '/wrk2-api/user/register', data=payload) as resp:
        body = await resp.text()
        return body
async def upload_compose(session, addr, user_id, num_users):
    """Compose one post with random text, mentions, urls and media.

    The sequence of calls into `random` is deliberately kept stable so
    seeded runs stay reproducible. Returns the response body as text.
    """
    charset = string.ascii_letters + string.digits
    text = ''.join(random.choices(charset, k=256))
    # up to 5 user mentions
    for _ in range(random.randint(0, 5)):
        text += ' @username_' + str(random.randint(0, num_users))
    # up to 5 random urls
    for _ in range(random.randint(0, 5)):
        text += ' http://' + ''.join(
            random.choices(string.ascii_lowercase + string.digits, k=64))
    # up to 5 media attachments: 18-digit ids, all typed "png"
    media_ids = []
    media_types = []
    for _ in range(random.randint(0, 5)):
        media_ids.append('"' + ''.join(random.choices(string.digits, k=18)) + '"')
        media_types.append('"png"')
    payload = {
        'username': 'username_' + str(user_id),
        'user_id': str(user_id),
        'text': text,
        'media_ids': '[' + ','.join(media_ids) + ']',
        'media_types': '[' + ','.join(media_types) + ']',
        'post_type': '0',
    }
    async with session.post(addr + '/wrk2-api/post/compose', data=payload) as resp:
        body = await resp.text()
        return body
def getNumNodes(file):
    """Read the node count from the first line of an open graph file."""
    first_line = file.readline()
    return int(first_line)
def getEdges(file):
    """Parse the remaining lines of an open edge-list file.

    Each line is split on whitespace; returns a list of token lists.
    """
    return [line.split() for line in file.readlines()]
def printResults(results):
    """Tally identical response bodies and print a success/failure summary.

    An empty body or one starting with "Success" counts as success; bodies
    containing "500 Internal Server Error" are summarized, anything else is
    printed verbatim (stripped).
    """
    tally = {}
    for outcome in results:
        tally[outcome] = tally.get(outcome, 0) + 1
    for outcome, count in tally.items():
        if outcome == '' or outcome.startswith('Success'):
            print('Succeeded:', count)
        elif '500 Internal Server Error' in outcome:
            print('Failed:', count, 'Error:', 'Internal Server Error')
        else:
            print('Failed:', count, 'Error:', outcome.strip())
async def register(addr, nodes):
    """Register `nodes` synthetic users, draining the batch every 200 requests."""
    pending = []
    conn = aiohttp.TCPConnector(limit=200)
    async with aiohttp.ClientSession(connector=conn) as session:
        print('Registering Users...')
        for user_id in range(nodes):
            pending.append(
                asyncio.ensure_future(upload_register(session, addr, str(user_id))))
            if user_id % 200 == 0:
                # checkpoint: await everything scheduled so far
                _ = await asyncio.gather(*pending)
                print(user_id)
        results = await asyncio.gather(*pending)
        printResults(results)
async def follow(addr, edges):
    """Create follow relations for every edge (in both directions),
    draining the batch every 200 edges."""
    pending = []
    conn = aiohttp.TCPConnector(limit=200)
    async with aiohttp.ClientSession(connector=conn) as session:
        print('Adding follows...')
        for count, pair in enumerate(edges, start=1):
            # follow in both directions
            pending.append(asyncio.ensure_future(
                upload_follow(session, addr, pair[0], pair[1])))
            pending.append(asyncio.ensure_future(
                upload_follow(session, addr, pair[1], pair[0])))
            if count % 200 == 0:
                _ = await asyncio.gather(*pending)
                print(count)
        results = await asyncio.gather(*pending)
        printResults(results)
async def compose(addr, nodes):
    """Compose a random number of posts (0-20) for each user,
    draining the batch every 200 posts."""
    posted = 0
    pending = []
    conn = aiohttp.TCPConnector(limit=200)
    async with aiohttp.ClientSession(connector=conn) as session:
        print('Composing posts...')
        for user in range(nodes):
            for _ in range(random.randint(0, 20)):  # up to 20 posts per user, average 10
                pending.append(asyncio.ensure_future(
                    upload_compose(session, addr, user + 1, nodes)))
                posted += 1
                if posted % 200 == 0:
                    _ = await asyncio.gather(*pending)
                    print(posted)
        results = await asyncio.gather(*pending)
        printResults(results)
# Entry point: parse CLI options, load the chosen social graph from disk,
# then register users, create follow edges and (optionally) compose posts
# against the socialNetwork NGINX frontend.
if __name__ == '__main__':
    parser = argparse.ArgumentParser('DeathStarBench social graph initializer.')
    parser.add_argument(
        '--graph', help='Graph name. (`socfb-Reed98`, `ego-twitter`, or `soc-twitter-follows-mun`)', default='socfb-Reed98')
    parser.add_argument(
        '--ip', help='IP address of socialNetwork NGINX web server. ', default='10.107.191.60')
    parser.add_argument(
        '--port', help='IP port of socialNetwork NGINX web server.', default=18080)
    parser.add_argument('--compose', action='store_true',
                        help='intialize with up to 20 posts per user', default=False)
    args = parser.parse_args()
    # Dataset layout: datasets/social-graph/<name>/<name>.{nodes,edges}
    with open(os.path.join('datasets/social-graph', args.graph, f'{args.graph}.nodes'), 'r') as f:
        nodes = getNumNodes(f)
    with open(os.path.join('datasets/social-graph', args.graph, f'{args.graph}.edges'), 'r') as f:
        edges = getEdges(f)
    random.seed(1)  # deterministic random numbers
    addr = 'http://{}:{}'.format(args.ip, args.port)
    print('addr', addr)
    # Each phase is run to completion on the loop before the next starts.
    loop = asyncio.get_event_loop()
    future = asyncio.ensure_future(register(addr, nodes))
    loop.run_until_complete(future)
    future = asyncio.ensure_future(follow(addr, edges))
    loop.run_until_complete(future)
    if args.compose:
        future = asyncio.ensure_future(compose(addr, nodes))
        loop.run_until_complete(future)
| 33.695652 | 122 | 0.650507 | 0 | 0 | 0 | 0 | 0 | 0 | 3,276 | 0.603871 | 1,068 | 0.196866 |
98b7f6b7b89c34e5105fa11524b6903c3e65f006 | 1,152 | py | Python | src/reader/test_cases/test_wiki_article.py | LukeMurphey/textcritical_net | 887b11ac66d1970576ea83b307cefc4fdc4319b5 | [
"CC-BY-3.0"
] | 6 | 2019-05-18T04:31:39.000Z | 2020-12-10T15:22:59.000Z | src/reader/test_cases/test_wiki_article.py | LukeMurphey/textcritical_net | 887b11ac66d1970576ea83b307cefc4fdc4319b5 | [
"CC-BY-3.0"
] | 1 | 2020-05-19T06:04:11.000Z | 2020-06-04T06:16:02.000Z | src/reader/test_cases/test_wiki_article.py | LukeMurphey/textcritical_net | 887b11ac66d1970576ea83b307cefc4fdc4319b5 | [
"CC-BY-3.0"
] | null | null | null | from . import TestReader
from reader.models import WikiArticle
class TestWikiArticle(TestReader):
    """Tests for WikiArticle.get_wiki_article lookups."""

    def test_get_wiki_article(self):
        """Lookups by plain string or candidate list resolve to the stored article."""
        # Seed two search terms that both map to the same article.
        for search_term in (
            "M. Antonius Imperator Ad Se Ipsum",
            "M. Antonius Imperator Ad Se Ipsum Marcus Aurelius",
        ):
            WikiArticle(search=search_term, article="Meditations").save()

        cases = [
            # lookup with a plain string
            ("M. Antonius Imperator Ad Se Ipsum", "Meditations"),
            # lookup with a single-element list
            (["M. Antonius Imperator Ad Se Ipsum"], "Meditations"),
            # first candidate misses, second matches
            (["Tree", "M. Antonius Imperator Ad Se Ipsum"], "Meditations"),
            # nothing matches
            (["Tree", "Frogs"], None),
        ]
        for query, expected in cases:
            self.assertEqual(WikiArticle.get_wiki_article(query), expected)
| 41.142857 | 110 | 0.658854 | 1,087 | 0.943576 | 0 | 0 | 0 | 0 | 0 | 0 | 442 | 0.383681 |
98b9a46afb25d58d865589959a96d2c38ca63150 | 7,399 | py | Python | tests/test_predictions.py | platiagro/projects | 00da234b35003bb0ecc2d22a997e08737ceda044 | [
"Apache-2.0"
] | 6 | 2019-09-16T13:07:20.000Z | 2021-06-02T19:02:05.000Z | tests/test_predictions.py | platiagro/projects | 00da234b35003bb0ecc2d22a997e08737ceda044 | [
"Apache-2.0"
] | 325 | 2019-09-20T20:06:00.000Z | 2022-03-30T15:05:49.000Z | tests/test_predictions.py | platiagro/projects | 00da234b35003bb0ecc2d22a997e08737ceda044 | [
"Apache-2.0"
] | 17 | 2019-08-02T16:55:47.000Z | 2021-06-26T19:13:35.000Z | # -*- coding: utf-8 -*-
import unittest
import unittest.mock as mock
import requests
import json
from io import BytesIO
from fastapi.testclient import TestClient
from projects.api.main import app
from projects.database import session_scope
import tests.util as util
# Route every request through the test double for the DB session scope so no
# real database is touched, then build one shared client for all tests below.
app.dependency_overrides[session_scope] = util.override_session_scope
TEST_CLIENT = TestClient(app)
class TestPredictions(unittest.TestCase):
    """HTTP-level tests for POST /projects/{id}/deployments/{id}/predictions.

    `requests.post` and the dataset loader are patched where needed, so no
    model server or object storage is contacted; fixtures come from
    `tests.util` mocks created in setUp.
    """
    # Show full diffs on assertion failures (the expected payloads are large).
    maxDiff = None
    def setUp(self):
        """
        Sets up the test before running it.
        """
        util.create_mocks()
    def tearDown(self):
        """
        Deconstructs the test after running it.
        """
        util.delete_mocks()
    def test_create_prediction_deployments_does_not_exist(self):
        """
        Should return an http status 404 and an error message "The specified deployment does not exist".
        """
        project_id = util.MOCK_UUID_1
        deployment_id = "unk"
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions"
        )
        result = rv.json()
        expected = {
            "message": "The specified deployment does not exist",
            "code": "DeploymentNotFound",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 404)
    def test_create_prediction_projects_does_not_exist(self):
        """
        Should return an http status 404 and an error message "The specified projects does not exist".
        """
        project_id = "unk"
        deployment_id = util.MOCK_UUID_1
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions"
        )
        result = rv.json()
        expected = {
            "message": "The specified project does not exist",
            "code": "ProjectNotFound",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 404)
    def test_create_prediction_form_required(self):
        """
        Should return an http status 400 and a message 'either form-data or json is required'.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        # No body at all -> the endpoint rejects the request.
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions"
        )
        result = rv.json()
        expected = {
            "message": "either form-data or json is required",
            "code": "MissingRequiredFormDataOrJson",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 400)
    def test_create_prediction_dataset_name_required(self):
        """
        Should return an http status 400 and a message 'either dataset name or file is required'.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        # Empty JSON body: present but missing the "dataset" key.
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions", json={}
        )
        result = rv.json()
        expected = {
            "message": "either dataset name or file is required",
            "code": "MissingRequiredDatasetOrFile",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 400)
    # Loader raises FileNotFoundError -> the API maps it to InvalidDataset.
    @mock.patch(
        "projects.controllers.predictions.load_dataset",
        side_effect=util.FILE_NOT_FOUND_ERROR,
    )
    def test_create_prediction_dataset_required(self, mock_load_dataset):
        """
        Should return an http status 400 and a message 'a valid dataset is required'.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        name_dataset = "unk"
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions",
            json={"dataset": name_dataset},
        )
        result = rv.json()
        expected = {
            "message": "a valid dataset is required",
            "code": "InvalidDataset",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 400)
        mock_load_dataset.assert_any_call(name_dataset)
    # Happy path: the loader returns the iris dataframe and requests.post is
    # patched, so the forwarded Seldon payload can be asserted exactly.
    @mock.patch(
        "projects.controllers.predictions.load_dataset",
        return_value=util.IRIS_DATAFRAME,
    )
    @mock.patch(
        "requests.post",
        return_value=util.MOCK_POST_PREDICTION,
    )
    def test_create_prediction_dataset(
        self,
        mock_requests_post,
        mock_load_dataset,
    ):
        """
        Should load dataset request successfully.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        name = util.IRIS_DATASET_NAME
        url = "http://uuid-1-model.anonymous:8000/api/v1.0/predictions"
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions",
            json={"dataset": name},
        )
        result = rv.json()
        self.assertIsInstance(result, dict)
        self.assertEqual(rv.status_code, 200)
        mock_load_dataset.assert_any_call(name)
        # The dataframe must be forwarded as names + ndarray to the model URL.
        mock_requests_post.assert_any_call(
            url=url,
            json={
                "data": {
                    "names": [
                        "SepalLengthCm",
                        "SepalWidthCm",
                        "PetalLengthCm",
                        "PetalWidthCm",
                        "Species",
                    ],
                    "ndarray": [
                        [5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
                        [4.9, 3.0, 1.4, 0.2, "Iris-setosa"],
                        [4.7, 3.2, 1.3, 0.2, "Iris-setosa"],
                        [4.6, 3.1, 1.5, 0.2, "Iris-setosa"],
                    ],
                }
            },
        )
    # NOTE(review): despite the "image" name, load_dataset is mocked to
    # return the iris dataframe, so this exercises the same forwarding path
    # as the previous test with a .jpg dataset name — confirm intent.
    @mock.patch(
        "projects.controllers.predictions.load_dataset",
        return_value=util.IRIS_DATAFRAME,
    )
    @mock.patch(
        "requests.post",
        return_value=util.MOCK_POST_PREDICTION,
    )
    def test_create_prediction_dataset_image(
        self,
        mock_requests_post,
        mock_load_dataset,
    ):
        """
        Should load the dataset request with an image successfully.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        dataset_name = "mock.jpg"
        url = "http://uuid-1-model.anonymous:8000/api/v1.0/predictions"
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions",
            json={"dataset": dataset_name},
        )
        result = rv.json()
        self.assertIsInstance(result, dict)
        self.assertEqual(rv.status_code, 200)
        mock_load_dataset.assert_any_call(dataset_name)
        mock_requests_post.assert_any_call(
            url=url,
            json={
                "data": {
                    "names": [
                        "SepalLengthCm",
                        "SepalWidthCm",
                        "PetalLengthCm",
                        "PetalWidthCm",
                        "Species",
                    ],
                    "ndarray": [
                        [5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
                        [4.9, 3.0, 1.4, 0.2, "Iris-setosa"],
                        [4.7, 3.2, 1.3, 0.2, "Iris-setosa"],
                        [4.6, 3.1, 1.5, 0.2, "Iris-setosa"],
                    ],
                }
            },
        )
| 31.619658 | 104 | 0.554805 | 7,026 | 0.949588 | 0 | 0 | 4,149 | 0.560751 | 0 | 0 | 2,305 | 0.311529 |
7f2501da9305f389d3f740592cc04a7f9d85b66c | 114 | py | Python | examples/add_module.py | satyavls/simple_mock | 5344f7383de6fa3d8270bec611d6986416d7f278 | [
"MIT"
] | 1 | 2019-06-03T17:40:31.000Z | 2019-06-03T17:40:31.000Z | examples/add_module.py | satyavls/simple_mock | 5344f7383de6fa3d8270bec611d6986416d7f278 | [
"MIT"
] | null | null | null | examples/add_module.py | satyavls/simple_mock | 5344f7383de6fa3d8270bec611d6986416d7f278 | [
"MIT"
def add_num(x, y):
    """Return the sum of x and y."""
    # Fix: the original `def` line was fused with dataset-extraction residue
    # ("] | null | ..."), leaving the file syntactically broken.
    return x + y
def sub_num(x, y):
    """Return the difference x - y."""
    difference = x - y
    return difference
class MathFunctions(object):
    """Empty placeholder class; it intentionally defines no members."""
| 10.363636 | 28 | 0.596491 | 37 | 0.324561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7f26586a1c1633037e799675a12e878cfa0662c1 | 37,344 | py | Python | terraform/stacks/threat-intelligence/lambdas/python/cloud-sniper-threat-intelligence/cloud_sniper_threat_intelligence.py | houey/cloud-sniper | b0ac98eddc0b2da0f37c70926e2cef897283d787 | [
"MIT"
] | 160 | 2019-09-27T18:02:03.000Z | 2022-03-15T23:46:40.000Z | terraform/stacks/threat-intelligence/lambdas/python/cloud-sniper-threat-intelligence/cloud_sniper_threat_intelligence.py | houey/cloud-sniper | b0ac98eddc0b2da0f37c70926e2cef897283d787 | [
"MIT"
] | 2 | 2019-10-21T13:30:17.000Z | 2019-10-30T00:09:11.000Z | terraform/stacks/threat-intelligence/lambdas/python/cloud-sniper-threat-intelligence/cloud_sniper_threat_intelligence.py | houey/cloud-sniper | b0ac98eddc0b2da0f37c70926e2cef897283d787 | [
"MIT"
] | 31 | 2019-10-19T18:10:23.000Z | 2022-02-28T14:13:19.000Z | import boto3
import json
import datetime
import logging
import os
import ipaddress
import requests
log = logging.getLogger()
log.setLevel(logging.INFO)
QUEUE_URL = os.environ['SQS_QUEUE_CLOUD_SNIPER']
DYNAMO_TABLE = os.environ['DYNAMO_TABLE_CLOUD_SNIPER']
WEBHOOK_URL = os.environ['WEBHOOK_URL_CLOUD_SNIPER']
HUB_ACCOUNT_ID = os.environ['HUB_ACCOUNT_ID_CLOUD_SNIPER']
ROLE_SPOKE = os.environ['ROLE_SPOKE_CLOUD_SNIPER']
BUCKET_NAME = os.environ['BUCKET_NAME']
IOCS_PATH = os.environ['IOCS_PATH']
TOPIC_ARN = os.environ['TOPIC_ARN']
message = []
json_a = []
# hub account
s = boto3.session.Session(region_name=os.environ['AWS_REGION'])
ec2 = s.client('ec2')
sqs = s.client('sqs')
iam = s.client('iam')
r_ec2 = s.resource('ec2')
dynamodb = s.resource('dynamodb')
sns = s.client('sns')
# spoke account
sts_connection = boto3.client('sts')
networkConnectionAction = [
"UnauthorizedAccess:EC2/SSHBruteForce",
]
portProbeAction = [
"Recon:EC2/PortProbeUnprotectedPort",
]
instanceDetails = [
"UnauthorizedAccess:EC2/TorIPCaller",
]
awsApiCallAction = [
"Recon:IAMUser/TorIPCaller",
]
def read_sqs():
    """Poll the Cloud Sniper SQS queue once.

    Returns the list of up to 10 raw messages, or None when the queue is
    empty (the empty case is logged).
    """
    log.info("Processing queue")
    response = sqs.receive_message(
        QueueUrl=QUEUE_URL,
        MaxNumberOfMessages=10,
        MessageAttributeNames=['All'],
    )
    messages = response.get('Messages')
    if messages is None:
        log.info("There is no new message in the queue")
    return messages
def search_ioc():
    """Extract IOCs from the queued GuardDuty findings held in `message`.

    Each supported finding type is flattened into one comma-separated IOC
    string, pushed to S3 via put_to_s3() and accumulated in the module-level
    `json_a` list for incident_and_response(). Free-text fields (org, isp,
    city, tags, ...) have commas replaced so they cannot break the CSV
    layout. Unparseable messages are logged and skipped.
    """
    log.info("Searching for IOC ...")
    global json_a
    for b in message:
        body = b['Body']
        data = json.loads(body)
        try:
            # Map the finding type onto the JSON "action" shape it carries.
            flag = 0
            for dt in networkConnectionAction:
                if data["detail"]["type"] == dt:
                    flag = 1
                    break
            for dt in portProbeAction:
                if data["detail"]["type"] == dt:
                    flag = 2
                    break
            for dt in instanceDetails:
                if data["detail"]["type"] == dt:
                    flag = 3
                    break
            for dt in awsApiCallAction:
                if data["detail"]["type"] == dt:
                    flag = 4
                    break
            if flag == 1:
                # networkConnectionAction finding (e.g. SSH brute force):
                # only public source IPs on INBOUND connections are kept.
                ioc = []
                src_ip = (json.dumps(
                    data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                        "ipAddressV4"])).strip('"')
                direction = data["detail"]["service"]["action"]["networkConnectionAction"]["connectionDirection"]
                if ipaddress.ip_address(src_ip).is_private is False and direction == "INBOUND":
                    account_id = data["detail"]["accountId"]
                    region = data["detail"]["region"]
                    subnet_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["subnetId"]
                    instance_id = data["detail"]["resource"]["instanceDetails"]["instanceId"]
                    ttp = data["detail"]["type"]
                    asn = \
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                            "organization"][
                            "asn"]
                    asn_org = (
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                            "organization"][
                            "asnOrg"]).replace(",", " ")
                    isp = (
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                            "organization"][
                            "isp"]).replace(",", " ")
                    org = (
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                            "organization"][
                            "org"]).replace(",", " ")
                    country = \
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"]["country"][
                            "countryName"]
                    city = (data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"]["city"][
                        "cityName"]).replace(",", " ")
                    nacl_id = get_netacl_id(subnet_id, account_id)
                    hits = str(data["detail"]["service"]["count"])
                    vpc_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["vpcId"]
                    sg_name = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupName"]
                    sg_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupId"]
                    tags = (str(data["detail"]["resource"]["instanceDetails"]["tags"])).replace(",", "")
                    account_alias = str(get_account_alias(account_id))
                    event_first_seen = str(data["detail"]["service"]["eventFirstSeen"])
                    # 20-field CSV layout consumed by incident_and_response().
                    ioc = ttp + "," + hits + "," + account_id + "," + account_alias + "," + region + "," + subnet_id + "," + src_ip + "," + instance_id + "," + nacl_id + "," + country + "," + city + "," + asn_org + "," + org + "," + isp + "," + asn + "," + vpc_id + "," + sg_name + "," + sg_id + "," + tags + "," + event_first_seen
                    log.info("IOCs: " + str(ioc))
                    put_to_s3(ioc)
                    # NOTE(review): this append loop does NOT deduplicate —
                    # it appends once per existing non-equal element while
                    # iterating the growing list; incident_and_response()
                    # compensates by wrapping json_a in set().
                    if len(json_a) == 0:
                        json_a.append(ioc)
                    else:
                        for e in json_a:
                            if e != ioc:
                                json_a.append(ioc)
            elif flag == 2:
                # portProbeAction finding: same fields, different JSON path;
                # only public source IPs are kept.
                ioc = []
                src_ip = (json.dumps(
                    data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0]["remoteIpDetails"][
                        "ipAddressV4"])).strip('"')
                if ipaddress.ip_address(src_ip).is_private is False:
                    account_id = data["detail"]["accountId"]
                    region = data["detail"]["region"]
                    subnet_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["subnetId"]
                    instance_id = data["detail"]["resource"]["instanceDetails"]["instanceId"]
                    ttp = data["detail"]["type"]
                    country = \
                        data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
                            "remoteIpDetails"][
                            "country"][
                            "countryName"]
                    city = (
                        data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
                            "remoteIpDetails"][
                            "city"][
                            "cityName"]).replace(",", " ")
                    asn_org = (
                        data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
                            "remoteIpDetails"][
                            "organization"][
                            "asnOrg"]).replace(",", " ")
                    org = (
                        data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
                            "remoteIpDetails"][
                            "organization"][
                            "org"]).replace(",", " ")
                    isp = (
                        data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
                            "remoteIpDetails"][
                            "organization"][
                            "isp"]).replace(",", " ")
                    asn = \
                        data["detail"]["service"]["action"]["portProbeAction"]["portProbeDetails"][0][
                            "remoteIpDetails"][
                            "organization"][
                            "asn"]
                    nacl_id = get_netacl_id(subnet_id, account_id)
                    hits = str(data["detail"]["service"]["count"])
                    vpc_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["vpcId"]
                    sg_name = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupName"]
                    sg_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupId"]
                    tags = (str(data["detail"]["resource"]["instanceDetails"]["tags"])).replace(",", "")
                    account_alias = str(get_account_alias(account_id))
                    event_first_seen = str(data["detail"]["service"]["eventFirstSeen"])
                    # 20-field CSV layout consumed by incident_and_response().
                    ioc = ttp + "," + hits + "," + account_id + "," + account_alias + "," + region + "," + subnet_id + "," + src_ip + "," + instance_id + "," + nacl_id + "," + country + "," + city + "," + asn_org + "," + org + "," + isp + "," + asn + "," + vpc_id + "," + sg_name + "," + sg_id + "," + tags + "," + event_first_seen
                    log.info("IOCs: " + str(ioc))
                    put_to_s3(ioc)
                    # NOTE(review): same non-deduplicating append loop as above.
                    if len(json_a) == 0:
                        json_a.append(ioc)
                    else:
                        for e in json_a:
                            if e != ioc:
                                json_a.append(ioc)
            elif flag == 3:
                # Instance-level Tor caller finding: same JSON shape as
                # flag 1, but city may be absent (defaults to "NIA").
                ioc = []
                src_ip = (json.dumps(
                    data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                        "ipAddressV4"])).strip('"')
                direction = data["detail"]["service"]["action"]["networkConnectionAction"]["connectionDirection"]
                if ipaddress.ip_address(src_ip).is_private is False and direction == "INBOUND":
                    account_id = data["detail"]["accountId"]
                    region = data["detail"]["region"]
                    subnet_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["subnetId"]
                    instance_id = data["detail"]["resource"]["instanceDetails"]["instanceId"]
                    ttp = data["detail"]["type"]
                    asn = \
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                            "organization"][
                            "asn"]
                    asn_org = (
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                            "organization"][
                            "asnOrg"]).replace(",", " ")
                    isp = (
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                            "organization"][
                            "isp"]).replace(",", " ")
                    org = (
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"][
                            "organization"][
                            "org"]).replace(",", " ")
                    country = \
                        data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"]["country"][
                            "countryName"]
                    try:
                        city = str((data["detail"]["service"]["action"]["networkConnectionAction"]["remoteIpDetails"]["city"][
                            "cityName"]).replace(",", " "))
                    except Exception as e:
                        # "NIA" = not in attributes; city is optional here.
                        city = "NIA"
                    nacl_id = get_netacl_id(subnet_id, account_id)
                    hits = str(data["detail"]["service"]["count"])
                    vpc_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["vpcId"]
                    sg_name = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupName"]
                    sg_id = data["detail"]["resource"]["instanceDetails"]["networkInterfaces"][0]["securityGroups"][0]["groupId"]
                    tags = (str(data["detail"]["resource"]["instanceDetails"]["tags"])).replace(",", "")
                    account_alias = str(get_account_alias(account_id))
                    event_first_seen = str(data["detail"]["service"]["eventFirstSeen"])
                    # 20-field CSV layout consumed by incident_and_response().
                    ioc = ttp + "," + hits + "," + account_id + "," + account_alias + "," + region + "," + subnet_id + "," + src_ip + "," + instance_id + "," + nacl_id + "," + country + "," + city + "," + asn_org + "," + org + "," + isp + "," + asn + "," + vpc_id + "," + sg_name + "," + sg_id + "," + tags + "," + event_first_seen
                    log.info("IOCs: " + str(ioc))
                    put_to_s3(ioc)
                    # NOTE(review): same non-deduplicating append loop as above.
                    if len(json_a) == 0:
                        json_a.append(ioc)
                    else:
                        for e in json_a:
                            if e != ioc:
                                json_a.append(ioc)
            elif flag == 4:
                # IAM/API-call Tor caller finding: no EC2 instance fields
                # (left empty), plus principal_id/user_name -> 22-field CSV.
                ioc = []
                src_ip = (json.dumps(
                    data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
                        "ipAddressV4"])).strip('"')
                account_id = data["detail"]["accountId"]
                region = data["detail"]["region"]
                ttp = data["detail"]["type"]
                asn = \
                    data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
                        "organization"][
                        "asn"]
                asn_org = (
                    data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
                        "organization"][
                        "asnOrg"]).replace(",", " ")
                isp = (
                    data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
                        "organization"][
                        "isp"]).replace(",", " ")
                org = (
                    data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"][
                        "organization"][
                        "org"]).replace(",", " ")
                country = \
                    data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"]["country"][
                        "countryName"]
                try:
                    city = str((data["detail"]["service"]["action"]["awsApiCallAction"]["remoteIpDetails"]["city"][
                        "cityName"]).replace(",", " "))
                except Exception as e:
                    # "NIA" = not in attributes; city is optional here.
                    city = "NIA"
                hits = str(data["detail"]["service"]["count"])
                account_alias = str(get_account_alias(account_id))
                event_first_seen = str(data["detail"]["service"]["eventFirstSeen"])
                # No instance involved: keep the CSV column positions stable.
                subnet_id = instance_id = nacl_id = vpc_id = sg_name = sg_id = tags = ""
                principal_id = data["detail"]["resource"]["accessKeyDetails"]["principalId"]
                user_name = data["detail"]["resource"]["accessKeyDetails"]["userName"]
                ioc = ttp + "," + hits + "," + account_id + "," + account_alias + "," + region + "," + subnet_id + "," + src_ip + "," + instance_id + "," + nacl_id + "," + country + "," + city + "," + asn_org + "," + org + "," + isp + "," + asn + "," + vpc_id + "," + sg_name + "," + sg_id + "," + tags + "," + event_first_seen + "," + principal_id + "," + user_name
                log.info("IOCs: " + str(ioc))
                put_to_s3(ioc)
                # NOTE(review): same non-deduplicating append loop as above.
                if len(json_a) == 0:
                    json_a.append(ioc)
                else:
                    for e in json_a:
                        if e != ioc:
                            json_a.append(ioc)
        except Exception as e:
            log.info("JSON could not be parsed:" + str(e))
def get_netacl_id(subnet_id, account_id):
    """Return the Network ACL id associated with `subnet_id`.

    Uses the hub account's EC2 client directly, or an assumed role for
    spoke accounts. Returns "" when the subnet has no matching association
    and None when the lookup fails (the error is logged, never raised).

    Fix: the hub and spoke branches previously duplicated the whole
    describe_network_acls call; the client choice is now factored out.
    """
    log.info("Getting NACL id for subnet: " + str(subnet_id) + " account: " + str(account_id))
    try:
        # Hub account talks through the module-level client; spoke accounts
        # go through a cross-account assumed role.
        if account_id != HUB_ACCOUNT_ID:
            client = assume_role(account_id, "client")
        else:
            client = ec2
        response = client.describe_network_acls(
            Filters=[
                {
                    'Name': 'association.subnet-id',
                    'Values': [
                        subnet_id,
                    ]
                }
            ]
        )
        nacl_id = ""
        # The filter already narrowed to this subnet; keep the explicit
        # match to pick the association for exactly this subnet id.
        for association in response['NetworkAcls'][0]['Associations']:
            if association['SubnetId'] == subnet_id:
                nacl_id = association['NetworkAclId']
        log.info("NACL found:" + str(nacl_id))
        return nacl_id
    except Exception as e:
        # Best-effort lookup: callers treat None as "no NACL available".
        log.info("Failed to get NACL id:" + str(e))
def incident_and_response():
    """Block each unique attacker recorded in `json_a` and persist the IOC.

    For every comma-separated IOC string collected by search_ioc():
      * 20 fields -> instance-level finding: reserve a NACL rule number,
        add a deny entry and, on success, store the IOC in DynamoDB and
        notify Slack.
      * 22 fields -> IAM/API finding (carries principal_id/user_name):
        there is no NACL to update, so only Slack is notified.
      * otherwise -> legacy 13-field format without geo/ASN enrichment;
        enrichment columns default to "NIA".
    """
    log.info("Incident and Response Automation ...")
    ts = str(datetime.datetime.now())
    # search_ioc() may append duplicates; deduplicate here.
    unique_iocs = set(json_a)
    for jsa in unique_iocs:
        fields = jsa.split(",")
        field_count = len(fields)
        rule_no = "-1"
        if field_count == 20:
            ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen = fields
            # get_nacl_rule() returns existing CIDRs with the chosen rule
            # number appended last; pop it back off.
            lst_nacl = get_nacl_rule(nacl_id, account_id)
            rule_no = int(lst_nacl.pop())
            result = create_nacl_rule(nacl_id, src_ip, rule_no, account_id, set(lst_nacl))
            if result:
                update_ioc(src_ip, ts, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id, instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen)
                message_to_slack(jsa)
        elif field_count == 22:
            ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen, principal_id, user_name = fields
            message_to_slack(jsa)
        else:
            country = city = asn_org = org = isp = asn = "NIA"
            # BUG FIX: `hits` was never assigned on this path, so the
            # update_ioc() call below raised NameError; default it to "0"
            # (hit count unknown for the legacy format).
            hits = "0"
            ttp, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, vpc_id, sg_name, sg_id, tags, event_first_seen = fields
            lst_nacl = get_nacl_rule(nacl_id, account_id)
            rule_no = int(lst_nacl.pop())
            result = create_nacl_rule(nacl_id, src_ip, rule_no, account_id, set(lst_nacl))
            if result:
                update_ioc(src_ip, ts, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id, instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen)
                message_to_slack(jsa)
def get_nacl_rule(nacl_id, account_id):
    """Choose a free NACL rule number for a new deny entry.

    Returns the list of CIDR blocks already present in the NACL with the
    chosen rule number appended as a string (the caller pops it back off).

    Raises:
        ValueError: when no rule number below 100 is free.
    """
    entries = get_rules(nacl_id, account_id)
    log.info("Getting rule number (entry) for NACL: " + str(nacl_id) + " account: " + str(account_id))
    used_numbers = []
    cidrs = []
    for entry in entries:
        number, cidr = entry.split(",")
        used_numbers.append(int(number))
        cidrs.append(cidr)
    lowest = min(used_numbers)
    if lowest == 100:
        # Only the default rules (>= 100) exist, so slot 1 is free.
        rule_no = 1
    else:
        # First unused number above the lowest custom rule, below 100.
        rule_no = next(
            (n for n in range(lowest + 1, 100) if n not in used_numbers),
            None,
        )
        if rule_no is None:
            # BUG FIX: the original loop could leave rule_no unbound here,
            # crashing below with UnboundLocalError; fail with a clear error.
            raise ValueError("no free NACL rule number below 100 in " + str(nacl_id))
    log.info("Rule number (entry): " + str(rule_no))
    log.info("CIDR already added: " + str(set(cidrs)))
    cidrs.append(str(rule_no))
    return cidrs
def get_rules(nacl_id, account_id):
    """Return the NACL's entries as "rule_number,cidr" strings.

    Uses the hub account's EC2 client directly, or an assumed role for
    spoke accounts.

    Fix: the hub and spoke branches previously duplicated the whole
    describe_network_acls call; the client choice is now factored out.
    NOTE(review): entries created with only an Ipv6CidrBlock would raise
    KeyError here (same as the original) — confirm IPv6 is out of scope.
    """
    log.info("Getting rules for NACL: " + str(nacl_id) + " account: " + str(account_id))
    if account_id != HUB_ACCOUNT_ID:
        client = assume_role(account_id, "client")
    else:
        client = ec2
    response = client.describe_network_acls(
        NetworkAclIds=[
            nacl_id,
        ],
    )
    return [
        str(entry['RuleNumber']) + "," + str(entry['CidrBlock'])
        for entry in response['NetworkAcls'][0]['Entries']
    ]
def create_nacl_rule(nacl_id, attacker_ip, rule_no, account_id, lst_nacl):
    """Add a deny-all entry for *attacker_ip* (/32) to the given NACL.

    Returns True when the entry was created (HTTP 200), False otherwise
    (attacker already blocked, NACL full, or the API call failed).
    """
    global HUB_ACCOUNT_ID
    log.info("Creating NACL rule for attacker:" + str(attacker_ip))
    cidr = attacker_ip + '/32'
    if cidr in lst_nacl:
        log.info("Attacker is already blocked")
        return False
    # Bug fix: the old check (`elif len(lst_nacl) == 20`) only matched exactly
    # 20 entries, so 19 or >20 fell through to the misleading "already
    # blocked" message. Treat anything above the 18-entry budget as full.
    if len(lst_nacl) > 18:
        log.info("NACL is full, no more than 18 entries can be added")
        return False
    if account_id != HUB_ACCOUNT_ID:
        # Spoke account: use the assumed-role EC2 resource.
        nacl = assume_role(account_id, "resource").NetworkAcl(nacl_id)
    else:
        nacl = r_ec2.NetworkAcl(nacl_id)
    response = nacl.create_entry(
        CidrBlock=cidr,
        Egress=False,
        PortRange={
            'From': 0,
            'To': 65535
        },
        Protocol='-1',
        RuleAction='deny',
        RuleNumber=rule_no
    )
    return response['ResponseMetadata']['HTTPStatusCode'] == 200
def get_account_alias(account_id):
    """Return the first IAM account alias for *account_id* ("" when none).

    Uses a cross-account IAM client when the account is not the hub account.
    """
    log.info("Getting alias for account: " + str(account_id))
    global HUB_ACCOUNT_ID
    if account_id != HUB_ACCOUNT_ID:
        client = assume_role(account_id, "iam")
        response = client.list_account_aliases()
    else:
        response = iam.list_account_aliases()
    # Bug fix: the old `str(list)[2:-2]` slicing broke for aliases containing
    # quotes and produced garbage when more than one alias was returned.
    # Index the list directly instead (an account has at most one alias in
    # practice; "" is returned when it has none).
    aliases = response['AccountAliases']
    return aliases[0] if aliases else ""
def update_ioc(attacker_ip, timestamp, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id, instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen):
    """Upsert the attacker record in the DynamoDB IOC table.

    Updates hits/rule_no when a row for *attacker_ip* already exists;
    otherwise creates a brand-new record with the full finding context.
    """
    log.info("Sending IOCs to DynamoDB ...")
    try:
        table = dynamodb.Table(DYNAMO_TABLE)
        existing_items = table.scan()['Items']
        found = False
        for item in existing_items:
            if item['attacker_ip'] == attacker_ip:
                update_entry_attackers(attacker_ip, hits, rule_no, False)
                found = True
        if not found:
            create_entry_attackers(attacker_ip, timestamp, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id,
                                   instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen)
    except Exception as e:
        log.info("DynamoDB entry could not be updated" + str(e))
def update_entry_attackers(attacker_ip, hits, rule_no, deleted):
    """Update hits and rule_no for an existing attacker row.

    *deleted* only selects the log message: True means the NACL entry was
    just cleaned (rule_no reset to "0"), False means a fresh sighting.
    """
    table = dynamodb.Table(DYNAMO_TABLE)
    try:
        # Fix: both branches previously performed the exact same update_item
        # call — only the log message differed. Deduplicate.
        if deleted:
            log.info("Updating cleaned (NACL) DynamoDB entry for attacker: " + str(attacker_ip))
        else:
            log.info("Updating new DynamoDB entry for attacker: " + str(attacker_ip))
        table.update_item(
            Key={
                'attacker_ip': attacker_ip
            },
            UpdateExpression="set hits = :h, rule_no = :r_no",
            ExpressionAttributeValues={
                ':h': hits,
                ':r_no': rule_no
            },
            ReturnValues="UPDATED_NEW"
        )
        return
    except Exception as e:
        log.info("DynamoDB could not be updated:" + str(e))
def create_entry_attackers(attacker_ip, timestamp, ttp, hits, region, account_id, account_alias, nacl_id, subnet_id, instance_id, country, city, asn_org, org, isp, asn, rule_no, vpc_id, sg_name, sg_id, tags, event_first_seen):
    """Insert a brand-new attacker record into the DynamoDB IOC table."""
    if not city:
        city = "NIA"  # same placeholder the caller uses for unknown geo fields
    log.info("Creating DynamoDB entry for attacker:" + str(attacker_ip))
    try:
        table = dynamodb.Table(DYNAMO_TABLE)
        # Attribute names and values paired positionally; everything is
        # stored as a string, matching the rest of the table.
        field_names = (
            'attacker_ip', 'timestamp', 'ttp', 'hits', 'region',
            'account_id', 'account_alias', 'vpc_id', 'nacl_id',
            'subnet_id', 'instance_id', 'tags', 'sg_name', 'sg_id',
            'country', 'city', 'asn_org', 'org', 'isp', 'asn',
            'rule_no', 'event_first_seen'
        )
        field_values = (
            attacker_ip, timestamp, ttp, hits, region,
            account_id, account_alias, vpc_id, nacl_id,
            subnet_id, instance_id, tags, sg_name, sg_id,
            country, city, asn_org, org, isp, asn,
            rule_no, event_first_seen
        )
        table.put_item(
            Item={name: str(value) for name, value in zip(field_names, field_values)}
        )
    except Exception as e:
        log.info("DynamoDB entry could not be created" + str(e))
def assume_role(account_id, boto_type):
    """Assume ROLE_SPOKE in *account_id* and return a boto3 handle.

    *boto_type* selects the handle: "resource" -> EC2 resource,
    "client" -> EC2 client, "iam" -> IAM client. Returns None on failure
    (and for an unrecognized *boto_type*, which previously surfaced as an
    UnboundLocalError swallowed by the broad except).
    """
    global ROLE_SPOKE
    log.info("Assuming role: " + str(ROLE_SPOKE) + " account: " + str(account_id))
    try:
        sts = sts_connection.assume_role(
            RoleArn="arn:aws:iam::" + account_id + ":role/" + ROLE_SPOKE,
            RoleSessionName="cross_acct_lambda"
        )
        # Deduplicate the credential kwargs shared by every branch.
        credentials = dict(
            aws_access_key_id=sts['Credentials']['AccessKeyId'],
            aws_secret_access_key=sts['Credentials']['SecretAccessKey'],
            aws_session_token=sts['Credentials']['SessionToken'],
        )
        if boto_type == "resource":
            return boto3.resource('ec2', **credentials)
        if boto_type == "client":
            return boto3.client('ec2', **credentials)
        if boto_type == "iam":
            return boto3.client('iam', **credentials)
        log.info("Unknown boto_type: " + str(boto_type))
        return None
    except Exception as e:
        log.info("Role could not be assumed" + str(e))
def clean_nacls():
    """Delete NACL deny entries older than 6 hours and reset their DB rows.

    Scans the whole IOC table; rows with rule_no "0" were never blocked (or
    were already cleaned) and are skipped.
    """
    global HUB_ACCOUNT_ID
    log.info("Cleaning old NACLs entries ... ")
    try:
        now = datetime.datetime.now()
        table = dynamodb.Table(DYNAMO_TABLE)
        response = table.scan()
        for r in response['Items']:
            if str(r['rule_no']) == "0":
                continue  # nothing blocked for this attacker
            t = r['timestamp']
            account = r['account_id']
            log.info("Searching for oldest entries in the account: " + str(account) + " attacker: " + str(r['attacker_ip']))
            old = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S.%f')
            # Whole hours the entry has been in place.
            difh = ((now - old).days * 24) + int((now - old).seconds / 3600)
            log.info("Hours that remained blocked: " + str(difh))
            if difh < 6:
                continue
            log.info("Cleaning NACL entry: " + str(r['rule_no']) + " account: " + str(account))
            try:
                if account != HUB_ACCOUNT_ID:
                    client = assume_role(account, "resource")
                    network_acl = client.NetworkAcl(r['nacl_id'])
                else:
                    network_acl = r_ec2.NetworkAcl(r['nacl_id'])
                response2 = network_acl.delete_entry(
                    Egress=False,
                    RuleNumber=int(r['rule_no'])
                )
                if response2['ResponseMetadata']['HTTPStatusCode'] == 200:
                    log.info("NACL rule deleted for attacker: " + str(r['attacker_ip']))
                    update_entry_attackers(str(r['attacker_ip']), str(r['hits']), "0", True)
                    # Bug fix: this used to `return`, aborting the scan after
                    # cleaning a single entry and leaving every other expired
                    # NACL entry in place until a later invocation.
                else:
                    log.info("Failed to delete the entry")
            except Exception as e:
                log.info("Failed to instantiate resource NetworkAcl " + str(e))
                log.info("Updating IOCs db to keep consistency ... " + str(e))
                try:
                    update_entry_attackers(str(r['attacker_ip']), str(r['hits']), "0", True)
                except Exception as e:
                    log.info("Updating IOCs db to keep consistency failed: " + str(e))
    except Exception as e:
        log.info("NACLs could not be deleted: " + str(e))
def message_to_slack(ioc):
    """Post a formatted finding notification to the Slack webhook.

    *ioc* is a comma-separated string with either 20 fields (network finding
    with NACL/instance details) or 22 fields (adds principal_id and
    user_name for the TOR/API reconnaissance case with no blockable
    resource).
    """
    lst = ioc.split(",")
    ioc_len = len(lst)
    try:
        if ioc_len == 20:
            # Network finding: link straight to the NACL console page.
            ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen = ioc.split(",")
            nacl_url = "https://console.aws.amazon.com/vpc/home?region=" + region + "#acls:networkAclId=" + nacl_id + ";sort=networkAclId"
            data = {
                'text': '***************************************************************\n\n'
                        + '*ATTACKER IP:* ' + src_ip + ' *HITS:* ' + hits + '\n'
                        + '*TTP:* ' + ttp + '\n'
                        + '*ACCOUNT ID:* ' + '`' + account_id + '`' + ' *ACCOUNT ALIAS:* ' + account_alias + ' *INSTANCE ID:* ' + '`' + instance_id + '`' + '\n'
                        + '*TAGS:* ' + tags + '\n'
                        + '*NACL:* ' + nacl_url + '\n'
                        + '*VPC ID:* ' + '`' + vpc_id + '`' + ' *SUBNET ID:* ' + '`' + subnet_id + '`' + '\n'
                        + '*COUNTRY:* ' + country + ' *CITY:* ' + city + '\n'
                        + '*ASN ORG:* ' + asn_org + ' *ORG:* ' + org + ' *ISP:* ' + isp + '\n'
                        + '*FIRST SEEN:* ' + event_first_seen + '\n'
                        + '***************************************************************',
                'username': 'CLOUD SNIPER BUDDY',
                'icon_emoji': ':robot_face:'
            }
        else:
            # 22-field variant: identity-centric message, no NACL link since
            # no resource was blocked automatically.
            ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen, principal_id, user_name = ioc.split(",")
            data = {
                'text': '***************************************************************\n\n'
                        + '*ATTACKER IP:* ' + src_ip + ' *HITS:* ' + hits + '\n'
                        + '*TTP:* ' + ttp + '\n'
                        + '*ACCOUNT ID:* ' + '`' + account_id + '`' + ' *ACCOUNT ALIAS:* ' + account_alias + '\n'
                        + '*COUNTRY:* ' + country + ' *CITY:* ' + city + '\n'
                        + '*ASN ORG:* ' + asn_org + ' *ORG:* ' + org + ' *ISP:* ' + isp + '\n'
                        + '*FIRST SEEN:* ' + event_first_seen + '\n'
                        + '*USER NAME:* ' + user_name + ' *PRINCIPAL ID:* ' + principal_id + '\n'
                        + '*DESCRIPTION:* API DescribeAlarms, commonly used in reconnaissance attacks, was invoked from a Tor exit node IP address. The threat intelligence feed does not provide resource details, so there is no automatic blocking. The user must be investigated' + '\n'
                        + '***************************************************************',
                'username': 'CLOUD SNIPER BUDDY',
                'icon_emoji': ':robot_face:'
            }
        response = requests.post(WEBHOOK_URL, data=json.dumps(data), headers={'Content-Type': 'application/json'})
        log.info('Sending message to Slack. Response: ' + str(response.text) + ' Response Code: ' + str(response.status_code))
    except Exception as e:
        log.info("Message could not be send to Slack: " + str(e))
def delete_sqs():
    """Delete every processed message (global *message*) from the SQS queue."""
    log.info("Deleting queue ...")
    try:
        for msg in message:
            handle = msg['ReceiptHandle']
            sqs.delete_message(
                QueueUrl=QUEUE_URL,
                ReceiptHandle=handle
            )
            log.info('Processed and deleted message: %s' % handle)
    except Exception as e:
        log.info("SQS queue could not be deleted" + str(e))
def put_to_s3(ioc):
    """Persist the finding as a timestamped JSON document under IOCS_PATH.

    *ioc* carries either 20 fields (network finding) or 22 fields (adds
    principal_id and user_name). Key order in the dataset is preserved so
    the serialized JSON stays identical.
    """
    log.info("Sending findings to S3 ...")
    fields = ioc.split(",")
    if len(fields) == 20:
        ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen = fields
        dataset = {
            'ttp': str(ttp),
            'hits': str(hits),
            'cloud.account.id': str(account_id),
            'cloud.account.name': str(account_alias),
            'cloud.region': str(region),
            'interface.subnet.id': str(subnet_id),
            'source.ip': str(src_ip),
            'cloud.instance.id': str(instance_id),
            'interface.nacl.id': str(nacl_id),
            'country': str(country),
            'city': str(city),
            'asn_org': str(asn_org),
            'org': str(org),
            'isp': str(isp),
            'asn': str(asn),
            'interface.vpc.id': str(vpc_id),
            'interface.security_group.name': str(sg_name),
            'interface.security_group.id': str(sg_id),
            'tags': str(tags),
            'timestamp': str(event_first_seen),
            'cloud.provider': 'aws'
        }
    else:
        # 22-field variant: identity info instead of network interface data.
        ttp, hits, account_id, account_alias, region, subnet_id, src_ip, instance_id, nacl_id, country, city, asn_org, org, isp, asn, vpc_id, sg_name, sg_id, tags, event_first_seen, principal_id, user_name = fields
        dataset = {
            'ttp': str(ttp),
            'hits': str(hits),
            'cloud.account.id': str(account_id),
            'cloud.account.name': str(account_alias),
            'cloud.region': str(region),
            'source.ip': str(src_ip),
            'country': str(country),
            'city': str(city),
            'asn_org': str(asn_org),
            'org': str(org),
            'isp': str(isp),
            'asn': str(asn),
            'timestamp': str(event_first_seen),
            'cloud.provider': 'aws',
            'cloud.principal_id': str(principal_id),
            'cloud.user_name': str(user_name)
        }
    suffix = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    bucket = boto3.resource('s3').Bucket(name=BUCKET_NAME)
    # Normalize the configured prefix: at most one leading/trailing slash
    # is stripped, matching the original behavior.
    prefix = IOCS_PATH
    if prefix.startswith("/"):
        prefix = prefix[1:]
    if prefix.endswith("/"):
        prefix = prefix[:-1]
    try:
        bucket.Object(key=f"{prefix}/iocs_{suffix}.json").put(
            Body=json.dumps(dataset).encode('UTF-8'))
    except Exception as e:
        log.info("Could not put the object to S3" + str(e))
def publish_to_sns():
    """Notify the IR SNS topic that a TOR-related finding was processed."""
    payload = {"Message": "TOR"}
    try:
        response = sns.publish(
            TopicArn=TOPIC_ARN,
            Subject="IR",
            Message=json.dumps(payload)
        )
        log.info("Publish to SNS: " + str(response['ResponseMetadata']['HTTPStatusCode']))
    except Exception as e:
        log.info("Could not publish to SNS " + str(e))
def cloud_sniper_threat_intelligence(event, context):
    """Lambda entry point: clean expired NACL entries, then process findings.

    Reads GuardDuty findings from SQS into the module-level *message*,
    matches IOCs, runs incident response, and deletes processed messages.
    """
    global message
    log.info("Processing GuardDuty findings: %s" % json.dumps(event))
    try:
        clean_nacls()
        message = read_sqs()
        if not message:
            return  # nothing queued, nothing to do
        search_ioc()
        incident_and_response()
        delete_sqs()
        log.info("Findings properly processed")
    except Exception as e:
        log.error('Failure to process finding ' + str(e))
| 39.350896 | 365 | 0.492449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,640 | 0.258141 |
7f265c229148e280301571ddc10324f031f42de0 | 17,311 | py | Python | tests/apitests/python/test_tag_immutability.py | tedgxt/harbor | 039733b200cc44ba23829499eb6cc71c63d3b9e6 | [
"Apache-2.0"
] | 1 | 2019-06-19T14:07:38.000Z | 2019-06-19T14:07:38.000Z | tests/apitests/python/test_tag_immutability.py | tedgxt/harbor | 039733b200cc44ba23829499eb6cc71c63d3b9e6 | [
"Apache-2.0"
] | null | null | null | tests/apitests/python/test_tag_immutability.py | tedgxt/harbor | 039733b200cc44ba23829499eb6cc71c63d3b9e6 | [
"Apache-2.0"
] | 1 | 2019-07-11T02:36:30.000Z | 2019-07-11T02:36:30.000Z | from __future__ import absolute_import
import unittest
import sys
from testutils import ADMIN_CLIENT
from testutils import harbor_server
from library.project import Project
from library.user import User
from library.repository import Repository
from library.repository import push_image_to_project
from library.registry import Registry
from library.artifact import Artifact
from library.tag_immutability import Tag_Immutability
from library.repository import push_special_image_to_project
class TestTagImmutability(unittest.TestCase):
@classmethod
def setUpClass(self):
self.url = ADMIN_CLIENT["endpoint"]
self.user_password = "Aa123456"
self.project= Project()
self.user= User()
self.repo= Repository()
self.registry = Registry()
self.artifact = Artifact()
self.tag_immutability = Tag_Immutability()
self.project_id, self.project_name, self.user_id, self.user_name = [None] * 4
self.user_id, self.user_name = self.user.create_user(user_password = self.user_password, **ADMIN_CLIENT)
self.USER_CLIENT = dict(with_signature = True, with_immutable_status = True, endpoint = self.url, username = self.user_name, password = self.user_password)
self.exsiting_rule = dict(selector_repository="rel*", selector_tag="v2.*")
self.project_id, self.project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
def check_tag_immutability(self, artifact, tag_name, status = True):
for tag in artifact.tags:
if tag.name == tag_name:
self.assertTrue(tag.immutable == status)
return
raise Exception("No tag {} found in artifact {}".format(tag, artifact))
def test_disability_of_rules(self):
"""
Test case:
Test Disability Of Rules
Test step and expected result:
1. Create a new project;
2. Push image A to the project with 2 tags A and B;
3. Create a disabled rule matched image A with tag A;
4. Both tags of image A should not be immutable;
5. Enable this rule;
6. image A with tag A should be immutable.
"""
image_a = dict(name="image_disability_a", tag1="latest", tag2="6.2.2")
#1. Create a new project;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A to the project with 2 tags;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#3. Create a disabled rule matched image A;
rule_id = self.tag_immutability.create_rule(project_id, disabled = True, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Both tags of image A should not be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_disability_of_rules] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = False)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Enable this rule;
self.tag_immutability.update_tag_immutability_policy_rule(project_id, rule_id, disabled = False, **self.USER_CLIENT)
#6. image A with tag A should be immutable.
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_disability_of_rules] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
def test_artifact_and_repo_is_undeletable(self):
"""
Test case:
Test Artifact And Repo is Undeleteable
Test step and expected result:
1. Create a new project;
2. Push image A to the project with 2 tags A and B;
3. Create a enabled rule matched image A with tag A;
4. Tag A should be immutable;
5. Artifact is undeletable;
6. Repository is undeletable.
"""
image_a = dict(name="image_repo_undeletable_a", tag1="latest", tag2="1.3.2")
#1. Create a new project;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A to the project with 2 tags A and B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#3. Create a enabled rule matched image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_artifact_and_repo_is_undeletable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Artifact is undeletable;
self.artifact.delete_artifact(project_name, image_a["name"], image_a["tag1"], expect_status_code = 412,expect_response_body = "configured as immutable, cannot be deleted", **self.USER_CLIENT)
#6. Repository is undeletable.
self.repo.delete_repoitory(project_name, image_a["name"], expect_status_code = 412, expect_response_body = "configured as immutable, cannot be deleted", **self.USER_CLIENT)
def test_tag_is_undeletable(self):
"""
Test case:
Test Tag is Undeleteable
Test step and expected result:
1. Push image A to the project with 2 tags A and B;
2. Create a enabled rule matched image A with tag A;
3. Tag A should be immutable;
4. Tag A is undeletable;
5. Tag B is deletable.
"""
image_a = dict(name="image_undeletable_a", tag1="latest", tag2="9.3.2")
#1. Push image A to the project with 2 tags A and B;
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#2. Create a enabled rule matched image A with tag A;
self.tag_immutability.create_rule(self.project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag2"])[0:2] + "*", **self.USER_CLIENT)
#3. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(self.project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_tag_is_undeletable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = True)
#4. Tag A is undeletable;
self.artifact.delete_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag2"], expect_status_code = 412, **self.USER_CLIENT)
#5. Tag B is deletable.
self.artifact.delete_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag1"], **self.USER_CLIENT)
def test_image_is_unpushable(self):
"""
Test case:
Test Image is Unpushable
Test step and expected result:
1. Create a new project;
2. Push image A to the project with 2 tags A and B;
3. Create a enabled rule matched image A with tag A;
4. Tag A should be immutable;
5. Can not push image with the same image name and with the same tag name.
"""
image_a = dict(name="image_unpushable_a", tag1="latest", tag2="1.3.2")
#1. Create a new project;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A to the project with 2 tags A and B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#3. Create a enabled rule matched image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_image_is_unpushable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Can not push image with the same image name and with the same tag name.
push_image_to_project(project_name, harbor_server, self.user_name, self.user_password, "tomcat", image_a["tag1"],
new_image = image_a["name"], expected_error_message = "configured as immutable")
def test_copy_disability(self):
"""
Test case:
Test Copy Disability
Test step and expected result:
1. Create 2 projects;
2. Push image A with tag A and B to project A, push image B which has the same image name and tag name to project B;
3. Create a enabled rule matched image A with tag A;
4. Tag A should be immutable;
5. Can not copy artifact from project A to project B with the same repository name.
"""
image_a = dict(name="image_copy_disability_a", tag1="latest", tag2="1.3.2")
#1. Create 2 projects;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
_, project_name_src = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A with tag A and B to project A, push image B which has the same image name and tag name to project B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
push_special_image_to_project(project_name_src, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#3. Create a enabled rule matched image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_copy_disability] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Can not copy artifact from project A to project B with the same repository name.
artifact_a_src = self.artifact.get_reference_info(project_name_src, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_copy_disability] - artifact_a_src:{}".format(artifact_a_src))
self.artifact.copy_artifact(project_name, image_a["name"], project_name_src+"/"+ image_a["name"] + "@" + artifact_a_src.digest, expect_status_code=412, expect_response_body = "configured as immutable, cannot be updated", **self.USER_CLIENT)
#def test_replication_disability(self):
# pass
def test_priority_of_rules(self):
"""
Test case:
Test Priority Of Rules(excluding rule will not affect matching rule)
Test step and expected result:
1. Push image A, B and C, image A has only 1 tag named tag1;
2. Create a matching rule that matches image A and tag named tag2 which is not exist;
3. Create a excluding rule to exlude image A and B;
4. Add a tag named tag2 to image A, tag2 should be immutable;
5. Tag2 should be immutable;
6. All tags in image B should be immutable;
7. All tags in image C should not be immutable;
8. Disable all rules.
"""
image_a = dict(name="image_priority_a", tag1="latest", tag2="6.3.2")
image_b = dict(name="image_priority_b", tag1="latest", tag2="0.12.0")
image_c = dict(name="image_priority_c", tag1="latest", tag2="3.12.0")
#1. Push image A, B and C, image A has only 1 tag named tag1;
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"]])
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_b["name"], [image_b["tag1"],image_b["tag2"]])
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_c["name"], [image_c["tag1"],image_c["tag2"]])
#2. Create a matching rule that matches image A and tag named tag2 which is not exist;
rule_id_1 = self.tag_immutability.create_rule(self.project_id, selector_repository=image_a["name"], selector_tag=image_a["tag2"], **self.USER_CLIENT)
#3. Create a excluding rule to exlude image A and B;
rule_id_2 = self.tag_immutability.create_rule(self.project_id, selector_repository_decoration = "repoExcludes",
selector_repository="{image_priority_a,image_priority_b}", selector_tag="**", **self.USER_CLIENT)
#4. Add a tag named tag2 to image A, tag2 should be immutable;
self.artifact.create_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag2"], **self.USER_CLIENT)
#5. Tag2 should be immutable;
artifact_a = self.artifact.get_reference_info(self.project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = False)
#6. All tags in image B should be immutable;
artifact_b = self.artifact.get_reference_info(self.project_name, image_b["name"], image_b["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_b))
self.assertTrue(artifact_b)
self.check_tag_immutability(artifact_b, image_b["tag2"], status = False)
self.check_tag_immutability(artifact_b, image_b["tag1"], status = False)
#7. All tags in image C should not be immutable;
artifact_c = self.artifact.get_reference_info(self.project_name, image_c["name"], image_c["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_c))
self.assertTrue(artifact_c)
self.check_tag_immutability(artifact_c, image_c["tag2"], status = True)
self.check_tag_immutability(artifact_c, image_c["tag1"], status = True)
#8. Disable all rules.
self.tag_immutability.update_tag_immutability_policy_rule(self.project_id, rule_id_1, disabled = True, **self.USER_CLIENT)
self.tag_immutability.update_tag_immutability_policy_rule(self.project_id, rule_id_2, disabled = True, **self.USER_CLIENT)
def test_add_exsiting_rule(self):
"""
Test case:
Test Priority Of Rules(excluding rule will not affect matching rule)
Test step and expected result:
1. Push image A and B with no tag;
2. Create a immutability policy rule A;
3. Fail to create rule B which has the same config as rule A;
"""
self.tag_immutability.create_tag_immutability_policy_rule(self.project_id, **self.exsiting_rule, **self.USER_CLIENT)
self.tag_immutability.create_tag_immutability_policy_rule(self.project_id, **self.exsiting_rule, expect_status_code = 409, **self.USER_CLIENT)
@classmethod
def tearDownClass(self):
print("Case completed")
if __name__ == '__main__':
suite = unittest.TestSuite(unittest.makeSuite(TestTagImmutability))
result = unittest.TextTestRunner(sys.stdout, verbosity=2, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception(r"Tag immutability test failed: {}".format(result))
| 57.511628 | 249 | 0.667841 | 16,492 | 0.952689 | 0 | 0 | 993 | 0.057362 | 0 | 0 | 6,487 | 0.374733 |
7f26727fc4891343ad2b1f70064b4be1149f932b | 3,728 | py | Python | src/hackmds/tasks.py | chairco/dj-Hackmd-notifer | ae032537707ddba7b911b04b99645230ae350322 | [
"0BSD"
] | 4 | 2017-10-23T10:42:06.000Z | 2019-06-21T00:44:01.000Z | src/hackmds/tasks.py | chairco/dj-Hackmd-notifier | ae032537707ddba7b911b04b99645230ae350322 | [
"0BSD"
] | 2 | 2017-10-23T01:56:15.000Z | 2017-10-23T01:56:53.000Z | src/hackmds/tasks.py | chairco/dj-Hackmd-notifier | ae032537707ddba7b911b04b99645230ae350322 | [
"0BSD"
] | null | null | null | import requests
import logging
import os
import diffhtml
from django.conf import settings
from django.shortcuts import render
from django.core.mail import EmailMessage
from django.template import loader
from django.contrib.auth.models import User
from django_q.tasks import async_chain, result_group
from hackmds.models import Archive
from bs4 import BeautifulSoup
from markupsafe import Markup
logger = logging.getLogger(__name__)
cutoff = 0.6
def send_mail(email_subject, email_body, to, cc=None, bcc=None, from_email=None):
"""send email by Django send_email module
:type email_subject: str
:type email_body: Markup()
:type to: list()
:type cc: list()
:type bcc: list()
:type from_email: str
:rtype: EmailMessage()
"""
if bcc is None:
bcc = []
else:
bcc = bcc
if cc is None:
cc = []
else:
cc = cc
# all email str lower(),remove replicate email
cc = list(set(i.lower() for i in cc))
# all email str lower(),remove replicate email
to = list(set(i.lower() for i in to))
email = EmailMessage(
subject=email_subject,
body=email_body,
from_email=from_email,
to=to,
bcc=bcc,
cc=cc,
headers={'Reply-To': from_email}
)
email.content_subtype = "html"
return email.send(fail_silently=False)
def hackmd_notify_email(email_subject, result, cc=None):
"""Add mail template
:type email_subject: str
:type result: Markup()
:type cc: list()
:rtype: send_mail()
"""
t = loader.get_template(
os.path.join(
os.path.join(settings.BASE_DIR, 'templates'), 'email/'
) + 'hackmds_notify.html'
)
email_body = t.render({'result': result})
email_to = [u.email for u in User.objects.all()]
from_email = settings.EMAIL_HOST_USER + '@gmail.com'
return send_mail(
email_subject=email_subject,
email_body=email_body,
to=email_to,
from_email=from_email
)
def hackmd_task(url):
"""get hackmd.io content compare and send mail, first will save.
:type urls: str
:rtype: QuerySet()
"""
url = url.split('#')[0] # get the url none anchor
r = requests.get(url)
if r.status_code != 200:
return '{} error, error code; {}'.format(url, r.status_code)
soup = BeautifulSoup(r.text, 'html.parser')
content = soup.find(
'div', {'id': 'doc', 'class': 'container markdown-body'})
content = content.string
resutl = ''
if len(Archive.objects.filter(url=url)):
compare = Archive.objects.get(url=url)
email_subject = url
result = Markup('<br>').join(
diffhtml.ndiff(compare.content.splitlines(),
content.splitlines(), cutoff=cutoff)
) # default cutoff = 0.6
diff_count = str(result).count('<ins>')
print('上一次內容差異數: {}'.format(diff_count))
if diff_count > 0:
Archive.objects.filter(url=url).update(content=content)
if diff_count >= 2:
try:
hackmd_notify_email(email_subject=url, result=result)
except Exception as e:
return e
print('send_mail success')
print('OK, No send_mail')
else:
print('第一次新增')
Archive.objects.create(url=url, content=content)
return not result and None or result
def hackmd_taskchain(urls):
"""
:type urls: str
:rtype: QuerySet()
"""
urls = [u.split('#')[0].strip() for u in urls.split(',')]
chains = [('hackmds.tasks.hackmd_task', (url,)) for url in urls]
group_id = async_chain(chains)
return result_group(group_id, count=len(urls))
| 27.614815 | 81 | 0.616685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 963 | 0.256253 |
7f283665e8b6be70031379c582e3646c30d58f97 | 814 | py | Python | camunda/utils/log_utils.py | finexioinc/camunda-external-task-client-python3 | dd88da967e8cc1aaf91972e2667e01bfa02265d0 | [
"Apache-2.0"
] | null | null | null | camunda/utils/log_utils.py | finexioinc/camunda-external-task-client-python3 | dd88da967e8cc1aaf91972e2667e01bfa02265d0 | [
"Apache-2.0"
] | null | null | null | camunda/utils/log_utils.py | finexioinc/camunda-external-task-client-python3 | dd88da967e8cc1aaf91972e2667e01bfa02265d0 | [
"Apache-2.0"
] | 1 | 2020-08-05T22:20:06.000Z | 2020-08-05T22:20:06.000Z | import logging
from frozendict import frozendict
def log_with_context(message, context=frozendict({}), log_level='info', **kwargs):
    """Log *message* at *log_level*, prefixed with [key:value] pairs from *context*."""
    emit = __get_log_function(log_level)
    prefix = __get_log_context_prefix(context)
    # Only prepend the context prefix when there is something to show.
    if prefix:
        emit(f"{prefix} {message}", **kwargs)
    else:
        emit(message, **kwargs)
def __get_log_context_prefix(context):
    """Render truthy context entries as a '[k:v][k2:v2]' prefix string."""
    if not context:
        return ""
    # Entries with falsy values are skipped, matching the accumulation loop.
    return "".join(f"[{key}:{val}]" for key, val in context.items() if val)
def __get_log_function(log_level):
    """Map a level name to a logging callable; unknown names fall back to info."""
    if log_level == 'warning':
        return logging.warning
    if log_level == 'error':
        return logging.error
    # 'info' and anything unrecognized both log at info level.
    return logging.info
| 25.4375 | 82 | 0.665848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.092138 |
7f2882efd53bcebd87c22f2c0fd81888ec5db7e6 | 734 | py | Python | Python/Basic Data Types/Lists.py | justchilll/HackerRank | 948e5783007b0918b25bcef452a12c4dddb36fa6 | [
"MIT"
] | 1 | 2021-12-28T15:45:25.000Z | 2021-12-28T15:45:25.000Z | Python/Basic Data Types/Lists.py | justchilll/HackerRank | 948e5783007b0918b25bcef452a12c4dddb36fa6 | [
"MIT"
] | null | null | null | Python/Basic Data Types/Lists.py | justchilll/HackerRank | 948e5783007b0918b25bcef452a12c4dddb36fa6 | [
"MIT"
if __name__ == '__main__':
    # HackerRank "Lists": apply N list commands read from stdin.
    N = int(input())
    main_list = []
    for _ in range(N):
        tokens = input().split()
        command, args = tokens[0], tokens[1:]
        if command == 'insert':
            main_list.insert(int(args[0]), int(args[1]))
        elif command == 'print':
            print(main_list)
        elif command == 'remove':
            main_list.remove(int(args[0]))
        elif command == 'append':
            main_list.append(int(args[0]))
        elif command == 'sort':
            main_list.sort()
        elif command == 'pop':
            main_list.pop()
        elif command == 'reverse':
            main_list.reverse()
| 36.7 | 75 | 0.569482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.083106 |
7f28a3ba2a1d3d7c7bdf664d76babed98cc0144e | 742 | py | Python | wildlifecompliance/migrations/0055_auto_20180704_0848.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
] | 1 | 2020-12-07T17:12:40.000Z | 2020-12-07T17:12:40.000Z | wildlifecompliance/migrations/0055_auto_20180704_0848.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
] | 14 | 2020-01-08T08:08:26.000Z | 2021-03-19T22:59:46.000Z | wildlifecompliance/migrations/0055_auto_20180704_0848.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
] | 15 | 2020-01-08T08:02:28.000Z | 2021-11-03T06:48:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-07-04 00:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0054_assessment_licence_activity_type'),
]
operations = [
migrations.AlterField(
model_name='applicationgrouptype',
name='name',
field=models.CharField(
choices=[
('officer',
'Officer'),
('assessor',
'Assessor')],
default='officer',
max_length=40,
verbose_name='Group Type'),
),
]
| 25.586207 | 72 | 0.521563 | 584 | 0.787062 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.292453 |
7f28fd8107aa3d01ef3f003e276b381cfb9af0d6 | 677 | py | Python | day2/day2.py | tlee911/aoc2021 | f3b46590be72ceccdf4915b67b050c0c3207f002 | [
"MIT"
] | null | null | null | day2/day2.py | tlee911/aoc2021 | f3b46590be72ceccdf4915b67b050c0c3207f002 | [
"MIT"
] | null | null | null | day2/day2.py | tlee911/aoc2021 | f3b46590be72ceccdf4915b67b050c0c3207f002 | [
"MIT"
] | null | null | null | with open('input.txt', 'r') as file:
    # Read the puzzle input: one "<direction> <amount>" command per line.
    input = file.readlines()
# Tokenize, then map each line to a {direction: amount} dict.
# NOTE(review): the name `input` shadows the builtin input().
input = [ step.split() for step in input ]
input = [ {step[0]: int(step[1])} for step in input ]
def part1():
    """Part 1: horizontal position times depth after applying all commands."""
    horiz = 0
    depth = 0
    for step in input:
        horiz += step.get('forward', 0)
        # 'down' increases depth, 'up' decreases it.
        depth += step.get('down', 0) - step.get('up', 0)
    print(horiz, depth)
    return horiz * depth
def part2():
    """Part 2: aim-based interpretation — 'down'/'up' change aim, not depth."""
    horiz, depth, aim = 0, 0, 0
    for step in input:
        fwd = step.get('forward', 0)
        horiz += fwd
        # Moving forward dives by (aim * distance).
        depth += fwd * aim
        aim += step.get('down', 0) - step.get('up', 0)
    print(horiz, depth, aim)
    return horiz * depth
print(part2()) | 20.515152 | 53 | 0.468242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.161004 |
7f297ce902d7f9247b5e656d705431d2cc8409f0 | 4,307 | py | Python | user_metrics/metrics/edit_count.py | wikimedia/user_metrics | 23ef17f7134729a0a4a8a47ac7100fb88db4d155 | [
"BSD-3-Clause"
] | 1 | 2017-09-03T21:33:35.000Z | 2017-09-03T21:33:35.000Z | user_metrics/metrics/edit_count.py | wikimedia/user_metrics | 23ef17f7134729a0a4a8a47ac7100fb88db4d155 | [
"BSD-3-Clause"
] | null | null | null | user_metrics/metrics/edit_count.py | wikimedia/user_metrics | 23ef17f7134729a0a4a8a47ac7100fb88db4d155 | [
"BSD-3-Clause"
] | null | null | null |
__author__ = "Ryan Faulkner"
__date__ = "July 27th, 2012"
__license__ = "GPL (version 2 or later)"
from os import getpid
from collections import namedtuple
import user_metric as um
from user_metrics.metrics import query_mod
from user_metrics.metrics.users import UMP_MAP
from user_metrics.utils import multiprocessing_wrapper as mpw
from user_metrics.config import logging
class EditCount(um.UserMetric):
    """
    Produces a count of edits as well as the total number of bytes added
    for a registered user.

        `https://meta.wikimedia.org/wiki/Research:Metrics/edit_count(t)`

        usage e.g.: ::

            >>> import classes.Metrics as m
            >>> m.EditCount(date_start='2012-12-12 00:00:00',date_end=
                '2012-12-12 00:00:00',namespace=0).process(123456)
            25, 10000

    The output in this case is the number of edits (25) made by the
    editor with ID 123456 and the total number of bytes added by those
    edits (10000)
    """

    # Structure that defines parameters for EditRate class
    # ('k' is the worker-process count consumed by process(); default 5).
    _param_types = {
        'init': {},
        'process': {
            'k': [int, 'Number of worker processes.', 5]
        }
    }

    # Define the metrics data model meta
    # (column 0 of each result row is the user id; column 1 is the count).
    _data_model_meta = {
        'id_fields': [0],
        'date_fields': [],
        'float_fields': [],
        'integer_fields': [1],
        'boolean_fields': [],
    }

    # Numeric result columns that list aggregators may sum over.
    _agg_indices = {
        'list_sum_indices': _data_model_meta['integer_fields'] +
        _data_model_meta['float_fields'],
    }

    @um.pre_metrics_init
    def __init__(self, **kwargs):
        super(EditCount, self).__init__(**kwargs)

    @staticmethod
    def header():
        # Column labels matching the rows stored in self._results.
        return ['user_id', 'edit_count']

    @um.UserMetric.pre_process_metric_call
    def process(self, users, **kwargs):
        """
        Determine edit count. The parameter *user_handle* can be either
        a string or an integer or a list of these types. When the
        *user_handle* type is integer it is interpreted as a user id, and
        as a user_name for string input. If a list of users is passed
        to the *process* method then a dict object with edit counts keyed
        by user handles is returned.

        - Parameters:
            - **user_handle** - String or Integer (optionally lists):
                Value or list of values representing user handle(s).
            - **is_id** - Boolean. Flag indicating whether user_handle
                stores user names or user ids
        """

        # Pack args, call thread pool
        args = self._pack_params()
        results = mpw.build_thread_pool(users, _process_help,
                                        self.k_, args)

        # Get edit counts from query - all users not appearing have
        # an edit count of 0
        # NOTE(review): Python 2 code — `long` does not exist in Python 3.
        user_set = set([long(user_id) for user_id in users])
        edit_count = list()
        for row in results:
            # row is presumably (user_id, count) from the query — TODO confirm
            # that row[0]'s type matches the long ids in user_set so discard
            # actually removes it.
            edit_count.append([row[0], int(row[1])])
            user_set.discard(row[0])
        for user in user_set:
            edit_count.append([user, 0])

        self._results = edit_count
        return self
def _process_help(args):
    """Worker-thread helper: run the edit-count query for one user batch."""
    # args packs (user batch, serialized metric parameters).
    users, state = args

    metric_params = um.UserMetric._unpack_params(state)
    query_args_type = namedtuple('QueryArgs', 'date_start date_end')

    logging.debug(__name__ + ':: Executing EditCount on '
                             '%s users (PID = %s)' % (len(users), getpid()))

    # One query per user period; concatenate the returned row lists.
    umpd_obj = UMP_MAP[metric_params.group](users, metric_params)
    results = []
    for period in umpd_obj:
        query_args = query_args_type(period.start, period.end)
        results.extend(
            query_mod.edit_count_user_query(
                period.user, metric_params.project, query_args))
    return results
# Rudimentary Testing
if __name__ == '__main__':
    users = ['13234584', '13234503', '13234565', '13234585', '13234556']
    e = EditCount(t=10000)

    # Check edit counts against the sample user ids.
    # `print res` was a Python-2-only statement; the call form below is
    # valid in both Python 2 and Python 3.
    for res in e.process(users):
        print(res)
| 31.669118 | 78 | 0.58997 | 2,847 | 0.661017 | 0 | 0 | 1,620 | 0.376132 | 0 | 0 | 2,118 | 0.491758 |
7f29a4a16372f3427a26db56be3bf0908d8eb335 | 7,210 | py | Python | sdk/python/pulumi_aws_native/panorama/get_application_instance.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/panorama/get_application_instance.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/panorama/get_application_instance.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetApplicationInstanceResult',
'AwaitableGetApplicationInstanceResult',
'get_application_instance',
'get_application_instance_output',
]
@pulumi.output_type
class GetApplicationInstanceResult:
    """Generated result type for the aws-native Panorama
    getApplicationInstance invoke (do not edit by hand)."""
    def __init__(__self__, application_instance_id=None, arn=None, created_time=None, default_runtime_context_device_name=None, device_id=None, health_status=None, last_updated_time=None, status=None, status_description=None, status_filter=None, tags=None):
        # Each argument is type-checked when truthy, then stashed via
        # pulumi.set so the @pulumi.getter properties below can read it back.
        if application_instance_id and not isinstance(application_instance_id, str):
            raise TypeError("Expected argument 'application_instance_id' to be a str")
        pulumi.set(__self__, "application_instance_id", application_instance_id)
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if created_time and not isinstance(created_time, int):
            raise TypeError("Expected argument 'created_time' to be a int")
        pulumi.set(__self__, "created_time", created_time)
        if default_runtime_context_device_name and not isinstance(default_runtime_context_device_name, str):
            raise TypeError("Expected argument 'default_runtime_context_device_name' to be a str")
        pulumi.set(__self__, "default_runtime_context_device_name", default_runtime_context_device_name)
        if device_id and not isinstance(device_id, str):
            raise TypeError("Expected argument 'device_id' to be a str")
        pulumi.set(__self__, "device_id", device_id)
        if health_status and not isinstance(health_status, str):
            raise TypeError("Expected argument 'health_status' to be a str")
        pulumi.set(__self__, "health_status", health_status)
        if last_updated_time and not isinstance(last_updated_time, int):
            raise TypeError("Expected argument 'last_updated_time' to be a int")
        pulumi.set(__self__, "last_updated_time", last_updated_time)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if status_description and not isinstance(status_description, str):
            raise TypeError("Expected argument 'status_description' to be a str")
        pulumi.set(__self__, "status_description", status_description)
        if status_filter and not isinstance(status_filter, str):
            raise TypeError("Expected argument 'status_filter' to be a str")
        pulumi.set(__self__, "status_filter", status_filter)
        if tags and not isinstance(tags, list):
            raise TypeError("Expected argument 'tags' to be a list")
        pulumi.set(__self__, "tags", tags)

    # Getters: @pulumi.getter(name=...) maps the Python snake_case attribute
    # to the provider's camelCase property name where the two differ.
    @property
    @pulumi.getter(name="applicationInstanceId")
    def application_instance_id(self) -> Optional[str]:
        return pulumi.get(self, "application_instance_id")

    @property
    @pulumi.getter
    def arn(self) -> Optional[str]:
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[int]:
        return pulumi.get(self, "created_time")

    @property
    @pulumi.getter(name="defaultRuntimeContextDeviceName")
    def default_runtime_context_device_name(self) -> Optional[str]:
        return pulumi.get(self, "default_runtime_context_device_name")

    @property
    @pulumi.getter(name="deviceId")
    def device_id(self) -> Optional[str]:
        return pulumi.get(self, "device_id")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional['ApplicationInstanceHealthStatus']:
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[int]:
        return pulumi.get(self, "last_updated_time")

    @property
    @pulumi.getter
    def status(self) -> Optional['ApplicationInstanceStatus']:
        return pulumi.get(self, "status")

    @property
    @pulumi.getter(name="statusDescription")
    def status_description(self) -> Optional[str]:
        return pulumi.get(self, "status_description")

    @property
    @pulumi.getter(name="statusFilter")
    def status_filter(self) -> Optional['ApplicationInstanceStatusFilter']:
        return pulumi.get(self, "status_filter")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Sequence['outputs.ApplicationInstanceTag']]:
        return pulumi.get(self, "tags")
class AwaitableGetApplicationInstanceResult(GetApplicationInstanceResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result usable with `await`: the dead `if False: yield`
        # marks __await__ as a generator function, which then immediately
        # returns a plain GetApplicationInstanceResult copy of the fields.
        if False:
            yield self
        return GetApplicationInstanceResult(
            application_instance_id=self.application_instance_id,
            arn=self.arn,
            created_time=self.created_time,
            default_runtime_context_device_name=self.default_runtime_context_device_name,
            device_id=self.device_id,
            health_status=self.health_status,
            last_updated_time=self.last_updated_time,
            status=self.status,
            status_description=self.status_description,
            status_filter=self.status_filter,
            tags=self.tags)
def get_application_instance(application_instance_id: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationInstanceResult:
    """
    Schema for ApplicationInstance CloudFormation Resource
    """
    __args__ = dict()
    __args__['applicationInstanceId'] = application_instance_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the aws-native provider; typ= tells the
    # runtime how to deserialize the raw result.
    __ret__ = pulumi.runtime.invoke('aws-native:panorama:getApplicationInstance', __args__, opts=opts, typ=GetApplicationInstanceResult).value

    # Re-wrap as the Awaitable subclass so callers may `await` the result.
    return AwaitableGetApplicationInstanceResult(
        application_instance_id=__ret__.application_instance_id,
        arn=__ret__.arn,
        created_time=__ret__.created_time,
        default_runtime_context_device_name=__ret__.default_runtime_context_device_name,
        device_id=__ret__.device_id,
        health_status=__ret__.health_status,
        last_updated_time=__ret__.last_updated_time,
        status=__ret__.status,
        status_description=__ret__.status_description,
        status_filter=__ret__.status_filter,
        tags=__ret__.tags)
@_utilities.lift_output_func(get_application_instance)
def get_application_instance_output(application_instance_id: Optional[pulumi.Input[str]] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApplicationInstanceResult]:
    """
    Schema for ApplicationInstance CloudFormation Resource
    """
    # Body is intentionally empty: lift_output_func wraps the plain invoke
    # above to accept pulumi Inputs and return an Output.
    ...
| 43.433735 | 257 | 0.710402 | 5,092 | 0.706241 | 632 | 0.087656 | 4,722 | 0.654924 | 0 | 0 | 1,683 | 0.233426 |
7f29ba6ebc54ec3ef0a767a7c315061f9ee3a3ff | 8,224 | py | Python | year_3/comppi_1/managers/views.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | null | null | null | year_3/comppi_1/managers/views.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | 21 | 2020-03-24T16:26:04.000Z | 2022-02-18T15:56:16.000Z | year_3/comppi_1/managers/views.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | null | null | null | import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from .models import ManagersModelsJSONEncoder
from .models import PubOrgAdmin, PubOrgRegistrant
@method_decorator(csrf_exempt, name='dispatch')
def apilogin(req):
    """Authenticate from JSON {username, password}; reply with a status flag."""
    if req.method != 'POST':
        return JsonResponse({'status': 405, 'descr': 'method not allowed'})

    payload = json.loads(req.body.decode("utf-8"))
    auth_user = authenticate(username=payload.get('username'),
                             password=payload.get('password'))
    if auth_user is None:
        # Bad credentials: report failure without starting a session.
        return JsonResponse({'status': False})

    login(req, auth_user)
    return JsonResponse({'status': True})
@method_decorator(csrf_exempt, name='dispatch')
def apilogout(req):
    """Log the current user out and report who (if anyone) was logged out."""
    if req.method != 'POST':
        return JsonResponse({'status': 405, 'descr': 'method not allowed'})

    # Capture the user before logout() replaces it with AnonymousUser.
    saved_user = req.user if req.user.is_authenticated else None
    logout(req)
    return JsonResponse(
        {
            'logged_out_user': saved_user,
            'logged_out': not req.user.is_authenticated,
        },
        safe=False, encoder=ManagersModelsJSONEncoder)
@method_decorator(csrf_exempt, name='dispatch')
def apimanageadmin(req):
    """CRUD endpoint for PubOrgAdmin records.

    GET    -> current user's User + PubOrgAdmin instances
    POST   -> create a User and a linked PubOrgAdmin from JSON fields
    PUT    -> update an existing PubOrgAdmin (doc_code, credentials)
    DELETE -> delete a PubOrgAdmin and its backing User
    """
    # NOTE(review): endpoint is csrf-exempt and performs no permission check
    # beyond requiring the caller to have an admin record — confirm intended.
    try:
        current_user_instance = req.user
        current_admin_instance = get_object_or_404(PubOrgAdmin, user=req.user.id)
    except Exception as e:
        return JsonResponse(
            {'status': 404, 'descr': 'Some exception: {0}'.format(e)})

    if req.method == 'GET':
        return JsonResponse(
            {'user_inst': current_user_instance,
             'admin_inst': current_admin_instance},
            safe=False, encoder=ManagersModelsJSONEncoder)

    if req.method == 'POST':
        fields = json.loads(req.body.decode("utf-8"))
        # create_user() hashes the password; User(password=...) stored it in
        # plain text, which also broke later authenticate() calls.
        new_user_inst = User.objects.create_user(
            username=fields.get('username'),
            password=fields.get('password'))
        # The ForeignKey must be assigned the User instance, not its pk
        # (assigning an int raises ValueError in Django).
        new_puborgadmin_inst = PubOrgAdmin.objects.create(
            user=new_user_inst,
            doc_code=fields.get('doc_code'),
            credentials=fields.get('credentials'))
        return JsonResponse(
            {'new_user_inst': new_user_inst,
             'new_admin_inst': new_puborgadmin_inst},
            safe=False, encoder=ManagersModelsJSONEncoder)

    if req.method == 'PUT':
        fields = json.loads(req.body.decode("utf-8"))
        # 404 on a missing id instead of an unhandled DoesNotExist (500),
        # matching the DELETE branch.
        admin_to_update = get_object_or_404(PubOrgAdmin, pk=fields.get('admin_id'))
        admin_to_update.doc_code = fields.get('doc_code')
        admin_to_update.credentials = fields.get('credentials')
        admin_to_update.save()
        return JsonResponse(
            {'upd_admin_inst': admin_to_update},
            safe=False, encoder=ManagersModelsJSONEncoder)

    if req.method == 'DELETE':
        fields = json.loads(req.body.decode("utf-8"))
        admin_id = fields.get('admin_id')
        admin_instance = get_object_or_404(PubOrgAdmin, pk=admin_id)
        admin_user_instance = get_object_or_404(User, pk=admin_instance.user.pk)
        admin_instance.delete()
        admin_user_instance.delete()
        return JsonResponse({'deleted_id': admin_id})

    return JsonResponse({'status': 405, 'descr': 'method not allowed'})
@method_decorator(csrf_exempt, name='dispatch')
def apimanageregistrant(req):
    """CRUD endpoint for PubOrgRegistrant records.

    GET    -> current user's User + PubOrgRegistrant instances
    POST   -> create a User and a linked PubOrgRegistrant from JSON fields
    PUT    -> update an existing PubOrgRegistrant
    DELETE -> delete a PubOrgRegistrant and its backing User
    """
    # NOTE(review): endpoint is csrf-exempt and performs no permission check
    # beyond requiring the caller to have a registrant record.
    try:
        current_user_instance = req.user
        current_registrant_instance = get_object_or_404(
            PubOrgRegistrant, user=req.user.id)
    except Exception as e:
        return JsonResponse(
            {'status': 404, 'descr': 'Some exception: {0}'.format(e)})

    if req.method == 'GET':
        return JsonResponse(
            {'user_inst': current_user_instance,
             'registrant_inst': current_registrant_instance},
            safe=False, encoder=ManagersModelsJSONEncoder)

    if req.method == 'POST':
        fields = json.loads(req.body.decode("utf-8"))
        # create_user() hashes the password; User(password=...) stored it in
        # plain text, which also broke later authenticate() calls.
        new_user_inst = User.objects.create_user(
            username=fields.get('username'),
            password=fields.get('password'))
        # The ForeignKey must be assigned the User instance, not its pk.
        new_puborgregistrant_inst = PubOrgRegistrant.objects.create(
            user=new_user_inst,
            hired_by=fields.get('hired_by'),
            hired_order_code=fields.get('hired_order_code'),
            doc_code=fields.get('doc_code'),
            credentials=fields.get('credentials'))
        return JsonResponse(
            {'new_user_inst': new_user_inst,
             'new_registrant_inst': new_puborgregistrant_inst},
            safe=False, encoder=ManagersModelsJSONEncoder)

    if req.method == 'PUT':
        fields = json.loads(req.body.decode("utf-8"))
        # 404 on a missing id instead of an unhandled DoesNotExist (500),
        # matching the DELETE branch.
        registrant_to_update = get_object_or_404(
            PubOrgRegistrant, pk=fields.get('registrant_id'))
        registrant_to_update.hired_by = fields.get('hired_by')
        registrant_to_update.hired_order_code = fields.get('hired_order_code')
        registrant_to_update.doc_code = fields.get('doc_code')
        registrant_to_update.credentials = fields.get('credentials')
        registrant_to_update.save()
        return JsonResponse(
            {'upd_registrant_inst': registrant_to_update},
            safe=False, encoder=ManagersModelsJSONEncoder)

    if req.method == 'DELETE':
        fields = json.loads(req.body.decode("utf-8"))
        registrant_id = fields.get('registrant_id')
        registrant_instance = get_object_or_404(PubOrgRegistrant, pk=registrant_id)
        registrant_user_instance = get_object_or_404(
            User, pk=registrant_instance.user.pk)
        registrant_instance.delete()
        registrant_user_instance.delete()
        return JsonResponse({'deleted_id': registrant_id})

    return JsonResponse({'status': 405, 'descr': 'method not allowed'})
| 42.174359 | 102 | 0.678867 | 0 | 0 | 0 | 0 | 7,808 | 0.949416 | 0 | 0 | 1,197 | 0.14555 |
7f2d111e6a6fe6c8ef6ae197c105c8637536b22e | 101 | py | Python | tests/union-env.py | fangyuchen86/mini-pysonar | 541e55ebadee35afb22e17b19eed5c19ad31e21e | [
"BSD-3-Clause"
] | 22 | 2015-04-03T12:44:24.000Z | 2021-12-22T17:55:00.000Z | tests/union-env.py | GaoGersy/mini-pysonar | 541e55ebadee35afb22e17b19eed5c19ad31e21e | [
"BSD-3-Clause"
] | null | null | null | tests/union-env.py | GaoGersy/mini-pysonar | 541e55ebadee35afb22e17b19eed5c19ad31e21e | [
"BSD-3-Clause"
def f(x):
    """Collapse any value by truthiness: 1 for truthy x, 'zero' for falsy.

    Exercises a union return type (int | str) for the analyzer test suite.
    (The def header was fused with a dataset metadata row; restored here to
    valid Python with identical semantics.)
    """
    if x:
        x = 1
    else:
        x = 'zero'
    y = x
    return y


f(1)
| 10.1 | 19 | 0.29703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.059406 |
7f2dfec0ea8564d6ada61226e115c7c09c151298 | 23,950 | py | Python | mechroutines/es/runner/scan.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | null | null | null | mechroutines/es/runner/scan.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | null | null | null | mechroutines/es/runner/scan.py | keceli/mechdriver | 978994ba5c77b6df00078b639c4482dacf269440 | [
"Apache-2.0"
] | 8 | 2019-12-18T20:09:46.000Z | 2020-11-14T16:37:28.000Z | """ Library to perform sequences of electronic structure calculations
along a molecular coordinate and save the resulting information to
SCAN or CSAN layers of the save filesystem.
"""
import numpy
import automol
import autofile
import elstruct
from phydat import phycon
from mechlib import filesys
from mechlib.amech_io import printer as ioprinter
from mechroutines.es.runner._run import execute_job
from mechroutines.es.runner._run import read_job
def execute_scan(zma, spc_info, mod_thy_info,
                 coord_names, coord_grids,
                 scn_run_fs, scn_save_fs, scn_typ,
                 script_str, overwrite,
                 zrxn=None,
                 update_guess=True, reverse_sweep=False,
                 saddle=False,
                 constraint_dct=None, retryfail=True,
                 **kwargs):
    """ Top-level scan driver: run the electronic structure jobs only when
        the save filesystem shows the scan is unfinished, then save the
        resulting information to the SCAN/CSCAN layers.
    """
    # Need a resave option
    if not _scan_finished(
            coord_names, coord_grids, scn_save_fs,
            constraint_dct=constraint_dct, overwrite=overwrite):
        run_scan(
            zma, spc_info, mod_thy_info,
            coord_names, coord_grids,
            scn_run_fs, scn_save_fs, scn_typ,
            script_str, overwrite,
            zrxn=zrxn,
            update_guess=update_guess, reverse_sweep=reverse_sweep,
            saddle=saddle,
            constraint_dct=constraint_dct, retryfail=retryfail,
            **kwargs)

    # Saving happens whether or not new points were just run.
    save_scan(
        scn_run_fs=scn_run_fs,
        scn_save_fs=scn_save_fs,
        scn_typ=scn_typ,
        coord_names=coord_names,
        constraint_dct=constraint_dct,
        mod_thy_info=mod_thy_info)
def run_scan(zma, spc_info, mod_thy_info,
             coord_names, coord_grids,
             scn_run_fs, scn_save_fs, scn_typ,
             script_str, overwrite,
             zrxn=None,
             update_guess=True, reverse_sweep=True,
             saddle=False,
             constraint_dct=None, retryfail=True,
             **kwargs):
    """ Run the constrained-optimization scan, optionally sweeping the grid
        a second time in reverse to catch errors.
    """
    # SCANS vs CSCANS layer: constrained scans key on the constraint dict.
    coord_locs = coord_names if constraint_dct is None else constraint_dct

    scn_save_fs[1].create([coord_locs])
    inf_obj = autofile.schema.info_objects.scan_branch(
        dict(zip(coord_names, coord_grids)))
    scn_save_fs[1].file.info.write(inf_obj, [coord_locs])

    # Forward sweep always runs; a reversed sweep is appended on request.
    mixed_grid_vals = automol.pot.coords(coord_grids)
    sweeps = [mixed_grid_vals]
    if reverse_sweep:
        sweeps.append(tuple(reversed(mixed_grid_vals)))

    for sweep_idx, sweep_vals in enumerate(sweeps):
        if sweep_idx == 1:
            print('\nDoing a reverse sweep of the scan to catch errors...')
        _run_scan(
            guess_zma=zma,
            spc_info=spc_info,
            mod_thy_info=mod_thy_info,
            coord_names=coord_names,
            grid_vals=sweep_vals,
            scn_run_fs=scn_run_fs,
            scn_save_fs=scn_save_fs,
            scn_typ=scn_typ,
            script_str=script_str,
            overwrite=overwrite,
            zrxn=zrxn,
            retryfail=retryfail,
            update_guess=update_guess,
            saddle=saddle,
            constraint_dct=constraint_dct,
            **kwargs
        )
def run_backsteps(
        zma, spc_info, mod_thy_info,
        coord_names, coord_grids,
        scn_run_fs, scn_save_fs, scn_typ,
        script_str, overwrite,
        zrxn=None,
        saddle=False,
        constraint_dct=None, retryfail=True,
        errors=(), options_mat=(),
        **kwargs):
    """ Run backward steps along a scan and stop once there is no hysteresis
        between the forward- and reverse-sweep potentials.

        The reverse-sweep coordinates are offset by 4*pi so they land in
        distinct filesystem locations from the forward sweep.
    """
    # Set up info that is constant across the scan
    # i.e., jobtype, frozen_coords
    job = _set_job(scn_typ)
    mixed_grid_vals_lst = automol.pot.coords(coord_grids)

    # Hold off on backsteps while original scan is running
    if _scan_is_running(
            mixed_grid_vals_lst, coord_names, constraint_dct, scn_run_fs, job):
        ioprinter.info_message(
            f'Rotor {coord_names} is currently running, wait to backstep')
        return

    if constraint_dct is None:
        coord_locs = coord_names
        frozen_coordinates = coord_names
    else:
        coord_locs = constraint_dct
        frozen_coordinates = tuple(coord_names) + tuple(constraint_dct)

    # Set the initial zma
    guess_zma = zma

    scn_save_fs[1].create([coord_locs])
    inf_obj = autofile.schema.info_objects.scan_branch(
        dict(zip(coord_locs, coord_grids)))
    scn_save_fs[1].file.info.write(inf_obj, [coord_locs])

    # Build the grid of values; reverse sweep points are shifted by 4*pi
    # so they occupy separate filesystem locations.
    rev_grid_vals_orig_lst = tuple(reversed(mixed_grid_vals_lst))
    rev_grid_vals_lst = tuple(tuple(val + 4*numpy.pi for val in grid)
                              for grid in rev_grid_vals_orig_lst)
    # Assemble the potential: at each point take the lower of the forward
    # and (if present) backstep energies, in kcal/mol.
    pot = {}
    for idx, grid_vals in enumerate(mixed_grid_vals_lst):
        locs = [coord_names, grid_vals]
        back_locs = [coord_names, rev_grid_vals_lst[idx]]
        if constraint_dct is not None:
            locs = [constraint_dct] + locs
            back_locs = [constraint_dct] + back_locs
        path = scn_save_fs[-1].path(locs)
        back_path = scn_save_fs[-1].path(back_locs)
        sp_save_fs = autofile.fs.single_point(path)
        back_sp_save_fs = autofile.fs.single_point(back_path)
        ene = None
        if sp_save_fs[-1].file.energy.exists(mod_thy_info[1:4]):
            ene = sp_save_fs[-1].file.energy.read(mod_thy_info[1:4])
            ene = ene * phycon.EH2KCAL
        if back_sp_save_fs[-1].file.energy.exists(mod_thy_info[1:4]):
            back_ene = back_sp_save_fs[-1].file.energy.read(mod_thy_info[1:4])
            back_ene = back_ene * phycon.EH2KCAL
            if ene is None:
                ene = back_ene
            if back_ene < ene:
                ene = back_ene
        if ene:
            pot[grid_vals] = ene

    # Convert the energies to a baseline relative to the first point;
    # helps with numerical issues related to the spline fitting
    ref_ene = pot[mixed_grid_vals_lst[0]]
    for grid_vals, ene in pot.items():
        pot[grid_vals] = ene - ref_ene

    # Convert units to degrees (will need to fix for 2-D stuff)
    conv_pot = {}
    for grid_vals, ene in pot.items():
        conv_grid_vals = (grid_vals[0] * phycon.RAD2DEG,)
        conv_pot[conv_grid_vals] = ene

    # Spline fit flags a single suspicious grid point (degrees), if any.
    bad_grid_vals = (filesys.read.identify_bad_point(conv_pot, thresh=0.018),)
    if bad_grid_vals[0] is not None:
        print('Akima spline identified potential hysteresis at ',
              bad_grid_vals[0]*phycon.DEG2RAD)
        passed_bad_point = False
        # Walk the grid in reverse, re-optimizing each point; stop once past
        # the bad point and the reverse energy matches the forward one.
        for idx, rev_grid_vals in enumerate(rev_grid_vals_lst):
            if rev_grid_vals_orig_lst[idx] <= bad_grid_vals[0]*phycon.DEG2RAD:
                passed_bad_point = True
            # Get locs for reading and running filesystem
            locs = [coord_names, rev_grid_vals]
            locs_orig = [coord_names, rev_grid_vals_orig_lst[idx]]
            if constraint_dct is not None:
                locs = [constraint_dct] + locs
                locs_orig = [constraint_dct] + locs_orig

            scn_run_fs[-1].create(locs)
            run_fs = autofile.fs.run(scn_run_fs[-1].path(locs))

            # Build the zma from the previous point's optimized guess.
            zma = automol.zmat.set_values_by_name(
                guess_zma, dict(zip(coord_names, rev_grid_vals)),
                angstrom=False, degree=False)

            # Run an optimization or energy job, as needed.
            geo_exists = scn_save_fs[-1].file.geometry.exists(locs)
            ioprinter.info_message("Taking a backstep at ", rev_grid_vals)
            if not geo_exists or overwrite:
                success, ret = execute_job(
                    job=job,
                    script_str=script_str,
                    run_fs=run_fs,
                    geo=zma,
                    spc_info=spc_info,
                    thy_info=mod_thy_info,
                    zrxn=zrxn,
                    overwrite=overwrite,
                    frozen_coordinates=frozen_coordinates,
                    errors=errors,
                    options_mat=options_mat,
                    retryfail=retryfail,
                    saddle=saddle,
                    **kwargs
                )

                # Read the output for the zma and geo; the optimized zma
                # seeds the next backstep (guess propagation).
                if success:
                    opt_zma = filesys.save.read_job_zma(ret, init_zma=zma)
                    guess_zma = opt_zma
                    filesys.save.scan_point_structure(
                        ret, scn_save_fs, locs, mod_thy_info[1:], job,
                        init_zma=zma, init_geo=None)
                else:
                    break
            else:
                guess_zma = scn_save_fs[-1].file.zmatrix.read(locs)

            # break out of reverse sweep if the new energy is
            # within 1 kcal/mol of the value found in the forward
            # direction
            # Read in the forward and reverse energy
            ioprinter.info_message(
                "Comparing to ", rev_grid_vals_orig_lst[idx])
            path = scn_save_fs[-1].path(locs)
            path_orig = scn_save_fs[-1].path(locs_orig)
            sp_save_fs = autofile.fs.single_point(path)
            orig_sp_save_fs = autofile.fs.single_point(path_orig)
            ene = sp_save_fs[-1].file.energy.read(mod_thy_info[1:4])
            ene_orig = orig_sp_save_fs[-1].file.energy.read(mod_thy_info[1:4])
            ene = ene * phycon.EH2KCAL
            ene_orig = ene_orig * phycon.EH2KCAL
            pot = ene - ene_orig
            pot_thresh = -0.1

            # Print status message about backstep
            no_backstep_required = (pot > pot_thresh and passed_bad_point)
            if no_backstep_required:
                ioprinter.info_message("Reverse Sweep finds a potential "
                                       f"{pot:5.2f} from the forward sweep")
                ioprinter.info_message("...no more backsteps required")
            else:
                ioprinter.warning_message("Backstep finds a potential less "
                                          "than forward sweep of "
                                          f"{pot:5.2f} kcal/mol at ")
                ioprinter.info_message(locs, locs_orig)
                ioprinter.info_message("...more backsteps required")

            # Break loop if no backstep is required
            if no_backstep_required:
                break
def _scan_is_running(grid_vals, coord_names, constraint_dct, scn_run_fs, job):
""" Is the rotor you requested currently being progressed on?
"""
rotor_is_running = False
for vals in grid_vals:
locs = [coord_names, vals]
if constraint_dct is not None:
locs = [constraint_dct] + locs
if scn_run_fs[-1].exists(locs):
run_fs = autofile.fs.run(scn_run_fs[-1].path(locs))
if run_fs[-1].file.info.exists([job]):
inf_obj = run_fs[-1].file.info.read([job])
if inf_obj.status == autofile.schema.RunStatus.RUNNING:
rotor_is_running = True
ioprinter.info_message(
'scan job is currently running at ',
coord_names, locs)
break
# else:
# break
# This else turns on and off letting the scan run
# backward simultaneously to forward
return rotor_is_running
def _run_scan(guess_zma, spc_info, mod_thy_info,
              coord_names, grid_vals,
              scn_run_fs, scn_save_fs, scn_typ,
              script_str, overwrite,
              zrxn=None,
              errors=(), options_mat=(),
              retryfail=True, update_guess=True,
              saddle=False, constraint_dct=None,
              **kwargs):
    """ new run function

        Runs an optimization (relaxed scan) or single-point energy
        (rigid scan) job at every requested grid point, skipping points
        whose geometry already exists in the save filesystem unless
        `overwrite` is set. Does nothing if the scan is already being
        progressed by another process.

        :param guess_zma: initial Z-Matrix used for the first scan point
        :param coord_names: names of the scan coordinates
        :type coord_names: tuple(tuple(str))
        :param grid_vals: values of all the scan coordinates
        :type grid_vals: ?? same as coord_grids?
        :param scn_run_fs: SCAN/CSCAN object with run filesys prefix
        :type scn_run_fs: autofile.fs.scan or autofile.fs.cscan object
        :param scn_save_fs: SCAN/CSCAN object with save filesys prefix
        :type scn_save_fs: autofile.fs.scan or autofile.fs.cscan object
        :param scn_typ: label for scan type ('relaxed' or 'rigid')
        :type scn_typ: str
        :param update_guess: if True, carry each successfully optimized
            zma forward as the next point's initial guess
        :param constraint_dct: values of coordinates to constrain (or None)
    """
    # Get a connected geometry from the init guess_zma for instability checks
    # conn_geo = automol.zmatrix.geometry(guess_zma)
    # conn_zma = guess_zma
    # Set the frozen coordinates (set job at this point?)
    # For constrained scans, the constrained coordinates are frozen in
    # addition to the scan coordinates themselves.
    if constraint_dct is not None:
        frozen_coordinates = tuple(coord_names) + tuple(constraint_dct)
    else:
        frozen_coordinates = coord_names
    # Set the job
    job = _set_job(scn_typ)
    # Skip everything if another process is already progressing this scan.
    if not _scan_is_running(
            grid_vals, coord_names, constraint_dct, scn_run_fs, job):
        num_vals = len(grid_vals)
        # Read the energies and Hessians from the filesystem
        for val_idx, vals in enumerate(grid_vals):
            print(f'Running Scan Point {val_idx+1}/{num_vals}:')
            # Set the locs for the scan point
            locs = [coord_names, vals]
            if constraint_dct is not None:
                locs = [constraint_dct] + locs
            # Create the filesys
            scn_run_fs[-1].create(locs)
            run_fs = autofile.fs.run(scn_run_fs[-1].path(locs))
            # Build the zma: set the scan coordinates on the current guess
            # structure to this grid point's values
            zma = automol.zmat.set_values_by_name(
                guess_zma, dict(zip(coord_names, vals)),
                angstrom=False, degree=False)
            # Run an optimization or energy job, as needed.
            geo_exists = scn_save_fs[-1].file.geometry.exists(locs)
            if not geo_exists or overwrite:
                if job == elstruct.Job.OPTIMIZATION:
                    success, ret = execute_job(
                        job=job,
                        script_str=script_str,
                        run_fs=run_fs,
                        geo=zma,
                        spc_info=spc_info,
                        thy_info=mod_thy_info,
                        zrxn=zrxn,
                        overwrite=overwrite,
                        frozen_coordinates=frozen_coordinates,
                        errors=errors,
                        options_mat=options_mat,
                        retryfail=retryfail,
                        saddle=saddle,
                        **kwargs
                    )
                    # Read the output for the zma and geo
                    if success:
                        opt_zma = filesys.save.read_job_zma(ret, init_zma=zma)
                        if update_guess:
                            guess_zma = opt_zma
                elif job == elstruct.Job.ENERGY:
                    # Return values are discarded here; results are read
                    # back from the run filesystem by the save step.
                    _, _ = execute_job(
                        job=job,
                        script_str=script_str,
                        run_fs=run_fs,
                        geo=zma,
                        spc_info=spc_info,
                        thy_info=mod_thy_info,
                        zrxn=zrxn,
                        overwrite=overwrite,
                        errors=errors,
                        options_mat=options_mat,
                        retryfail=retryfail,
                        **kwargs
                    )
def save_scan(scn_run_fs, scn_save_fs, scn_typ,
              coord_names, constraint_dct,
              mod_thy_info):
    """ Search the SCAN/CSCAN layer of the run filesystem for output of
        electronic structure calculations along the scan coordinate, then
        parse out the required information and save it into formatted files
        in the SCAN/CSCAN layer of the save filesystem.

        :param scn_run_fs: SCAN/CSCAN object with run filesys prefix
        :type scn_run_fs: autofile.fs.scan or autofile.fs.cscan object
        :param scn_save_fs: SCAN/CSCAN object with save filesys prefix
        :type scn_save_fs: autofile.fs.scan or autofile.fs.cscan object
        :param scn_typ: label for scan type ('relaxed' or 'rigid')
        :type scn_typ: str
        :param coord_names: names of the scan coordinates
        :type coord_names: tuple(tuple(str))
        :param constraint_dct: values of coordinates to constrain during scan
        :type constraint_dct: dict[str: float]
    """
    ioprinter.info_message(
        'Saving any newly run scans in run filesys...', newline=1)

    # Job label and locs are determined by the scan type and coordinates
    job = _set_job(scn_typ)
    coord_locs, save_locs = scan_locs(
        scn_run_fs, coord_names, constraint_dct=constraint_dct)

    if not scn_run_fs[1].exists([coord_locs]):
        ioprinter.info_message("No scan to save. Skipping...")
        return

    saved_locs = []
    for locs in save_locs:
        # Read the electronic structure output for this scan point
        run_path = scn_run_fs[-1].path(locs)
        run_fs = autofile.fs.run(run_path)
        ioprinter.info_message(f"Reading from scan run at {run_path}")
        success, ret = read_job(job, run_fs)
        if not success:
            continue
        # Need to get the init zma structure in here
        # could write init zma to run filesys; wont work retro
        # get init zma readers?
        if run_fs[-1].file.zmatrix.exists([job]):
            init_zma = run_fs[-1].file.zmatrix.read([job])
        else:
            init_zma = None
        filesys.save.scan_point_structure(
            ret, scn_save_fs, locs, mod_thy_info[1:], job,
            init_zma=init_zma, init_geo=None)
        saved_locs.append(locs)

    # Collate everything that was saved into the trajectory file
    if saved_locs:
        write_traj(coord_locs, scn_save_fs, mod_thy_info, saved_locs)
def scan_locs(scn_save_fs, coord_names, constraint_dct=None):
    """ Determine the locs for all of the directories that currently
        exist in the SCAN/CSCAN layer of the save filesystem.

        :param scn_save_fs: SCAN/CSCAN object with save filesys prefix
        :type scn_save_fs: autofile.fs.scan or autofile.fs.cscan object
        :param coord_names: names of the scan coordinates
        :type coord_names: tuple(tuple(str))
        :param constraint_dct: values of coordinates to constrain during scan
        :type constraint_dct: dict[str: float]
        :return: (top-layer locs, tuple of scan-point locs)
    """
    if constraint_dct is None:
        # Unconstrained scan: points live directly under the coordinate names
        coord_locs = coord_names
        point_locs = scn_save_fs[-1].existing([coord_locs])
    else:
        # Constrained scan: points are nested one layer deeper, under the
        # constraint dictionary
        coord_locs = constraint_dct
        point_locs = tuple(
            inner_locs
            for outer_locs in scn_save_fs[2].existing([coord_locs])
            if scn_save_fs[2].exists(outer_locs)
            for inner_locs in scn_save_fs[3].existing(outer_locs)
        )
    return coord_locs, point_locs
def _scan_finished(coord_names, coord_grids, scn_save_fs,
                   constraint_dct=None, overwrite=False):
    """ Assess whether the scan calculations requested by the user have
        been completed, by checking that a Z-Matrix exists in the save
        filesystem for every grid value of the scan coordinates.

        :param coord_names: names of the scan coordinates
        :type coord_names: tuple(tuple(str))
        :param coord_grids: values of all the scan coordinates
        :type coord_grids: tuple(tuple(float))
        :param scn_save_fs: SCAN/CSCAN object with save filesys prefix
        :type scn_save_fs: autofile.fs.scan or autofile.fs.cscan object
        :param constraint_dct: values of coordinates to constrain during scan
        :type constraint_dct: dict[str: float]
        :param overwrite: if True, the scan is always treated as unfinished
        :type overwrite: bool
        :rtype: bool
    """
    if overwrite:
        # User wants the scan rerun regardless of what is saved
        run_finished = False
        ioprinter.message('User elected to overwrite scan')
    else:
        # Finished only if every single grid point has a saved Z-Matrix
        run_finished = True
        for vals in automol.pot.coords(coord_grids):
            locs = [coord_names, vals]
            if constraint_dct is not None:
                locs = [constraint_dct] + locs
            if not scn_save_fs[-1].file.zmatrix.exists(locs):
                run_finished = False
                break

    if run_finished:
        ioprinter.message(f'Scan saved previously at {scn_save_fs[0].path()}')
    else:
        ioprinter.message('Need to run scans')

    return run_finished
def _set_job(scn_typ):
    """ Translate the input scan type into the corresponding job label
        defined in the elstruct package:
        'relaxed' -> OPTIMIZATION, 'rigid' -> ENERGY.

        :param scn_typ: label for scan type ('relaxed' or 'rigid')
        :type scn_typ: str
        :rtype: str
    """
    assert scn_typ in ('relaxed', 'rigid'), (
        f'{scn_typ} is not relaxed or rigid'
    )
    return (elstruct.Job.OPTIMIZATION if scn_typ == 'relaxed'
            else elstruct.Job.ENERGY)
def write_traj(ini_locs, scn_save_fs, mod_thy_info, locs_lst):
    """ Read the geometries and energies of all optimized geometries
        in the SCAN/CSCAN filesystem and collate them into an .xyz
        trajectory file, which is thereafter written into a file in the
        filesystem.

        :param ini_locs: locs for high level SCAN/CSCAN to save traj file
        :type ini_locs: dict[]
        :param scn_save_fs: SCAN/CSCAN object with save filesys prefix
        :type scn_save_fs: autofile.fs.scan or autofile.fs.cscan object
        :param mod_thy_info: thy info object used to read the energies
        :param locs_lst: locs of each scan point to include, in order
    """
    # Build (geometry, comment) frames in one pass over the scan points
    traj = []
    for locs in locs_lst:
        # Single-point energies live one filesys layer below the scan point
        sp_save_fs = autofile.fs.single_point(scn_save_fs[-1].path(locs))
        ene = sp_save_fs[-1].file.energy.read(mod_thy_info[1:4])
        geo = scn_save_fs[-1].file.geometry.read(locs)
        # Label each frame with its energy and grid values (last locs entry)
        idx_str = ','.join(f'{idx:.2f}' for idx in locs[-1])
        traj.append((geo, f'energy: {ene:>15.10f}, grid idxs: {idx_str}'))

    traj_path = scn_save_fs[1].file.trajectory.path([ini_locs])
    # Use ioprinter for output, consistent with the rest of this module
    ioprinter.info_message(f"Updating scan trajectory file at {traj_path}")
    scn_save_fs[1].file.trajectory.write(traj, [ini_locs])
# DERIVED FUNCTION THAT RUNS RUN_SCAN AND SAVE IN TWO DIRECTIONS #
def run_two_way_scan(ts_zma, ts_info, mod_thy_info,
                     grid1, grid2, coord_name,
                     scn_run_fs, scn_save_fs,
                     opt_script_str, overwrite,
                     update_guess=True,
                     reverse_sweep=True,
                     saddle=False,
                     constraint_dct=None,
                     retryfail=False,
                     **opt_kwargs):
    """ Run a two-part scan that goes into two directions, as for rxn path
        Wrapper to the execute_scan to run in two directions
    """
    # Everything except the grid itself is shared between the two sweeps
    common_kwargs = dict(
        zma=ts_zma,
        spc_info=ts_info,
        mod_thy_info=mod_thy_info,
        coord_names=[coord_name],
        scn_run_fs=scn_run_fs,
        scn_save_fs=scn_save_fs,
        scn_typ='relaxed',
        script_str=opt_script_str,
        overwrite=overwrite,
        update_guess=update_guess,
        reverse_sweep=reverse_sweep,
        saddle=saddle,
        constraint_dct=constraint_dct,
        retryfail=retryfail,
        **opt_kwargs
    )
    for grid in (grid1, grid2):
        execute_scan(coord_grids=[grid], **common_kwargs)
| 37.716535 | 79 | 0.590856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,841 | 0.285637 |
7f2e8ec226f72ce3fd345487aaba5e6c7912c451 | 3,296 | py | Python | book-copier.py | BoKnowsCoding/hbd-organizer | 5312e2678e7f0ee731540f283137c6e8127c6a60 | [
"Unlicense"
] | null | null | null | book-copier.py | BoKnowsCoding/hbd-organizer | 5312e2678e7f0ee731540f283137c6e8127c6a60 | [
"Unlicense"
] | 1 | 2020-06-15T15:52:21.000Z | 2020-06-26T20:27:15.000Z | book-copier.py | BoKnowsCoding/hbd-organizer | 5312e2678e7f0ee731540f283137c6e8127c6a60 | [
"Unlicense"
] | null | null | null | """
Designed to be used in conjunction with xtream1101/humblebundle-downloader.
Takes the download directory of that script, then copies all file types of
each non-comic book to a chosen directory.
Each folder in the target directory will be one book, containing the
different file formats available for the book. They are not separated by
bundles, since this way you can import directories and subdirectories in
calibre, then choose to assume all e-book files in a directory are the same
book in different formats.
Renaming files after they are copied will not result in files being copied
again, as this script keeps a JSON in the given source directory recording
all files previously copied to a target folder.
"""
import sys
import os
import shutil
import json
def traverseBundles(source, target, hbdJSON, copiedJSON):
    """Walk every bundle folder under `source` and process each one that is
    a directory and is not a comic bundle.
    """
    for bundle_name in os.listdir(source):
        bundle_path = source + "/" + bundle_name
        # Comic bundles are skipped entirely; they are assumed to contain
        # no regular books.
        looks_like_comics = "comic" in bundle_name.lower()
        if os.path.isdir(bundle_path) and not looks_like_comics:
            traverseBundleItems(bundle_path, target, hbdJSON, copiedJSON)
def traverseBundleItems(source, target, hbdJSON, copiedJSON):
    """Process each item (book) folder inside one bundle, skipping items
    already recorded in `copiedJSON` from a previous run.
    """
    for item_name in os.listdir(source):
        if item_name in copiedJSON:
            continue
        traverseFiles(source + "/" + item_name,
                      target + "/" + item_name,
                      hbdJSON, copiedJSON, item_name)
def traverseFiles(source, target, hbdJSON, copiedJSON, itemName):
    """Inspect one item's files; if the item looks like a regular e-book
    (not a comic), copy its .pdf/.epub/.mobi files into `target` and record
    the item in `copiedJSON`.

    :param source: directory holding the item's downloaded files
    :param target: directory the book files should be copied into
    :param hbdJSON: humblebundle-downloader cache data (unused here)
    :param copiedJSON: mutable record of already-copied items; updated in place
    :param itemName: item name, used for comic detection and bookkeeping
    """
    # An item counts as a comic if its name says so or any file uses a
    # comic-book archive extension (.cbz, .cbr, ...).
    is_comic = "comic" in itemName.lower()
    is_book = False
    copy_jobs = []
    for file_name in os.listdir(source):
        extension = os.path.splitext(file_name)[1]
        # No extension: probably a binary, so this item is not a book
        if extension == "":
            break
        if extension.startswith(".cb"):
            is_comic = True
            break
        # Exact-match the e-book extensions; the old substring test
        # (extension in ".pdf.epub.mobi") could match unintended suffixes.
        if extension in (".pdf", ".epub", ".mobi"):
            is_book = True
            copy_jobs.append((source + "/" + file_name,
                              target + "/" + file_name))
    if is_book and not is_comic and copy_jobs:
        os.makedirs(target, exist_ok=True)
        for src_path, dst_path in copy_jobs:
            shutil.copyfile(src_path, dst_path)
            print(src_path + "\n-->" + dst_path + "\n")
        # Record on the mapping passed in by the caller; the original code
        # mistakenly updated the module-global `copiedDict` instead.
        copiedJSON.update({itemName: "book"})
def copyFiles(itemName, copyList):
    """Copy each (source, destination) pair in `copyList`, logging every copy.
    An empty list is a no-op. `itemName` is part of the interface but unused.
    """
    for src_path, dst_path in copyList:
        shutil.copyfile(src_path, dst_path)
        print(src_path + "\n-->" + dst_path + "\n")
if __name__ == "__main__":
    # Expect exactly two arguments: the humblebundle-downloader download
    # directory (source) and the directory book folders are copied into.
    if len(sys.argv) != 3:
        print("\nInvalid parameters.\nUsage: ", sys.argv[0], " <path to source> <path to target>\n")
        exit(1)
    else:
        source = sys.argv[1]
        target = sys.argv[2]
        # .cache.json lives in the download directory -- presumably written
        # by humblebundle-downloader (see module docstring).
        with open(source+"/.cache.json") as hbdJsonFile:
            hbdDict = json.load(hbdJsonFile)
        # .book-copier.json is this script's own record of items already
        # copied; start with an empty record if it does not exist yet.
        if os.path.exists(source+"/.book-copier.json"):
            with open(source+"/.book-copier.json") as copiedJsonFile:
                copiedDict = json.load(copiedJsonFile)
        else:
            copiedDict = {}
        traverseBundles(source,target,hbdDict,copiedDict)
        # Persist the updated record so future runs skip these items.
        with open(source+"/.book-copier.json","w") as copiedJsonFile:
            copiedJsonFile.write(json.dumps(copiedDict, sort_keys=True, indent=4))
| 33.979381 | 100 | 0.664442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,147 | 0.347998 |
7f2ed7e442eb4d9f8c00b399cb27de2af650d79c | 2,276 | py | Python | nemo/collections/tts/torch/helpers.py | 23jura23/NeMo | 6815146775432852feee1bc28ed1a7a2fc94010d | [
"Apache-2.0"
] | null | null | null | nemo/collections/tts/torch/helpers.py | 23jura23/NeMo | 6815146775432852feee1bc28ed1a7a2fc94010d | [
"Apache-2.0"
] | null | null | null | nemo/collections/tts/torch/helpers.py | 23jura23/NeMo | 6815146775432852feee1bc28ed1a7a2fc94010d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
import torch
from scipy import ndimage
from scipy.stats import betabinom
class BetaBinomialInterpolator:
    """
    This module calculates alignment prior matrices (based on beta-binomial distribution)
    by computing them only for rounded "popular" sizes, memoizing those results, and
    rescaling to the exact requested size with image interpolation.
    The implementation is taken from https://github.com/NVIDIA/DeepLearningExamples.
    """

    def __init__(self, round_mel_len_to=100, round_text_len_to=20):
        self.round_mel_len_to = round_mel_len_to
        self.round_text_len_to = round_text_len_to
        # Memoized prior computation, keyed on the rounded (mel, text) sizes
        self.bank = functools.lru_cache(beta_binomial_prior_distribution)

    def round(self, val, to):
        # Snap `val` to a multiple of `to` (never below one multiple)
        return max(1, int(np.round((val + 1) / to))) * to

    def __call__(self, w, h):
        rounded_w = self.round(w, to=self.round_mel_len_to)
        rounded_h = self.round(h, to=self.round_text_len_to)
        # Compute (or fetch) the prior at the rounded size, then zoom it
        # to the exact requested size with linear interpolation
        prior = self.bank(rounded_w, rounded_h).T
        scaled = ndimage.zoom(prior, zoom=(w / rounded_w, h / rounded_h), order=1)
        assert scaled.shape[0] == w, scaled.shape
        assert scaled.shape[1] == h, scaled.shape
        return scaled
def general_padding(item, item_len, max_len, pad_value=0):
    """Right-pad the last dimension of `item` (current length `item_len`)
    out to `max_len` with `pad_value`; returned unchanged when already
    long enough.
    """
    pad_amount = max_len - item_len
    if pad_amount <= 0:
        return item
    return torch.nn.functional.pad(item, (0, pad_amount), value=pad_value)
def beta_binomial_prior_distribution(phoneme_count, mel_count, scaling_factor=1.0):
    """Compute a (mel_count, phoneme_count) alignment prior matrix in which
    row i holds the beta-binomial pmf (with n = phoneme_count trials)
    evaluated at phoneme indices 0..phoneme_count-1 for mel frame i.

    :param phoneme_count: number of text tokens; accepts a plain int or a
        one-element/0-d torch tensor
    :param mel_count: number of mel frames
    :param scaling_factor: sharpness of the prior (larger -> more peaked)
    """
    # Coerce to a plain int: BetaBinomialInterpolator.bank passes rounded
    # ints, for which the previous `.detach().numpy()` call raised
    # AttributeError; int() also handles 0-d tensors.
    total_count = int(phoneme_count)
    x = np.arange(0, total_count)
    mel_text_probs = []
    for i in range(1, mel_count + 1):
        a, b = scaling_factor * i, scaling_factor * (mel_count + 1 - i)
        mel_i_prob = betabinom(total_count, a, b).pmf(x)
        mel_text_probs.append(mel_i_prob)
    return np.array(mel_text_probs)
| 38.576271 | 145 | 0.706503 | 939 | 0.412566 | 0 | 0 | 0 | 0 | 0 | 0 | 856 | 0.376098 |
7f2f313dd457f2204002811c97661a2ad5ef1b2a | 633 | py | Python | Project Pattern/pattern_26.py | AMARTYA2020/nppy | 7f750534bb5faa4e661447ca132077de0ce0a0ed | [
"MIT"
] | 4 | 2020-12-07T10:15:08.000Z | 2021-11-17T11:21:07.000Z | Project Pattern/pattern_26.py | AMARTYA2020/nppy | 7f750534bb5faa4e661447ca132077de0ce0a0ed | [
"MIT"
] | null | null | null | Project Pattern/pattern_26.py | AMARTYA2020/nppy | 7f750534bb5faa4e661447ca132077de0ce0a0ed | [
"MIT"
] | 1 | 2021-02-17T07:53:13.000Z | 2021-02-17T07:53:13.000Z | class Pattern_Twenty_Six:
'''Pattern twenty_six
***
* *
*
* ***
* *
* *
***
'''
    def __init__(self, strings='*'):
        """Print the pattern using `strings` as the drawing character.

        :param strings: character used to draw; non-strings are coerced
        """
        # Coerce non-string inputs (e.g. ints) so string repetition works
        if not isinstance(strings, str):
            strings = str(strings)
        # Seven rows: 0 and 6 are the top/bottom edges, 3 is the middle row
        for i in range(7):
            if i in [0, 6]:
                print(f' {strings * 3}')
            elif i in [1, 4, 5]:
                print(f'{strings} {strings}')
            elif i == 3:
                print(f'{strings} {strings * 3}')
            else:
                print(strings)
if __name__ == '__main__':
    # Demo: instantiating the class prints the pattern with the default '*'
    Pattern_Twenty_Six()
| 19.181818 | 49 | 0.388626 | 578 | 0.913112 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.330174 |
7f2fcabb83cbf0cb2f3a253ecff31e49e9b71e6b | 1,069 | py | Python | SVD.py | divi9626/RANSAC | c109e41bc9a476b64572d82c92a8aa20504df41a | [
"MIT"
] | null | null | null | SVD.py | divi9626/RANSAC | c109e41bc9a476b64572d82c92a8aa20504df41a | [
"MIT"
] | null | null | null | SVD.py | divi9626/RANSAC | c109e41bc9a476b64572d82c92a8aa20504df41a | [
"MIT"
] | null | null | null | import numpy as np
# Point-correspondence design matrix (8 equations, 9 unknowns)
A = np.asarray([[-5, -5, -1, 0, 0, 0, 500, 500, 100],
                [0, 0, 0, -5, -5, -1, 500, 500, 100],
                [-150, -5, -1, 0, 0, 0, 30000, 1000, 200],
                [0, 0, 0, -150, -5, -1, 12000, 400, 80],
                [-150, -150, -1, 0, 0, 0, 33000, 33000, 220],
                [0, 0, 0, -150, -150, -1, 12000, 12000, 80],
                [-5, -150, -1, 0, 0, 0, 500, 15000, 100],
                [0, 0, 0, -5, -150, -1, 1000, 30000, 200]])

###### Calculating SVD: A = U * sigma * V^T ######
# U spans the eigenvectors of A @ A.T; V spans those of A.T @ A.
# NOTE(review): separate eig() calls do not guarantee consistent
# eigenvalue ordering or signs between U and V, so U*S*Vt is not
# guaranteed to reconstruct A exactly -- np.linalg.svd would.
AAt = A.dot(A.T)
AtA = A.T.dot(A)

U = np.linalg.eig(AAt)[1]
print('U Matrix is: ')
print(U)

V = np.linalg.eig(AtA)[1]
print('V Matrix is: ')
print(V)

# Singular values are the square roots of the eigenvalues of A.T @ A
sigma = np.sqrt(np.absolute(np.linalg.eig(AtA)[0]))
S = np.diag(sigma)[0:8, :]
print('Sigma Matrix is:')
print(S)

###### Homography: the 9th column of V, reshaped to 3x3 ######
H = np.reshape(V[:, 8], (3, 3))
print('H matrix is: ')
print(H)
7f2fea8781d091be4e8f7a425ea94d6e86e56885 | 911 | py | Python | apps/university/api/serializers.py | ilyukevich/university-schedule | 305e568b00a847a8d2d10217568e7f87833fb5b3 | [
"MIT"
] | null | null | null | apps/university/api/serializers.py | ilyukevich/university-schedule | 305e568b00a847a8d2d10217568e7f87833fb5b3 | [
"MIT"
] | null | null | null | apps/university/api/serializers.py | ilyukevich/university-schedule | 305e568b00a847a8d2d10217568e7f87833fb5b3 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from ..models import Faculties, Departaments, StudyGroups, Auditories, Disciplines
class FacultiesSerializers(serializers.ModelSerializer):
    """Serializes Faculties instances for the API (all model fields)."""

    class Meta:
        model = Faculties
        fields = '__all__'
class DepartamentsSerializers(serializers.ModelSerializer):
    """Serializes Departaments instances for the API (all model fields)."""

    class Meta:
        model = Departaments
        fields = '__all__'
class StudyGroupsSerializers(serializers.ModelSerializer):
    """Serializes StudyGroups instances for the API (all model fields)."""

    class Meta:
        model = StudyGroups
        fields = '__all__'
class AuditoriesSerializers(serializers.ModelSerializer):
    """Serializes Auditories instances for the API (all model fields)."""

    class Meta:
        model = Auditories
        fields = '__all__'
class DisciplinesSerializers(serializers.ModelSerializer):
    """Serializes Disciplines instances for the API (all model fields)."""

    class Meta:
        model = Disciplines
        fields = '__all__'
| 21.186047 | 82 | 0.683864 | 774 | 0.849616 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.162459 |
7f303b0c11d6702e4f41fb8d7ecc0a87c19ce301 | 713 | py | Python | app/views/info/info_routes.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | app/views/info/info_routes.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | app/views/info/info_routes.py | tjdaley/publicdataws | 1aa4a98cf47fae10cc0f59a8d01168df806b4919 | [
"MIT"
] | null | null | null | """
info_routes.py - Handle the routes for basic information pages.
This module provides the views for the following routes:
/about
/privacy
/terms_and_conditions
Copyright (c) 2019 by Thomas J. Daley. All Rights Reserved.
"""
from flask import Blueprint, render_template
info_routes = Blueprint("info_routes", __name__, template_folder="templates")
@info_routes.route('/about', methods=['GET'])
def about():
    """Render the static About page."""
    return render_template('about.html')
@info_routes.route("/privacy/", methods=["GET"])
def privacy():
return render_template("privacy.html")
@info_routes.route("/terms_and_conditions", methods=["GET"])
def terms_and_conditions():
return render_template("terms_and_conditions.html")
| 23 | 77 | 0.751753 | 0 | 0 | 0 | 0 | 349 | 0.489481 | 0 | 0 | 363 | 0.509116 |