hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b4d2816f6506147c7e1180e32379768ecd8e932b | 945 | py | Python | tppm/auth.py | timtumturutumtum/TraktPlaybackProgressManager | 6b3b6f81a6de5c1b7f11d1b2ae34c1b1cc6a2b10 | [
"MIT"
] | 36 | 2017-08-06T13:47:21.000Z | 2022-02-19T03:33:07.000Z | tppm/auth.py | timtumturutumtum/TraktPlaybackProgressManager | 6b3b6f81a6de5c1b7f11d1b2ae34c1b1cc6a2b10 | [
"MIT"
] | 5 | 2018-07-20T13:01:35.000Z | 2021-12-12T21:03:05.000Z | tppm/auth.py | timtumturutumtum/TraktPlaybackProgressManager | 6b3b6f81a6de5c1b7f11d1b2ae34c1b1cc6a2b10 | [
"MIT"
] | 3 | 2018-11-20T13:16:37.000Z | 2021-10-13T01:57:55.000Z | # coding: utf-8
""" Trakt Playback Manager """
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import json
import os.path
def save(path, data):
    """Write *data* to *path* as pretty-printed, sorted, UTF-8 JSON."""
    # Serialize to one string and write it in a single call instead of
    # `json.dump`: on Python 2, json.dump may yield mixed str/unicode
    # chunks that io.open rejects (https://stackoverflow.com/a/14870531/7597273).
    serialized = json.dumps(data, sort_keys=True, ensure_ascii=False,
                            indent=2, separators=(',', ': '))
    with io.open(path, 'w', encoding='utf-8', newline='\n') as fh:
        fh.write(serialized)
def load(path):
    """Return the JSON content of *path*, or None if the file is missing
    or does not contain valid JSON."""
    if os.path.isfile(path):
        with io.open(path, 'r', encoding='utf-8') as fh:
            try:
                content = json.load(fh)
            except ValueError:  # malformed JSON
                content = None
        return content
    return None
def remove(path):
    """Delete *path* if it is a regular file; return True on success,
    False if it was absent or could not be removed."""
    if not os.path.isfile(path):
        return False
    try:
        os.remove(path)
    except OSError:
        return False
    return True
class NotAuthenticatedError(Exception):
    """Raised when an operation requires authentication that has not
    been performed."""
    pass
| 21 | 66 | 0.607407 | 48 | 0.050794 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.180952 |
b4d4621e29be6b6315126f0497290617aa1857f6 | 2,810 | py | Python | srt.py | glegoux/srt | 35db0c3cf23853f067326969178e174678e1a73c | [
"MIT"
] | null | null | null | srt.py | glegoux/srt | 35db0c3cf23853f067326969178e174678e1a73c | [
"MIT"
] | null | null | null | srt.py | glegoux/srt | 35db0c3cf23853f067326969178e174678e1a73c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import os.path
import re
from datetime import date, datetime, time, timedelta
# helper
def is_timeformat(s):
    """Return True if *s* is an SRT timestamp of the form HH:MM:SS,mmm.

    Args:
        s (str): candidate timestamp string.
    Returns:
        bool: True only for an exact full match.
    """
    p = re.compile('^[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3}$')
    # Idiomatic: a match object is truthy; no need for an if/else ladder.
    return p.match(s) is not None
def is_time_line(l):
    """Return True if line *l* starts with a two-digit field followed by
    a colon (the shape of an SRT time line such as "00:00:01,000 --> ...").

    Args:
        l (str): one line of an SRT file.
    Returns:
        bool
    """
    # Idiomatic: re.match already anchors at the start; return the
    # boolean directly instead of branching on the match object.
    return re.match('[0-9]{2}:', l) is not None
def get_time(s):
    """Parse an SRT timestamp string (HH:MM:SS,mmm) into a datetime.time."""
    return datetime.strptime(s, "%H:%M:%S,%f").time()
def get_str(t):
    """Format a datetime.time back into SRT form (HH:MM:SS,mmm)."""
    formatted = t.strftime("%H:%M:%S,%f")
    # %f yields microseconds (6 digits); SRT wants milliseconds (3 digits).
    return formatted[:-3]
def add(t0, delta):
    """Return *t0* shifted forward by *delta* (both datetime.time values);
    wraps around midnight."""
    offset = timedelta(hours=delta.hour, minutes=delta.minute,
                       seconds=delta.second, microseconds=delta.microsecond)
    shifted = datetime.combine(date.today(), t0) + offset
    return shifted.time()
def sub(t0, delta):
    """Return *t0* shifted backward by *delta* (both datetime.time values);
    wraps around midnight."""
    offset = timedelta(hours=delta.hour, minutes=delta.minute,
                       seconds=delta.second, microseconds=delta.microsecond)
    shifted = datetime.combine(date.today(), t0) - offset
    return shifted.time()
def get_endpoints(l):
    """Split an SRT time line ("start --> end") into its two
    datetime.time endpoints."""
    parts = re.split("[ ]+-->[ ]+", l.rstrip())
    return [get_time(part) for part in parts]
def transform_time_line(l, delta, sens):
    """Shift both endpoints of SRT time line *l* by *delta*, forward when
    *sens* is '+' and backward otherwise; return the rebuilt line."""
    shift = add if sens == '+' else sub
    start, end = (shift(endpoint, delta) for endpoint in get_endpoints(l))
    return "{} --> {}\n".format(get_str(start), get_str(end))
# main
if __name__ == "__main__":
    # Usage: srt.py <file.srt> <t0 as HH:MM:SS,mmm>
    # Shifts every timestamp in the SRT file so the first subtitle starts
    # at t0, writing the result to <file.srt>.new.
    filesrt = sys.argv[1]
    if not os.path.isfile(filesrt):
        print("ERROR: file isn't exist !")
        exit(1)
    filesrtnew = filesrt + ".new"
    t0 = sys.argv[2]
    if not is_timeformat(t0):
        print("ERROR: t0 isn't correct !")
        exit(1)
    t0 = get_time(t0)
    delta = 0
    sens = ""
    is_first_timeline = True
    with open(filesrt) as inputf:
        print("Reading {}".format(filesrt))
        for l in inputf:
            if is_time_line(l):
                if is_first_timeline:
                    # The first time line fixes the global offset (delta)
                    # and its direction (sens) for the whole file.
                    tt0 = get_endpoints(l)[0]
                    if tt0 > t0:
                        delta = sub(tt0, t0)
                        sens = '-'
                        print("Delta: -{}".format(get_str(delta)))
                    else:
                        delta = sub(t0, tt0)
                        sens = '+'
                        print("Delta: +{}".format(get_str(delta)))
                    is_first_timeline = False
                # NOTE(review): output is opened in append mode for every
                # line — re-running the script appends to an existing
                # .new file instead of overwriting it; confirm intended.
                with open(filesrtnew, "a") as outputf:
                    outputf.write(transform_time_line(l, delta, sens))
            else:
                # Non-time lines (indices, text, blanks) are copied as-is.
                with open(filesrtnew, "a") as outputf:
                    outputf.write(l)
    print("Writing {}".format(filesrtnew))
| 24.017094 | 66 | 0.504626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.096797 |
b4d5ab68cc4a4cd64dc0ee44aa2eb3eb7b21a324 | 1,195 | py | Python | pykeyset/core/font/font.py | staticintlucas/pykeyset | 8581252c85dfceebe22926af4640164a0895e7a0 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-07-06T16:43:25.000Z | 2021-07-06T16:43:25.000Z | pykeyset/core/font/font.py | staticintlucas/pykeyset | 8581252c85dfceebe22926af4640164a0895e7a0 | [
"Apache-2.0",
"MIT"
] | null | null | null | pykeyset/core/font/font.py | staticintlucas/pykeyset | 8581252c85dfceebe22926af4640164a0895e7a0 | [
"Apache-2.0",
"MIT"
] | null | null | null | from typing import Optional
from .glyph import Glyph
class Font:
    """Abstract interface for a glyph font: stores glyphs and the metrics
    needed to scale them. Every method here is an unimplemented stub to be
    provided by a concrete subclass/implementation."""
    def __init__(
        self,
        name: str,
        em_size: float,
        cap_height: float,
        x_height: float,
        line_height: float,
        slope: float,
        char_spacing: float,
    ):
        """Create a font with the given name and metrics (stub)."""
        raise NotImplementedError
    def __len__(self) -> int:
        """Returns the number of glyphs in the font"""
        raise NotImplementedError
    def glyph(self, char: str, size: float) -> Optional[Glyph]:
        """Returns a copy of the glyph for the chosen character scaled to the given size, or None
        if the Glyph does not exist in the font"""
        raise NotImplementedError
    def line_spacing(self, size: float) -> float:
        """Return the line spacing for the given size (stub)."""
        raise NotImplementedError
    def add_glyph(self, glyph: Glyph) -> None:
        """Adds a glyph to the font. The glyph should have the same metrics as set when creating
        the Font object"""
        raise NotImplementedError
    def replacement(self, size: float) -> Glyph:
        """Returns a copy of this font's replacement character (or the default if none exists)
        scaled to the given size"""
        raise NotImplementedError
| 25.978261 | 97 | 0.632636 | 1,138 | 0.952301 | 0 | 0 | 0 | 0 | 0 | 0 | 423 | 0.353975 |
b4d7710eaf3e98a228d796af98ffd59a89294d80 | 2,739 | py | Python | models/SoftmaxTriplet.py | anonymous1computervision/REID | 6364d7d0e75ebb81fafc765be41b9b3fd434eeae | [
"MIT"
] | 1 | 2020-06-15T07:50:05.000Z | 2020-06-15T07:50:05.000Z | models/SoftmaxTriplet.py | tiancity-NJU/REID | 125a520a9c0b94440a7757e6f3c3c8bf976906ec | [
"MIT"
] | null | null | null | models/SoftmaxTriplet.py | tiancity-NJU/REID | 125a520a9c0b94440a7757e6f3c3c8bf976906ec | [
"MIT"
] | 4 | 2019-04-09T13:10:58.000Z | 2020-03-06T15:22:38.000Z | # encoding: utf-8
import copy
import itertools
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch import nn, optim
from .resnet import ResNet
def weights_init_kaiming(m):
    """Kaiming-initialize Linear/Conv weights (bias -> 0) and reset
    norm-layer affine parameters; intended for `module.apply(...)`."""
    layer_type = m.__class__.__name__
    if 'Linear' in layer_type:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        nn.init.constant_(m.bias, 0.0)
    elif 'Conv' in layer_type:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif 'BatchNorm' in layer_type:
        # Only learnable (affine) norm layers have weight/bias to reset.
        if m.affine:
            nn.init.normal_(m.weight, 1.0, 0.02)
            nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
    """Initialize classifier Linear layers: small-std normal weights and
    zero bias; intended for `module.apply(...)`.

    Fix: the previous `if len(m.bias):` raised TypeError for layers built
    with `bias=False` (m.bias is None); for layers with a bias it was
    always truthy, so the None-check preserves existing behavior.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
class SoftmaxTriplet(nn.Module):
    """Re-ID model: ResNet-50 backbone whose pooled features feed an
    optional bottleneck + classifier head (softmax branch); the raw pooled
    features serve the triplet branch."""

    in_planes = 2048  # channel width of the backbone's final feature map

    def __init__(self, num_classes=None, last_stride=1, pretrained=False):
        super(SoftmaxTriplet, self).__init__()
        self.base = ResNet(last_stride)
        if pretrained:
            # Load ImageNet-pretrained ResNet-50 weights into the backbone.
            url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
            self.base.load_param(model_zoo.load_url(url))
        self.num_classes = num_classes
        if num_classes is not None:
            # Softmax head: 2048 -> 512 embedding, then class logits.
            self.bottleneck = nn.Sequential(
                nn.Linear(self.in_planes, 512),
                nn.BatchNorm1d(512),
                nn.LeakyReLU(0.1),
                nn.Dropout(p=0.5)
            )
            self.bottleneck.apply(weights_init_kaiming)
            self.classifier = nn.Linear(512, self.num_classes)
            self.classifier.apply(weights_init_classifier)

    def forward(self, x):
        features = self.base(x)
        features = F.avg_pool2d(features, features.shape[2:])  # (b, 2048, 1, 1)
        features = features.view(features.shape[0], -1)
        if self.training and self.num_classes is not None:
            embedded = self.bottleneck(features)
            logits = self.classifier(embedded)
            return [features], [logits]
        return features

    def get_optim_policy(self):
        """Return optimizer parameter groups: backbone first, then (when
        a classifier head exists) the bottleneck+classifier parameters."""
        backbone_params = self.base.parameters()
        if self.num_classes is None:
            return [{'params': backbone_params}]
        head_params = itertools.chain(self.bottleneck.parameters(),
                                      self.classifier.parameters())
        return [{'params': backbone_params}, {'params': head_params}]
b4da893de386e17a0bd776a5eb2220d68a53a7ab | 710 | py | Python | data-exporter/brix/settings.py | dzwiedziu-nkg/credo-api-tools | 37adce8c858d2997b90ce7a1397e68dd281b8249 | [
"MIT"
] | null | null | null | data-exporter/brix/settings.py | dzwiedziu-nkg/credo-api-tools | 37adce8c858d2997b90ce7a1397e68dd281b8249 | [
"MIT"
] | null | null | null | data-exporter/brix/settings.py | dzwiedziu-nkg/credo-api-tools | 37adce8c858d2997b90ce7a1397e68dd281b8249 | [
"MIT"
] | null | null | null | import csv
# Input/output locations for the CREDO data export.
DIR = 'credo-data-export/detections'
CSV = 'credo-data-export/credocut.tsv'
PLOT = 'credo-data-export/credocut.plot'
JSON = 'credo-data-export/credocut.json'
DEVICES = 'credo-data-export/device_mapping.json'
PNG = 'credo-data-export/png'
# Magic constant — presumably a cut/threshold value; TODO confirm meaning.
CREDOCUT = 10069
# csv dialect settings for the TSV output.
DELIMITER='\t'
QUOTECHAR='"'
QUOTING=csv.QUOTE_MINIMAL
# Column order of a detection row in the exported TSV.
COLUMNS = [
    'id',
    'user_id',
    'device_id',
    'team_id',
    'width',
    'height',
    'x',
    'y',
    'latitude',
    'longitude',
    'altitude',
    'accuracy',
    'provider',
    'source',
    'time_received',
    'timestamp',
    'visible',
    'frame_content'
]

# Reverse lookup: column name -> zero-based position in COLUMNS.
# (Dict comprehension replaces the previous C-style index loop.)
TSV_COLUMNS = {name: index for index, name in enumerate(COLUMNS)}

# Identifiers to exclude from the export; starts empty — TODO confirm
# what kind of ids are stored here.
BLACKLIST = set()
| 17.317073 | 49 | 0.623944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 360 | 0.507042 |
b4db052ea80413d8736cb6f07460bcb08f08dc7f | 540 | py | Python | Calibration/EcalAlCaRecoProducers/python/alcastreamEcalPhiSym_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Calibration/EcalAlCaRecoProducers/python/alcastreamEcalPhiSym_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Calibration/EcalAlCaRecoProducers/python/alcastreamEcalPhiSym_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | # The following comments couldn't be translated into the new config version:
#------------------------------------------------
#AlCaReco filtering for phi symmetry calibration:
#------------------------------------------------
#
# Passes events that are coming from the online phi-symmetry stream
#
#
import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi
# Clone of the generic HLT filter configured for the ECAL phi-symmetry stream:
# accept an event if any path matching 'AlCa_EcalPhiSym*' fired (andOr = True
# selects OR-of-paths per the hltHighLevel convention); throw = False skips
# paths absent from the trigger menu instead of raising.
ecalphiSymHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
    HLTPaths = ['AlCa_EcalPhiSym*'],
    andOr = True,
    throw = False
)
| 24.545455 | 76 | 0.633333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.57963 |
b4db3df141a7dfd438923171f46933d4cbc0dace | 13,456 | py | Python | {{cookiecutter.project_name}}/core/vertebral.py | rzavarce/cookiecutter-vertebral | 29a72b6bfb5c4ca76b1a36ee1e8ff9e0fedcb421 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/core/vertebral.py | rzavarce/cookiecutter-vertebral | 29a72b6bfb5c4ca76b1a36ee1e8ff9e0fedcb421 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/core/vertebral.py | rzavarce/cookiecutter-vertebral | 29a72b6bfb5c4ca76b1a36ee1e8ff9e0fedcb421 | [
"MIT"
] | null | null | null | import re
import json
import logging
import hmac
import base64
import hashlib
import jsonschema
from uuid import uuid4
from aiohttp import web
from pathlib import Path
from yaml import safe_load
from http import HTTPStatus
from datetime import datetime, timezone
from {{cookiecutter.project_name}}.routes import setup_routes, EXCLUDED_ROUTES
from aiohttp_swagger3 import SwaggerDocs, SwaggerUiSettings
from .models.auth import Auth
from .catalogs.response import CATALOG
# HTTP verbs accepted when registering routes (see Vertebral.load_routes).
METHODS_ALLOWED = ["post", "get"]
# Standard logging.LogRecord attribute names; any attribute on a record
# that is NOT in this set is treated as user-supplied `extra` data when
# serializing log lines (see Vertebral.get_extra_keys).
RESERVED = frozenset(
    (
        "args",
        "asctime",
        "created",
        "exc_info",
        "exc_text",
        "filename",
        "funcName",
        "id",
        "levelname",
        "levelno",
        "lineno",
        "module",
        "msecs",
        "message",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "stack_info",
        "thread",
        "threadName",
    )
)
class Vertebral:
    """Core service helper: YAML config loading, swagger setup, route
    registration, auth/session checks, catalog-based response shaping and
    structured ("extra"-aware) logging."""
    def __init__(self):
        self.config: dict = {}
        self.reserved: frozenset = RESERVED
        self.logger = logging  # the logging module; replaced by a real Logger in getLogger()
        self.exclude_routes: list = EXCLUDED_ROUTES
        self.methods_allowed: list = METHODS_ALLOWED
        self.catalog = CATALOG
        self.prefix: str = ""
    def load_config(self, config_path: Path) -> dict:
        """
        Load config file from a given path.
        -----------------
        Args:
            config_path (Path): Path to config YAML file.
        Returns:
            config (dict): Config file loaded (empty dict if loading failed)
        """
        try:
            with config_path.open() as config_file:
                self.config: dict = safe_load(config_file)
            self.logger.info(f'Config File has been loaded')
        # NOTE(review): bare except also swallows YAML parse errors and
        # anything else; consider narrowing to (OSError, yaml.YAMLError).
        except:
            self.logger.error("Config file no exist, please check config path",
                extra={"config_path": config_path})
        # NOTE(review): set_logger_in_file reads self.config['logger'] —
        # if loading failed above this raises KeyError; confirm intended.
        self.set_logger_in_file()
        return self.config
    def set_swagger_config(self, app):
        """
        Swagger configuration parameters loader
        -----------------
        Args:
            app (web.app): Aiohhtp web app.
        Returns:
            SwaggerDocs: SwaggerDocs configuration loaded
        """
        swagger_config = self.config['swagger']
        return SwaggerDocs(
            app,
            title=swagger_config["title"],
            version=swagger_config["version"],
            swagger_ui_settings=SwaggerUiSettings(
                path=swagger_config["path"],
                layout=swagger_config["layout"],
                deepLinking=swagger_config["deepLinking"],
                displayOperationId=swagger_config["displayOperationId"],
                defaultModelsExpandDepth=swagger_config[
                    "defaultModelsExpandDepth"],
                defaultModelExpandDepth=swagger_config[
                    "defaultModelExpandDepth"],
                defaultModelRendering=swagger_config["defaultModelRendering"],
                displayRequestDuration=swagger_config["displayRequestDuration"],
                docExpansion=swagger_config["docExpansion"],
                filter=swagger_config["filter"],
                showExtensions=swagger_config["showExtensions"],
                showCommonExtensions=swagger_config["showCommonExtensions"],
                supportedSubmitMethods=swagger_config["test"].split(","),
                validatorUrl=swagger_config["validatorUrl"],
                withCredentials=swagger_config["withCredentials"],
            ),
        )
    def load_routes(self, app):
        """
        Register existing routes in the app instance.
        -----------------
        Args:
            app (web.app) : application instance
        Returns:
            No return anythings
        """
        routes = setup_routes()
        final_routes = []
        for route in routes:
            if route[0].lower() in self.methods_allowed:
                # NOTE(review): every allowed route (including "get") is
                # registered with web.post — confirm this is intended.
                final_routes.append(
                    web.post(self.prefix + route[1], route[2]))
            else:
                self.logger.error('Method is not allowed, route no setted',
                                  extra={"route": {
                                      "method": route[0].lower(),
                                      "path": self.prefix + route[1]}})
        app.add_routes(final_routes)
    async def load_initial_auth_data(self, clientdb):
        """
        Seed the authentication collection with initial data via the Auth
        model, if it reports that loading was performed.
        -----------------
        Args:
            clientdb : database client handed to the Auth model
        Returns:
            No return anythings
        """
        auth = Auth(clientdb)
        # NOTE(review): leftover debug prints (Spanish) — consider removing
        # or routing through the logger.
        print()
        print("Entro para chequear los datos en la bbdd")
        print()
        load = await auth.load_initial_data()
        if load:
            print()
            print("cargo la da data inicial")
            print()
            # NOTE(review): logged at error level although it is not an
            # error condition — confirm intended level.
            self.logger.error('Load Initial authentication Data')
        del auth
    def set_path_prefix(self):
        """
        Set path prefix atributte
        -----------------
        Args:
            No accept anythins
        Return:
            prefix (str): Set and retunr path prefix
        """
        app_name = self.config["app_name"]
        version = self.config["version"]
        self.prefix = f'/{app_name}/api/v{version}/'
        return self.prefix
    def is_exclude(self, request):
        """Check if a request is inside in path exclude list.
        Its validate if path request is out of autentification
        -----------------
        Args:
            request (objc): Aiohttp Web Request
        Returns:
            status (bool): Path Validation status
        """
        for pattern in self.exclude_routes:
            if re.fullmatch(pattern, request.path):
                return True
        return False
    def set_response(self, data: dict):
        """ Take a response data, search a key inside Response schema and set
        response data
        -----------------
        Args:
            data (dict): Data Dictionary to set in response
        Returns:
            response (dict): Response data serializered
        """
        key = data['key']
        response = self.catalog.get(key, False)
        # NOTE(review): this mutates the shared CATALOG entry in place —
        # concurrent requests for the same key share one dict.
        if response:
            response["payload"] = data["payload"]
            response["uuid"] = str(uuid4())
        return response
    def set_error_response(self, data: dict):
        """ Take a error data, search a key inside Response schema and set
        response data
        -----------------
        Args:
            data (dict): Data Dictionary to set in response
        Returns:
            response (dict): Response data serializered
        """
        key = data['key']
        response = self.catalog.get(key, False)
        if response:
            response["payload"] = data["payload"]
            response["uuid"] = str(uuid4())
        # NOTE(review): if `key` is not in the catalog, response is False
        # and response["detail"] below raises TypeError — confirm callers
        # only pass known keys.
        self.logger.error(response["detail"], extra=response)
        return web.json_response(
            response,
            status=HTTPStatus.UNPROCESSABLE_ENTITY.value)
    async def validate_schema(self, data, schema):
        """Schemas Request/Response validator
        -----------------
        Args:
            data (json): Json Request/Response object to check.
            schema (dict): Schema Object definition
        Returns:
            status (bool): Schema Validation status
            error_list (list): Errors List if any
        """
        v = jsonschema.Draft7Validator(schema)
        errors = sorted(v.iter_errors(data), key=lambda e: e.path)
        error_list = []
        if errors:
            status = False
            for error in errors:
                error_list.append(error.message)
        else:
            status = True
        return status, error_list
    async def verify_signature(self, signature, api_secret, body_encoded):
        """HMAC-SHA512 request signature validator
        -----------------
        Args:
            signature (str): Header content signature (base64)
            api_secret (bytes): Secret key for the HMAC
            body_encoded (bytes): Body request encoded
        Returns:
            status (bool): Status signature
        """
        signature_hash = hmac.new(api_secret, body_encoded,
                                  hashlib.sha512).digest()
        base64_signature_hash = base64.b64encode(signature_hash).decode()
        # NOTE(review): plain == is not constant-time; prefer
        # hmac.compare_digest(signature, base64_signature_hash).
        if signature == base64_signature_hash:
            return True
        return False
    def verify_token_timeout(self, time_out: int, last_request: datetime):
        """ Check if token is valid, take a time_out and compare delta time of
        last requeste date and return status
        -----------------
        Args:
            time_out (int): Token time out in seconds
            last_request: Unix timestamp (seconds) of the last request
        Returns:
            status (bool): True while the token is still within time_out
        """
        now = datetime.now(tz=timezone.utc)
        dt_object = datetime.fromtimestamp(last_request, tz=timezone.utc)
        delta = now - dt_object
        status = False
        if time_out > delta.total_seconds():
            status = True
        return status
    def set_logger_in_file(self, level=logging.DEBUG):
        """
        Attach a file handler (StackFileHandler) to the current logger when
        the config enables file logging.
        -----------------
        Args:
            level: minimum log level for the file handler
        Return:
            No return anythings
        """
        logger_enable = self.config['logger']['enable']
        if logger_enable:
            logger_file_path = self.config['logger']['logs_file_path']
            logger_handler = StackFileHandler(logger_file_path)
            logger_handler.setLevel(level)
            self.logger.addHandler(logger_handler)
    def getLogger(self, name=None, level=logging.DEBUG, formatter=None):
        """
        Build a Logger with the alternative stream handler
        (StackloggingHandler) and store it on this instance.
        -----------------
        Args:
            name: logger name; level: minimum level; formatter: optional
        Return:
            logger: Logger instance loaded
        """
        logger = logging.getLogger(name)
        logger.setLevel(level)
        logger_handler = StackloggingHandler()
        logger_handler.setLevel(level)
        if formatter:
            logger_handler.setFormatter(formatter)
        logger.addHandler(logger_handler)
        self.logger = logger
        logger.info(f'Log Utility has been setting')
        return logger
    def get_extra_keys(self, record):
        """
        Take a logger record and clean it, only Extra parameters are returned
        -----------------
        Args:
            record (logger.record): Logger record to clean
        Return:
            extra_keys (list): Extra parameter list
        """
        extra_keys = []
        for key, value in record.__dict__.items():
            # Anything not in RESERVED (and not private) was supplied via
            # the `extra=` argument of the logging call.
            if key not in self.reserved and not key.startswith("_"):
                extra_keys.append(key)
        return extra_keys
    def format_stackdriver_json(self, record, message):
        """
        Take a string message and format the new logger record with the correct
        logger format to show
        -----------------
        Args:
            message (str): Logger message string
            record (logger.record): Logger record to clean
        Return:
            str: formatted log line ("[ts] [pid] [level] ... - Extra: {json}")
        """
        date_format = '%Y-%m-%dT%H:%M:%SZ'
        dt = datetime.utcfromtimestamp(record.created).strftime(date_format)
        log_text = f'[{dt}] [{record.process}] [{record.levelname}] ' \
                   f'[{record.filename}:{record.lineno}] ' \
                   f'- Msg: {message} - Extra: '
        payload = {}
        extra_keys = self.get_extra_keys(record)
        for key in extra_keys:
            try:
                # serialization/type error check
                json.dumps(record.__dict__[key])
                payload[key] = record.__dict__[key]
            except TypeError:
                # Non-JSON-serializable extras are stringified instead.
                payload[key] = str(record.__dict__[key])
        dumps = json.dumps(payload)
        return log_text + dumps
class StackloggingHandler(logging.StreamHandler):
    """
    Stream handler that renders each record through Vertebral's
    stackdriver-style formatter (message plus serialized `extra` payload).
    """

    def __init__(self, stream=None):
        # The stream argument is accepted for API compatibility but the
        # default StreamHandler stream is always used.
        super(StackloggingHandler, self).__init__()

    def format(self, record):
        """
        Render *record*: the base-formatted message followed by its
        JSON-serialized extra fields.
        """
        base_message = super(StackloggingHandler, self).format(record)
        return Vertebral().format_stackdriver_json(record, base_message)
class StackFileHandler(logging.FileHandler):
    """
    File handler that renders each record through Vertebral's
    stackdriver-style formatter (message plus serialized `extra` payload).
    """

    def format(self, record):
        """
        Render *record*: the base-formatted message followed by its
        JSON-serialized extra fields.
        """
        base_message = super(StackFileHandler, self).format(record)
        return Vertebral().format_stackdriver_json(record, base_message)
| 30.306306 | 80 | 0.56198 | 12,457 | 0.925758 | 0 | 0 | 0 | 0 | 2,026 | 0.150565 | 5,757 | 0.427839 |
b4dc6174210355b1a5c016150eeb213a4732eda6 | 669 | py | Python | 2021/day14/1.py | tomhel/AoC_2019 | c76c34235821864bc763f85d43cbcbfb9ed43469 | [
"MIT"
] | 1 | 2021-12-07T13:18:52.000Z | 2021-12-07T13:18:52.000Z | 2021/day14/1.py | tomhel/AoC | c76c34235821864bc763f85d43cbcbfb9ed43469 | [
"MIT"
] | null | null | null | 2021/day14/1.py | tomhel/AoC | c76c34235821864bc763f85d43cbcbfb9ed43469 | [
"MIT"
] | null | null | null | def load():
with open("input") as f:
yield next(f).strip()
next(f)
for x in f:
yield x.strip().split(" -> ")
def pair_insertion():
    """Apply 10 rounds of pair insertion and return the count of the most
    common element minus the count of the least common."""
    data = list(load())
    polymer, rules = list(data[0]), dict(data[1:])
    for _ in range(10):
        expanded = [polymer[0]]
        # For each adjacent pair, insert the ruled element between them.
        for left, right in zip(polymer, polymer[1:]):
            expanded.append(rules[left + right])
            expanded.append(right)
        polymer = expanded
    counts = {}
    for element in polymer:
        counts[element] = counts.get(element, 0) + 1
    return max(counts.values()) - min(counts.values())
# Part 1 answer: most-common minus least-common element count after 10 steps.
print(pair_insertion())
| 21.580645 | 61 | 0.539611 | 0 | 0 | 148 | 0.221226 | 0 | 0 | 0 | 0 | 13 | 0.019432 |
b4ddeba8f2836d4bda03deca00a36f4a1066fd37 | 85 | py | Python | dev/cluster/__init__.py | ustcml/TorchML | c950fcaaaf2eaf4e85237894d5b12c20bd383538 | [
"MIT"
] | null | null | null | dev/cluster/__init__.py | ustcml/TorchML | c950fcaaaf2eaf4e85237894d5b12c20bd383538 | [
"MIT"
] | null | null | null | dev/cluster/__init__.py | ustcml/TorchML | c950fcaaaf2eaf4e85237894d5b12c20bd383538 | [
"MIT"
] | null | null | null | # Author: Jintao Huang
# Email: hjt_study@qq.com
# Date:
from .KMeans import KMeans
| 17 | 26 | 0.729412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.647059 |
b4df6ffe444c3bcefc05dd30c22bd546ed8ca77a | 5,000 | py | Python | mandelbrot.py | parkman217/python-mandelbrot | 2e36a6a9a79edf2333644ec1ab89175bae35a991 | [
"MIT"
] | null | null | null | mandelbrot.py | parkman217/python-mandelbrot | 2e36a6a9a79edf2333644ec1ab89175bae35a991 | [
"MIT"
] | null | null | null | mandelbrot.py | parkman217/python-mandelbrot | 2e36a6a9a79edf2333644ec1ab89175bae35a991 | [
"MIT"
] | null | null | null | import pygame, sys, cmath, time, math
def get_mandlebrot(x, y):
    """Return the escape fraction of point (x, y) under the Mandelbrot
    iteration: (iterations until |z| > 2) / max_iter, or 1 if the point
    stays bounded for all max_iter iterations.
    """
    c = complex(x, y)
    z = complex(0, 0)
    max_iter = 100
    for iteration in range(max_iter):
        z = z * z + c
        # abs(z) is the modulus r; the previous cmath.polar call also
        # computed the angle phi, which was never used.
        if abs(z) > 2:
            return iteration / max_iter
    return 1
def blend_colors(first, second, percent):
    """Linearly interpolate two equal-length color sequences:
    0 -> first, 1 -> second; returns a list of blended components."""
    blended = []
    for idx, component in enumerate(first):
        blended.append(component * (1 - percent) + second[idx] * percent)
    return blended
def draw_screen(screen, bot_left, top_right, screen_width, divisions, curr_div_loc):
    """Draw one grid cell of the Mandelbrot view onto *screen*.

    The view is split into 2**divisions boxes per side; the cell at grid
    position curr_div_loc is colored by sampling the set at the cell's
    bottom-left coordinate and painted as a rectangle (or a single pixel
    once the grid reaches full resolution).
    """
    #bot_left and top_right are coords in space to draw the set
    #curr_div_loc is x, y coords in grid to split in half and draw (zero indexed)
    boxes = 2**(divisions)
    brot_coord_width = top_right[0] - bot_left[0]
    brot_coords_x = brot_coord_width*(curr_div_loc[0])/boxes+bot_left[0]#bot left of the box
    brot_coord_height = top_right[1] - bot_left[1]
    brot_coords_y = brot_coord_height*(curr_div_loc[1])/boxes+bot_left[1]#bot left of the box
    brot = get_mandlebrot(brot_coords_x, brot_coords_y)#sample at bot left of the box
    colors = [(0, 0, 0), (0, 0, 255), (0, 255, 255), (255, 255, 255)]#cold to hot
    if brot == 1:
        # Points inside the set are drawn black.
        color = (0, 0, 0)
    else:
        # Map the (sqrt-scaled) escape fraction onto the color gradient:
        # pick the pair of adjacent colors and blend within the pair.
        color_pair_indices = len(colors)-2#-1 for index -1 for needing to be pairs
        percentage = (brot)**0.5
        first_idx = math.floor(percentage*color_pair_indices)
        percent_gradient = percentage*color_pair_indices-first_idx
        color = blend_colors(colors[first_idx], colors[first_idx+1], percent_gradient)
    if boxes == screen_width:
        # Full resolution: one grid cell == one pixel.
        screen.set_at((curr_div_loc[0], curr_div_loc[1]), color)
    else:
        rect_width = math.ceil(screen_width / boxes)
        top_left = math.ceil(screen_width*(curr_div_loc[0])/boxes), math.ceil(screen_width*(curr_div_loc[1])/boxes)
        rect = pygame.Rect((top_left), (rect_width, rect_width))
        pygame.draw.rect(screen, color, rect)
def draw_next(screen, bot_left, top_right, screen_width, divisions, curr_div_loc):
    """Draw the current cell, then return the (position, divisions) to use
    for the next call: advance across the row, then to the next row, then
    restart at double resolution; (False, False) once fully refined."""
    draw_screen(screen, bot_left, top_right, screen_width, divisions, curr_div_loc)
    boxes = 2**(divisions)
    if curr_div_loc[0] != boxes:
        # Move right within the current row.
        return (curr_div_loc[0] + 1, curr_div_loc[1]), divisions
    if curr_div_loc[1] != boxes:
        # Row finished: start the next row.
        return (0, curr_div_loc[1] + 1), divisions
    if boxes == screen_width:
        # Pixel resolution reached: nothing left to refine.
        return False, False
    # Grid finished: restart at the next subdivision level.
    return (0, 0), divisions + 1
def run():
    """Interactive Mandelbrot viewer: progressively refines the image on a
    pygame window; left-click zooms in toward the cursor, right-click zooms
    out, Escape/close quits."""
    pygame.init()
    screen_size = 512#must be a power of 2
    screen = pygame.display.set_mode((screen_size, screen_size), 0, 32)
    surf = pygame.Surface((screen_size, screen_size))#surface to hold the values
    running = True
    divisions = 0
    curr_pos = (0, 0)
    desired_fps = 5
    latency = 1/float(desired_fps)
    # Current complex-plane window: x (real) and y (imaginary) ranges.
    brot_xs = [-2, 2]
    brot_ys = [-2, 2]
    last_frame_time = time.time()
    zoom_percent = 0.5#[0, 1] lower the faster
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    running = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # Zoom: shrink (button 1) or grow (button 3) the window,
                # weighted toward the clicked position.
                pos = pygame.mouse.get_pos()
                old_size = (brot_xs[1] - brot_xs[0], brot_ys[1] - brot_ys[0])#width, height
                reduction_amounts = (old_size[0]*(1-zoom_percent), old_size[1]*(1-zoom_percent))
                percent_pos = (pos[0]/screen_size, pos[1]/screen_size)
                if event.button == 1:
                    brot_xs[0]+=reduction_amounts[0]*percent_pos[0]
                    brot_xs[1]-=reduction_amounts[0]*(1-percent_pos[0])
                    brot_ys[0]+=reduction_amounts[1]*percent_pos[1]
                    brot_ys[1]-=reduction_amounts[1]*(1-percent_pos[1])
                    curr_pos = (0, 0)#reset drawing
                    divisions = 0
                elif event.button == 3:
                    brot_xs[0]-=(reduction_amounts[0])*percent_pos[0]
                    brot_xs[1]+=(reduction_amounts[0])*(1-percent_pos[0])
                    brot_ys[0]-=(reduction_amounts[1])*percent_pos[1]
                    brot_ys[1]+=(reduction_amounts[1])*(1-percent_pos[1])
                    curr_pos = (0, 0)#reset drawing
                    divisions = 0
        last_frame_time = time.time()#reset the last frame time because bout to draw a frame
        # Refine as many cells as fit into this frame's time budget;
        # curr_pos becomes False once the image is fully refined.
        while curr_pos != False and time.time() - last_frame_time < latency:
            curr_pos, divisions = draw_next(surf, (brot_xs[0], brot_ys[0]), (brot_xs[1], brot_ys[1]), screen_size, divisions, curr_pos)
        screen.blit(surf, (0, 0))
        pygame.display.update()
    pygame.quit()
    sys.exit()
# Launch the interactive viewer when executed as a script.
if __name__ == '__main__':
    run()
| 38.167939 | 135 | 0.6078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.1122 |
b4df7dcc36490cc477f25560dc32bb1832158dd7 | 835 | py | Python | example-query.py | ericwhyne/http-ricochet | bd8edb8591047e00a45727457fd09c089f591836 | [
"BSD-3-Clause"
] | 19 | 2015-05-06T16:45:50.000Z | 2020-07-31T10:26:17.000Z | example-query.py | ericwhyne/http-ricochet | bd8edb8591047e00a45727457fd09c089f591836 | [
"BSD-3-Clause"
] | 2 | 2015-05-06T17:00:33.000Z | 2015-07-29T19:51:58.000Z | example-query.py | ericwhyne/http-ricochet | bd8edb8591047e00a45727457fd09c089f591836 | [
"BSD-3-Clause"
] | 3 | 2015-05-06T23:16:00.000Z | 2019-08-13T15:09:44.000Z | #!/usr/bin/python
import urllib2
import random
# A list of places we've deployed ricochet
ricochet_servers = [
"http://127.0.0.1:8080/ricochet/ricochet?url=",
"http://127.0.0.1:8080/ricochet/ricochet?url="
]
# We're identifying ourselves to ourself here, this will show up in the server logs (unless you've disabled them).
headers = { 'User-Agent' : 'Its me!' }
# Pick a random server, build the query, then make the query.
ricochet_server = random.choice(ricochet_servers)
content_type = "&ct=text/html"
url = "http://news.ycombinator.com"
# use urllib2.quote if your url contains parameters, the ricochet proxy will unquote before making the request
# url = urllib2.quote("https://news.ycombinator.com/newest?n=31")
query = ricochet_server + url + content_type
print urllib2.urlopen(urllib2.Request(query, None, headers)).read()
| 36.304348 | 114 | 0.747305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0.677844 |
b4e199847b7a19eae86fecd3be217454cfd5183f | 2,054 | py | Python | Week-4/Prob4h.py | jabhij/MITx-6.00.1x-PYTHON | b9650ec0829796f4f312bd46db16e5e30df605ee | [
"MIT"
] | 2 | 2016-09-20T16:30:49.000Z | 2016-09-20T16:31:04.000Z | Week-4/Prob4h.py | jabhij/MITx-6.00.1x-Python- | b9650ec0829796f4f312bd46db16e5e30df605ee | [
"MIT"
] | null | null | null | Week-4/Prob4h.py | jabhij/MITx-6.00.1x-Python- | b9650ec0829796f4f312bd46db16e5e30df605ee | [
"MIT"
] | null | null | null | def compChooseWord(hand, wordList, n):
"""
Given a hand and a wordList, find the word that gives
the maximum value score, and return it.
This word should be calculated by considering all the words
in the wordList.
If no words in the wordList can be made from the hand, return None.
hand: dictionary (string -> int)
wordList: list (string)
returns: string or None
"""
maxScore = 0
bestWord = None
for word in wordList:
if isValidWord(word, hand, wordList) == True:
wordScore = getWordScore(word, n)
if wordScore > maxScore:
maxScore = wordScore
bestWord = word
return bestWord
def compPlayHand(hand, wordList, n):
    """
    Allows the computer to play the given hand, following the same procedure
    as playHand, except instead of the user choosing a word, the computer
    chooses it.
    1) The hand is displayed.
    2) The computer chooses a word.
    3) After every valid word: the word and the score for that word is
    displayed, the remaining letters in the hand are displayed, and the
    computer chooses another word.
    4) The sum of the word scores is displayed when the hand finishes.
    5) The hand finishes when the computer has exhausted its possible
    choices (i.e. compChooseWord returns None).
    """
    # calculateHandlen, displayHand, getWordScore and updateHand are defined
    # elsewhere in this assignment module.
    totalScore = 0
    while calculateHandlen(hand) > 0:
        print ('Current Hand: ' ),
        displayHand(hand)
        word = compChooseWord(hand, wordList, n)
        # None means no playable word remains: report the total and stop.
        if word == None:
            print ('Total score: ' + str(totalScore) + ' points.')
            break
        else:
            totalScore = getWordScore(word, n) + totalScore
            print ('"' + str(word) + '"' + ' earned ' + str(getWordScore(word, n)) + ' points. Total: ' + str(totalScore) + ' points')
            hand = updateHand(hand, word)
        # Runs each iteration: prints the final total once the hand empties,
        # otherwise a blank separator line.
        if calculateHandlen(hand) == 0:
            print ('Total score: ' + str(totalScore) + ' points.')
        else:
            print (' ')
| 31.6 | 135 | 0.608569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,097 | 0.53408 |
b4e36d17dc528299318c8cd10e36c599f350d3dc | 1,172 | py | Python | annofabcli/filesystem/subcommand_filesystem.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 9 | 2019-07-22T23:54:05.000Z | 2020-11-05T06:26:04.000Z | annofabcli/filesystem/subcommand_filesystem.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 389 | 2019-07-03T04:39:11.000Z | 2022-03-28T14:06:11.000Z | annofabcli/filesystem/subcommand_filesystem.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 1 | 2021-08-30T14:22:04.000Z | 2021-08-30T14:22:04.000Z | import argparse
from typing import Optional
import annofabcli
import annofabcli.common.cli
import annofabcli.filesystem.draw_annotation
import annofabcli.filesystem.filter_annotation
import annofabcli.filesystem.mask_user_info
import annofabcli.filesystem.merge_annotation
import annofabcli.filesystem.write_annotation_image
def parse_args(parser: argparse.ArgumentParser):
    """Register every filesystem subcommand on *parser*."""
    subparsers = parser.add_subparsers()
    # Register the subcommand modules in the same order as before.
    subcommand_modules = (
        annofabcli.filesystem.draw_annotation,
        annofabcli.filesystem.filter_annotation,
        annofabcli.filesystem.mask_user_info,
        annofabcli.filesystem.merge_annotation,
        annofabcli.filesystem.write_annotation_image,
    )
    for module in subcommand_modules:
        module.add_parser(subparsers)
def add_parser(subparsers: Optional[argparse._SubParsersAction] = None):
    """Create the ``filesystem`` subcommand parser and attach its children."""
    # The same text serves as both the short help and the long description.
    help_text = "ファイル操作関係(Web APIにアクセスしない)のサブコマンド"
    parser = annofabcli.common.cli.add_parser(
        subparsers,
        "filesystem",
        help_text,
        help_text,
        is_subcommand=False,
    )
    parse_args(parser)
    return parser
| 33.485714 | 86 | 0.817406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.162016 |
b4e4e45949d2b3e2692bb5ce21125c0005114be5 | 780 | py | Python | buildmsi.py | vivainio/pylauncher-import | 584b7127bfcde114a55188f1edafdd768213e51e | [
"BSD-2-Clause"
] | null | null | null | buildmsi.py | vivainio/pylauncher-import | 584b7127bfcde114a55188f1edafdd768213e51e | [
"BSD-2-Clause"
] | null | null | null | buildmsi.py | vivainio/pylauncher-import | 584b7127bfcde114a55188f1edafdd768213e51e | [
"BSD-2-Clause"
] | 1 | 2021-11-09T02:37:35.000Z | 2021-11-09T02:37:35.000Z | import getpass
import os
import sys
VER = '1.0.1.7'
VERSION = 'Version=%s' % VER
MANUFACTURER = 'Manufacturer=Vinay Sajip'
X86 = 'Platform=x86'
X64 = 'Platform=x64'
TOWIN = 'ToWindows'
def main():
    """Build the docs, then the launchwin/launcher MSI packages for x86 and x64."""
    signpwd = getpass.getpass('Password for signing:')
    import builddoc
    builddoc.main()
    os.environ['SIGNPWD'] = signpwd
    import makemsi
    # Same build order as before: both packages for x86, then both for x64.
    for platform in (X86, X64):
        makemsi.main(['-o', 'launchwin-%s' % VER, platform, VERSION, MANUFACTURER, TOWIN, 'launcher'])
        makemsi.main(['-o', 'launcher-%s' % VER, platform, VERSION, MANUFACTURER, 'launcher'])
# Script entry point; main() returns None, so the process exits with status 0.
if __name__ == '__main__':
    sys.exit(main())
b4e639e99b0548c2da81c255e5c8a218300c3b35 | 2,736 | py | Python | frameworks/Python/spyne/gen_benchmark_config.py | xsoheilalizadeh/FrameworkBenchmarks | 855527008f7488e4fd508d1e72dfa9953874a2c6 | [
"BSD-3-Clause"
] | 5,300 | 2015-01-02T08:04:20.000Z | 2022-03-31T10:08:33.000Z | frameworks/Python/spyne/gen_benchmark_config.py | xsoheilalizadeh/FrameworkBenchmarks | 855527008f7488e4fd508d1e72dfa9953874a2c6 | [
"BSD-3-Clause"
] | 3,075 | 2015-01-01T05:11:45.000Z | 2022-03-31T23:56:33.000Z | frameworks/Python/spyne/gen_benchmark_config.py | xsoheilalizadeh/FrameworkBenchmarks | 855527008f7488e4fd508d1e72dfa9953874a2c6 | [
"BSD-3-Clause"
] | 2,151 | 2015-01-02T14:16:09.000Z | 2022-03-30T00:15:26.000Z | #!/usr/bin/env python
from __future__ import print_function
import json
from spyne import AnyUri, Unicode, ComplexModel, M, UnsignedInteger16, Array
from spyne.protocol.json import JsonDocument
from spyne.util.dictdoc import get_object_as_dict
class BenchmarkConfigElement(ComplexModel):
    """Spyne model for one entry of the TechEmpower ``benchmark_config.json``.

    ``M(...)`` marks a field mandatory; fields with ``default=`` are filled
    automatically when omitted.
    """
    # exclude this from the output document
    key = Unicode(pa={JsonDocument: dict(exc=True)})
    display_name = M(Unicode)
    notes = Unicode
    versus = Unicode
    # Endpoints exercised by the benchmark harness.
    db_url = AnyUri
    json_url = AnyUri
    query_url = AnyUri
    fortune_url = AnyUri
    update_url = AnyUri
    plaintext_url = AnyUri
    port = M(UnsignedInteger16(default=8080))
    # Closed vocabularies as required by the benchmark schema.
    approach = M(Unicode(values=['Realistic', 'Stripped'], default='Realistic'))
    classification = M(Unicode(values=['Micro', 'Fullstack', 'Platform'], default='Micro'))
    database = M(Unicode(values=['none', 'mongodb', 'postgres', 'mysql'], default='none'))
    orm = M(Unicode(values=['Full', 'Micro', 'None', 'Raw']))
    framework = M(Unicode)
    language = M(Unicode)
    flavor = M(Unicode)
    platform = M(Unicode)
    webserver = M(Unicode)
    os = M(Unicode(default='Linux'))
    database_os = M(Unicode(default='Linux'))
class BenchmarkConfig(ComplexModel):
    """Top-level document: a framework name plus its list of test entries."""
    framework = M(Unicode)
    tests = Array(BenchmarkConfigElement, wrapped=False)
def gen_raw_test():
    """Return a fresh config element for the raw-SQL (no ORM) test variant.

    Converted from a ``lambda`` assigned to a name to a plain ``def``
    (PEP 8 E731); callers still invoke it as ``gen_raw_test()``.
    """
    return BenchmarkConfigElement(
        display_name="Spyne RAW",
        db_url="/dbsraw",
        query_url="/dbraw?queries=",
        fortune_url="/fortunesraw",
        update_url="/raw-updates?queries=",
        orm='Raw',
    )
def gen_normal_test():
    """Return a fresh config element for the full-ORM test variant.

    Converted from a ``lambda`` assigned to a name to a plain ``def``
    (PEP 8 E731); callers still invoke it as ``gen_normal_test()``.
    """
    return BenchmarkConfigElement(
        display_name="Spyne ORM",
        db_url="/dbs",
        query_url="/db?queries=",
        fortune_url="/fortunes",
        update_url="/updatesraw?queries=",
        orm='Full',
    )
def add_common(bc):
    """Fill in the attributes shared by every benchmark entry on *bc*.

    Mutates *bc* in place and returns it so calls can be chained.
    """
    shared_attributes = {
        'port': 8080,
        'approach': "Realistic",
        'classification': "Micro",
        'database': "postgres",
        'framework': "spyne",
        'language': "Python",
        'platform': "Spyne",
        'webserver': "None",
        'os': "Linux",
        'database_os': "Linux",
        'versus': "wsgi",
        'plaintext_url': "/plaintext",
    }
    for name, value in shared_attributes.items():
        setattr(bc, name, value)
    return bc
# Build the full benchmark_config.json document: one ORM test and one raw
# test per Python flavor, each tagged with the next key from `keys`.
config = BenchmarkConfig(framework='spyne', tests=[])
keys = iter(['default', 'raw', 'py3orm', 'py3raw'])
for flav in ['CPython', 'Python3']:
    bc = add_common(gen_normal_test())
    bc.flavor = flav
    bc.key = next(keys)
    config.tests.append(bc)
    bc = add_common(gen_raw_test())
    bc.flavor = flav
    bc.key = next(keys)
    config.tests.append(bc)
data = get_object_as_dict(config, complex_as=dict)
# The benchmark format wants each test wrapped in a one-key object keyed by
# the test's key.
data['tests'] = [{d['key']: d} for d in data['tests']]
data = json.dumps(data, indent=2, sort_keys=True, separators=(',', ': '))
# NOTE(review): json.dumps returns str but the file is opened in binary
# ('wb') mode -- under Python 3 this write raises TypeError, so this line
# presumably targets Python 2; confirm before running on Python 3. The
# handle is also never closed explicitly.
open('benchmark_config.json', 'wb').write(data)
print(data)
| 25.333333 | 91 | 0.663743 | 1,063 | 0.388523 | 0 | 0 | 0 | 0 | 0 | 0 | 548 | 0.200292 |
b4e6b497acf3eb5e2c7697b0aabc3286bf901091 | 2,557 | py | Python | craedl/errors.py | AnotherGroupChat/craedl-sdk-python | 57eec7ad4013e693740ad40190d5817984721fde | [
"Apache-2.0"
] | null | null | null | craedl/errors.py | AnotherGroupChat/craedl-sdk-python | 57eec7ad4013e693740ad40190d5817984721fde | [
"Apache-2.0"
] | null | null | null | craedl/errors.py | AnotherGroupChat/craedl-sdk-python | 57eec7ad4013e693740ad40190d5817984721fde | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class CraedlException(Exception):
    """Base exception that all other exceptions should inherit from.

    Subclasses set ``self.message`` in their own ``__init__``; the shared
    ``__str__`` renders it.
    """
    def __init__(self):
        self.message = "Craedl Error."
    def __str__(self):
        return self.message
class Connection_Refused_Error(CraedlException):
    """Raised when a connection to the Craedl API cannot be established."""
    def __init__(self):
        self.message = 'Failed to establish a connection to https://api.craedl.org.'
class Invalid_Token_Error(CraedlException):
    """Raised when the configured authentication token is rejected."""
    def __init__(self):
        self.message = 'Your configured authentication token is invalid.\n'
        self.message += '    Use `python -m craedl` to configure your authentication token.'
class Missing_Token_Error(CraedlException):
    """Raised when no authentication token has been configured."""
    def __init__(self):
        self.message = 'You have not configured an authentication token.\n'
        self.message += '    Use `python -m craedl` to configure your authentication token.'
class Not_Found_Error(CraedlException):
    """Raised when the requested resource does not exist (HTTP 404-style)."""
    def __init__(self):
        self.message = 'The requested resource was not found.'
class Other_Error(CraedlException):
    """Catch-all for response codes not yet mapped to a dedicated class."""
    def __init__(self):
        self.message = 'New error encountered. Determine the response error code and create a new error class.'
class Parse_Error(CraedlException):
    """Raised when a request contains invalid parameters.

    :param details: optional human-readable explanation appended to the message.
    """
    def __init__(self, details=None):
        self.message = 'Your request included invalid parameters.'
        self.details = details
    def __str__(self):
        # Bug fix: the default details=None previously made this raise
        # TypeError (str + None) instead of printing the error.
        if self.details is None:
            return self.message
        return self.message + ' ' + self.details
class File_Error(CraedlException):
    """Raised when attempting to upload an empty file.

    :param details: optional file name/path appended to the message.
    """
    def __init__(self, details=None):
        self.message = 'Cannot upload an empty file.'
        self.details = details
    def __str__(self):
        # Bug fix: the default details=None previously made this raise
        # TypeError (str + None) instead of printing the error.
        if self.details is None:
            return self.message
        return self.message + ' file: ' + self.details
class Server_Error(CraedlException):
    """Raised when the Craedl API reports a server-side error.

    :param details: optional extra context. Previously accepted but silently
        discarded; now stored on ``self.details`` for consistency with
        ``Parse_Error`` and ``File_Error``.
    """
    def __init__(self, details=None):
        self.message = 'The server at https://api.craedl.org has encountered an error.'
        self.details = details
class Unauthorized_Error(CraedlException):
    """Raised when access to the requested resource is denied (HTTP 401/403-style)."""
    def __init__(self):
        self.message = 'You are not authorized to access the requested resource.'
| 36.528571 | 111 | 0.717247 | 1,932 | 0.755573 | 0 | 0 | 0 | 0 | 0 | 0 | 1,292 | 0.50528 |
b4e8ec3e073f72df115d2e467a43a5e057d8d890 | 35 | py | Python | slack_bolt/response/__init__.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | 160 | 2019-09-27T18:02:03.000Z | 2022-03-15T23:46:40.000Z | slack_bolt/response/__init__.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | 2 | 2019-10-21T13:30:17.000Z | 2019-10-30T00:09:11.000Z | slack_bolt/response/__init__.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | 31 | 2019-10-19T18:10:23.000Z | 2022-02-28T14:13:19.000Z | from .response import BoltResponse
| 17.5 | 34 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4e905f07ef9267e0151e885d12dea14423eaf4d | 1,063 | py | Python | source/miscellaneous/test_get_sub_dir_dates.py | youdar/usesul_functions | 7cca9f8e241f2334f9eb0eab46d40b4c109e8518 | [
"MIT"
] | null | null | null | source/miscellaneous/test_get_sub_dir_dates.py | youdar/usesul_functions | 7cca9f8e241f2334f9eb0eab46d40b4c109e8518 | [
"MIT"
] | null | null | null | source/miscellaneous/test_get_sub_dir_dates.py | youdar/usesul_functions | 7cca9f8e241f2334f9eb0eab46d40b4c109e8518 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
from get_sub_dir_dates import get_sub_dir_dates
from get_table_hdfs_location import get_table_hdfs_location
import unittest
import sys
__author__ = 'youval.dar'
class CollectDates(unittest.TestCase):
    """Smoke test for get_sub_dir_dates (Python 2 -- uses print statements)."""
    def test_get_sub_dir_dates(self):
        # Print the running test's name for log readability.
        print sys._getframe().f_code.co_name
        # Resolve the table's HDFS location, then list its partition dates.
        acme_table_name = 'youval_db.acme_with_account_info'
        dr = get_table_hdfs_location(acme_table_name,print_out=False)
        dates = list(get_sub_dir_dates(dr))
        # NOTE(review): only prints the first date; no assertions are made,
        # so this "test" passes as long as nothing raises.
        print dates[0]
def run_selected_tests():
    """ Run selected tests
    1) List in "tests" the names of the particular tests you want to run
    2) Comment out unittest.main()
    3) Un-comment unittest.TextTestRunner().run(run_selected_tests())
    """
    # Bug fix: the original built the suite from an undefined ``MyTestCase``
    # (NameError) with placeholder test names that do not exist. Use the
    # test case defined in this module and its real test method.
    tests = ['test_get_sub_dir_dates']
    suite = unittest.TestSuite(map(CollectDates, tests))
    return suite
if __name__ == '__main__':
    # To run only the selected tests, comment out unittest.main() and
    # un-comment the TextTestRunner line below:
    # unittest.TextTestRunner().run(run_selected_tests())
    # Run all tests discovered in this module:
    unittest.main()
| 27.25641 | 69 | 0.732832 | 320 | 0.301035 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.391345 |
b4e98bbdc6436cd505b553e475e6aa9b86055856 | 5,831 | py | Python | torchreid/utils/tools.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
] | 1 | 2020-07-07T19:22:17.000Z | 2020-07-07T19:22:17.000Z | torchreid/utils/tools.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
] | 1 | 2020-06-04T15:22:09.000Z | 2020-06-04T15:22:09.000Z | torchreid/utils/tools.py | opencv/deep-person-reid | ccc305614e968d4b64cc7d4b6664eb42267e6250 | [
"MIT"
] | 4 | 2020-07-02T09:23:11.000Z | 2020-08-21T08:24:13.000Z | # Copyright (c) 2018-2021 Kaiyang Zhou
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2018 davidtvs
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2018 Facebook
# SPDX-License-Identifier: MIT
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from __future__ import absolute_import, division, print_function
import copy
import errno
import os
import os.path as osp
import random
import subprocess
import numpy as np
import torch
import cv2 as cv
# Public API of this module.
__all__ = [
    'mkdir_if_missing', 'check_isfile', 'set_random_seed', "worker_init_fn",
    'read_image', 'get_model_attr', 'StateCacher', 'random_image', 'EvalModeSetter',
    'get_git_revision', 'set_model_attr'
]
def get_git_revision():
    """Return ``(commit_sha, branch_name)`` for the repo containing this file.

    Raises ``subprocess.CalledProcessError`` if the directory is not inside
    a git checkout.
    """
    repo_dir = os.path.abspath(os.path.dirname(__file__))

    def _git_output(cmd):
        # Run a git command inside the repo and return its trimmed output.
        return subprocess.check_output(cmd, cwd=repo_dir).decode('ascii').strip()

    sha_cmd = ['git', 'rev-parse', 'HEAD']
    branch_cmd = ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
    return (_git_output(sha_cmd), _git_output(branch_cmd))
def mkdir_if_missing(dirname):
    """Create *dirname* (including parents) if it does not exist yet.

    Uses the stdlib ``exist_ok`` flag instead of the old manual
    check-then-create with an EEXIST-swallowing ``except`` -- same effect,
    without the race between the existence check and the creation.

    Args:
        dirname (str): directory path to create.
    """
    os.makedirs(dirname, exist_ok=True)
def check_isfile(fpath):
    """Return True if the given path points to an existing regular file.

    Args:
        fpath (str): file path.

    Returns:
        bool
    """
    return osp.isfile(fpath)
def set_random_seed(seed, deterministic=False):
    """Seed every RNG used by the project (python, numpy, torch, CUDA).

    Args:
        seed (int): seed value applied to all generators.
        deterministic (bool): if True, also force deterministic cuDNN kernels
            (slower, but reproducible).
    """
    # Seed the Python-level generators first; the streams are independent,
    # so the order relative to torch does not matter.
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Seed torch on the CPU and on every visible CUDA device.
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        # Trade autotuned speed for reproducible cuDNN behavior.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def worker_init_fn(worker_id):
    """Give each worker a distinct, reproducible RNG state.

    Derives the per-worker seeds from the current global seed state plus the
    worker index, so parallel workers do not draw identical samples.
    """
    numpy_base = np.random.get_state()[1][0]
    python_base = random.getstate()[1][0]
    np.random.seed(numpy_base + worker_id)
    random.seed(python_base + worker_id)
def read_image(path, grayscale=False):
    """Reads image from path using ``Open CV``.

    Args:
        path (str): path to an image.
        grayscale (bool): load grayscale image.
            NOTE(review): this flag is accepted but never used -- the image
            is always loaded with IMREAD_COLOR and converted to RGB.

    Returns:
        Numpy image (RGB channel order).
    """
    got_img = False
    if not osp.exists(path):
        raise IOError(f'"{path}" does not exist')
    # Retry loop: keeps trying until the read succeeds. NOTE(review): a
    # persistent IOError spins forever, and if cv.imread returns None (e.g.
    # a corrupt file) cvtColor raises cv.error, which is NOT caught here.
    while not got_img:
        try:
            img = cv.cvtColor(cv.imread(path, cv.IMREAD_COLOR), cv.COLOR_BGR2RGB)
            got_img = True
        except IOError:
            print(f'IOError occurred when reading "{path}".')
    return img
def random_image(height, width):
    """Return a random RGB image of shape ``(height, width, 3)``, dtype uint8."""
    noise = np.random.rand(height, width, 3).astype(np.float32)
    return np.uint8(noise * 255)
def get_model_attr(model, attr):
    """Read *attr* from *model*, unwrapping a ``.module`` wrapper
    (e.g. DataParallel) first when one is present."""
    target = model.module if hasattr(model, 'module') else model
    return getattr(target, attr)
def set_model_attr(model, attr, value):
    """Set *attr* on *model*, unwrapping a ``.module`` wrapper first.

    When the target exposes ``nncf_module``, the attribute is also set on
    that wrapped module so both copies stay in sync.
    """
    target = model.module if hasattr(model, 'module') else model
    if hasattr(target, 'nncf_module'):
        setattr(target.nncf_module, attr, value)
    setattr(target, attr, value)
class StateCacher:
    """Cache state dicts either in RAM or on disk.

    With ``in_memory=True`` a deep copy of each stored object is kept in a
    dict; otherwise each object is serialized with ``torch.save`` into
    ``cache_dir`` (default: the system temp directory) and reloaded on
    retrieval. On-disk files are cleaned up when the instance is destroyed.
    """
    def __init__(self, in_memory, cache_dir=None):
        self.in_memory = in_memory
        if cache_dir is None:
            import tempfile
            cache_dir = tempfile.gettempdir()
        elif not os.path.isdir(cache_dir):
            raise ValueError("Given `cache_dir` is not a valid directory.")
        self.cache_dir = cache_dir
        # key -> deep-copied object (in-memory mode) or file path (disk mode)
        self.cached = {}
    def store(self, key, state_dict):
        """Remember *state_dict* under *key* (deep copy or on-disk file)."""
        if self.in_memory:
            self.cached[key] = copy.deepcopy(state_dict)
            return
        # The file name embeds id(self) so concurrent cachers do not collide.
        fn = os.path.join(self.cache_dir, f"state_{key}_{id(self)}.pt")
        self.cached[key] = fn
        torch.save(state_dict, fn)
    def retrieve(self, key):
        """Return the state stored under *key*; KeyError if never stored."""
        if key not in self.cached:
            raise KeyError(f"Target {key} was not cached.")
        entry = self.cached[key]
        if self.in_memory:
            return entry
        if not os.path.exists(entry):
            raise RuntimeError(
                f"Failed to load state in {entry}. File doesn't exist anymore."
            )
        # map_location keeps tensors on their original (CPU) storage.
        return torch.load(entry, map_location=lambda storage, location: storage)
    def __del__(self):
        """Remove any cache files this instance left behind in `cache_dir`."""
        if self.in_memory:
            return
        for path in self.cached.values():
            if os.path.exists(path):
                os.remove(path)
class EvalModeSetter:
    """Context manager that puts selected child modules into eval mode.

    On entry, every submodule of the given module(s) whose type matches
    ``m_type`` is switched to eval mode (``train(False)``); its previous
    ``training`` flag is remembered. On exit, the remembered flags are
    restored. Both ``module`` and ``m_type`` accept a single value or a
    tuple/list of values.
    """
    def __init__(self, module, m_type):
        # Normalize both arguments to lists so __enter__/__exit__ can iterate.
        self.modules = module
        if not isinstance(self.modules, (tuple, list)):
            self.modules = [self.modules]
        # One {child_name: previous_training_flag} dict per top-level module.
        self.modes_storage = [{} for _ in range(len(self.modules))]
        self.m_types = m_type
        if not isinstance(self.m_types, (tuple, list)):
            self.m_types = [self.m_types]
    def __enter__(self):
        for module_id, module in enumerate(self.modules):
            modes_storage = self.modes_storage[module_id]
            for child_name, child_module in module.named_modules():
                matched = any(isinstance(child_module, m_type) for m_type in self.m_types)
                if matched:
                    # Save the current flag before forcing eval mode.
                    modes_storage[child_name] = child_module.training
                    child_module.train(mode=False)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        for module_id, module in enumerate(self.modules):
            modes_storage = self.modes_storage[module_id]
            for child_name, child_module in module.named_modules():
                # Restore only the children we touched on entry.
                if child_name in modes_storage:
                    child_module.train(mode=modes_storage[child_name])
| 28.583333 | 90 | 0.628709 | 2,742 | 0.470245 | 0 | 0 | 0 | 0 | 0 | 0 | 1,221 | 0.209398 |
b4ea18107650e6d8144929adf5242acb860cb78c | 198 | py | Python | src/setup.py | Mr-Salme/CipheyCore | f9ce42973d141a6cfd506ec7a423443293d29201 | [
"MIT"
] | null | null | null | src/setup.py | Mr-Salme/CipheyCore | f9ce42973d141a6cfd506ec7a423443293d29201 | [
"MIT"
] | null | null | null | src/setup.py | Mr-Salme/CipheyCore | f9ce42973d141a6cfd506ec7a423443293d29201 | [
"MIT"
] | null | null | null | from setuptools import setup, Extension
# Compile parts of `freq.cpp` into a shared library so we can call it from Python
setup(
#...
ext_modules=[Extension('gof_test', ['freq.cpp'],),],
)
| 24.75 | 81 | 0.686869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.530303 |
b4ed182369b3b188f8f312aa3ddca9ef3c96de04 | 36 | py | Python | acousticsim/clustering/__init__.py | JoFrhwld/python-acoustic-similarity | 50f71835532010b2fedf14b0ca3a52d88a9ab380 | [
"MIT"
] | 5 | 2018-01-15T22:06:20.000Z | 2022-02-21T07:02:40.000Z | acousticsim/clustering/__init__.py | JoFrhwld/python-acoustic-similarity | 50f71835532010b2fedf14b0ca3a52d88a9ab380 | [
"MIT"
] | null | null | null | acousticsim/clustering/__init__.py | JoFrhwld/python-acoustic-similarity | 50f71835532010b2fedf14b0ca3a52d88a9ab380 | [
"MIT"
] | 2 | 2019-11-28T17:06:27.000Z | 2019-12-05T22:57:28.000Z | from .network import ClusterNetwork
| 18 | 35 | 0.861111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4ee34ec8c2a387636b7c753e6e3f2ecebd11bb6 | 3,534 | py | Python | hangman/ps3_hangman.py | kriyaaseela/Hangman | 8733ced9f915890b74d53587e67c3b9b36815484 | [
"MIT"
] | 1 | 2016-11-13T20:13:06.000Z | 2016-11-13T20:13:06.000Z | hangman/ps3_hangman.py | kriyaaseela/Hangman | 8733ced9f915890b74d53587e67c3b9b36815484 | [
"MIT"
] | null | null | null | hangman/ps3_hangman.py | kriyaaseela/Hangman | 8733ced9f915890b74d53587e67c3b9b36815484 | [
"MIT"
] | null | null | null | # Hangman game
import random
WORDLIST_FILENAME = "words.txt"
def loadWords(filename=None):
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    filename: optional path to the word-list file; defaults to
    WORDLIST_FILENAME. (New optional parameter -- existing zero-argument
    callers are unaffected.)

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    if filename is None:
        filename = WORDLIST_FILENAME
    # Use a context manager so the file handle is always closed
    # (the original leaked the open file object).
    with open(filename, 'r') as inFile:
        # The whole word list lives on the first line, space-separated.
        line = inFile.readline()
    return line.split()
def chooseWord(wordlist):
    """
    wordlist (list): list of words (strings)

    Returns one uniformly random word from wordlist.
    """
    picked = random.choice(wordlist)
    return picked
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: boolean, True if all the letters of secretWord are in lettersGuessed;
      False otherwise
    '''
    # all() with a generator replaces the manual flag-and-break loop and
    # short-circuits on the first missing letter, exactly like the original.
    return all(letter in lettersGuessed for letter in secretWord)
def getGuessedWord(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters and underscores that represents
      what letters in secretWord have been guessed so far.
    '''
    # Build one "c " or "_ " cell per letter, then join them into the same
    # trailing-space string the original produced.
    cells = [(letter + " ") if letter in lettersGuessed else "_ "
             for letter in secretWord]
    return "".join(cells)
def getAvailableLetters(lettersGuessed):
    '''
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters that represents what letters have not
      yet been guessed.
    '''
    # Filtering the alphabet (not the guesses) preserves alphabetical order.
    return "".join(
        letter
        for letter in "abcdefghijklmnopqrstuvwxyz"
        if letter not in lettersGuessed
    )
def hangman(secretWord):
    '''
    secretWord: string, the secret word to guess.

    Interactive game loop: reads guesses from stdin via input() and prints
    the game state after every guess. The player loses after 8 wrong guesses.
    '''
    print("Welcome to the game, Hangman!")
    print("I am thinking of a word that is "+str(len(secretWord))+" letters long.")
    lettersGuessed=[]
    mistakesMade=0
    while not isWordGuessed(secretWord, lettersGuessed):
        # Stop once the mistake budget (8 wrong guesses) is exhausted.
        if not mistakesMade<8:
            break
        print("-----------")
        print("You have "+str(8-mistakesMade)+" guesses left.")
        availableLetters=getAvailableLetters(lettersGuessed)
        print("Available Letters: "+availableLetters)
        # Only the first character of the input is used as the guess.
        c=input("Please guess a letter: ")
        if c[0] in lettersGuessed:
            print("Oops! You've already guessed that letter: "+getGuessedWord(secretWord, lettersGuessed))
            continue
        lettersGuessed.append(c[0])
        if c[0] in secretWord:
            print("Good guess: "+getGuessedWord(secretWord, lettersGuessed))
        else:
            print("Oops! That letter is not in my word: "+getGuessedWord(secretWord, lettersGuessed))
            mistakesMade+=1
    print("-----------")
    if isWordGuessed(secretWord, lettersGuessed):
        print("Congratulations, you won!")
    else:
        print("Sorry, you ran out of guesses. The word was "+secretWord+".")
# Start an interactive game with a random, lowercased secret word.
secretWord = chooseWord(wordlist).lower()
hangman(secretWord)
| 28.272 | 106 | 0.631862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,765 | 0.499434 |
b4f0d8d809078663bd768dd9f4f542a1be3ac6fe | 520 | py | Python | src/utils/logger.py | hao-wang/Montage | d1c98ec7dbe20d0449f0d02694930cf1f69a5cea | [
"MIT"
] | 65 | 2020-01-03T11:59:03.000Z | 2022-03-19T07:10:47.000Z | src/utils/logger.py | hao-wang/Montage | d1c98ec7dbe20d0449f0d02694930cf1f69a5cea | [
"MIT"
] | 5 | 2020-01-10T01:55:26.000Z | 2020-09-23T10:44:00.000Z | src/utils/logger.py | hao-wang/Montage | d1c98ec7dbe20d0449f0d02694930cf1f69a5cea | [
"MIT"
] | 10 | 2020-10-07T02:37:35.000Z | 2021-06-04T07:06:54.000Z | class Colors:
    # ANSI escape sequences used to colorize console messages; END resets
    # the terminal back to its default color.
    END = '\033[0m'
    ERROR = '\033[91m[ERROR] '
    INFO = '\033[94m[INFO] '
    WARN = '\033[93m[WARN] '
def get_color(msg_type):
    """Map a message type name to its ANSI prefix (Colors.END if unknown)."""
    prefixes = {
        'ERROR': Colors.ERROR,
        'INFO': Colors.INFO,
        'WARN': Colors.WARN,
    }
    return prefixes.get(msg_type, Colors.END)
def get_msg(msg, msg_type=None):
    """Return *msg* wrapped in the color prefix for *msg_type* plus a reset code."""
    return get_color(msg_type) + msg + Colors.END
def print_msg(msg, msg_type=None):
    """Print *msg* to stdout, colorized according to *msg_type*."""
    print(get_msg(msg, msg_type))
| 20.8 | 41 | 0.642308 | 114 | 0.219231 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.157692 |
b4f1abe399d018b436c84a4219cc721810235141 | 9,689 | py | Python | lorawan/user_agent/bridge/agent_bridge.py | pablomodernell/lorawan_conformance_testing | 3e6b9028ee7a6a614e52bac684e396ecd04fd10c | [
"MIT"
] | 1 | 2020-09-10T14:12:07.000Z | 2020-09-10T14:12:07.000Z | lorawan/user_agent/bridge/agent_bridge.py | pablomodernell/lorawan_conformance_testing | 3e6b9028ee7a6a614e52bac684e396ecd04fd10c | [
"MIT"
] | null | null | null | lorawan/user_agent/bridge/agent_bridge.py | pablomodernell/lorawan_conformance_testing | 3e6b9028ee7a6a614e52bac684e396ecd04fd10c | [
"MIT"
] | null | null | null | """
This Module defines the main the main component of the Agent service, a bridge that listens to
UDP messages from the LoRa gateway's Packet Forwarder and encapsulates and sends them using the AMQP
protocol to the Test Application Server (TAS).
"""
#################################################################################
# MIT License
#
# Copyright (c) 2018, Pablo D. Modernell, Universitat Oberta de Catalunya (UOC),
# Universidad de la Republica Oriental del Uruguay (UdelaR).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#################################################################################
import os
import random
import socket
import struct
import time
import logging
import lorawan.user_agent.bridge.udp_listener as udp_listener
import message_queueing
import lorawan.parsing.lorawan
import lorawan.parsing.gateway_forwarder
import parameters.message_broker as message_broker
from parameters.message_broker import routing_keys
from lorawan.parsing.flora_messages import GatewayMessage
# Module-level logger for the Agent bridge.
logger = logging.getLogger(__name__)
# Protocol version byte used in Semtech Packet Forwarder frames.
# NOTE(review): os.environ.get(...) returns None when the variable is unset,
# and int(None) raises TypeError at import time -- confirm the deployment
# always exports PACKET_FORWARDER_VERSION_INT.
PACKET_FORWARDER_VERSION_INT = int(os.environ.get('PACKET_FORWARDER_VERSION_INT'))
class SPFBridge(object):
"""
Semtech Packet Forwarder (SPF) Bridge.
The Bride service running in the agent (user side) is in charge of listening to the uplink messages from the
LoRa gateway (e.g. in the same LAN of the machine running the bridge) in order to forward them to
the broker. The broker then will be in charge of making the messages available to the
testing services (f-interop side).
The Bridge is also in charge of receiving the downlink messages from the testing platform to send them to the
user's gateway.
The user must specify the IP and port of the gateway running the packet forwarder in the environment variables:
- *PF_IP*
- *PF_UDP_PORT*
"""
VERSION = bytes([PACKET_FORWARDER_VERSION_INT])
PUSH_DATA_ID = b"\x00"
PUSH_ACK_ID = b"\x01"
PULL_DATA_ID = b"\x02"
PULL_RESP_ID = b"\x03"
PULL_ACK_ID = b"\x04"
def __init__(self):
"""
Creates a Semtech Packet Forwarder (SPF) Bridge to handle the uplink UDP messages. It is also a consumer
of downlink messages from the broker. The SPF Bridge is a MqInterface.
"""
super().__init__()
self.UDP_IP = os.environ.get('PF_IP')
self.UDP_PORT = int(os.environ.get('PF_UDP_PORT'))
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.bind((self.UDP_IP, self.UDP_PORT))
self.gwdladdrLock1 = udp_listener.UDPListener.create_lock()
self.gwuladdrLock2 = udp_listener.UDPListener.create_lock()
self._gateway_dl_addr = None
self._gateway_ul_addr = None
self.downlink_ready_semaphore = udp_listener.UDPListener.create_semaphore()
self.udp_listener = udp_listener.UDPListener(self)
self._ready_to_downlink = False
self.last_uplink_time = None
self.uplink_mq_interface = message_queueing.MqInterface()
self.downlink_mq_interface = message_queueing.MqInterface()
self.downlink_mq_interface.declare_and_consume(
queue_name='down_nwk',
durable=False,
auto_delete=True,
routing_key=message_broker.routing_keys.toAgent + '.#',
callback=self.process_dlmsg)
@property
def gateway_dl_addr(self):
""" Thread safe access to the downlink address of the gateway's packet forwarder."""
with self.gwdladdrLock1:
retval = self._gateway_dl_addr
return retval
@gateway_dl_addr.setter
def gateway_dl_addr(self, downlink_addr):
""" Thread safe access to the downlink address of the gateway's packet forwarder."""
with self.gwdladdrLock1:
self._gateway_dl_addr = downlink_addr
@property
def gateway_ul_addr(self):
""" Thread safe access to the uplink address of the gateway's packet forwarder."""
with self.gwuladdrLock2:
retval = self._gateway_ul_addr
return retval
@gateway_ul_addr.setter
def gateway_ul_addr(self, uladdr):
""" Thread safe access to the uplink address of the gateway's packet forwarder."""
with self.gwuladdrLock2:
self._gateway_ul_addr = uladdr
def listen_spf(self):
""" Starts to listen for UDP uplink messages from the gateway."""
self.udp_listener.setDaemon(True)
self.udp_listener.start()
def process_dlmsg(self, channel, basic_deliver, properties, body):
"""
Downlink messages handler.
If no PULL_DATA message was previously received from the gateway (so the downlink address is unknown), the
message is ignored.
"""
body_str = body.decode()
if self._ready_to_downlink:
received_gw_message = GatewayMessage(body_str)
self.send_pull_resp(received_gw_message.get_txpk_str().encode())
elapsed_time = time.time() - self.last_uplink_time
logger.info(f"\n\n<<<<<<\nTime since the last uplink: {elapsed_time}\n<<<<<<\n")
logger.info(f"Sending DL to GW: \n{received_gw_message.get_txpk_str().encode()}")
else:
logger.info(
"Agent Bridge NOT ready to downlink: waiting for a PULL_DATA from the gateway.")
def process_uplink_data(self):
"""
Uplink message handler.
When a UDP message is received with an uplink message from the gateway,
it is sent to the broker using the
right routing key.
:return: None
"""
data, addr = self._sock.recvfrom(1024) # buffer size is 1024 bytes
self.last_uplink_time = time.time()
received_msg = lorawan.parsing.gateway_forwarder.SemtechUDPMsg(data)
logger.info(f"{str(received_msg)}")
# PUSH_DATA (ID=0) received -> Send an PUSH_ACK
if received_msg.msg_id == SPFBridge.PUSH_DATA_ID[0]:
push_ack_bytes = data[0:3] + SPFBridge.PUSH_ACK_ID
self.gateway_ul_addr = addr
self.send_ulresponse_raw(push_ack_bytes)
data_msg_list = received_msg.get_data()
if len(data_msg_list) > 0:
for packet in data_msg_list:
# packet[0]: the decoded data
# packet[1]: the json string with the decoded data in it's data field.
self.uplink_mq_interface.publish(msg=packet[1],
routing_key=routing_keys.fromAgent + '.gw1')
# PULL_DATA (ID=2) received -> Send an PULL_ACK
elif received_msg.msg_id == SPFBridge.PULL_DATA_ID[0]:
pull_ack = data[0:3] + SPFBridge.PULL_ACK_ID
self.gateway_dl_addr = addr
if not self._ready_to_downlink:
self._ready_to_downlink = True
self.downlink_ready_semaphore.release()
self.send_dl_raw(pull_ack)
def send_ulresponse_raw(self, ul_message):
"""
(SPFBridge, bytes) -> (None)
Sends a UDP message to the uplink socket of the Packet Forwarder running on the gateway.
:param ul_message: bytes to be sent as the response to an uplink message from the
Gateway's Semtech Packet Forwarder.
:return: None.
"""
assert self._gateway_ul_addr
self._sock.sendto(ul_message, self.gateway_ul_addr)
    def send_dl_raw(self, dl_message):
        """
        (SPFBridge, bytes) -> (None)
        Sends a downlink UDP message to the Packet Forwarder running on the gateway. The IP address must be previously
        obtained by receiving a PULL REQUEST message.
        :param dl_message: bytes to be sent to the Gateway's Semtech Packet Forwarder.
        :return: None.
        """
        # Downlink address is set when a PULL_DATA is processed; sending
        # before that is a programming error.
        assert self._gateway_dl_addr is not None
        # logger.info(f"Sending message to {self.gateway_dl_addr}")
        self._sock.sendto(dl_message, self.gateway_dl_addr)
    def send_pull_resp(self, json_bytes):
        """
        (SPFBridge, bytes) -> (None)
        Sends a message to the gateway using a PULL_RESP (Pull Response) message as defined in the Semtech
        Packet Forwarder (SPF) Protocol.
        :param json_bytes: byte sequence to be sent in the payload of a SPF PULL_RESP message.
        :return: None.
        """
        # Random 16-bit token, packed big-endian, as the message header requires.
        token = random.randint(0, 2 ** 16 - 1)
        message = SPFBridge.VERSION + struct.pack(
            '>H', token) + SPFBridge.PULL_RESP_ID
        self.send_dl_raw(message + json_bytes)
    def start_listening_downlink(self):
        """Start consuming messages from the downlink message-queue interface."""
        self.downlink_mq_interface.consume_start()
| 42.871681 | 118 | 0.668593 | 7,536 | 0.777789 | 0 | 0 | 942 | 0.097224 | 0 | 0 | 4,896 | 0.505315 |
b4f29d97ffa1af1c0c768796e62188143ad7c066 | 461 | py | Python | datamanager/interfaces/abstract_resource_resolver.py | alghimo/data-manager | 98c54934cb704c4e980acdfcc2206d740dd1ec03 | [
"Apache-2.0"
] | 1 | 2021-05-22T08:37:17.000Z | 2021-05-22T08:37:17.000Z | datamanager/interfaces/abstract_resource_resolver.py | alghimo/data-manager | 98c54934cb704c4e980acdfcc2206d740dd1ec03 | [
"Apache-2.0"
] | null | null | null | datamanager/interfaces/abstract_resource_resolver.py | alghimo/data-manager | 98c54934cb704c4e980acdfcc2206d740dd1ec03 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from typing import TypeVar, Generic
T = TypeVar("T")


class AbstractResourceResolver(Generic[T], ABC):
    """Abstract base for turning resource keys into fully qualified names.

    A concrete resolver maps a short resource key onto whatever "fully
    qualified" means for its backend: a file path when working with files,
    a fully qualified table name when working with databases, and so on.
    """

    @abstractmethod
    def resolve(self, key: str) -> T:
        """Return the fully qualified resource for ``key``."""
b4f37c5b1ce75f2a66c8ca0507bace026f75b5cb | 51 | py | Python | model_zoo/Star/__init__.py | RManLuo/MAMDR | f6dc5b799150103e8f270f329217bf541c44a67d | [
"MIT"
] | 2 | 2022-03-06T10:25:51.000Z | 2022-03-08T02:29:43.000Z | model_zoo/Star/__init__.py | RManLuo/MAMDR | f6dc5b799150103e8f270f329217bf541c44a67d | [
"MIT"
] | 2 | 2022-03-07T23:58:23.000Z | 2022-03-13T10:11:55.000Z | model_zoo/Star/__init__.py | RManLuo/MAMDR | f6dc5b799150103e8f270f329217bf541c44a67d | [
"MIT"
] | 3 | 2022-02-25T02:51:49.000Z | 2022-03-08T07:49:51.000Z | from .star import *
from .partitioned_norm import * | 25.5 | 31 | 0.784314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4f450aa99093525879013746d82ec4c365abcd8 | 3,409 | py | Python | synergy/db/model/job.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | synergy/db/model/job.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | synergy/db/model/job.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | __author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, ListField, IntegerField
MAX_NUMBER_OF_LOG_ENTRIES = 32
TIMEPERIOD = 'timeperiod'
PROCESS_NAME = 'process_name'
START_OBJ_ID = 'start_obj_id'
END_OBJ_ID = 'end_obj_id'
STATE = 'state'
RELATED_UNIT_OF_WORK = 'related_unit_of_work'
NUMBER_OF_FAILURES = 'number_of_failures'
# contains list of MAX_NUMBER_OF_LOG_ENTRIES last log messages
HISTORIC_LOG = 'historic_log'
# given Job was _not_ processed by aggregator because of multiple errors/missing data
# this state allows to mute current Job abd allow other timeperiods/Jobs to be processed
# only manual "re-processing" can re-run the skipped Job
STATE_SKIPPED = 'state_skipped'
# given Job was successfully processed by an aggregator
# no further processing for this Job is performed
STATE_PROCESSED = 'state_processed'
# no processing was performed for this Job
# no further processing for this Job is performed
STATE_NOOP = 'state_noop'
# Scheduler assumes that all timeperiod data is in the database, and asks an aggregator to run a "final" aggregation
# Job will be marked as STATE_PROCESSED afterwards if the processing succeed
STATE_FINAL_RUN = 'state_final_run'
# Aggregator is asked to perform a routine aggregation.
# Further state of the Job depends on the governing state machine:
# it could be either STATE_PROCESSED, STATE_IN_PROGRESS, STATE_NOOP, STATE_FINAL_RUN or STATE_SKIPPED
STATE_IN_PROGRESS = 'state_in_progress'
# Given timetable record serves as place-holder in the Tree
# TimeRecord can move to STATE_IN_PROGRESS
STATE_EMBRYO = 'state_embryo'
class Job(BaseDocument):
    """Processing status of one (process_name, timeperiod) pair.

    Indicates whether the data for the timeperiod was processed by the
    particular process, and tracks the related unit of work and failures.
    """
    db_id = ObjectIdField('_id', null=True)
    process_name = StringField(PROCESS_NAME)
    timeperiod = StringField(TIMEPERIOD)
    start_id = ObjectIdField(START_OBJ_ID)
    end_id = ObjectIdField(END_OBJ_ID)
    state = StringField(STATE, choices=[STATE_IN_PROGRESS, STATE_PROCESSED, STATE_FINAL_RUN,
                                        STATE_EMBRYO, STATE_SKIPPED, STATE_NOOP])
    related_unit_of_work = ObjectIdField(RELATED_UNIT_OF_WORK)
    log = ListField(HISTORIC_LOG)
    number_of_failures = IntegerField(NUMBER_OF_FAILURES, default=0)

    @BaseDocument.key.getter
    def key(self):
        """Composite key: (process name, timeperiod)."""
        return self.process_name, self.timeperiod

    @key.setter
    def key(self, value):
        """ :param value: tuple (name of the process, timeperiod as string in Synergy Data format) """
        self.process_name = value[0]
        self.timeperiod = value[1]

    @property
    def is_active(self):
        """True while the Job may still change state."""
        return self.state in [STATE_FINAL_RUN, STATE_IN_PROGRESS, STATE_EMBRYO]

    @property
    def is_finished(self):
        """True once the Job reached a terminal state."""
        return self.state in [STATE_PROCESSED, STATE_SKIPPED, STATE_NOOP]

    @property
    def is_processed(self):
        return self.state == STATE_PROCESSED

    @property
    def is_noop(self):
        return self.state == STATE_NOOP

    @property
    def is_skipped(self):
        return self.state == STATE_SKIPPED

    @property
    def is_embryo(self):
        return self.state == STATE_EMBRYO

    @property
    def is_in_progress(self):
        return self.state == STATE_IN_PROGRESS

    @property
    def is_final_run(self):
        return self.state == STATE_FINAL_RUN
| 34.09 | 116 | 0.7445 | 1,758 | 0.515694 | 0 | 0 | 1,005 | 0.294808 | 0 | 0 | 1,438 | 0.421825 |
b4f5a7efcd7626395f15699df12dd9127abee6af | 187 | py | Python | text/code/exact-string-matching/naive-forward.py | pmikolajczyk41/string-algorithms | faa7c7b3ab18a157a27e8c08081f2efebf8be900 | [
"MIT"
] | 1 | 2020-06-27T01:33:43.000Z | 2020-06-27T01:33:43.000Z | text/code/exact-string-matching/naive-forward.py | TenGumis/string-algorithms | e57a9dc6150e92ab65cad4a5c1e68533b7166eb7 | [
"MIT"
] | null | null | null | text/code/exact-string-matching/naive-forward.py | TenGumis/string-algorithms | e57a9dc6150e92ab65cad4a5c1e68533b7166eb7 | [
"MIT"
] | null | null | null | def naive_string_matching(t, w, n, m):
for i in range(n - m + 1):
j = 0
while j < m and t[i + j + 1] == w[j + 1]:
j = j + 1
if j == m:
return True
return False | 23.375 | 45 | 0.475936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b4f6a6979f60206415ac42ae13093ab17a970251 | 27,291 | py | Python | shop/tests.py | okcashpro/okshop | f18600537eca12a0fe7dd52ed5453ed65c2ecacc | [
"MIT"
] | 3 | 2017-01-18T14:21:41.000Z | 2021-10-29T23:47:31.000Z | shop/tests.py | okcashpro/okshop | f18600537eca12a0fe7dd52ed5453ed65c2ecacc | [
"MIT"
] | 1 | 2017-05-16T20:24:01.000Z | 2017-05-17T21:28:27.000Z | shop/tests.py | okcashpro/okshop | f18600537eca12a0fe7dd52ed5453ed65c2ecacc | [
"MIT"
] | 1 | 2021-10-29T23:47:24.000Z | 2021-10-29T23:47:24.000Z | from django.test import TestCase, Client
from django.contrib.auth.models import User
from .models import *
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from io import BytesIO
import pyotp
import json
# Create your tests here.
class RegisterTestCase(TestCase):
    """Tests for the user registration view (``shop:register``).

    Uses ``assertEqual`` throughout: the ``assertEquals`` alias used
    previously is deprecated and removed in Python 3.12.
    """

    def setUp(self):
        # One pre-existing user so username/email collisions can be tested.
        self.u1 = User.objects.create_user('u1', 'email@example.com', '')
        ue1 = UserExtra(user=self.u1)
        ue1.save()
        self.u1.save()

    def test_user_register_all_valid(self):
        """A fully valid registration yields only success messages."""
        response = self.client.post(reverse('shop:register'), {
            'username': 'u3',
            'email': 'test@example.com',
            'password': 'pass1234',
            'passwordconfirm': 'pass1234'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertEqual(m.tags, 'success')

    def test_user_register_invalid_email(self):
        """A malformed email address must not produce a success message."""
        response = self.client.post(reverse('shop:register'), {
            'username': 'u4',
            'email': 'test1@example',
            'password': 'pass1234',
            'passwordconfirm': 'pass1234'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_register_password_too_short(self):
        """A 7-character password is rejected."""
        response = self.client.post(reverse('shop:register'), {
            'username': 'u5',
            'email': 'test2@example.com',
            'password': 'pass123',
            'passwordconfirm': 'pass123'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_register_password_mismatch(self):
        """Password and confirmation must match."""
        response = self.client.post(reverse('shop:register'), {
            'username': 'u6',
            'email': 'test3@example.com',
            'password': 'pass1234',
            'passwordconfirm': 'pass4'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_register_username_in_use(self):
        """Registering with an existing username ('u1') is rejected."""
        response = self.client.post(reverse('shop:register'), {
            'username': 'u1',
            'email': 'test4@example.com',
            'password': 'pass1234',
            'passwordconfirm': 'pass1234'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_email_in_use(self):
        """Registering with an already-registered email is rejected."""
        response = self.client.post(reverse('shop:register'), {
            'username': 'u7',
            'email': 'email@example.com',
            'password': 'pass1234',
            'passwordconfirm': 'pass1234'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_user_invalid_username(self):
        # NOTE(review): the invalid field here is the email ('test5@example'),
        # not the username -- the test name may be misleading; confirm intent.
        response = self.client.post(reverse('shop:register'), {
            'username': 'u3',
            'email': 'test5@example',
            'password': 'pass1234',
            'passwordconfirm': 'pass1234'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')
class LoginTestCase(TestCase):
    """Tests for the login view (``shop:login``), with and without 2FA.

    Uses ``assertEqual`` throughout: the ``assertEquals`` alias used
    previously is deprecated and removed in Python 3.12.
    """

    def setUp(self):
        # Verified user without 2FA.
        self.u1 = User.objects.create_user('_u1', 'email@example.com',
                                           'p4ssw0rd')
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        self.u1.save()
        # Unverified user: login must be rejected.
        self.u2 = User.objects.create_user('_u2', 'email2@example.com',
                                           'p4ssw0rd')
        ue2 = UserExtra(user=self.u2, verified=False)
        ue2.save()
        self.u2.save()
        # Verified user with a confirmed TOTP authenticator (secret 'test').
        self.u3 = User.objects.create_user('_u3', 'email3@example.com',
                                           'p4ssw0rd')
        ue3 = UserExtra(user=self.u3, verified=True, authenticator_id='test',
                        authenticator_verified=True)
        ue3.save()
        # Was `self.u1.save()` -- copy-paste slip; u3 is the user created here.
        # (create_user already persists, so this is belt-and-braces either way.)
        self.u3.save()

    def test_login_all_valid_no_2fa(self):
        response = self.client.post(reverse('shop:login'), {
            'username': '_u1',
            'password': 'p4ssw0rd'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(list(response.context['messages'])[0]),
                         'Welcome back, _u1!')

    def test_login_all_invalid_no_2fa(self):
        response = self.client.post(reverse('shop:login'), {
            'username': 'invalidname',
            'password': 'paaaaaaaaaaaa'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_login_invalid_pass_no_2fa(self):
        response = self.client.post(reverse('shop:login'), {
            'username': '_u1',
            'password': 'paaaaaaaaaaaa'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_login_not_verified(self):
        response = self.client.post(reverse('shop:login'), {
            'username': '_u2',
            'password': 'p4ssw0rd'
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')

    def test_login_all_valid_2fa(self):
        # Generate the current TOTP code from the same secret as the account.
        totp = pyotp.TOTP('test')
        response = self.client.post(reverse('shop:login'), {
            'username': '_u3',
            'password': 'p4ssw0rd',
            '2facode': totp.now()
        }, follow=True)
        self.assertEqual(str(list(response.context['messages'])[0]),
                         'Welcome back, _u3!')

    def test_login_invalid_2fa(self):
        response = self.client.post(reverse('shop:login'), {
            'username': '_u3',
            'password': 'p4ssw0rd',
            '2facode': ''
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        for m in list(response.context['messages']):
            self.assertNotEqual(m.tags, 'success')
class TestUploadFiles(TestCase):
    """Tests for the file-upload endpoint (``shop:uploadfile``):
    authorization, validation, and the happy path."""

    def setUp(self):
        # u1 owns product p1; u2 exists to test the no-permission case.
        self.u1 = User.objects.create_user('__u1', '', 'passw0rd')
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        self.u1.save()
        self.p1 = Product(
            product_name='T',
            product_description='d',
            price=0,
            physical=False,
            seller=self.u1
        )
        self.p1.save()
        self.u2 = User.objects.create_user('__u2', '', 'passw0rd')
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        self.u2.save()

    def test_upload_product_not_found(self):
        # Nonexistent product id -> 404.
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': '291827346271725623'}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 'n'
            }
        )
        self.assertEqual(r.status_code, 404)

    def test_upload_product_not_logged_in(self):
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 'n'
            }
        )
        self.assertNotEqual(r.status_code, 200)

    def test_upload_product_no_permission(self):
        # u2 is not the seller of p1 -> 403.
        self.client.login(username=self.u2.username, password='passw0rd')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 'n'
            }
        )
        self.assertEqual(r.status_code, 403)

    def test_upload_incomplete_request(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {}
        )
        self.assertEqual(r.status_code, 400)

    def test_upload_name_too_big(self):
        # Name longer than 200 characters is rejected.
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 'a'*201
            }
        )
        self.assertEqual(r.status_code, 400)

    def test_upload_no_name(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(reverse(
            'shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': ''
            }
        )
        self.assertEqual(r.status_code, 400)

    # Can't seem to fake file size... I'll have to rely on my intuition
    """def test_upload_file_too_large(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': InMemoryUploadedFile(
                    BytesIO(b"d"),
                    None,
                    'file.txt',
                    "text/txt",
                    10**10,
                    None,
                    None
                ),
                'name': 's'
            }
        )
        self.assertEqual(r.status_code, 400)"""

    def test_upload_all_fine(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(
            reverse('shop:uploadfile', kwargs={'id': self.p1.id}),
            {
                'file': SimpleUploadedFile("file.txt", b"t",
                                           content_type="text/txt"),
                'name': 's'
            }
        )
        # TODO: Get this to work on py3.5
        """rjson = json.loads(str(r.content))
        file = DigitalFile.objects.get(id=rjson['file'])
        self.assertEqual(file.file.read(), b't')"""
        self.assertEqual(r.status_code, 200)
class TestDeleteFile(TestCase):
    """Tests for deleting a product's digital file (``shop:deletefile``)."""

    def setUp(self):
        self.u1 = User.objects.create_user('___u1', '', 'passw0rd')
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        self.u1.save()
        self.u2 = User.objects.create_user('___u2', '', 'passw0rd')
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        # NOTE(review): probably meant `self.u2.save()` -- harmless since
        # create_user already persists the user.
        self.u1.save()
        self.p1 = Product(product_name='T', product_description='d', price=0,
                          physical=False, seller=self.u1)
        self.p1.save()
        self.file1 = DigitalFile(
            file=SimpleUploadedFile("file.txt", b"t", content_type="text/txt"),
            name='test',
            product=self.p1
        )
        self.file1.save()
        self.file2 = DigitalFile(
            file=SimpleUploadedFile("file.txt", b"t", content_type="text/txt"),
            name='test',
            product=self.p1
        )
        self.file2.save()

    def test_file_not_logged_in(self):
        r = self.client.get(reverse('shop:deletefile',
                                    kwargs={'id': self.file1.id}))
        self.assertNotEqual(r.status_code, 200)

    def test_file_no_permission(self):
        # u2 does not own the product the file belongs to -> 403.
        self.client.login(username=self.u2.username, password='passw0rd')
        r = self.client.get(reverse('shop:deletefile',
                                    kwargs={'id': self.file1.id}))
        self.assertEqual(r.status_code, 403)

    def test_file_not_exists(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.get(reverse('shop:deletefile',
                                    kwargs={'id': 2912787347128272}))
        self.assertEqual(r.status_code, 404)

    def test_file_all_fine(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.get(reverse('shop:deletefile',
                                    kwargs={'id': self.file2.id}), follow=True)
        self.assertEqual(r.status_code, 200)
class CheckoutTestCase(TestCase):
    """Tests for the multi-step checkout flow (``shop:checkout``).

    Covers cart/stock/balance validation and which checkout template is
    rendered for digital vs physical products and single vs multiple wallets.
    """

    def setUp(self):
        # u1: one wallet; u2: two empty wallets; u3: two funded wallets.
        self.u1 = User.objects.create_user('____u1', '', 'passw0rd')
        self.u1.save()
        self.u2 = User.objects.create_user('____u2', '', 'passw0rd')
        self.u2.save()
        self.u3 = User.objects.create_user('____u3', '', 'passw0rd')
        self.u3.save()
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        ue3 = UserExtra(user=self.u3, verified=True)
        ue3.save()
        w = Wallet(user=self.u1)
        w.save()
        w1 = Wallet(user=self.u2)
        w2 = Wallet(user=self.u2, label='2')
        # Negative `redeemed` appears to represent available balance -- see Wallet model.
        w3 = Wallet(user=self.u3, label='3', redeemed=Decimal(-10000))
        w4 = Wallet(user=self.u3, label='3', redeemed=Decimal(-500))
        w1.save()
        w2.save()
        w3.save()
        w4.save()
        self.p1 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=False,
            stock=10
        )
        self.p1.save()
        self.p2 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=True,
            stock=10,
            worldwide_shipping=True,
            free_shipping=True
        )
        self.p2.save()
        self.expensiveproduct = Product(
            product_name='t',
            seller=self.u1,
            price=2**32,
            stock=10
        )
        self.expensiveproduct.save()
        self.reasonableproduct = Product(
            product_name='t',
            seller=self.u1,
            price=10,
            stock=10
        )
        self.reasonableproduct.save()
        self.outofstock = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            stock=0
        )
        self.outofstock.save()

    def test_checkout_not_logged_in(self):
        r = self.client.get(reverse('shop:checkout'))
        self.assertNotEqual(r.status_code, 200)

    def test_checkout_cart_empty(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        self.u1.userextra.clear_cart()
        r = self.client.get(reverse('shop:checkout'))
        self.assertNotEqual(r.status_code, 200)

    def test_checkout_no_money(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.expensiveproduct)
        r = self.client.get(reverse('shop:checkout'))
        self.assertNotEqual(r.status_code, 200)

    def test_checkout_outofstock(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.outofstock)
        r = self.client.get(reverse('shop:checkout'))
        self.assertNotEqual(r.status_code, 200)

    def test_physical_one_wallet_free(self):
        # Physical product -> address step (checkout1) first.
        self.client.login(username=self.u1.username, password='passw0rd')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.p2)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout1.html')

    def test_physical_one_wallet_free_incomplete_data(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.p2)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout1.html')
        c = r.context['checkout']
        # Posting the checkout token without address data must produce messages.
        r = self.client.post(reverse('shop:checkout'),
                             {'checkout': str(c.uuid)})
        self.assertGreater(len(r.context['messages']), 0)

    def test_physical_one_wallet_free_new_address(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.p2)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout1.html')
        c = r.context['checkout']
        r = self.client.post(reverse('shop:checkout'), {
            'checkout': str(c.uuid),
            'name': "Mr. Testing",
            'address1': "Somewhere, Norcross",
            'state': "GA",
            'country': "US",
            'zip': "30092",
            'use_custom_address': ""
        })
        self.assertTemplateUsed(r, 'shop/checkout3.html')
        # Final confirmation redirects away from the checkout page.
        r = self.client.post(reverse('shop:checkout'),
                             {'checkout': str(c.uuid), 'confirm': ''})
        self.assertEqual(r.status_code, 302)

    def test_digital_one_wallet_free(self):
        # Free digital product, single wallet -> straight to confirmation.
        self.client.login(username=self.u1.username, password='passw0rd')
        self.u1.userextra.clear_cart()
        self.u1.userextra.add_to_cart(self.p1)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout3.html')

    def test_digital_multiple_wallets_free(self):
        self.client.login(username=self.u2.username, password='passw0rd')
        self.u2.userextra.clear_cart()
        self.u2.userextra.add_to_cart(self.p1)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout3.html')

    def test_digital_multiple_wallets_enough_money(self):
        # Paid product with multiple funded wallets -> wallet-choice step.
        self.client.login(username=self.u3.username, password='passw0rd')
        self.u3.userextra.clear_cart()
        self.u3.userextra.add_to_cart(self.reasonableproduct)
        r = self.client.get(reverse('shop:checkout'))
        self.assertTemplateUsed(r, 'shop/checkout2.html')
class ReviewTestCase(TestCase):
    """Tests for posting product reviews via ``shop:viewproduct``:
    purchase requirement, input validation, and edit-on-repost behavior."""

    def setUp(self):
        # These names are getting ridiculous
        self.u1 = User.objects.create_user('______u1', '', 'passw0rd')
        self.u1.save()
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        c = Cart(user=self.u1)
        c.save()
        self.u2 = User.objects.create_user('______u2', '', 'passw0rd')
        self.u2.save()
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        c2 = Cart(user=self.u2)
        c2.save()
        self.p1 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=False,
            stock=10
        )
        self.p1.save()
        self.p2 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=False,
            stock=10
        )
        self.p2.save()
        # Both users have purchased p1; nobody has purchased p2.
        self.pur = Purchase(by=self.u1)
        self.pur.save()
        pi = PurchaseItem(purchase=self.pur, price=Decimal(0), product=self.p1)
        pi.save()
        self.pur2 = Purchase(by=self.u2)
        self.pur2.save()
        pi2 = PurchaseItem(purchase=self.pur2, price=Decimal(0),
                           product=self.p1)
        pi2.save()

    def test_post_not_logged_in(self):
        self.client.logout()
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'post_not_logged_in',
            'rating': 3,
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(r.status_code, 302)
        self.assertEqual(0,
                         self.p1.review_set.filter(title='post_not_logged_in')
                         .count())

    def test_post_not_owned(self):
        # u1 never purchased p2, so the review must be rejected.
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p2.id}), {
            'title': 'post_not_owned',
            'rating': 3,
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(0,
                         self.p2.review_set.filter(title='post_not_owned')
                         .count())

    def test_post_owned_title_too_long(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'a'*200,
            'rating': 3,
            'review': 'test_post_too_long'
        })
        self.assertEqual(0,
                         self.p1.review_set.filter(review='test_post_too_long')
                         .count())

    def test_post_owned_rate_too_high(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'test_post_rate_high',
            'rating': 6,
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(0,
                         self.p1.review_set.filter(title='test_post_rate_high')
                         .count())

    def test_post_owned_rate_too_low(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'test_post_rate_low',
            'rating': 0,
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(0,
                         self.p1.review_set.filter(title='test_post_rate_low')
                         .count())

    def test_post_owned_rate_invalid(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'test_post_rate_bad',
            'rating': 'neat',
            'review': 'This shouldn\'t have been posted'
        })
        self.assertEqual(0,
                         self.p1.review_set.filter(title='test_post_rate_bad')
                         .count())

    def test_post_owned_all_fine(self):
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.post(reverse('shop:viewproduct',
                                     kwargs={'id': self.p1.id}), {
            'title': 'test_post_fine',
            'rating': 4,
            'review': 'This should have been posted'
        })
        self.assertEqual(1,
                         self.p1.review_set.filter(title='test_post_fine')
                         .count())

    def test_post_owned_edit(self):
        # Posting twice replaces the first review instead of adding a second.
        self.client.login(username=self.u2.username, password='passw0rd')
        self.client.post(reverse('shop:viewproduct',
                                 kwargs={'id': self.p1.id}), {
            'title': 't',
            'rating': 4,
            'review': 'This shouldn\'t have been posted'
        })
        self.client.post(reverse('shop:viewproduct',
                                 kwargs={'id': self.p1.id}), {
            'title': 'test_post_edit',
            'rating': 4,
            'review': 'This should have been posted'
        })
        self.assertEqual(0, self.p1.review_set.filter(title='t').count())
        self.assertEqual(1,
                         self.p1.review_set.filter(title='test_post_edit')
                         .count())
class DeleteReviewTestCase(TestCase):
    """Tests for deleting reviews (``shop:deletereview``):
    both the review author and the product's seller may delete."""

    def setUp(self):
        # u1 sells p1/p2; r1 is u1's review, r2/r3 belong to u2.
        self.u1 = User.objects.create_user('_______u1', '', 'passw0rd')
        self.u1.save()
        ue1 = UserExtra(user=self.u1, verified=True)
        ue1.save()
        c = Cart(user=self.u1)
        c.save()
        self.u2 = User.objects.create_user('_______u2', '', 'passw0rd')
        self.u2.save()
        ue2 = UserExtra(user=self.u2, verified=True)
        ue2.save()
        c2 = Cart(user=self.u2)
        c2.save()
        self.p1 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=False,
            stock=10
        )
        self.p1.save()
        self.p2 = Product(
            product_name='t',
            seller=self.u1,
            price=0,
            physical=False,
            stock=10
        )
        self.p2.save()
        self.r1 = Review(product=self.p1, user=self.u1, rating=4, title='r1',
                         review='review 1')
        self.r1.save()
        self.r2 = Review(product=self.p1, user=self.u2, rating=4, title='r2',
                         review='review 2')
        self.r2.save()
        self.r3 = Review(product=self.p1, user=self.u2, rating=4, title='r3',
                         review='review 3')
        self.r3.save()

    def test_delete_not_logged_in(self):
        self.client.logout()
        r = self.client.get(reverse('shop:deletereview', kwargs={
            'id': self.p1.id,
            'reviewid': self.r1.id
        }))
        self.assertEqual(r.status_code, 302)
        self.assertEqual(Review.objects.filter(title='r1').count(), 1)

    def test_delete_no_permission(self):
        # u2 is neither the author of r1 nor the seller... wait: u2 is not
        # the author of r1 and not the seller, so the review must remain.
        self.client.login(username=self.u2.username, password='passw0rd')
        r = self.client.get(reverse('shop:deletereview', kwargs={
            'id': self.p1.id,
            'reviewid': self.r1.id
        }))
        self.assertEqual(r.status_code, 302)
        self.assertEqual(Review.objects.filter(title='r1').count(), 1)

    def test_delete_poster(self):
        # The review's author may delete their own review.
        self.client.login(username=self.u2.username, password='passw0rd')
        r = self.client.get(reverse('shop:deletereview', kwargs={
            'id': self.p1.id,
            'reviewid': self.r2.id
        }))
        self.assertEqual(r.status_code, 302)
        self.assertEqual(Review.objects.filter(title='r2').count(), 0)

    def test_delete_seller(self):
        # The product's seller may delete any review on their product.
        self.client.login(username=self.u1.username, password='passw0rd')
        r = self.client.get(reverse('shop:deletereview', kwargs={
            'id': self.p1.id,
            'reviewid': self.r3.id
        }))
        self.assertEqual(r.status_code, 302)
        self.assertEqual(Review.objects.filter(title='r3').count(), 0)
| 34.458333 | 79 | 0.548972 | 26,931 | 0.986809 | 0 | 0 | 0 | 0 | 0 | 0 | 5,205 | 0.190722 |
b4f72a2176a65a36df8b60e82c572b124b0a8a0e | 1,965 | py | Python | h2o-py/tests/testdir_algos/rulefit/pyunit_football_rulefit.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 1 | 2022-03-15T06:08:14.000Z | 2022-03-15T06:08:14.000Z | h2o-py/tests/testdir_algos/rulefit/pyunit_football_rulefit.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 58 | 2021-10-01T12:43:37.000Z | 2021-12-08T22:58:43.000Z | h2o-py/tests/testdir_algos/rulefit/pyunit_football_rulefit.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | null | null | null | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.rulefit import H2ORuleFitEstimator
def football():
    """RuleFit comparison on the football dataset.

    Trains a binomial rulefit model on FTR, then appends an (unused) extra
    response level and trains a multinomial model on the same data, printing
    the rules and train/validation metrics of both for manual comparison.
    """
    df = h2o.import_file("https://h2o-public-test-data.s3.amazonaws.com/mli-testing/manual-test/small-dataset/binomial/football_prediction.csv")
    df["FTR"] = df["FTR"].asfactor()
    x = df.columns
    y = "FTR"
    x.remove(y)
    x.remove("Date")
    # Split the dataset into train and test
    train, test = df.split_frame(ratios=[.8], seed=1234)
    rfit = H2ORuleFitEstimator(min_rule_length=1, max_rule_length=3, max_num_rules=10, seed=1234, model_type="rules_and_linear")
    rfit.train(training_frame=train, x=x, y=y, validation_frame=test)
    # Extra level makes the response multinomial without adding any rows.
    df[y] = df[y].append_levels(["extra_level"])
    # Split the dataset into train and test
    train, test = df.split_frame(ratios=[.8], seed=1234)
    rfit_multi = H2ORuleFitEstimator(min_rule_length=1, max_rule_length=3, max_num_rules=10, seed=1234,
                                     model_type="rules_and_linear", distribution="multinomial")
    rfit_multi.train(training_frame=train, x=x, y=y, validation_frame=test)
    # Print rules and metrics for comparison:
    print("Binomial model rules:")
    print(rfit.rule_importance())
    print("Multinomial model rules:")
    print(rfit_multi.rule_importance())
    print("Binomial train RMSE vs. multinomial train RMSE:")
    print(str(rfit.rmse()) + " vs. " + str(rfit_multi.rmse()))
    print("Binomial train MSE vs. multinomial train MSE: ")
    print(str(rfit.mse()) + " vs. " + str(rfit_multi.mse()))
    print("Binomial valid RMSE vs. multinomial valid RMSE: ")
    print(str(rfit.rmse(valid=True)) + " vs. " + str(rfit_multi.rmse(valid=True)))
    print("Binomial valid MSE vs. multinomial valid MSE: ")
    print(str(rfit.mse(valid=True)) + " vs. " + str(rfit_multi.mse(valid=True)))
if __name__ == "__main__":
    # Run through the H2O pyunit harness when executed as a script;
    # call directly when imported by the test runner.
    pyunit_utils.standalone_test(football)
else:
    football()
| 37.075472 | 144 | 0.683461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 614 | 0.312468 |
b4f763f3755acf4717fd0387a861a69a686dff19 | 1,140 | py | Python | Day2/python-exercises-02-answers/preparing_dataset/category_dataset_debug.py | klimpie94/Python-training | 7af210126cfe2e9386a8f22075ea0d7eff80daac | [
"RSA-MD"
] | null | null | null | Day2/python-exercises-02-answers/preparing_dataset/category_dataset_debug.py | klimpie94/Python-training | 7af210126cfe2e9386a8f22075ea0d7eff80daac | [
"RSA-MD"
] | 1 | 2021-12-13T20:33:28.000Z | 2021-12-13T20:33:28.000Z | Day2/python-exercises-02-answers/preparing_dataset/category_dataset_debug.py | klimpie94/Python-training | 7af210126cfe2e9386a8f22075ea0d7eff80daac | [
"RSA-MD"
] | 9 | 2020-02-05T10:24:12.000Z | 2020-02-10T13:08:50.000Z | import pandas as pd
import os
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')
# Paths: data/ directory one level above this script's parent.
DATA_PATH = os.path.join(
    os.fspath(Path(__file__).parents[1]),
    "data")
IMDB_DATA_PATH = os.path.join(DATA_PATH, "imdb_category.csv")
EXPORT_PATH = os.path.join(DATA_PATH, "imdb_category_binary.csv")
categories_df = pd.read_csv(IMDB_DATA_PATH)
# Columns 1..28 are assumed to be the one-vs-rest category indicator columns
# -- TODO confirm against the CSV schema.
cols_category = categories_df.iloc[:, 1:29].columns
categories_df_tmp = categories_df.copy(deep=True)
# Collapse the indicator columns into a single categorical column by taking
# the column with the maximum value per row.
categories_df_tmp["film_category"] = (categories_df_tmp
                                      .loc[:, cols_category]
                                      .idxmax(axis=1)
                                      .values)
# NOTE(review): columns_to_drop selects the same columns as cols_category.
columns_to_drop = [col
                   for idx, col in enumerate(categories_df_tmp.columns)
                   if idx in range(1, 29)]
categories_df_tmp.drop(columns_to_drop, axis=1, inplace=True)
# Re-expand film_category into clean one-hot columns and export.
categories_binary = categories_df_tmp.join(
    pd.get_dummies(
        data=categories_df_tmp.film_category,
        prefix=None))
categories_binary.drop("film_category", axis=1, inplace=True)
categories_binary.to_csv(EXPORT_PATH, sep=",", index=False)
| 30.810811 | 71 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.080702 |
b4f7eeae12122017454859c15609e21806fad1d5 | 3,003 | py | Python | imgtools/modules/segmentation.py | bhklab/med-imagetools | 0cce0ee6666d052d4f76a1b6dc5d088392d309f4 | [
"Apache-2.0"
] | 9 | 2021-12-14T19:53:57.000Z | 2022-01-18T18:45:26.000Z | imgtools/modules/segmentation.py | bhklab/med-imagetools | 0cce0ee6666d052d4f76a1b6dc5d088392d309f4 | [
"Apache-2.0"
] | 4 | 2021-12-05T02:54:00.000Z | 2021-12-10T20:32:20.000Z | imgtools/modules/segmentation.py | bhklab/imgtools | 0f0414533cb6667b68aa48541feb376226fd5515 | [
"Apache-2.0"
] | 1 | 2021-07-30T20:22:46.000Z | 2021-07-30T20:22:46.000Z | from functools import wraps
import numpy as np
import SimpleITK as sitk
from ..utils import array_to_image, image_to_array
def accepts_segmentations(f):
    """Decorator that makes an image-processing function Segmentation-aware.

    If the first argument is a `Segmentation`, the wrapped function's result
    is cast to a vector-of-uint8 image and re-wrapped as a `Segmentation`
    carrying the original `roi_names`; otherwise the result is returned as-is.
    """
    @wraps(f)
    def wrapper(img, *args, **kwargs):
        result = f(img, *args, **kwargs)
        if isinstance(img, Segmentation):
            result = sitk.Cast(result, sitk.sitkVectorUInt8)
            return Segmentation(result, roi_names=img.roi_names)
        else:
            return result
    return wrapper
def map_over_labels(segmentation, f, include_background=False, return_segmentation=True, **kwargs):
    """Apply ``f`` to each label of ``segmentation`` independently.

    Labels are 1-based; label 0 is the implicit background and is only
    processed when ``include_background`` is true.  If ``f`` returns
    `sitk.Image` objects and ``return_segmentation`` is true, the per-label
    results are recomposed into a single multi-component `Segmentation`
    (preserving ``roi_names``); otherwise a plain list of results is returned.
    """
    if include_background:
        labels = range(segmentation.num_labels + 1)
    else:
        labels = range(1, segmentation.num_labels + 1)
    res = [f(segmentation.get_label(label=label), **kwargs) for label in labels]
    if return_segmentation and isinstance(res[0], sitk.Image):
        # Cast each per-label image to uint8 and stack them back into one
        # vector-pixel image.
        res = [sitk.Cast(r, sitk.sitkUInt8) for r in res]
        res = Segmentation(sitk.Compose(*res), roi_names=segmentation.roi_names)
    return res
class Segmentation(sitk.Image):
    """A multi-label segmentation stored as a vector-pixel SimpleITK image.

    Each vector component holds one ROI's binary mask; the background is not
    stored explicitly.  ``roi_names`` maps ROI name -> 1-based label index.
    """

    def __init__(self, segmentation, roi_names=None):
        super().__init__(segmentation)
        # One vector component per ROI label.
        self.num_labels = self.GetNumberOfComponentsPerPixel()
        if not roi_names:
            # No names supplied: synthesize "label_i" names for every component.
            self.roi_names = {f"label_{i}": i for i in range(1, self.num_labels+1)}
        else:
            self.roi_names = roi_names
            # Shift to 1-based labels if the caller numbered ROIs from 0.
            if 0 in self.roi_names.values():
                self.roi_names = {k : v+1 for k, v in self.roi_names.items()}
            # Fill in generated names for any components without a name.
            if len(self.roi_names) != self.num_labels:
                for i in range(1, self.num_labels+1):
                    if i not in self.roi_names.values():
                        self.roi_names[f"label_{i}"] = i

    def get_label(self, label=None, name=None, relabel=False):
        """Return one ROI as a scalar binary image, selected by ``label`` or ``name``.

        ``label=0`` returns the implicit background mask.  If ``relabel`` is
        true, foreground voxels are multiplied by the label value instead of
        staying 1.
        """
        if label is None and name is None:
            raise ValueError("Must pass either label or name.")
        if label is None:
            label = self.roi_names[name]
        if label == 0:
            # background is stored implicitly and needs to be computed
            label_arr = sitk.GetArrayViewFromImage(self)
            label_img = sitk.GetImageFromArray((label_arr.sum(-1) == 0).astype(np.uint8))
        else:
            # Vector component index is 0-based while labels are 1-based.
            label_img = sitk.VectorIndexSelectionCast(self, label - 1)
            if relabel:
                label_img *= label
        return label_img

    def to_label_image(self):
        """Collapse the vector mask into one scalar label image (argmax + 1, 0 = background)."""
        arr, *_ = image_to_array(self)
        # TODO handle overlapping labels
        label_arr = np.where(arr.sum(-1) != 0, arr.argmax(-1) + 1, 0)
        label_img = array_to_image(label_arr, reference_image=self)
        return label_img

    # TODO also overload other operators (arithmetic, etc.)
    # with some sensible behaviour
    def __getitem__(self, idx):
        # Preserve the Segmentation type (and ROI names) across slicing.
        res = super().__getitem__(idx)
        if isinstance(res, sitk.Image):
            res = Segmentation(res, self.roi_names)
        return res

    def __repr__(self):
        return f"<Segmentation with ROIs: {self.roi_names!r}>"
| 35.75 | 99 | 0.628705 | 1,972 | 0.656677 | 0 | 0 | 303 | 0.100899 | 0 | 0 | 279 | 0.092907 |
b4f93b9450eed02e00244a8c824d6cbeda2da9f1 | 1,432 | py | Python | simtbx/diffBragg/attr_list.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | simtbx/diffBragg/attr_list.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | simtbx/diffBragg/attr_list.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | from __future__ import division
"""
critical properties of diffBragg objects which should be logged for reproducibility
"""
# TODO : implement a savestate and getstate for these objects
# attrs of diffBragg() instances
# Names of diffBragg() instance properties captured when logging a simulation
# run for reproducibility (see module docstring).
DIFFBRAGG_ATTRS = [
  'Amatrix',
  'Bmatrix',
  'Ncells_abc',
  'Ncells_abc_aniso',
  'Ncells_def',
  'Npix_to_allocate',
  'Omatrix',
  'Umatrix',
  'beamsize_mm',
  'compute_curvatures',
  'default_F',
  'detector_thick_mm',
  'detector_thickstep_mm',
  'detector_thicksteps',
  'detector_twotheta_deg',
  'device_Id',
  'diffuse_gamma',
  'diffuse_sigma',
  'exposure_s',
  'fluence',
  'flux',
  'has_anisotropic_mosaic_spread',
  'interpolate',
  'isotropic_ncells',
  'lambda_coefficients',
  'mosaic_domains',
  'mosaic_spread_deg',
  'no_Nabc_scale',
  'nopolar',
  'only_diffuse',
  'only_save_omega_kahn',
  'oversample',
  'oversample_omega',
  'phi_deg',
  'phistep_deg',
  'phisteps',
  'point_pixel',
  'polar_vector',
  'polarization',
  'spindle_axis',
  'spot_scale',
  'twotheta_axis',
  'unit_cell_Adeg',
  'unit_cell_tuple',
  'use_diffuse',
  'use_lambda_coefficients']

# properties of nanoBragg_crystal.NBcryst instances
NB_CRYST_ATTRS = [
  'anisotropic_mos_spread_deg',
  'isotropic_ncells',
  'miller_is_complex',
  'mos_spread_deg',
  'n_mos_domains',
  'symbol',
  'xtal_shape']

# properties of nanoBragg_beam.NBbeam instances
NB_BEAM_ATTRS = [
  'divergence',
  'polarization_fraction',
  'size_mm',
  'number_of_sources',
  'unit_s0']
| 18.842105 | 83 | 0.74162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,158 | 0.808659 |
b4fb9b530079d2e7e3c6f10c76a9f743b47d6a04 | 1,571 | py | Python | tests/data/fields/field_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 11,433 | 2017-06-27T03:08:46.000Z | 2022-03-31T18:14:33.000Z | tests/data/fields/field_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 4,006 | 2017-06-26T21:45:43.000Z | 2022-03-31T02:11:10.000Z | tests/data/fields/field_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 2,560 | 2017-06-26T21:16:53.000Z | 2022-03-30T07:55:46.000Z | from allennlp.data.fields import Field
def test_eq_with_inheritance():
    """Field equality must compare state across an all-__slots__ hierarchy.

    Each subclass adds one slot; equality should consider the slots of every
    level, not just the most-derived class.
    """
    class SubField(Field):
        __slots__ = ["a"]

        def __init__(self, a):
            self.a = a

    class SubSubField(SubField):
        __slots__ = ["b"]

        def __init__(self, a, b):
            super().__init__(a)
            self.b = b

    class SubSubSubField(SubSubField):
        __slots__ = ["c"]

        def __init__(self, a, b, c):
            super().__init__(a, b)
            self.c = c

    assert SubField(1) == SubField(1)
    assert SubField(1) != SubField(2)
    assert SubSubField(1, 2) == SubSubField(1, 2)
    assert SubSubField(1, 2) != SubSubField(1, 1)
    # Inherited slot 'a' must also participate in the comparison.
    assert SubSubField(1, 2) != SubSubField(2, 2)
    assert SubSubSubField(1, 2, 3) == SubSubSubField(1, 2, 3)
    assert SubSubSubField(1, 2, 3) != SubSubSubField(0, 2, 3)
def test_eq_with_inheritance_for_non_slots_field():
    """Field equality for a subclass that keeps state in __dict__ (no __slots__)."""
    class PlainField(Field):
        def __init__(self, value):
            self.value = value

    assert PlainField(1) == PlainField(1)
    assert PlainField(1) != PlainField(2)
def test_eq_with_inheritance_for_mixed_field():
    """Field equality when a __slots__ parent is extended by a __dict__ child.

    The parent stores 'a' in a slot; the child stores 'b' in __dict__.
    Equality must account for both storage mechanisms.
    """
    class SubField(Field):
        __slots__ = ["a"]

        def __init__(self, a):
            self.a = a

    class SubSubField(SubField):
        def __init__(self, a, b):
            super().__init__(a)
            self.b = b

    assert SubField(1) == SubField(1)
    assert SubField(1) != SubField(2)
    assert SubSubField(1, 2) == SubSubField(1, 2)
    assert SubSubField(1, 2) != SubSubField(1, 1)
    # The slotted attribute 'a' from the parent must still be compared.
    assert SubSubField(1, 2) != SubSubField(2, 2)
| 23.447761 | 61 | 0.583705 | 703 | 0.447486 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.007638 |
b4fcfbbaf3fe162cf04a9a20b000b200e3971f97 | 1,870 | py | Python | LMToolKit_Setup.py | Meadowlion/Float | fb93866cdc6061943fbec32c0aff78dcbc7df9fb | [
"BSD-3-Clause"
] | 1 | 2021-10-21T03:53:34.000Z | 2021-10-21T03:53:34.000Z | LMToolKit_Setup.py | Meadowlion/LM_ToolKit | fb93866cdc6061943fbec32c0aff78dcbc7df9fb | [
"BSD-3-Clause"
] | null | null | null | LMToolKit_Setup.py | Meadowlion/LM_ToolKit | fb93866cdc6061943fbec32c0aff78dcbc7df9fb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 10:19:44 2021
@author: sb069
"""
import os
import errno
from os.path import expanduser
# Install system-level dependencies (requires sudo and an active conda env).
os.system("sudo apt-get update")
os.system("sudo apt install nvidia-cuda-toolkit")
os.system("sudo apt install python3.8")
os.system("conda install flye")

# Create the LMToolkit directory tree under ~/Documents.
# NOTE(review): os.system() returns an exit status and does not raise OSError,
# and `mkdir -p` succeeds on existing directories, so these try/except blocks
# appear to be inert — confirm before relying on them.
try:
    os.system("mkdir -p ~/Documents/LMToolkit")
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
try:
    os.system("mkdir -p ~/Documents/LMToolkit/MinionOut")
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
try:
    os.system("mkdir -p ~/Documents/LMToolkit/Assembly_Out")
except OSError as e:
    if e.errno != errno.EEXIST:
        raise

#Downloads=str(os.environ['_'])
#Downloads=Download.replace('/LMToolkit_Setup.py','')
#print("THIS IS A TEST " + Downloads)
#os.chdir(Downloads)
#print("mv LMToolKit.py ~/Documents/LMToolkit")

# Move the toolkit files (assumed to sit next to this script — presumably the
# download directory; TODO confirm) into the install location.
try:
    os.system("mv LMToolkit.py ~/Documents/LMToolkit/LMToolkit.py")
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
try:
    os.system("mv README.md ~/Documents/LMToolkit/README.md")
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
try:
    os.system("mv LMToolKit_License ~/Documents/LMToolkit/LMToolKit_License")
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
def main():
    """Interactively record the guppy basecaller paths into config files.

    Prompts the user for the basecaller binary path and the basecaller config
    path, and writes them to ``config.txt`` and ``guppyconfig.txt`` inside
    ``~/Documents/LMToolkit``.
    """
    hdir = os.path.expanduser('~')
    os.chdir(hdir + '/Documents/LMToolkit')
    # The `with` blocks close the files automatically; the original code also
    # called close() explicitly inside the block, which was redundant.
    with open("config.txt", 'w') as config1:
        basecall = input("What is the path of your guppy basecaller (ont-guppy/bin/guppy_basecaller)?: ")
        config1.write(basecall)
    with open("guppyconfig.txt", "w") as config2:
        basecall_config = input("What is the path of your guppy basecaller config file (ont-guppy/data/config) please read readme for details )?: ")
        config2.write(basecall_config)
    print("LMToolKit is now setup! Happy Basecalling!")

main()
| 27.101449 | 146 | 0.69893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 978 | 0.522995 |
b4fd927462dd602edb7c0fa6683dedd4a121b0e1 | 1,006 | py | Python | oxasl_ve/wrappers/veaslc.py | ibme-qubic/oxasl_ve | 51f0701573876042a7fc5f1b91c5085600c6e3c3 | [
"Apache-2.0"
] | 1 | 2021-01-20T12:06:31.000Z | 2021-01-20T12:06:31.000Z | oxasl_ve/wrappers/veaslc.py | physimals/oxasl_ve | f56ea316fddf8b59216a5df7a557bf0c3ea6972b | [
"Apache-2.0"
] | null | null | null | oxasl_ve/wrappers/veaslc.py | physimals/oxasl_ve | f56ea316fddf8b59216a5df7a557bf0c3ea6972b | [
"Apache-2.0"
] | null | null | null | """
Wrapper for the veasl command
"""
import fsl.utils.assertions as asrt
from fsl.wrappers import wrapperutils as wutils
@wutils.fileOrImage('data', 'roi', outprefix='out')
@wutils.fileOrArray('veslocs', 'encdef', 'modmat')
@wutils.fileOrText(' ')
@wutils.fslwrapper
def veaslc(data, roi, veslocs, encdef, imlist, modmat, out="veaslc", **kwargs):
    """
    Wrapper for the ``veasl`` command.

    Required options:

      :param data:    ASL data (image or file path)
      :param roi:     brain mask (image or file path)
      :param veslocs: vessel locations (array or file path)
      :param encdef:  encoding setup definition (array or file path)
      :param imlist:  image list specification
      :param modmat:  modulation matrix (array or file path)

    Additional options:

      :param out:    output basename (default "veaslc")
      :param method: "map" (default) adds the ``--map`` flag; any other value omits it
      :param inferv/debug/diff: boolean flags passed through when true
    """
    # Boolean kwargs that should appear on the command line only when true.
    valmap = {
        'inferv' : wutils.SHOW_IF_TRUE,
        'debug' : wutils.SHOW_IF_TRUE,
        'diff' : wutils.SHOW_IF_TRUE,
    }

    asrt.assertIsNifti(data)

    cmd = ['veasl', '--data=%s' % data, '--mask=%s' % roi, '--enc-setup=%s' % encdef,
           '--imlist=%s' % imlist, '--vessels=%s' % veslocs, '--modmat=%s' % modmat,
           '--out=%s' % out]
    if kwargs.pop("method", "map") == "map":
        cmd.append('--map')
    # Remaining kwargs are converted to "--name=value" style arguments.
    cmd += wutils.applyArgStyle('--=', valmap=valmap, singlechar_args=True, **kwargs)
    return cmd
| 27.189189 | 86 | 0.595427 | 0 | 0 | 0 | 0 | 882 | 0.87674 | 0 | 0 | 339 | 0.336978 |
b4fe009337e782310f135a773be727eb38fed3e9 | 3,455 | py | Python | leo/modes/kivy.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | 1,550 | 2015-01-14T16:30:37.000Z | 2022-03-31T08:55:58.000Z | leo/modes/kivy.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | 2,009 | 2015-01-13T16:28:52.000Z | 2022-03-31T18:21:48.000Z | leo/modes/kivy.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | 200 | 2015-01-05T15:07:41.000Z | 2022-03-07T17:05:01.000Z | # Leo colorizer control file for kivy mode.
# This file is in the public domain.
# Properties for kivy mode.
properties = {
    "ignoreWhitespace": "false",
    "lineComment": "#",
}

# Attributes dict for kivy_main ruleset.
kivy_main_attributes_dict = {
    "default": "null",
    "digit_re": "",
    "escape": "",
    "highlight_digits": "true",
    "ignore_case": "true",
    "no_word_sep": "",
}

# Dictionary of attributes dictionaries for kivy mode.
attributesDictDict = {
    "kivy_main": kivy_main_attributes_dict,
}

# Keywords dict for kivy_main ruleset.
# Maps each recognised kv-language word to the colorizer style name
# (keyword1/keyword2) used to colour it.
kivy_main_keywords_dict = {
    "app": "keyword2",
    "args": "keyword2",
    "canvas": "keyword1",
    "id": "keyword1",
    "root": "keyword2",
    "self": "keyword2",
    "size": "keyword1",
    "text": "keyword1",
    "x": "keyword1",
    "y": "keyword1",
}

# Dictionary of keywords dictionaries for kivy mode.
keywordsDictDict = {
    "kivy_main": kivy_main_keywords_dict,
}
# Rules for kivy_main ruleset.
def kivy_rule0(colorer, s, i):
    # Line comments: colour from '#' to the end of the line.
    return colorer.match_eol_span(s, i, kind="comment1", seq="#",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="", exclude_match=False)

def kivy_rule1(colorer, s, i):
    # Double-quoted string literals, delegated to the kivy::literal_one ruleset.
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="kivy::literal_one", exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def kivy_rule2(colorer, s, i):
    # Keyword matching against kivy_main_keywords_dict.
    return colorer.match_keywords(s, i)

# Rules dict for kivy_main ruleset: maps a starting character to the list of
# rules that may begin there.  Built programmatically; the resulting value is
# identical to the hand-written literal it replaces.
_KEYWORD_START_CHARS = (
    "0123456789@"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz")
rulesDict1 = {ch: [kivy_rule2] for ch in _KEYWORD_START_CHARS}
rulesDict1["\""] = [kivy_rule1]
rulesDict1["#"] = [kivy_rule0]

# x.rulesDictDict for kivy mode.
rulesDictDict = {
    "kivy_main": rulesDict1,
}

# Import dict for kivy mode.
importDict = {}
| 25.218978 | 75 | 0.546165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 996 | 0.288278 |
3703676a01606399f8153e980d2ac1891a392183 | 10,642 | py | Python | src/predictor.py | prem2017/domain-adaptation-image | 77d5dd85b5895f16c5fc2e7ddfad257367f78ad1 | [
"Apache-2.0"
] | 1 | 2020-09-01T21:48:16.000Z | 2020-09-01T21:48:16.000Z | src/predictor.py | prem2017/domain-adaptation-image | 77d5dd85b5895f16c5fc2e7ddfad257367f78ad1 | [
"Apache-2.0"
] | 4 | 2021-06-08T22:23:07.000Z | 2022-03-12T00:47:33.000Z | src/predictor.py | prem2017/domain-adaptation-image | 77d5dd85b5895f16c5fc2e7ddfad257367f78ad1 | [
"Apache-2.0"
] | 1 | 2021-09-08T05:01:52.000Z | 2021-09-08T05:01:52.000Z | # -*- coding: utf-8 -*-
# ©Prem Prakash
# Predictor module
import os
import sys
import pdb
from copy import deepcopy
import argparse
import onnx
import onnxruntime as ort
import math
from matplotlib.pyplot import imshow
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from .models import ADAConvNet, ADAConvNetClsLabel, ADAConvNetSrcLabel, PretrainedADAConvNet
from .image_dataset import ImageDataset
from .transformers import NormalizeImageData
from .report_gen_helper import compute_label_and_prob, compute_prob_all, gen_metric_report
from .report_gen_helper import plot_roc_curves_binclass, plot_roc_curves_multiclass
from . import util
from .util import kconfig
from .util import logger
#----------------------------------------------------------------------------
logger = util.setup_logger('predictor_output.log')
device = util.get_training_device()
#----------------------------------------------------------------------------
def load_trained_model(model_fname, use_batchnorm=False):
    """Load a trained domain-adaptation model and set it to eval mode.

    Depending on ``kconfig.tr.use_pretrained_flag`` this returns either a
    single `PretrainedADAConvNet` module or a dict with keys
    'feature_repr_model', 'cls_model' and 'src_model'.  Weights are loaded
    onto CPU (``map_location`` keeps storages where they are deserialized).
    """
    model_path = os.path.join(util.get_models_dir(), model_fname)

    # NN Args
    net_args = {}
    net_args['in_channels'] = 3
    # net_args['num_cls_lbs'] = kconfig.img.num_cls_lbs
    # net_args['num_src_lbs'] = kconfig.img.num_src_lbs
    net_args['model_img_size'] = kconfig.img.size
    net_args['nonlinearity_function'] = None # nn.LeakyReLU()
    net_args['use_batchnorm'] = use_batchnorm # kconfig.tr.use_batchnorm # False
    net_args['dropout'] = kconfig.hp.train.dropout
    # net_args['use_batchnorm'] = use_batchnorm
    model_dict = {}
    if kconfig.tr.use_pretrained_flag:
        # net_args['eps'] = 1e-3
        model_dict = PretrainedADAConvNet(**net_args)
    else:
        model_dict['feature_repr_model'] = ADAConvNet(**net_args)
        feature_repr_len = ADAConvNet.compute_feature_repr_len(kconfig.img.size)
        msg = f'[feature_repr_len] = {feature_repr_len}'
        print(msg); logger.info(msg);
        model_dict['cls_model'] = ADAConvNetClsLabel(feature_repr_len, kconfig.img.num_cls_lbs)
        model_dict['src_model'] = ADAConvNetSrcLabel(feature_repr_len, kconfig.img.num_src_lbs)

    saved_state_dict = torch.load(model_path, map_location= lambda storage, loc: storage)
    # Available dict
    # raw_model_dict = model.state_dict()
    # NOTE(review): the checkpoint is assumed to be a dict of per-component
    # state dicts.  When use_pretrained_flag is set, model_dict is a single
    # module and `model_dict[key]` relies on it supporting item access —
    # confirm PretrainedADAConvNet implements that.
    for key, model_state_dict in saved_state_dict.items():
        print(model_dict[key].load_state_dict(model_state_dict))
    # pdb.set_trace()
    if isinstance(model_dict, dict):
        for key, model in model_dict.items():
            model_dict[key] = model.eval()
    else:
        model_dict = model_dict.eval()
    return model_dict
#----------------------------------------------------------------------------
# https://pytorch.org/docs/stable/onnx.html
def save_in_onnx(model, onnx_path):
    """Export a single model to ONNX with a dynamic batch dimension.

    Note: this method is not adapted for the dictionary-of-models layout;
    it expects one callable module.
    """
    # First set the model_name and load
    # pdb.set_trace()
    h, w = kconfig.img.size # util.get_model_img_size()
    dummy_input = torch.randn(1, 3, h, w)
    print('[Dummy Output] = ', model(dummy_input))
    input_names = ['image_input']
    output_names = ['output_val']
    # TODO:
    # Batch axis (dim 0) is marked dynamic so the exported graph accepts any
    # batch size.
    torch.onnx.export(model, dummy_input, onnx_path, verbose=True,
                      input_names=input_names, output_names=output_names,
                      dynamic_axes={'image_input': {0: 'batch'}, 'output_val': {0: 'batch'}})
    print('[Saved] at path = ', onnx_path)
#----------------------------------------------------------------------------
def load_from_onnx(onnx_path):
    """Load and validate an ONNX model, then smoke-test it with onnxruntime.

    Note: this method is not adapted for the dictionary-of-models layout.

    :param onnx_path: path to a previously exported ``.onnx`` file
    :return: the loaded ONNX ModelProto
    """
    # (Removed a stray active pdb.set_trace() left over from debugging — it
    # halted every non-interactive call to this function.)
    model_onnx = onnx.load(onnx_path)

    # Check that the IR is well formed
    print(onnx.checker.check_model(model_onnx))

    # Print a human readable representation of the graph
    print(onnx.helper.printable_graph(model_onnx.graph))

    h, w = kconfig.img.size # util.get_model_img_size()
    # astype(np.float32) is needed to conform to the model's input datatype.
    dummy_input = np.random.randn(2, 3, h, w).astype(np.float32)

    # Run one inference through onnxruntime to confirm the export is usable.
    ort_session = ort.InferenceSession(onnx_path)
    outputs = ort_session.run(['output_val'], {'image_input': dummy_input})
    print(outputs)
    return model_onnx
#----------------------------------------------------------------------------
def generate_report(model, test_dataloader, image_names, model_name, save_all_class_prob=False, model_type='test_set'):
    """Run inference over ``test_dataloader``, log metrics and write a CSV.

    When labels are present, computes the metric report / ROC plots for the
    *class* labels (object classes), not the domain/source labels.  If
    ``image_names`` is given, per-image predictions are written to
    ``<model_name>_test_output_prediction.csv`` in the results directory;
    ``save_all_class_prob`` additionally writes one probability column per
    class (multiclass only).
    """
    y_true_all = None
    y_pred_all = None
    y_pred_prob_all = None
    y_pred_label_all = None

    # TODO:
    with torch.no_grad():
        for i, xy in enumerate(test_dataloader):
            print('[Predicting for] batch i = ', i)
            X, y = xy
            y_cls_lbs, y_src_lbs = y
            X = X.to(device=device, dtype=torch.float32) # or float is alias for float32
            # NaN in the first class label marks an unlabeled batch.
            y_check = y_cls_lbs
            if math.isnan(y_check[0].item()):
                y = None
            else:
                #print(f'[Class Labels Counts] = {y_cls_lbs.unique(return_counts=True) }', end='')
                #print(f'[Source Labels Counts] = {y_src_lbs.unique(return_counts=True) }', end='')
                y_cls_lbs = y_cls_lbs.to(device=device, dtype=torch.long)
                y_src_lbs = y_src_lbs.to(device=device, dtype=torch.long)
                y = (y_cls_lbs, y_src_lbs)
            if isinstance(model, dict):
                features_repr = model['feature_repr_model'](X)
                y_pred_cls = cls_output = model['cls_model'](features_repr)
                y_pred_src = src_output = model['src_model'](features_repr)
            else:
                # NOTE(review): this branch never assigns y_pred_cls, so the
                # accumulation below would raise NameError for a non-dict
                # model with labeled data — the single-model path looks
                # unfinished.
                output = model(X) #
            # Because we need to only generate 'metric' on class labels i.e. object classes. It is not for the origin of data.
            if y is not None: # if label is not none
                y_true_all = y_cls_lbs if y_true_all is None else torch.cat((y_true_all, y_cls_lbs), dim=0)
                y_pred_all = y_pred_cls if y_pred_all is None else torch.cat((y_pred_all, y_pred_cls), dim=0)

    num_classes = kconfig.img.num_cls_lbs # len(util.get_class_names())
    if y_true_all is not None:
        report, f1_checker, auc_val = gen_metric_report(y_true_all, y_pred_all)
        if num_classes > 2:
            plot_roc_curves_multiclass(report, 'inference_time', model_type= model_type, ext_cmt=model_name)
        else:
            plot_roc_curves_binclass(report, 'inference_time', model_type= model_type, ext_cmt=model_name)
        # ROC arrays are bulky; drop them before pretty-printing the report.
        report['roc'] = 'Removed'
        msg = util.pretty(report)
        logger.info(msg); print(msg)
        y_true_all = y_true_all.cpu().numpy().reshape(-1, 1)
    y_pred_max_prob_all, y_pred_label_all = compute_label_and_prob(y_pred_all)
    y_pred_label_all = y_pred_label_all.cpu().numpy().reshape(-1, 1)
    y_pred_max_prob_all = y_pred_max_prob_all.cpu().numpy().reshape(-1, 1)
    # Truncate probabilities to 4 decimal places.
    # NOTE(review): the divisor is hard-coded 10**4 here but 10**round_upto
    # below — consistent only while round_upto == 4.
    round_upto = 4
    y_pred_max_prob_all = (y_pred_max_prob_all * 10**round_upto).astype(int) / 10**4
    class_names = kconfig.img.labels_dict.values()
    # output prediction in csv
    if image_names is not None:
        image_names = image_names.reshape(-1, 1)
        if save_all_class_prob and num_classes > 2:
            header = ['image_name'] + [cl_name + '_class_prob' for cl_name in class_names] + ['pred_label']
            y_pred_prob_all = compute_prob_all(y_pred_all)
            y_pred_prob_all = y_pred_prob_all.cpu().numpy()
            y_pred_prob_all = (y_pred_prob_all * 10**round_upto).astype(int) / 10**round_upto
            df = np.hstack((image_names, y_pred_prob_all, y_pred_label_all))
        else:
            header = ['image_name', 'pred_label_prob', 'pred_label']
            df = np.hstack((image_names, y_pred_max_prob_all, y_pred_label_all))
        if y_true_all is not None:
            header.append('true_label')
            df = np.hstack((df, y_true_all))
        df = pd.DataFrame(df, columns=header)
        df.to_csv(path_or_buf=os.path.join(util.get_results_dir(), model_name + '_test_output_prediction.csv'), sep=',', index=None, header=header)
    return
#----------------------------------------------------------------------------
# TODO: use model to get an prediction from command line
def get_arguments_parser(img_datapath):
    """Build the command-line parser for single-image prediction.

    ``img_datapath`` is installed as the default of the required ``-i/--img``
    option (the option must still be supplied on the command line).
    """
    parser = argparse.ArgumentParser(
        description='Provide arguments for fullpath to an images to receive prob score and class lable.')
    parser.add_argument(
        '-i', '--img',
        type=str,
        required=True,
        default=img_datapath,
        help='Provide full path to image location.')
    return parser
#----------------------------------------------------------------------------
def main_onnx(also_test=False):
    """Export the trained 'scratch' model to ONNX; optionally reload to verify.

    Note: this method is not adapted for the dictionary-of-models layout.
    """
    # pdb.set_trace()
    util.set_trained_model_name(ext_cmt='scratch')
    base_model_fname = util.get_trained_model_name()
    use_batchnorm = kconfig.tr.use_batchnorm
    cmt = '' # '_maxf1'
    model_fname = base_model_fname + cmt
    model = load_trained_model(model_fname, use_batchnorm)

    onnx_path = os.path.join(util.get_onnx_dir(), model_fname + '.onnx')
    save_in_onnx(model, onnx_path)
    print('[Saved] in ONNX format')
    if also_test:
        # Round-trip check: reload the exported file and run a dummy inference.
        load_from_onnx(onnx_path)
#----------------------------------------------------------------------------
def main():
    """Generate prediction reports for every saved model-checkpoint variant.

    Builds the test dataset/dataloader from kconfig, loads each checkpoint
    ('', '_maxauc', '_maxf1', '_minval', '_mintrain') and writes its metric
    report and prediction CSV via generate_report().
    """
    util.reset_logger('predictor_output.log')
    # First set the model_name and load
    util.set_trained_model_name(ext_cmt='_paper') # ['_paper', 'pretrained_resnet50']
    # base_model_fname = util.get_trained_model_name()
    use_batchnorm = kconfig.tr.use_batchnorm

    img_info_datapath = util.get_test_info_datapath() # get_all_info_datapath() # get_test_info_datapath

    # Dataset Args
    data_dir = kconfig.img.data_dir # ''
    data_info_args = {}
    data_info_args['root_data_dir'] = util.get_data_dir(data_dir)
    data_info_args['model_img_size'] = kconfig.img.size
    data_info_args['normalizer_dict_path'] = util.get_normalization_info_pickle_path()
    data_info_args['has_label'] = True

    data_info = {'img_info_datapath': img_info_datapath, **data_info_args}
    test_dataset = ImageDataset(**data_info)
    image_names = test_dataset.get_img_names()
    test_dataloader = DataLoader(dataset=test_dataset, batch_size=kconfig.test.batch_size)

    ex = '' #
    model_fnames = [util.get_custom_model_name(ex) for ex in ['', '_maxauc', '_maxf1', '_minval', '_mintrain']] # ['', '_maxauc', '_maxf1', '_minval', '_mintrain']
    models = [load_trained_model(model_fname, use_batchnorm) for model_fname in model_fnames]

    # NOTE(review): `output` is returned but never populated — it is always {}.
    output = {}
    for i, model in enumerate(models):
        print(f'\n\n[Generating Report] for model = {model_fnames[i]}\n')
        generate_report(model, test_dataloader, image_names=image_names, model_name=model_fnames[i], save_all_class_prob=True)

    print('\n\n######################################################\n')
    print(output)
    print('\n######################################################\n\n')
    return output
#----------------------------------------------------------------------------
if __name__ == '__main__':
    print('[Run Test]')
    main()
    # main_onnx(True)
| 29.236264 | 162 | 0.682391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,512 | 0.329982 |
370445cf9999e96692fc51abf088c2cec9bdb63b | 4,004 | py | Python | tests/ut/python/pipeline/parse/test_create_obj.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | 2 | 2020-04-28T03:49:10.000Z | 2020-04-28T03:49:13.000Z | tests/ut/python/pipeline/parse/test_create_obj.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/pipeline/parse/test_create_obj.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
@File : test_create_obj.py
@Author:
@Date : 2019-06-26
@Desc : test create object instance on parse function, eg: 'construct'
Support class : nn.Cell ops.Primitive
Support parameter: type is define on function 'ValuePtrToPyData'
(int,float,string,bool,tensor)
"""
import logging
import numpy as np
import mindspore.nn as nn
from mindspore import context
from mindspore.ops import operations as P
from mindspore.common.api import ms_function
from mindspore.common.tensor import Tensor
from ...ut_filter import non_graph_engine
log = logging.getLogger("test")
log.setLevel(level=logging.ERROR)
class Net(nn.Cell):
    """ Net definition: applies Softmax; deliberately creates the Cell
    inside construct to exercise object creation during graph parsing. """

    def __init__(self):
        super(Net, self).__init__()
        self.softmax = nn.Softmax(0)
        self.axis = 0

    def construct(self, x):
        # Instantiating nn.Softmax here (rather than using self.softmax) is
        # the behaviour under test.
        x = nn.Softmax(self.axis)(x)
        return x


# Test: creat CELL OR Primitive instance on construct
@non_graph_engine
def test_create_cell_object_on_construct():
    """ test_create_cell_object_on_construct """
    log.debug("begin test_create_object_on_construct")
    context.set_context(mode=context.GRAPH_MODE)
    np1 = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_me = Tensor(np1)

    net = Net()
    output = net(input_me)
    out_me1 = output.asnumpy()
    print(np1)
    print(out_me1)
    log.debug("finished test_create_object_on_construct")
# Test: creat CELL OR Primitive instance on construct
class Net1(nn.Cell):
    """ Net1 definition: creates a Primitive (TensorAdd) inside construct. """

    def __init__(self):
        super(Net1, self).__init__()
        self.add = P.TensorAdd()

    @ms_function
    def construct(self, x, y):
        # Instantiating the Primitive here (rather than using self.add) is
        # the behaviour under test.
        add = P.TensorAdd()
        result = add(x, y)
        return result


@non_graph_engine
def test_create_primitive_object_on_construct():
    """ test_create_primitive_object_on_construct """
    log.debug("begin test_create_object_on_construct")
    x = Tensor(np.array([[1, 2, 3], [1, 2, 3]], np.float32))
    y = Tensor(np.array([[2, 3, 4], [1, 1, 2]], np.float32))

    net = Net1()
    net.construct(x, y)
    log.debug("finished test_create_object_on_construct")
# Test: creat CELL OR Primitive instance on construct use many parameter
class NetM(nn.Cell):
    """ NetM definition: a Softmax Cell configured via constructor args. """

    def __init__(self, name, axis):
        super(NetM, self).__init__()
        # self.relu = nn.ReLU()
        self.name = name
        self.axis = axis
        self.softmax = nn.Softmax(self.axis)

    def construct(self, x):
        x = self.softmax(x)
        return x


class NetC(nn.Cell):
    """ NetC definition: constructs NetM with multiple parameters inside
    construct — the "many parameter" case being tested. """

    def __init__(self, tensor):
        super(NetC, self).__init__()
        self.tensor = tensor

    def construct(self, x):
        x = NetM("test", 1)(x)
        return x


# Test: creat CELL OR Primitive instance on construct
@non_graph_engine
def test_create_cell_object_on_construct_use_many_parameter():
    """ test_create_cell_object_on_construct_use_many_parameter """
    log.debug("begin test_create_object_on_construct")
    context.set_context(mode=context.GRAPH_MODE)
    np1 = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_me = Tensor(np1)

    net = NetC(input_me)
    output = net(input_me)
    out_me1 = output.asnumpy()
    print(np1)
    print(out_me1)
    log.debug("finished test_create_object_on_construct")
| 30.105263 | 78 | 0.67008 | 1,061 | 0.264985 | 0 | 0 | 1,494 | 0.373127 | 0 | 0 | 1,725 | 0.430819 |
37058b29e2d2eb0d2cb3136d55f0713da2693a83 | 643 | py | Python | hw_asr/augmentations/spectrogram_augmentations/SpecAug.py | isdevnull/asr_hw | 9650506b80d4e38574b63390f79a6f01786b7d18 | [
"MIT"
] | null | null | null | hw_asr/augmentations/spectrogram_augmentations/SpecAug.py | isdevnull/asr_hw | 9650506b80d4e38574b63390f79a6f01786b7d18 | [
"MIT"
] | null | null | null | hw_asr/augmentations/spectrogram_augmentations/SpecAug.py | isdevnull/asr_hw | 9650506b80d4e38574b63390f79a6f01786b7d18 | [
"MIT"
] | null | null | null | import torchaudio.transforms
from torch import nn
from hw_asr.augmentations.base import AugmentationBase
from hw_asr.augmentations.random_apply import RandomApply
class SpecAug(AugmentationBase):
    """SpecAugment-style spectrogram augmentation.

    Applies frequency masking followed by time masking, each call with
    probability ``prob`` (via RandomApply).
    NOTE(review): does not call super().__init__() — confirm AugmentationBase
    requires no initialisation.
    """

    def __init__(self, freq_mask: int, time_mask: int, prob: float, *args, **kwargs):
        # freq_mask / time_mask are the maximum mask widths passed to
        # torchaudio's masking transforms.
        self.augmentation = nn.Sequential(
            torchaudio.transforms.FrequencyMasking(freq_mask),
            torchaudio.transforms.TimeMasking(time_mask)
        )
        self.prob = prob
        self.random_caller = RandomApply(self.augmentation, self.prob)

    def __call__(self, data, *args, **kwargs):
        return self.random_caller(data)
| 32.15 | 85 | 0.720062 | 475 | 0.738725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3705c276078ec2e2c7ef6800121e1f3622baddd8 | 1,982 | py | Python | acrawler/settings.py | s723138643/acrawler | ad21514fc1fe5a531b33c042ff2deb6bf566e902 | [
"MIT"
] | null | null | null | acrawler/settings.py | s723138643/acrawler | ad21514fc1fe5a531b33c042ff2deb6bf566e902 | [
"MIT"
] | null | null | null | acrawler/settings.py | s723138643/acrawler | ad21514fc1fe5a531b33c042ff2deb6bf566e902 | [
"MIT"
] | null | null | null | import json
import pathlib
DEFAULT_CONFIG = {
    # Default component classes (dotted import paths).
    'EngineClass': 'acrawler.engine.Engine',
    'SchedulerClass': 'acrawler.scheduler.Scheduler',
    'FilterClass': 'acrawler.filterlib.memfilter.MemFilter',
    'QueueClass': 'acrawler.queuelib.sqlitequeue.PrioritySQLiteQueue',
    # Per-module settings below.
    'spider': {
        'headers': {
            'User-Agent': (
                'Mozilla/5.0 (X11; Linux x86_64; rv:42.0)'
                ' Gecko/20100101 Firefox/43.0'),
        },
        'save_cookie': False,
        'debug': False,
    },
    'engine': {
        'threads': 1
    },
    'scheduler': {
        'queue':{
            'sqlite_path': './task'
        },
        'filter': {
            'hostonly': True,
            'maxredirect': None,  # default: no limit on redirects
            'maxdeep': None,  # default: no limit on crawl depth
            'db_path': './filter'
        }
    }
}
def get_settings_from_file(path):
    """Load settings from a JSON file at *path*, overlaid on the defaults.

    Returns a fresh deep copy of DEFAULT_CONFIG with the file's values merged
    in.  (The original implementation aliased DEFAULT_CONFIG directly, so
    merge_settings mutated the module-level defaults for every later caller.)
    """
    # json round-trip gives a deep copy without needing the ``copy`` module;
    # DEFAULT_CONFIG is JSON-serializable by construction.
    settings = json.loads(json.dumps(DEFAULT_CONFIG))
    p = pathlib.Path(path)
    if p.is_file():
        with p.open('r') as f:
            overrides = json.load(f)
        merge_settings(settings, overrides)
    return settings
def merge_settings(default, current):
    """Recursively overlay *current* onto *default*, in place.

    Only keys already present in *default* are considered.  Nested dicts are
    merged recursively; a value is overwritten only when both sides agree on
    whether it is a dict (type mismatches keep the default).  Returns the
    mutated *default* for convenience.
    """
    for name, base_value in default.items():
        if name not in current:
            continue
        override = current[name]
        if isinstance(base_value, dict):
            if isinstance(override, dict):
                merge_settings(base_value, override)
        elif not isinstance(override, dict):
            default[name] = override
    return default
def make_config(path):
    """Write the default settings to *path* as indented JSON.

    Useful for bootstrapping a user-editable config file.
    """
    settings = DEFAULT_CONFIG
    p = pathlib.Path(path)
    with p.open('w') as f:
        json.dump(settings, f, indent=True)

if __name__ == '__main__':
    # Quick manual smoke test of the settings loader.
    print(type(DEFAULT_CONFIG))
    print(DEFAULT_CONFIG)
    tmp = get_settings_from_file('config.json')
    print(tmp)
    print(type(tmp))
| 28.314286 | 74 | 0.510091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 576 | 0.28125 |
37061e0c420d7bf2c869932af34f099cf61b5c43 | 13,658 | py | Python | plots/plots/weight_distribution.py | pps-lab/rofl-project-code | eaa9f1aeca3a40ca939c0f723af0186af0f95f9b | [
"MIT"
] | 12 | 2021-07-08T13:27:54.000Z | 2021-12-25T14:53:26.000Z | plots/plots/weight_distribution.py | pps-lab/rofl-project-code | eaa9f1aeca3a40ca939c0f723af0186af0f95f9b | [
"MIT"
] | 1 | 2021-10-15T09:48:18.000Z | 2022-03-31T12:41:15.000Z | plots/plots/weight_distribution.py | pps-lab/rofl-project-code | eaa9f1aeca3a40ca939c0f723af0186af0f95f9b | [
"MIT"
] | 1 | 2021-11-24T19:21:38.000Z | 2021-11-24T19:21:38.000Z | from functools import reduce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import yaml
from matplotlib import cm
import pymongo
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
from common import _preprocess, setup_plt, query_data, get_colorful_styles, output_dir, COLOR_GRAY, yaxis_formatter, get_markers
def load_params_data():
weights = pd.read_csv("./data/weight_distribution/params_outside_bound_0.01.csv", index_col=0)
total_weights = 44426.0
# Convert to real array
weights['clients'] = weights['clients'].apply(lambda x: np.array(x.strip('][').split(', '), dtype=np.int32))
weights['honest_stddev'] = weights['clients'].apply(lambda x: np.std(x[:-1]))
weights['honest_mean'] = weights['clients'].apply(lambda x: np.mean(x[:-1]))
weights['mal'] = weights['clients'].apply(lambda x: x[-1])
weights['honest_stddev_perc'] = weights['honest_stddev'] / total_weights
weights['honest_mean_perc'] = weights['honest_mean'] / total_weights
weights['mal_perc'] = weights['mal'] / total_weights
weights = weights[weights["round"] <= 520]
return weights
def build_continuous_static_plot(name, df):
colors, linestyles = get_colorful_styles()
configs = {
'honest_mean_perc': {'label': 'Benign', 'marker': get_markers()[0], 'color': colors[1] },
'mal_perc': {'label': 'Malicious', 'marker': get_markers()[1], 'color': colors[0] },
}
markevery = 100
window_size = 20
markersize = 8
error_color = "0.85"
setup_plt(square=True)
with PdfPages(f"{output_dir}/{name}.pdf") as pdf:
fig, ax = plt.subplots()
custom_lines_colors = []
custom_lines_colors_names = []
##################
# Our 0.5% bound #
##################
plt.axhline(y=0.005, color=COLOR_GRAY, linestyle='dashed')
for index, suffix in enumerate(configs.keys()):
values_acc = df[suffix]
labels = df[f"round"]
config = configs[suffix]
if suffix == "mal_perc":
plt.plot(labels, values_acc.rolling(window_size).mean().shift(-window_size),
linestyle='solid', label=config['label'], color=config['color'],
linewidth=2, marker=config['marker'], markevery=markevery)
values_std = values_acc.rolling(window_size).std().shift(-window_size)
plt.fill_between(labels,
values_acc - values_std,
values_acc + values_std,
alpha=1, edgecolor='#3F7F4C', facecolor=error_color, linewidth=0)
else:
#values_acc.rolling(window_size).mean().shift(-window_size)
plt.plot(labels, values_acc.rolling(window_size).mean().shift(-window_size), #df['honest_stddev_perc'].rolling(window_size).mean().shift(-window_size),
linestyle='solid', label=config['label'], color=config['color'],
linewidth=2, marker=config['marker'], markevery=markevery)
values_std = df['honest_stddev_perc'].rolling(window_size).mean().shift(-window_size)
plt.fill_between(labels,
values_acc - values_std,
values_acc + values_std,
alpha=1, edgecolor='#3F7F4C', facecolor=error_color, linewidth=0)
custom_lines_colors.append(Line2D([0], [0], linestyle="-", lw=2, marker=config['marker'], color=config['color']))
custom_lines_colors_names.append(config['label'])
##########################
# General Format
##########################
ax.grid(True, axis="y", linestyle=':', color='0.6', zorder=0, linewidth=1.2)
##########################
# Y - Axis Format
##########################
# ax.set_ylim(ymin=-0.05, ymax=0.455)
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(yaxis_formatter))
plt.ylabel('% Weights above bound')
plt.xlabel('Round')
# Legend
# if leftmost:
line = Line2D([0], [0])
line.set_visible(False)
custom_lines_colors = [line] + custom_lines_colors
custom_lines_colors_names = ['Update type:'] + custom_lines_colors_names
leg1 = plt.legend(custom_lines_colors, custom_lines_colors_names,
bbox_to_anchor=(1.02, 1.), loc=4, ncol=6, columnspacing=0.75)
ax.add_artist(leg1)
indicator_lines =[
Line2D([0], [0], linestyle="dashed", lw=2, color=COLOR_GRAY)]
indicator_lines_labels = ["$p_v=0.005$"]
leg_line = plt.legend(indicator_lines, indicator_lines_labels,
bbox_to_anchor=(1., 1.), loc='upper right', ncol=1,
columnspacing=0.75)
ax.add_artist(leg_line)
# if leftmost:
for vpack in leg1._legend_handle_box.get_children()[:1]:
for hpack in vpack.get_children():
del hpack._children[0]
# if not leftmost:
# custom_lines_styles = [Line2D([0], [0], linestyle="-", lw=2, color=COLOR_GRAY),
# Line2D([0], [0], linestyle=":", lw=2, color=COLOR_GRAY)]
# leg_task = plt.legend(custom_lines_styles, ["Backdoor", "Main"],
# bbox_to_anchor=(1., 0.05), loc=4, ncol=1,
# columnspacing=0.75)
# ax.add_artist(leg_task)
# plt.legend(title='Bound', mode="expand", loc="lower left", labelspacing=.05, bbox_to_anchor=(1.01, 0, .6, 0))
pdf.savefig(bbox_inches='tight', pad_inches=0)
plt.savefig(f"{output_dir}/{name}.png", bbox_inches='tight', pad_inches=0)
plt.close()
return fig, df
def load_weight_distribution_single_round_data():
    """Load one malicious and one benign client update for a single round.

    Each .npy file holds a collection of weight arrays; they are flattened
    and concatenated into the columns 'mal_weights' and 'ben_weights' of a
    single index-aligned DataFrame (right-merged, so the index follows the
    most recently added column).
    """
    base = "./data/weight_distribution/updates"
    updates = {
        "mal": np.load(os.path.join(base, "2245_m_1.npy"), allow_pickle=True),
        "ben": np.load(os.path.join(base, "32_b_1.npy"), allow_pickle=True),
    }
    merged = pd.DataFrame()
    for label, update in updates.items():
        flat = np.concatenate([np.reshape(part, -1) for part in update])
        column = pd.DataFrame()
        column[f'{label}_weights'] = flat
        merged = merged.merge(column, how='right', left_index=True, right_index=True)
    return merged
def build_single_round(name, df):
    """Histogram benign vs. malicious weight values for a single round.

    Overlays the two weight distributions (columns 'mal_weights' and
    'ben_weights') with dashed lines at the +/-0.01 bound and writes the
    figure to ``<output_dir>/<name>.pdf`` and ``.png``.

    Returns:
        (fig, df): the matplotlib figure and the unchanged input frame.
    """
    setup_plt(square=True)
    bound = 0.01  # bound marked with vertical dashed lines
    with PdfPages(f"{output_dir}/{name}.pdf") as pdf:
        fig, ax = plt.subplots()
        colors, linestyles = get_colorful_styles()
        bins = np.linspace(-0.4, 0.4, 40)
        plt.hist(df["mal_weights"], color=colors[0], bins=bins, density=False)
        plt.hist(df["ben_weights"], color=colors[1], bins=bins, alpha=0.5, density=False)
        # Proxy artists for the legend (histograms have no line handles).
        custom_lines_colors = [
            Rectangle((0,0), 1, 1, facecolor=colors[1]),
            Rectangle((0,0), 1, 1, facecolor=colors[0])
        ]
        custom_lines_colors_names = [
            "Benign",
            "Malicious"
        ]
        plt.axvline(-bound, color=COLOR_GRAY, linestyle='dashed')
        plt.axvline(bound, color=COLOR_GRAY, linestyle='dashed')
        ##########################
        # General Format
        ##########################
        ax.grid(True, axis="y", linestyle=':', color='0.6', zorder=0, linewidth=1.2)
        ##########################
        # Y - Axis Format
        ##########################
        # ax.set_ylim(ymin=-0.05, ymax=1.05)
        # ax.set_yticks([0, 0.25, 0.5, 0.75, 1])
        # ax.set_xlim(xmin=-1, xmax=1)
        # if leftmost:
        plt.ylabel('Count')
        plt.xlabel('Weight value')
        indicator_lines = [
            Line2D([0], [0], linestyle="dashed", lw=2, color=COLOR_GRAY)]
        indicator_lines_labels = ["$L_\infty$-B$=0.01$"]
        leg_line = plt.legend(indicator_lines, indicator_lines_labels,
                              bbox_to_anchor=(1., 1.), loc='upper right', ncol=1,
                              columnspacing=0.75)
        ax.add_artist(leg_line)
        # Legend
        # if leftmost:
        line = Line2D([0], [0])
        line.set_visible(False)
        custom_lines_colors = [line] + custom_lines_colors
        custom_lines_colors_names = ['Update type:'] + custom_lines_colors_names
        leg1 = plt.legend(custom_lines_colors, custom_lines_colors_names,
                          bbox_to_anchor=(1.02, 1.), loc=4, ncol=6, columnspacing=0.75)
        ax.add_artist(leg1)
        # if leftmost:
        # Remove the invisible placeholder handle so 'Update type:' renders
        # as a plain text prefix in the legend.
        for vpack in leg1._legend_handle_box.get_children()[:1]:
            for hpack in vpack.get_children():
                del hpack._children[0]
        pdf.savefig(bbox_inches='tight', pad_inches=0)
        plt.savefig(f"{output_dir}/{name}.png", bbox_inches='tight', pad_inches=0)
        plt.close()
    return fig, df
def build_single_round_broken(name, df):
    """Same histogram as ``build_single_round`` but with a broken y-axis.

    The figure is split into a top panel zoomed to the 40k-45k count range
    and a bottom panel for 0-10k, joined by diagonal break marks; output
    goes to ``<output_dir>/<name>.pdf`` and ``.png``.

    Returns:
        (fig, df): the matplotlib figure and the unchanged input frame.
    """
    setup_plt(square=True)
    bound = 0.01  # bound marked with vertical dashed lines on both panels
    with PdfPages(f"{output_dir}/{name}.pdf") as pdf:
        fig, (ax, ax2) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [1, 2]})
        colors, linestyles = get_colorful_styles()
        bins = np.linspace(-0.4, 0.4, 40)
        # The same histograms are drawn on both panels; the y-limits set
        # below decide which count range each panel actually shows.
        ax.hist(df["mal_weights"], color=colors[0], bins=bins, density=False)
        ax.hist(df["ben_weights"], color=colors[1], bins=bins, alpha=0.5, density=False)
        ax2.hist(df["mal_weights"], color=colors[0], bins=bins, density=False)
        ax2.hist(df["ben_weights"], color=colors[1], bins=bins, alpha=0.5, density=False)
        # Proxy artists for the legend (histograms have no line handles).
        custom_lines_colors = [
            Rectangle((0, 0), 1, 1, facecolor=colors[1]),
            Rectangle((0, 0), 1, 1, facecolor=colors[0])
        ]
        custom_lines_colors_names = [
            "Benign",
            "Malicious"
        ]
        ax.axvline(-bound, color=COLOR_GRAY, linestyle='dashed')
        ax.axvline(bound, color=COLOR_GRAY, linestyle='dashed')
        ax2.axvline(-bound, color=COLOR_GRAY, linestyle='dashed')
        ax2.axvline(bound, color=COLOR_GRAY, linestyle='dashed')
        ##########################
        # General Format
        ##########################
        ax.grid(True, axis="y", linestyle=':', color='0.6', zorder=0, linewidth=1.2)
        ax2.grid(True, axis="y", linestyle=':', color='0.6', zorder=0, linewidth=1.2)
        ##########################
        # Y - Axis Format
        ##########################
        # ax.set_ylim(ymin=-0.05, ymax=1.05)
        # ax.set_yticks([40000])
        # ax.set_xlim(xmin=-1, xmax=1)
        ax.set_yticks(list(range(0, 50000, 2500)))
        ax2.set_yticks(list(range(0, 50000, 2500)))
        ax.set_ylim(ymin=40000-100, ymax=45000)
        ax2.set_ylim(ymin=0, ymax=10000)
        # Hide the facing spines between the two panels so they read as one
        # broken axis.
        ax.spines['bottom'].set_visible(False)
        ax2.spines['top'].set_visible(False)
        # ax.xaxis.tick_top()
        ax.xaxis.set_ticks_position('none')
        ax.tick_params(labeltop=False)  # don't put tick labels at the top
        ax2.xaxis.tick_bottom()
        d = .015  # how big to make the diagonal lines in axes coordinates
        # arguments to pass to plot, just so we don't keep repeating them
        kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
        ax.plot((-d, +d), (-2 * d, + 2 * d), **kwargs)  # top-left diagonal
        ax.plot((1 - d, 1 + d), (-2 * d, +2 * d), **kwargs)  # top-right diagonal
        kwargs.update(transform=ax2.transAxes)  # switch to the bottom axes
        ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
        ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal
        ##########################
        # Y - LEGEND
        ##########################
        # if leftmost:
        # plt.ylabel('Count')
        plt.xlabel('Weight value')
        plt.sca(ax)
        indicator_lines = [
            Line2D([0], [0], linestyle="dashed", lw=2, color=COLOR_GRAY)]
        indicator_lines_labels = ["$L_\infty$-B$=0.01$"]
        leg_line = plt.legend(indicator_lines, indicator_lines_labels,
                              bbox_to_anchor=(1., 1.), loc='upper right', ncol=1,
                              columnspacing=0.75)
        ax.add_artist(leg_line)
        # Legend
        # if leftmost:
        line = Line2D([0], [0])
        line.set_visible(False)
        custom_lines_colors = [line] + custom_lines_colors
        custom_lines_colors_names = ['Update type:'] + custom_lines_colors_names
        leg1 = plt.legend(custom_lines_colors, custom_lines_colors_names,
                          bbox_to_anchor=(1.02, 1.), loc=4, ncol=6, columnspacing=0.75)
        ax.add_artist(leg1)
        # if leftmost:
        # Remove the invisible placeholder handle so 'Update type:' renders
        # as a plain text prefix in the legend.
        for vpack in leg1._legend_handle_box.get_children()[:1]:
            for hpack in vpack.get_children():
                del hpack._children[0]
        ### Common y label
        fig.add_subplot(111, frameon=False)
        # hide tick and tick label of the big axes
        plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
        plt.grid(False)
        plt.ylabel("Count", labelpad=20)
        pdf.savefig(bbox_inches='tight', pad_inches=0)
        plt.savefig(f"{output_dir}/{name}.png", bbox_inches='tight', pad_inches=0)
        plt.close()
    return fig, df
37064e9b1f6c4c2026274a61dc624c50744caad0 | 45,476 | py | Python | analysis/Mass Action/DP/22Apro.py | tee-lab/PercolationModels | 687cb8189fafeb2e0d205ea4d8a660bd953bd7b1 | [
"BSD-3-Clause"
] | null | null | null | analysis/Mass Action/DP/22Apro.py | tee-lab/PercolationModels | 687cb8189fafeb2e0d205ea4d8a660bd953bd7b1 | [
"BSD-3-Clause"
] | null | null | null | analysis/Mass Action/DP/22Apro.py | tee-lab/PercolationModels | 687cb8189fafeb2e0d205ea4d8a660bd953bd7b1 | [
"BSD-3-Clause"
] | 1 | 2021-09-11T17:25:25.000Z | 2021-09-11T17:25:25.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 17:18:39 2021
@author: Koustav
"""
import os
import glob
import matplotlib.pyplot as plt
import seaborn as sea
import numpy as np
import pandas as pan
import math
import collections
import matplotlib.ticker as mtick
from mpl_toolkits import mplot3d
from matplotlib.collections import LineCollection
from scipy.optimize import curve_fit
import powerlaw
def pow_law(x, a, expo):
    """Power law ``a * x**expo`` (curve_fit model; works elementwise)."""
    return a * np.power(x, expo)
def trunc_pow_law(x, a, expo, trunc_expo):
    """Truncated power law ``a * x**expo * exp(trunc_expo * x)``.

    Used as a curve_fit model; ``trunc_expo`` is typically negative so the
    exponential factor cuts off the power-law tail.
    """
    return a * np.power(x, expo) * np.exp(trunc_expo * x)
def main_ind():
    """Per-run plots of the empirical PDF of |del(s)|.

    Walks the experiment CSVs under ``22Apret\\Apres 256+512\\256\\6``,
    computes the distribution of |s(t+lag) - s(t)| for each (L, p) run,
    and saves a log-log scatter plot per run under
    ``figures/del_S/DP/Individual/L_<L>_p_<p>``.  The 1-CDF conversion,
    truncated-power-law fitting and CSV export are currently commented out.
    ``cross_cor`` is defined elsewhere in this module; it appears to map
    (lag, L, p) to a frame cross-correlation value — confirm there.
    """
    fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
    #Stores decay data of cross-correlation between frames as a function of p.
    gaol={} #Stores truncated power law fit data.
    # One bucket per cross-correlation level (only used by the commented-out
    # fitting/export code below).
    gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
    gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
    L=0
    for i in range(6,7):
        base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
        files = glob.glob(base_path + "**/**/*.csv", recursive=True)
        for file in files:
            if (file == base_path + r"\dump\15_16_KungF---U.csv"):
                continue
            if (os.path.getsize(file) > 4096):
                #Keeping unwanted files out.
                print(file)
                data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
                '''
                data_temp resembles:
                | L, p, lag, #, s, s + del(s) |
                Hai
                '''
                p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
                '''if(p == 0.728):
                    print("Skipped")
                    continue'''
                # Column 5 becomes |del(s)| = |(s + del(s)) - s|.
                data_temp[:,5] -= data_temp[:,4]
                data_temp[:,5] = np.abs(data_temp[:,5])
                # Relative frequency of each distinct |del(s)| value.
                temp_freqs = dict(collections.Counter(data_temp[:,5]))
                a,b = data_temp.shape
                DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
                DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
                #Sorting array in increasing order of del(s).
                #DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
                #Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
                print("Sorted del(s) PDF:")
                print(DP_freqs)
                '''DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
                k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
                print("Total distinct del(s) samples:\t" +str(k))
                for j in range(k-3, -1, -1):
                    #Iterate over the PDF function in reverse.
                    DP_freqs[j,1] += DP_freqs[j+1,1]
                print("Sorted del(s) 1-CDF:")
                print(DP_freqs)'''
                # Build (and cd into) figures/del_S/DP/Individual/L_<L>_p_<p>.
                os.chdir("../../../figures")
                if(os.path.isdir("del_S")==False):
                    os.mkdir("del_S")
                os.chdir("del_S")
                if(os.path.isdir("DP")==False):
                    os.mkdir("DP")
                os.chdir("DP")
                if(os.path.isdir("Individual")==False):
                    os.mkdir("Individual")
                os.chdir("Individual")
                '''if(os.path.isdir("1-CDF")==False):
                    os.mkdir("1-CDF")
                os.chdir("1-CDF")'''
                if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
                    os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
                os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
                print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
                #hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S \geq \Delta s)$"])
                hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S = \Delta s)$"])
                fig = plt.figure(figsize=(6.4,4.8))
                f = sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$")
                f.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
                #Overlaying two seaborn plots.
                #ax = fig.add_subplot(111)
                #sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.5, s=2, ax= ax)
                #sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
                #ax.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
                plt.yscale('log'); plt.xscale('log')
                plt.xlim(1, 10**5)
                plt.ylim(10**(-6.4), 10**(0.1))
                plt.savefig("0P(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
                #plt.show()
                plt.close()
                '''x1 = np.transpose(DP_freqs[:,0])
                x2 = np.transpose(DP_freqs[:,1])
                popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
                perr = np.sqrt(np.diag(pcov))
                print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
                tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
                plt.plot(x1, trunc_pow_law(x1, *popt), 'm--', label=r'Fit: $ P (S \geq \Delta s) = %3.2f \times \Delta s^{(%4.3f \mp %4.3f)}\times e^{(%4.3f \mp %4.3f)\times \Delta s}$ ' % tukan )
                plt.ylim(10**(-6.4), 10**(0.1)); plt.xlim(1, 10**5)
                plt.legend()
                plt.savefig("Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
                #plt.show()
                plt.close()
                #Saving best fit data.
                gaol[float(round(CC,2))].append([L, p, -popt[1], perr[1], -popt[2], perr[2]])'''
                # Return to the analysis working directory for the next file.
                os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
            #break;
    #Saving as CSVs.
    '''if(os.path.isdir("del_S")==False):
        os.mkdir("del_S")
    os.chdir("del_S")
    if(os.path.isdir("%d" %(L))==False):
        os.mkdir("%d" %(L))
    os.chdir("%d" %(L))
    K= [0.6, 0.7, 0.75, 0.8, 0.9, 0.95]
    heado = 'L, p, alpha, SD(alpha), lambda, SD(lambda)'
    for k in K:
        np.savetxt("BestFitCDF_CC_%3.2F.csv" %(k), gaol[k], delimiter=',', header=heado, comments='#')
    os.chdir(r"../../")'''
def main_ccdf_fit():
    """Fit a discrete truncated power law to each run's |del(s)| data.

    For every experiment CSV under ``22Apret\\Apres 256+512\\512`` (runs at
    p == 0.678 are skipped), fits ``powerlaw.Fit`` to |del(s)|, saves CCDF
    and PDF plots with the fitted curve, writes likelihood-ratio
    comparisons against alternative distributions to ``Taupe.txt`` in each
    run's figure folder, and finally exports the fit parameters per
    cross-correlation level as CSVs under ``del_S/<L>``.
    ``cross_cor`` is defined elsewhere in this module — confirm its
    semantics there.
    """
    fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
    #Stores decay data of cross-correlation between frames as a function of p.
    gaol={} #Stores truncated power law fit data.
    # One result bucket per cross-correlation level.
    gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
    gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
    L=0; crosc= 0.7
    for i in range(0,10):
        base_path = r"22Apret\Apres 256+512\512" + "\\" + str(i)
        files = glob.glob(base_path + "**/**/*.csv", recursive=True)
        for file in files:
            if (file == base_path + r"\dump\15_16_KungF---U.csv"):
                continue
            if (os.path.getsize(file) > 4096):
                #Keeping unwanted files out.
                print(file)
                # First read only the header rows to decide whether to skip.
                data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
                p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
                if( p == 0.678):
                    print(str(CC) + " " + str(p) + " shall be skipped.")
                    continue
                data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
                '''
                data_temp resembles:
                | L, p, lag, #, s, s + del(s) |
                '''
                p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
                # Column 5 becomes |del(s)|.
                data_temp[:,5] -= data_temp[:,4]
                data_temp[:,5] = np.abs(data_temp[:,5])
                fit = powerlaw.Fit(data_temp[:,5],discrete=True,estimate_discrete = False) #If you already know xmin pass it as an argument (xmin=value) for speed
                print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
                print('x_min: ',fit.xmin)
                print('alpha: ',fit.truncated_power_law.parameter1)
                print('1/lambda: ',1/fit.truncated_power_law.parameter2)
                tukan = (-fit.truncated_power_law.parameter1, -fit.truncated_power_law.parameter2)
                # CCDF of the data plus the fitted truncated power law.
                fig = fit.plot_ccdf(color ='cornflowerblue', ls='-', linewidth=1.1, alpha=0.2)
                fit.plot_ccdf(color='darkcyan',marker='o', linestyle='', ms=1.2, alpha=0.35, ax=fig)
                #ax = fig.add_subplot(111)
                fit.truncated_power_law.plot_ccdf(color='darkslateblue', linestyle='--', label=r'Fit: $ P (S \geq \Delta s) \propto \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan, ax=fig)
                fig.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
                #x = fit.xmins
                #y = fit.Ds
                #plt.ylim(10**(-6.4), 10**(0.1));
                plt.xlim(1, 10**5.3)
                plt.xlabel(r"$|\Delta s|$")
                plt.ylabel(r"$P (S \geq \Delta s)$")
                plt.legend()
                # Build (and cd into) figures/del_S/DP/Individual/1-CDF/L_<L>_p_<p>.
                os.chdir("../../../figures")
                if(os.path.isdir("del_S")==False):
                    os.mkdir("del_S")
                os.chdir("del_S")
                if(os.path.isdir("DP")==False):
                    os.mkdir("DP")
                os.chdir("DP")
                if(os.path.isdir("Individual")==False):
                    os.mkdir("Individual")
                os.chdir("Individual")
                if(os.path.isdir("1-CDF")==False):
                    os.mkdir("1-CDF")
                os.chdir("1-CDF")
                if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
                    os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
                os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
                plt.savefig("Better Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
                #plt.show()
                plt.close()
                # Move up to Individual/ and into the per-run PDF folder.
                os.chdir("../../")
                if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
                    os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
                os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
                print("Done with CDF Plots And Fits. Moving On To PDF Plots...")
                fig = fit.plot_pdf(color='darkcyan',marker='o', linestyle='', ms=1.5, alpha=0.4)
                #fit.plot_pdf(color='darkcyan',marker='o', linestyle='', ms=1.2, alpha=0.35, ax=fig)
                #ax = fig.add_subplot(111)
                fit.truncated_power_law.plot_pdf(color='darkslateblue', linestyle='--', label=r'Fit: $ P (S = \Delta s) \propto \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan, ax=fig)
                fig.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
                #x = fit.xmins
                #y = fit.Ds
                #plt.ylim(10**(-6.4), 10**(0.1));
                plt.xlim(1, 10**5.3)
                plt.xlabel(r"$|\Delta s|$")
                plt.ylabel(r"$P (S = \Delta s)$")
                plt.legend()
                plt.savefig("Better Fit PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
                #plt.show()
                plt.close()
                # Likelihood-ratio tests of the truncated power law against
                # alternative candidate distributions.
                comparison_tpl_exp = fit.distribution_compare('truncated_power_law','exponential',normalized_ratio=True)
                comparison_tpl_streched_exp = fit.distribution_compare('truncated_power_law','stretched_exponential',normalized_ratio=True)
                comparison_tpl_log_normal = fit.distribution_compare('truncated_power_law','lognormal',normalized_ratio=True)
                comparison_tpl_pl = fit.distribution_compare('truncated_power_law','power_law',normalized_ratio=True)
                f = open("Taupe.txt", "w+")
                f.write("LR (Power Law): " + str(comparison_tpl_pl[0]) +" p-value: "+ str(comparison_tpl_pl[1]) +"\n")
                f.write("LR (Exponential): " + str(comparison_tpl_exp[0]) +" p-value: "+ str(comparison_tpl_exp[1]) +"\n")
                f.write("LR (Log-Normal): " + str(comparison_tpl_log_normal[0]) +" p-value: "+ str(comparison_tpl_log_normal[1]) +"\n")
                f.write("LR (Stretched-Exponential): " + str(comparison_tpl_streched_exp[0]) +" p-value: "+ str(comparison_tpl_streched_exp[1]) +"\n")
                f.close()
                print("LR (Power Law): ",comparison_tpl_pl[0]," p-value: ",comparison_tpl_pl[1])
                print("LR (Exponential): ",comparison_tpl_exp[0]," p-value: ",comparison_tpl_exp[1])
                print("LR (Log-Normal): ",comparison_tpl_log_normal[0]," p-value: ",comparison_tpl_log_normal[1])
                print("LR (Stretched-Exponential): ",comparison_tpl_streched_exp[0]," p-value: ",comparison_tpl_streched_exp[1])
                gaol[float(round(CC,2))].append([L, p, fit.xmin, fit.truncated_power_law.parameter1, 1/fit.truncated_power_law.parameter2])
                # Return to the analysis working directory for the next file.
                os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
    # Export the accumulated fit parameters, one CSV per CC level.
    if(os.path.isdir("del_S")==False):
        os.mkdir("del_S")
    os.chdir("del_S")
    if(os.path.isdir("%d" %(L))==False):
        os.mkdir("%d" %(L))
    os.chdir("%d" %(L))
    K= [0.6, 0.7, 0.75, 0.8, 0.9, 0.95]
    heado = 'L, p, x_min, alpha, 1/lambda'
    for k in K:
        np.savetxt("Nu_Pow_0_6_BestFitCDF_CC_%3.2F.csv" %(k), gaol[k], delimiter=',', header=heado, comments='#')
    os.chdir(r"../../")
def main_cumulative():
    """Pooled 3-D view of the |del(s)| PDFs at one cross-correlation level.

    Prompts for a cross-correlation value, pools the |del(s)| relative
    frequencies of every matching run under ``22Apret\\Apres 256+512\\512``
    (p == 0.678 excluded), and renders 3-D scatter plots of
    (log|del(s)|, p, log P) — one plain, one with a semi-transparent plane
    at the critical point p_c — saved under ``figures/del_S/DP/3D/<L>``.
    ``cross_cor`` is defined elsewhere in this module — confirm its
    semantics there.
    """
    p_c = 0.725194
    crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
    fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
    #Stores decay data of cross-correlation between frames as a function of p.
    binder=[]; L=0;
    for i in range(0,10):
        base_path = r"22Apret\Apres 256+512\512" + "\\" + str(i)
        files = glob.glob(base_path + "**/**/*.csv", recursive=True)
        for file in files:
            if (file == base_path + r"\dump\15_16_KungF---U.csv"):
                print('Gandu')
                continue
            if (os.path.getsize(file) > 4096):
                #Keeping unwanted files out.
                print(file)
                # Read header rows first; keep only runs within +/-0.01 of
                # the requested cross-correlation level.
                data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
                p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
                if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
                    print(str(CC) + " shall be skipped.")
                    continue
                if( p == 0.678):
                    print("Fuck You")
                    continue
                data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
                '''
                data_temp resembles:
                | L, p, lag, #, s, s + del(s) |
                '''
                # Column 5 becomes |del(s)|; then relative frequencies.
                data_temp[:,5] -= data_temp[:,4]
                data_temp[:,5] = np.abs(data_temp[:,5])
                temp_freqs = dict(collections.Counter(data_temp[:,5]))
                a,b = data_temp.shape
                DP_freqs = {k: v / a for k, v in temp_freqs.items()}
                DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
                a,b =DP_freqs.shape
                #col_P= np.zeros((a,1)); col_P = p
                DP_freqs = np.insert(DP_freqs, 0, p, axis=1)
                '''DP_freqs looks like:
                    | p, del(s), P(del(s))|
                '''
                '''DP_freqs = list(DP_freqs.items()) #Converting dictionary to list.
                for j in range(0,len(DP_freqs)):
                    DP_freqs[j].append(p)'''
                print(DP_freqs)
                if(len(binder)==0):
                    #First one in the bag.
                    binder = DP_freqs.tolist()
                else:
                    binder.extend(DP_freqs.tolist())
    # Build (and cd into) figures/del_S/DP/3D/<L> for the pooled plots.
    os.chdir("../../../figures")
    if(os.path.isdir("del_S")==False):
        os.mkdir("del_S")
    os.chdir("del_S")
    if(os.path.isdir("DP")==False):
        os.mkdir("DP")
    os.chdir("DP")
    if(os.path.isdir("3D")==False):
        os.mkdir("3D")
    os.chdir("3D")
    if(os.path.isdir("%d" %(L))==False):
        os.mkdir("%d" %(L))
    os.chdir("%d" %(L))
    binder= np.array(binder)
    fig=plt.figure()
    ax = plt.axes(projection='3d')
    #surf1 =ax.plot_trisurf(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), cmap='viridis', edgecolor='none')
    '''for k in range(0,len(self.x1)):
        #Plotting SD bars
        ax.plot([self.x1[k], self.x1[k]], [self.y1[k], self.y1[k]], [self.z1[k] + self.sd_z1[k], self.z1[k] - self.sd_z1[k]], marker="_", markerfacecolor='k', color='k')
    '''
    surf1 =ax.scatter(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), c= np.log10(binder[:,2]), cmap='viridis', linewidth=0.5)
    cbar1=fig.colorbar(surf1, shrink=0.75)
    cbar1.ax.get_yaxis().labelpad = 12
    cbar1.ax.set_ylabel(r"$P (S=\Delta s)$", rotation=270)
    ax.set_xlabel(r"$log_{10}|\Delta s|$")
    ax.set_zlabel(r"$log_{10}|P (S=\Delta s)|$")
    ax.set_ylabel("Occupancy rate (p)")
    #plt.zscale('log'); plt.xscale('log')
    ax.view_init(elev=36.0, azim=-52.0)
    ax.legend()
    ax.set_title(r"$P (S=\Delta s)$ vs $|\Delta s|$, L = %d, $R_{0,0}$ = %3.2f" %(L,crosc))
    plt.savefig("Cumulative Scatter P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
    plt.show()
    plt.close()
    '''Now for scatter plot'''
    # Second figure: same scatter plus a plane at p = p_c.
    fig=plt.figure(figsize=(6.4,4.8))
    #ax = plt.axes(projection='3d')
    ax = fig.add_subplot(111,projection='3d')
    surf1 =ax.scatter(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), c= np.log10(binder[:,2]), cmap='viridis', linewidth=0.5)
    '''for k in range(0,len(self.x1)):
        #Plotting SD bars
        ax.plot([self.x1[k], self.x1[k]], [self.y1[k], self.y1[k]], [self.z1[k] + self.sd_z1[k], self.z1[k] - self.sd_z1[k]], marker="_", markerfacecolor='k', color='k')
    '''
    cbar1=fig.colorbar(surf1, shrink=0.75)
    cbar1.ax.get_yaxis().labelpad = 12
    cbar1.ax.set_ylabel(r"$log|P (S=\Delta s)|$", rotation=270)
    ax.set_xlabel(r"$log_{10}|\Delta s|$")
    ax.set_xlim(-0.1, 5)
    ax.set_zlabel(r"$log_{10}|P (S=\Delta s)|$")
    ax.set_zlim(-6.1, 0)
    ax.set_ylabel("Occupancy rate (p)")
    #plt.zscale('log'); plt.xscale('log')
    #Plotting p_c plane.
    x = np.linspace(-1,5.5,10)
    z = np.linspace(-7,1,10)
    X,Z = np.meshgrid(x,z)
    Y= 0*X +0*Z + p_c
    #ax.hold(True) #Preserve pre-plotted elements.
    ax.plot_surface(X,Y,Z, alpha= 0.3, color='k', antialiased=True)
    ax.text(5, p_c, -1, "$p_{c}(q)$", color='0.5')
    '''p_clin = np.array([[0,p_c], [5,p_c]])
    lines = LineCollection([p_clin],zorder=1000,color='0.65',lw=2)
    ax.add_collection3d(lines, zs=-90)'''
    ax.view_init(elev=36.0, azim=-52.0)
    ax.legend()
    ax.set_title(r"$log|P (S=\Delta s)|$ vs $log|\Delta s|$, L = %d, $R_{0,0}$ = %3.2f" %(L,crosc))
    plt.savefig("Cumulative Scatter Plane P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
    # Re-save from a near-top-down viewpoint.
    ax.view_init(elev=62.0, azim=-3.0)
    plt.savefig("Cumulative Scatter Plane P(del(s)) vs del(s) Top Down --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
    plt.show()
    plt.close()
    # Return to the analysis working directory.
    os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def main_del_s_count():
    """Scatter of distinct-|del(s)| counts versus p at a chosen CC level.

    Prompts for a cross-correlation value, counts the number of distinct
    |del(s)| values per matching run under ``22Apret\\Apres 256+512\\256``
    (p == 0.678 excluded), and plots that count against p with a vertical
    line at the critical point p_c.  Output goes to
    ``figures/del_S/DP/Bifurcation/S Count``.
    ``cross_cor`` is defined elsewhere in this module — confirm its
    semantics there.
    """
    p_c = 0.725194
    crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
    fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
    #Stores decay data of cross-correlation between frames as a function of p.
    binder=[]; L=0;
    for i in range(0,10):
        base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
        files = glob.glob(base_path + "**/**/*.csv", recursive=True)
        for file in files:
            if (file == base_path + r"\dump\15_16_KungF---U.csv"):
                print('Gandu')
                continue
            if (os.path.getsize(file) > 4096):
                #Keeping unwanted files out.
                print(file)
                # Read header rows first; keep only runs within +/-0.01 of
                # the requested cross-correlation level.
                data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
                p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
                if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
                    print(str(CC) + " shall be skipped.")
                    continue
                if( p == 0.678):
                    print("Fuck You")
                    continue
                data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
                '''
                data_temp resembles:
                | L, p, lag, #, s, s + del(s) |
                '''
                # Column 5 becomes |del(s)|; 'a' ends up as the number of
                # distinct |del(s)| values in this run.
                data_temp[:,5] -= data_temp[:,4]
                data_temp[:,5] = np.abs(data_temp[:,5])
                temp_freqs = dict(collections.Counter(data_temp[:,5]))
                a,b = data_temp.shape
                DP_freqs = {k: v / a for k, v in temp_freqs.items()}
                DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
                a,b =DP_freqs.shape
                #col_P= np.zeros((a,1)); col_P = p
                DP_freqs = np.insert(DP_freqs, 0, p, axis=1)
                '''DP_freqs looks like:
                    | p, del(s), P(del(s))|
                '''
                '''DP_freqs = list(DP_freqs.items()) #Converting dictionary to list.
                for j in range(0,len(DP_freqs)):
                    DP_freqs[j].append(p)'''
                print(DP_freqs)
                print("Number of del s counts: " + str(a))
                binder.append([p, a])
    # Build (and cd into) figures/del_S/DP/Bifurcation/S Count.
    os.chdir("../../../figures")
    if(os.path.isdir("del_S")==False):
        os.mkdir("del_S")
    os.chdir("del_S")
    if(os.path.isdir("DP")==False):
        os.mkdir("DP")
    os.chdir("DP")
    if(os.path.isdir("Bifurcation")==False):
        os.mkdir("Bifurcation")
    os.chdir("Bifurcation")
    if(os.path.isdir("S Count")==False):
        os.mkdir("S Count")
    os.chdir("S Count")
    binder= np.array(binder)
    hurtlocker= pan.DataFrame(binder, columns= ["p", r"Number of unique $|\Delta s|$ observations"])
    f = sea.scatterplot(data=hurtlocker, x="p" , y=r"Number of unique $|\Delta s|$ observations")#, marker="+")
    #sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
    f.set_title('Unique $|\Delta s|$ observations, Grid Size (G) = %d, Cross-Correlation = %3.2f' %( L, crosc))
    #plt.yscale('log'); #plt.xscale('log')
    #plt.ylim(1, 10**5)
    plt.axvline(x= p_c, color='0.65')
    plt.text(p_c+ 0.003,10**2,r'$p_{c}$',rotation=90, color ='0.65')
    plt.savefig("S Count, Grid Size (G) = %d, CC = %3.2f.png" %(L, crosc), dpi=400)
    plt.show()
    plt.close()
    # Return to the analysis working directory.
    os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def main_del_s_symmetry():
p_mask=[0.658, 0.666, 0.678, 0.689, 0.701, 0.728, 0.739, 0.743, 0.755, 0.773 ]
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
MastBind=[]; L=0
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( p not in p_mask):
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
#data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
print("Sorted del(s) PDF:")
print(DP_freqs)
#DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
'''Performing a log-mod transform
https://blogs.sas.com/content/iml/2014/07/14/log-transformation-of-pos-neg.html
https://juluribk.com/dealing-with-plotting-negative-zero-and-positive-values-in-log-scale.html
'''
DP_freqs[:,0] = np.sign(DP_freqs[:,0])*(np.log10(np.abs(DP_freqs[:,0])+1))
DP_freqs = np.insert(DP_freqs, 2, float(round(CC,2)), axis=1)
DP_freqs = np.insert(DP_freqs, 3, p, axis=1)
'''DP_freqs looks like:
|del(s), P(del(s)), CC, p|
'''
print("Final del(s) PDF:")
print(DP_freqs)
if(len(MastBind)== 0):
#Empty
MastBind = DP_freqs
else:
MastBind = np.concatenate((MastBind, DP_freqs), axis=0)
'''for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)'''
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("Symmetry")==False):
os.mkdir("Symmetry")
os.chdir("Symmetry")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$\Delta s$", r"$P (S = \Delta s)$", "Cross-Correlation", "p"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
f= sea.scatterplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$")#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); #plt.xscale('log')
#plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
#plt.xlim(-5, 5)
'''x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
perr = np.sqrt(np.diag(pcov))
print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
plt.plot(x1, trunc_pow_law(x1, *popt), 'm--', label=r'Fit: $ P (S \geq \Delta s) = %3.2f \times \Delta s^{(%4.3f \mp %4.3f)}\times e^{(%4.3f \mp %4.3f)\times \Delta s}$ ' % tukan )
plt.legend()'''
plt.savefig("Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
#break;
#Plotting cumulative results.
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("Symmetry")==False):
os.mkdir("Symmetry")
os.chdir("Symmetry")
if(os.path.isdir("Cum")==False):
os.mkdir("Cum")
os.chdir("Cum")
hurtlocker= pan.DataFrame(MastBind, columns= [r"$\Delta s$", r"$P (S = \Delta s)$", "Cross-Correlation", "p"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
f= sea.scatterplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", hue="Cross-Correlation")#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('p = %f, Grid Size (G) = %d' %(MastBind[0,3], L))
plt.yscale('log'); #plt.xscale('log')
#plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
plt.xlim(-5, 5)
plt.savefig("Alt Cum Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d.png" %(MastBind[0,3], L), dpi=400)
#plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
def main_bifurcation():
p_c = 0.725194
crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
#crosc =0.8
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
binder=[]; L=0;
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
print('Gandu')
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
print(str(CC) + " shall be skipped.")
continue
if( p == 0.678):
print("Fuck You")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / a for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
a,b =DP_freqs.shape
split_data = DP_freqs[:,1] < 10**(-5.6)
DP_freqs = DP_freqs[split_data]
print("Half Done:")
print(DP_freqs)
split_data = DP_freqs[:,1] > 10**(-6)
DP_freqs_band = DP_freqs[split_data] #Stores the band of del(s) values whose probability lie between 10^(-5.85) and 10^(-5.85)
#col_P= np.zeros((a,1)); col_P = p
DP_freqs_band = np.insert(DP_freqs_band, 0, p, axis=1)
DP_freqs_band = DP_freqs_band[DP_freqs_band[:,1].argsort()]
#Sorting in increasing values of del(s)
print("Total number of points in given gap for p:\t"+str(p) +" is: \t" +str(len(DP_freqs_band[:,2])) +"\n")
print(DP_freqs_band)
'''DP_freqs looks like:
| p, del(s), P(del(s))|
'''
flag=0
for j in range(1, len(DP_freqs_band[:,2])-1):
if(abs(DP_freqs_band[j,1] -DP_freqs_band[j-1,2]) > 411 or abs(DP_freqs_band[j,1] -DP_freqs_band[j+1,2]) > 411):
# 10^(3.3) - 10^(3.2) = 410.369
binder.append([p,DP_freqs_band[j,1]])
flag=1
if(flag==0):
#No del(s) value satisfied the bandwidth demand.
#if()
binder.append([p,DP_freqs_band[-1,1]])
#Append the very last value
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Bifurcation")==False):
os.mkdir("Bifurcation")
os.chdir("Bifurcation")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
binder= np.array(binder)
hurtlocker= pan.DataFrame(binder, columns= ["p", r"$|\Delta s|$ s.t. $P (\Delta s \geq 10^{-6})$"])
f = sea.scatterplot(data=hurtlocker, x="p" , y=r"$|\Delta s|$ s.t. $P (\Delta s \geq 10^{-6})$", marker="+")
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('Bifurcation Map, Grid Size (G) = %d, Cross-Correlation = %3.2f' %( L, crosc))
plt.yscale('log'); #plt.xscale('log')
plt.ylim(1, 10**5)
plt.axvline(x= p_c, color='0.65')
plt.text(p_c+ 0.003,10**1,r'$p_{c}$',rotation=90, color ='0.65')
plt.savefig("Bifurcation Map, Grid Size (G) = %d, CC = %3.2f.png" %(L, crosc), dpi=400)
plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def plot_fit_pdf():
twist =(-1.2912647288993737, -(1/37.72480211483688))
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0; crosc= 0.7
for i in range(0,1):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01 or p != 0.66):
print(str(CC) + " " + str(p) + " shall be skipped.")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
print("Sorted del(s) PDF:")
print(DP_freqs)
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S = \Delta s)$"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
ax = fig.add_subplot(111)
sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$", ax= ax)#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
ax.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); plt.xscale('log')
plt.xlim(1, 10**5)
plt.ylim(10**(-6.3), 10**(0.1))
x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
#popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
#perr = np.sqrt(np.diag(pcov))
#print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
#tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
plt.plot(x1, trunc_pow_law(x1, *twist), color='darkslateblue', linestyle='--', label=r'Fit: $ P (S = \Delta s) = %3.2f \times \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan )
plt.ylim(10**(-6.4), 10**(0.1)); plt.xlim(1, 10**5)
plt.legend()
plt.savefig("Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)
plt.savefig("Even Better Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
comparison_tpl_exp = fit.distribution_compare('truncated_power_law','exponential',normalized_ratio=True)
comparison_tpl_streched_exp = fit.distribution_compare('truncated_power_law','stretched_exponential',normalized_ratio=True)
comparison_tpl_log_normal = fit.distribution_compare('truncated_power_law','lognormal',normalized_ratio=True)
comparison_tpl_pl = fit.distribution_compare('truncated_power_law','power_law',normalized_ratio=True)
print("LR (Power Law): ",comparison_tpl_pl[0]," p-value: ",comparison_tpl_pl[1])
print("LR (Exponential): ",comparison_tpl_exp[0]," p-value: ",comparison_tpl_exp[1])
print("LR (Log-Normal): ",comparison_tpl_log_normal[0]," p-value: ",comparison_tpl_log_normal[1])
print("LR (Stretched-Exponential): ",comparison_tpl_streched_exp[0]," p-value: ",comparison_tpl_streched_exp[1])
gaol[float(round(CC,2))].append([L, p, fit.xmin, fit.truncated_power_law.parameter1, 1/fit.truncated_power_law.parameter2])
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
'''
def cross_cor(grim_fandango, lag, L, p):
CC=0; k= 128/L
for t in range(0, len(grim_fandango[:,0])):
if grim_fandango[t,0] == p:
CC = grim_fandango[t,1]+ grim_fandango[t,3]*(math.exp(lag*grim_fandango[t,5]*k*k)); break;
#Calculating cross-correlation b/w frames.
print("CC:\t"+ str(CC))
return CC;
main_ind() | 45.430569 | 207 | 0.473437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20,708 | 0.455361 |
3708da2d53e985416aa3068357d2ebc357bc0355 | 7,558 | py | Python | loom/group.py | probcomp/loom | 825188eae76e7106a6959f6a18312b0aa3338f83 | [
"BSD-3-Clause"
] | 2 | 2019-10-25T17:57:22.000Z | 2020-07-14T02:37:34.000Z | loom/group.py | probcomp/loom | 825188eae76e7106a6959f6a18312b0aa3338f83 | [
"BSD-3-Clause"
] | 1 | 2019-12-13T03:08:05.000Z | 2019-12-13T03:08:05.000Z | loom/group.py | probcomp/loom | 825188eae76e7106a6959f6a18312b0aa3338f83 | [
"BSD-3-Clause"
] | 1 | 2020-06-22T11:23:43.000Z | 2020-06-22T11:23:43.000Z | # Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy
import pymetis
import pymetis._internal # HACK to avoid errors finding .so files in path
from itertools import izip
from collections import defaultdict
from collections import namedtuple
from distributions.io.stream import json_dump
from distributions.io.stream import open_compressed
from loom.schema_pb2 import CrossCat
from loom.cFormat import assignment_stream_load
from loom.util import LoomError
from loom.util import parallel_map
import loom.store
METIS_ARGS_TEMPFILE = 'temp.metis_args.json'
Row = namedtuple('Row', ['row_id', 'group_id', 'confidence'])
def collate(pairs):
groups = defaultdict(lambda: [])
for key, value in pairs:
groups[key].append(value)
return groups.values()
def group(root, feature_name, parallel=False):
paths = loom.store.get_paths(root, sample_count=None)
map_ = parallel_map if parallel else map
groupings = map_(group_sample, [
(sample, feature_name)
for sample in paths['samples']
])
return group_reduce(groupings)
def group_sample((sample, featureid)):
model = CrossCat()
with open_compressed(sample['model']) as f:
model.ParseFromString(f.read())
for kindid, kind in enumerate(model.kinds):
if featureid in kind.featureids:
break
assignments = assignment_stream_load(sample['assign'])
return collate((a.groupids(kindid), a.rowid) for a in assignments)
def group_reduce(groupings):
return find_consensus_grouping(groupings)
def find_consensus_grouping(groupings, debug=False):
'''
This implements Strehl et al's Meta-Clustering Algorithm [1].
Inputs:
groupings - a list of lists of lists of object ids, for example
[
[ # sample 0
[0, 1, 2], # sample 0, group 0
[3, 4], # sample 0, group 1
[5] # sample 0, group 2
],
[ # sample 1
[0, 1], # sample 1, group 0
[2, 3, 4, 5] # sample 1, group 1
]
]
Returns:
a list of Row instances sorted by (- row.group_id, row.confidence)
References:
[1] Alexander Strehl, Joydeep Ghosh, Claire Cardie (2002)
"Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions"
Journal of Machine Learning Research
http://jmlr.csail.mit.edu/papers/volume3/strehl02a/strehl02a.pdf
'''
if not groupings:
raise LoomError('tried to find consensus among zero groupings')
# ------------------------------------------------------------------------
# Set up consensus grouping problem
allgroups = sum(groupings, [])
objects = list(set(sum(allgroups, [])))
objects.sort()
index = {item: i for i, item in enumerate(objects)}
vertices = [numpy.array(map(index.__getitem__, g), dtype=numpy.intp)
for g in allgroups]
contains = numpy.zeros((len(vertices), len(objects)), dtype=numpy.float32)
for v, vertex in enumerate(vertices):
contains[v, vertex] = 1 # i.e. for u in vertex: contains[v, u] = i
# We use the binary Jaccard measure for similarity
overlap = numpy.dot(contains, contains.T)
diag = overlap.diagonal()
denom = (diag.reshape(len(vertices), 1) +
diag.reshape(1, len(vertices)) - overlap)
similarity = overlap / denom
# ------------------------------------------------------------------------
# Format for metis
if not (similarity.max() <= 1):
raise LoomError('similarity.max() = {}'.format(similarity.max()))
similarity *= 2**16 # metis segfaults if this is too large
int_similarity = numpy.zeros(similarity.shape, dtype=numpy.int32)
int_similarity[:] = numpy.rint(similarity)
edges = int_similarity.nonzero()
edge_weights = map(int, int_similarity[edges])
edges = numpy.transpose(edges)
adjacency = [[] for _ in vertices]
for i, j in edges:
adjacency[i].append(j)
# FIXME is there a better way to choose the final group count?
group_count = int(numpy.median(map(len, groupings)))
metis_args = {
'nparts': group_count,
'adjacency': adjacency,
'eweights': edge_weights,
}
if debug:
json_dump(metis_args, METIS_ARGS_TEMPFILE, indent=4)
edge_cut, partition = pymetis.part_graph(**metis_args)
if debug:
os.remove(METIS_ARGS_TEMPFILE)
# ------------------------------------------------------------------------
# Clean up solution
parts = range(group_count)
if len(partition) != len(vertices):
raise LoomError('metis output vector has wrong length')
represents = numpy.zeros((len(parts), len(vertices)))
for v, p in enumerate(partition):
represents[p, v] = 1
contains = numpy.dot(represents, contains)
represent_counts = represents.sum(axis=1)
represent_counts[numpy.where(represent_counts == 0)] = 1 # avoid NANs
contains /= represent_counts.reshape(group_count, 1)
bestmatch = contains.argmax(axis=0)
confidence = contains[bestmatch, range(len(bestmatch))]
if not all(numpy.isfinite(confidence)):
raise LoomError('confidence is nan')
nonempty_groups = list(set(bestmatch))
nonempty_groups.sort()
reindex = {j: i for i, j in enumerate(nonempty_groups)}
grouping = [
Row(row_id=objects[i], group_id=reindex[g], confidence=c)
for i, (g, c) in enumerate(izip(bestmatch, confidence))
]
groups = collate((row.group_id, row) for row in grouping)
groups.sort(key=len, reverse=True)
grouping = [
Row(row_id=row.row_id, group_id=group_id, confidence=row.confidence)
for group_id, group in enumerate(groups)
for row in group
]
grouping.sort(key=lambda x: (x.group_id, -x.confidence, x.row_id))
return grouping
| 36.162679 | 78 | 0.653744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,319 | 0.439137 |
37098bda53f4717ae975e2ce1e8c1a6a34894efe | 1,948 | py | Python | src/link_baseline.py | tahleen-rahman/linkability_stepcount | ed873782453d391865ad15e7c2d538058f5db88a | [
"Apache-2.0"
] | null | null | null | src/link_baseline.py | tahleen-rahman/linkability_stepcount | ed873782453d391865ad15e7c2d538058f5db88a | [
"Apache-2.0"
] | 7 | 2021-04-30T21:20:19.000Z | 2022-03-12T00:35:33.000Z | src/link_baseline.py | tahleen-rahman/linkability_stepcount | ed873782453d391865ad15e7c2d538058f5db88a | [
"Apache-2.0"
] | null | null | null | # Created by rahman at 16:54 2019-10-20 using PyCharm
import os
import sys
from attacks.Linkability import Link
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from attacks import BinaryDNN
from sklearn import svm
from link_utils import linkability_bl
expdict = { 0: (100, 'linkdata_0/', 0.005) ,
1: (100, 'linkdata_1/', 0.001) ,
2: (50, 'linkdata_2/', 0.0),
3: (10, 'linkdata_3/', 0.0),
4: (100, 'linkdata_dist/', 0.0)
}
def link_baseline(exp, cl, server, weekend):
trees, in_dir, var_th = expdict[exp]
clfdict = {'rf': RandomForestClassifier(n_estimators=trees, random_state=0),
'lr': LinearRegression(),
'svm': svm.SVC(gamma='scale', decision_function_shape='ovo'),
'lsvc': svm.LinearSVC(max_iter=2000), # May not converge if training data is not normalized
'dense1': BinaryDNN(num_layers=1, layer_params=[[0.25, 0.2]], num_epochs=100, batch_size=64, verbose=0),
'dense2': BinaryDNN(num_layers=2, layer_params=[[0.5, 0.2], [0.5, 0.2]], num_epochs=100, batch_size=64,
verbose=0),
'dense3': BinaryDNN(num_layers=3, layer_params=[[0.25, 0.2], [0.25, 0.2], [0.25, 0.2]], num_epochs=100,
batch_size=64, verbose=0)
}
clf = clfdict[cl]
if server:
datapath="../../stepcount/data/dzne/"
else:
datapath="../data/dzne/"
path = datapath + in_dir
from prep_features import *
#path = filter_mornings(path, f=0.25)
in_path = variance_thresholding(path, th=var_th)
linkability_bl(in_path, datapath, cl, clf, exp, weekend)
if __name__ == '__main__':
exp, cl, server, weekend = int(sys.argv[1]), sys.argv[2], int(sys.argv[3]), int(sys.argv[4])
link_baseline(exp, cl, server, weekend)
| 30.920635 | 119 | 0.603696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 319 | 0.163758 |
3709b0c7de5e9e589c3abceef31dccfb79fe0137 | 28,849 | py | Python | main.py | chelyabinsk/FootBallDashboard | 81075cf43eb5ad4ae6fd26181ef1031df6f412f0 | [
"MIT"
] | null | null | null | main.py | chelyabinsk/FootBallDashboard | 81075cf43eb5ad4ae6fd26181ef1031df6f412f0 | [
"MIT"
] | null | null | null | main.py | chelyabinsk/FootBallDashboard | 81075cf43eb5ad4ae6fd26181ef1031df6f412f0 | [
"MIT"
] | null | null | null | import streamlit as st
import numpy as np
import pandas as pd
import requests
import re
import altair as alt
# Find all available data
def find_all_spreadsheets():
    """Scrape the football-data.co.uk download page for season workbooks.

    Returns a dict mapping a season label such as '2019-2020' to the relative
    URL of that season's multi-league spreadsheet, or -1 when the page could
    not be fetched.
    """
    listing = requests.get('https://www.football-data.co.uk/downloadm.php')
    if listing.status_code != 200:
        print('Oh dear. Error {}'.format(listing.status_code))
        return -1
    seasons = {}
    # Each hit looks like 'mmz.../1920/all-euro-data-2019-2020.xlsx"'; group 0
    # is the full link text including the trailing quote.
    for hit in re.findall('(mmz(.*?)[0-9]+-[0-9]+(.*?).[xls]+")', listing.text):
        link = hit[0].replace('"', '')
        season_label = re.search('[0-9]+-[0-9]+', link).group()
        seasons[season_label] = link
    return seasons
def load_data(data_url):
    """Read a match CSV and normalise it.

    Column names are lower-cased, the separate 'date' and 'time' columns are
    merged into a single parsed 'datetime' column, and that column is moved to
    the front of the frame.
    """
    frame = pd.read_csv(data_url)
    frame.columns = [str(col).lower() for col in frame.columns]
    frame['datetime'] = pd.to_datetime(frame['date'] + ' ' + frame['time'])
    frame = frame.drop(columns=['date', 'time'])
    # Put the combined timestamp first, keeping the remaining column order.
    ordered = ['datetime'] + [col for col in frame.columns if col != 'datetime']
    return frame[ordered]
def season_spreadsheet(data_url):
    """Download one season's workbook from football-data.co.uk.

    sheet_name=None makes read_excel return a dict of DataFrames, one entry
    per league sheet in the workbook.
    """
    full_url = 'https://www.football-data.co.uk/' + data_url
    return pd.read_excel(full_url, sheet_name=None)
@st.cache
def load_all_data(spreadsheets):
    """Download and concatenate every season workbook down to (excluding) 2014-2015.

    Parameters
    ----------
    spreadsheets : dict
        Season label -> relative workbook URL, as built by find_all_spreadsheets().

    Returns
    -------
    pandas.DataFrame
        All matches with added 'season', 's-year' and 'total-goals' columns,
        rows with an unknown goal total removed, sorted newest-first by 'Date'.

    Raises
    ------
    ValueError
        If the stop season '2014-2015' is missing from `spreadsheets`.
    """
    # Silly. But it works..
    base_url = 'https://www.football-data.co.uk/'
    big_df = pd.DataFrame()
    all_keys = list(spreadsheets.keys())
    stop_season = '2014-2015'
    pos = all_keys.index(stop_season)
    for c, key in enumerate(all_keys):
        print(key)
        if key == stop_season:
            break
        # NOTE(review): data_state is assumed to be a module-level streamlit
        # placeholder created elsewhere in this file — confirm.
        data_state.text('Loading season {} ... only {} left to go'.format(key, pos - c))
        url = base_url + spreadsheets[key]
        og_spreadsheet = pd.read_excel(url, None)
        big_spreadsheet = pd.concat(og_spreadsheet, ignore_index=True)
        big_spreadsheet.loc[big_spreadsheet.index, 'Date'] = pd.to_datetime(big_spreadsheet['Date'])
        big_spreadsheet.loc[big_spreadsheet.index, 'season'] = key
        # 's-year' is the second year of the season label, e.g. '2020' of '2019-2020'.
        big_spreadsheet.loc[big_spreadsheet.index, 's-year'] = key[5:]
        big_spreadsheet.loc[big_spreadsheet.index, 's-year'] = big_spreadsheet['s-year'].astype(int)
        # BUG FIX: the column check must inspect the sheet just downloaded, not
        # the accumulator.  big_df has no columns on the first iteration, so the
        # old `'AG' in big_df.columns` test could never select the HG/AG branch
        # for the first season and would mis-select for later ones.
        if 'AG' in big_spreadsheet.columns:
            big_spreadsheet.loc[big_spreadsheet.index, 'total-goals'] = big_spreadsheet['HG'] + big_spreadsheet['AG']
        else:
            big_spreadsheet.loc[big_spreadsheet.index, 'total-goals'] = big_spreadsheet['FTHG'] + big_spreadsheet['FTAG']
        # pd.concat instead of the deprecated DataFrame.append; same result.
        big_df = pd.concat([big_df, big_spreadsheet], sort=False, ignore_index=True)
    # Drop rows where no goal total could be computed (missing source columns).
    big_df = big_df[big_df['total-goals'].notna()]
    return big_df.sort_values('Date', ascending=False)
def prev_match(df, order_specific=False):
    """Return the most recent row for every team pairing.

    The frame is assumed to be ordered newest-first; de-duplicating on a
    pairing hash with keep='first' therefore keeps each pairing's latest match.
    With order_specific=True the home/away order matters; otherwise the hash
    sum is symmetric so A-vs-B and B-vs-A collapse to one pairing.
    """
    cols = ['Date', 'HomeTeam', 'AwayTeam', 'total-goals']
    pairs = df[cols].dropna(how='all').copy()
    if order_specific:
        pairs['hash'] = (pairs['HomeTeam'] + pairs['AwayTeam']).apply(hash)
    else:
        pairs['hash'] = pairs['HomeTeam'].apply(hash) + pairs['AwayTeam'].apply(hash)
    return pairs.drop_duplicates(subset='hash', keep='first')
def prev_match_selection(df, order_specific=True, sel_type=None, total_goals=2.5):
    """Return pairings whose recent history satisfies an "x/y" over-goals rule.

    sel_type is a string 'x/y': within each pairing's y most recent matches
    (the frame is assumed newest-first), at least x must have exceeded
    `total_goals`.  The result has columns HomeTeam, AwayTeam, total-goals
    (set to ceil(total_goals)) and hash, one row per qualifying pairing.
    """
    recent = df[['Date', 'HomeTeam', 'AwayTeam', 'total-goals']].dropna(how='all').copy()
    # Pairing key: order-sensitive name concatenation vs. symmetric hash sum.
    if order_specific:
        recent['hash'] = (recent['HomeTeam'] + recent['AwayTeam']).apply(hash)
    else:
        recent['hash'] = recent['HomeTeam'].apply(hash) + recent['AwayTeam'].apply(hash)
    min_goals, games_played = (int(part) for part in sel_type.split('/'))
    # The y most recent rows of each pairing.
    window = recent.groupby('hash').head(games_played)
    # Keep only matches beating the goal line, then count them per pairing.
    over_line = window[window['total-goals'].gt(total_goals)]
    counts = over_line.groupby('hash').size().reset_index(name='counts')
    qualified = counts[counts['counts'].ge(min_goals)]
    # Merge back so the qualifying pairings regain their team names.
    picks = pd.merge(recent, qualified, left_on='hash', right_on='hash', copy=False)
    picks.loc[picks.index, 'total-goals'] = np.ceil(total_goals)
    return picks[['HomeTeam', 'AwayTeam', 'total-goals', 'hash']].drop_duplicates()
def find_stats(test_df,decision_df,order_specific=True,stats_type='last-match',misc=None):
    """Score the betting decisions in decision_df against the real outcomes in test_df.

    Rows are paired on a 'hash' key built from the team names; a decision is
    'correct' when the decided total ('total-goals_d') and the actual total
    ('total-goals_t') fall on the same side of the 2.5-goal line.

    Returns {'accuracy': float, 'data': [DataFrame, ...]} — one frame for the
    order-specific path, one or two frames (first/second round) otherwise.

    NOTE(review): mutates the caller's test_df by adding a 'hash' column in place.
    """
    # Add hashes appropriately
    # order_specific -> hash of concatenated names (home/away order matters);
    # otherwise a symmetric sum of the two name hashes.
    if order_specific:
        test_df.loc[test_df.index,'hash'] = (test_df['HomeTeam']+test_df['AwayTeam']).apply(hash)
    else:
        test_df.loc[test_df.index,'hash'] = test_df['HomeTeam'].apply(hash)+test_df['AwayTeam'].apply(hash)
    o = {'accuracy':0,'data':None}
    if order_specific:
        # Match test_df with decision_df on hashes
        merged = pd.merge(test_df,decision_df,left_on='hash',right_on='hash',copy=False,suffixes=['_t','_d'])
        merged_full = merged
        merged = merged_full[['hash','total-goals_t','total-goals_d',#'HomeTeam_t','AwayTeam_t','HomeTeam_d','AwayTeam_d'
                              ]]
        # Flag rows where actual (_t) and decided (_d) totals agree on the
        # over/under-2.5 call; rows exactly on 2.5 stay 0.
        merged.loc[merged.index,'correct'] = 0
        merged.loc[(
                    ((merged['total-goals_t']>2.5) & (merged['total-goals_d']>2.5))
                    |
                    ((merged['total-goals_t']<2.5) & (merged['total-goals_d']<2.5))
                    )
                    ,'correct'
                    ] = 1
        o['accuracy'] = merged['correct'].mean()
        # The merge suffixes 'Date' to 'Date_t' only when both inputs carried it.
        if 'Date_t' in merged_full.keys():
            date_var = 'Date_t'
        else:
            date_var = 'Date'
        o['data'] = [merged_full[[date_var,'HomeTeam_t','AwayTeam_t','total-goals_t','total-goals_d']]]
    else:
        # This makes it harder, if more than one game, will have to update stats in between
        # Usually each season has two rounds for each team ??
        # keep="last": assuming the frame is ordered newest-first (as elsewhere in
        # this file), the last duplicate of a pairing is the earlier meeting.
        first_round = test_df.drop_duplicates(subset='hash', keep="last")
        second_round = test_df.drop(first_round.index)
        # st.write('first round',first_round)
        # st.write(first_round.shape)
        # st.write('second round',second_round)
        # st.write(second_round.shape)
        # st.write('test_df',test_df)
        # Workout decisions for the first round
        merged1 = pd.merge(first_round,decision_df,on='hash',copy=False,suffixes=['_t','_d'])
        merged1 = merged1.drop_duplicates(subset='hash', keep="last")
        merged1 = merged1.drop(columns=['HomeTeam_d','AwayTeam_d'])
        # st.write(merged1)
        # res keeps each pairing's actual first-round total; it feeds the
        # 'last-match' strategy for the second round below.
        res = merged1[['hash']]
        res['total-goals'] = merged1['total-goals_t']
        # Flag correct decision
        merged1.loc[merged1.index,'correct'] = 0
        merged1.loc[(
                    ((merged1['total-goals_t']>2.5) & (merged1['total-goals_d']>2.5))
                    |
                    ((merged1['total-goals_t']<2.5) & (merged1['total-goals_d']<2.5))
                    )
                    ,'correct'
                    ] = 1
        # st.write('first round choices',merged1[['HomeTeam_t','AwayTeam_t','total-goals_t','total-goals_d','correct']])
        # Update stats for second round
        if not second_round.empty:
            if stats_type == 'last-match':
                # Find total goals from previous play
                # (predict the second leg from what actually happened in the first leg)
                merged2 = pd.merge(second_round,res,left_on='hash',right_on='hash',copy=False,suffixes=['_t','_d'])
                merged2.loc[merged2.index,'correct'] = 0
                merged2.loc[(
                            ((merged2['total-goals_t']>2.5) & (merged2['total-goals_d']>2.5))
                            |
                            ((merged2['total-goals_t']<2.5) & (merged2['total-goals_d']<2.5))
                            )
                            ,'correct'
                            ] = 1
            elif stats_type == 'xytotal':
                if not misc is None:
                    # Re-run the "x of the last y matches over the line" selection
                    # with the first-round results prepended to the history.
                    x_y_type = misc['sel_type']
                    total_goals = misc['total_goals']
                    hist_data = misc['hist_data']
                    new_data_dirty = merged1.drop(['hash','correct'],axis=1)
                    new_data = new_data_dirty.rename(columns={'HomeTeam_t':'HomeTeam','AwayTeam_t':'AwayTeam','total-goals_t':'total-goals'}).sort_values('Date',ascending=False)
                    combined = new_data.append(hist_data,ignore_index=True)
                    second_round_choices = prev_match_selection(combined,order_specific=order_specific,sel_type=x_y_type,total_goals=total_goals)
                    merged2 = pd.merge(second_round,second_round_choices,on='hash',copy=False,suffixes=['_t','_d'])
                    second_round_choices = second_round_choices[['hash','total-goals']].drop_duplicates()
                    # st.write('second_round_choices',second_round_choices)
                    # st.write(second_round_choices.shape)
                    # Find total goals from previous play
                    # (merged2 is recomputed against the de-duplicated choices,
                    # discarding the merge two lines above)
                    merged2 = pd.merge(second_round,second_round_choices,left_on='hash',right_on='hash',copy=False,suffixes=['_t','_d'])
                    merged2.loc[merged2.index,'correct'] = 0
                    # st.write(merged2[['HomeTeam','AwayTeam','total-goals_t','total-goals_d']])
                    merged2.loc[(
                                ((merged2['total-goals_t']>2.5) & (merged2['total-goals_d']>2.5))
                                |
                                ((merged2['total-goals_t']<2.5) & (merged2['total-goals_d']<2.5))
                                )
                                ,'correct'
                                ] = 1
            # NOTE(review): if stats_type is neither 'last-match' nor 'xytotal'
            # (or misc is None for 'xytotal'), merged2 is never assigned and the
            # next line raises NameError — confirm callers only use these modes.
            o['accuracy'] = np.array(list(merged2['correct'])+list(merged1['correct'])).mean()
            if 'Date_t' in merged1.keys():
                date_val = 'Date_t'
            else:
                date_val = 'Date'
            o['data'] = [merged1[[date_val,'HomeTeam_t','AwayTeam_t','total-goals_t','total-goals_d']],
                         merged2[['Date','HomeTeam','AwayTeam','total-goals_t','total-goals_d']]]
        else:
            # No second round: accuracy comes from the first-round flags alone.
            o['accuracy'] = np.array(list(merged1['correct'])).mean()
            if 'Date_t' in merged1.keys():
                date_val = 'Date_t'
            else:
                date_val = 'Date'
            o['data'] = [merged1[[date_val,'HomeTeam_t','AwayTeam_t','total-goals_t','total-goals_d']]]
    return o
def calc_roi(season_odds, season_results, chosen_odds=None):
    """Evaluate flat-stake over/under-2.5-goals betting returns per bookmaker.

    Joins the strategy's selected fixtures (``season_results``) onto the odds
    table (``season_odds``) and, for every bookmaker with usable prices,
    computes won/lost stakes, success rate, average winning price, profit and
    ROI, then renders the selection and the summary table via streamlit.

    Args:
        season_odds: DataFrame keyed by 'Date'/'HomeTeam'/'AwayTeam' carrying
            bookmaker odds columns such as 'B365>2.5' / 'B365<2.5'.
        season_results: DataFrame of selected fixtures carrying the actual
            ('total-goals_t') and predicted ('total-goals_d') goal totals.
        chosen_odds: unused; kept for interface compatibility.

    Returns:
        None; results are written with ``st.write``.
    """
    # Betbrain column naming for reference:
    #   BbAv>2.5 = Betbrain average over 2.5 goals
    #   BbAv<2.5 = Betbrain average under 2.5 goals
    merged = pd.merge(season_odds, season_results,
                      left_on=['Date', 'AwayTeam', 'HomeTeam'],
                      right_on=['Date', 'AwayTeam', 'HomeTeam'], how='inner')
    clean = merged
    bet_size = 1
    # Check that the total-goals columns were created; if not, nothing to score.
    if 'total-goals_t' in clean.keys() and 'total-goals_d' in clean.keys():
        # Flag each fixture where the prediction matched the outcome.
        clean.loc[clean.index, 'correct>2.5'] = 0
        clean.loc[clean.index, 'correct<2.5'] = 0
        clean.loc[clean.index, 'correct'] = 0
        clean.loc[(clean['total-goals_t'] > 2.5) & (clean['total-goals_d'] > 2.5), 'correct>2.5'] = 1
        clean.loc[(clean['total-goals_t'] < 2.5) & (clean['total-goals_d'] < 2.5), 'correct<2.5'] = 1
        clean.loc[(clean['correct>2.5'] == 1) | (clean['correct<2.5'] == 1), 'correct'] = 1
        brokers = ['B365', 'P', 'GB', 'BbAv']
        avail_brokers = []
        available_odds_gt = []
        available_odds_lt = []
        for b in brokers:
            b_str_gt = '{}>2.5'.format(b)
            b_str_lt = '{}<2.5'.format(b)
            if b_str_gt in clean.keys():
                available_odds_gt.append(b_str_gt)
                available_odds_lt.append(b_str_lt)
                avail_brokers.append(b)
        # Best/worst available prices across bookmakers (synthetic brokers).
        clean.loc[clean.index, 'min>2.5'] = clean[available_odds_gt].min(axis=1)
        clean.loc[clean.index, 'max>2.5'] = clean[available_odds_gt].max(axis=1)
        clean.loc[clean.index, 'min<2.5'] = clean[available_odds_lt].min(axis=1)
        clean.loc[clean.index, 'max<2.5'] = clean[available_odds_lt].max(axis=1)
        clean.loc[clean.index, 'min-odds'] = clean[available_odds_lt + available_odds_gt].min(axis=1)
        clean.loc[clean.index, 'max-odds'] = clean[available_odds_lt + available_odds_gt].max(axis=1)
        results = []  # one summary dict per evaluated bookmaker, in display order
        for c, b in enumerate(avail_brokers):
            row = _broker_roi(clean, available_odds_gt[c], available_odds_lt[c], b, bet_size)
            if row is not None:
                results.append(row)
        # Closing-line bookmakers (when present) plus the min/max synthetics.
        for gt_col, lt_col, label in [('B365C>2.5', 'B365C<2.5', 'Bet365Close'),
                                      ('PC>2.5', 'PC<2.5', 'PinnacleClose'),
                                      ('min>2.5', 'min<2.5', 'MinBroker'),
                                      ('max>2.5', 'max<2.5', 'MaxBroker')]:
            if gt_col in clean.keys():
                row = _broker_roi(clean, gt_col, lt_col, label, bet_size)
                if row is not None:
                    results.append(row)
        output_table = pd.DataFrame({
            'broker-name': [r['broker-name'] for r in results],
            'won-size': [r['won-size'] for r in results],
            'profit': [r['profit'] for r in results],
            'succ-rate': [r['succ-rate'] for r in results],
            'avg-price': [r['avg-price'] for r in results],
            'roi': [r['roi'] for r in results],
            'total-cost': [r['total-cost'] for r in results],
        })
        st.write('### Selected odds', clean)
        st.write('### Results', output_table)
    else:
        # TODO: Calculate results based on odds (highest and lowest)
        pass


def _broker_roi(clean, gt_col, lt_col, label, bet_size):
    """Compute flat-stake stats for one over/under odds column pair.

    Args:
        clean: scored fixtures frame carrying 'correct'/'correct>2.5'/'correct<2.5'.
        gt_col: over-2.5 odds column name.
        lt_col: under-2.5 odds column name.
        label: bookmaker display name for the summary row.
        bet_size: stake per fixture.

    Returns:
        Dict of summary stats, or None when no fixture has prices in both columns.
    """
    broker = clean[[gt_col, lt_col, 'correct', 'correct>2.5', 'correct<2.5']]
    broker = broker.dropna(axis=0, how='any')
    if broker.shape[0] == 0:
        return None
    # Count of losing bets; a boolean sum avoids the KeyError that
    # value_counts(dropna=False)[0] raised when every bet won.
    lost_size = int((broker['correct'] == 0).sum()) * bet_size
    correct_rows_gt = broker[broker['correct>2.5'] == 1]
    correct_rows_lt = broker[broker['correct<2.5'] == 1]
    won_size = (bet_size * correct_rows_gt[gt_col].sum(skipna=True)
                + bet_size * correct_rows_lt[lt_col].sum(skipna=True))
    profit = won_size - lost_size
    succ_rate = (correct_rows_gt.shape[0] + correct_rows_lt.shape[0]) / broker.shape[0]
    # Mean price of winning bets (NaN when nothing was won, as before).
    avg_price = np.array(list(correct_rows_gt[gt_col]) + list(correct_rows_lt[lt_col])).mean()
    total_cost = bet_size * broker.shape[0]
    roi = profit / total_cost
    return {'broker-name': label, 'won-size': won_size, 'profit': profit,
            'succ-rate': succ_rate, 'avg-price': avg_price, 'roi': roi,
            'total-cost': total_cost}
def filter_teams(df, chosen_n=5, filter_type='TotalAll'):
    """Keep only fixtures involving the top ``chosen_n`` teams by scoring rate.

    Teams are ranked by goals per game.  'Total goals Home' / 'Total goals
    Away' rank on the most recent season in ``df`` only; 'Total goals
    Home+Away' ranks over every fixture in ``df``.  The full-time column
    names ('FTHG'/'FTAG') are tried first with the short names ('HG'/'AG')
    as a fallback for older spreadsheets.

    Args:
        df: fixtures DataFrame with 's-year', 'HomeTeam', 'AwayTeam' and goal
            columns ('FTHG'/'FTAG' or 'HG'/'AG').
        chosen_n: number of top-ranked teams to keep.
        filter_type: ranking mode; any other value (including the default)
            returns None, i.e. no filtering is applied.

    Returns:
        The filtered DataFrame, or None for an unrecognised ``filter_type``.
    """
    # Select only last season data
    last_season = df[df['s-year'] == df['s-year'].max()]
    # Rank = goals / num_games
    if filter_type == 'Total goals Home+Away':
        # Rank teams by scoring rate over every fixture in df, home or away.
        # (Previously read the module-level hist_data instead of the argument.)
        try:
            home = df[['HomeTeam', 'FTHG']].rename(columns={'HomeTeam': 'Team', 'FTHG': 'Goals'})
        except KeyError:
            home = df[['HomeTeam', 'HG']].rename(columns={'HomeTeam': 'Team', 'HG': 'Goals'})
        try:
            away = df[['AwayTeam', 'FTAG']].rename(columns={'AwayTeam': 'Team', 'FTAG': 'Goals'})
        except KeyError:
            away = df[['AwayTeam', 'AG']].rename(columns={'AwayTeam': 'Team', 'AG': 'Goals'})
        teams = pd.concat([home, away])  # DataFrame.append was removed in pandas 2.0
        goals_by_teams = teams[['Team', 'Goals']].groupby('Team').sum()
        games_by_teams = teams[['Team', 'Goals']].groupby('Team').count()
        rank = (goals_by_teams / games_by_teams).sort_values('Goals', ascending=False).head(chosen_n).index
        top_teams = pd.DataFrame(rank)
        # Keep every fixture where a top team played, home or away.
        merge_home = pd.merge(df, top_teams, left_on='HomeTeam', right_on='Team', how='inner')
        merge_away = pd.merge(df, top_teams, left_on='AwayTeam', right_on='Team', how='inner')
        merge = pd.concat([merge_home, merge_away]).reset_index()
        return merge
    elif filter_type == 'Total goals Home':
        # Rank teams on goals per home game, last season only.
        try:
            goals_by_teams = last_season[['HomeTeam', 'FTHG']].groupby('HomeTeam').sum()
            games_by_teams = last_season[['HomeTeam', 'FTHG']].groupby('HomeTeam').count()
            # .index was missing here, unlike the fallback branch below.
            rank = (goals_by_teams / games_by_teams).sort_values('FTHG', ascending=False).head(chosen_n).index
        except KeyError:
            goals_by_teams = last_season[['HomeTeam', 'HG']].groupby('HomeTeam').sum()
            games_by_teams = last_season[['HomeTeam', 'HG']].groupby('HomeTeam').count()
            rank = (goals_by_teams / games_by_teams).sort_values('HG', ascending=False).head(chosen_n).index
        home_teams = pd.DataFrame(rank)
        merge = pd.merge(df, home_teams, left_on='HomeTeam', right_on='HomeTeam', how='inner')
        return merge
    elif filter_type == 'Total goals Away':
        # Rank teams on goals per away game, last season only.
        try:
            goals_by_teams = last_season[['AwayTeam', 'FTAG']].groupby('AwayTeam').sum()
            games_by_teams = last_season[['AwayTeam', 'FTAG']].groupby('AwayTeam').count()
            rank = (goals_by_teams / games_by_teams).sort_values('FTAG', ascending=False).head(chosen_n).index
        except KeyError:
            goals_by_teams = last_season[['AwayTeam', 'AG']].groupby('AwayTeam').sum()
            games_by_teams = last_season[['AwayTeam', 'AG']].groupby('AwayTeam').count()
            # Was sorting on 'HG', which does not exist in this frame.
            rank = (goals_by_teams / games_by_teams).sort_values('AG', ascending=False).head(chosen_n).index
        away_teams = pd.DataFrame(rank)
        merge = pd.merge(df, away_teams, left_on='AwayTeam', right_on='AwayTeam', how='inner')
        return merge
# ---- Streamlit page flow: runs top to bottom on every UI interaction ----
spreadsheets = find_all_spreadsheets()
data_state = st.text('')
data_state.text('Pre-processing')
big_df = load_all_data(spreadsheets)
data_state.text('')
# Season / division pickers drive all the filtering below.
season = st.selectbox(
    "Select season", list(big_df['season'].unique()), 1
)
division = st.selectbox(
    "Select division", list(big_df['Div'].sort_values().unique()), 0
)
order_specific = st.sidebar.checkbox('Order specific', 1)
# Select by exact total number of goals
top_n_selection = st.sidebar.checkbox('Top n teams from the previous season', 0)
st.markdown("""Select a type of Head to head. Two options are available.""")
st.markdown("1) Total goals from previous fixture looks at the previous total number of goals in the previous identical match")
st.markdown("2) x/y matching with total goals only selects matches where at least x out of y last matches had at least (however many)`total goals'")
st.markdown("More filters available i the panel on the left")
# Find previous total for all pairs
total_type = st.selectbox(
    "Type of `Head to Head'", ['None', 'Total goals from previous fixture', "x/y & `total goals' criterion"], 0
)
# Season labels look like 'YYYY-YYYY'; characters 5: give the end year.
current_year = int(season[5:])
division_data = big_df.loc[big_df['Div'] == division]
current_data = division_data.loc[big_df['s-year'] == current_year]
hist_data = division_data.loc[(big_df['s-year'] < current_year)]
if top_n_selection:
    rank_type = st.sidebar.selectbox(
        "Rank teams by", ['Total goals Home+Away', 'Total goals Home', 'Total goals Away'], 0
    )
    # NOTE(review): '+' on two ndarrays from unique() is element-wise string
    # concatenation, not a union — np.concatenate looks intended; confirm.
    n = st.sidebar.number_input('Number of top teams selected',
                                min_value=1,
                                max_value=len(current_data['HomeTeam'].unique() + current_data['AwayTeam'].unique()),
                                value=5)
    # Filter teams
    hist_data = filter_teams(hist_data, chosen_n=n, filter_type=rank_type)
test_data = None
stats_type = None
misc = None
if total_type == 'None':
    pass
elif total_type == 'Total goals from previous fixture':
    test_data = prev_match(hist_data, order_specific=order_specific)
    stats_type = 'last-match'
elif total_type == "x/y & `total goals' criterion":
    x_y_type = st.selectbox(
        "Select x/y", ['1/1', '1/2', '2/2', '2/3', '3/3', '3/4', '4/4', '4/5', '5/5'], 5
    )
    total_goals = st.selectbox(
        "Select `total goals'", np.linspace(0.5, 8.5, 9), 2
    )
    test_data = prev_match_selection(hist_data, order_specific=order_specific, sel_type=x_y_type, total_goals=total_goals)
    stats_type = 'xytotal'
    misc = {'sel_type': x_y_type, 'total_goals': total_goals, 'hist_data': hist_data}
# Workout how many matches were won with given filters
if total_type != 'None':
    temp = find_stats(current_data, test_data, order_specific=order_specific, stats_type=stats_type, misc=misc)
else:
    temp = {'data': []}
# find_stats returns one frame ('last-match') or two ('xytotal'); normalise
# column names back to plain HomeTeam/AwayTeam/Date either way.
if len(temp['data']) == 1:
    out_data = temp['data'][0].rename(columns={'HomeTeam_t': 'HomeTeam',
                                               'AwayTeam_t': 'AwayTeam',
                                               # 'total-goals_t':'total-goals',
                                               # 'total-goals_d':'total-goals',
                                               'Date_t': 'Date',
                                               'Date_d': 'Date'
                                               })
    # st.write('## Selection',out_data)
elif len(temp['data']) == 2:
    out_data1 = temp['data'][0].rename(columns={'HomeTeam_t': 'HomeTeam',
                                                'AwayTeam_t': 'AwayTeam',
                                                # 'total-goals_t':'total-goals',
                                                # 'total-goals_d':'total-goals',
                                                'Date_t': 'Date',
                                                'Date_d': 'Date'
                                                })
    out_data2 = temp['data'][1].rename(columns={'HomeTeam_t': 'HomeTeam',
                                                'AwayTeam_t': 'AwayTeam',
                                                # 'total-goals_t':'total-goals',
                                                # 'total-goals_d':'total-goals',
                                                'Date_t': 'Date',
                                                'Date_d': 'Date'
                                                })
    out_data = out_data1.append(out_data2, ignore_index=True)
    # st.write('## Selection',out_data)
if total_type != 'None':
    calc_roi(current_data, out_data)
else:
    # TODO: Choose best matches based on odds
    pass
370a15691cec20cb2cd5ab37aca120000fa1eaf8 | 40,610 | py | Python | v2.5.7/toontown/episodes/MoneybinTakeover.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 4 | 2019-07-01T15:46:43.000Z | 2021-07-23T16:26:48.000Z | v2.5.7/toontown/episodes/MoneybinTakeover.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 1 | 2019-06-29T03:40:05.000Z | 2021-06-13T01:15:16.000Z | v2.5.7/toontown/episodes/MoneybinTakeover.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 4 | 2019-07-28T21:18:46.000Z | 2021-02-25T06:37:25.000Z | from toontown.avatar import ToontownAvatarUtils
from toontown.avatar.CogExtras import *
# Static scenery placed around the cog-takeover money bin.  One entry per
# model: (phase, subdir, model, pos, hpr, scale, child, color, texture) —
# 'phase' selects phase_N/models (or 'custom'); 'child' is an optional
# sub-node to extract; 'color'/'texture' are optional overrides.
PROPS = [
    (5, 'modules', 'suit_walls', (0.0, 342.4, 0.0), (-10.5, 0.0, 0.0), (54.6, 54.6, 54.6), 'wall_suit_build5', None, None),
    (5, 'modules', 'suit_walls', (53.686, 332.45, 0.0), (-16.5, 0.0, 0.0), (54.6, 54.6, 54.6), 'wall_suit_build5', None, None),
    (5, 'modules', 'suit_walls', (106.037, 316.943, 0.0), (-24.0, 0.0, 0.0), (54.6, 54.6, 54.6), 'wall_suit_build5', None, None),
    (5, 'modules', 'suit_walls', (155.917, 294.735, 0.0), (-36.0, 0.0, 0.0), (54.6, 54.6, 54.6), 'wall_suit_build5', None, None),
    (4, 'modules', 'suit_landmark_corp', (196.307, 269.394, 0.0), (-49.5, 0.0, 0.0), (2.7, 2.7, 2.7), None, None, None),
    (5, 'modules', 'suit_walls', (247.652, 209.276, 0.0), (-54.0, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build4', None, None),
    (5, 'modules', 'suit_walls', (280.215, 164.456, 0.0), (-70.5, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build4', None, None),
    (5, 'modules', 'suit_walls', (298.708, 112.234, 0.0), (-81.0, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build4', None, None),
    (5, 'modules', 'suit_walls', (307.374, 57.516, 0.0), (-88.5, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build4', None, None),
    (5, 'modules', 'suit_walls', (308.824, 2.135, 0.0), (-96.0, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build3', None, None),
    (5, 'modules', 'suit_walls', (303.034, -52.961, 0.0), (-102.0, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build3', None, None),
    (5, 'modules', 'suit_walls', (291.515, -107.15, 0.0), (-115.5, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build3', None, None),
    (5, 'modules', 'suit_walls', (267.665, -157.153, 0.0), (-129.0, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build3', None, None),
    (5, 'modules', 'suit_walls', (232.801, -200.207, 0.0), (-142.5, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build3', None, None),
    (5, 'modules', 'suit_walls', (-78.657, -284.069, 0.0), (165.5, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build1', None, None),
    (5, 'modules', 'suit_walls', (-132.292, -270.198, 0.0), (149.0, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build1', None, None),
    (5, 'modules', 'suit_walls', (-179.779, -241.665, 0.0), (134.0, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build1', None, None),
    (5, 'modules', 'suit_walls', (-218.263, -201.813, 0.0), (123.5, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build2', None, None),
    (5, 'modules', 'suit_walls', (-248.84, -155.616, 0.0), (114.5, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build2', None, None),
    (5, 'modules', 'suit_walls', (-271.814, -105.204, 0.0), (96.5, 0.0, 0.0), (55.4, 55.4, 55.4), 'wall_suit_build2', None, None),
    (4, 'modules', 'suit_landmark_legal', (-278.086, -50.161, 0.0), (87.5, 0.0, 0.0), (2.7, 2.7, 2.7), None, None, None),
    (5, 'modules', 'suit_walls', (-274.513, 31.661, 0.0), (81.5, 0.0, 0.0), (54.5, 54.5, 54.5), 'wall_suit_build4', None, None),
    (5, 'modules', 'suit_walls', (-266.458, 85.563, 0.0), (66.5, 0.0, 0.0), (54.5, 54.5, 54.5), 'wall_suit_build4', None, None),
    (5, 'modules', 'suit_walls', (-244.726, 135.543, 0.0), (54.5, 0.0, 0.0), (54.5, 54.5, 54.5), 'wall_suit_build4', None, None),
    (5, 'modules', 'suit_walls', (-213.078, 179.912, 0.0), (65.0, 0.0, 0.0), (54.5, 54.5, 54.5), 'wall_suit_build4', None, None),
    (5, 'modules', 'suit_walls', (-190.045, 229.306, 0.0), (68.0, 0.0, 0.0), (54.5, 54.5, 54.5), 'wall_suit_build5', None, None),
    (5, 'modules', 'suit_walls', (-169.629, 279.838, 0.0), (54.5, 0.0, 0.0), (54.5, 54.5, 54.5), 'wall_suit_build5', None, None),
    (5, 'modules', 'suit_walls', (-137.98, 324.207, 0.0), (12.5, 0.0, 0.0), (54.5, 54.5, 54.5), 'wall_suit_build5', None, None),
    (4, 'modules', 'suit_landmark_sales', (-86.515, 338.143, 0.0), (5.0, 0.0, 0.0), (2.8, 2.8, 2.8), None, None, None),
    (6, 'cogHQ', 'WaterTowerSimple', (110.0, -140.0, 0.0), (0.0, 0.0, 0.0), (1.0, 1.0, 1.0), None, None, None),
    (6, 'cogHQ', 'WaterTowerSimple', (83.4, -175.2, 0.0), (0.0, 0.0, 0.0), (0.8, 0.8, 0.8), None, None, None),
    (6, 'cogHQ', 'WaterTowerSimple', (51.0, 131.8, 0.15), (54.0, 0.0, 0.0), (2.0, 2.0, 2.0), None, None, None),
    (6, 'cogHQ', 'WaterTowerSimple', (86.812, 209.566, 0.15), (-25.5, 0.0, 0.0), (1.4, 1.4, 1.4), None, None, None),
    (6, 'cogHQ', 'WaterTowerSimple', (125.888, 94.562, 0.15), (13.5, 0.0, 0.0), (1.4, 1.4, 1.4), None, None, None),
    (6, 'cogHQ', 'WaterTowerSimple', (167.643, -205.228, 0.15), (293.0, 0.0, 0.0), (1.1, 1.1, 1.1), None, None, None),
    (6, 'cogHQ', 'SmokeStack_simple', (-67.683, 76.087, 0.15), (293.0, 0.0, 0.0), (1.1, 1.1, 1.1), None, None, None),
    (6, 'cogHQ', 'SmokeStack_simple', (-76.301, 48.529, 0.15), (293.0, 0.0, 0.0), (1.025, 1.025, 1.025), None, None, None),
    (6, 'cogHQ', 'SmokeStack_simple', (-90.453, 37.979, 0.15), (293.0, 0.0, 0.0), (0.875, 0.875, 0.875), None, None, None),
    (6, 'cogHQ', 'SmokeStack_simple', (-96.849, 7.107, 0.15), (293.0, 0.0, 0.0), (0.775, 0.775, 0.775), None, None, None),
    (9, 'cogHQ', 'woodCrateB', (91.145, -154.239, 0.3), (403.5, 0.0, 0.0), (2.575, 2.575, 2.575), None, None, None),
    (9, 'cogHQ', 'metal_crateB', (241.218, 62.596, 0.3), (325.5, 0.0, 0.0), (3.975, 3.975, 3.975), None, None, None),
    (9, 'cogHQ', 'FactoryGearB', (215.457, -170.329, 3.15), (339.0, 0.0, 0.0), (12.975, 12.975, 12.975), None, None, None),
    (9, 'cogHQ', 'woodCrateB', (223.817, 70.637, 0.15), (348.0, 0.0, 0.0), (2.875, 2.875, 2.875), None, None, None),
    (10, 'cashbotHQ', 'CBWoodCrate', (148.604, -193.93, 0.25), (336.0, 0.0, 0.0), (2.875, 2.875, 2.875), None, None, None),
    (10, 'cashbotHQ', 'CBMetalCrate', (187.686, -190.44, 0.25), (364.5, 0.0, 0.0), (2.875, 2.875, 2.875), None, None, None),
    (10, 'cashbotHQ', 'CBMetalCrate', (159.86, -175.457, 0.25), (360.0, 0.0, 0.0), (2.425, 2.425, 2.425), None, None, None),
    (10, 'cashbotHQ', 'DoubleGoldStack', (130.958, 188.263, 0.25), (378.0, 0.0, 0.0), (2.425, 2.425, 2.425), None, None, None),
    (10, 'cashbotHQ', 'DoubleCoinStack', (16.991, 243.846, 0.25), (367.5, 0.0, 0.0), (2.425, 2.425, 2.425), None, None, None),
    (10, 'cashbotHQ', 'MoneyStackPallet', (94.762, 80.424, 0.25), (378.0, 0.0, 0.0), (2.425, 2.425, 2.425), None, None, None),
    (10, 'cashbotHQ', 'CashBotSafe', (152.787, 77.659, 0.25), (369.0, 0.0, 0.0), (1.0, 1.0, 1.0), None, None, None),
    (10, 'cashbotHQ', 'CashBotSafe', (167.633, 76.774, 0.25), (352.5, 0.0, 0.0), (1.0, 1.0, 1.0), None, None, None),
    (10, 'cashbotHQ', 'MoneyStackPallet', (232.506, -16.91, 0.25), (334.5, 0.0, 0.0), (2.4, 2.4, 2.4), None, None, None),
    (10, 'cashbotHQ', 'shelf_A1', (262.684, 171.426, 0.25), (312.0, 0.0, 0.0), (2.4, 2.4, 2.4), None, None, None),
    (10, 'cashbotHQ', 'shelf_A1MoneyBags', (280.169, 125.461, 0.25), (289.5, 0.0, 0.0), (2.4, 2.4, 2.4), None, None, None),
    (10, 'cashbotHQ', 'VaultDoorCover', (168.327, 284.185, 0.25), (328.5, 0.0, 0.0), (2.4, 2.4, 2.4), None, None, None),
    (10, 'cashbotHQ', 'shelf_A1Gold', (130.324, 297.732, 0.25), (337.5, 0.0, 0.0), (2.4, 2.4, 2.4), None, None, None),
    (10, 'cashbotHQ', 'shelf_A1Gold', (94.847, 312.427, 0.25), (340.5, 0.0, 0.0), (2.4, 2.4, 2.4), None, None, None),
    (10, 'cashbotHQ', 'shelf_A1Gold', (56.096, 316.298, 0.25), (345.0, 0.0, 0.0), (2.4, 2.4, 2.4), None, None, None),
    (6, 'cogHQ', 'WaterTowerSimple', (-43.383, -149.335, 0.0), (28.5, 0.0, 0.0), (1.0, 1.0, 1.0), None, None, None),
    (6, 'cogHQ', 'WaterTowerSimple', (-84.082, -125.941, 0.0), (-12.0, 0.0, 0.0), (1.2, 1.2, 1.2), None, None, None),
    (9, 'cogHQ', 'woodCrateB', (-106.023, -12.046, 0.0), (3.0, 0.0, 0.0), (2.5, 2.5, 2.5), None, None, None),
    (9, 'cogHQ', 'CogDoorHandShake', (-233.571, 146.487, 0.0), (58.5, 0.0, 0.0), (2.5, 2.5, 2.5), None, None, None),
    (10, 'cashbotHQ', 'CBWoodCrate', (-112.604, -121.894, 0.15), (31.5, 0.0, 0.0), (2.5, 2.5, 2.5), None, None, None),
    (10, 'cashbotHQ', 'crates_C1', (-152.706, 242.107, 0.15), (52.5, 0.0, 0.0), (2.5, 2.5, 2.5), None, None, None),
    (10, 'cashbotHQ', 'GoldBarStack', (-170.256, 215.103, 0.15), (48.0, 0.0, 0.0), (2.5, 2.5, 2.5), None, None, None),
    (10, 'cashbotHQ', 'MoneyStackPallet', (-186.041, 186.343, 0.15), (48.0, 0.0, 0.0), (2.5, 2.5, 2.5), None, None, None),
    (10, 'cashbotHQ', 'shelf_A1Gold', (-249.959, 76.956, 0.15), (67.5, 0.0, 0.0), (2.5, 2.5, 2.5), None, None, None),
    (11, 'lawbotHQ', 'LB_paper_stacks', (-159.375, 13.489, 0.15), (45.0, 0.0, 0.0), (1.875, 1.875, 1.875), None, None, None),
    (9, 'cogHQ', 'old_sky', (0.0, 0.0, 378.952), (15.5, 0.0, 0.0), (1.475, 1.475, 1.475), None, None, None)]
class MoneybinTakeover:
    """Cutscene where the cogs demolish the money bin and drop a cog building.

    Loads the money-bin street scene, flies in a Robber Baron, then plays a
    synchronised set of Panda3D intervals: the toon buildings squash away
    while the cog bin and surrounding cog props descend from the sky.
    """

    def __init__(self):
        base.camLens.setFov(60)
        # All loaded nodes are collected here so cleanupScene can remove them.
        self.modelList = []
        self.moneyBinTheme = None
        self.moneyBinTakeOver = None
        self.moneybin = None
        self.cogbin = None
        self.cogArea = None
        self.cogSky = None
        return

    def generate(self):
        """Build the whole scene and start the takeover animation."""

        def addModels(PROPS, parent, children=False, strong=None):
            # Instantiate each prop tuple: (phase, subdir, model, pos, hpr,
            # scale, child, color, texture).  'custom' phase loads .egg files
            # from custom/models instead of a phase directory.
            for prop in PROPS:
                if children:
                    if prop[6]:
                        # Extract a named child node from the loaded model.
                        if prop[0] == 'custom':
                            model = loader.loadModel('custom/models/%s/%s.egg' % (prop[1], prop[2])).find('**/%s' % prop[6])
                        else:
                            model = loader.loadModel('phase_%s/models/%s/%s' % (prop[0], prop[1], prop[2])).find('**/%s' % prop[6])
                    elif prop[0] == 'custom':
                        model = loader.loadModel('custom/models/%s/%s.egg' % (prop[1], prop[2]))
                    else:
                        model = loader.loadModel('phase_%s/models/%s/%s' % (prop[0], prop[1], prop[2]))
                else:
                    if prop[0] == 'custom':
                        model = loader.loadModel('custom/models/%s/%s.egg' % (prop[1], prop[2]))
                    else:
                        model = loader.loadModel('phase_%s/models/%s/%s' % (prop[0], prop[1], prop[2]))
                model.reparentTo(parent)
                model.setPos(prop[3])
                model.setHpr(prop[4])
                model.setScale(prop[5])
                self.modelList.append(model)
                if prop[7]:
                    model.setColorScale(prop[7])
                if prop[8]:
                    texture = loader.loadTexture(prop[8])
                    model.setTexture(texture, 1)
                if strong:
                    model.flattenStrong()

        self.moneyBinTheme = loader.loadMusic('phase_14.5/audio/bgm/SB_hub.ogg')
        self.moneyBinTakeOver = loader.loadMusic('phase_14.5/audio/sfx/SB_Takeover.ogg')
        # cogArea holds every cog prop; it starts hidden 150 units up and is
        # lowered into place by cogBinTrack below.
        self.cogArea = render.attachNewNode('cogArea')
        self.cogArea.setZ(150)
        self.cogArea.hide()
        self.cogArea.setColorScale(1, 0.912, 0.863, 1)
        self.cogSky = loader.loadModel('phase_9/models/cogHQ/old_sky')
        self.cogSky.reparentTo(render)
        self.cogSky.setPosHprScale(0, 0, 378.952, 15.5, 0, 0, 1.475, 1.475, 1.475)
        self.cogSky.hide()
        self.modelList.append(self.cogSky)
        self.modelList.append(self.cogArea)
        addModels(PROPS, self.cogArea, children=True)
        self.setUpStreet()
        # DNA storage files must be loaded before the money-bin area DNA.
        loader.loadDNAFile(base.cr.playGame.hood.dnaStore, 'phase_8/dna/storage_ODG.jazz')
        loader.loadDNAFile(base.cr.playGame.hood.dnaStore, 'phase_4/dna/storage.jazz')
        loader.loadDNAFile(base.cr.playGame.hood.dnaStore, 'phase_4/dna/storage_TT_sz.jazz')
        self.moneybin = loader.loadDNAFile(base.cr.playGame.hood.dnaStore, 'phase_14/dna/tt_dg_moneybin_area.jazz')
        self.moneybin = NodePath(self.moneybin)
        self.moneybin.setH(90)
        self.moneybin.reparentTo(render)
        self.modelList.append(self.moneybin)
        self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
        self.sky.setScale(2.42)
        self.sky.reparentTo(render)
        # The cog bin starts hidden at z=150 and descends with cogArea.
        self.cogbin = loader.loadModel('phase_14/models/modules/cogbin')
        self.cogbin.reparentTo(render)
        self.cogbin.setPosHprScale(0.0, 2.16, 150, 0.0, 0.0, 0.0, 1.38, 1.38, 1.38)
        self.cogbin.hide()
        self.modelList.append(self.cogbin)
        groundTexture = loader.loadTexture('phase_9/maps/ground7.jpg')
        sidewalkTexture = loader.loadTexture('phase_9/maps/CementFloorx4Warm.jpg')
        self.robberbaron = ToontownAvatarUtils.createDistributedCog('rb', 0.0, -48.43, 16.999, 0.0, 0.0, 0.0, level=12)
        # Re-texture every street piece to the cog ground/sidewalk look.
        # NOTE(review): the sidewalk Func is appended twice per node —
        # looks like a copy/paste leftover; the last applied texture wins.
        streetTrack = Parallel()
        for street in ['street_80x40_sidewalk', 'street_80x40_curb', 'street_80x40_street', 'street_25x40_street',
                       'street_25x40_sidewalk', 'street_25x40_curb']:
            for node in render.findAllMatches('**/%s' % street):
                streetTrack.append(Func(node.setTexture, groundTexture, 1))
                streetTrack.append(Func(node.setTexture, sidewalkTexture, 1))
                streetTrack.append(Func(node.setTexture, sidewalkTexture, 1))
        # Fade the toon buildings to black while squashing them flat, then
        # remove the nodes.  NOTE(review): the moneybin1 colorScaleInterval is
        # listed twice — presumably redundant; confirm before removing.
        moneybinTrack = Parallel(
            self.moneybin.find('**/moneybin1').colorScaleInterval(4.2, (0, 0, 0, 1)),
            self.moneybin.find('**/moneybin1').colorScaleInterval(4.2, (0, 0, 0, 1)),
            Sequence(Wait(1.75),
                     Parallel(Sequence(
                         Parallel(self.moneybin.find('**/moneybin1').scaleInterval(3.75, (1, 1, 0.001)),
                                  self.moneybin.find('**/flowers').scaleInterval(3.75, (1, 1, 0.001)),
                                  self.moneybin.find('**/trees').scaleInterval(3.75, (1, 1, 0.001)),
                                  self.moneybin.find('**/tag_arena_wall').scaleInterval(3.75, (1, 1, 0.001)),
                                  self.moneybin.find('**/out_arena_trees_1').scaleInterval(3.75, (1, 1, 0.001)),
                                  self.moneybin.find('**/out_arena_trees_2').scaleInterval(3.75, (1, 1, 0.001))),
                         Func(self.moneybin.find('**/moneybin1').removeNode),
                         Func(self.moneybin.find('**/flowers').removeNode),
                         Func(self.moneybin.find('**/trees').removeNode),
                         Func(self.moneybin.find('**/tag_arena_wall').removeNode),
                         Func(self.moneybin.find('**/out_arena_trees_1').removeNode),
                         Func(self.moneybin.find('**/out_arena_trees_2').removeNode)))))
        # Reveal the cog sky/props, slow the theme down, lower the cog bin and
        # area into place, then give them a squash-and-stretch landing bounce.
        cogBinTrack = Parallel(
            Func(self.cogbin.show), Func(self.cogArea.show), Func(self.sky.removeNode),
            Func(self.cogSky.show), Func(self.songRateChange, self.moneyBinTheme),
            Sequence(
                Parallel(self.cogbin.posInterval(5.0, (0.0, 2.16, 16.3)),
                         self.cogArea.posInterval(5.0, (0.0, 2.16, 0.0))),
                Parallel(
                    Sequence(LerpScaleInterval(self.cogbin, 0.45, (1.38, 1.38, 1.1), blendType='easeInOut'),
                             LerpScaleInterval(self.cogbin, 0.6, (1.38, 1.38, 1.6), blendType='easeInOut'),
                             LerpScaleInterval(self.cogbin, 0.7, (1.38, 1.38, 1.28), blendType='easeInOut'),
                             LerpScaleInterval(self.cogbin, 0.75, (1.38, 1.38, 1.38), blendType='easeInOut')),
                    Sequence(LerpScaleInterval(self.cogArea, 0.45, (1.0, 1.0, 0.7), blendType='easeInOut'),
                             LerpScaleInterval(self.cogArea, 0.6, (1.0, 1.0, 1.3), blendType='easeInOut'),
                             LerpScaleInterval(self.cogArea, 0.7, (1.0, 1.0, 0.9), blendType='easeInOut'),
                             LerpScaleInterval(self.cogArea, 0.75, (1.0, 1.0, 1.0), blendType='easeInOut')))))
        # Camera choreography: Robber Baron flies in, walks to the bin, then
        # the camera pulls back while the ground is re-textured.
        self.cameraTrack = Sequence(
            Func(self.robberbaron.setPosHpr, 0.0, -48.43, 16.999, 0.0, 0.0, 0.0),
            Func(base.camera.setPosHpr, -90.37, -95.11, 20.3, -154, 0.0, 0.0),
            Parallel(Sequence(Wait(1.0),
                              self.robberbaron.beginSupaFlyMove(VBase3(0.0, -48.43, 16.999), 1, 'flyIn', walkAfterLanding=False)),
                     LerpPosHprInterval(base.camera, 8.0, (-34.39, -28.71, 25.038), (-130, -0.6, 0.0), blendType='easeInOut')),
            Func(base.camera.setPosHpr, 27.945, -83.46, 17.402, 34.5, 8.24, 4.5),
            Func(self.robberbaron.loop, 'walk'),
            self.robberbaron.posHprInterval(2.5, (0.0, -27.43, 21.398), (0.0, 0.0, 0.0)),
            self.robberbaron.posHprInterval(1.5, (0.0, -9, 21.398), (0.0, 0.0, 0.0)),
            Func(base.camera.setPosHpr, 2.4331, -238.5, 20.352, -1.7, 11.8, 9.0),
            Func(self.robberbaron.delete),
            Wait(5.0),
            Func(base.localAvatar.brickLay, 'cog'),
            Func(self.moneybin.find('**/ground').setTexture, groundTexture, 1),
            Func(self.moneybin.find('**/moneybin_hill').setTexture, groundTexture, 1),
            Func(self.moneybin.find('**/ground').setColor, 1, 1, 1, 1),
            Func(self.moneybin.find('**/moneybin_hill').setColor, 1, 1, 1, 1),
            Func(self.moneyBinTheme.stop),
            Func(self.moneyBinTakeOver.play),
            Func(base.camera.setPosHpr, 131.27, -382.0, 78.261, 19.1, -5.1, 0.0),
            streetTrack)
        # Building swap starts 12s in; fade out and clean up once it lands.
        self.buildingTrack = Sequence(Wait(12.0),
                                      Parallel(moneybinTrack, cogBinTrack,
                                               Sequence(Wait(8.0), Func(base.transitions.fadeOut, 2.0),
                                                        Wait(2.0), Func(self.cleanupScene))))
        self.animation = Parallel(Func(self.moneyBinTheme.play), self.cameraTrack, self.buildingTrack)
        self.animation.start()
        return

    def songRateChange(self, song):
        """Ramp the song's play rate from 1.0 down to 0 over ~5s, then stop it."""
        rateTrack = Sequence()
        playRate = 1.0
        for rate in xrange(0, 101):
            rateTrack.append(Func(song.setPlayRate, playRate))
            rateTrack.append(Wait(0.05))
            playRate -= 0.01
        rateTrack.append(Func(song.stop))
        rateTrack.start()

    def setUpStreet(self):
        """Attach elevators and suit signs to every suit building door origin.

        Floor lights are dimmed, and all but one randomly chosen floor light
        are hidden so each building shows a random active floor.
        """
        for dept in ['sales', 'money', 'corp', 'legal']:
            for spot in render.findAllMatches('**/suit_landmark_new_%s_door_origin' % dept):
                elevator = loader.loadModel('phase_4/models/modules/elevator')
                elevator.reparentTo(spot)
                randomFloor = random.randint(3, 5)
                hideList = 5
                for light in range(1, 6):
                    elevator.find('**/floor_light_%s' % light).setColor(0.5, 0.5, 0.5, 1)
                    if hideList != randomFloor:
                        elevator.find('**/floor_light_%s' % hideList).hide()
                    hideList = hideList - 1
                sign = loader.loadModel('phase_5/models/modules/suit_sign')
                sign.reparentTo(spot)
                sign.setPos(0, -0.1, 12.5)
                sign.setScale(5)
        # The secondary money building uses a wider floor range and lower sign.
        for spot in render.findAllMatches('**/suit_landmark_money2_door_origin'):
            elevator = loader.loadModel('phase_4/models/modules/elevator')
            elevator.reparentTo(spot)
            randomFloor = random.randint(2, 5)
            hideList = 5
            for light in range(1, 6):
                elevator.find('**/floor_light_%s' % light).setColor(0.5, 0.5, 0.5, 1)
                if hideList != randomFloor:
                    elevator.find('**/floor_light_%s' % hideList).hide()
                hideList = hideList - 1
            sign = loader.loadModel('phase_5/models/modules/suit_sign')
            sign.reparentTo(spot)
            sign.setPos(0, -0.1, 11.5)
            sign.setScale(5)

    def cleanupScene(self):
        """Remove every node collected in modelList from the scene graph."""
        for model in self.modelList:
            model.removeNode()
        # NOTE(review): only unbinds the loop variable, not the list entries.
        del model
370a96e1434086705b394298ea7d87edaa65f00a | 4,309 | py | Python | easyci/app/easyCI/tasksPool.py | 9OMShitikov/anytask | 71354543f467f6c824dfb194bf48ee76c391ff53 | [
"MIT"
] | null | null | null | easyci/app/easyCI/tasksPool.py | 9OMShitikov/anytask | 71354543f467f6c824dfb194bf48ee76c391ff53 | [
"MIT"
] | null | null | null | easyci/app/easyCI/tasksPool.py | 9OMShitikov/anytask | 71354543f467f6c824dfb194bf48ee76c391ff53 | [
"MIT"
] | null | null | null | import json
import requests
import tempfile
import shutil
import subprocess
import os
import logging
import urllib.request
from multiprocessing import Pool
import app.easyCI.docker as docker
from contextlib import contextmanager
# Module-level logger for the task pool.
LOG = logging.getLogger(__name__)
CONFIG = "config.json"  # default path of the CI configuration file
PASSWORDS = "passwords.json"  # default path of the credentials file
MAX_COMMENT_SIZE = 10000  # checker output is truncated to this many characters
PROCS = 1  # presumably the worker Pool size — used below this chunk; confirm
REQUEST_TIMEOUT = 300  # seconds allowed for HTTP calls back to anytask
class QueueTask(object):
    """A single check request pulled from the queue.

    All fields default to ``None`` via class attributes; the queue reader
    populates instances attribute-by-attribute.
    """

    host = None
    auth = None
    config = None
    id = None
    course = None
    task = None
    issue = None
    event = None
    files = None

    def __repr__(self):
        # Only attributes that were actually assigned on the instance appear.
        return repr(vars(self))
@contextmanager
def tmp_dir(base="/var/tmp"):
    """Context manager yielding a fresh temporary directory.

    The directory and everything inside it are removed on exit, whether or
    not the body raised.

    Args:
        base: parent directory for the scratch directory.  Defaults to
            /var/tmp (the original hard-coded location) but can now be
            overridden, e.g. for tests or hosts without /var/tmp.
    """
    t = tempfile.mkdtemp(dir=base)
    try:
        yield t
    finally:
        shutil.rmtree(t)
def git_clone(repo, dst_dir):
    """Clone *repo* into *dst_dir*; raises CalledProcessError on failure."""
    clone_cmd = ["git", "clone", repo, dst_dir]
    LOG.info("RUN: %s", clone_cmd)
    subprocess.check_call(clone_cmd)
def prepare_dir(qtask, dirname):
    """Populate *dirname* with the course repo (git/) and task files (task/)."""
    git_dir = os.path.join(dirname, "git")
    task_dir = os.path.join(dirname, "task")
    git_clone(qtask.course["repo"], git_dir)
    os.mkdir(task_dir)
    for url in qtask.files:
        # Local file name is the last path segment of the URL.
        filename = url.split('/')[-1]
        dst_path = os.path.join(task_dir, filename)
        LOG.info("Download '%s' -> '%s'", url, dst_path)
        print(url, dst_path)
        urllib.request.urlretrieve(url, dst_path)
def process_task(qtask):
    """Run one queued check inside Docker and post the output as a comment.

    Clones the course repo and downloads the submitted files into a temp
    dir, mounts it read-only into the course's Docker image, runs the
    check command, then POSTs the (possibly truncated) output to the
    issue's add_comment API endpoint. Returns *qtask*.
    """
    LOG.info("Proccess task %s", qtask.id)
    with tmp_dir() as dirname:
        prepare_dir(qtask, dirname)
        run_cmd = qtask.course["run_cmd"] + [qtask.task, "/task_dir/task"]
        #run_cmd = ["ls", "/task_dir/task"]
        ret = docker.execute(run_cmd, cwd="/task_dir/git", timeout=qtask.course["timeout"], user='root',
                             network='bridge', image=qtask.course["docker_image"],
                             volumes=["{}:/task_dir:ro".format(os.path.abspath(dirname))])
        status, retcode, is_timeout, output = ret
    LOG.info("Task %d done, status:%s, retcode:%d, is_timeout:%d",
             qtask.id, status, retcode, is_timeout)
    LOG.info(" == Task %d output start", qtask.id)
    for line in output.split("\n"):
        LOG.info(line)
    LOG.info(" == Task %d output end", qtask.id)
    # Keep the posted comment bounded; mark truncation and timeouts.
    if len(output) > MAX_COMMENT_SIZE:
        output = output[:MAX_COMMENT_SIZE]
        output += u"\n...\nTRUNCATED"
    if is_timeout:
        output += u"\nTIMEOUT ({} sec)".format(qtask.course["timeout"])
    comment = u"[id:{}] Check DONE!<br>\nSubmited on {}<br>\n<pre>{}</pre>\n".format(qtask.id,
                                                                                    qtask.event_timestamp,
                                                                                    output)
    LOG.info("{}/api/v1/issue/{}/add_comment".format(qtask.host, qtask.issue_id))
    response = requests.post("{}/api/v1/issue/{}/add_comment".format(qtask.host, qtask.issue_id),
                             auth=qtask.auth, data={"comment":comment.encode("utf-8")}, timeout=REQUEST_TIMEOUT)
    response.raise_for_status()
    LOG.info(" == Task %d DONE!, URL: %s/issue/%d", qtask.id, qtask.host, qtask.issue_id)
    return qtask
def load_passwords(filename=PASSWORDS):
    """Load the per-host credentials mapping from *filename* (JSON)."""
    with open(filename) as config_fn:
        return json.load(config_fn)
def load_config(filename=CONFIG):
    """Read the JSON course list from *filename*, indexed by course_id."""
    with open(filename) as fh:
        courses = json.load(fh)
    return {course["course_id"]: course for course in courses}
def get_auth(passwords, host):
    """Return the (username, password) pair for *host* from the mapping."""
    creds = passwords[host]
    return creds["username"], creds["password"]
# Module-level state initialised at import time: course configuration,
# per-host credentials, and the worker pool that executes checks.
config = load_config()
passwords = load_passwords()
pool = Pool(processes=PROCS)
def put_to_pool(task):
    """Build a QueueTask from a raw *task* dict and schedule it on the pool."""
    course_id = task["course_id"]
    course = config[course_id]
    auth = get_auth(passwords, course["host"])
    files = task["files"]
    qtask = QueueTask()
    qtask.host = course["host"]
    qtask.auth = auth
    qtask.course = course
    qtask.task = task["title"]
    qtask.issue_id = task["issue_id"]
    qtask.files = files
    qtask.id = task["event"]["id"]
    qtask.event_timestamp = task["event"]["timestamp"]
    print(qtask)
    # Fire-and-forget: results are posted by process_task itself.
    pool.apply_async(process_task, args=(qtask,))
| 29.312925 | 116 | 0.601764 | 233 | 0.054073 | 118 | 0.027385 | 134 | 0.031098 | 0 | 0 | 692 | 0.160594 |
370bb514404727469781e53bb089355e3b933806 | 1,201 | py | Python | polling_stations/apps/data_collection/management/commands/import_monmouthshire.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_monmouthshire.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_monmouthshire.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
    """Import polling districts/stations from shapefiles for council W06000021."""

    srid = 27700
    council_id = "W06000021"
    districts_name = "polling_district"
    stations_name = "polling_station.shp"
    elections = ["local.monmouthshire.2017-05-04", "parl.2017-06-08"]

    def district_record_to_dict(self, record):
        # Field 1 is the district identifier; reused as the display name.
        return {
            "internal_council_id": str(record[1]).strip(),
            "name": str(record[1]).strip(),
            "polling_station_id": record[3],
        }

    def station_record_to_dict(self, record):
        # NOTE(review): field meanings (0=id, 2/4=address parts, 1=UPRN?)
        # are inferred from usage — confirm against the shapefile schema.
        station = {
            "internal_council_id": record[0],
            "postcode": "",
            "address": "%s\n%s" % (record[2].strip(), record[4].strip()),
        }
        if str(record[1]).strip() == "10033354925":
            """
            There is a dodgy point in this file.
            It has too many digits for a UK national grid reference.
            Joe queried, Monmouthshire provided this corrected point by email
            """
            station["location"] = Point(335973, 206322, srid=27700)
        return station
return station
| 32.459459 | 83 | 0.613655 | 1,072 | 0.89259 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.362198 |
370d8cad82f39f9f6c319866224a8b6a5d29e6e7 | 147 | py | Python | compiler/astnodes/typeannotation.py | yangdanny97/chocopy-python-frontend | d0fb63fc744771640fa4d06076743f42089899c1 | [
"MIT"
] | 3 | 2020-05-30T16:59:33.000Z | 2020-05-31T00:28:45.000Z | compiler/astnodes/typeannotation.py | yangdanny97/chocopy-python-frontend | d0fb63fc744771640fa4d06076743f42089899c1 | [
"MIT"
] | 1 | 2020-05-30T17:57:11.000Z | 2020-05-30T20:44:53.000Z | compiler/astnodes/typeannotation.py | yangdanny97/chocopy-python-frontend | d0fb63fc744771640fa4d06076743f42089899c1 | [
"MIT"
] | null | null | null | from .node import Node
class TypeAnnotation(Node):
    """Base AST node representing a type annotation."""

    def __init__(self, location: [int], kind: str):
        super().__init__(location, kind)
| 18.375 | 51 | 0.673469 | 121 | 0.823129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
370f39af0bff09b2f1ff0353894b6e2f8d8e79a5 | 2,821 | py | Python | lib/lambdascrapers/sources_ lambdascrapers/de/video4k.py | proxium/script.module.lambdascrapers | f96ad4c7c44c011c9d0007a83edde8c4797e0e2f | [
"Beerware"
] | 11 | 2018-12-21T22:52:37.000Z | 2021-09-02T02:13:50.000Z | lib/lambdascrapers/sources_ lambdascrapers/de/video4k.py | proxium/script.module.lambdascrapers | f96ad4c7c44c011c9d0007a83edde8c4797e0e2f | [
"Beerware"
] | null | null | null | lib/lambdascrapers/sources_ lambdascrapers/de/video4k.py | proxium/script.module.lambdascrapers | f96ad4c7c44c011c9d0007a83edde8c4797e0e2f | [
"Beerware"
] | 1 | 2020-02-01T19:52:36.000Z | 2020-02-01T19:52:36.000Z | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import json
import re
import urllib
import urlparse
from resources.lib.modules import client
from resources.lib.modules import source_utils
class source:
    """Scraper source for video4k.to (German-language hosts).

    Python 2 style code (``urllib.urlencode``/``urlparse`` module).
    The movie/tvshow/episode methods return an urlencoded query string
    that ``sources`` later parses back to build the API request.
    """

    def __init__(self):
        self.priority = 1
        self.language = ['de']
        self.domains = ['video4k.to']
        self.base_link = 'http://video4k.to'
        self.request_link = '/request'

    def movie(self, imdb, title, localtitle, aliases, year):
        # Strip non-digits from the IMDB id; bare except returns None on any failure.
        try:
            return urllib.urlencode({'mID': re.sub('[^0-9]', '', imdb)})
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            return urllib.urlencode({'mID': re.sub('[^0-9]', '', imdb)})
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None:
                return
            return urllib.urlencode({'mID': re.sub('[^0-9]', '', imdb), 'season': season, 'episode': episode})
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """POST the query to the site API and collect playable host links."""
        sources = []
        try:
            if url == None:
                return sources
            data = urlparse.parse_qs(url)
            # parse_qs yields lists; flatten to single values (empty string if absent).
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            data.update({'raw': 'true', 'language': 'de'})
            data = urllib.urlencode(data)
            data = client.request(urlparse.urljoin(self.base_link, self.request_link), post=data)
            data = json.loads(data)
            # NOTE(review): response shape (data[1] mapping of entries with
            # 'name'/'links') is inferred from this parsing — confirm vs API.
            data = [i[1] for i in data[1].items()]
            data = [(i['name'].lower(), i['links']) for i in data]
            for host, links in data:
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid: continue
                for link in links:
                    try:sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': link['URL'], 'direct': False, 'debridonly': False})
                    except: pass
            return sources
        except:
            # Best-effort scraper: any failure returns whatever was collected.
            return sources

    def resolve(self, url):
        # URLs are already direct enough for the resolver chain; pass through.
        return url
| 33.987952 | 149 | 0.499468 | 1,998 | 0.708259 | 0 | 0 | 0 | 0 | 0 | 0 | 871 | 0.308756 |
370fc257e4ad1d9ff8e001439c3aa8ae3d6aba1a | 808 | py | Python | lab-sessions/lab-3/ex3_gray_scale.py | DatacollectorVN/BME-Bio-Image-Processing-class | bc750f190398a1c29e2a8cd8092ced2072ce02e9 | [
"MIT"
] | null | null | null | lab-sessions/lab-3/ex3_gray_scale.py | DatacollectorVN/BME-Bio-Image-Processing-class | bc750f190398a1c29e2a8cd8092ced2072ce02e9 | [
"MIT"
] | null | null | null | lab-sessions/lab-3/ex3_gray_scale.py | DatacollectorVN/BME-Bio-Image-Processing-class | bc750f190398a1c29e2a8cd8092ced2072ce02e9 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import argparse
def main(image_file_path):
    """Show an image and its grayscale conversion until ESC is pressed."""
    img = cv2.imread(image_file_path)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    name_window_1 = "original"
    name_window_2 = "grayscale"
    while True:
        cv2.imshow(name_window_1, img)
        cv2.imshow(name_window_2, img_gray)
        key = cv2.waitKey(0)
        # press ESC to close
        if key == 27:
            break
    # destroy all windows
    cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--imagepath", dest = "image_file_path", type = str,
default = None, help = "Image file path")
args = parser.parse_args()
image_file_path = args.image_file_path
main(image_file_path) | 27.862069 | 76 | 0.632426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.147277 |
371029d250aeabea72732a867201c7c53e2e6057 | 862 | py | Python | project/tests/GUI/tools.py | RemuTeam/Remu | a7d100ff9002b1b1d27249f8adf510b5a89c09e3 | [
"MIT"
] | 2 | 2017-09-18T11:04:38.000Z | 2017-09-25T17:23:21.000Z | project/tests/GUI/tools.py | RemuTeam/Remu | a7d100ff9002b1b1d27249f8adf510b5a89c09e3 | [
"MIT"
] | 26 | 2017-09-20T09:11:10.000Z | 2017-12-11T12:21:56.000Z | project/tests/GUI/tools.py | RemuTeam/Remu | a7d100ff9002b1b1d27249f8adf510b5a89c09e3 | [
"MIT"
] | null | null | null | from functools import partial
from kivy.clock import Clock
def to_task(s):
    """Drive GUI simulator *s* from the main menu to the task screen."""
    s.press("//MenuButtonTitled[@name='LOGO']")
    s.assert_on_screen('activity')
    s.press('//StartNowButton')
    s.assert_on_screen('tasks')
    s.tap("//TestIntro//TestCarouselForwardButton")
    s.assert_on_screen("test", manager_selector="//TasksScreen/ScreenManager")
    s.tap("//BlinkImageButton[@name='task_icon']")
def without_schedule_seconds(function):
    """Decorator: call *function* with the trailing positional argument dropped.

    Kivy's Clock appends the elapsed-seconds value as the last positional
    argument of scheduled callbacks; this wrapper discards it. The wrapped
    call deliberately returns None (the original did too), so the callback's
    return value never unschedules a Clock interval by accident.
    """
    from functools import wraps

    @wraps(function)  # fix: preserve the wrapped function's name/docstring
    def inner(*args, **kwargs):
        function(*args[:-1], **kwargs)

    return inner
def simulate(function):
    """Wrap *function* so the wrapper starts it on a simulator.

    The wrapper takes ``(simulator, params)``; a falsy *params* is
    replaced by an empty dict before being passed on.
    """
    def runner(simulator, params):
        effective = params if params else {}
        simulator.start(function, effective)
    return runner
def execution_step(function):
    """Wrap a method so calls are queued on ``self.execution_queue``
    (as ``(function, args, kwargs)`` tuples) instead of executed now."""
    def enqueue(self, *args, **kwargs):
        entry = (function, args, kwargs)
        self.execution_queue.append(entry)
    return enqueue
| 24.628571 | 78 | 0.701856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.212297 |
37108a3012ad4523c07b518dd9e47f3e03dc8e23 | 1,726 | py | Python | login_spider.py | ERUIHNIYHBKBNF/Digit-Recognition | c2334222da4b357836e7a0d53de9eccabc587113 | [
"MIT"
] | null | null | null | login_spider.py | ERUIHNIYHBKBNF/Digit-Recognition | c2334222da4b357836e7a0d53de9eccabc587113 | [
"MIT"
] | null | null | null | login_spider.py | ERUIHNIYHBKBNF/Digit-Recognition | c2334222da4b357836e7a0d53de9eccabc587113 | [
"MIT"
] | null | null | null | import requests as rq
import cv2
import time
import os
from picdiv import divide
from knn import getVeryValue
from utils import showimg
# Fetch the CAPTCHA image from the login page.
url = 'http://222.194.10.249/inc/validatecode.asp'
res = rq.get(url)
# Add a timestamp to the file name to avoid collisions.
fileName = str(int(time.time())) + '.jpg'
# Can't convert the binary response to RGB directly in memory,
# so save it to disk and read it back.
with open(fileName, 'wb') as f:
    f.write(res.content)
img = cv2.imread(fileName)
# Split the CAPTCHA into digit images and recognise them with KNN.
digits = divide(img)
res = getVeryValue(digits)
# Digits come back in reverse order; rebuild the integer value.
value = 0
for i in res[::-1]:
    value = value * 10 + int(i)
showimg(img)
print(value)
os.remove(fileName)
# Submit the login form with the recognised CAPTCHA value.
url = 'http://222.194.10.249/checklogin.asp'
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Length': '69',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Cookie': 'ASPSESSIONIDQABSAQDS=EBCFBPHBMKFIIBBNLHJMCHKJ; XINHAI_Admin_Id=; XINHAI_Admin_Password=; XINHAI_Admin_Right=; XINHAI%5FAdmin=; XINHAI_Student_Id=; XINHAI_Student_Password=; XINHAI=; XINHAI%5FStudent=; XINHAI_Message=',
    'Host': '222.194.10.249',
    'Origin': 'http://222.194.10.249',
    'Pragma': 'no-cache',
    'Referer': 'http://222.194.10.249/index.asp',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'
}
data = {
    'usertype': 'Student',
    # Placeholder credentials (the strings are Chinese for "username"
    # and "password") — replace with real values before use.
    'username': '用户名',
    'password': '密码',
    'validate': value
}
res = rq.post(url = url, headers = headers, data = data)
cookies = rq.utils.dict_from_cookiejar(res.cookies)
print(cookies)
| 33.192308 | 231 | 0.70336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,100 | 0.611111 |
3710f1bb3487fa331fc10580553cc5631bd8c85e | 808 | py | Python | auctions/migrations/0020_bids_bidder_alter_list_date.py | AncientSoup/cs50w_commerce | fb4cb8a47279e562f1d4a859abbf44ea5a7d9891 | [
"MIT"
] | 1 | 2022-01-25T10:40:44.000Z | 2022-01-25T10:40:44.000Z | auctions/migrations/0020_bids_bidder_alter_list_date.py | AncientSoup/cs50w_commerce | fb4cb8a47279e562f1d4a859abbf44ea5a7d9891 | [
"MIT"
] | null | null | null | auctions/migrations/0020_bids_bidder_alter_list_date.py | AncientSoup/cs50w_commerce | fb4cb8a47279e562f1d4a859abbf44ea5a7d9891 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.1 on 2022-02-12 11:07
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration: adds the Bids.bidder foreign key and
    # re-pins List.date's default. NOTE(review): the default is a fixed
    # datetime captured at makemigrations time, a known Django artefact
    # of `default=datetime.now()` in the model — harmless here but the
    # model likely wants `default=timezone.now` (callable).

    dependencies = [
        ('auctions', '0019_alter_list_date_alter_list_price'),
    ]

    operations = [
        migrations.AddField(
            model_name='bids',
            name='bidder',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='list',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2022, 2, 12, 11, 7, 47, 65691, tzinfo=utc)),
        ),
    ]
| 28.857143 | 133 | 0.653465 | 595 | 0.736386 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.15099 |
37110cd9f3d19b020331f8f0ff331ab009b270d9 | 1,896 | py | Python | tests/test_baseline.py | pleiszenburg/pyIGRF | 2572afd40650ef7f10ba1804aaa4c7f870ef704e | [
"MIT"
] | 1 | 2022-02-13T07:30:05.000Z | 2022-02-13T07:30:05.000Z | tests/test_baseline.py | pleiszenburg/pyCRGI | 3a1f394e2df6702432b29d2a7edded4d63cb14da | [
"MIT"
] | null | null | null | tests/test_baseline.py | pleiszenburg/pyCRGI | 3a1f394e2df6702432b29d2a7edded4d63cb14da | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from math import isclose
import pytest
from pyCRGI.pure import (
get_coeffs as pure_get_coeffs,
get_value as pure_get_value,
get_variation as pure_get_variation,
)
from pyCRGI.jited import (
get_coeffs as jited_get_coeffs,
get_value as jited_get_value,
get_variation as jited_get_variation,
)
@pytest.mark.parametrize(
    "get_value, get_variation",
    [(pure_get_value, pure_get_variation), (jited_get_value, jited_get_variation)]
)
def test_doc(get_value, get_variation):
    """Both backends must expose non-empty docstrings on their public API."""
    doc = get_value.__doc__
    assert isinstance(doc, str)
    assert len(doc) > 0
    doc = get_variation.__doc__
    assert isinstance(doc, str)
    assert len(doc) > 0
@pytest.mark.parametrize(
    "get_value, get_variation",
    [(pure_get_value, pure_get_variation), (jited_get_value, jited_get_variation)]
)
def test_compute(get_value, get_variation):
    """Pure and jitted backends must reproduce the same baseline values
    for a fixed location/date (lat 40, lon 116, alt 300, year 1999)."""
    date = 1999
    lat = 40
    lon = 116
    alt = 300
    # Regression baselines captured from a known-good run.
    expected_value = (
        -5.080158216428891,
        57.85556540804097,
        24750.880520185507,
        24653.65386814849,
        -2191.674582146139,
        39388.39340198416,
        46519.368238551644,
    )
    expected_variation = (
        -0.022800119085463918,
        0.04087715389679826,
        -19.857404366020084,
        -20.65154904740848,
        -8.05224429543091,
        30.777595502899203,
        15.49444079804009,
    )
    computed_value = get_value(lat, lon, alt, date)
    computed_variation = get_variation(lat, lon, alt, date)
    assert all(isclose(a, b) for a, b in zip(expected_value, computed_value))
    assert all(isclose(a, b) for a, b in zip(expected_variation, computed_variation))
@pytest.mark.parametrize(
    "get_coeffs",
    [pure_get_coeffs, jited_get_coeffs]
)
def test_coeffs(get_coeffs):
    """Both backends must return 14-entry g and h coefficient arrays."""
    date = 1999
    g, h = get_coeffs(date)
    assert len(g) == 14
    assert len(h) == 14
| 22.571429 | 85 | 0.675105 | 0 | 0 | 0 | 0 | 1,543 | 0.813819 | 0 | 0 | 87 | 0.045886 |
371263ea2ae9c13774481f8ebcb487baf4684615 | 1,188 | py | Python | mum/update/__init__.py | mapgears/maritime-update-manager | 5a08a3fc609e4a37c758e52dbf2f4fd6ae541bc7 | [
"MIT"
] | null | null | null | mum/update/__init__.py | mapgears/maritime-update-manager | 5a08a3fc609e4a37c758e52dbf2f4fd6ae541bc7 | [
"MIT"
] | 1 | 2022-01-13T01:08:28.000Z | 2022-01-13T01:08:28.000Z | mum/update/__init__.py | mapgears/maritime-update-manager | 5a08a3fc609e4a37c758e52dbf2f4fd6ae541bc7 | [
"MIT"
] | null | null | null | """Update maritime data using update modules"""
from __future__ import annotations
from argparse import ArgumentParser
import base64
import hashlib
import os
import shelve
import toml
from .modules import get_update_module
def main():
    """CLI entry point: read the TOML config and run all enabled updaters."""
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('configfile', metavar='config.toml')
    args = parser.parse_args()
    config = toml.load(args.configfile)
    statefile = config.get('statefile')
    if statefile is None:
        # Derive a stable per-config state file name from a short hash of
        # the config file's absolute path, so each config gets its own state.
        location = os.path.realpath(args.configfile)
        hash = hashlib.sha256(location.encode()).digest()[:6]
        encoded_hash = base64.urlsafe_b64encode(hash).decode()
        statefile = f'/var/tmp/mum-{encoded_hash[:8]}.statefile'
    # Scratch state shared between updaters for this run only (not persisted).
    transient_state = {}
    with shelve.open(statefile, writeback=True) as db:
        for updater_config in config.get('updater', []):
            updater_cls = get_update_module(updater_config['module'])
            if updater_config.get('enabled', True):
                updater = updater_cls(**updater_config)
                if updater.needs_update(db, transient_state):
                    updater.update(db, transient_state)
        db.sync()
| 31.263158 | 69 | 0.676768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.128788 |
3712e528ebc57eaa9774a8a00d9c212c77e54ca6 | 4,627 | py | Python | ENCODN/TOOLS/CRYPTOGRAPHY/OTHER/ROZIERCYPHER/ROZIER.py | chrisyo99/ENCODN | 578a5eb87b68e4ba5ebb1c87808ad04aa160df5e | [
"MIT"
] | 6 | 2020-10-07T13:09:38.000Z | 2021-01-16T17:16:51.000Z | ENCODN/TOOLS/CRYPTOGRAPHY/OTHER/ROZIERCYPHER/ROZIER.py | chrisyo99/ENCODN | 578a5eb87b68e4ba5ebb1c87808ad04aa160df5e | [
"MIT"
] | 27 | 2020-10-09T09:14:23.000Z | 2021-01-22T07:16:43.000Z | ENCODN/TOOLS/CRYPTOGRAPHY/OTHER/ROZIERCYPHER/ROZIER.py | chrisyo99/ENCODN | 578a5eb87b68e4ba5ebb1c87808ad04aa160df5e | [
"MIT"
] | 14 | 2020-10-07T14:25:59.000Z | 2021-02-21T16:54:37.000Z | """
Information on the Rozier Cipher can be found at:
https://www.dcode.fr/rozier-cipher
ROZIER.py
Written by: MrLukeKR
Updated: 16/10/2020
"""
# Default key for the Rozier cipher. A fresh key per message gives better
# security; this constant is provided for convenience.
constant_key = "DCODE"


def _key_shift(key: str, index: int) -> int:
    """Return the signed alphabet distance from key[index] to key[index + 1].

    Both positions are taken cyclically, so the key repeats over long
    messages. Negative values shift left, positive values shift right.
    """
    return ord(key[(index + 1) % len(key)]) - ord(key[index % len(key)])


def encrypt(plaintext: str, key: str=constant_key):
    """
    Encrypts a plaintext string using the Rozier cipher and a constant key.
    Optionally, the function can accept a different key as a parameter.

    Input is upper-cased first; only letters A-Z are supported (other
    characters are folded into the alphabet by the modulus).
    """
    plaintext = plaintext.upper()
    # Each letter is shifted by the distance between consecutive key letters,
    # wrapped into the 26-letter alphabet.
    return "".join(
        chr(ord('A') + (ord(letter) - ord('A') + _key_shift(key, position)) % 26)
        for position, letter in enumerate(plaintext)
    )


def decrypt(ciphertext: str, key: str=constant_key):
    """
    Decrypts a ciphertext string using the Rozier cipher and a constant key.
    Optionally, the function can accept a different key as a parameter.

    Input is upper-cased first; only letters A-Z are supported (other
    characters are folded into the alphabet by the modulus).
    """
    ciphertext = ciphertext.upper()
    # Decryption applies the inverse shift: subtract the same key distance
    # that encryption added, wrapped into the 26-letter alphabet.
    return "".join(
        chr(ord('A') + (ord(letter) - ord('A') - _key_shift(key, position)) % 26)
        for position, letter in enumerate(ciphertext)
    )
| 40.234783 | 80 | 0.685758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,984 | 0.64491 |
37142a2768a71aca4c786f5f03f214574e278dfd | 3,324 | py | Python | tests/highlevel/simpler-deepcell.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 15 | 2017-06-07T12:49:12.000Z | 2020-07-25T18:06:04.000Z | tests/highlevel/simpler-deepcell.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 110 | 2016-06-21T23:20:44.000Z | 2022-02-24T16:15:22.000Z | tests/highlevel/simpler-deepcell.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 6 | 2016-06-21T11:19:22.000Z | 2019-01-21T13:45:39.000Z | from seamless.highlevel import Context
from pprint import pprint
# Build a minimal Seamless context and set a plain value cell.
ctx = Context()
ctx.a = 12
ctx.compute()
print(ctx.a.value)
print(ctx.a.schema) # None
def triple_it(a):
    """Return three times *a*."""
    tripled = 3 * a
    return tripled
def triple_it_b(a, b):
    """Return 3*a + b, printing "RUN!" as an execution marker."""
    print("RUN!")
    result = 3 * a + b
    return result
# Exercise a Seamless transformer built from triple_it, with deep-cell
# (hash-pattern) input, then swap in triple_it_b and probe error states.
ctx.transform = triple_it
ctx.transform.debug.direct_print = True
ctx.transform.hash_pattern = {"*": "#"}
ctx.transform.debug.direct_print = True
ctx.transform.a = 1
print("START")
ctx.compute()
print(ctx.transform.inp.value, ctx.transform.result.value)
ctx.transform.a = ctx.a
ctx.transform.example.a = 99
ctx.compute()
print(ctx.a.value, ctx.transform.inp.value)
print(ctx.transform.inp.schema)
ctx.myresult = ctx.transform
ctx.compute()
print(ctx.a.value, ctx.transform.inp.value)
print(ctx.transform.result.value)
# Pull the transformer code into its own cell, then replace it.
ctx.tfcode = ctx.transform.code.pull()
ctx.compute()
print(ctx.transform.result.value, ctx.myresult.value)
ctx.tfcode = triple_it_b
'''
#or:
ctx.transform = triple_it_b
ctx.transform.a = ctx.a
ctx.myresult = ctx.transform
'''
ctx.compute()
print(ctx.transform.inp.value)
# triple_it_b needs a 'b' pin that has not been supplied yet.
print("NO RESULT", ctx.transform.result.value, ctx.myresult.value)
print("TRANSFORMER EXCEPTION", ctx.transform.exception)
ctx.transform.b = 100
ctx.compute()
print(ctx.transform.inp.value)
print("RESULT", ctx.transform.result.value, ctx.myresult.value)
print("START")
ctx.a = 13
ctx.compute()
print(ctx.a.value)
print(ctx.transform.inp.value)
print("RESULT", ctx.transform.result.value, ctx.myresult.value)
ctx.transform.example.b = "test" # modification of schema => .inp exception
ctx.translate()
print("TRANSFORMER INPUT EXCEPTION", ctx.transform.inp.exception) # None
print(ctx.transform.inp.value)
ctx.compute()
print("TRANSFORMER INPUT EXCEPTION", ctx.transform.inp.exception) # jsonschema.exceptions.ValidationError: 100 is not of type 'string'
###print("TF STATUS", ctx.transform.status)
###ctx.translate(force=True); ctx.compute() ### ERROR
print(ctx.transform.inp.schema)
###print("INPUT EXCEPTION", ctx.transform.inp.exception)
print(ctx.transform.inp.value) # None
print(ctx.transform._get_tf().inp.auth.value) # As of Seamless 0.2, this gives {'a': 1, 'b': 100}
# The a=1 is not cleared when the connection is broken!
print("TRANSFORMER STATUS", ctx.transform.status)
print("START!")
ctx.transform.b = "testing"
ctx.compute()
print(ctx.transform._get_tf().inp.auth.value) # {'a': 1, 'b': "testing"}
print(ctx.transform._get_tf().inp.buffer.value) # {'a': 13, 'b': "testing"}
print(ctx.transform.inp.value) # {'a': 13, 'b': 'testing'}
print(ctx.myresult.value) # None
print("TRANSFORMER INPUT EXCEPTION", ctx.transform.inp.exception) # None
print("TRANSFORMER STATUS", ctx.transform.status)
print("TRANSFORMER EXCEPTION", ctx.transform.exception)
print("START2")
ctx.translate(force=True); ctx.compute()
print(ctx.myresult.value) # None
print("TRANSFORMER INPUT EXCEPTION", ctx.transform.inp.exception) # None
print("TRANSFORMER STATUS", ctx.transform.status)
print("TRANSFORMER EXCEPTION", ctx.transform.exception)
print("START3")
# Revert to the single-argument code and delete the stale 'b' pin.
ctx.tfcode = triple_it
del ctx.transform.pins.b
ctx.compute()
print(ctx.myresult.value)
print("TRANSFORMER INPUT STATUS", ctx.transform.inp.status)
print("TRANSFORMER STATUS", ctx.transform.status)
print(ctx.transform.inp.schema)
print(ctx.transform.inp.data)
| 30.777778 | 134 | 0.73225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 969 | 0.291516 |
3714523a9df68433b2793b57885742031aadd9a3 | 58 | py | Python | kanon_api/units.py | legau/kanon-api | bae8fcba11caefa2f6715247852f853bb52fb9a6 | [
"BSD-3-Clause"
] | null | null | null | kanon_api/units.py | legau/kanon-api | bae8fcba11caefa2f6715247852f853bb52fb9a6 | [
"BSD-3-Clause"
] | 80 | 2021-04-21T16:02:03.000Z | 2022-03-28T00:48:58.000Z | kanon_api/units.py | legau/kanon-api | bae8fcba11caefa2f6715247852f853bb52fb9a6 | [
"BSD-3-Clause"
] | null | null | null | from astropy import units as u
degree: u.Unit = u.degree
| 14.5 | 30 | 0.741379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
37156272e00bd374fa326b7f03c221d972948d2a | 2,374 | py | Python | IMLearn/utils/utils.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | IMLearn/utils/utils.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | IMLearn/utils/utils.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
] | null | null | null | from typing import Tuple
import numpy as np
import pandas as pd
import time
def split_train_test(X: pd.DataFrame, y: pd.Series, train_proportion: float = .75) \
        -> Tuple[pd.DataFrame, pd.Series, pd.DataFrame, pd.Series]:
    """
    Randomly split given sample to a training- and testing sample

    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Data frame of samples and feature values.

    y : Series of shape (n_samples, )
        Responses corresponding samples in data frame.

    train_proportion: Fraction of samples to be split as training set

    Returns
    -------
    train_X : DataFrame of shape (ceil(train_proportion * n_samples), n_features)
        Design matrix of train set

    train_y : Series of shape (ceil(train_proportion * n_samples), )
        Responses of training samples

    test_X : DataFrame of shape (floor((1-train_proportion) * n_samples), n_features)
        Design matrix of test set

    test_y : Series of shape (floor((1-train_proportion) * n_samples), )
        Responses of test samples
    """
    # Join X and y so a single shuffle keeps samples aligned with responses.
    # Fix: use a renamed *copy* of y so the caller's Series is not mutated
    # (the original assigned y.name in place).
    X_y = X.join(y.rename("labels"))
    X_y = X_y.sample(frac=1).reset_index(drop=True)
    # Fix: iloc[:, -1] (not -1:) so responses come back as a Series,
    # matching the annotated/documented return types (the original
    # returned a one-column DataFrame).
    X, y = X_y.iloc[:, :-1], X_y.iloc[:, -1]
    amount = int(np.ceil(train_proportion * y.size))
    return X.iloc[:amount], y.iloc[:amount], X.iloc[amount:], y.iloc[amount:]
def confusion_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """
    Compute a confusion matrix between two sets of integer vectors

    Parameters
    ----------
    a: ndarray of shape (n_samples,)
        First vector of integers

    b: ndarray of shape (n_samples,)
        Second vector of integers

    Returns
    -------
    confusion_matrix: ndarray of shape (a_unique_values, b_unique_values)
        A confusion matrix where the value of the i,j index shows the number of times value `i` was found in vector `a`
        while value `j` was found in vector `b`
    """
    # Fix: implement the documented contract (this was an unimplemented
    # stub raising NotImplementedError). Map each value to its index among
    # the sorted unique values, then count co-occurrences.
    a_values, a_indices = np.unique(a, return_inverse=True)
    b_values, b_indices = np.unique(b, return_inverse=True)
    matrix = np.zeros((a_values.size, b_values.size))
    # Unbuffered accumulation handles repeated (i, j) pairs correctly.
    np.add.at(matrix, (a_indices, b_indices), 1)
    return matrix
def measure_time(f):
    """Decorator that times each call to *f* and prints the elapsed seconds.

    The wrapper forwards all arguments and returns *f*'s result unchanged.
    """
    from functools import wraps

    @wraps(f)  # fix: preserve f's __name__/__doc__ on the wrapper
    def timed(*args, **kw):
        ts = time.time()
        result = f(*args, **kw)
        te = time.time()
        print('%r %2.2f sec' % (f.__name__, te - ts))
        return result

    return timed
| 29.308642 | 119 | 0.641112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,546 | 0.651222 |
37164ce047902fb36b1255b04be946281d2676f6 | 2,583 | py | Python | review/migrations/0004_auto_20170315_0930.py | kgdunn/peer-review-system | 1fd5ac9d0f84d7637a86682e9e5fc068ac404afd | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | review/migrations/0004_auto_20170315_0930.py | kgdunn/peer-review-system | 1fd5ac9d0f84d7637a86682e9e5fc068ac404afd | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | review/migrations/0004_auto_20170315_0930.py | kgdunn/peer-review-system | 1fd5ac9d0f84d7637a86682e9e5fc068ac404afd | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-15 08:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds GradeComponent and GradeReportPhase
    # models and loosens the PRPhase start/end datetime fields.
    dependencies = [
        ('review', '0003_auto_20170314_2217'),
    ]
    operations = [
        migrations.CreateModel(
            name='GradeComponent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.PositiveSmallIntegerField(default=0.0, help_text='Used to order the display of grade items')),
                ('explanation', models.TextField(help_text='HTML is possible; used in the template. Can include template elements.', max_length=500)),
                # NOTE(review): help_text below is a tuple, not a string —
                # probably a missing implicit string concatenation in the
                # original model; verify how it renders in the admin.
                ('weight', models.FloatField(default=0.0, help_text=('Values must be between 0.0 and 1.0.', ' It is your responsibility to make sure the total weights do not sum to over 1.0 (i.e. 100%)'))),
                ('extra_detail', models.CharField(blank=True, choices=[('peer', 'peer'), ('instructor', 'instructor')], help_text=('Extra information used to help distinguish a phase. For ', 'example, the Peer-Evaluation phase is used for instructors as well as peers to evaluate. But the instructor(s) grades must get a higher weight. This is used to split the code.'), max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='GradeReportPhase',
            fields=[
                # Multi-table inheritance link back to PRPhase.
                ('prphase_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='review.PRPhase')),
            ],
            bases=('review.prphase',),
        ),
        migrations.AlterField(
            model_name='prphase',
            name='end_dt',
            field=models.DateTimeField(blank=True, verbose_name='End of this phase'),
        ),
        migrations.AlterField(
            model_name='prphase',
            name='start_dt',
            field=models.DateTimeField(blank=True, verbose_name='Start of this phase'),
        ),
        migrations.AddField(
            model_name='gradecomponent',
            name='phase',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='review.PRPhase'),
        ),
        migrations.AddField(
            model_name='gradecomponent',
            name='pr',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='review.PR_process'),
        ),
    ]
| 47.833333 | 387 | 0.629113 | 2,392 | 0.926055 | 0 | 0 | 0 | 0 | 0 | 0 | 903 | 0.349593 |
37175535dd73bb85ce801f6d71053f502d010455 | 459 | py | Python | sesame/utils.py | GamePad64/django-sesame | 9d6144110b2cd74cca4e58273926b111eadb3f03 | [
"BSD-3-Clause"
] | null | null | null | sesame/utils.py | GamePad64/django-sesame | 9d6144110b2cd74cca4e58273926b111eadb3f03 | [
"BSD-3-Clause"
] | null | null | null | sesame/utils.py | GamePad64/django-sesame | 9d6144110b2cd74cca4e58273926b111eadb3f03 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from .backends import UrlAuthBackendMixin
from .compatibility import urlencode
from .middleware import TOKEN_NAME
def get_parameters(user):
    """
    Return GET parameters to log in `user`.
    """
    token = UrlAuthBackendMixin().create_token(user)
    return {TOKEN_NAME: token}
def get_query_string(user):
    """
    Return a complete query string to log in `user`.
    """
    params = get_parameters(user)
    return '?' + urlencode(params)
| 20.863636 | 65 | 0.718954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.270153 |
3717e0e3aec2403c0fab9cf210bee60a06ef922a | 1,743 | py | Python | tests/verbs/test_drop_verb.py | RathmoreChaos/intficpy | a5076bba93208dc18dcbf2e4ad720af9e2127eda | [
"MIT"
] | 25 | 2019-04-30T23:51:44.000Z | 2022-03-23T02:02:54.000Z | tests/verbs/test_drop_verb.py | RathmoreChaos/intficpy | a5076bba93208dc18dcbf2e4ad720af9e2127eda | [
"MIT"
] | 4 | 2019-07-09T03:43:35.000Z | 2022-01-10T23:41:46.000Z | tests/verbs/test_drop_verb.py | RathmoreChaos/intficpy | a5076bba93208dc18dcbf2e4ad720af9e2127eda | [
"MIT"
] | 5 | 2021-04-24T03:54:39.000Z | 2022-01-06T20:59:03.000Z | from ..helpers import IFPTestCase
from intficpy.things import Thing, Container, Liquid
class TestDropVerb(IFPTestCase):
    """Tests for the player "drop" verb."""
    def test_verb_func_drops_item(self):
        """Dropping an inventory item removes it from the player's contains map."""
        item = Thing(self.game, self._get_unique_noun())
        item.invItem = True
        self.me.addThing(item)
        self.assertIn(item.ix, self.me.contains)
        self.assertEqual(len(self.me.contains[item.ix]), 1)
        self.assertIn(item, self.me.contains[item.ix])
        self.game.turnMain(f"drop {item.verbose_name}")
        self.assertItemNotIn(
            item, self.me.contains, "Dropped item, but item still in inventory"
        )
    def test_drop_item_not_in_inv(self):
        """Dropping an item the player is not holding prints a refusal message."""
        item = Thing(self.game, "shoe")
        item.invItem = True
        self.start_room.addThing(item)
        self.assertFalse(self.me.containsItem(item))
        self.game.turnMain(f"drop {item.verbose_name}")
        self.assertIn("You are not holding", self.app.print_stack.pop())
    def test_drop_liquid_in_container(self):
        """Dropping a liquid drops its container; the liquid stays inside it."""
        cup = Container(self.game, "cup")
        water = Liquid(self.game, "water", "water")
        water.moveTo(cup)
        cup.moveTo(self.me)
        self.game.turnMain("drop water")
        self.assertIn("You drop the cup", self.app.print_stack.pop())
        self.assertFalse(self.game.me.containsItem(cup))
        self.assertTrue(cup.containsItem(water))
    def test_drop_composite_child(self):
        """A composite child cannot be dropped separately from its parent."""
        machine = Thing(self.game, "machine")
        wheel = Thing(self.game, "wheel")
        machine.addComposite(wheel)
        machine.moveTo(self.me)
        self.game.turnMain("drop wheel")
        self.assertIn("wheel is attached to the machine", self.app.print_stack.pop())
        self.assertTrue(self.me.containsItem(wheel))
| 36.3125 | 85 | 0.655766 | 1,652 | 0.947791 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.134825 |
37184bb52ce3b69f9a3ea49e44dc9a4d526d0292 | 3,076 | py | Python | woot/apps/catalog/views/forum.py | Makeystreet/makeystreet | 761331de52207227baf6f8d161ab6df1747f8ef3 | [
"Apache-2.0"
] | 1 | 2015-06-27T13:25:28.000Z | 2015-06-27T13:25:28.000Z | woot/apps/catalog/views/forum.py | Makeystreet/makeystreet | 761331de52207227baf6f8d161ab6df1747f8ef3 | [
"Apache-2.0"
] | 1 | 2015-07-02T20:18:53.000Z | 2015-07-02T20:18:53.000Z | woot/apps/catalog/views/forum.py | Makeystreet/makeystreet | 761331de52207227baf6f8d161ab6df1747f8ef3 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from woot.apps.catalog.models.forum import Question, Answer
from woot.apps.catalog.models.core import Makey, Comment
from woot.apps.catalog.forms import QuestionForm, AnswerForm, CommentForm
def question(request, question_id, **kwargs):
    """Show a question page (GET) or save a new answer to it (POST).

    GET bumps the question's view counter. POST validates an AnswerForm;
    on failure the bound form is returned to the template for redisplay.
    A 'form' passed via kwargs (e.g. an invalid comment form forwarded by
    another view) overrides any locally built form.
    """
    q = get_object_or_404(Question, id=question_id)
    # NOTE(review): a request method other than GET/POST leaves `context`
    # unbound and raises UnboundLocalError — confirm the URLconf restricts
    # methods, or add a default context.
    if request.method == "GET":
        q.increase_views()
        context = {
            'question': q,
        }
    elif request.method == "POST":
        form = AnswerForm(request.POST)
        if form.is_valid():
            u = request.user
            a = Answer()
            a.save_from_form(form, creator=u, question=q)
            context = {
                'question': q,
            }
        else:
            context = {
                'question': q,
                'form': form,
            }
    if 'form' in kwargs:  # membership test on the dict itself; .keys() was redundant
        context['form'] = kwargs['form']
    return render(request, 'catalog/question_page.html', context)
def ask_question(request, makey_id):
    """Render the ask-question form (GET) or create a Question (POST).

    On a valid POST the browser is redirected to the new question's page;
    on an invalid POST the form is re-rendered with its errors.
    """
    m = get_object_or_404(Makey, id=makey_id)
    if request.method == "GET":
        context = {
            'makey': m,
        }
        return render(request, 'catalog/ask_question.html', context)
    elif request.method == "POST":
        u = get_object_or_404(User, id=request.user.id)
        form = QuestionForm(request.POST)
        if form.is_valid():
            q = Question()
            q.save_from_form(form, creator=u, makey=m)
            return HttpResponseRedirect(reverse('catalog:question', kwargs={
                'question_id': q.id
            }))
        else:
            context = {
                'makey': m,
                'form': form
            }
            return render(request, 'catalog/ask_question.html', context)
def add_comment(request):
    """Attach a Comment to a Question ('q-<id>') or Answer ('a-<id>').

    The comment form's 'owner' field encodes the target as '<type>-<id>'.
    On success (or an unknown owner type) the browser is redirected back to
    the question page; an invalid form is forwarded there for redisplay.
    """
    # NOTE(review): a non-POST request leaves `kwargs` unbound and raises
    # UnboundLocalError at the redirect below — confirm this endpoint is
    # POST-only in the URLconf.
    if request.method == "POST":
        question_id = request.POST.get('question', '')
        form = CommentForm(request.POST)
        if form.is_valid():
            owner = form.cleaned_data['owner'].split('-')
            # Resolve the comment target; the q/a branches were previously
            # duplicated verbatim, so the creation code is shared below.
            if owner[0] == "q":
                target = get_object_or_404(Question, id=int(owner[1]))
            elif owner[0] == "a":
                target = get_object_or_404(Answer, id=int(owner[1]))
            else:
                target = None
            if target is not None:
                c = Comment()
                c.user = request.user
                c.body = form.cleaned_data['body']
                c.save()
                target.comments.add(c)
            kwargs = {
                'question_id': question_id,
            }
        else:
            q = get_object_or_404(Question, id=question_id)
            kwargs = {
                'question_id': question_id,
                'form': form
            }
    return HttpResponseRedirect(reverse('catalog:question', kwargs=kwargs))
| 29.576923 | 76 | 0.539012 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.099155 |
371a4860fef108bacaa12c8950aae7cf93fc6975 | 223 | py | Python | old_django_malliva/marketplaceAccounts/admin.py | olubiyiontheweb/malliva | b212e6b359eed54c92533f0a02afe3c0042150e2 | [
"MIT"
] | null | null | null | old_django_malliva/marketplaceAccounts/admin.py | olubiyiontheweb/malliva | b212e6b359eed54c92533f0a02afe3c0042150e2 | [
"MIT"
] | null | null | null | old_django_malliva/marketplaceAccounts/admin.py | olubiyiontheweb/malliva | b212e6b359eed54c92533f0a02afe3c0042150e2 | [
"MIT"
] | 1 | 2021-07-19T12:15:52.000Z | 2021-07-19T12:15:52.000Z | from django.contrib import admin
from .models import MarketplaceAccount, Plan, Subscription
# Expose the marketplace billing models in the Django admin site.
admin.site.register(Plan)
admin.site.register(Subscription)
admin.site.register(MarketplaceAccount)
| 24.777778 | 58 | 0.829596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.125561 |
371a8dcbd851a1e7977852eaa260257568a126f3 | 1,316 | py | Python | tools/cr/cr/commands/info.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | tools/cr/cr/commands/info.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | tools/cr/cr/commands/info.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the info implementation of Command."""
from __future__ import print_function
import cr
class InfoCommand(cr.Command):
  """The cr info command implementation."""

  def __init__(self):
    super(InfoCommand, self).__init__()
    self.help = 'Print information about the cr environment'

  def AddArguments(self, subparsers):
    """Register the 'info' subparser and its -s/--short flag."""
    parser = super(InfoCommand, self).AddArguments(subparsers)
    parser.add_argument(
        '-s', '--short', dest='_short',
        action='store_true', default=False,
        help='Short form results, useful for scripting.'
    )
    self.ConsumeArgs(parser, 'the environment')
    return parser

  def EarlyArgProcessing(self):
    """In --short mode a build directory is not required."""
    if getattr(cr.context.args, '_short', False):
      self.requires_build_dir = False
    cr.Command.EarlyArgProcessing(self)

  def Run(self):
    """Print the requested variables, or the full client info if none given.

    With --short only the bare values are printed (missing -> empty line);
    otherwise each variable is printed as 'name = value'.
    """
    if cr.context.remains:
      for var in cr.context.remains:
        if getattr(cr.context.args, '_short', False):
          val = cr.context.Find(var)
          if val is None:
            val = ''
          print(val)
        else:
          print(var, '=', cr.context.Find(var))
    else:
      cr.base.client.PrintInfo()
| 28.608696 | 72 | 0.660334 | 1,045 | 0.794073 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.31307 |
371b5cd190cc6453ea1cf3899da67cadf76aab9c | 304 | py | Python | mlapp/MLAPP_CODE/MLAPP-C5-Code/betaCredibleInt.py | xishansnow/MLAPP | 2f30cd94fd852a3f66fe92a124f65722bd2af509 | [
"MIT"
] | null | null | null | mlapp/MLAPP_CODE/MLAPP-C5-Code/betaCredibleInt.py | xishansnow/MLAPP | 2f30cd94fd852a3f66fe92a124f65722bd2af509 | [
"MIT"
] | null | null | null | mlapp/MLAPP_CODE/MLAPP-C5-Code/betaCredibleInt.py | xishansnow/MLAPP | 2f30cd94fd852a3f66fe92a124f65722bd2af509 | [
"MIT"
] | null | null | null | """计算分位数"""
from scipy import stats
import numpy as np

# 95% credible interval for a Beta posterior (uniform prior, S successes in N trials).
S = 47
N = 100
a = S + 1        # Beta posterior parameters: Beta(S+1, N-S+1)
b = (N - S) + 1
alpha = 0.05
# Exact central interval from the Beta quantile function.
lu = stats.beta.ppf([alpha/2, 1-alpha/2], a, b)
print(lu)
## Monte Carlo method: approximate the same interval from posterior samples.
S = 1000
# random_state fixed so the script is reproducible run-to-run.
X = stats.beta.rvs(a, b, size=S, random_state=0)
X = np.sort(X, axis=0)
l = X[round(S*alpha/2)]
# BUG FIX: upper bound was X[round(S*(1-alpha)/2)] (index 475, near the
# median); the 1-alpha/2 quantile lives at index round(S*(1-alpha/2)) = 975.
u = X[round(S*(1-alpha/2))]
print(l, u)
371b77ffe10a8965336c8ef7f84e3420344cd205 | 5,009 | py | Python | cqa_dataloader.py | vksriharsha/CQA | 50021cca2d1d0833a0300bea20fd259dc4871fad | [
"MIT"
] | 8 | 2021-01-11T01:12:09.000Z | 2022-01-20T23:23:22.000Z | cqa_dataloader.py | vksriharsha/CQA | 50021cca2d1d0833a0300bea20fd259dc4871fad | [
"MIT"
] | null | null | null | cqa_dataloader.py | vksriharsha/CQA | 50021cca2d1d0833a0300bea20fd259dc4871fad | [
"MIT"
] | 2 | 2021-07-19T06:23:33.000Z | 2021-09-09T15:28:51.000Z | import json
import os
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import torch
from PIL import Image
from nltk.tokenize import word_tokenize
from torch.utils.data import Dataset, DataLoader
class CQADataset(Dataset):
    """Chart/visual QA dataset pairing an image with an encoded question.

    Each record in ``cqa_data`` is a dict with at least the keys
    'question', 'question_index', 'image_filename' and (for non-test
    splits) 'answer'.
    """
    def __init__(self, cqa_data, ans2idx, ques2idx, maxlen, split, config):
        self.cqa_data = cqa_data
        self.ques2idx = ques2idx
        self.ans2idx = ans2idx
        self.maxlen = maxlen
        self.split = split
        self.config = config
        # Training uses augmenting transforms; all other splits use the
        # deterministic test transform.
        if self.split == 'train':
            self.prep = config.train_transform
        else:
            self.prep = config.test_transform
    def __len__(self):
        return len(self.cqa_data)
    def __getitem__(self, index):
        """Return (question_tensor, answer, image_tensor, question_id, question_len)."""
        ques, ques_len = encode_questions(self.cqa_data[index]['question'], self.ques2idx,
                                          self.maxlen)
        # Test splits have no ground-truth answers; return a dummy tensor.
        if 'test' not in self.split:
            ans = encode_answers(self.cqa_data[index]['answer'], self.ans2idx, self.config)
        else:
            ans = torch.zeros((1,))
        ques_id = self.cqa_data[index]['question_index']
        img_path = os.path.join(self.config.root, self.config.dataset, 'images', self.split,
                                self.cqa_data[index]['image_filename'])
        img = Image.open(img_path).convert('RGB')
        img_tensor = self.prep(img)
        return ques, ans, img_tensor, ques_id, ques_len
def encode_questions(question, ques2idx, maxlen):
    """Encode a question into a fixed-length index tensor.

    Returns (index_tensor_of_len_maxlen, number_of_tokens); positions past
    the last token stay 0 (padding).
    """
    tokens = word_tokenize(question.lower())
    encoded = torch.zeros(maxlen).long()
    for pos, token in enumerate(tokens):
        # last idx is reserved for <UNK>, needed for real OCRs
        encoded[pos] = ques2idx.get(token, len(ques2idx))
    return encoded, len(tokens)
def encode_answers(answer, ans2idx, config):
    """Map an answer to its training target.

    FigureQA is binary: '1' -> tensor([0.]), anything else -> tensor([1.]).
    Other datasets look the answer up in ans2idx (len(ans2idx) = <UNK>).
    """
    if config.dataset != 'FigureQA':
        return ans2idx.get(answer, len(ans2idx))
    target = torch.zeros((1,))
    target[0] = 0.0 if answer == '1' else 1.0
    return target
def collate_batch(data_batch):
    """Collate a batch after ordering samples by descending question length.

    Each sample is a tuple whose last element is the question length; the
    batch is sorted in place (longest first) before default collation.
    """
    data_batch.sort(key=lambda sample: sample[-1], reverse=True)
    return torch.utils.data.dataloader.default_collate(data_batch)
def tokenize(q):
    # Module-level helper (not a lambda/closure) so ProcessPoolExecutor can
    # pickle it; lower-cases and word-tokenizes one QA record's question.
    return word_tokenize(q['question'].lower())
def build_lut(cqa_train_data):
    """Build the question/answer lookup tables from the training records.

    Returns:
        ans2idx: dict mapping answer string -> class index
        ques2idx: dict mapping word -> index (index 0 is reserved for padding)
        maxlen: length in tokens of the longest question
    """
    print("Building lookup table for question and answer tokens")
    # Context manager guarantees the worker pool is shut down even if
    # tokenization raises (the original called shutdown() unconditionally
    # only on success).
    with ProcessPoolExecutor(max_workers=8) as pool:
        questions = list(pool.map(tokenize, cqa_train_data, chunksize=1000))
    print("Finished")
    maxlen = max(len(q) for q in questions)
    unique_tokens = {t for q in questions for t in q}
    ques2idx = {word: idx + 1 for idx, word in enumerate(unique_tokens)}  # save 0 for padding
    answers = {q['answer'] for q in cqa_train_data}
    ans2idx = {ans: idx for idx, ans in enumerate(answers)}
    return ans2idx, ques2idx, maxlen
# %%
def _load_json(path):
    # Read a JSON file, closing the handle afterwards; the original
    # json.load(open(...)) calls leaked file descriptors.
    with open(path) as f:
        return json.load(f)


def build_dataloaders(config):
    """Create the train/val/test DataLoaders from the QA annotation files.

    Args:
        config: namespace providing root, dataset, train_filename,
            val_filenames, test_filenames, lut_location, data_subset,
            batch_size and the transforms consumed by CQADataset.
    Returns:
        (train_dataloader, val_dataloaders, test_dataloaders,
         question_vocab_size, answer_vocab_size)
    """
    cqa_train_data = _load_json(
        os.path.join(config.root, config.dataset, 'qa', config.train_filename))
    # Either build the lookup tables from scratch or load a precomputed one.
    if config.lut_location == '':
        ans2idx, ques2idx, maxlen = build_lut(cqa_train_data)
    else:
        lut = _load_json(config.lut_location)
        ans2idx = lut['ans2idx']
        ques2idx = lut['ques2idx']
        maxlen = lut['maxlen']
    # Deterministic random subset of the training data.
    n = int(config.data_subset * len(cqa_train_data))
    np.random.seed(666)
    np.random.shuffle(cqa_train_data)
    cqa_train_data = cqa_train_data[:n]
    train_dataset = CQADataset(cqa_train_data, ans2idx, ques2idx, maxlen, 'train', config)
    train_dataloader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True,
                                  collate_fn=collate_batch, num_workers=8)
    val_dataloaders = []
    for split in config.val_filenames:
        cqa_val_data = _load_json(
            os.path.join(config.root, config.dataset, 'qa', config.val_filenames[split]))
        vds = CQADataset(cqa_val_data, ans2idx, ques2idx, maxlen, split, config)
        val_dataloaders.append(DataLoader(vds, batch_size=config.batch_size, shuffle=False,
                                          collate_fn=collate_batch, num_workers=8))
    test_dataloaders = []
    for split in config.test_filenames:
        cqa_test_data = _load_json(
            os.path.join(config.root, config.dataset, 'qa', config.test_filenames[split]))
        # Test subsets are truncated (not shuffled) so ordering stays stable.
        n = int(config.data_subset * len(cqa_test_data))
        cqa_test_data = cqa_test_data[:n]
        tds = CQADataset(cqa_test_data, ans2idx, ques2idx, maxlen, split, config)
        test_dataloaders.append(DataLoader(tds, batch_size=config.batch_size, shuffle=False,
                                           collate_fn=collate_batch, num_workers=8))
    # +1: reserve one extra index for question padding / answer <UNK>.
    return train_dataloader, val_dataloaders, test_dataloaders, len(ques2idx) + 1, len(ans2idx) + 1
def main():
    """Placeholder entry point; this module is meant to be imported."""
    pass


# BUG FIX: the guard was '__main___' (three trailing underscores), which
# never matches, so main() could never run as a script.
if __name__ == '__main__':
    main()
| 36.035971 | 118 | 0.65542 | 1,207 | 0.240966 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.062088 |
371c4a5f6f8c3976b640b0eedc55c96b6d333633 | 4,363 | py | Python | re_calc/shunting_yard.py | LilacRapture/re_calc | d8dc744e26ade3edc545dd6509bf2baf973537a2 | [
"MIT"
] | 1 | 2020-02-27T18:29:52.000Z | 2020-02-27T18:29:52.000Z | re_calc/shunting_yard.py | LilacRapture/re_calc | d8dc744e26ade3edc545dd6509bf2baf973537a2 | [
"MIT"
] | 1 | 2020-02-27T18:46:46.000Z | 2020-02-27T18:46:46.000Z | re_calc/shunting_yard.py | LilacRapture/ReCalc | d8dc744e26ade3edc545dd6509bf2baf973537a2 | [
"MIT"
] | null | null | null | from re_calc.config import *
from re_calc.exceptions import CalcException
from re_calc.util import is_number
import re_calc.meta_containers as meta_containers
def peek(stack):
    """Return the top element of `stack` without removing it."""
    top = stack[-1]
    return top
def should_move_to_queue(stack, c_token_prc):
    ''' Checks token's precedence and associativity to decide if it should be
    moved to the queue.
    '''
    if stack:
        s_token = peek(stack)
        s_token_prc = get_token_prop(s_token, 'prc')
        s_token_assoc = get_token_prop(s_token, 'assoc')
        # NOTE(review): `and` binds tighter than `or`, so this evaluates as
        # A or (B and C and D) — the `(s_token != '(')` guard does NOT apply
        # when s_token_prc > c_token_prc. Verify '(' has the lowest precedence
        # in config; otherwise '(' could be popped prematurely.
        return (s_token_prc > c_token_prc
                or (s_token_prc == c_token_prc and
                    s_token_assoc == 'left')
                and (s_token != '('))
    else:
        # An empty stack never forces a move.
        return False
def get_arity(fun):
    """Return how many positional parameters `fun` declares.

    Inspects the function's code object rather than calling it.
    """
    code_obj = fun.__code__
    return code_obj.co_argcount
def arity_is_valid(fn_token, rest_tokens):
    ''' Checks whether a function arguments list is valid.

    `rest_tokens` starts at the '(' that follows the function token (the
    caller passes meta_tokens[token.meta + 1:]); the check counts top-level
    argument separators and compares against arity - 1.
    '''
    paren_balance = 1
    properties = token_properties.get(fn_token)
    op_function = properties.get('fun')
    arity = get_arity(op_function)
    # k arguments are delimited by k - 1 separators.
    expected_separator_count = arity - 1
    # Skip the opening '(' itself; paren_balance already accounts for it.
    arg_tokens = rest_tokens[1:]
    token_idx = 0
    separator_count = 0
    while token_idx < len(arg_tokens) and paren_balance != 0:
        c_token = arg_tokens[token_idx]
        if c_token == '(':
            paren_balance += 1
        elif c_token == ')':
            paren_balance -= 1
        elif (c_token in separators) and (paren_balance == 1):
            # Only separators at the function's own nesting level count;
            # separators inside nested calls belong to those calls.
            separator_count += 1
        token_idx += 1
    return expected_separator_count == separator_count
def infix_to_rpn(tokens):
    ''' Shunting yard algorithm implementation.

    Converts an infix token list to reverse Polish notation, validating
    parenthesis balance and function arity along the way. Raises
    CalcException with a position and a localization key on malformed input.
    '''
    # Attach source positions to tokens so errors can point at them.
    meta_tokens = meta_containers.set_meta_indices(tokens)
    output_queue = list()
    stack = list()
    for token in meta_tokens:
        if is_number(token):
            output_queue.append(token) # add number to queue
        elif token in functions:
            # A function must be immediately followed by '(' and carry the
            # number of arguments its implementation expects.
            n_token_idx = token.meta + 1
            if ((n_token_idx > len(meta_tokens) - 1)
                    or (meta_tokens[n_token_idx] != "(")):
                raise CalcException(
                    token.meta,
                    meta_tokens,
                    message="Missing function args",
                    loc_string="t_missing_fn_args")
            if not arity_is_valid(token, meta_tokens[token.meta + 1:]):
                raise CalcException(
                    token.meta,
                    meta_tokens,
                    message="Invalid arity",
                    loc_string="t_invalid_arity")
            stack.append(token) # add function to stack
        elif token in separators:
            # Argument separator: pop operators until the enclosing '('.
            # NOTE(review): loc_string "t_missing_separtor" looks misspelled —
            # verify against the localization table before changing it.
            if not stack or '(' not in stack:
                raise CalcException(
                    token.meta,
                    meta_tokens,
                    message="Missing parentheses or separator",
                    loc_string="t_missing_separtor")
            while stack and peek(stack) != "(":
                output_queue.append(stack.pop()) # move operator to queue
        elif token in operators:
            if stack: # if stack's not empty
                c_token_prc = get_token_prop(token, 'prc')
                # Pop higher-precedence (or equal, left-associative) operators.
                while should_move_to_queue(stack, c_token_prc):
                    output_queue.append(stack.pop()) # move operator to queue
            stack.append(token) # add operator to stack
        elif token == '(':
            stack.append(token) # add open paren to stack
        elif token == ')':
            if not stack or '(' not in stack:
                raise CalcException(
                    token.meta,
                    meta_tokens,
                    message="Missing open paren(s)",
                    loc_string="t_missing_l_paren")
            while peek(stack) != '(':
                output_queue.append(stack.pop()) # move operator or function to queue
            if peek(stack) == '(':
                stack.pop() # discard open paren
    while stack: # move the rest of the stack to the queue
        # Any leftover '(' (tracked via `priorities`) means unbalanced parens.
        if peek(stack) in priorities:
            raise CalcException(
                peek(stack).meta,
                meta_tokens,
                message="Missing close paren(s)",
                loc_string="t_missing_r_paren")
        output_queue.append(stack.pop())
    return meta_containers.pack_list(output_queue, tokens)
| 37.290598 | 86 | 0.572313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 811 | 0.185881 |
371ccf1859b31fd5c4912f1fb9a89f63aca04cba | 28,944 | py | Python | image_classification/VOLO/volo.py | chuliuT/PaddleViT | 282e5013f0460fa9f9b010775ff4d2607e7370ef | [
"Apache-2.0"
] | null | null | null | image_classification/VOLO/volo.py | chuliuT/PaddleViT | 282e5013f0460fa9f9b010775ff4d2607e7370ef | [
"Apache-2.0"
] | null | null | null | image_classification/VOLO/volo.py | chuliuT/PaddleViT | 282e5013f0460fa9f9b010775ff4d2607e7370ef | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement VOLO Class
"""
import math
import copy
import numpy as np
import paddle
import paddle.nn as nn
from droppath import DropPath
from fold import fold
#from utils import MyPrint
#myprint = MyPrint()
class Identity(nn.Layer):
    """No-op layer: forward returns its input unchanged.

    Handy as a placeholder so forward methods can avoid 'if' branches.
    """
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
class Downsample(nn.Layer):
    """Spatial downsampling via a strided Conv2D.

    Input is channels-last [N, H, W, C]; it is transposed to [N, C, H, W]
    for the convolution and transposed back afterwards.

    Args:
        in_embed_dim: int, input feature dimension
        out_embed_dim: int, output feature dimension
        patch_size: int, used as both kernel_size and stride
    """
    def __init__(self, in_embed_dim, out_embed_dim, patch_size):
        super().__init__()
        self.proj = nn.Conv2D(in_embed_dim,
                              out_embed_dim,
                              kernel_size=patch_size,
                              stride=patch_size)

    def forward(self, x):
        # [N, H, W, C] -> [N, C, H, W] for the conv, then back.
        out = self.proj(x.transpose([0, 3, 1, 2]))
        return out.transpose([0, 2, 3, 1])
class PatchEmbedding(nn.Layer):
    """Patch Embeddings with stem conv layers
    If stem conv layers are set, the image is firstly feed into stem layers,
    stem layers contains 3 conv-bn-relu blocks.
    Then a proj (conv2d) layer is applied as the patch embedding.
    Args:
        image_size: int, input image size, default: 224
        stem_conv: bool, if apply stem conv layers, default: False
        stem_stride: int, conv stride in stem layers, default: 1
        patch_size: int, patch size for patch embedding (k and stride for proj conv), default: 8
        in_channels: int, input channels, default: 3
        hidden_dim: int, input dimension of patch embedding (out dim for stem), default: 64
        embed_dim: int, output dimension of patch embedding, default: 384
    """
    def __init__(self,
                 image_size=224,
                 stem_conv=False,
                 stem_stride=1,
                 patch_size=8,
                 in_channels=3,
                 hidden_dim=64,
                 embed_dim=384):
        super().__init__()
        assert patch_size in [4, 8, 16]
        # define stem conv layers: 7x7 (strided) + two 3x3, each conv-bn-relu
        if stem_conv:
            self.stem = nn.Sequential(
                nn.Conv2D(in_channels,
                          hidden_dim,
                          kernel_size=7,
                          stride=stem_stride,
                          padding=3,
                          bias_attr=False),
                nn.BatchNorm2D(hidden_dim, momentum=0.9),
                nn.ReLU(),
                nn.Conv2D(hidden_dim,
                          hidden_dim,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias_attr=False),
                nn.BatchNorm2D(hidden_dim, momentum=0.9),
                nn.ReLU(),
                nn.Conv2D(hidden_dim,
                          hidden_dim,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias_attr=False),
                nn.BatchNorm2D(hidden_dim, momentum=0.9),
                nn.ReLU(),
            )
        else:
            self.stem = Identity()
        # define patch embeddings; the stem already downsampled by
        # stem_stride, so the proj stride compensates to keep the overall
        # reduction equal to patch_size
        self.proj = nn.Conv2D(hidden_dim,
                              embed_dim,
                              kernel_size = patch_size // stem_stride,
                              stride = patch_size // stem_stride)
        # num patches
        self.num_patches = (image_size // patch_size) * (image_size // patch_size)
    def forward(self, x):
        x = self.stem(x) # Identity layer if stem is not set
        x = self.proj(x)
        return x
class Mlp(nn.Layer):
    """ MLP module
    Impl using nn.Linear and activation is GELU, dropout is applied.
    Ops: fc -> act -> dropout -> fc -> dropout
    Args:
        in_features: int, input (and output) feature dimension
        hidden_features: int, hidden feature dimension
        dropout: float, dropout rate applied after each linear layer
    """
    def __init__(self, in_features, hidden_features, dropout=0.):
        super(Mlp, self).__init__()
        w_attr_1, b_attr_1 = self._init_weights()
        self.fc1 = nn.Linear(in_features,
                             hidden_features,
                             weight_attr=w_attr_1,
                             bias_attr=b_attr_1)
        w_attr_2, b_attr_2 = self._init_weights()
        self.fc2 = nn.Linear(hidden_features,
                             in_features,
                             weight_attr=w_attr_2,
                             bias_attr=b_attr_2)
        self.act = nn.GELU()
        self.dropout = nn.Dropout(dropout)
    def _init_weights(self):
        # Xavier-uniform weights, small-normal biases for both linear layers.
        weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.XavierUniform())
        bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=1e-6))
        return weight_attr, bias_attr
    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.dropout(x)
        return x
class OutlookerAttention(nn.Layer):
    """ Outlooker Attention
    Outlooker attention firstly applies a nn.Linear op, and unfold (im2col) the output
    tensor, then use tensor reshape to get the 'V'. 'Attn' is obtained by pool, linear and reshape
    ops applied on input tensor. Then a matmul is applied for 'V' and 'Attn'. Finally, a
    fold op is applied with a linear projection to get the output.
    Args:
        dim: int, all heads dimension
        num_heads: int, num of heads
        kernel_size: int, size used in fold/unfold, and pool, default: 3
        padding: int, pad used in fold/unfold, default: 1
        stride: int, stride used in fold/unfold, and pool, default: 1
        qkv_bias: bool, if True, qkv linear layer is using bias, default: False
        qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
        attention_dropout: float, dropout rate for attention dropout, default: 0.
        dropout: float, dropout rate for projection dropout, default: 0.
    """
    def __init__(self,
                 dim,
                 num_heads,
                 kernel_size=3,
                 padding=1,
                 stride=1,
                 qkv_bias=False,
                 qk_scale=None,
                 attention_dropout=0.,
                 dropout=0.):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        self.dim_head = dim // num_heads
        self.scale = qk_scale or self.dim_head ** -0.5
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
        self.v = nn.Linear(dim, dim, bias_attr=qkv_bias)
        # attn predicts a (k*k) x (k*k) attention map per head per location
        self.attn = nn.Linear(dim, (kernel_size ** 4) * num_heads)
        self.attn_dropout = nn.Dropout(attention_dropout)
        self.proj = nn.Linear(dim, dim)
        self.proj_dropout = nn.Dropout(dropout)
        self.softmax = nn.Softmax(axis=-1)
        self.pool = nn.AvgPool2D(kernel_size=stride, stride=stride, ceil_mode=True)
        self.unfold = paddle.nn.Unfold(kernel_sizes=kernel_size, strides=self.stride, paddings=self.padding)
    def forward(self, x):
        # x: [B, H, W, C] (channels-last)
        B, H, W, C = x.shape
        v = self.v(x)  # B, H, W, C
        v = v.transpose([0, 3, 1, 2])  # B, C, H, W
        # h, w: pooled spatial size (number of attention windows per side)
        h, w = math.ceil(H / self.stride), math.ceil(W / self.stride)
        # current paddle version has bugs using nn.Unfold,
        # so the functional form is used instead of self.unfold
        v = paddle.nn.functional.unfold(v,
                                        kernel_sizes=self.kernel_size,
                                        paddings=self.padding,
                                        strides=self.stride)  # B, C*kernel_size*kernel_size, L(num of patches)
        v = v.reshape([B,
                       self.num_heads,
                       C // self.num_heads,
                       self.kernel_size * self.kernel_size,
                       h * w])
        v = v.transpose([0, 1, 4, 3, 2])
        # attention map comes from the pooled input, not from q@k
        x = x.transpose([0, 3, 1, 2])
        attn = self.pool(x)
        attn = attn.transpose([0, 2, 3, 1])  # B, H', W', C
        attn = self.attn(attn)
        attn = attn.reshape([B,
                             h*w,
                             self.num_heads,
                             self.kernel_size * self.kernel_size,
                             self.kernel_size * self.kernel_size])
        attn = attn.transpose([0, 2, 1, 3, 4])
        attn = attn * self.scale
        attn = self.softmax(attn)
        attn = self.attn_dropout(attn)
        # weighted aggregation of each local window's values
        z = paddle.matmul(attn, v)
        z = z.transpose([0, 1, 4, 3, 2])
        new_shape = [B, C * self.kernel_size * self.kernel_size, h * w]
        z = z.reshape(new_shape)
        # Current Paddle dose not have Fold op, we hacked our fold op, see ./fold.py for details
        z = fold(z, output_size=(H, W), kernel_size=self.kernel_size,
                 padding=self.padding, stride=self.stride)
        z = z.transpose([0, 2, 3, 1])
        z = self.proj(z)
        z = self.proj_dropout(z)
        return z
class Outlooker(nn.Layer):
    """ Outlooker
    Outlooker contains norm layers, outlooker attention, mlp and droppath layers,
    and residual is applied during forward.
    Args:
        dim: int, all heads dimension
        num_heads: int, num of heads
        kernel_size: int, size used in fold/unfold, and pool, default: 3
        padding: int, pad used in fold/unfold, default: 1
        mlp_ratio: float, ratio to multiply with dim for mlp hidden feature dim, default: 3.
        stride: int, stride used in fold/unfold, and pool, default: 1
        qkv_bias: bool, if True, qkv linear layer is using bias, default: False
        qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
        attention_dropout: float, dropout rate for attention dropout, default: 0.
        droppath: float, drop path rate, default: 0.
    """
    def __init__(self,
                 dim,
                 kernel_size,
                 padding,
                 stride=1,
                 num_heads=1,
                 mlp_ratio=3.,
                 attention_dropout=0.,
                 droppath=0.,
                 qkv_bias=False,
                 qk_scale=None):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = OutlookerAttention(dim,
                                       num_heads,
                                       kernel_size=kernel_size,
                                       padding=padding,
                                       stride=stride,
                                       qkv_bias=qkv_bias,
                                       qk_scale=qk_scale,
                                       attention_dropout=attention_dropout)
        # BUG FIX: was `Droppath(droppath)` (undefined name) — the import and
        # the sibling Transformer block both use `DropPath`, so any
        # droppath > 0. previously raised NameError.
        self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio))
    def forward(self, x):
        # pre-norm residual branch: attention
        h = x
        x = self.norm1(x)
        x = self.attn(x)
        x = self.drop_path(x)
        x = h + x
        # pre-norm residual branch: mlp
        h = x
        x = self.norm2(x)
        x = self.mlp(x)
        x = self.drop_path(x)
        x = h + x
        return x
class Attention(nn.Layer):
    """ Attention
    Regular Attention module same as ViT, but operating on channels-last
    [B, H, W, C] feature maps (flattened to H*W tokens internally).
    Args:
        dim: int, all heads dimension
        num_heads: int, num of heads
        qkv_bias: bool, if True, qkv linear layer is using bias, default: False
        qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
        attention_dropout: float, dropout rate for attention dropout, default: 0.
        dropout: float, dropout rate for projection dropout, default: 0.
    """
    def __init__(self,
                 dim,
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attention_dropout=0.,
                 dropout=0.):
        super().__init__()
        self.num_heads = num_heads
        self.dim_head = dim // num_heads
        self.scale = qk_scale or self.dim_head ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias)
        self.attn_dropout = nn.Dropout(attention_dropout)
        self.softmax = nn.Softmax(axis=-1)
        self.proj = nn.Linear(dim, dim)
        self.proj_dropout = nn.Dropout(dropout)
    def forward(self, x):
        B, H, W, C = x.shape
        # single linear projection split into q, k, v:
        # [B, H*W, 3, num_heads, dim_head] -> 3 x [B, num_heads, H*W, dim_head]
        qkv = self.qkv(x)
        qkv = qkv.reshape([B, H * W, 3, self.num_heads, C // self.num_heads])
        qkv = qkv.transpose([2, 0, 3, 1, 4])
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = paddle.matmul(q, k, transpose_y=True)
        attn = attn * self.scale
        attn = self.softmax(attn)
        attn = self.attn_dropout(attn)
        z = paddle.matmul(attn, v)
        z = z.transpose([0, 2, 1, 3])
        # restore the spatial layout [B, H, W, C]
        z = z.reshape([B, H, W, C])
        z = self.proj(z)
        z = self.proj_dropout(z)
        return z
class Transformer(nn.Layer):
    """Transformer
    Transformer module, same as ViT: pre-norm attention and mlp blocks,
    each wrapped in a droppath residual connection.
    Args:
        dim: int, all heads dimension
        num_heads: int, num of heads
        mlp_ratio: float, ratio to multiply with dim for mlp hidden feature dim, default: 4.
        qkv_bias: bool, if True, qkv linear layer is using bias, default: False
        qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
        attention_dropout: float, dropout rate for attention dropout, default: 0.
        droppath: float, drop path rate, default: 0.
    """
    def __init__(self,
                 dim,
                 num_heads,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 attention_dropout=0,
                 droppath=0.):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = Attention(dim,
                              num_heads=num_heads,
                              qkv_bias=qkv_bias,
                              qk_scale=qk_scale,
                              attention_dropout=attention_dropout)
        self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio))
    def forward(self, x):
        # pre-norm residual branch: attention
        h = x
        x = self.norm1(x)
        x = self.attn(x)
        x = self.drop_path(x)
        x = h + x
        # pre-norm residual branch: mlp
        h = x
        x = self.norm2(x)
        x = self.mlp(x)
        x = self.drop_path(x)
        x = h + x
        return x
class ClassAttention(nn.Layer):
""" Class Attention
Class Attention modlee same as CaiT
Args:
dim: int, all heads dimension
dim_head: int, single heads dimension, default: None
num_heads: int, num of heads
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self,
dim,
num_heads=8,
dim_head=None,
qkv_bias=False,
qk_scale=None,
attention_dropout=0.,
dropout=0.):
super().__init__()
self.num_heads = num_heads
if dim_head is not None:
self.dim_head = dim_head
else:
self.dim_head = dim // num_heads
self.scale = qk_scale or self.dim_head ** -0.5
self.kv = nn.Linear(dim,
self.dim_head * self.num_heads * 2,
bias_attr=qkv_bias)
self.q = nn.Linear(dim,
self.dim_head * self.num_heads,
bias_attr=qkv_bias)
self.attn_dropout = nn.Dropout(attention_dropout)
self.proj = nn.Linear(self.dim_head * self.num_heads, dim)
self.proj_dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(axis=-1)
def forward(self, x):
B, N, C = x.shape
kv = self.kv(x)
kv = kv.reshape([B, N, 2, self.num_heads, self.dim_head])
kv = kv.transpose([2, 0, 3, 1, 4])
k, v = kv[0], kv[1]
q = self.q(x[:, :1, :])
q = q.reshape([B, self.num_heads, 1, self.dim_head])
attn = paddle.matmul(q * self.scale, k, transpose_y=True)
attn = self.softmax(attn)
attn = self.attn_dropout(attn)
cls_embed = paddle.matmul(attn, v)
cls_embed = cls_embed.transpose([0, 2, 1, 3])
cls_embed = cls_embed.reshape([B, 1, self.dim_head * self.num_heads])
cls_embed = self.proj(cls_embed)
cls_embed = self.proj_dropout(cls_embed)
return cls_embed
class ClassBlock(nn.Layer):
"""Class Attention Block (CaiT)
CaiT module
Args:
dim: int, all heads dimension
num_heads: int, num of heads
mlp_ratio: float, ratio to multiply with dim for mlp hidden feature dim, default: 4.
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self,
dim,
num_heads,
dim_head=None,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
dropout=0.,
attention_dropout=0.,
droppath=0.):
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.attn = ClassAttention(dim,
num_heads=num_heads,
dim_head=dim_head,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout,
dropout=dropout)
self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
self.norm2 = nn.LayerNorm(dim)
self.mlp = Mlp(in_features=dim,
hidden_features=int(dim * mlp_ratio),
dropout=dropout)
def forward(self, x):
cls_embed = x[:, :1]
h = self.norm1(x)
h = self.attn(h)
h = self.drop_path(h)
cls_embed = cls_embed + h
h = cls_embed
cls_embed = self.norm2(cls_embed)
cls_embed = self.mlp(cls_embed)
cls_embed = self.drop_path(cls_embed)
cls_embed = h + cls_embed
out = paddle.concat([cls_embed, x[:, 1:]], axis=1)
return out
def rand_bbox(size, lam, scale=1):
"""
get bounding box as token labeling (https://github.com/zihangJiang/TokenLabeling)
return: bounding box
"""
W = size[1] // scale
H = size[2] // scale
cut_rat = np.sqrt(1. - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
# item() get the python native dtype
return bbx1.item(), bby1.item(), bbx2.item(), bby2.item()
class VOLO(nn.Layer):
def __init__(self,
layers,
image_size=224,
in_channels=3,
num_classes=1000,
patch_size=8,
stem_hidden_dim=64,
embed_dims=None,
num_heads=None,
downsamples=None,
outlook_attention=None,
mlp_ratios=None,
qkv_bias=False,
qk_scale=None,
dropout=0.,
attention_dropout=0.,
droppath=0.,
num_post_layers=2,
return_mean=False,
return_dense=True,
mix_token=True,
pooling_scale=2,
out_kernel=3,
out_stride=2,
out_padding=1):
super().__init__()
self.num_classes = num_classes
self.patch_embed = PatchEmbedding(image_size=image_size,
stem_conv=True,
stem_stride=2,
patch_size=patch_size,
in_channels=in_channels,
hidden_dim=stem_hidden_dim,
embed_dim=embed_dims[0])
self.pos_embed = paddle.create_parameter(
shape=[1,
image_size // patch_size // pooling_scale,
image_size // patch_size // pooling_scale,
embed_dims[-1]],
dtype='float32',
default_initializer=nn.initializer.Constant(0.0))
self.pos_dropout = nn.Dropout(dropout)
layer_list = []
for i in range(len(layers)):
blocks = []
for block_idx in range(layers[i]):
block_droppath = droppath * (
block_idx + sum(layers[:i])) / (sum(layers) - 1)
if outlook_attention[i]:
blocks.append(
copy.deepcopy(
Outlooker(dim=embed_dims[i],
kernel_size=out_kernel,
padding=out_padding,
stride=out_stride,
num_heads=num_heads[i],
mlp_ratio=mlp_ratios[i],
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout,
droppath=block_droppath)))
else:
blocks.append(
copy.deepcopy(
Transformer(dim=embed_dims[i],
num_heads=num_heads[i],
mlp_ratio=mlp_ratios[i],
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout,
droppath=block_droppath))
)
stage = nn.Sequential(*blocks)
layer_list.append(stage)
if downsamples[i]:
layer_list.append(copy.deepcopy(Downsample(embed_dims[i], embed_dims[i + 1], 2)))
self.model = nn.LayerList(layer_list)
# POST Layers (from CaiT)
self.post_model = None
if num_post_layers is not None:
self.post_model = nn.LayerList([
copy.deepcopy(
ClassBlock(dim=embed_dims[-1],
num_heads=num_heads[-1],
mlp_ratio=mlp_ratios[-1],
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout,
droppath=0.)
) for i in range(num_post_layers)
])
self.cls_token = paddle.create_parameter(
shape=[1, 1, embed_dims[-1]],
dtype='float32',
default_initializer=nn.initializer.TruncatedNormal(std=.02))
# Output
self.return_mean = return_mean # if True, return mean, not use class token
self.return_dense = return_dense # if True, return class token and all feature tokens
if return_dense:
assert not return_mean, "Cannot return both mean and dense"
self.mix_token = mix_token
self.pooling_scale = pooling_scale
if mix_token:
self.beta = 1.0
assert return_dense, 'return all tokens if mix_token is enabled'
if return_dense:
self.aux_head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else Identity()
self.norm = nn.LayerNorm(embed_dims[-1])
self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else Identity()
# For training:
# TODO: set pos_embed, trunc_normal
# TODO: set init weights for linear layers and layernorm layers
# TODO: set no weight decay for pos_embed and cls_token
def forward(self, x):
# Step1: patch embedding
x = self.patch_embed(x)
x = x.transpose([0, 2, 3, 1])
if self.mix_token and self.training:
lam = np.random.beta(self.beta, self.beta)
patch_h = x.shape[1] // self.pooling_scale
patch_w = x.shape[2] // self.pooling_scale
bbx1, bby1, bbx2, bby2 = rand_bbox(x.shape, lam, scale=self.pooling_scale)
temp_x = x.clone()
sbbx1 = self.pooling_scale * bbx1
sbby1 = self.pooling_scale * bby1
sbbx2 = self.pooling_scale * bbx2
sbby2 = self.pooling_scale * bby2
temp_x[:, sbbx1: sbbx2, sbby1: sbby2, :] = x.flip(axis=[0])[:, sbbx1: sbbx2, sbby1: sbby2, :]
x = temp_x
else:
bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0
# Step2: 2-stages tokens learning
for idx, block in enumerate(self.model):
if idx == 2: # add pos_embed after outlooker blocks (and a downsample layer)
x = x + self.pos_embed
x = self.pos_dropout(x)
x = block(x)
x = x.reshape([x.shape[0], -1, x.shape[-1]]) # B, H*W, C
# Step3: post layers (from CaiT)
if self.post_model is not None:
cls_token = self.cls_token.expand([x.shape[0], -1, -1])
x = paddle.concat([cls_token, x], axis=1)
for block in self.post_model:
x = block(x)
x = self.norm(x)
if self.return_mean:
return self.head(x.mean(1))
x_cls = self.head(x[:, 0])
if not self.return_dense:
return x_cls
x_aux = self.aux_head(x[:, 1:])
if not self.training:
#NOTE: pytorch Tensor.max() returns a tuple of Tensor: (values, indices), while
# paddle Tensor.max() returns a single Tensor: values
return x_cls + 0.5 * x_aux.max(1)
if self.mix_token and self.training:
x_aux = x_aux.reshape([x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1]])
temp_x = x_aux.clone()
temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(axis=[0])[:, bbx1:bbx2, bby1:bby2, :]
x_aux = temp_x
x_aux = x_aux.reshape([x_aux.shape[0], patch_h*patch_w, x_aux.shape[-1]])
return x_cls, x_aux, (bbx1, bby1, bbx2, bby2)
def build_volo(config):
"""build volo model using config"""
model = VOLO(image_size=config.DATA.IMAGE_SIZE,
layers=config.MODEL.TRANS.LAYERS,
embed_dims=config.MODEL.TRANS.EMBED_DIMS,
mlp_ratios=config.MODEL.TRANS.MLP_RATIOS,
downsamples=config.MODEL.TRANS.DOWNSAMPLES,
outlook_attention=config.MODEL.TRANS.OUTLOOK_ATTENTION,
stem_hidden_dim=config.MODEL.STEM_HIDDEN_DIM,
num_heads=config.MODEL.TRANS.NUM_HEADS,
qkv_bias=config.MODEL.TRANS.QKV_BIAS,
qk_scale=config.MODEL.TRANS.QK_SCALE)
return model
| 36.453401 | 110 | 0.530991 | 26,760 | 0.924544 | 0 | 0 | 0 | 0 | 0 | 0 | 7,416 | 0.256219 |
371f8ef580118827f3f0d0fba4c653c6f27eaf69 | 740 | py | Python | setup.py | Mattjez914/Blackjack_Microchallenge | c4f60b62a3ada14663eb30ce72563af994e1eda4 | [
"Apache-2.0"
] | null | null | null | setup.py | Mattjez914/Blackjack_Microchallenge | c4f60b62a3ada14663eb30ce72563af994e1eda4 | [
"Apache-2.0"
] | null | null | null | setup.py | Mattjez914/Blackjack_Microchallenge | c4f60b62a3ada14663eb30ce72563af994e1eda4 | [
"Apache-2.0"
] | 1 | 2019-04-17T06:12:23.000Z | 2019-04-17T06:12:23.000Z | import learntools
from setuptools import setup
from setuptools import find_packages
setup(name='learntools',
version=learntools.__version__,
description='Utilities for Kaggle Learn exercises',
url='http://github.com/kaggle/learntools',
author='Dan Becker',
author_email='dan@kaggle.com',
license='Apache 2.0',
packages=find_packages(),
zip_safe=True)
# SETUP. You don't need to worry for now about what this code does or how it works.
# If you're curious about the code, it's available under an open source license at https://github.com/Kaggle/learntools/
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex3 import q7 as blackjack
print('Setup complete.') | 38.947368 | 120 | 0.737838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.47027 |
372019bef08d8edff5f6ead1979cf5e63b3e3fd3 | 1,102 | py | Python | maro/cli/grass/lib/scripts/create_job_details.py | yourmoonlight/maro | 4fbe556f3ae1817995f90cb529e9ca6191f67d7f | [
"MIT"
] | 1 | 2021-01-13T06:41:51.000Z | 2021-01-13T06:41:51.000Z | maro/cli/grass/lib/scripts/create_job_details.py | chaosddp/maro | 3d6715649467d49a83886c1fd4ae9b41ff012a50 | [
"MIT"
] | 2 | 2020-12-15T09:13:43.000Z | 2020-12-16T08:02:41.000Z | maro/cli/grass/lib/scripts/create_job_details.py | chaosddp/maro | 3d6715649467d49a83886c1fd4ae9b41ff012a50 | [
"MIT"
] | 1 | 2021-10-01T09:17:43.000Z | 2021-10-01T09:17:43.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import json
from redis import Redis
from .utils import load_cluster_details, load_job_details
def create_job_details(cluster_name: str, job_name: str):
# Load details
cluster_details = load_cluster_details(cluster_name=cluster_name)
job_details = load_job_details(cluster_name=cluster_name, job_name=job_name)
master_hostname = cluster_details['master']['hostname']
redis_port = cluster_details['master']['redis']['port']
# Add other parameters
job_details['containers'] = {}
redis = Redis(
host=master_hostname,
port=redis_port,
charset="utf-8",
decode_responses=True
)
redis.hset(
f"{cluster_name}:job_details",
job_name,
json.dumps(job_details)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('cluster_name')
parser.add_argument('job_name')
args = parser.parse_args()
create_job_details(cluster_name=args.cluster_name, job_name=args.job_name)
| 25.627907 | 80 | 0.708711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.206897 |
37218dc001152be9a5cbbcb8a62c2a58d5ca62fb | 287 | py | Python | yard/skills/66-python/cookbook/yvhai/demo/input/xml.py | paser4se/bbxyard | d09bc6efb75618b2cef047bad9c8b835043446cb | [
"Apache-2.0"
] | 1 | 2016-03-29T02:01:58.000Z | 2016-03-29T02:01:58.000Z | yard/skills/66-python/cookbook/yvhai/demo/input/xml.py | paser4se/bbxyard | d09bc6efb75618b2cef047bad9c8b835043446cb | [
"Apache-2.0"
] | 18 | 2019-02-13T09:15:25.000Z | 2021-12-09T21:32:13.000Z | yard/skills/66-python/cookbook/yvhai/demo/input/xml.py | paser4se/bbxyard | d09bc6efb75618b2cef047bad9c8b835043446cb | [
"Apache-2.0"
] | 2 | 2020-07-05T01:01:30.000Z | 2020-07-08T22:33:06.000Z | #!/usr/bin/env python3
# xml 资源
xml_w3_book = """
<?xml version="1.0" encoding="ISO-8859-1"?>
<bookstore>
<book>
<title lang="eng">Harry Potter</title>
<price>29.99</price>
</book>
<book>
<title lang="eng">Learning XML</title>
<price>39.95</price>
</book>
</bookstore>
"""
| 13.045455 | 43 | 0.620209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.934708 |
372d1783d09633f1dcce4eb049bf6981e1c19552 | 5,855 | py | Python | test_001/test_006.py | kostur86/kasia_tut | a974bdac660208409ef3a9d2fcc670429ab5fc07 | [
"MIT"
] | null | null | null | test_001/test_006.py | kostur86/kasia_tut | a974bdac660208409ef3a9d2fcc670429ab5fc07 | [
"MIT"
] | null | null | null | test_001/test_006.py | kostur86/kasia_tut | a974bdac660208409ef3a9d2fcc670429ab5fc07 | [
"MIT"
] | null | null | null | #!env python3
"""
Introducing static object in the world.
Tasks:
1. File got really long - move all classes to library file and import
them here.
"""
import pygame
from pygame import K_ESCAPE, K_LEFT, K_RIGHT, K_UP, K_DOWN, QUIT
class Game():
def __init__(self):
"""
Set basic configuration for a game that can be always accessed.
"""
self.screen = {
"width": 240,
"height": 240,
}
self.bg_colour = (255, 255, 255)
self.active = False
self.reset_keys()
def start(self):
self.active = True
def stop(self):
self.active = False
def reset_keys(self):
self.keys = {
"left": False,
"right": False,
"up": False,
"down": False,
}
class Object():
def __init__(self, game):
self.pos = [0, 0]
self.game = game
self.image = None
def update(self, dt):
"""
"""
def draw(self, surface):
"""
"""
if self.image:
surface.blit(
self.image,
self.pos
)
class Wall(Object):
def __init__(self, game):
super().__init__(game)
self.image = pygame.image.load('Sprite-0002.png')
@staticmethod
def create_wall(game, x, y):
obj = Wall(game)
obj.pos[0] = x
obj.pos[1] = y
return obj
class Character(Object):
def __init__(self, game):
super().__init__(game)
self.health = 100
self.speed = 24
def set_position(self, x, y):
self.pos[0] = x
self.pos[1] = y
def get_position(self):
return int(self.pos[0]), int(self.pos[1])
def move(self, x, y=0):
self.pos[0] += x
self.pos[1] += y
class Player(Character):
"""
"""
def __init__(self, game):
super().__init__(game)
# Setup player's image
self.image = pygame.image.load('Sprite-0001.png')
def update(self, dt):
if self.game.keys["left"] and dt:
self.pos[0] -= int(self.speed * dt / 100)
if self.pos[0] < 0:
self.pos[0] = 0
elif self.game.keys["right"] and dt:
self.pos[0] += int(self.speed * dt / 100)
if self.pos[0] > self.game.screen["width"] - 32:
self.pos[0] = self.game.screen["width"] - 32
if self.game.keys["up"] and dt:
self.pos[1] -= int(self.speed * dt / 100)
if self.pos[1] < 0:
self.pos[1] = 0
elif self.game.keys["down"] and dt:
self.pos[1] += int(self.speed * dt / 100)
if self.pos[1] > self.game.screen["height"] - 32:
self.pos[1] = self.game.screen["height"] - 32
class Enemy(Character):
"""
"""
class Zombie(Enemy):
def __init__(self, game):
super().__init__(game)
self.speed = 10
class FastZombie(Zombie):
def __init__(self, game):
super().__init__(game)
self.speed = 24
self.health = 40
def read_input(game):
"""
Read user input and set game state.
Args:
game (Game): Current game state.
Returns:
bool: Should game still be running?
"""
# Look at every event in the queue
for event in pygame.event.get():
# Did the user hit a key?
if event.type == pygame.KEYDOWN:
# Was it the Escape key? If so, stop the loop.
if event.key == K_ESCAPE:
game.stop()
elif event.key == K_LEFT:
game.keys["left"] = True
elif event.key == K_RIGHT:
game.keys["right"] = True
elif event.key == K_UP:
game.keys["up"] = True
elif event.key == K_DOWN:
game.keys["down"] = True
# Did the user hit a key?
if event.type == pygame.KEYUP:
if event.key == K_LEFT:
game.keys["left"] = False
elif event.key == K_RIGHT:
game.keys["right"] = False
elif event.key == K_UP:
game.keys["up"] = False
elif event.key == K_DOWN:
game.keys["down"] = False
# Did the user click the window close button? If so, stop the loop.
elif event.type == QUIT:
game.stop()
def main():
"""
This is main function - it will be executed only explicitly, like this:
import main
main.main()
or when executing script from command line:
python3 main.py
"""
global active
# Initialize PyGame library
pygame.init()
game = Game()
player = Player(game)
player.set_position(32, 32)
# Create few walls
walls = [
Wall.create_wall(game, 0, 0),
Wall.create_wall(game, 120, 120),
]
# Set up the drawing window
screen = pygame.display.set_mode([game.screen["width"], game.screen["height"]])
# Start measuring time
clock = pygame.time.Clock()
dt = clock.tick()
game.start()
while game.active:
# Read any inputs from keyboard - this function will return False if
# we supposed to stop game (closed window or pressed Esc)
read_input(game)
# If user stopped game do not draw this frame
if not game.active:
continue
# Fill the background with white
screen.fill(game.bg_colour)
player.update(dt)
player.draw(screen)
for wall in walls:
wall.draw(screen)
# Flip the display
pygame.display.flip()
# Time passed since last call of tick()
dt = clock.tick(60)
if __name__ == '__main__':
# When executing script from command line start main function
main()
| 22.782101 | 83 | 0.525021 | 2,846 | 0.48608 | 0 | 0 | 138 | 0.02357 | 0 | 0 | 1,489 | 0.254313 |
372e7aa4b02b743d5cd7b6c3f4b391ee437c82f6 | 256 | py | Python | earth_engine/apps.py | ecsnavarretemit/sarai-satellite-analysis-backend | 73de6c4cac37889efff49b9be085f3c733e27702 | [
"MIT"
] | null | null | null | earth_engine/apps.py | ecsnavarretemit/sarai-satellite-analysis-backend | 73de6c4cac37889efff49b9be085f3c733e27702 | [
"MIT"
] | null | null | null | earth_engine/apps.py | ecsnavarretemit/sarai-satellite-analysis-backend | 73de6c4cac37889efff49b9be085f3c733e27702 | [
"MIT"
] | null | null | null | # admin.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 0.0.0
from __future__ import unicode_literals
from django.apps import AppConfig
class EarthEngineConfig(AppConfig):
name = 'earth_engine'
| 18.285714 | 65 | 0.769531 | 61 | 0.238281 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.488281 |
372f0b9a0c5b68436afd58d4097085803b7bfd5f | 1,333 | py | Python | seapy/__init__.py | oceanum/seapy | 758a02677340a51bbb20d622552ce798a3221b7b | [
"MIT"
] | 2 | 2020-07-09T03:29:10.000Z | 2021-07-07T22:00:46.000Z | seapy/__init__.py | joaometocean/seapy | 5df410b3f1fa928f76d3421ae3c1698e9d4a5f52 | [
"MIT"
] | null | null | null | seapy/__init__.py | joaometocean/seapy | 5df410b3f1fa928f76d3421ae3c1698e9d4a5f52 | [
"MIT"
] | null | null | null | """
__init__.py
State Estimation and Analysis for PYthon
Module for working with oceanographic data and models
Copyright (c)2019 University of Hawaii under the MIT-License.
Requires the following packages: joblib
Import classes include:
- :class:`~seapy.environ.opt`
- :class:`~seapy.progressbar.ProgressBar`
- :class:`~seapy.tidal_energy.energetics`
Imported functions include:
- :func:`~seapy.lib.adddim`
- :func:`~seapy.lib.chunker`
- :func:`~seapy.lib.convolve_mask`
- :func:`~seapy.lib.day2date`
- :func:`~seapy.lib.date2day`
- :func:`~seapy.lib.earth_angle`
- :func:`~seapy.lib.earth_distance`
- :func:`~seapy.lib.flatten`
- :func:`~seapy.lib.list_files`
- :func:`~seapy.lib.netcdf`
- :func:`~seapy.lib.rotate`
- :func:`~seapy.lib.today2day`
- :func:`~seapy.lib.unique_rows`
- :func:`~seapy.lib.vecfind`
- :func:`~seapy.oa.oasurf`
- :func:`~seapy.oa.oavol`
- :func:`~seapy.tidal_energy.tidal_energy`
- :func:`~seapy.progressbar.progress`
"""
from .lib import *
from . import roms
from . import model
from . import qserver
from . import mapping
from . import filt
from . import plot
from . import progressbar
from . import seawater
from . import tide
from .tidal_energy import tidal_energy
from .environ import opt
from .hawaii import hawaii
from .oa import *
| 24.236364 | 63 | 0.699925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,014 | 0.76069 |
372f9e118f442669abaa10df5175221694562ac7 | 22,776 | py | Python | pypy/module/_ssl/interp_ssl.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/module/_ssl/interp_ssl.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/module/_ssl/interp_ssl.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | from pypy.rpython.rctypes.tool import ctypes_platform
from pypy.rpython.rctypes.tool.libc import libc
import pypy.rpython.rctypes.implementation # this defines rctypes magic
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import W_Root, ObjSpace, Wrappable
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app
from ctypes import *
import ctypes.util
import sys
import socket
import select
from ssl import SSL_CTX, SSL, X509, SSL_METHOD, X509_NAME
from bio import BIO
# ctypes has no explicit "void" restype marker in this codebase; None means
# the C function returns nothing.
c_void = None
# handle to the system OpenSSL shared library, resolved by name
libssl = cdll.LoadLibrary(ctypes.util.find_library("ssl"))
## user defined constants
# maximum length accepted for a one-line X509 name (subject/issuer) string
X509_NAME_MAXLEN = 256
# these mirror ssl.h
PY_SSL_ERROR_NONE, PY_SSL_ERROR_SSL = 0, 1
PY_SSL_ERROR_WANT_READ, PY_SSL_ERROR_WANT_WRITE = 2, 3
PY_SSL_ERROR_WANT_X509_LOOKUP = 4
PY_SSL_ERROR_SYSCALL = 5 # look at error stack/return value/errno
PY_SSL_ERROR_ZERO_RETURN, PY_SSL_ERROR_WANT_CONNECT = 6, 7
# start of non ssl.h errorcodes
PY_SSL_ERROR_EOF = 8 # special case of SSL_ERROR_SYSCALL
PY_SSL_ERROR_INVALID_ERROR_CODE = 9
# socket states used by check_socket_and_wait_for_timeout() and friends
SOCKET_IS_NONBLOCKING, SOCKET_IS_BLOCKING = 0, 1
SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3
SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5
class CConfig:
    """Declarative configuration consumed by ctypes_platform.configure().

    Each class attribute describes a C-level constant, struct or typedef
    to be extracted at build time by compiling small probe programs
    against the headers listed in _header_.  The resolved values are
    copied onto cConfig below.
    """
    _header_ = """
    #include <openssl/ssl.h>
    #include <openssl/opensslv.h>
    #include <openssl/bio.h>
    #include <sys/types.h>
    #include <sys/time.h>
    #include <sys/poll.h>
    """
    # OpenSSL version / feature constants
    OPENSSL_VERSION_NUMBER = ctypes_platform.ConstantInteger(
        "OPENSSL_VERSION_NUMBER")
    SSL_FILETYPE_PEM = ctypes_platform.ConstantInteger("SSL_FILETYPE_PEM")
    SSL_OP_ALL = ctypes_platform.ConstantInteger("SSL_OP_ALL")
    SSL_VERIFY_NONE = ctypes_platform.ConstantInteger("SSL_VERIFY_NONE")
    # SSL_get_error() result codes
    SSL_ERROR_WANT_READ = ctypes_platform.ConstantInteger(
        "SSL_ERROR_WANT_READ")
    SSL_ERROR_WANT_WRITE = ctypes_platform.ConstantInteger(
        "SSL_ERROR_WANT_WRITE")
    SSL_ERROR_ZERO_RETURN = ctypes_platform.ConstantInteger(
        "SSL_ERROR_ZERO_RETURN")
    SSL_ERROR_WANT_X509_LOOKUP = ctypes_platform.ConstantInteger(
        "SSL_ERROR_WANT_X509_LOOKUP")
    SSL_ERROR_WANT_CONNECT = ctypes_platform.ConstantInteger(
        "SSL_ERROR_WANT_CONNECT")
    SSL_ERROR_SYSCALL = ctypes_platform.ConstantInteger("SSL_ERROR_SYSCALL")
    SSL_ERROR_SSL = ctypes_platform.ConstantInteger("SSL_ERROR_SSL")
    # select()/poll() related constants and types
    FD_SETSIZE = ctypes_platform.ConstantInteger("FD_SETSIZE")
    SSL_CTRL_OPTIONS = ctypes_platform.ConstantInteger("SSL_CTRL_OPTIONS")
    BIO_C_SET_NBIO = ctypes_platform.ConstantInteger("BIO_C_SET_NBIO")
    pollfd = ctypes_platform.Struct("struct pollfd",
        [("fd", c_int), ("events", c_short), ("revents", c_short)])
    nfds_t = ctypes_platform.SimpleType("nfds_t", c_uint)
    POLLOUT = ctypes_platform.ConstantInteger("POLLOUT")
    POLLIN = ctypes_platform.ConstantInteger("POLLIN")
class cConfig:
    # Empty namespace; populated below with the concrete values that
    # ctypes_platform.configure(CConfig) resolves from the C headers.
    pass
# resolve all CConfig entries against the real headers and copy the
# results onto the cConfig namespace
cConfig.__dict__.update(ctypes_platform.configure(CConfig))
# re-export the resolved values as module-level names
OPENSSL_VERSION_NUMBER = cConfig.OPENSSL_VERSION_NUMBER
# the PRNG seeding API (RAND_add & co.) appeared in OpenSSL 0.9.5
HAVE_OPENSSL_RAND = OPENSSL_VERSION_NUMBER >= 0x0090500fL
SSL_FILETYPE_PEM = cConfig.SSL_FILETYPE_PEM
SSL_OP_ALL = cConfig.SSL_OP_ALL
SSL_VERIFY_NONE = cConfig.SSL_VERIFY_NONE
SSL_ERROR_WANT_READ = cConfig.SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE = cConfig.SSL_ERROR_WANT_WRITE
SSL_ERROR_ZERO_RETURN = cConfig.SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_X509_LOOKUP = cConfig.SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_WANT_CONNECT = cConfig.SSL_ERROR_WANT_CONNECT
SSL_ERROR_SYSCALL = cConfig.SSL_ERROR_SYSCALL
SSL_ERROR_SSL = cConfig.SSL_ERROR_SSL
FD_SETSIZE = cConfig.FD_SETSIZE
SSL_CTRL_OPTIONS = cConfig.SSL_CTRL_OPTIONS
BIO_C_SET_NBIO = cConfig.BIO_C_SET_NBIO
POLLOUT = cConfig.POLLOUT
POLLIN = cConfig.POLLIN
pollfd = cConfig.pollfd
nfds_t = cConfig.nfds_t
# fixed-size char buffer type used for X509_NAME_oneline() output
arr_x509 = c_char * X509_NAME_MAXLEN
# Mapping from the symbolic error names exported at application level to
# the module's PY_SSL_* error codes defined above.
constants = {
    "SSL_ERROR_ZERO_RETURN": PY_SSL_ERROR_ZERO_RETURN,
    "SSL_ERROR_WANT_READ": PY_SSL_ERROR_WANT_READ,
    "SSL_ERROR_WANT_WRITE": PY_SSL_ERROR_WANT_WRITE,
    "SSL_ERROR_WANT_X509_LOOKUP": PY_SSL_ERROR_WANT_X509_LOOKUP,
    "SSL_ERROR_SYSCALL": PY_SSL_ERROR_SYSCALL,
    "SSL_ERROR_SSL": PY_SSL_ERROR_SSL,
    "SSL_ERROR_WANT_CONNECT": PY_SSL_ERROR_WANT_CONNECT,
    "SSL_ERROR_EOF": PY_SSL_ERROR_EOF,
    "SSL_ERROR_INVALID_ERROR_CODE": PY_SSL_ERROR_INVALID_ERROR_CODE,
}
# ---------------------------------------------------------------------------
# ctypes prototypes for every OpenSSL entry point used by this module.
# Declaring argtypes/restype up front gives ctypes enough information to
# convert arguments correctly and catch call-signature mistakes early.
# ---------------------------------------------------------------------------
# library initialization
libssl.SSL_load_error_strings.restype = c_void
libssl.SSL_library_init.restype = c_int
# PRNG seeding helpers (only available from OpenSSL 0.9.5 on)
if HAVE_OPENSSL_RAND:
    libssl.RAND_add.argtypes = [c_char_p, c_int, c_double]
    libssl.RAND_add.restype = c_void
    libssl.RAND_status.restype = c_int
    libssl.RAND_egd.argtypes = [c_char_p]
    libssl.RAND_egd.restype = c_int
# SSL context (SSL_CTX) management
libssl.SSL_CTX_new.argtypes = [POINTER(SSL_METHOD)]
libssl.SSL_CTX_new.restype = POINTER(SSL_CTX)
libssl.SSLv23_method.restype = POINTER(SSL_METHOD)
libssl.SSL_CTX_use_PrivateKey_file.argtypes = [POINTER(SSL_CTX), c_char_p, c_int]
libssl.SSL_CTX_use_PrivateKey_file.restype = c_int
libssl.SSL_CTX_use_certificate_chain_file.argtypes = [POINTER(SSL_CTX), c_char_p]
libssl.SSL_CTX_use_certificate_chain_file.restype = c_int
libssl.SSL_CTX_ctrl.argtypes = [POINTER(SSL_CTX), c_int, c_int, c_void_p]
libssl.SSL_CTX_ctrl.restype = c_int
libssl.SSL_CTX_set_verify.argtypes = [POINTER(SSL_CTX), c_int, c_void_p]
libssl.SSL_CTX_set_verify.restype = c_void
# SSL connection object management
libssl.SSL_new.argtypes = [POINTER(SSL_CTX)]
libssl.SSL_new.restype = POINTER(SSL)
libssl.SSL_set_fd.argtypes = [POINTER(SSL), c_int]
libssl.SSL_set_fd.restype = c_int
libssl.BIO_ctrl.argtypes = [POINTER(BIO), c_int, c_int, c_void_p]
libssl.BIO_ctrl.restype = c_int
libssl.SSL_get_rbio.argtypes = [POINTER(SSL)]
libssl.SSL_get_rbio.restype = POINTER(BIO)
libssl.SSL_get_wbio.argtypes = [POINTER(SSL)]
libssl.SSL_get_wbio.restype = POINTER(BIO)
libssl.SSL_set_connect_state.argtypes = [POINTER(SSL)]
libssl.SSL_set_connect_state.restype = c_void
libssl.SSL_connect.argtypes = [POINTER(SSL)]
libssl.SSL_connect.restype = c_int
libssl.SSL_get_error.argtypes = [POINTER(SSL), c_int]
libssl.SSL_get_error.restype = c_int
# poll(2) from libc, when available, avoids select()'s FD_SETSIZE limit
have_poll = False
if hasattr(libc, "poll"):
    have_poll = True
    libc.poll.argtypes = [POINTER(pollfd), nfds_t, c_int]
    libc.poll.restype = c_int
# error reporting
libssl.ERR_get_error.restype = c_int
libssl.ERR_error_string.argtypes = [c_int, c_char_p]
libssl.ERR_error_string.restype = c_char_p
# peer certificate inspection
libssl.SSL_get_peer_certificate.argtypes = [POINTER(SSL)]
libssl.SSL_get_peer_certificate.restype = POINTER(X509)
libssl.X509_get_subject_name.argtypes = [POINTER(X509)]
libssl.X509_get_subject_name.restype = POINTER(X509_NAME)
libssl.X509_get_issuer_name.argtypes = [POINTER(X509)]
libssl.X509_get_issuer_name.restype = POINTER(X509_NAME)
libssl.X509_NAME_oneline.argtypes = [POINTER(X509_NAME), arr_x509, c_int]
libssl.X509_NAME_oneline.restype = c_char_p
# resource release
libssl.X509_free.argtypes = [POINTER(X509)]
libssl.X509_free.restype = c_void
libssl.SSL_free.argtypes = [POINTER(SSL)]
libssl.SSL_free.restype = c_void
libssl.SSL_CTX_free.argtypes = [POINTER(SSL_CTX)]
libssl.SSL_CTX_free.restype = c_void
# data transfer
libssl.SSL_write.argtypes = [POINTER(SSL), c_char_p, c_int]
libssl.SSL_write.restype = c_int
libssl.SSL_pending.argtypes = [POINTER(SSL)]
libssl.SSL_pending.restype = c_int
libssl.SSL_read.argtypes = [POINTER(SSL), c_char_p, c_int]
libssl.SSL_read.restype = c_int
def _init_ssl():
    """Initialize the OpenSSL library once for the whole process:
    register human-readable error strings, then register all ciphers
    and digests (SSL_library_init)."""
    libssl.SSL_load_error_strings()
    libssl.SSL_library_init()
if HAVE_OPENSSL_RAND:
    # helper routines for seeding the SSL PRNG, only exposed when the
    # underlying OpenSSL is recent enough to provide them

    def RAND_add(space, string, entropy):
        """RAND_add(string, entropy)

        Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
        bound on the entropy contained in string."""
        libssl.RAND_add(c_char_p(string), len(string), entropy)
    RAND_add.unwrap_spec = [ObjSpace, str, float]

    def RAND_status(space):
        """RAND_status() -> 0 or 1

        Returns 1 if the OpenSSL PRNG has been seeded with enough data and 0 if not.
        It is necessary to seed the PRNG with RAND_add() on some platforms before
        using the ssl() function."""
        return space.wrap(libssl.RAND_status())
    RAND_status.unwrap_spec = [ObjSpace]

    def RAND_egd(space, path):
        """RAND_egd(path) -> bytes

        Queries the entropy gather daemon (EGD) on socket path. Returns number
        of bytes read. Raises socket.sslerror if connection to EGD fails or
        if it does provide enough data to seed PRNG."""
        num_read = libssl.RAND_egd(c_char_p(path))
        if num_read == -1:
            # EGD signals failure with -1; surface it as an exception
            msg = ("EGD connection failed or EGD did not return"
                   " enough data to seed the PRNG")
            raise OperationError(space.w_Exception, space.wrap(msg))
        return space.wrap(num_read)
    RAND_egd.unwrap_spec = [ObjSpace, str]
class SSLObject(Wrappable):
def __init__(self, space):
self.space = space
self.w_socket = None
self.ctx = POINTER(SSL_CTX)()
self.ssl = POINTER(SSL)()
self.server_cert = POINTER(X509)()
self._server = arr_x509()
self._issuer = arr_x509()
def server(self):
return self.space.wrap(self._server.value)
server.unwrap_spec = ['self']
def issuer(self):
return self.space.wrap(self._issuer.value)
issuer.unwrap_spec = ['self']
def __del__(self):
if self.server_cert:
libssl.X509_free(self.server_cert)
if self.ssl:
libssl.SSL_free(self.ssl)
if self.ctx:
libssl.SSL_CTX_free(self.ctx)
def write(self, data):
"""write(s) -> len
Writes the string s into the SSL object. Returns the number
of bytes written."""
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, True)
if sockstate == SOCKET_HAS_TIMED_OUT:
raise OperationError(self.space.w_Exception,
self.space.wrap("The write operation timed out"))
elif sockstate == SOCKET_HAS_BEEN_CLOSED:
raise OperationError(self.space.w_Exception,
self.space.wrap("Underlying socket has been closed."))
elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
raise OperationError(self.space.w_Exception,
self.space.wrap("Underlying socket too large for select()."))
num_bytes = 0
while True:
err = 0
num_bytes = libssl.SSL_write(self.ssl, data, len(data))
err = libssl.SSL_get_error(self.ssl, num_bytes)
if err == SSL_ERROR_WANT_READ:
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, False)
elif err == SSL_ERROR_WANT_WRITE:
sockstate = check_socket_and_wait_for_timeout(self.space,
self.w_socket, True)
else:
sockstate = SOCKET_OPERATION_OK
if sockstate == SOCKET_HAS_TIMED_OUT:
raise OperationError(self.space.w_Exception,
self.space.wrap("The connect operation timed out"))
elif sockstate == SOCKET_HAS_BEEN_CLOSED:
raise OperationError(self.space.w_Exception,
self.space.wrap("Underlying socket has been closed."))
elif sockstate == SOCKET_IS_NONBLOCKING:
break
if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
continue
else:
break
if num_bytes > 0:
return self.space.wrap(num_bytes)
else:
errstr, errval = _ssl_seterror(self.space, self, num_bytes)
raise OperationError(self.space.w_Exception,
self.space.wrap("%s: %d" % (errstr, errval)))
write.unwrap_spec = ['self', str]
def read(self, num_bytes=1024):
    """read([len]) -> string

    Read up to len bytes from the SSL socket."""
    # If OpenSSL already has decrypted data buffered, skip the socket wait.
    count = libssl.SSL_pending(self.ssl)
    if not count:
        sockstate = check_socket_and_wait_for_timeout(self.space,
            self.w_socket, False)
        if sockstate == SOCKET_HAS_TIMED_OUT:
            raise OperationError(self.space.w_Exception,
                self.space.wrap("The read operation timed out"))
        elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
            raise OperationError(self.space.w_Exception,
                self.space.wrap("Underlying socket too large for select()."))

    buf = create_string_buffer(num_bytes)

    while True:
        err = 0
        count = libssl.SSL_read(self.ssl, buf, num_bytes)
        err = libssl.SSL_get_error(self.ssl, count)

        # Non-blocking BIO: wait in the direction OpenSSL requested.
        if err == SSL_ERROR_WANT_READ:
            sockstate = check_socket_and_wait_for_timeout(self.space,
                self.w_socket, False)
        elif err == SSL_ERROR_WANT_WRITE:
            sockstate = check_socket_and_wait_for_timeout(self.space,
                self.w_socket, True)
        else:
            sockstate = SOCKET_OPERATION_OK

        if sockstate == SOCKET_HAS_TIMED_OUT:
            raise OperationError(self.space.w_Exception,
                self.space.wrap("The read operation timed out"))
        elif sockstate == SOCKET_IS_NONBLOCKING:
            break

        if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
            continue
        else:
            break

    if count <= 0:
        errstr, errval = _ssl_seterror(self.space, self, count)
        raise OperationError(self.space.w_Exception,
            self.space.wrap("%s: %d" % (errstr, errval)))

    if count != num_bytes:
        # resize: keep only the bytes actually read
        data = buf.raw
        assert count >= 0
        try:
            new_data = data[0:count]
        except:
            # NOTE(review): bare except; a slice rarely fails other than
            # with MemoryError - consider narrowing this clause.
            raise OperationError(self.space.w_MemoryException,
                self.space.wrap("error in resizing of the buffer."))
        buf = create_string_buffer(count)
        buf.raw = new_data
    return self.space.wrap(buf.value)
read.unwrap_spec = ['self', int]
# Expose the interp-level methods of SSLObject as the app-level "SSLObject" type.
SSLObject.typedef = TypeDef("SSLObject",
    server = interp2app(SSLObject.server,
        unwrap_spec=SSLObject.server.unwrap_spec),
    issuer = interp2app(SSLObject.issuer,
        unwrap_spec=SSLObject.issuer.unwrap_spec),
    write = interp2app(SSLObject.write,
        unwrap_spec=SSLObject.write.unwrap_spec),
    read = interp2app(SSLObject.read, unwrap_spec=SSLObject.read.unwrap_spec)
)
def new_sslobject(space, w_sock, w_key_file, w_cert_file):
    """Create an SSLObject wrapping w_sock and complete the client handshake.

    w_key_file / w_cert_file may be app-level None; if given, both must be
    provided together.  Raises an app-level Exception on any setup or
    handshake failure.
    """
    ss = SSLObject(space)
    sock_fd = space.int_w(space.call_method(w_sock, "fileno"))
    w_timeout = space.call_method(w_sock, "gettimeout")
    if space.is_w(w_timeout, space.w_None):
        has_timeout = False
    else:
        has_timeout = True
    if space.is_w(w_key_file, space.w_None):
        key_file = None
    else:
        key_file = space.str_w(w_key_file)
    if space.is_w(w_cert_file, space.w_None):
        cert_file = None
    else:
        cert_file = space.str_w(w_cert_file)

    # Key and certificate must be given together (or neither).
    if ((key_file and not cert_file) or (not key_file and cert_file)):
        raise OperationError(space.w_Exception,
            space.wrap("Both the key & certificate files must be specified"))

    ss.ctx = libssl.SSL_CTX_new(libssl.SSLv23_method()) # set up context
    if not ss.ctx:
        raise OperationError(space.w_Exception, space.wrap("SSL_CTX_new error"))

    if key_file:
        ret = libssl.SSL_CTX_use_PrivateKey_file(ss.ctx, key_file,
            SSL_FILETYPE_PEM)
        if ret < 1:
            raise OperationError(space.w_Exception,
                space.wrap("SSL_CTX_use_PrivateKey_file error"))

        ret = libssl.SSL_CTX_use_certificate_chain_file(ss.ctx, cert_file)
        libssl.SSL_CTX_ctrl(ss.ctx, SSL_CTRL_OPTIONS, SSL_OP_ALL, c_void_p())
        if ret < 1:
            raise OperationError(space.w_Exception,
                space.wrap("SSL_CTX_use_certificate_chain_file error"))

    libssl.SSL_CTX_set_verify(ss.ctx, SSL_VERIFY_NONE, c_void_p()) # set verify level
    ss.ssl = libssl.SSL_new(ss.ctx) # new ssl struct
    libssl.SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL

    # If the socket is in non-blocking mode or timeout mode, set the BIO
    # to non-blocking mode (blocking is the default)
    if has_timeout:
        # Set both the read and write BIO's to non-blocking mode
        libssl.BIO_ctrl(libssl.SSL_get_rbio(ss.ssl), BIO_C_SET_NBIO, 1, c_void_p())
        libssl.BIO_ctrl(libssl.SSL_get_wbio(ss.ssl), BIO_C_SET_NBIO, 1, c_void_p())
    libssl.SSL_set_connect_state(ss.ssl)

    # Actually negotiate SSL connection
    # XXX If SSL_connect() returns 0, it's also a failure.
    sockstate = 0
    while True:
        ret = libssl.SSL_connect(ss.ssl)
        err = libssl.SSL_get_error(ss.ssl, ret)

        # Handshake may need to wait for socket readiness in either direction.
        if err == SSL_ERROR_WANT_READ:
            sockstate = check_socket_and_wait_for_timeout(space, w_sock, False)
        elif err == SSL_ERROR_WANT_WRITE:
            sockstate = check_socket_and_wait_for_timeout(space, w_sock, True)
        else:
            sockstate = SOCKET_OPERATION_OK

        if sockstate == SOCKET_HAS_TIMED_OUT:
            raise OperationError(space.w_Exception,
                space.wrap("The connect operation timed out"))
        elif sockstate == SOCKET_HAS_BEEN_CLOSED:
            raise OperationError(space.w_Exception,
                space.wrap("Underlying socket has been closed."))
        elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
            raise OperationError(space.w_Exception,
                space.wrap("Underlying socket too large for select()."))
        elif sockstate == SOCKET_IS_NONBLOCKING:
            break

        if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
            continue
        else:
            break

    if ret < 0:
        errstr, errval = _ssl_seterror(space, ss, ret)
        raise OperationError(space.w_Exception,
            space.wrap("%s: %d" % (errstr, errval)))

    # Cache the one-line textual subject/issuer of the peer certificate.
    ss.server_cert = libssl.SSL_get_peer_certificate(ss.ssl)
    if ss.server_cert:
        libssl.X509_NAME_oneline(libssl.X509_get_subject_name(ss.server_cert),
            ss._server, X509_NAME_MAXLEN)
        libssl.X509_NAME_oneline(libssl.X509_get_issuer_name(ss.server_cert),
            ss._issuer, X509_NAME_MAXLEN)

    ss.w_socket = w_sock
    return ss
new_sslobject.unwrap_spec = [ObjSpace, W_Root, str, str]
def check_socket_and_wait_for_timeout(space, w_sock, writing):
    """If the socket has a timeout, do a select()/poll() on the socket.
    The argument writing indicates the direction.
    Returns one of the possibilities in the timeout_state enum (above)."""
    w_timeout = space.call_method(w_sock, "gettimeout")
    if space.is_w(w_timeout, space.w_None):
        return SOCKET_IS_BLOCKING
    elif space.int_w(w_timeout) == 0.0:
        return SOCKET_IS_NONBLOCKING
    # NOTE(review): int_w truncates a fractional timeout (e.g. 0.5s) -
    # confirm whether float_w was intended here.
    sock_timeout = space.int_w(w_timeout)

    # guard against closed socket
    try:
        space.call_method(w_sock, "fileno")
    except:
        return SOCKET_HAS_BEEN_CLOSED

    sock_fd = space.int_w(space.call_method(w_sock, "fileno"))

    # Prefer poll, if available, since you can poll() any fd
    # which can't be done with select().
    if have_poll:
        _pollfd = pollfd()
        _pollfd.fd = sock_fd
        if writing:
            _pollfd.events = POLLOUT
        else:
            _pollfd.events = POLLIN

        # socket's timeout is in seconds, poll's timeout in ms
        timeout = int(sock_timeout * 1000 + 0.5)
        rc = libc.poll(byref(_pollfd), 1, timeout)
        if rc == 0:
            return SOCKET_HAS_TIMED_OUT
        else:
            return SOCKET_OPERATION_OK

    # select() cannot handle descriptors beyond FD_SETSIZE.
    if sock_fd >= FD_SETSIZE:
        return SOCKET_TOO_LARGE_FOR_SELECT

    # construct the arguments for select
    sec = int(sock_timeout)
    usec = int((sock_timeout - sec) * 1e6)
    timeout = sec + usec * 0.000001
    # see if the socket is ready
    if writing:
        ret = select.select([], [sock_fd], [], timeout)
        r, w, e = ret
        if not w:
            return SOCKET_HAS_TIMED_OUT
        else:
            return SOCKET_OPERATION_OK
    else:
        ret = select.select([sock_fd], [], [], timeout)
        r, w, e = ret
        if not r:
            return SOCKET_HAS_TIMED_OUT
        else:
            return SOCKET_OPERATION_OK
def _ssl_seterror(space, ss, ret):
    """Translate the OpenSSL error for *ret* into a (message, errno) pair."""
    assert ret <= 0
    err = libssl.SSL_get_error(ss.ssl, ret)

    # Error codes with a fixed one-to-one message/errno mapping.
    plain_codes = {
        SSL_ERROR_ZERO_RETURN:
            ("TLS/SSL connection has been closed", PY_SSL_ERROR_ZERO_RETURN),
        SSL_ERROR_WANT_READ:
            ("The operation did not complete (read)", PY_SSL_ERROR_WANT_READ),
        SSL_ERROR_WANT_WRITE:
            ("The operation did not complete (write)", PY_SSL_ERROR_WANT_WRITE),
        SSL_ERROR_WANT_X509_LOOKUP:
            ("The operation did not complete (X509 lookup)", PY_SSL_ERROR_WANT_X509_LOOKUP),
        SSL_ERROR_WANT_CONNECT:
            ("The operation did not complete (connect)", PY_SSL_ERROR_WANT_CONNECT),
    }
    if err in plain_codes:
        return plain_codes[err]

    if err == SSL_ERROR_SYSCALL:
        e = libssl.ERR_get_error()
        if e == 0:
            if ret == 0 or space.is_w(ss.w_socket, space.w_None):
                return "EOF occurred in violation of protocol", PY_SSL_ERROR_EOF
            if ret == -1:
                # the underlying BIO reported an I/0 error
                return "", 0  # sock.errorhandler()?
            return "Some I/O error occurred", PY_SSL_ERROR_SYSCALL
        return libssl.ERR_error_string(e, None), PY_SSL_ERROR_SYSCALL

    if err == SSL_ERROR_SSL:
        e = libssl.ERR_get_error()
        if e != 0:
            return libssl.ERR_error_string(e, None), PY_SSL_ERROR_SSL
        return "A failure in the SSL library occurred", PY_SSL_ERROR_SSL

    return "Invalid error code", PY_SSL_ERROR_INVALID_ERROR_CODE
def ssl(space, w_socket, w_key_file=None, w_cert_file=None):
    """ssl(socket, [keyfile, certfile]) -> sslobject"""
    # App-level entry point: build and wrap a handshaken SSLObject.
    return space.wrap(new_sslobject(space, w_socket, w_key_file, w_cert_file))
ssl.unwrap_spec = [ObjSpace, W_Root, W_Root, W_Root]
| 38.472973 | 85 | 0.67514 | 7,193 | 0.315815 | 0 | 0 | 0 | 0 | 0 | 0 | 3,744 | 0.164384 |
37303e892dc9ad94bf01158c4ae3cf2a203d0473 | 2,576 | py | Python | apps/question2.py | roshik2016/CIS-5810-Health-Care | af06711c946a92d06ab198cbe10ff73dd103f1c2 | [
"Apache-2.0"
] | null | null | null | apps/question2.py | roshik2016/CIS-5810-Health-Care | af06711c946a92d06ab198cbe10ff73dd103f1c2 | [
"Apache-2.0"
] | null | null | null | apps/question2.py | roshik2016/CIS-5810-Health-Care | af06711c946a92d06ab198cbe10ff73dd103f1c2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 29 00:56:43 2017
@author: roshi
"""
import pandas as pd
import matplotlib.pyplot as plt
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from app import app
# Load the raw survey data and pre-aggregate it per (year, state).
data = pd.read_csv('./data/youth_tobacco_analysis.csv')
"""Pandas DataFrame Implemented"""
final_data = pd.DataFrame(data.groupby(['YEAR','LocationDesc']).count())
# Persist the aggregation and reload it as the working DataFrame.
final_data.to_csv('./data/question2.csv', sep = ',', encoding='utf-8')
qn2data = pd.read_csv('./data/question2.csv')
qn2data['LocationDesc'] = qn2data['LocationDesc'].str.upper()
x=0
state_names = list(qn2data['LocationDesc'].unique())
"""
String Operation to Convert the string in each column to upper case is used.
It is used as the columns names are inconsistant with the casing
"""
# NOTE(review): the column was already upper-cased above, so this loop is
# redundant (it re-applies .upper() to already-uppercase values).
for i in state_names:
    state_names[x] = state_names[x].upper()
    x=x+1
years = list(qn2data['YEAR'].unique())
# Dash page layout: a state-selection dropdown plus the line-chart target.
layout = html.Div(children=[
    html.Div([
        dcc.Dropdown(
            id='state_names',
            options=[{'label': i, 'value': i} for i in state_names],
            value='ARIZONA'
        ),
    ],
    style={'width': '30%', 'display': 'inline-block'}),
    html.Div([
        dcc.Graph(id='line-chart'),
    ],style={'width': '49%'}),
])
@app.callback(
    dash.dependencies.Output('line-chart', 'figure'),
    [dash.dependencies.Input('state_names', 'value')])
def update_bar_chart(statename1):
    """
    Build the line-chart figure for the state selected in the dropdown.

    Keyword Arguments:
    statename1 -- Gets the first state name to compare

    Filters the aggregated DataFrame down to one state and plots its
    year-by-year counts as a trend line.
    """
    state_rows = qn2data['LocationDesc'] == statename1
    counts = list(qn2data['LocationAbbr'][state_rows])
    year_axis = list(qn2data['YEAR'][state_rows])
    figure = {
        'data': ([
            {'x': year_axis, 'y': counts, 'type': 'line', 'name': 'NB'},
        ]),
        'layout': go.Layout(
            title = "Smoking Status Comarison by States",
            xaxis={'title': 'Somking Status of Youth'},
            yaxis={'title': 'Count of Youth Over the Years'}),
    }
    return figure
| 25.76 | 97 | 0.556289 | 0 | 0 | 0 | 0 | 1,112 | 0.431677 | 0 | 0 | 1,050 | 0.407609 |
3730b8d9d97aea9d5b10c216bcc20f5a6594936c | 1,084 | py | Python | tests/test_easy_patient_name.py | taylordeatri/phc-sdk-py | 8f3ec6ac44e50c7194f174fd0098de390886693d | [
"MIT"
] | 1 | 2020-07-22T12:46:58.000Z | 2020-07-22T12:46:58.000Z | tests/test_easy_patient_name.py | taylordeatri/phc-sdk-py | 8f3ec6ac44e50c7194f174fd0098de390886693d | [
"MIT"
] | 54 | 2019-10-09T16:19:04.000Z | 2022-01-19T20:28:59.000Z | tests/test_easy_patient_name.py | taylordeatri/phc-sdk-py | 8f3ec6ac44e50c7194f174fd0098de390886693d | [
"MIT"
] | 2 | 2019-10-30T19:54:43.000Z | 2020-12-03T18:57:15.000Z | from phc.easy.patients.name import expand_name_value
def test_name():
    # A single plain name expands into indexed given-name and family columns.
    assert expand_name_value(
        [{"text": "ARA251 LO", "given": ["ARA251"], "family": "LO"}]
    ) == {"name_given_0": "ARA251", "name_family": "LO"}
def test_name_with_multiple_values():
    # NOTE: Official names are preferred first and then remaining names are put
    # in separate column
    assert expand_name_value(
        [
            {
                "text": "Christian Di Lorenzo",
                "given": ["Christian"],
                "family": "Di Lorenzo",
            },
            {
                "use": "official",
                "given": ["Robert", "Christian"],
                "family": "Di Lorenzo",
            },
        ]
    ) == {
        # Expanded columns come from the "official" entry...
        "name_given_0": "Robert",
        "name_given_1": "Christian",
        "name_family": "Di Lorenzo",
        "name_use": "official",
        # ...while the non-official entry is preserved verbatim.
        "other_names": [
            {
                "text": "Christian Di Lorenzo",
                "given": ["Christian"],
                "family": "Di Lorenzo",
            },
        ],
    }
37324e5514d40a1e4f6fc7ae82b9119d2c9de22e | 2,249 | py | Python | templates/fastApiService/zarubaServiceName/main.py | state-alchemists/zaruba | 2c689c920df3589168ec81664b92110021892464 | [
"Apache-2.0"
] | 39 | 2020-03-13T19:41:11.000Z | 2022-02-14T02:01:00.000Z | templates/fastApiService/zarubaServiceName/main.py | state-alchemists/zaruba | 2c689c920df3589168ec81664b92110021892464 | [
"Apache-2.0"
] | 5 | 2020-08-01T08:55:48.000Z | 2022-02-10T00:55:39.000Z | templates/fastApiService/zarubaServiceName/main.py | state-alchemists/zaruba | 2c689c920df3589168ec81664b92110021892464 | [
"Apache-2.0"
] | 4 | 2020-11-10T20:45:12.000Z | 2021-03-18T06:18:55.000Z | from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from sqlalchemy import create_engine
from helpers.transport import RMQEventMap, KafkaEventMap, get_rmq_connection_parameters, get_kafka_connection_parameters
from configs.helper import get_abs_static_dir, create_message_bus, create_rpc
import os
# All runtime configuration comes from environment variables with sensible
# local-development defaults.
db_url = os.getenv('ZARUBA_SERVICE_NAME_SQLALCHEMY_DATABASE_URL', 'sqlite://')

# RabbitMQ transport settings (used when the message bus / RPC type is rmq).
rmq_connection_parameters = get_rmq_connection_parameters(
    host = os.getenv('ZARUBA_SERVICE_NAME_RABBITMQ_HOST', 'localhost'),
    user = os.getenv('ZARUBA_SERVICE_NAME_RABBITMQ_USER', 'root'),
    password = os.getenv('ZARUBA_SERVICE_NAME_RABBITMQ_PASS', 'toor'),
    virtual_host = os.getenv('ZARUBA_SERVICE_NAME_RABBITMQ_VHOST', '/'),
    heartbeat=30
)
rmq_event_map = RMQEventMap({})

# Kafka transport settings (used when the message bus type is kafka).
kafka_connection_parameters = get_kafka_connection_parameters(
    bootstrap_servers = os.getenv('ZARUBA_SERVICE_NAME_KAFKA_BOOTSTRAP_SERVERS', 'localhost:9093'),
    sasl_mechanism=os.getenv('ZARUBA_SERVICE_NAME_KAFKA_SASL_MECHANISM', 'PLAIN'),
    sasl_plain_username=os.getenv('ZARUBA_SERVICE_NAME_KAFKA_SASL_PLAIN_USERNAME', ''),
    sasl_plain_password=os.getenv('ZARUBA_SERVICE_NAME_KAFKA_SASL_PLAIN_PASSWORD', '')
)
kafka_event_map = KafkaEventMap({})

# Feature toggles: which transports and handler groups are active.
mb_type = os.getenv('ZARUBA_SERVICE_NAME_MESSAGE_BUS_TYPE', 'local')
rpc_type = os.getenv('ZARUBA_SERVICE_NAME_RPC_TYPE', 'local')
enable_http_handler = os.getenv('ZARUBA_SERVICE_NAME_ENABLE_HTTP_HANDLER', '1') != '0'
enable_event_handler = os.getenv('ZARUBA_SERVICE_NAME_ENABLE_EVENT_HANDLER', '1') != '0'
enable_rpc_handler = os.getenv('ZARUBA_SERVICE_NAME_ENABLE_RPC_HANDLER', '1') != '0'
static_url = os.getenv('ZARUBA_SERVICE_NAME_STATIC_URL', '/static')
static_dir = get_abs_static_dir(os.getenv('ZARUBA_SERVICE_NAME_STATIC_DIR', ''))

# Application-wide singletons: DB engine, FastAPI app, message bus, RPC client.
engine = create_engine(db_url, echo=True)
app = FastAPI(title='zarubaServiceName')
mb = create_message_bus(mb_type, rmq_connection_parameters, rmq_event_map, kafka_connection_parameters, kafka_event_map)
rpc = create_rpc(rpc_type, rmq_connection_parameters, rmq_event_map)
@app.on_event('shutdown')
def on_shutdown():
    # Close transport connections gracefully when the FastAPI app stops.
    mb.shutdown()
    rpc.shutdown()

# Serve static assets only when a static directory is configured.
if static_dir != '':
    app.mount(static_url, StaticFiles(directory=static_dir), name='static')
2e92110a4acf5b4d90f4de3ae5d754fd4d0bda47 | 292 | py | Python | integration/tests_ok/user_agent.py | jleverenz/hurl | b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3 | [
"Apache-2.0"
] | null | null | null | integration/tests_ok/user_agent.py | jleverenz/hurl | b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3 | [
"Apache-2.0"
] | null | null | null | integration/tests_ok/user_agent.py | jleverenz/hurl | b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3 | [
"Apache-2.0"
] | null | null | null | from app import app
from flask import request
@app.route("/user-agent/a")
def useragent_a():
assert "Mozilla/5.0 A" == request.headers["User-Agent"]
return ""
@app.route("/user-agent/b")
def useragent_b():
assert "Mozilla/5.0 B" == request.headers["User-Agent"]
return ""
| 19.466667 | 59 | 0.657534 | 0 | 0 | 0 | 0 | 240 | 0.821918 | 0 | 0 | 88 | 0.30137 |
2e9468df37a2fdec7d11655ed9954768b1a4267e | 61,398 | py | Python | downloader_backend.py | ansys/automatic-installer | 47563fbccf0769ba0153deb9cce373d99e8a8b26 | [
"MIT"
] | 2 | 2022-02-17T10:02:29.000Z | 2022-02-22T21:31:09.000Z | downloader_backend.py | ansys/automatic-installer | 47563fbccf0769ba0153deb9cce373d99e8a8b26 | [
"MIT"
] | 4 | 2022-02-15T09:56:49.000Z | 2022-03-31T15:44:10.000Z | downloader_backend.py | ansys/automatic-installer | 47563fbccf0769ba0153deb9cce373d99e8a8b26 | [
"MIT"
] | 1 | 2022-02-22T21:31:17.000Z | 2022-02-22T21:31:17.000Z | import argparse
import datetime
import errno
import json
import logging
import os
import random
import re
import shutil
import subprocess
import sys
import time
import traceback
import zipfile
import zlib
from functools import wraps
from types import SimpleNamespace
import psutil
import py7zr
from artifactory import ArtifactoryPath
from artifactory import md5sum
from artifactory_du import artifactory_du
from dohq_artifactory import ArtifactoryException
from influxdb import InfluxDBClient
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.runtime.client_request_exception import ClientRequestException
from office365.sharepoint.client_context import ClientContext
from plyer import notification
from requests.exceptions import RequestException
from urllib3.exceptions import HTTPError
import iss_templates
__author__ = "Maksim Beliaev"
__email__ = "maksim.beliaev@ansys.com"
__version__ = "3.0.2"
STATISTICS_SERVER = "OTTBLD02"
STATISTICS_PORT = 8086
TIMEOUT = 90
ARTIFACTORY_DICT = {
"Azure": "http://azwec7artsrv01.ansys.com:8080/artifactory",
"Austin": "http://ausatsrv01.ansys.com:8080/artifactory",
"Boulder": "http://bouartifact.ansys.com:8080/artifactory",
"Canonsburg": "http://canartifactory.ansys.com:8080/artifactory",
"Concord": "http://convmartifact.win.ansys.com:8080/artifactory",
"Darmstadt": "http://darvmartifact.win.ansys.com:8080/artifactory",
"Evanston": "http://evavmartifact:8080/artifactory",
"Hannover": "http://hanartifact1.ansys.com:8080/artifactory",
"Horsham": "http://horvmartifact1.ansys.com:8080/artifactory",
"Lebanon": "http://lebartifactory.win.ansys.com:8080/artifactory",
"Lyon": "http://lyovmartifact.win.ansys.com:8080/artifactory",
"Otterfing": "http://ottvmartifact.win.ansys.com:8080/artifactory",
"Pune": "http://punvmartifact.win.ansys.com:8080/artifactory",
"Sheffield": "http://shfvmartifact.win.ansys.com:8080/artifactory",
"SanJose": "http://sjoartsrv01.ansys.com:8080/artifactory",
"Waterloo": "https://watartifactory.win.ansys.com:8443/artifactory",
}
SHAREPOINT_SITE_URL = r"https://ansys.sharepoint.com/sites/BetaDownloader"
class DownloaderError(Exception):
    """Raised for all anticipated download/installation failures."""
    pass
def retry(exceptions, tries=4, delay=3, backoff=1, logger=None, proc_lock=False):
    """
    Retry calling the decorated function using an exponential backoff.

    The decorated callable is expected to be a (bound-style) function whose
    first positional argument is ``self``.

    Args:
        exceptions (Exception or tuple): the exception to check. may be a tuple of exceptions to check
        tries (int): number of times to try (not retry) before giving up
        delay (int): initial delay between retries in seconds
        backoff (int): backoff multiplier e.g. value of 2 will double the delay each retry
        logger (logging): logger to use. If None, print
        proc_lock (bool): if retry is applied to proc lock function

    Returns: decorator

    Raises:
        DownloaderError: once all ``tries`` attempts have failed (when a
            logger is provided; without a logger the error is only printed,
            preserving the original behavior).
    """

    def deco_retry(func):
        @wraps(func)
        def f_retry(self, *args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 0:
                try:
                    return func(self, *args, **kwargs)
                except exceptions as e:
                    msg = f"{e}. Error occurred, attempt: {tries - mtries + 1}/{tries}"
                    if proc_lock:
                        # only applied for process lock
                        err = "Stop all processes running from installation folder. "
                        err += f"Attempt: {tries - mtries + 1}/{tries}"
                        if mtries > 1:
                            err += " Autoretry in 60sec."
                        Downloader.toaster_notification("Failed", err)
                    # BUG FIX: previously non-proc_lock calls raised
                    # DownloaderError(msg) right here, so the decorator never
                    # actually retried; now every failure is logged and the
                    # loop continues until the attempts are exhausted.
                    if logger:
                        logger.warning(msg)
                    else:
                        print(msg)
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            else:
                # All attempts failed: report and (with a logger) raise.
                error = (
                    "Please verify that your connection is stable, avoid switching state of VPN during download. "
                    "For artifactory you have to be on VPN. "
                    f"Number of attempts: {tries}/{tries}"
                )
                if logger:
                    raise DownloaderError(error)
                else:
                    print(error)

        return f_retry  # true decorator

    return deco_retry
class Downloader:
"""
Main class that operates the download and installation process:
1. enables logs
2. parses arguments to get settings file
3. loads JSON to named tuple
4. gets URL for selected version based on server
5. downloads zip archive with BETA build
6. unpacks it to download folder
7. depending on the choice proceeds to installation of EDT or WB
8. uninstalls previous build if one exists
9. updates registry of EDT
10. sends statistics to the server
Performs checks:
1. if the same build date is already installed, then do not proceed to download
2. if some process is running from installation folder it will abort download
Notes:
Software attempts to download 4 times, if connection is still bad will abort
"""
def __init__(self, version, settings_folder="", settings_path=""):
"""
:parameter: version: version of the file, used if invoke file with argument -V to get version
self.build_artifactory_path (ArtifactoryPath): URL to the latest build that will be used to download archive
self.zip_file (str): path to the zip file on the PC
self.target_unpack_dir (str): path where to unpack .zip
self.installed_product_info (str): path to the product.info of the installed build
self.product_root_path (str): root path of Ansys Electronics Desktop/ Workbench installation
self.product_version (str): version to use in env variables eg 202
self.setup_exe (str): path to the setup.exe from downloaded and unpacked zip
self.remote_build_date (str): build date that receive from SharePoint
self.hash (str): hash code used for this run of the program
self.pid: pid (process ID of the current Python run, required to allow kill in UI)
self.ctx: context object to authorize in SharePoint using office365 module
self.settings_folder (str): default folder where all configurations would be saved
self.history_file (str): file where installation progress would be written (this file is tracked by UI)
self.history (dict): dict with history of installation processes
self.logging_file (str): file where detailed log for all runs is saved
"""
self.build_artifactory_path = ArtifactoryPath()
self.zip_file = ""
self.target_unpack_dir = ""
self.installed_product_info = ""
self.product_root_path = ""
self.product_version = ""
self.setup_exe = ""
self.remote_build_date = ""
self.pid = str(os.getpid())
self.ctx = None
self.warnings_list = []
self.hash = generate_hash_str()
if not settings_folder:
self.settings_folder = os.path.join(os.environ["APPDATA"], "build_downloader")
else:
self.settings_folder = settings_folder
self.check_and_make_directories(self.settings_folder)
self.history = {}
self.history_file = os.path.join(self.settings_folder, "installation_history.json")
self.get_installation_history()
self.logging_file = os.path.join(self.settings_folder, "downloader.log")
self.settings_path = settings_path if settings_path else self.parse_args(version)
with open(self.settings_path, "r") as file:
self.settings = json.load(file, object_hook=lambda d: SimpleNamespace(**d))
# this part of the code creates attributes that were added. Required for compatibility
if not hasattr(self.settings, "replace_shortcut"):
# v2.0.0
self.settings.replace_shortcut = True
if not hasattr(self.settings, "custom_flags"):
# v2.2.0
self.settings.custom_flags = ""
if not hasattr(self.settings, "license_file"):
# v3.0.0
self.settings.license_file = ""
if not hasattr(self.settings, "wb_assoc"):
# v3.0.0
self.settings.wb_assoc = ""
if "ElectronicsDesktop" in self.settings.version:
self.product_version = self.settings.version[1:4]
if float(self.product_version) >= 221:
self.product_root_path = os.path.join(
self.settings.install_path, "AnsysEM", f"v{self.product_version}", "Win64"
)
else:
self.product_root_path = os.path.join(
self.settings.install_path,
"AnsysEM",
"AnsysEM" + self.product_version[:2] + "." + self.product_version[2:],
"Win64",
)
self.installed_product_info = os.path.join(self.product_root_path, "product.info")
elif "Workbench" in self.settings.version:
self.product_root_path = os.path.join(self.settings.install_path, "ANSYS Inc", self.settings.version[:4])
elif "LicenseManager" in self.settings.version:
self.product_root_path = os.path.join(
self.settings.install_path, "ANSYS Inc", "Shared Files", "Licensing", "tools", "lmcenter"
)
def authorize_sharepoint(self):
    """
    Function that uses PnP to authorize user in SharePoint using Windows account and to get actual client_id and
    client_secret

    Returns: ctx: authorization context for Office365 library
    """
    self.update_installation_history(status="In-Progress", details="Authorizing in SharePoint")

    # Query the "secret_list" SharePoint list via PnP PowerShell; the output
    # is parsed line by line from the textual FieldValues dump.
    command = "powershell.exe "
    command += "Connect-PnPOnline -Url https://ansys.sharepoint.com/sites/BetaDownloader -UseWebLogin;"
    command += '(Get-PnPListItem -List secret_list -Fields "Title","client_id","client_secret").FieldValues'
    out_str = self.subprocess_call(command, shell=True, popen=True)

    secret_list = []
    try:
        for line in out_str.splitlines():
            # A "Title" line starts a new record; the other two fields are
            # appended to the record started most recently.
            if "Title" in line:
                secret_dict = {"Title": line.split()[1]}
            elif "client_id" in line:
                secret_dict["client_id"] = line.split()[1]
            elif "client_secret" in line:
                secret_dict["client_secret"] = line.split()[1]
                secret_list.append(secret_dict)
    except NameError:
        # secret_dict never created -> PowerShell produced no records
        raise DownloaderError("Cannot retrieve authentication tokens for SharePoint")

    # Newest entry (highest Title) wins.
    # NOTE(review): secret_list[0] raises IndexError if the list is empty but
    # no NameError occurred - verify against actual PowerShell output.
    secret_list.sort(key=lambda elem: elem["Title"], reverse=True)
    context_auth = AuthenticationContext(url=SHAREPOINT_SITE_URL)
    context_auth.acquire_token_for_app(
        client_id=secret_list[0]["client_id"], client_secret=secret_list[0]["client_secret"]
    )
    ctx = ClientContext(SHAREPOINT_SITE_URL, context_auth)
    return ctx
def run(self):
    """
    Function that executes the download-installation process
    :return: None
    """
    try:
        set_logger(self.logging_file)
        logging.info(f"Settings path is set to {self.settings_path}")
        if self.settings.artifactory == "SharePoint":
            self.ctx = self.authorize_sharepoint()

        self.update_installation_history(status="In-Progress", details="Verifying configuration")
        self.check_and_make_directories(self.settings.install_path, self.settings.download_path)
        if "ElectronicsDesktop" in self.settings.version or "Workbench" in self.settings.version:
            space_required = 15
            # License Manager can be updated even if running
            self.check_process_lock()
        else:
            # License Manager branch: a valid license file is mandatory.
            if not self.settings.license_file:
                raise DownloaderError("No license file defined. Please select it in Advanced Settings")

            if not os.path.isfile(self.settings.license_file):
                raise DownloaderError(f"No license file was detected under {self.settings.license_file}")
            space_required = 1

        # Both the download target and install target need free space.
        self.check_free_space(self.settings.download_path, space_required)
        self.check_free_space(self.settings.install_path, space_required)

        self.get_build_link()
        if self.settings.force_install or self.newer_version_exists:
            self.download_file()
            if "ElectronicsDesktop" in self.settings.version or "Workbench" in self.settings.version:
                self.check_process_lock()  # download can take time, better to recheck again
            self.install()

            # Statistics failures are downgraded to warnings, not errors.
            try:
                self.send_statistics()
            except Exception:
                self.warnings_list.append("Connection to product improvement server failed")
            self.update_installation_history(status="Success", details="Normal completion")
        else:
            raise DownloaderError("Versions are up to date. If issue occurred please use force install flag")
        return
    except DownloaderError as e:
        # all caught errors are here
        logging.error(e)
        self.update_installation_history(status="Failed", details=str(e))
    except Exception:
        logging.error(traceback.format_exc())
        self.update_installation_history(status="Failed", details="Unexpected error, see logs")
        self.send_statistics(error=traceback.format_exc())
    self.clean_temp()
@staticmethod
def check_and_make_directories(*paths):
    """
    Ensure that every requested directory exists, creating missing ones.

    Translates OS-level failures into DownloaderError with a user-friendly
    message (permissions and BitLocker-locked drives get dedicated text).

    :parameter: paths: list of paths that we need to check and create
    """
    for directory in paths:
        if os.path.isdir(directory):
            continue
        try:
            os.makedirs(directory)
        except PermissionError:
            raise DownloaderError(f"{directory} could not be created due to insufficient permissions")
        except OSError as err:
            if "BitLocker" in str(err):
                raise DownloaderError("Your drive is locked by BitLocker. Please unlock!")
            raise DownloaderError(err)
@staticmethod
def check_free_space(path, required):
    """
    Verify that the drive holding *path* has enough free space.

    :param path: path where to check
    :param required: value in GB that should be available on drive to pass the check
    :raises DownloaderError: when free space is below the requirement
    """
    available_gb = shutil.disk_usage(path).free >> 30  # bytes -> whole GB
    if available_gb >= required:
        return
    err = f"Disk space in {path} is less than {required}GB. This would not be enough to proceed"
    raise DownloaderError(err)
@retry((DownloaderError,), tries=3, delay=60, logger=logging, proc_lock=True)
def check_process_lock(self):
    """
    Verify if some executable is running from installation folder
    Abort installation if any process is running from installation folder
    :return: None
    """
    process_list = []
    for process in psutil.process_iter():
        try:
            # Collect every process whose executable lives under the
            # product installation root.
            if self.product_root_path in process.exe():
                process_list.append(process.name())
        except psutil.AccessDenied:
            # System/other-user processes may refuse exe() inspection.
            pass

    if process_list:
        process_list.sort(key=len)  # to fit into UI
        raise DownloaderError(
            "Following processes are running from installation directory: "
            + f"{', '.join(set(process_list))}. Please stop all processes."
        )
@property
def newer_version_exists(self):
    """
    verify if version on the server is newer compared to installed
    Returns:
        (bool) True if remote is newer or no version is installed, False if remote is the same or older
    """
    if "Workbench" in self.settings.version:
        product_installed = os.path.join(self.product_root_path, "package.id")
    elif "LicenseManager" in self.settings.version:
        # always update LM
        return True
    else:
        product_installed = self.installed_product_info

    if os.path.isfile(product_installed):
        if "Workbench" in self.settings.version:
            with open(product_installed) as file:
                installed_product_version = next(file).rstrip().split()[-1]  # get first line
            try:
                # drop a patch suffix like "P1" before comparing numerically
                installed_product_version = int(installed_product_version.split("P")[0])
            except ValueError:
                installed_product_version = 0
        else:
            installed_product_version = self.get_edt_build_date(product_installed)

        logging.info(f"Installed version of {self.settings.version} is {installed_product_version}")
        new_product_version = self.get_new_build_date()

        if not all([new_product_version, installed_product_version]):
            # some of the versions could not be parsed, need installation
            return True

        if new_product_version <= installed_product_version:
            return False

    # No installed version found (or remote is newer): install.
    return True
def get_new_build_date(self, distribution="winx64"):
"""
Create URL for new build extraction or extract version from self.remote_build_date for SharePoint download
Returns:
new_product_version (int) version of the product on the server
"""
if self.settings.artifactory != "SharePoint":
if "Workbench" in self.settings.version:
url = self.build_artifactory_path.joinpath("package.id")
elif "ElectronicsDesktop" in self.settings.version:
system = "windows" if distribution == "winx64" else "linux"
url = self.build_artifactory_path.parent.joinpath(f"product_{system}.info")
else:
# todo add license manager handling
return 0
logging.info(f"Request info about new package: {url}")
new_product_version = self.get_build_info_file_from_artifactory(url)
else:
try:
new_product_version = int(self.remote_build_date)
except ValueError:
return 0
logging.info(f"Version on artifactory/SP is {new_product_version}")
return new_product_version
    @retry((HTTPError, RequestException, ConnectionError, ConnectionResetError), 4, logger=logging)
    def get_build_link(self, distribution="winx64"):
        """
        Function that sends HTTP request to JFrog and get the list of folders with builds for Electronics Desktop and
        checks user password
        If use SharePoint then readdress to SP list

        :param distribution: platform key, "winx64" by default
        :raises DownloaderError: on missing credentials, unknown product version or missing artifacts
        :modify: (str) self.build_artifactory_path: URL link to the latest build that will be used to download archive
        """
        self.update_installation_history(status="In-Progress", details="Search latest build URL")
        if self.settings.artifactory == "SharePoint":
            # SharePoint has its own discovery flow; it sets build_artifactory_path itself
            self.get_sharepoint_build_info()
            return
        if not hasattr(self.settings.password, self.settings.artifactory):
            raise DownloaderError(f"Please provide password for {self.settings.artifactory}")
        password = getattr(self.settings.password, self.settings.artifactory)
        if not self.settings.username or not password:
            raise DownloaderError("Please provide username and artifactory password")
        server = ARTIFACTORY_DICT[self.settings.artifactory]
        art_path = ArtifactoryPath(server, auth=(self.settings.username, password), timeout=TIMEOUT)
        try:
            repos_list = art_path.get_repositories(lazy=True)
        except ArtifactoryException as err:
            raise DownloaderError(f"Cannot retrieve repositories. Error: {err}")
        # fill the dictionary with Electronics Desktop and Workbench keys since builds could be different
        # still parse the list because of different names on servers
        artifacts_dict = {}
        for repo in repos_list:
            repo_name = repo.name
            # map repository naming conventions onto "<version>_<product>" keys
            if "EBU_Certified" in repo_name:
                version = repo_name.split("_")[0] + "_ElectronicsDesktop"
            elif "Certified" in repo_name and "Licensing" not in repo_name:
                version = repo_name.split("_")[0] + "_Workbench"
            elif "Certified" in repo_name and "Licensing" in repo_name:
                version = repo_name.split("_")[0] + "_LicenseManager"
            else:
                continue
            if version not in artifacts_dict:
                # extract real repo name in case it is cached
                if art_path.joinpath(repo_name).exists():
                    # repo might be syncing (happens on new release addition)
                    artifacts_dict[version] = art_path.joinpath(repo_name).stat().repo
        try:
            repo = artifacts_dict[self.settings.version]
        except KeyError:
            raise DownloaderError(
                f"Version {self.settings.version} that you have specified "
                + f"does not exist on {self.settings.artifactory}"
            )
        path = ""
        art_path = art_path.joinpath(str(repo))
        if "ElectronicsDesktop" in self.settings.version:
            archive = "winx64.zip" if distribution == "winx64" else "linx64.tgz"
            builds_dates = []
            # build folders are named with numeric dates; keep only those containing the archive
            for relative_p in art_path:
                try:
                    archive_exists = list(relative_p.glob(f"Electronics*{archive}"))
                    if archive_exists:
                        builds_dates.append(int(relative_p.name))
                except ValueError:
                    # folder name is not numeric -> not a build folder
                    pass
            if not builds_dates:
                raise DownloaderError("Artifact does not exist")
            latest_build = sorted(builds_dates)[-1]
            art_path = art_path.joinpath(str(latest_build))
            for path in art_path:
                if archive in path.name:
                    break
            else:
                raise DownloaderError(f"Cannot find {distribution} archive file")
        elif "Workbench" in self.settings.version or "LicenseManager" in self.settings.version:
            path = art_path.joinpath(distribution)
        if not path:
            raise DownloaderError("Cannot receive URL")
        self.build_artifactory_path = path
def get_sharepoint_build_info(self):
"""
Gets link to the latest build from SharePoint and builddate itself
Returns: None
"""
product_list = self.ctx.web.lists.get_by_title("product_list")
items = product_list.items
self.ctx.load(items).execute_query()
build_list = []
for item in items:
title = item.properties["Title"]
if title != self.settings.version:
continue
build_dict = {
"Title": title,
"build_date": item.properties["build_date"],
"relative_url": item.properties["relative_url"],
}
build_list.append(build_dict)
build_list.sort(key=lambda elem: elem["build_date"], reverse=True)
build_dict = build_list[0] if build_list else {}
if not build_dict:
raise DownloaderError(f"No version of {self.settings.version} is available on SharePoint")
self.build_artifactory_path = build_dict["relative_url"]
self.remote_build_date = build_dict["build_date"]
def download_file(self):
"""
Downloads file in chunks and saves to the temp.zip file
Uses url to the zip archive or special JFrog API to download Workbench folder
:modify: (str) zip_file: link to the zip file
"""
if self.settings.artifactory == "SharePoint" or "win" in self.build_artifactory_path.name:
archive_type = "zip"
else:
archive_type = "tgz"
self.zip_file = os.path.join(self.settings.download_path, f"{self.settings.version}.{archive_type}")
chunk_size = 50 * 1024 * 1024
if self.settings.artifactory == "SharePoint":
self.download_from_sharepoint(chunk_size=chunk_size)
else:
self.download_from_artifactory(archive_type, chunk_size=chunk_size)
logging.info(f"File is downloaded to {self.zip_file}")
    @retry((HTTPError, RequestException, ConnectionError, ConnectionResetError), 4, logger=logging)
    def download_from_artifactory(self, archive_type, chunk_size):
        """
        Download file from Artifactory
        Args:
            archive_type: type of the archive, zip or tgz
            chunk_size: (int) chunk size in bytes when download
        Raises:
            DownloaderError: when the repository is replicating, the download
                fails, or the MD5 checksum does not match
        Returns: None
        """
        # downloading while the repository replicates could produce an incomplete file
        if not self.build_artifactory_path.replication_status["status"] in ["ok", "never_run"]:
            raise DownloaderError("Currently Artifactory repository is replicating, please try later")
        logging.info(f"Start download file from {self.build_artifactory_path} to {self.zip_file}")
        self.update_installation_history(
            status="In-Progress", details=f"Downloading file from {self.settings.artifactory}"
        )
        if "ElectronicsDesktop" in self.settings.version:
            # single archive file: download directly and verify its MD5 hash afterwards
            file_stats = self.build_artifactory_path.stat()
            arti_file_md5 = file_stats.md5
            logging.info(f"Artifactory hash: {arti_file_md5}")
            try:
                self.build_artifactory_path.writeto(
                    out=self.zip_file, chunk_size=chunk_size, progress_func=self.print_download_progress
                )
            except RuntimeError as err:
                raise DownloaderError(f"Cannot download file. Server returned status code: {err}")
            local_md5_hash = md5sum(self.zip_file)
            logging.info(f"Local file hash: {local_md5_hash}")
            if local_md5_hash != arti_file_md5:
                raise DownloaderError("Downloaded file MD5 hash is different")
        elif "Workbench" in self.settings.version or "LicenseManager" in self.settings.version:
            # folder download through the archive API; size is only known approximately
            try:
                file_size = self.get_artifactory_folder_size()
                logging.info(f"Workbench/License Manager real file size is {file_size}")
            except (TypeError, ValueError):
                # fall back to a rough estimate, only used for progress reporting
                file_size = 14e9
            archive_url = self.build_artifactory_path.archive(archive_type=archive_type)
            archive_url.writeto(
                out=self.zip_file,
                chunk_size=chunk_size,
                progress_func=lambda x, y: self.print_download_progress(x, file_size),
            )
    def get_artifactory_folder_size(self):
        """
        Compute the total size of the remote build folder.

        Runs an AQL query against the repository via the artifactory-du
        helpers and parses the du-style text output.
        Returns: (int) folder size in bytes
        """
        aql_query_dict, max_depth_print = artifactory_du.prepare_aql(
            file=f"/{self.build_artifactory_path.name}",
            max_depth=0,
            repository=self.build_artifactory_path.repo,
            without_downloads="",
            older_than="",
        )
        artifacts = artifactory_du.artifactory_aql(
            artifactory_url=str(self.build_artifactory_path.drive),
            aql_query_dict=aql_query_dict,
            username=self.build_artifactory_path.auth[0],
            password=self.build_artifactory_path.auth[1],
            kerberos=False,
            verify=False,
        )
        file_size = artifactory_du.out_as_du(artifacts, max_depth_print, human_readable=False)
        # drop slash characters so the remaining text parses as an integer
        file_size = int(file_size.strip("/"))
        return file_size
    @retry(
        (HTTPError, RequestException, ConnectionError, ConnectionResetError, ClientRequestException),
        tries=4,
        logger=logging,
    )
    def download_from_sharepoint(self, chunk_size):
        """
        Downloads file from Sharepoint
        Args:
            chunk_size: (int) chunk size in bytes when download
        Raises:
            DownloaderError: on a broken URL, out-of-disk, permission error,
                or when the downloaded size differs from the reported size
        Returns: None
        """
        self.update_installation_history(status="In-Progress", details="Downloading file from SharePoint")
        remote_file = self.ctx.web.get_file_by_server_relative_url(
            f"/sites/BetaDownloader/{self.build_artifactory_path}"
        )
        remote_file.get()
        try:
            self.ctx.execute_query()
        except ClientRequestException as err:
            logging.error(str(err))
            raise DownloaderError(
                "URL on SharePoint is broken. Report an issue to betadownloader@ansys.com. "
                "In meantime please switch to any other repository."
            )
        file_size = remote_file.length
        # verify free space up front (argument is in GB)
        self.check_free_space(self.settings.download_path, file_size / 1024 / 1024 / 1024)
        try:
            with open(self.zip_file, "wb") as zip_file:
                try:
                    remote_file.download_session(
                        zip_file,
                        lambda offset: self.print_download_progress(offset, total_size=file_size),
                        chunk_size=chunk_size,
                    )
                    self.ctx.execute_query()
                except OSError as err:
                    if err.errno == errno.ENOSPC:
                        raise DownloaderError("No disk space available in download folder!")
                    raise
        except PermissionError as err:
            # strip the "[Errno 13]" prefix for a cleaner user-facing message
            msg = str(err).replace("[Errno 13]", "").strip()
            raise DownloaderError(msg)
        if not self.zip_file:
            raise DownloaderError("ZIP download failed")
        # tolerate up to 5% mismatch between reported and downloaded size
        if abs(os.path.getsize(self.zip_file) - file_size) > 0.05 * file_size:
            raise DownloaderError("File size difference is more than 5%")
def print_download_progress(self, offset, total_size):
msg = "Downloaded {}/{}MB...[{}%]".format(
int(offset / 1024 / 1024), int(total_size / 1024 / 1024), min(round(offset / total_size * 100, 2), 100)
)
logging.info(msg)
self.update_installation_history(status="In-Progress", details=msg)
def install(self, local_lang=False):
"""
Unpack downloaded zip and proceed to installation. Different executions for Electronics Desktop and Workbench
:param local_lang: if not specified then use English as default installation language
:return: None
"""
self.unpack_archive()
if "ElectronicsDesktop" in self.settings.version:
self.install_edt()
elif "Workbench" in self.settings.version:
self.install_wb(local_lang)
else:
self.install_license_manager()
self.update_installation_history(status="In-Progress", details="Clean temp directory")
self.clean_temp()
def unpack_archive(self):
self.update_installation_history(status="In-Progress", details="Start unpacking")
self.target_unpack_dir = self.zip_file.replace(".zip", "")
try:
with zipfile.ZipFile(self.zip_file, "r") as zip_ref:
zip_ref.extractall(self.target_unpack_dir)
except OSError as err:
if err.errno == errno.ENOSPC:
raise DownloaderError("No disk space available in download folder!")
else:
raise DownloaderError(f"Cannot unpack due to {err}")
except (zipfile.BadZipFile, zlib.error):
raise DownloaderError("Zip file is broken. Please try again later or use another repository.")
logging.info(f"File is unpacked to {self.target_unpack_dir}")
    def install_edt(self):
        """
        Install Electronics Desktop. Make verification that the same version is not yet installed and makes
        silent installation
        Get Workbench installation path from environment variable and enables integration if exists.
        :return: None
        """
        # extract setup.exe, product GUID and InstallShield version from the unpacked package
        setup_exe, product_id, installshield_version = self.parse_iss_template(self.target_unpack_dir)
        # remove a previously installed build of the same version first
        self.uninstall_edt(setup_exe, product_id, installshield_version)
        install_iss_file, install_log_file = self.create_install_iss_file(installshield_version, product_id)
        # InstallShield flags: -s silent mode, -f1 response file, -f2 log file
        command = [f'"{setup_exe}"', "-s", rf'-f1"{install_iss_file}"', rf'-f2"{install_log_file}"']
        command = " ".join(command)
        self.update_installation_history(status="In-Progress", details="Start installation")
        logging.info("Execute installation")
        self.subprocess_call(command)
        self.check_result_code(install_log_file)
        self.update_edt_registry()
        self.remove_aedt_shortcuts()
    def create_install_iss_file(self, installshield_version, product_id):
        """
        Generate the InstallShield response (.iss) file for a silent AEDT install.

        Turns Workbench integration on when a RunWB2.exe is found under the
        AWP_ROOT environment variable, and picks the license-server template
        depending on whether an ansyslmd.ini already exists in the target
        install folder.

        Args:
            installshield_version: version string extracted from the package template
            product_id: product GUID extracted from the package template
        Returns:
            (tuple) paths of the generated install.iss and install.log files
        """
        install_iss_file = os.path.join(self.target_unpack_dir, "install.iss")
        install_log_file = os.path.join(self.target_unpack_dir, "install.log")
        integrate_wb = 0
        awp_env_var = "AWP_ROOT" + self.product_version
        if awp_env_var in os.environ:
            run_wb = os.path.join(os.environ[awp_env_var], "Framework", "bin", "Win64", "RunWB2.exe")
            if os.path.isfile(run_wb):
                integrate_wb = 1
                logging.info("Integration with Workbench turned ON")
        # the "shared files" is created at the same level as the "AnsysEMxx.x" so if installing to unique folders,
        # the Shared Files folder will be unique as well. Thus we can check install folder for license
        if os.path.isfile(
            os.path.join(self.settings.install_path, "AnsysEM", "Shared Files", "Licensing", "ansyslmd.ini")
        ):
            install_iss = iss_templates.install_iss + iss_templates.existing_server
            logging.info("Install using existing license configuration")
        else:
            install_iss = iss_templates.install_iss + iss_templates.new_server
            logging.info("Install using 127.0.0.1, Otterfing and HQ license servers")
        with open(install_iss_file, "w") as file:
            file.write(
                install_iss.format(
                    product_id,
                    os.path.join(self.settings.install_path, "AnsysEM"),
                    os.environ["TEMP"],
                    integrate_wb,
                    installshield_version,
                )
            )
        return install_iss_file, install_log_file
    def uninstall_edt(self, setup_exe, product_id, installshield_version):
        """
        Silently uninstall build of the same version

        Args:
            setup_exe: path to the package's setup.exe
            product_id: product GUID used in the uninstall response file
            installshield_version: InstallShield version used in the response file
        :return: None
        """
        if os.path.isfile(self.installed_product_info):
            uninstall_iss_file = os.path.join(self.target_unpack_dir, "uninstall.iss")
            uninstall_log_file = os.path.join(self.target_unpack_dir, "uninstall.log")
            with open(uninstall_iss_file, "w") as file:
                file.write(iss_templates.uninstall_iss.format(product_id, installshield_version))
            # InstallShield flags: -uninst uninstall, -s silent, -f1 response file, -f2 log file
            command = [
                f'"{setup_exe}"',
                "-uninst",
                "-s",
                rf'-f1"{uninstall_iss_file}"',
                rf'-f2"{uninstall_log_file}"',
            ]
            command = " ".join(command)
            logging.info("Execute uninstallation")
            self.update_installation_history(status="In-Progress", details="Uninstall previous build")
            self.subprocess_call(command)
            self.check_result_code(uninstall_log_file, False)
            # force-remove whatever the official uninstaller left behind
            em_main_dir = os.path.dirname(self.product_root_path)
            self.remove_path(em_main_dir)
            if os.path.isdir(em_main_dir):
                raise DownloaderError(f"Failed to remove {em_main_dir}. Probably directory is locked.")
        else:
            logging.info("Version is not installed, skip uninstallation")
def check_result_code(self, log_file, installation=True):
"""
Verify result code of the InstallShield log file
:param log_file: installation log file
:param installation: True if verify log after installation elif after uninstallation False
:return: None
"""
success = "New build was successfully installed" if installation else "Previous build was uninstalled"
fail = "Installation went wrong" if installation else "Uninstallation went wrong"
if not os.path.isfile(log_file):
raise DownloaderError(f"{fail}. Check that UAC disabled or confirm UAC question manually")
msg = fail
regex = "ResultCode=(.*)"
with open(log_file) as file:
for line in file:
code = re.findall(regex, line)
if code and code[0] == "0":
logging.info(success)
break
else:
if not installation:
msg = "Official uninstaller failed, make hard remove"
logging.error(msg)
self.warnings_list.append(msg)
else:
raise DownloaderError(msg)
@staticmethod
def get_edt_build_date(product_info_file="", file_content=None):
"""
extract information about Electronics Desktop build date and version
Args:
product_info_file: path to the product.info
file_content (list): accepts list with file content as well instead of file
Returns: (int) build_date
"""
if os.path.isfile(product_info_file):
with open(product_info_file) as file:
file_content = file.readlines()
for line in file_content:
if "AnsProductBuildDate" in line:
full_build_date = line.split("=")[1].replace('"', "").replace("-", "")
build_date = full_build_date.split()[0]
break
else:
# cannot find line with build date
return 0
try:
return int(build_date)
except ValueError:
return 0
@staticmethod
def parse_iss_template(unpacked_dir):
"""
Open directory with unpacked build of Electronics Desktop and search for SilentInstallationTemplate.iss to
extract product ID which is GUID hash
Args:
unpacked_dir: directory where AEDT package was unpacked
Returns:
product_id: product GUID extracted from iss template
setup_exe: set path to setup.exe if exists
installshield_version: set version from file
"""
default_iss_file = ""
setup_exe = ""
product_id_match = []
for dir_path, dir_names, file_names in os.walk(unpacked_dir):
for filename in file_names:
if "AnsysEM" in dir_path and filename.endswith(".iss"):
default_iss_file = os.path.join(dir_path, filename)
setup_exe = os.path.join(dir_path, "setup.exe")
break
if not default_iss_file:
raise DownloaderError("SilentInstallationTemplate.iss does not exist")
if not os.path.isfile(setup_exe):
raise DownloaderError("setup.exe does not exist")
with open(default_iss_file, "r") as iss_file:
for line in iss_file:
if "DlgOrder" in line:
guid_regex = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
product_id_match = re.findall(guid_regex, line)
if "InstallShield Silent" in line:
installshield_version = next(iss_file).split("=")[1]
if product_id_match:
product_id = product_id_match[0]
logging.info(f"Product ID is {product_id}")
else:
raise DownloaderError("Unable to extract product ID")
return setup_exe, product_id, installshield_version
    def install_license_manager(self):
        """
        Install license manager and feed it with license file

        Runs the package setup.exe silently and then compares the build date
        of the package against the build date of the installed product to
        confirm the installation succeeded.
        :raises DownloaderError: when setup.exe or the license file is missing,
            or when the installed build date does not match the package
        """
        self.setup_exe = os.path.join(self.target_unpack_dir, "setup.exe")
        if os.path.isfile(self.setup_exe):
            install_path = os.path.join(self.settings.install_path, "ANSYS Inc")
            if not os.path.isfile(self.settings.license_file):
                raise DownloaderError(f"No license file was detected under {self.settings.license_file}")
            # -LM selects the License Manager component of the unified installer
            command = [
                self.setup_exe,
                "-silent",
                "-LM",
                "-install_dir",
                install_path,
                "-lang",
                "en",
                "-licfilepath",
                self.settings.license_file,
            ]
            self.update_installation_history(status="In-Progress", details="Start installation")
            logging.info("Execute installation")
            self.subprocess_call(command)
            # verify the install by comparing package vs installed build dates
            package_build = self.parse_lm_installer_builddate()
            installed_build = self.get_license_manager_build_date()
            if all([package_build, installed_build]) and package_build == installed_build:
                self.update_installation_history(status="Success", details="Normal completion")
            else:
                raise DownloaderError("License Manager was not installed")
        else:
            raise DownloaderError("No LicenseManager setup.exe file detected")
def parse_lm_installer_builddate(self):
"""
Check build date of installation package of License Manager
"""
build_file = os.path.join(self.target_unpack_dir, "builddate.txt")
lm_center_archive = os.path.join(self.target_unpack_dir, "lmcenter", "WINX64.7z")
if not os.path.isfile(build_file) and os.path.isfile(lm_center_archive):
with py7zr.SevenZipFile(lm_center_archive, "r") as archive:
archive.extractall(path=os.path.join(self.target_unpack_dir, "lmcenter"))
build_file = os.path.join(
self.target_unpack_dir,
"lmcenter",
"Shared Files",
"licensing",
"tools",
"lmcenter",
"lmcenter_blddate.txt",
)
if not os.path.isfile(build_file):
# check again if file was unpacked
logging.warning("builddate.txt was not found in installation package")
return
with open(build_file) as file:
for line in file:
if "license management center" in line.lower():
lm_build_date = line.split()[-1]
try:
logging.info(f"Build date of License Manager in installation package {lm_build_date}")
lm_build_date = int(lm_build_date)
return lm_build_date
except TypeError:
raise DownloaderError("Cannot extract build date of installation package")
def get_license_manager_build_date(self):
"""
Check build date of installed License Manager
"""
build_date_file = os.path.join(self.product_root_path, "lmcenter_blddate.txt")
if not os.path.isfile(build_date_file):
raise DownloaderError("lmcenter_blddate.txt is not available")
with open(build_date_file) as file:
lm_build_date = next(file).split()[-1]
try:
logging.info(f"Newly installed build date of License Manager: {lm_build_date}")
lm_build_date = int(lm_build_date)
return lm_build_date
except (TypeError, ValueError):
raise DownloaderError("Cannot extract build date of installed License Manager")
    def install_wb(self, local_lang=False):
        """
        Install Workbench to the target installation directory
        :param local_lang: if not specified then use English as default installation language
        :raises DownloaderError: when setup.exe is missing or installation fails
        """
        self.setup_exe = os.path.join(self.target_unpack_dir, "setup.exe")
        if os.path.isfile(self.setup_exe):
            # remove a previous build first; its uninstaller path doubles as the success marker below
            uninstall_exe = self.uninstall_wb()
            install_path = os.path.join(self.settings.install_path, "ANSYS Inc")
            command = [self.setup_exe, "-silent", "-install_dir", install_path]
            if not local_lang:
                command += ["-lang", "en"]
            command += self.settings.wb_flags.split()
            # the "shared files" is created at the same level as the "ANSYS Inc" so if installing to unique folders,
            # the Shared Files folder will be unique as well. Thus we can check install folder for license
            if (
                os.path.isfile(os.path.join(install_path, "Shared Files", "Licensing", "ansyslmd.ini"))
                or "ANSYSLMD_LICENSE_FILE" in os.environ
            ):
                logging.info("Install using existing license configuration")
            else:
                command += ["-licserverinfo", "2325:1055:127.0.0.1,OTTLICENSE5,PITRH6LICSRV1"]
                logging.info("Install using 127.0.0.1, Otterfing and HQ license servers")
            # convert command to string to easy append custom flags
            command = subprocess.list2cmdline(command)
            command += " " + self.settings.custom_flags
            self.update_installation_history(status="In-Progress", details="Start installation")
            logging.info("Execute installation")
            self.subprocess_call(command)
            # the uninstaller exists only after a successful installation
            if os.path.isfile(uninstall_exe):
                logging.info("New build was installed")
            else:
                raise DownloaderError(
                    "Workbench installation failed. "
                    + f"If you see this error message by mistake please report to {__email__}"
                )
            if self.settings.wb_assoc:
                # optional file-association step against an existing WB installation
                wb_assoc_exe = os.path.join(self.settings.wb_assoc, "commonfiles", "tools", "winx64", "fileassoc.exe")
                if not os.path.isfile(wb_assoc_exe):
                    self.warnings_list.append(f"Cannot find {wb_assoc_exe}")
                else:
                    logging.info("Run WB file association")
                    self.subprocess_call(wb_assoc_exe)
        else:
            raise DownloaderError("No Workbench setup.exe file detected")
def uninstall_wb(self):
"""
Uninstall Workbench if such exists in the target installation directory
:return: uninstall_exe: name of the executable of uninstaller"""
uninstall_exe = os.path.join(self.product_root_path, "Uninstall.exe")
if os.path.isfile(uninstall_exe):
command = [uninstall_exe, "-silent"]
self.update_installation_history(status="In-Progress", details="Uninstall previous build")
logging.info("Execute uninstallation")
self.subprocess_call(command)
logging.info("Previous build was uninstalled using uninstaller")
else:
logging.info("No Workbench Uninstall.exe file detected")
self.remove_path(self.product_root_path)
return uninstall_exe
def remove_path(self, path):
"""
Function to safely remove path if rmtree fails
:param path:
:return:
"""
def hard_remove():
try:
# try this dirty method to force remove all files in directory
all_files = os.path.join(path, "*.*")
command = ["DEL", "/F", "/Q", "/S", all_files, ">", "NUL"]
self.subprocess_call(command, shell=True)
command = ["rmdir", "/Q", "/S", path]
self.subprocess_call(command, shell=True)
except Exception as err:
logging.error(str(err))
logging.error("Failed to remove directory via hard_remove")
self.warnings_list.append("Failed to remove directory")
logging.info(f"Removing {path}")
if os.path.isdir(path):
try:
shutil.rmtree(path)
except PermissionError:
logging.warning("Permission error. Switch to CMD force mode")
hard_remove()
self.warnings_list.append("Clean remove failed due to Permissions Error")
except (FileNotFoundError, OSError, Exception):
logging.warning("FileNotFoundError or other error. Switch to CMD force mode")
hard_remove()
self.warnings_list.append("Clean remove failed due to Not Found or OS Error")
elif os.path.isfile(path):
os.remove(path)
def get_build_info_file_from_artifactory(self, build_info):
"""
Downloads product info file from artifactory
:param (ArtifactoryPath) build_info: arti path to the package_id file
:return: (int): package_id if extracted
"""
product_info = 0
package_info = build_info.read_text()
try:
if "Workbench" in self.settings.version:
first_line = package_info.split("\n")[0]
product_info = first_line.rstrip().split()[-1]
try:
product_info = int(product_info.split("P")[0])
except ValueError:
pass
else:
product_info = self.get_edt_build_date(file_content=package_info.split("\n"))
except IndexError:
pass
return product_info
def clean_temp(self):
"""
Cleans downloaded zip and unpacked folder with content
:return: None
"""
try:
if os.path.isfile(self.zip_file) and self.settings.delete_zip:
self.remove_path(self.zip_file)
logging.info("ZIP deleted")
if os.path.isdir(self.target_unpack_dir):
self.remove_path(self.target_unpack_dir)
logging.info("Unpacked directory removed")
except PermissionError:
logging.error("Temp files could not be removed due to permission error")
    def remove_aedt_shortcuts(self):
        """
        Function to remove newly created AEDT shortcuts and replace them with new one
        """
        if not self.settings.replace_shortcut:
            return
        # include Public, user folder and user folder when OneDrive sync is enabled
        # NOTE(review): os.getenv("username") may be None on non-Windows shells -- TODO confirm
        for user in ["Public", os.getenv("username"), os.path.join(os.getenv("username"), "OneDrive - ANSYS, Inc")]:
            desktop = os.path.join("C:\\", "Users", user, "Desktop")
            # drop auxiliary product shortcuts created by the installer
            for shortcut in [
                "ANSYS Savant",
                "ANSYS EMIT",
                "ANSYS SIwave",
                "ANSYS Twin Builder",
                "Ansys Nuhertz FilterSolutions",
            ]:
                self.remove_path(os.path.join(desktop, shortcut + ".lnk"))
            # rename the main shortcut to a version-specific name, e.g. "2021R2 Electronics Desktop"
            new_name = os.path.join(
                desktop, f"20{self.product_version[:2]}R{self.product_version[2:]} Electronics Desktop.lnk"
            )
            aedt_shortcut = os.path.join(desktop, "ANSYS Electronics Desktop.lnk")
            if not os.path.isfile(new_name):
                try:
                    os.rename(aedt_shortcut, new_name)
                except FileNotFoundError:
                    pass
            else:
                # versioned shortcut already exists: just delete the freshly created one
                self.remove_path(aedt_shortcut)
def update_edt_registry(self):
"""
Update Electronics Desktop registry based on the files in the HPC_Options folder that are added from UI
:return: None
"""
hpc_folder = os.path.join(self.settings_folder, "HPC_Options")
update_registry_exe = os.path.join(self.product_root_path, "UpdateRegistry.exe")
productlist_file = os.path.join(self.product_root_path, "config", "ProductList.txt")
if not os.path.isfile(productlist_file):
raise DownloaderError("Cannot update registry. Probably Electronics Desktop installation failed")
with open(productlist_file) as file:
product_version = next(file).rstrip() # get first line
self.update_installation_history(status="In-Progress", details="Update registry")
if os.path.isdir(hpc_folder):
for file in os.listdir(hpc_folder):
if ".acf" in file:
options_file = os.path.join(hpc_folder, file)
command = [update_registry_exe, "-ProductName", product_version, "-FromFile", options_file]
logging.info("Update registry")
self.subprocess_call(command)
def update_installation_history(self, status, details):
"""
Update ordered dictionary with new data and write it to the file
:param status: Failed | Success | In-Progress (important, used in JS)
:param details: Message for details field
:return:
"""
if status == "Failed" or status == "Success":
try:
self.toaster_notification(status, details)
except Exception:
msg = "Toaster notification did not work"
logging.error(msg)
self.warnings_list.append(msg)
self.get_installation_history() # in case if file was deleted during run of installation
time_now = datetime.datetime.now().strftime("%d-%m-%Y %H:%M")
shorten_path = self.settings_path.replace(os.getenv("APPDATA", "@@@"), "%APPDATA%")
if status == "Failed" or status == "Success":
if self.warnings_list:
details += "\nSome warnings occurred during process:\n" + "\n".join(self.warnings_list)
self.history[self.hash] = [status, self.settings.version, time_now, shorten_path, details, self.pid]
with open(self.history_file, "w") as file:
json.dump(self.history, file, indent=4)
@staticmethod
def toaster_notification(status, details):
"""
Send toaster notification
:param status: Failed | Success | In-Progress
:param details: Message for details field
:return:
"""
icon = "fail.ico" if status == "Failed" else "success.ico"
root_path = os.path.dirname(sys.argv[0])
icon_path = os.path.join(root_path, "notifications", icon)
if not os.path.isfile(icon_path):
root_path = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(root_path, "notifications", icon) # dev path
notification.notify(title=status, message=details, app_icon=icon_path, timeout=15)
def get_installation_history(self):
"""
Read a file with installation history
create a file if does not exist
:return: dict with history or empty in case if file was deleted during run of installation
"""
if os.path.isfile(self.history_file):
try:
with open(self.history_file) as file:
self.history = json.load(file)
except json.decoder.JSONDecodeError:
return
else:
self.history = {}
def send_statistics(self, error=None):
"""
Send usage statistics to the database.
Collect username, time, version and software installed
in case of crash send also crash data
:parameter: error: error message of what went wrong
:return: None
"""
version, tool = self.settings.version.split("_")
time_now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.settings.username = os.getenv("username", self.settings.username)
if self.settings.artifactory == "SharePoint":
self.send_statistics_to_sharepoint(tool, version, time_now, error)
else:
self.send_statistics_to_influx(tool, version, time_now, error)
    def send_statistics_to_influx(self, tool, version, time_now, error, downloader_ver=__version__):
        """
        Send statistics to InfluxDB
        Args:
            tool: product that is installed
            version: version
            time_now: time
            error: error if some crash occurred
            downloader_ver: version of the backend
        Returns:
            None
        """
        client = InfluxDBClient(host=STATISTICS_SERVER, port=STATISTICS_PORT)
        # successful downloads and crashes go into separate databases
        db_name = "downloads" if not error else "crashes"
        client.switch_database(db_name)
        json_body = [
            {
                "measurement": db_name,
                "tags": {
                    "username": self.settings.username,
                    "version": version,
                    "tool": tool,
                    "artifactory": self.settings.artifactory,
                    "downloader_ver": downloader_ver,
                },
                "time": time_now,
                "fields": {"count": 1},
            }
        ]
        if error:
            # attach the crash log as an extra tag on the data point
            json_body[0]["tags"]["log"] = error
        client.write_points(json_body)
def send_statistics_to_sharepoint(self, tool, version, time_now, error):
"""
Send statistics to SharePoint list
Args:
time_now: time
tool: product that is installed
version: version
error: error if some crash occurred
Returns:
None
"""
list_name = "statistics" if not error else "crashes"
target_list = self.ctx.web.lists.get_by_title(list_name)
item = {
"Title": self.settings.username,
"Date": str(time_now),
"tool": tool,
"version": version,
"in_influx": False,
"downloader_ver": __version__,
}
if error:
error = error.replace("\n", "#N").replace("\r", "")
item["error"] = error
target_list.add_item(item)
self.ctx.execute_query()
    @staticmethod
    def subprocess_call(command, shell=False, popen=False):
        """
        Wrapper for subprocess call to handle non admin run or UAC issue
        Args:
            command: (str/list) command to run
            shell: call with shell mode or not
            popen: in case if you need output we need to use Popen. Pyinstaller compiles in -noconsole, need
            to explicitly define stdout, in, err
        Returns:
            output (str), output of the command run
        """
        output = ""
        try:
            # log a printable form of the command regardless of its type
            if isinstance(command, list):
                command_str = subprocess.list2cmdline(command)
            else:
                command_str = command
            logging.info(command_str)
            if popen:
                p = subprocess.Popen(
                    command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell
                )
                # NOTE(review): stdout is fully read before communicate(); a process that
                # fills the stderr pipe could block here -- TODO confirm
                byte_output = p.stdout.read()
                output = byte_output.decode("utf-8").rstrip()
                p.communicate()
            else:
                subprocess.call(command, shell=shell)
            return output
        except OSError:
            raise DownloaderError("Please run as administrator and disable Windows UAC")
@staticmethod
def parse_args(version):
"""
Function to parse arguments provided to the script. Search for -p key to get settings path
:return: settings_path: path to the configuration file
"""
parser = argparse.ArgumentParser()
# Add long and short argument
parser.add_argument("--path", "-p", help="set path to the settings file generated by UI")
parser.add_argument("--version", "-V", action="version", version=f"%(prog)s version: {version}")
args = parser.parse_args()
if args.path:
settings_path = args.path
if not os.path.isfile(settings_path):
raise DownloaderError("Settings file does not exist")
return settings_path
else:
raise DownloaderError("Please provide --path argument")
def generate_hash_str():
    """
    Generate a random hexadecimal hash string.

    The letter A at the end is important to preserve order in JS.
    :return: hash code (str)
    """
    random_bits = random.getrandbits(32)
    return "{:x}A".format(random_bits).strip()
def set_logger(logging_file):
    """
    Set up logging output to a stream and a log file; used by UI and backend.

    :param logging_file: (str) path to the log file
    :return: None
    """
    work_dir = os.path.dirname(logging_file)
    # Guard against a bare file name (empty dirname would make makedirs fail)
    # and against the isdir/makedirs race by tolerating an existing directory.
    if work_dir:
        os.makedirs(work_dir, exist_ok=True)
    # add logging to console and log file
    # If you set the log level to INFO, it will include INFO, WARNING, ERROR, and CRITICAL messages
    logging.basicConfig(
        filename=logging_file,
        format="%(asctime)s (%(levelname)s) %(message)s",
        level=logging.INFO,
        datefmt="%d.%m.%Y %H:%M:%S",
    )
    logging.getLogger().addHandler(logging.StreamHandler())
# Script entry point: build the Downloader with the packaged version and run it.
if __name__ == "__main__":
    app = Downloader(__version__)
    app.run()
| 41.597561 | 118 | 0.610052 | 55,922 | 0.910811 | 0 | 0 | 19,382 | 0.315678 | 0 | 0 | 21,989 | 0.358139 |
2e9651f60def43e3b45da108464a7524c9c57ee1 | 14,752 | py | Python | spectrum/input.py | elifesciences/elife-spectrum | 05ed1dac44f4fd9271b30808e8381ab033c8ecc4 | [
"MIT"
] | null | null | null | spectrum/input.py | elifesciences/elife-spectrum | 05ed1dac44f4fd9271b30808e8381ab033c8ecc4 | [
"MIT"
] | 91 | 2016-12-12T10:00:00.000Z | 2021-10-18T02:51:39.000Z | spectrum/input.py | elifesciences/elife-spectrum | 05ed1dac44f4fd9271b30808e8381ab033c8ecc4 | [
"MIT"
] | 1 | 2017-04-13T22:26:46.000Z | 2017-04-13T22:26:46.000Z | """utility library for interacting with remote services, such as:
* dashboard
* journal
* elife-bot
* journal-cms
contains no tests to be run."""
from os import path
import random
import string
import requests
from econtools import econ_workflow
from pollute import modified_environ
import mechanicalsoup
from spectrum import aws, logger
from spectrum.config import SETTINGS
# Module-level logger shared by all helpers in this file.
LOGGER = logger.logger(__name__)
class InputBucket:
    """Thin wrapper around an S3 bucket used as a pipeline input."""

    def __init__(self, s3, bucket_name):
        self._s3 = s3
        self._bucket_name = bucket_name

    def upload(self, filename, destination_filename=None, id=None):
        """Upload a local file; the key defaults to the file's base name."""
        destination = destination_filename if destination_filename else path.basename(filename)
        client = self._s3.meta.client
        client.upload_file(filename, self._bucket_name, destination)
        LOGGER.info("Uploaded %s to %s/%s", filename, self._bucket_name, destination, extra={'id': id})

    def clean(self, prefix=None):
        """Delete the bucket's objects, optionally limited to a key prefix."""
        aws.clean_bucket(self._bucket_name, prefix)

    def name(self):
        """Return the wrapped bucket's name."""
        return self._bucket_name
class Dashboard:
    """Client for the article-publication dashboard's HTTP API."""

    def __init__(self, host, user, password):
        self._host = host
        self._user = user
        self._password = password

    def publish(self, id, version, run):
        """Queue publication of one article version/run via the dashboard API."""
        template = "%s/api/queue_article_publication"
        url = template % self._host
        payload = {'articles': [{'id': id, 'version': version, 'run': run}]}
        credentials = (self._user, self._password)
        response = requests.post(url, auth=credentials, json=payload, verify=False)
        assert response.status_code == 200, ("Response status was %s: %s" % (response.status_code, response.text))
        LOGGER.info(
            "Pressed Publish for %s version %s run %s on dashboard",
            url,
            version,
            run,
            extra={'id': id}
        )
class BotWorkflowStarter:
    """Starts elife-bot workflows by posting to its workflow-starter queue."""
    def __init__(self, aws_access_key_id, aws_secret_access_key, region_name, queue_name):
        self._aws_access_key_id = aws_access_key_id
        self._aws_secret_access_key = aws_secret_access_key
        self._region_name = region_name
        self._queue_name = queue_name
    def pubmed(self):
        """Start the PubmedArticleDeposit workflow (no workflow data)."""
        LOGGER.info("Starting workflow PubmedArticleDeposit")
        # modified_environ (from the pollute package) appears to expose the AWS
        # credentials as environment variables for the duration of the block —
        # confirm against pollute's documentation if changing this.
        with modified_environ(added={'AWS_ACCESS_KEY_ID': self._aws_access_key_id, 'AWS_SECRET_ACCESS_KEY': self._aws_secret_access_key, 'AWS_DEFAULT_REGION': self._region_name}):
            econ_workflow.start_workflow(
                self._queue_name,
                workflow_name='PubmedArticleDeposit'
            )
    def package_poa(self, filename):
        """Start the PackagePOA workflow for the given delivery *filename*."""
        LOGGER.info("Starting workflow PackagePOA(document=%s)", filename)
        with modified_environ(added={'AWS_ACCESS_KEY_ID': self._aws_access_key_id, 'AWS_SECRET_ACCESS_KEY': self._aws_secret_access_key, 'AWS_DEFAULT_REGION': self._region_name}):
            econ_workflow.start_workflow(
                self._queue_name,
                workflow_name='PackagePOA',
                workflow_data={
                    'document': filename,
                }
            )
class JournalCms:
    """Credentials holder that can open authenticated Journal CMS sessions."""
    def __init__(self, host, user, password):
        self._host = host
        self._user = user
        self._password = password
    def login(self):
        """Log into the CMS and return a JournalCmsSession on success."""
        browser = mechanicalsoup.Browser()
        login_url = "%s/user/login" % self._host
        login_page = browser.get(login_url)
        form = mechanicalsoup.Form(login_page.soup.form)
        form.input({'name': self._user, 'pass': self._password})
        response = browser.submit(form, login_page.url)
        # After a successful login the page title shows the logged-in user name.
        assert _journal_cms_page_title(response.soup) == self._user
        return JournalCmsSession(self._host, browser)
class JournalCmsSession:
    """An authenticated browsing session against the journal CMS.

    Created by JournalCms.login(); wraps a mechanicalsoup.Browser that already
    carries the logged-in cookies.
    """
    def __init__(self, host, browser):
        self._host = host
        self._browser = browser
    def create_podcast_episode(self, title, image, uri, chapter_title):
        """Fill and submit the 'add podcast episode' node form.

        Asserts that the resulting page's title equals *title*.
        """
        create_url = "%s/node/add/podcast_episode" % self._host
        create_page = self._browser.get(create_url)
        form = mechanicalsoup.Form(create_page.soup.form)
        form.input({'title[0][value]': title})
        form.attach({'files[field_image_0]': image})
        LOGGER.info("Attaching image")
        form.input({'field_episode_mp3[0][uri]': uri})
        # The chapter-title input name has two known variants; try the newer
        # one first, then fall back to the inline_entity_form variant.
        chapter_title_field = create_page.soup.form.find('input', {'name': 'field_episode_chapter[form][0][title][0][value]'})
        # Leave this condition in until after inline_entity_form is updated
        if chapter_title_field is None:
            chapter_title_field = create_page.soup.form.find('input', {'name': 'field_episode_chapter[form][inline_entity_form][title][0][value]'})
        chapter_title_field['value'] = chapter_title
        response = self._browser.submit(form, create_page.url, data={'op': 'Save'})
        # requests follows redirects by default
        _assert_html_response(response)
        assert _journal_cms_page_title(response.soup) == title
    def create_article_fragment(self, id, image):
        """Attach *image* as the thumbnail of the article node titled *id*.

        Finds the article through the admin content listing, removes any
        existing thumbnail, uploads the new one, saves, and verifies the
        rendered page shows the new image.
        """
        filtered_content_url = "%s/admin/content?status=All&type=article&title=%s" % (self._host, id)
        filtered_content_page = self._browser.get(filtered_content_url)
        assert filtered_content_page.status_code == 200, \
            "Response status of %s was: %s\nBody: %s" % (filtered_content_url, filtered_content_page.status_code, filtered_content_page.content)
        try:
            view_url = "%s%s" % (self._host, filtered_content_page.soup.find('td', 'views-field-title').find('a', href=True, text=id).get('href'))
            edit_url = "%s%s" % (self._host, filtered_content_page.soup.find('td', 'views-field-operations').find('li', 'edit').find('a', href=True, text='Edit').get('href'))
        except (AttributeError, TypeError):
            # Any missing element in the lookup chains above means the listing
            # did not contain the article; surface that as an assertion failure.
            raise AssertionError('Edit link not found for article %s when loading URL %s' % (id, filtered_content_url))
        LOGGER.info(
            "Access edit form",
            extra={'id': id}
        )
        edit_page = self._browser.get(edit_url)
        form = mechanicalsoup.Form(edit_page.soup.form)
        if edit_page.soup.find('input', {'name': 'field_image_0_remove_button'}):
            self._choose_submit(form, 'field_image_0_remove_button', value='Remove')
            LOGGER.info(
                "Removing existing thumbnail",
                extra={'id': id}
            )
            response = self._browser.submit(form, edit_page.url)
            form = mechanicalsoup.Form(response.soup.form)
        form.attach({'files[field_image_0]': image})
        LOGGER.info(
            "Attaching thumbnail %s",
            image,
            extra={'id': id}
        )
        LOGGER.info(
            "Saving form",
            extra={'id': id}
        )
        # Button text will be 'Save and keep published' or 'Save and keep unpublished'
        button_text = edit_page.soup.find('div', {'id': 'edit-actions'}).find('input', 'form-submit').get('value')
        response = self._browser.submit(form, edit_page.url, data={'op': button_text})
        # requests follows redirects by default
        _assert_html_response(response)
        view_page = self._browser.get(view_url)
        img_selector = ".field--name-field-image img"
        img = view_page.soup.select_one(img_selector)
        assert img is not None, ("Cannot find %s in %s response\n%s" % (img_selector, view_page.status_code, view_page.content))
        # Sanity check: the uploaded image's src should contain "king_county".
        assert "king_county" in img.get('src')
        LOGGER.info(
            "Tag: %s",
            img,
            extra={'id': id}
        )
    def _choose_submit(self, wrapped_form, name, value=None):
        """Fixed version of mechanicalsoup.Form.choose_submit()
        https://github.com/hickford/MechanicalSoup/issues/61"""
        form = wrapped_form.form
        criteria = {"name":name}
        if value:
            criteria['value'] = value
        chosen_submit = form.find("input", criteria)
        # Strip the name from every other submit input so only the chosen
        # button's value is sent with the form.
        for inp in form.select("input"):
            if inp.get('type') != 'submit':
                continue
            if inp == chosen_submit:
                continue
            del inp['name']
def _assert_html_response(response):
assert response.status_code == 200, "Response from saving the form was expected to be 200 from the listing page, but it was %s\nBody: %s" % (response.status_code, response.text)
def _journal_cms_page_title(soup):
# <h1 class="js-quickedit-page-title title page-title"><span data-quickedit-field-id="node/1709/title/en/full" class="field field--name-title field--type-string field--label-hidden">Spectrum blog article: jvsfz4oj9vz9hk239fbpq4fbjc9yoh</span></h1>
#<h1 class="js-quickedit-page-title title page-title">alfred</h1>
return soup.find("h1", {"class": "page-title"}).text.strip()
class Journal:
    """Entry point for interacting with the journal website."""

    def __init__(self, host):
        self._host = host

    def session(self):
        """Start a plain-HTML scraping session against the journal."""
        return JournalHtmlSession(self._host, mechanicalsoup.Browser())

    def javascript_session(self, driver):
        """Start a Selenium-driven session using the supplied web driver."""
        return JournalJavaScriptSession(driver, self._host)
class JournalJavaScriptSession:
    """Selenium-driven session for journal pages that need JavaScript."""
    # DOM id of the "submit my research" link on the journal home page.
    ID_SUBMIT_MY_RESEARCH = 'submitResearchButton'
    def __init__(self, driver, host):
        self._driver = driver
        self._host = host
    def _log(self, message, *args, **kwargs):
        # All log lines are tagged with app=journal for filtering.
        LOGGER.info(message, extra={'app':'journal'}, *args, **kwargs)
    def submit(self):
        """Load the home page and click through the submit-research link."""
        LOGGER.info("Loading: %s", self._host)
        self._driver.get(self._host)
        selenium_title_smoke_test('eLife', self._driver)
        submit_link = self._driver.find_element_by_id(self.ID_SUBMIT_MY_RESEARCH)
        self._log("Found #%s `%s`", self.ID_SUBMIT_MY_RESEARCH, submit_link.text)
        submit_link.click()
        self._log("Clicked #%s", self.ID_SUBMIT_MY_RESEARCH)
        selenium_title_smoke_test('eLife', self._driver)
        # expand: click on login button, log in, and check final destination
        # lsh@2020-10-22: xpub removed without replacement
        #return XpubJavaScriptSession(self._driver)
class JournalHtmlSession:
    """Plain-HTML (no JavaScript) session against the journal site."""
    # CSS selector for the profile link that only appears when logged in.
    PROFILE_LINK = ".login-control__non_js_control_link"
    def __init__(self, host, browser):
        self._host = host
        self._browser = browser
    # TODO: automatically pass Referer when MechanicalSoup is upgraded to allow it
    def login(self, referer=None):
        """Log in via /log-in and return the page reached after redirects.

        Asserts the logged-in profile link is present on the final page.
        """
        login_url = "%s/log-in" % self._host
        headers = {}
        if referer:
            headers['Referer'] = '%s%s' % (self._host, referer)
        LOGGER.info("Logging in at %s (headers %s)", login_url, headers)
        logged_in_page = self._browser.get(login_url, headers=headers)
        # should be automatically redirected back by simulator
        LOGGER.info("Redirected to %s after log in", logged_in_page.url)
        _assert_html_response(logged_in_page)
        # if changing to another check, move in logout()
        profile = logged_in_page.soup.select_one(self.PROFILE_LINK)
        assert profile is not None, ("Cannot find %s in %s response\n%s" % (self.PROFILE_LINK, logged_in_page.status_code, logged_in_page.content))
        LOGGER.info("Found logged-in profile button at %s", self.PROFILE_LINK)
        return logged_in_page
    def logout(self):
        """Log out via /log-out and assert the profile link disappeared."""
        logout_url = "%s/log-out" % self._host
        LOGGER.info("Logging out at %s", logout_url)
        logged_out_page = self._browser.get(logout_url)
        LOGGER.info("Redirected to %s after log out", logged_out_page.url)
        _assert_html_response(logged_out_page)
        profile = logged_out_page.soup.select_one(self.PROFILE_LINK)
        assert profile is None, ("Found %s in %s response\n%s" % (self.PROFILE_LINK, logged_out_page.status_code, logged_out_page.content))
    def check(self, page_path):
        """GET *page_path* (leading slashes stripped) and assert HTTP 200."""
        LOGGER.info("Loading page %s", page_path)
        page = self._browser.get("%s/%s" % (self._host, page_path.lstrip('/')))
        _assert_html_response(page)
        return page
class BioProtocol:
    """Client for the internal BioProtocol service endpoint."""

    def __init__(self, int_host, user, password):
        self.int_host = int_host
        self.user = user
        self.password = password

    def create_bioprotocol_data(self, article_id):
        """Post a fixed pair of protocol records for *article_id* to the service."""
        payload = [
            {
                "ProtocolSequencingNumber": "s4-1",
                "ProtocolTitle": "Protein production",
                "IsProtocol": True,
                "ProtocolStatus": 0,
                "URI": "https://en.bio-protocol.org/rap.aspx?eid=24419&item=s4-1"
            },
            {
                "ProtocolSequencingNumber": "s4-2",
                "ProtocolTitle": "Chitin-triggered alkalinization of tomato cell suspension",
                "IsProtocol": False,
                "ProtocolStatus": 0,
                "URI": "https://en.bio-protocol.org/rap.aspx?eid=24419&item=s4-2"
            }
        ]
        # http://end2end--bp.elife.internal/bioprotocol/article/123456789
        template = "%s/bioprotocol/article/%s"
        url = template % (self.int_host, article_id)
        credentials = (self.user, self.password)
        response = requests.post(url, auth=credentials, json=payload, verify=False)
        assert response.status_code == 200, ("Response status was %s: %s" % (response.status_code, response.text))
def invented_word(length=30, characters=None):
    """Build a pseudo-random word of *length* symbols drawn from *characters*.

    Defaults to lowercase ASCII letters plus digits when no alphabet is given.
    """
    alphabet = characters if characters else string.ascii_lowercase + string.digits
    picks = [random.choice(alphabet) for _ in range(length)]
    return ''.join(picks)
def selenium_title_smoke_test(portion, driver):
    """Assert that *portion* appears in the driver's current page title."""
    current_title = driver.title
    assert portion in current_title, (
        "Title: %s\nCurrent URL: %s" % (current_title, driver.current_url)
    )
# Input buckets that feed articles and related assets into the pipeline.
PRODUCTION_BUCKET = InputBucket(aws.S3, SETTINGS['bucket_input'])
DIGESTS_BUCKET = InputBucket(aws.S3, SETTINGS['bucket_digests_input'])
SILENT_CORRECTION_BUCKET = InputBucket(aws.S3, SETTINGS['bucket_silent_corrections'])
PACKAGING_BUCKET = InputBucket(aws.S3, SETTINGS['bucket_packaging'])
POA_DELIVERY = InputBucket(aws.S3, SETTINGS['bucket_ejp_poa_delivery'])
EJP = InputBucket(aws.S3, SETTINGS['bucket_ejp_ftp'])
# Service clients configured from the environment-specific SETTINGS.
DASHBOARD = Dashboard(
    SETTINGS['dashboard_host'],
    SETTINGS['dashboard_user'],
    SETTINGS['dashboard_password']
)
JOURNAL_CMS = JournalCms(
    SETTINGS['journal_cms_host'],
    SETTINGS['journal_cms_user'],
    SETTINGS['journal_cms_password']
)
JOURNAL = Journal(
    SETTINGS['journal_host']
)
# Same journal front end, reached through the CDN host name.
JOURNAL_CDN = Journal(
    SETTINGS['journal_cdn_host']
)
BOT_WORKFLOWS = BotWorkflowStarter(
    SETTINGS['aws_access_key_id'],
    SETTINGS['aws_secret_access_key'],
    SETTINGS['region_name'],
    SETTINGS['queue_workflow_starter']
)
BOT_CONFIGURATION = InputBucket(aws.S3, SETTINGS['bucket_configuration'])
BIOPROTOCOL = BioProtocol(
    SETTINGS['bioprotocol_int_host'],
    SETTINGS['bioprotocol_user'],
    SETTINGS['bioprotocol_password'])
| 39.762803 | 251 | 0.657606 | 12,133 | 0.822465 | 0 | 0 | 0 | 0 | 0 | 0 | 4,060 | 0.275217 |
2e9709ca8cc7d764b689cf057d3c5b3594a91da1 | 2,858 | py | Python | tools/python/maps_generator/generator/osmtools.py | Polas/omim | 03558b418b338f506fbf3aa72ddf15187a2005ee | [
"Apache-2.0"
] | 1 | 2020-11-10T01:13:12.000Z | 2020-11-10T01:13:12.000Z | tools/python/maps_generator/generator/osmtools.py | Polas/omim | 03558b418b338f506fbf3aa72ddf15187a2005ee | [
"Apache-2.0"
] | 1 | 2018-11-26T15:44:46.000Z | 2018-11-27T10:55:36.000Z | tools/python/maps_generator/generator/osmtools.py | Polas/omim | 03558b418b338f506fbf3aa72ddf15187a2005ee | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
from . import settings
from .exceptions import BadExitStatusError
from .exceptions import wait_and_raise_if_fail
def build_osmtools(path, output=subprocess.DEVNULL, error=subprocess.DEVNULL):
    """Compile the osmupdate/osmfilter/osmconvert C sources in parallel.

    :param path: directory containing the osmctools sources
    :param output: destination for compiler stdout (file object or DEVNULL)
    :param error: destination for compiler stderr (file object or DEVNULL)
    :return: dict mapping executable name -> path of the built binary
    :raise BadExitStatusError: if any compiler invocation fails
    """
    sources = {
        settings.OSM_TOOL_UPDATE: "osmupdate.c",
        settings.OSM_TOOL_FILTER: "osmfilter.c",
        settings.OSM_TOOL_CONVERT: "osmconvert.c",
    }
    ld_flags = ("-lz",)
    compilers = []
    result = {}
    # Renamed the loop variable: the original reused "src" for both the dict
    # and the per-iteration file name, shadowing the mapping.
    for executable, src_name in sources.items():
        out = os.path.join(settings.OSM_TOOLS_PATH, executable)
        op = [
            settings.OSM_TOOLS_CC,
            *settings.OSM_TOOLS_CC_FLAGS,
            "-o",
            out,
            os.path.join(path, src_name),
            *ld_flags,
        ]
        s = subprocess.Popen(op, stdout=output, stderr=error)
        compilers.append(s)
        result[executable] = out
    messages = []
    for c in compilers:
        if c.wait() != os.EX_OK:
            messages.append(f"The launch of {' '.join(c.args)} failed.")
    if messages:
        # BUGFIX: was "\n".split(messages), which raises TypeError instead of
        # reporting the failures; join the messages into a single report.
        raise BadExitStatusError("\n".join(messages))
    return result
def osmconvert(
    name_executable,
    in_file,
    out_file,
    output=subprocess.DEVNULL,
    error=subprocess.DEVNULL,
    run_async=False,
    **kwargs,
):
    """Run osmconvert on *in_file*, dropping authors/versions, writing o5m.

    :param name_executable: osmconvert binary name (resolved via PATH below)
    :param in_file: input OSM file path
    :param out_file: output .o5m file path
    :param output: stdout destination for the subprocess
    :param error: stderr destination for the subprocess
    :param run_async: when True, return the Popen object without waiting
    :param kwargs: accepted for signature compatibility; unused here
    :return: subprocess.Popen when run_async, otherwise None
    """
    env = os.environ.copy()
    # Make the locally built osm tools take precedence on PATH.
    env["PATH"] = f"{settings.OSM_TOOLS_PATH}:{env['PATH']}"
    p = subprocess.Popen(
        [
            name_executable,
            in_file,
            "--drop-author",
            "--drop-version",
            "--out-o5m",
            f"-o={out_file}",
        ],
        env=env,
        stdout=output,
        stderr=error,
    )
    if run_async:
        return p
    else:
        wait_and_raise_if_fail(p)
def osmupdate(
    name_executable,
    in_file,
    out_file,
    output=subprocess.DEVNULL,
    error=subprocess.DEVNULL,
    run_async=False,
    **kwargs,
):
    """Run osmupdate verbosely on *in_file*, writing o5m output to *out_file*.

    When *run_async* is True the Popen handle is returned without waiting;
    otherwise the call blocks and raises on a bad exit status.
    """
    env = os.environ.copy()
    env["PATH"] = f"{settings.OSM_TOOLS_PATH}:{env['PATH']}"
    command = [
        name_executable,
        "--drop-author",
        "--drop-version",
        "--out-o5m",
        "-v",
        in_file,
        out_file,
    ]
    process = subprocess.Popen(command, env=env, stdout=output, stderr=error)
    if not run_async:
        wait_and_raise_if_fail(process)
        return None
    return process
def osmfilter(
    name_executable,
    in_file,
    out_file,
    output=subprocess.DEVNULL,
    error=subprocess.DEVNULL,
    run_async=False,
    **kwargs,
):
    """Run osmfilter; extra keyword args become --long-option=value flags."""
    env = os.environ.copy()
    env["PATH"] = f"{settings.OSM_TOOLS_PATH}:{env['PATH']}"
    command = [name_executable, in_file, f"-o={out_file}"]
    # Each kwarg foo_bar=v is forwarded as --foo-bar=v, in insertion order.
    for key, value in kwargs.items():
        command.append(f"--{key.replace('_', '-')}={value}")
    process = subprocess.Popen(command, env=env, stdout=output, stderr=error)
    if not run_async:
        wait_and_raise_if_fail(process)
        return None
    return process
| 23.42623 | 78 | 0.557733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.136459 |
2e97391dfc452d676130d4fba3d542a2e077ba24 | 3,210 | py | Python | app.py | mikecarrier4/GITBTC-Prototype | aff6f9584def94abf15ab753829b032cda4b5e51 | [
"MIT"
] | null | null | null | app.py | mikecarrier4/GITBTC-Prototype | aff6f9584def94abf15ab753829b032cda4b5e51 | [
"MIT"
] | null | null | null | app.py | mikecarrier4/GITBTC-Prototype | aff6f9584def94abf15ab753829b032cda4b5e51 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, redirect, request, url_for, session
from flask_sqlalchemy import SQLAlchemy
from src import DB
import subprocess
import os
import signal
# Flask application setup; templates are loaded from ./templates.
app = Flask(__name__, template_folder='templates')
app.secret_key = 'super secret key'  # NOTE(review): hard-coded secret — load from env/config before production use
app.config['SESSION_TYPE'] = 'filesystem'
#app.config['SQLALCHEMY_DATABASE_URI'] =
@app.route('/')
def test ():
    # Landing page: simply renders the static index template.
    return render_template('/index.html')
@app.route('/login', methods = ['GET', 'POST'])
def login ():
    """Validate the submitted PIN and route the user by their job status."""
    if request.method == 'GET':
        # Placeholder response until a real login page exists.
        return 'hello'
    if request.method == 'POST':
        form = request.form
        session['pin'] = form['pin']
        session['fn'], session['ln'], session['user_id'] = DB.User().check_pin(session['pin'])
        print('first name', session['fn'])
        current_status = DB.Running_Jobs().find_job(session['user_id'])  # Grab PIN and Pdid
        print('the current status is ', current_status)
        # isinstance is the idiomatic type check (was: type(...) == tuple).
        if isinstance(current_status, tuple) and len(current_status) == 1:
            # Exactly one running job: show its charts.
            return redirect(url_for('charts'))
        elif isinstance(current_status, tuple) and len(current_status) > 1:
            # More than one running job: a dedicated error page is still
            # needed here — the response is currently empty (None).
            pass
        else:
            # No running job: let the user pick a strategy.
            return redirect(url_for('strategy'))
@app.route('/strategy')
def strategy():
    # Render the strategy-selection form; **locals() is empty at this point but
    # forwards any template variables added later in this function.
    return render_template('/strategy.html', **locals())
@app.route('/strategy_vector', methods = ['GET', 'POST'])
def strategy_vector():
    """Store the chosen crypto/strategy/duration selections in the session."""
    if request.method == 'GET':
        return f'url/form'
    if request.method == 'POST':
        try:
            form = request.form
            session['Crypto'] = form['crypto']
            session['Strategy'] = form['pstrategy']
            session['Duration'] = form['sstrategy']
            return redirect(url_for('robinhood'))
        except KeyError:
            # A missing form field raises BadRequestKeyError (a KeyError
            # subclass); the previous bare `except:` swallowed *every*
            # exception and hid unrelated bugs.
            pass  # TODO: render a proper error page and return a response
@app.route('/robinhood')
def robinhood():
    # Render the Robinhood credential form; **locals() is empty at this point.
    return render_template('/robinhood.html', **locals())
@app.route('/driver', methods = ['POST'])
def driver():
    """Collect Robinhood credentials and launch the trading job as a subprocess."""
    if request.method == 'POST':
        x = request.form
        # Stash the submitted credentials in the session so the child process
        # can receive them via str(session) below.
        session['a'] = x['rhuserid']
        session['b'] = x['rhpwd']
        session['c'] = x['mfa']
        session['d'] = x['amount']
        # NOTE(review): passing credentials on the command line exposes them in
        # the process list (e.g. `ps`); consider an env variable or a pipe.
        subprocess.Popen(["python", "src/main.py", str(session)])
        x = None
        # Drop the credential keys from the session right after launching.
        for i in ['a', 'b', 'c', 'd']:
            session.pop(i)
        return redirect(url_for('charts'))
@app.route('/charts')
def charts():
    """Search for the username and display a job: pics and details.

    (The second bare string the original carried was a no-op statement, not a
    docstring; both texts are merged here. Job lookup is still a placeholder.)
    """
    return render_template('/charts.html')
@app.route('/kill', methods = ['POST'])
def kill():
    """Terminate the user's running trading job when the form confirms it."""
    if request.method == 'POST':
        x = request.form
        if x['kill'] == 'kill':
            # find_job's result is indexed below; its first element is used as
            # the PID to signal and to timestamp in the jobs table.
            job_id = DB.Running_Jobs().find_job(session['user_id'])
            print('the job id is ' ,job_id)
            # NOTE(review): SIGTERM semantics differ on Windows — confirm the
            # deployment platform before relying on graceful termination.
            os.kill(job_id[0], signal.SIGTERM)
            DB.Running_Jobs().kill_time(job_id[0])
            return redirect(url_for('charts'))
        else:
            pass # spit out html to kill
            return redirect(url_for('charts'))
@app.route('/about')
def about():
    # Static "about" page.
    return render_template('/about.html')
@app.route('/form')
def hello_form():
    # Generic form demo page.
    return render_template('/form.html')
if __name__ == "__main__":
app.run()
| 28.157895 | 95 | 0.593458 | 0 | 0 | 0 | 0 | 2,786 | 0.867913 | 0 | 0 | 842 | 0.262305 |
2e97b594597a17ee9700119005e1bb2d1097d89e | 9,027 | py | Python | codes/train_for_each_cate.py | thu-coai/DiaSafety | 8383d45bea1be44ecf1cb9cd4bec52611812ea2d | [
"Apache-2.0"
] | 3 | 2021-11-03T06:16:15.000Z | 2022-03-11T07:09:23.000Z | codes/train_for_each_cate.py | thu-coai/DiaSafety | 8383d45bea1be44ecf1cb9cd4bec52611812ea2d | [
"Apache-2.0"
] | null | null | null | codes/train_for_each_cate.py | thu-coai/DiaSafety | 8383d45bea1be44ecf1cb9cd4bec52611812ea2d | [
"Apache-2.0"
] | null | null | null | from transformers import RobertaTokenizer, RobertaForSequenceClassification, AdamW
import torch
import json
from sklearn import metrics
from tqdm import tqdm
import numpy as np
from time import time
from datetime import timedelta
import pandas as pd
from sklearn.model_selection import train_test_split
import argparse
import torch.nn as nn
import random
import os
def get_loader(dataset, tokenizer, batchsize=16, padsize=256, want_cate="Risk Ignorance"):
    """Tokenize (context, response) pairs into padded batches with 3-way labels.

    Label scheme: 1 = unsafe within *want_cate*, 0 = safe within *want_cate*,
    2 = example belongs to a different category (N/A).

    Returns (batch_inputs, batch_labels): parallel lists of tokenized batches
    and LongTensor label batches.
    """
    contexts = [d['context'] for d in dataset]
    responses = [d['response'] for d in dataset]
    # int(label == 'Unsafe') maps Unsafe -> 1 and Safe -> 0; other categories
    # collapse to the N/A class 2.
    labels = [
        int(d['label'] == 'Unsafe') if d['category'] == want_cate else 2
        for d in dataset
    ]
    batch_inputs, batch_labels = [], []
    for start in tqdm(range(0, len(contexts), batchsize)):
        end = start + batchsize  # slicing clamps automatically at the list end
        tmp_batch = tokenizer(text=contexts[start:end],
                              text_pair=responses[start:end],
                              return_tensors="pt", truncation=True, padding='max_length', max_length=padsize)
        batch_inputs.append(tmp_batch)
        batch_labels.append(torch.LongTensor(labels[start:end]))
    return batch_inputs, batch_labels
def get_loader_resp(dataset, tokenizer, batchsize=16, padsize=256, want_cate="Risk Ignorance"):
    """Tokenize responses only (no context) into padded batches with 3-way labels.

    Same label scheme as get_loader: 1 = unsafe within *want_cate*, 0 = safe
    within *want_cate*, 2 = other category (N/A).
    """
    responses = [d['response'] for d in dataset]
    labels = [
        int(d['label'] == 'Unsafe') if d['category'] == want_cate else 2
        for d in dataset
    ]
    batch_inputs, batch_labels = [], []
    for start in tqdm(range(0, len(responses), batchsize)):
        end = start + batchsize  # slicing clamps automatically at the list end
        tmp_batch = tokenizer(text=responses[start:end],
                              return_tensors="pt", truncation=True, padding='max_length', max_length=padsize)
        batch_inputs.append(tmp_batch)
        batch_labels.append(torch.LongTensor(labels[start:end]))
    return batch_inputs, batch_labels
def evaluate(model, batch_inputs, batch_labels,test=False):
    """Run the model over pre-tokenized batches and compute accuracy/F1/loss.

    Relies on the module-level globals ``device`` and ``loss_fct`` that are
    assigned in the training loop below.

    :param model: sequence-classification model (switched to eval mode here)
    :param batch_inputs: list of tokenized batches (see get_loader)
    :param batch_labels: list of LongTensor label batches, parallel to inputs
    :param test: when True also return a classification report, confusion
        matrix and the raw label/prediction arrays
    """
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for inputs, labels in zip(batch_inputs, batch_labels):
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(**inputs, labels=labels)
            logits = outputs.logits
            loss = loss_fct(logits, labels)
            loss_total += loss
            labels = labels.view(-1).data.cpu().numpy()
            # argmax over the last logit dimension gives the predicted class.
            predic = torch.max(logits.view(-1, logits.shape[-1]).data, 1)[1].cpu()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)
    acc = metrics.accuracy_score(labels_all, predict_all)
    f1 = metrics.f1_score(labels_all, predict_all, average='macro')
    if test:
        report = metrics.classification_report(labels_all, predict_all, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(batch_inputs), report, confusion, labels_all, predict_all
    return acc, loss_total / len(batch_inputs), f1
def test_report(model, save_path, batch_inputs, batch_labels, log_file):
    """Load the best checkpoint from *save_path* and log evaluation metrics.

    Writes loss/accuracy, the per-class report and the confusion matrix to
    *log_file*; section headers go to stdout.
    """
    # test
    model.load_state_dict(torch.load(save_path))
    model.eval()
    start_time = time()
    test_acc, test_loss, test_report, test_confusion, label, predict = evaluate(model, batch_inputs, batch_labels,
                                                                                test=True)
    msg = 'Test Loss: {0:>5.2}, Test Acc: {1:>6.2%}'
    print(msg.format(test_loss, test_acc), file=log_file)
    print("Precision, Recall and F1-Score...")
    print(test_report, file=log_file)
    print("Confusion Matrix...")
    print(test_confusion, file=log_file)
    time_dif = time() - start_time
    time_dif = timedelta(seconds=int(round(time_dif)))
    print("Time usage:", time_dif, file=log_file)
# ---- CLI: pick which unsafety category to train a detector for ----
parser = argparse.ArgumentParser(description='choose dataset')
parser.add_argument('--dataset', required=True, choices=['agreement', 'expertise', 'offend','bias','risk'])
args = parser.parse_args()

# ---- Load the DiaSafety train/val/test splits ----
with open('../DiaSafety_dataset/train.json', 'r') as f:
    train = json.load(f)
with open('../DiaSafety_dataset/val.json', 'r') as f:
    val = json.load(f)
with open('../DiaSafety_dataset/test.json', 'r') as f:
    test = json.load(f)
label_dict = {'agreement':"Toxicity Agreement", 'expertise':"Unauthorized Expertise", 'offend':"Offending User",
            'political':"Sensitive Topics", 'bias':"Biased Opinion", 'risk':"Risk Ignorance"} # political class is finally deprecated
want_cate = label_dict[args.dataset]

# ---- Hyperparameters ----
num_labels = 3 # (safe, unsafe, N/A) for a specific category
padsize = 128
num_epochs = 10
require_improvement = 2000 # can be adjusted
import itertools
batchsizes = [64, 32, 16, 8, 4]
learning_rates = [5e-3,2e-3,5e-4,2e-4,5e-5,2e-5,5e-6,2e-6]
weight = [1,1,1] # per-class loss weights; can be adjusted
weight = torch.FloatTensor(weight)
import sys
#log_file = sys.stdout

# ---- Grid search over batch size and learning rate ----
for batchsize, learning_rate in itertools.product(batchsizes,learning_rates):
    path = 'roberta-base'
    if not os.path.isdir('../models_{}'.format(args.dataset)):
        os.mkdir('../models_{}'.format(args.dataset))
    save_path = '../models_{}/model_{}_{}'.format(args.dataset, batchsize, learning_rate)
    tokenizer = RobertaTokenizer.from_pretrained(path)
    model = RobertaForSequenceClassification.from_pretrained(path, num_labels=num_labels)
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    total_batch = 0
    dev_best_loss = float('inf')
    best_f1 = 0
    last_improve = 0
    optimizer = AdamW(model.parameters(), lr=learning_rate)
    print("getting loader...")
    val_inputs, val_labels = get_loader(val, tokenizer, batchsize=batchsize, padsize=padsize,want_cate=want_cate)
    test_inputs, test_labels = get_loader(test, tokenizer, batchsize=batchsize, padsize=padsize, want_cate=want_cate)
    model = model.to(device)
    flag = False
    weight = weight.to(device)
    loss_fct = nn.CrossEntropyLoss(weight=weight)
    print("start to train...")
    for epoch in range(num_epochs):
        model.train()
        print('Epoch [{}/{}]'.format(epoch + 1, num_epochs))
        start_time = time()
        # Deterministically reshuffle and re-tokenize the training set.
        random.seed(42)
        random.shuffle(train)
        train_inputs, train_labels = get_loader(train, tokenizer, batchsize=batchsize, padsize=padsize, want_cate=want_cate)
        for i, (trains, labels) in enumerate(zip(train_inputs, train_labels)):
            trains, labels = trains.to(device), labels.to(device)
            outputs = model(**trains, labels=labels)
            #loss = outputs.loss
            logits = outputs.logits
            loss = loss_fct(logits, labels)
            model.zero_grad()
            loss.backward()
            optimizer.step()
            if total_batch % 100 == 0:
                # Periodic evaluation; keep the checkpoint with the best val F1.
                true = labels.view(-1).data.cpu()
                predic = torch.max(logits.view(-1, logits.shape[-1]).data, 1)[1].cpu()
                train_acc = metrics.accuracy_score(true, predic)
                dev_acc, dev_loss, dev_f1 = evaluate(model, val_inputs, val_labels)
                if dev_f1>best_f1:
                    best_f1 = dev_f1
                    #if dev_loss < dev_best_loss:
                    #    dev_best_loss = dev_loss
                    torch.save(model.state_dict(), save_path)
                    improve = '*'
                    last_improve = total_batch
                else:
                    improve = ''
                time_dif = time() - start_time
                time_dif = timedelta(seconds=int(round(time_dif)))
                msg = 'Iter: {0:>6}, Train Loss: {1:>5.2}, Train Acc: {2:>6.2%}, Val Loss: {3:>5.2}, Val Acc: {4:>6.2%}, Val F1: {5:>6.2%} Time: {6} {7}'
                print(msg.format(total_batch, loss.item(), train_acc, dev_loss, dev_acc, dev_f1, time_dif, improve))
                model.train()
            total_batch += 1
            if total_batch - last_improve > require_improvement:
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break
        if flag:
            break
    if not os.path.isdir('../logs_{}'.format(args.dataset)):
        os.mkdir('../logs_{}'.format(args.dataset))
    log_file = open('../logs_{}/log_{}_{}.txt'.format(args.dataset, batchsize, learning_rate),'w')
    print('batchsize: {}\nlearning_rate:{}'.format(batchsize,learning_rate), file=log_file)
    # BUGFIX: the final report previously paired val_inputs with test_labels —
    # two different datasets. Evaluate the test inputs with the test labels.
    test_report(model, save_path, test_inputs, test_labels, log_file=log_file)
    log_file.close()
| 42.985714 | 174 | 0.641631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,250 | 0.138473 |
2e9823bd8bb1e8b62572906d7b5463e1162c391e | 5,065 | py | Python | tests/functional/test_lentBook.py | deemarc/dmLibrary | ebdb77c45ffdf4c92797a2f268b59adedd46ef2a | [
"MIT"
] | null | null | null | tests/functional/test_lentBook.py | deemarc/dmLibrary | ebdb77c45ffdf4c92797a2f268b59adedd46ef2a | [
"MIT"
] | null | null | null | tests/functional/test_lentBook.py | deemarc/dmLibrary | ebdb77c45ffdf4c92797a2f268b59adedd46ef2a | [
"MIT"
] | null | null | null | import unittest
from dmLibrary import create_app
from dmLibrary.external.googleBook import GoogleBook
import time
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class TestClass(unittest.TestCase):
    """End-to-end test of the book lend/return flow through the HTTP API."""
    def setUp(self):
        """Create the app, push an app context and prepare a test client."""
        app = create_app()
        self.ctx = app.app_context()
        self.ctx.push()
        self.client = app.test_client()
        self.gb = GoogleBook()
        logger.debug('logged from test_something')
    def tearDown(self):
        """Nothing to tear down yet (the pushed app context is not popped)."""
        pass
    def test_lentBook(self):
        """
        test lent book
        """
        # ===== populate book and customer need for testing ======
        # book 1
        data = {
            "title":"ทายชีวิตคู่ด้วยเลข ๗ ตัว",
            "isbn":"9789740212201"
        }
        response = self.client.post('/api/v1/books',json=data)
        logger.info(f"rawResp: {response.status_code}")
        if not ((response.status_code == 200) or (response.status_code == 201)):
            self.fail("book1 cannot be create")
        respData = response.get_json()
        book1_id = respData["data"]["id"]
        # customerA
        data = {
            "name":"customerA testLastNameA",
            "email":"customerA@gmail.com",
            "mobile":"0881111111"
        }
        response = self.client.post('/api/v1/customers',json=data)
        if not ((response.status_code == 200) or (response.status_code == 201)) :
            self.fail("customerA cannot be create")
        respData = response.get_json()
        customerA_id = respData["data"]["id"]
        # customerB
        data = {
            "name":"customerB testLastNameB",
            "email":"customerB@gmail.com",
            "mobile":"0881111112"
        }
        response = self.client.post('/api/v1/customers',json=data)
        if not ((response.status_code == 200) or (response.status_code == 201)) :
            self.fail("customerB cannot be create")
        respData = response.get_json()
        customerB_id = respData["data"]["id"]
        #check if book1 being lent
        logger.info(f"getting book1 data: /api/v1/books/{book1_id}")
        response = self.client.get(f'/api/v1/books/{book1_id}')
        logger.info(f"rawResp: {response.status_code}")
        if not (response.status_code == 200):
            self.fail("cannot get book1 data")
        respData = response.get_json()
        self.assertEqual(respData["data"]['isLent'], False)
        # ==== get history before lent
        response = self.client.get(f'/api/v1/lentHistory?book_id={book1_id}')
        if not (response.status_code == 200):
            self.fail("cannot get lentHistory")
        respData = response.get_json()
        lentCount = len(respData["data"])
        # customerA lent the book
        data ={
            "book_id_list":[book1_id]
        }
        logger.info(f"lenting book: /api/v1/customers/{customerA_id}/lent")
        response = self.client.post(f'/api/v1/customers/{customerA_id}/lent',json=data)
        if not (response.status_code == 200):
            self.fail("customerA cannot lent book1")
        respData = response.get_json()
        customerA_id = respData["data"]["id"]
        #check if book1 being lent
        logger.info(f"getting book1 data: /api/v1/books/{book1_id}")
        response = self.client.get(f'/api/v1/books/{book1_id}')
        logger.info(f"rawResp: {response.status_code}")
        if not (response.status_code == 200):
            self.fail("cannot get book1 data")
        respData = response.get_json()
        self.assertEqual(respData["data"]['isLent'], True)
        # customerB shouldn't be able to lent
        data ={
            "book_id_list":[book1_id]
        }
        response = self.client.post(f'/api/v1/customers/{customerB_id}/lent',json=data)
        if not (response.status_code == 400):
            self.fail("customerA somehow be able to lent the book")
        # customerA return the book
        response = self.client.post(f'/api/v1/customers/{customerA_id}/return',json=data)
        if not (response.status_code == 200):
            self.fail("customerA cannot return the book")
        respData = response.get_json()
        customerA_id = respData["data"]["id"]
        #check if book1 not being lent
        logger.info(f"getting book1 data: /api/v1/books/{book1_id}")
        response = self.client.get(f'/api/v1/books/{book1_id}')
        logger.info(f"rawResp: {response.status_code}")
        if not (response.status_code == 200):
            self.fail("cannot get book1 data")
        respData = response.get_json()
        self.assertEqual(respData["data"]['isLent'], False)
        # ==== check history after lent
        response = self.client.get(f'/api/v1/lentHistory?book_id={book1_id}',json=data)
        if not (response.status_code == 200):
            self.fail("cannot get lentHistory")
        respData = response.get_json()
        lentCountNew = len(respData["data"])
        self.assertEqual(lentCountNew, lentCount+1)
| 35.41958 | 89 | 0.595854 | 4,885 | 0.956156 | 0 | 0 | 0 | 0 | 0 | 0 | 1,769 | 0.346252 |
2e9869e8da3125095fe2bf1fdea08f90bff81ce5 | 697 | py | Python | LeetCode/Python/search_insert_position.py | tejeshreddy/competitive-programming | e98d3800c7f1dcca285286cac1ac056b11201e12 | [
"MIT"
] | null | null | null | LeetCode/Python/search_insert_position.py | tejeshreddy/competitive-programming | e98d3800c7f1dcca285286cac1ac056b11201e12 | [
"MIT"
] | null | null | null | LeetCode/Python/search_insert_position.py | tejeshreddy/competitive-programming | e98d3800c7f1dcca285286cac1ac056b11201e12 | [
"MIT"
] | null | null | null | """
Title: 0035 - Search Insert Position
Tags: Binary Search
Time: O(logn)
Space: O(1)
Source: https://leetcode.com/problems/search-insert-position/
Difficulty: Easy
"""
class Solution(object):
    def searchInsert(self, nums, target):
        """Return the index of target in the sorted list nums, or the
        index at which it would be inserted to keep nums sorted.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        low = 0
        high = len(nums) - 1
        while low <= high:
            # Floor division keeps mid an int; the original used '/',
            # which produces a float under Python 3 and breaks indexing.
            mid = (low + high) // 2
            if target == nums[mid]:
                return mid
            elif target < nums[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # low is the first position whose element exceeds target.
        return low
| 22.483871 | 61 | 0.476327 | 525 | 0.753228 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.373027 |
2e9a0dfea1dd417d3aed705d60b72d8ce81c3470 | 1,337 | py | Python | server/api/test.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 5 | 2019-02-27T03:06:02.000Z | 2021-11-15T20:12:50.000Z | server/api/test.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 48 | 2019-02-14T21:15:18.000Z | 2021-10-02T01:18:49.000Z | server/api/test.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 1 | 2021-03-22T23:47:54.000Z | 2021-03-22T23:47:54.000Z | # pylint: disable=global-statement
"""
Module providing unittest test discovery hook for our Doctest testcases
Copyright (C) 2015, 2016 ERT Inc.
"""
import unittest
import doctest
from time import sleep
from api import app_module as app
from api import (
config_loader
,aes
,json
,resource_util
)
__author__ = "Brandon J. Van Vaerenbergh <brandon.vanvaerenbergh@noaa.gov>, "
# Module-level flag: set once the Carte server has been stopped, so the
# doctest cleanup hook becomes a no-op on subsequent invocations.
pentaho_stopped = False
def app_stop_pentaho():
    """
    helper function, to stop the WSGI app's configured Carte server
    """
    global pentaho_stopped
    # Keep polling while the app started Carte and nobody has stopped
    # it yet; stop it exactly once as soon as it reports running.
    while app.pentaho_started and not pentaho_stopped:
        if not app.pentaho_controller.status():
            sleep(2)
            continue
        app.pentaho_controller.stop()
        pentaho_stopped = True
        return
def load_tests(loader, tests, ignore):
    """
    Expose Doctest testcases to unitest discovery
    per: https://docs.python.org/3/library/doctest.html#unittest-api
    """
    # The app module's doctests may start the Carte server; register a
    # cleanup on each of them so it is shut down afterwards.
    app_suite = doctest.DocTestSuite(app)
    for doctest_case in app_suite:
        doctest_case.addCleanup(app_stop_pentaho)
    tests.addTests(app_suite)
    # Plain doctest modules that need no special cleanup.
    for module in (config_loader, aes, json, resource_util):
        tests.addTests(doctest.DocTestSuite(module))
    return tests
| 26.74 | 77 | 0.712042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.315632 |
2e9ab9a70a398a99408deeddd6ad6d94b9da8cfd | 9,736 | py | Python | tests/old_wc.py | qiaojunfeng/yambo-aiida | 1900f2a0932b7782759740e5835620dbfb897a76 | [
"MIT",
"BSD-3-Clause"
] | 3 | 2018-10-30T14:00:40.000Z | 2020-04-11T08:03:10.000Z | tests/old_wc.py | qiaojunfeng/yambo-aiida | 1900f2a0932b7782759740e5835620dbfb897a76 | [
"MIT",
"BSD-3-Clause"
] | 32 | 2017-10-23T17:11:35.000Z | 2022-03-28T10:04:23.000Z | tests/old_wc.py | qiaojunfeng/yambo-aiida | 1900f2a0932b7782759740e5835620dbfb897a76 | [
"MIT",
"BSD-3-Clause"
] | 6 | 2017-10-23T17:01:13.000Z | 2022-03-15T18:20:35.000Z | from __future__ import absolute_import
from __future__ import print_function
import unittest
from aiida.manage.fixtures import PluginTestCase
import subprocess, os
def backend_obj_users():
    """Test if aiida accesses users through backend object."""
    try:
        # This import only exists on older aiida-core; its absence means
        # users must be fetched through the backend object instead.
        from aiida.backends.utils import get_automatic_user  # pylint: disable=unused-variable,no-name-in-module
    except ImportError:
        return True
    return False
def get_current_user():
    """Get current user backwards compatibly with aiida-core <= 0.12.1."""
    if not backend_obj_users():
        # Legacy API: the default user is fetched directly.
        from aiida.backends.utils import get_automatic_user  # pylint: disable=no-name-in-module
        return get_automatic_user()
    # Modern API: go through an explicitly constructed backend object.
    from aiida.orm.backend import construct_backend  # pylint: disable=no-name-in-module
    return construct_backend().users.get_automatic_user()
def create_authinfo(computer):
    """
    Allow the current user to use the given computer.
    Deal with backwards compatibility down to aiida 0.11
    """
    from aiida import load_profile
    load_profile()
    from aiida.orm import backend as orm_backend
    if hasattr(orm_backend, 'construct_backend'):
        # Newer aiida: auth info is created through the backend object.
        backend = orm_backend.construct_backend()
        return backend.authinfos.create(
            computer=computer, user=get_current_user())
    # Older aiida (down to 0.11): build the Django DbAuthInfo model directly.
    from aiida.backends.djsite.db.models import DbAuthInfo
    return DbAuthInfo(
        dbcomputer=computer.dbcomputer, aiidauser=get_current_user())
class TestWf(PluginTestCase):
    """Integration test for the YamboRestartWf workflow.

    setUp() wires up a complete AiiDA fixture graph on a local
    'testcase' computer using mocked p2y/yambo executables
    (``mock_p2y`` / ``mock_yambo`` must be on PATH).
    """
    def setUp(self):
        """Create computer, codes, a parent PW calculation, a finished
        Yambo calculation with its remote folder, and the Dict input
        nodes that test_simple_log feeds to the workflow.
        """
        from aiida import work
        from aiida.orm.code import Code
        from aiida.orm.nodes.parameter import Dict
        from aiida.orm.nodes.structure import StructureData
        from aiida.orm.nodes.remote import RemoteData
        from ase.spacegroup import crystal
        from aiida_quantumespresso.calculations.pw import PwCalculation
        from aiida_yambo.calculations.gw import YamboCalculation
        from aiida.common.links import LinkType
        from aiida.orm.computer import Computer as AiidaOrmComputer
        from aiida.common.datastructures import calc_states
        from aiida.plugins.utils import DataFactory
        # Run everything in-process: no polling delay, no RabbitMQ,
        # no persistence of intermediate process state.
        runner = work.Runner(
            poll_interval=0., rmq_config=None, enable_persistence=None)
        work.set_runner(runner)
        self.computer = AiidaOrmComputer(name="testcase")
        # conf_attrs hostname, description, enabled_state, transport_type, scheduler_type, workdir
        # mpirun_command , default_mpiprocs_per_machine,
        self.computer._set_hostname_string("localhost")
        self.computer._set_enabled_state_string('True')
        self.computer._set_transport_type_string("local")
        self.computer._set_scheduler_type_string("direct")
        self.computer._set_workdir_string("/tmp/testcase/{username}/base")
        self.computer.store()
        create_authinfo(computer=self.computer).store()
        # Register the mocked executables as AiiDA Code nodes.
        self.code_yambo = Code()
        self.code_yambo.label = "yambo"
        os_env = os.environ.copy()
        yambo_path = subprocess.check_output(['which', 'mock_yambo'],
                                             env=os_env).strip()
        self.code_yambo.set_remote_computer_exec((self.computer, yambo_path))
        self.code_yambo.set_input_plugin_name('yambo.yambo')
        self.code_p2y = Code()
        self.code_p2y.label = "p2y"
        p2y_path = subprocess.check_output(['which', 'mock_p2y'],
                                           env=os_env).strip()
        self.code_p2y.set_remote_computer_exec((self.computer, p2y_path))
        self.code_p2y.set_input_plugin_name('yambo.yambo')
        self.code_yambo.store()
        self.code_p2y.store()
        # Parent Quantum ESPRESSO pw.x calculation the Yambo run chains from.
        self.calc_pw = PwCalculation()
        self.calc_pw.set_computer(self.computer)
        self.calc_pw.set_resources({
            "num_machines": 1,
            "num_mpiprocs_per_machine": 16,
            'default_mpiprocs_per_machine': 16
        })
        StructureData = DataFactory('structure')
        cell = [[15.8753100000, 0.0000000000, 0.0000000000],
                [0.0000000000, 15.8753100000, 0.0000000000],
                [0.0000000000, 0.0000000000, 2.4696584760]]
        s = StructureData(cell=cell)
        self.calc_pw.use_structure(s)
        print((self.calc_pw.store_all(), " pw calc"))
        pw_remote_folder = RemoteData(
            computer=self.computer, remote_path="/tmp/testcase/work/calcPW")
        print((pw_remote_folder.store(), "pw remote data"))
        self.calc_pw._set_state(calc_states.PARSING)
        pw_remote_folder.add_link_from(
            self.calc_pw, label='remote_folder', link_type=LinkType.CREATE)
        # Minimal fake pw.x output so downstream parsing has something to read.
        outputs = Dict(
            dict={
                "lsda": False,
                "number_of_bands": 80,
                "number_of_electrons": 8.0,
                "number_of_k_points": 147,
                "non_colinear_calculation": False
            })
        outputs.store()
        outputs.add_link_from(
            self.calc_pw, label='output_parameters', link_type=LinkType.CREATE)
        # A finished p2y/yambo calculation that the restart workflow resumes.
        self.calc = YamboCalculation()
        self.calc.set_computer(self.computer)
        self.calc.use_code(self.code_p2y)
        p2y_settings = {
            u'ADDITIONAL_RETRIEVE_LIST':
            [u'r-*', u'o-*', u'l-*', u'l_*', u'LOG/l-*_CPU_1'],
            u'INITIALISE':
            True
        }
        yambo_settings = {
            u'ADDITIONAL_RETRIEVE_LIST':
            [u'r-*', u'o-*', u'l-*', u'l_*', u'LOG/l-*_CPU_1']
        }
        self.calc.use_settings(Dict(dict=p2y_settings))
        self.calc.set_resources({
            "num_machines": 1,
            "num_mpiprocs_per_machine": 16,
            'default_mpiprocs_per_machine': 16
        })
        self.calc.use_parent_calculation(self.calc_pw)
        print((self.calc.store_all(), " yambo calc"))
        self.calc._set_state(calc_states.PARSING)
        # Primitive silicon cell built with ASE (a = 5.388 Angstrom).
        a = 5.388
        cell = crystal(
            'Si', [(0, 0, 0)],
            spacegroup=227,
            cellpar=[a, a, a, 90, 90, 90],
            primitive_cell=True)
        self.struc = StructureData(ase=cell)
        self.struc.store()
        # Yambo GW (plasmon-pole) input parameters used by the workflow.
        self.parameters = Dict(
            dict={
                "BndsRnXp": [1.0, 48.0],
                "Chimod": "Hartree",
                "DysSolver": "n",
                "FFTGvecs": 25,
                "FFTGvecs_units": "Ry",
                "GbndRnge": [1.0, 48.0],
                "HF_and_locXC": True,
                "LongDrXp": [1.0, 0.0, 0.0],
                "NGsBlkXp": 2,
                "NGsBlkXp_units": "Ry",
                "QPkrange": [[1, 145, 3, 5]],
                "SE_CPU": "1 2 4",
                "SE_ROLEs": "q qp b",
                "X_all_q_CPU": "1 1 4 2",
                "X_all_q_ROLEs": "q k c v",
                "em1d": True,
                "gw0": True,
                "ppa": True,
                "rim_cut": True
            })
        self.yambo_settings = Dict(
            dict={
                "ADDITIONAL_RETRIEVE_LIST": [
                    "r-*", "o-*", "l-*", "l_*", "LOG/l-*_CPU_1",
                    "aiida/ndb.QP", "aiida/ndb.HF_and_locXC"
                ]
            })
        self.p2y_settings = Dict(
            dict={
                "ADDITIONAL_RETRIEVE_LIST": [
                    'r-*', 'o-*', 'l-*', 'l_*', 'LOG/l-*_CPU_1',
                    'aiida/ndb.QP', 'aiida/ndb.HF_and_locXC'
                ],
                'INITIALISE':
                True
            })
        # Scheduler/resource settings for the yambo and p2y steps.
        self.yambo_calc_set = Dict(
            dict={
                'resources': {
                    "num_machines": 1,
                    "num_mpiprocs_per_machine": 16
                },
                'max_wallclock_seconds': 60 * 29,
                'max_memory_kb': 1 * 88 * 1000000,
                "queue_name":
                "s3parvc3",  #'custom_scheduler_commands': u"#PBS -A Pra14_3622" ,
                'environment_variables': {
                    "OMP_NUM_THREADS": "1"
                }
            })
        self.p2y_calc_set = Dict(
            dict={
                'resources': {
                    "num_machines": 1,
                    "num_mpiprocs_per_machine": 2
                },
                'max_wallclock_seconds': 60 * 2,
                'max_memory_kb': 1 * 10 * 1000000,
                "queue_name":
                "s3parvc3",  # 'custom_scheduler_commands': u"#PBS -A Pra14_3622" ,
                'environment_variables': {
                    "OMP_NUM_THREADS": "2"
                }
            })
        # Remote folder of the finished Yambo calculation, used as the
        # workflow's parent_folder input.
        self.remote_folder = RemoteData(
            computer=self.computer, remote_path="/tmp/testcase/work/calcX")
        self.remote_folder.store()
        self.remote_folder.add_link_from(
            self.calc, label='remote_folder', link_type=LinkType.CREATE)
        self.calc._set_state(calc_states.FINISHED)
        #self.calc.store_all()
    def tearDown(self):
        """Nothing to clean up; PluginTestCase resets the database."""
        pass
    def test_simple_log(self):
        """Run YamboRestartWf end to end and check it retrieved output."""
        from aiida.engine.launch import run
        from aiida.orm.nodes import Float, Str, NumericType, List, Bool
        from aiida_yambo.workflows.yamborestart import YamboRestartWf
        p2y_result = run(
            YamboRestartWf,
            precode=Str('p2y'),
            yambocode=Str('yambo'),
            parameters=self.parameters,
            calculation_set=self.yambo_calc_set,
            parent_folder=self.remote_folder,
            settings=self.yambo_settings)
        assert 'retrieved' in p2y_result
| 38.482213 | 112 | 0.57159 | 8,044 | 0.826212 | 0 | 0 | 0 | 0 | 0 | 0 | 2,160 | 0.221857 |
2e9d1bb5a95a4719b56246692e62c0136fc3c506 | 4,017 | py | Python | knn.py | dengbuqi/lie_detection | a90070511e779c2ec765b34ec68bac806053bf9d | [
"MIT"
] | 1 | 2022-01-23T17:21:20.000Z | 2022-01-23T17:21:20.000Z | knn.py | dengbuqi/lie_detection | a90070511e779c2ec765b34ec68bac806053bf9d | [
"MIT"
] | null | null | null | knn.py | dengbuqi/lie_detection | a90070511e779c2ec765b34ec68bac806053bf9d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName : knn.py
# @Time : 2020/9/25 12:29
# @Author : 陈嘉昕
# @Demand : k-临近算法,和训练的模型作比较
import csv
import math
import operator
from random import shuffle
import matplotlib.pyplot as plt
# Training data set: rows accumulated by load_dataset() (module-level state).
training_set = []
# Test data set: held-out rows scored by predict(training=True).
test_set = []
def cross_validation(file_name):
    """Leave-one-out accuracy sweep over k = 1 .. len(training_set) - 2.

    For each k, every training row is temporarily removed, predicted from
    the remaining rows, and the mean accuracy recorded.  The accuracy
    curve is plotted, saved to *file_name*, shown, and the best k printed.
    """
    k_max = len(training_set) - 1
    k_scores = []
    for k in range(1, k_max):
        acc = 0
        for i in range(k_max):
            to_predict = training_set[i]
            # list.remove() drops the first *equal* element, which is row i
            # only if rows are unique — assumes no duplicate rows in the
            # CSV; TODO confirm.
            training_set.remove(to_predict)
            predictions = fit([to_predict], training_set, k)
            acc += calculate_accuracy([to_predict], predictions)
            # Restore the held-out row at its original position.
            training_set.insert(i, to_predict)
        scores = acc / k_max
        print("k =", repr(k), " accuracy = " + repr(scores))
        k_scores.append(scores)
    plt.plot(range(1, k_max), k_scores)
    plt.xlabel('Value of K')
    plt.ylabel('Accuracy')
    plt.savefig(file_name)
    plt.show()
    # k_scores[0] corresponds to k = 1, hence the +1 when reporting.
    max_index, max_number = max(enumerate(k_scores), key=operator.itemgetter(1))
    print("\n\nThe best results: k =", repr(max_index + 1), ", accuracy = " + repr(max_number) + "\n\n")
def load_dataset(filename, training=False):
    """Read a CSV (header row skipped) of 4 numeric feature columns plus a
    label, shuffle the rows, and append them to the module-level
    training_set / test_set lists.

    With training=True roughly 80% of rows go to training_set and the
    rest to test_set; otherwise every row goes to training_set.
    """
    # Open the file in text mode; the throwaway reader call skips the header.
    with open(filename, 'rt') as camile:
        next(csv.reader(camile))
        lines = csv.reader(camile)
        dataset = list(lines)
        shuffle(dataset)
        if training:
            split = 0.8 * len(dataset)
        else:
            split = len(dataset)
        for x in range(len(dataset)):
            # First four columns are numeric features; coerce to float.
            for y in range(4):
                dataset[x][y] = float(dataset[x][y])
            # NOTE(review): the split compares the *accumulated* size of the
            # global training_set, so calling this twice skews the 80/20
            # ratio — confirm intended.
            if len(training_set) <= split:
                training_set.append(dataset[x])
            else:
                test_set.append(dataset[x])
def euclidean_distance(instance1, instance2, number_of_params):
    """Euclidean distance over the first number_of_params attributes.

    Attribute values are coerced to float, so rows read straight from a
    CSV (strings) work as well.
    """
    # A generator expression replaces the manual accumulate-and-pow loop.
    return math.sqrt(sum(
        (float(instance1[i]) - float(instance2[i])) ** 2
        for i in range(number_of_params)))
def get_neighbors(trainingSet, instance, k):
    """Return the k training rows closest to *instance* (Euclidean
    distance over all columns except the trailing label)."""
    num_features = len(instance) - 1
    # Pair every training row with its distance to the query instance.
    scored = [(row, euclidean_distance(instance, row, num_features))
              for row in trainingSet]
    # Stable sort by distance, then keep the k nearest rows.
    scored.sort(key=operator.itemgetter(1))
    return [row for row, _ in scored[:k]]
def calculate_votes(neighbors):
    """Return the majority class label among *neighbors* (last column of
    each row); ties go to the label seen first, matching the original
    stable reverse-sort behavior."""
    class_votes = {}
    # Iterate rows directly instead of indexing by range(len(...)).
    for neighbor in neighbors:
        label = neighbor[-1]
        class_votes[label] = class_votes.get(label, 0) + 1
    # max() scans once (O(n)) instead of sorting just to take the head;
    # it returns the first maximal item, preserving tie-breaking.
    return max(class_votes.items(), key=operator.itemgetter(1))[0]
def calculate_accuracy(testSet, predictions):
    """Return the percentage (0.0-100.0) of rows whose trailing label
    matches the corresponding entry of *predictions*."""
    # Guard the empty set explicitly instead of raising ZeroDivisionError.
    if not testSet:
        return 0.0
    correct = sum(1 for row, predicted in zip(testSet, predictions)
                  if row[-1] == predicted)
    return correct / float(len(testSet)) * 100.0
def fit(to_predict, dataset, k):
    """Predict a label for every row of *to_predict* by majority vote of
    its k nearest neighbors within *dataset*."""
    return [calculate_votes(get_neighbors(dataset, row, k))
            for row in to_predict]
def predict(to_predict, data_set_path, k=12, training=False):
    """Classify *to_predict* rows with k-NN, lazily loading the dataset
    from *data_set_path* on first use.

    In training mode, score the held-out test split, print the accuracy
    and return None; otherwise return the predicted labels.
    """
    if not training_set:
        load_dataset(data_set_path, training)
    if training:
        # Evaluation mode: score the held-out split, nothing to return.
        held_out_predictions = fit(test_set, training_set, k)
        accuracy = calculate_accuracy(test_set, held_out_predictions)
        print('Accuracy: ' + repr(accuracy) + '%')
        return None
    # Inference mode: classify the caller-supplied rows.
    return fit(to_predict, training_set, k)
def run_one():
    """Sweep k over the video feature set and plot the accuracy curve."""
    # Start from a clean slate: empty both module-level splits in place.
    del training_set[:]
    del test_set[:]
    load_dataset("data/video_data_for_lie_training.csv", True)
    cross_validation("image/knn_model_1.png")
def run_two():
    """Sweep k over the audio feature set and plot the accuracy curve."""
    # Start from a clean slate: empty both module-level splits in place.
    del training_set[:]
    del test_set[:]
    load_dataset("data/audio_data_for_lie_training.csv", True)
    cross_validation("image/knn_model_2.png")
| 27.703448 | 104 | 0.62584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 444 | 0.109064 |
2e9db835b85cc0a761a8f0a3c7d9faa9306aeec6 | 5,530 | py | Python | vnpy/trader/http/controller/stock.py | chenzj810/vnpy-stock | ca30eb309e38f9f916e9877538b98096303e0b60 | [
"MIT"
] | 2 | 2021-01-03T05:28:14.000Z | 2021-01-03T05:28:19.000Z | vnpy/trader/http/controller/stock.py | chenzj810/vnpy | ca30eb309e38f9f916e9877538b98096303e0b60 | [
"MIT"
] | null | null | null | vnpy/trader/http/controller/stock.py | chenzj810/vnpy | ca30eb309e38f9f916e9877538b98096303e0b60 | [
"MIT"
] | 1 | 2021-04-26T14:08:23.000Z | 2021-04-26T14:08:23.000Z | # encoding: UTF-8
import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import json
########################################################################
class StockHandler(tornado.web.RequestHandler):
    """Tornado handler serving stock lookups from a MongoDB client.

    GET/POST .../list -> all {code: name} pairs from basic_report
    GET/POST .../name -> the name for a given ?stock_code=...
    """
    def initialize(self, mainEngine):
        # Called by Tornado per request: keep references to the main
        # engine and its MongoDB client.
        self.mainEngine = mainEngine
        self.dbClient = mainEngine.DB.client
        print('me:', mainEngine)
    #----------------------------------------------------------------------
    def get(self, *args, **kwargs):
        """Dispatch a GET request by the first URL path argument.

        CORS headers are set permissively (any origin) on every response.
        """
        print(self.request.method, self.request.uri, 'args:', str(args))
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
        self.set_header("Content-Type","application/json")
        if args[0] == 'list':
            self.__list(args, kwargs)
        elif args[0] == 'name':
            self.__name(args, kwargs)
        else:
            self.write({"ret_code": -1, "ret_msg": "FAILED", "extra":"url invalid"})
        self.finish()
    #----------------------------------------------------------------------
    def post(self, *args, **kwargs):
        """Dispatch a POST request; mirrors get() exactly."""
        print(self.request.method, self.request.uri, 'args:', str(args))
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
        self.set_header("Content-Type","application/json")
        if args[0] == 'list':
            self.__list(args, kwargs)
        elif args[0] == 'name':
            self.__name(args, kwargs)
        else:
            self.write({"ret_code": -1, "ret_msg": "FAILED", "extra":"url invalid"})
        self.finish()
    #----------------------------------------------------------------------
    def __list(self, *args, **kwargs):
        """Write every stock in basic_report as a {code: name} entry."""
        mylist = []
        # Iterate over every document in the basic_report collection.
        list_name = self.dbClient.database.basic_report.find()
        for item in list_name:
            #print(item)
            code = item['code']
            name = item['name']
            #print(code, name)
            mylist.append({code:name})
        #print(mylist)
        # NOTE(review): ret_code 0 (success) is paired with ret_msg
        # "FAILED" — looks like a copy/paste slip; confirm intended.
        self.write({"ret_code": 0, "ret_msg":"FAILED", "extra":mylist})
    #----------------------------------------------------------------------
    def __name(self, *args, **kwargs):
        """Write the stock name matching the 'stock_code' query argument,
        or a FAILED payload when it is missing / not found."""
        #print(self.request.body)
        try:
            stock_code = self.get_argument('stock_code')
            item = self.dbClient.database.basic_report.find_one({"code":stock_code})
            #print('stock_code', stock_code, item['name'])
            self.write({"ret_code": 0, "ret_msg":"SUCCESS", "extra":item['name']})
        except:
            # Broad catch: missing argument, no document, or DB error all
            # collapse into the same 'not found' response.
            self.write({"ret_code": -1, "ret_msg":"FAILED", "extra":'not found'})
    """
    function
    @param db_data: 数据库
    @param collection_base: 基本面数据集合
    @param date: 日期
    @param collection_result: 结果输出数据集合
    """
    def select_by_basic_db(self, db_data, collection_base, date, collection_result):
        '''Select stocks from daily-bar data plus fundamentals data.

        @param db_data: database holding the per-stock daily collections
        @param collection_base: fundamentals collection
        @param date: reference date
        @param collection_result: collection the results are written to
        '''
        # Walk every document in the fundamentals collection.
        list_name = collection_base.find()
        for item in list_name:
            # The per-stock collection is named after the stock code.
            #print(item)
            #print(item['code'])
            code = item['code']
            #print(code)
            collection_code = db_data[code]
            #print(collection_code)
            # On weekends/holidays walk backwards up to 10 days to find
            # the most recent day that has data, then select on that day.
            for i in range(0, -10, -1):
                delta = datetime.timedelta(days=i)
                my_date = str(date + delta)
                #print(my_date)
                # That day's record (closing price).
                today_record = collection_code.find_one({'date':my_date})
                if today_record is not None:
                    #print(today_record)
                    # Fundamentals "3x" selection policy.
                    # NOTE(review): select_by_basic_policy, BASE_OF_MAX_WEIGHT
                    # and datetime are not defined/imported in this module —
                    # confirm they come from elsewhere at runtime.
                    select_by_basic_policy(collection_result, item, today_record, BASE_OF_MAX_WEIGHT)
                    break;
        # Done.
        return
    """
    @function:select_by_basic
    @param void:
    @return void:
    @rtype void:
    """
    def select_by_basic(self):
        '''Select stocks using today's k-line data.'''
        client = self.dbClient
        # Select based on today's daily k-line data.
        today = datetime.date.today() # get today's date
        # MongoDB collections used below.
        collection_basic = client.basic_report.records
        collection_result = client.select_result.basic_env
        my_db = client.day
        #print(collection)
        # Drop all records from the previous selection run.
        collection_result.remove()
        # Re-run the selection.
        # NOTE(review): select_by_basic_db is called as a bare function
        # here, but it is defined as a method above — confirm.
        select_by_basic_db(my_db, collection_basic, today, collection_result)
        return
    """
    @function:select_by_basic
    @param void:
    @return void:
    @rtype void:
    """
    def select_by_basic(self):
        '''Select stocks using today's k-line data.

        NOTE(review): exact duplicate of select_by_basic above; this later
        definition shadows the first — confirm one copy can be removed.
        '''
        client = self.dbClient
        # Select based on today's daily k-line data.
        today = datetime.date.today() # get today's date
        # MongoDB collections used below.
        collection_basic = client.basic_report.records
        collection_result = client.select_result.basic_env
        my_db = client.day
        #print(collection)
        # Drop all records from the previous selection run.
        collection_result.remove()
        # Re-run the selection.
        select_by_basic_db(my_db, collection_basic, today, collection_result)
        return
2e9e6bf6e35ae4a1dfdd7e51e95b5be403c30f21 | 7,126 | py | Python | src/headtracking_network/live_training.py | NaviRice/HeadTracking | 8227cc247425ecacd3e789dbbac11d3e5103d3e2 | [
"MIT"
] | 1 | 2019-10-24T14:29:00.000Z | 2019-10-24T14:29:00.000Z | src/headtracking_network/live_training.py | NaviRice/HeadTracking | 8227cc247425ecacd3e789dbbac11d3e5103d3e2 | [
"MIT"
] | 7 | 2017-11-28T23:58:40.000Z | 2022-03-11T23:12:12.000Z | src/headtracking_network/live_training.py | NaviRice/HeadTracking | 8227cc247425ecacd3e789dbbac11d3e5103d3e2 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import os
import navirice_image_pb2
import cv2
import random
import sys
from navirice_generate_data import generate_bitmap_label
from navirice_helpers import navirice_image_to_np
from navirice_helpers import navirice_ir_to_np
from navirice_helpers import map_depth_and_rgb
from navirice_head_detect import get_head_from_img
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features):
    """Build the head-localization CNN graph (TensorFlow 1.x).

    Takes a batch of 424x512 single-channel depth images and returns a
    sigmoid-activated tensor with 3 outputs per image — presumably
    (x, y, radius) given how main() consumes them; TODO confirm.
    """
    # Reshape to NHWC: unknown batch size, 424x512, one channel.
    input_layer = tf.reshape(features, [-1, 424, 512, 1])
    mp0 = input_layer
    # Three progressively downsampled (2x each) views of the input.
    mp1 = max_pool_2x2(mp0)
    mp2 = max_pool_2x2(mp1)
    mp3 = max_pool_2x2(mp2)
    # One conv "coder" per scale, then a second conv on each branch.
    encoder1 = coder(mp1, [10,10,1,2], True)
    encoder2 = coder(mp2, [10,10,1,4], True)
    encoder3 = coder(mp3, [10,10,1,4], True)
    encoder4 = coder(encoder1, [10,10,2,4], True)
    encoder5 = coder(encoder2, [10,10,4,8], True)
    encoder6 = coder(encoder3, [10,10,4,8], True)
    # Flatten each branch and project it to a shared 1024-d space.
    W_fc1 = weight_variable([256*212*4, 1024])
    encoder4_last_flat = tf.reshape(encoder4, [-1, 256*212*4])
    h_fc1 = tf.matmul(encoder4_last_flat, W_fc1)
    W_fc2 = weight_variable([128*106*8, 1024])
    encoder5_last_flat = tf.reshape(encoder5, [-1, 128*106*8])
    h_fc2 = tf.matmul(encoder5_last_flat, W_fc2)
    W_fc3 = weight_variable([64*53*8, 1024])
    encoder6_last_flat = tf.reshape(encoder6, [-1, 64*53*8])
    h_fc3 = tf.matmul(encoder6_last_flat, W_fc3)
    # Merge the three branches by summation, then squash.
    merge_layer = tf.nn.sigmoid(h_fc3 + h_fc2 + h_fc1)
    # NOTE(review): W_fc2/h_fc2 are reassigned here, discarding the
    # branch tensors of the same name above — works, but confusing.
    W_fc2 = weight_variable([1024, 3])
    h_fc2 = tf.nn.sigmoid(tf.matmul(merge_layer, W_fc2))
    return h_fc2
def coder(input_layer, shape, do_relu):
    """Convolve input_layer with a fresh weight tensor of the given
    filter shape, optionally applying a leaky-ReLU activation."""
    weights = weight_variable(shape)
    convolved = conv2d(input_layer, weights)
    return tf.nn.leaky_relu(convolved) if do_relu else convolved
def conv2d(x, W):
    """Apply a 2-D convolution with stride 1 and 'SAME' padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
    """Downsample a feature map by 2x with 2x2 max pooling."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def weight_variable(shape):
    """Create a weight Variable of the given shape, initialized from a
    truncated normal distribution (stddev 0.1)."""
    init_values = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_values)
def bias_variable(shape):
    """Create a bias Variable of the given shape, initialized to the
    small positive constant 0.1."""
    init_values = tf.constant(0.1, shape=shape)
    return tf.Variable(init_values)
def main():
    """Interactive live-training loop.

    Streams IR/depth frames from a Kinect server, uses the OpenCV head
    detector to label depth frames, and trains the CNN online.
    Keys: space = capture a labelled sample, t = start training,
    r = rest (stop training), s = save model, l = load model.
    """
    scale_val = 1.0/8.0
    # Placeholders: depth image batch in, (x, y, radius) targets out.
    x = tf.placeholder(tf.float32, [None, 424, 512, 1])
    y_ = tf.placeholder(tf.float32, [None, 3])
    y_conv = cnn_model_fn(x)
    #cost = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)
    cost = tf.square(y_ - y_conv)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cost)
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    print("------------------OUT SHAPES-------------------")
    print(y_.get_shape())
    print(y_conv.get_shape())
    print("-----------------------------------------------")
    cnt = 0
    from navirice_get_image import KinectClient
    kc = KinectClient('127.0.0.1', 29000)
    kc.navirice_capture_settings(False, True, True)
    s_train = False
    r_train = False
    # Replay buffer of (inverted depth image, [x, y, r]) samples.
    train_set_input = []
    train_set_expected =[]
    train_set_size = 100000
    saver = tf.train.Saver()
    while(True):
        img_set, last_count = kc.navirice_get_image()
        if(s_train):
            # One-shot capture requested via the space key.
            s_train = False
            if(img_set != None and img_set.IR.width > 0 and img_set.Depth.width > 0):
                ir_image = navirice_ir_to_np(img_set.IR)
                depth_image = navirice_image_to_np(img_set.Depth)
                # Invert depth so nearer objects have larger values.
                inverted_depth = np.ones(depth_image.shape)
                inverted_depth = inverted_depth - depth_image
                # Label the frame with the classical (cascade) detector.
                cv_result = get_head_from_img(ir_image)
                if cv_result is not None:
                    arr = [cv_result[0], cv_result[1], cv_result[2]]
                    if len(train_set_input) < train_set_size:
                        train_set_input.append(inverted_depth)
                        train_set_expected.append(arr)
                    else:
                        # Buffer full: overwrite a random existing sample.
                        if(random.randint(0, 10000) > -1):
                            i = random.randint(0, train_set_size-1)
                            train_set_input[i] = inverted_depth
                            train_set_expected[i] = arr
                    #train_step.run(session=sess, feed_dict={x: train_set_input, y_: train_set_expected})
                    # Show the detector's label on a copy of the frame.
                    dp = inverted_depth.copy()
                    cv2.circle(dp, (int(cv_result[0]*512), int(cv_result[1]*424)), int(cv_result[2]*400), (255, 0, 0), thickness=3, lineType=8, shift=0)
                    cv2.imshow("idl", dp)
                    print("db count: ", len(train_set_input))
        if(img_set != None and img_set.IR.width > 0 and img_set.Depth.width > 0):
            # Run inference on the current frame and display the result.
            depth_image = navirice_image_to_np(img_set.Depth)
            ir_image = navirice_ir_to_np(img_set.IR)
            inverted_depth = np.ones(depth_image.shape)
            inverted_depth = inverted_depth - depth_image
            tests = []
            tests.append(inverted_depth)
            outs = sess.run(y_conv, feed_dict={x: tests})
            xf = outs[0][0]
            yf = outs[0][1]
            radiusf = outs[0][2]
            print("nnoutput x:", xf, "y: ", yf," r:", radiusf)
            if radiusf < 0:
                radiusf = 0
            cv2.circle(tests[0], (int(xf*512), int(yf*424)), int(radiusf*400), (255, 0, 0), thickness=3, lineType=8, shift=0)
            cv2.imshow("id",tests[0])
        if(r_train):
            # Train on a random minibatch of 100 buffered samples.
            tsi=[]
            tse=[]
            for i in range(100):
                random_index = random.randint(0, len(train_set_input)-1)
                tsi.append(train_set_input[random_index])
                tse.append(train_set_expected[random_index])
            print("TRAINING")
            train_step.run(session=sess, feed_dict={x: tsi, y_: tse})
        key = cv2.waitKey(10) & 0xFF
        #print("key: ", key)
        # train
        if(key == ord('t')):
            r_train = True
        # rest
        if(key == ord('r')):
            r_train = False
        # (space) capture
        if(key == 32):
            s_train = True
        # save model
        if(key == ord('s')):
            loc = input("Enter file destination to save: ")
            if(len(loc) > 0):
                try:
                    saver.save(sess, loc)
                except ValueError:
                    print("Error: Did not enter a path..")
        # load model
        if(key == ord('l')):
            loc = input("Enter file destination to load: ")
            if(len(loc) > 0):
                try:
                    saver.restore(sess, loc)
                except ValueError:
                    print("Error: no file with that destination")
# Entry point: start the interactive live-training loop.
if __name__ == "__main__":
    main()
| 33.772512 | 152 | 0.57648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 849 | 0.119141 |
2e9f1ea21cc06a6a8937ede1cde54d73757d1f69 | 1,448 | py | Python | src/python-opencv/001-base/test_003_change.py | bjlhx15/python-algorithm | bbd162e194359a01806922d73b709fe64fcfa422 | [
"Apache-2.0"
] | null | null | null | src/python-opencv/001-base/test_003_change.py | bjlhx15/python-algorithm | bbd162e194359a01806922d73b709fe64fcfa422 | [
"Apache-2.0"
] | null | null | null | src/python-opencv/001-base/test_003_change.py | bjlhx15/python-algorithm | bbd162e194359a01806922d73b709fe64fcfa422 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
import cv2
import numpy as np
# 仿射变换(图像位置校正)
def img_three(imgPath):
    """Affine transform (image alignment) demo: three point pairs define
    a plane, cv2.getAffineTransform derives the 2x3 matrix relating
    them, and cv2.warpAffine applies it to the whole image.
    """
    img = cv2.imread(imgPath)
    rows, cols, _ = img.shape
    # Three source points and where each of them should land.
    src_triangle = np.float32([[50, 50], [200, 50], [50, 200]])
    dst_triangle = np.float32([[10, 100], [200, 50], [100, 250]])
    warp_matrix = cv2.getAffineTransform(src_triangle, dst_triangle)
    output = cv2.warpAffine(img, warp_matrix, (cols, rows))
    cv2.imshow('input1', img)
    cv2.imshow('output1', output)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def img_four(imgPath):
    """Perspective transform demo: four point pairs determine a 3x3
    matrix (cv2.getPerspectiveTransform) which cv2.warpPerspective then
    uses to map one quadrilateral onto another.
    """
    img = cv2.imread(imgPath)
    rows, cols, _ = img.shape
    # Source quadrilateral and the square it should be mapped onto.
    src_quad = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
    dst_quad = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
    perspective = cv2.getPerspectiveTransform(src_quad, dst_quad)
    output = cv2.warpPerspective(img, perspective, (cols, rows))
    cv2.imshow('input2', img)
    cv2.imshow('output2', output)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Demo entry point; img_three (the affine variant) is kept disabled.
    imgPath = 'src/python-opencv/a.jpg'
    # img_three(imgPath)
    img_four(imgPath)
2ea01d9b33fe2e0d0e0d8aac551b396ffa45ae0c | 6,836 | py | Python | affine/preprocessing.py | OSU-slatelab/geometric-embedding-properties | 0436d86303439681ad74aa4d29a53a15953cbb88 | [
"MIT"
] | 3 | 2019-06-06T18:57:22.000Z | 2020-04-03T07:47:14.000Z | affine/preprocessing.py | OSU-slatelab/geometric-embedding-properties | 0436d86303439681ad74aa4d29a53a15953cbb88 | [
"MIT"
] | null | null | null | affine/preprocessing.py | OSU-slatelab/geometric-embedding-properties | 0436d86303439681ad74aa4d29a53a15953cbb88 | [
"MIT"
] | 1 | 2019-09-23T20:44:04.000Z | 2019-09-23T20:44:04.000Z |
from progressbar import progressbar
from tqdm import tqdm
import multiprocessing as mp
import pandas as pd
import numpy as np
import pyemblib
import scipy
import queue
import time
import sys
import os
'''
preprocessing.py
Preprocessing methods for cuto.py.
'''
#========1=========2=========3=========4=========5=========6=========7==
def check_valid_dir(some_dir):
    """Print a loud warning banner and terminate the program when
    *some_dir* is not an existing directory; otherwise do nothing."""
    if os.path.isdir(some_dir):
        return
    border = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    for _ in range(3):
        print(border)
    print("")
    print("DIES IST EIN UNGÜLTIGES VERZEICHNIS!!!!")
    print("")
    for _ in range(3):
        print(border)
    exit()
#========1=========2=========3=========4=========5=========6=========7==
def check_valid_file(some_file):
    """Print a loud warning banner and terminate the program when
    *some_file* is not an existing regular file; otherwise do nothing."""
    if os.path.isfile(some_file):
        return
    border = "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    for _ in range(3):
        print(border)
    print("")
    print("DIES IST KEIN GÜLTIGER SPEICHERORT FÜR DATEIEN!!!!")
    print("")
    for _ in range(3):
        print(border)
    exit()
#========1=========2=========3=========4=========5=========6=========7==
def loadGloveModel(gloveFile):
    """Load a GloVe text embedding file into a dict of token -> np.ndarray.

    Each line of the file is expected to be: token v1 v2 ... vd
    (whitespace separated).
    """
    print("Loading Glove Model")
    model = {}
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(gloveFile, 'r') as f:
        for line in f:
            splitLine = line.split()
            word = splitLine[0]
            model[word] = np.array([float(val) for val in splitLine[1:]])
    print("Done.", len(model), " words loaded!")
    return model
#========1=========2=========3=========4=========5=========6=========7==
# pass None to vocab to use use entire embedding
# RETURNS: [numpy matrix of word vectors, df of the labels]
def process_embedding(emb_path, emb_format, first_n, vocab):
    """Read an embedding file via pyemblib and split it into arrays.

    Pass None for *vocab* to use the entire embedding; otherwise only
    tokens present in *vocab* are kept.  first_n == 0 disables the
    first-n truncation.  Returns (vectors_matrix, label_array): the word
    vectors as a numpy matrix and the matching token labels.
    """
    print("Preprocessing. ")
    file_name_length = len(emb_path)
    extension = os.path.basename(emb_path).split('.')[-1]
    # Decide if it's a binary or text embedding file, and read in
    # the embedding as a dict object, where the keys are the tokens
    # (strings), and the values are the components of the corresponding
    # vectors (floats).
    embedding = {}
    read_mode = None
    if first_n == 0 or emb_format == pyemblib.Format.Glove:
        print("No value passed for first_n or feature not supported. ")
        first_n = None
    if extension == 'bin':
        read_mode = pyemblib.Mode.Binary
        binary = True
        print("binary reac.")
    elif extension == 'txt':
        read_mode = pyemblib.Mode.Text
        binary = False
        print("text read.")
    else:
        print("Unsupported embedding mode. ")
        exit()
    '''
    if emb_format == pyemblib.Format.Glove:
        embedding = loadGloveModel(emb_path)
    '''
    # Only pass first_n to pyemblib when a positive count was requested.
    if first_n:
        embedding = pyemblib.read( emb_path,
                                   format=emb_format,
                                   mode=read_mode,
                                   first_n=first_n,
                                   replace_errors=True,
                                   skip_parsing_errors=True,
                                   )
    else:
        embedding = pyemblib.read( emb_path,
                                   format=emb_format,
                                   mode=read_mode,
                                   replace_errors=True,
                                   skip_parsing_errors=True,
                                   )
    # take a subset of the vocab
    new_embedding = {}
    if (vocab != None):
        for word in vocab:
            if word in embedding:
                vector = embedding[word]
                new_embedding.update({word:vector})
        embedding = new_embedding
    # convert embedding to pandas dataframe
    # "words_with_friends" is the column label for the vectors
    # this df has shape [num_inputs,2] since the vectors are all in 1
    # column as length d lists
    # NOTE(review): under Python 3, dict.items() is a view and
    # np.array(embedding.items()) does not yield a row-per-pair array —
    # confirm this runs under Python 2 semantics or wrap in list(...).
    emb_array = np.array(embedding.items())
    sys.stdout.flush()
    label_array = np.array([ row[0] for row in emb_array.tolist() ])
    sys.stdout.flush()
    vectors_matrix = np.array([ row[1:] for row in emb_array.tolist() ])
    vectors_matrix = np.array([ row[0] for row in vectors_matrix ])
    sys.stdout.flush()
    '''
    emb_df = pd.Series(embedding, name="words_with_friends")
    # print(emb_df.head(10))
    # reset the index of the dataframe
    emb_df = emb_df.reset_index()
    # print(emb_df.head(10))
    # matrix of just the vectors
    emb_matrix = emb_df.words_with_friends.values.tolist()
    # print(emb_matrix[0:10])
    # dataframe of just the vectors
    vectors_df = pd.DataFrame(emb_matrix,index=emb_df.index)
    # print(vectors_df.head(10))
    # numpy matrix of just the vectors
    vectors_matrix = vectors_df.as_matrix()
    # print(vectors_matrix[0:10])
    '''
    return vectors_matrix, label_array
#========1=========2=========3=========4=========5=========6=========7==
# pass None to vocab to use use entire embedding
# DOES: Saves the first n words in a new embedding file
def subset_embedding(emb_path, first_n, vocab):
    """Save the first ``first_n`` words of an embedding to a "_SUBSET.txt" file.

    Parameters:
        emb_path: path to a ".bin" (binary) or ".txt" (text) embedding file.
        first_n: number of leading words to read from the file.
        vocab: iterable of tokens to keep, or None to keep every word read.

    The subset is always written in text format next to the original file,
    with "_SUBSET.txt" replacing the original extension. Exits the process
    on an unsupported or invalid file path.
    """
    print("Preprocessing. ")
    # Validate the file path BEFORE reading: the original code read the whole
    # (potentially large) embedding first and only then checked the extension.
    file_name_length = len(emb_path)
    extension = emb_path[file_name_length - 4:file_name_length]
    if extension != ".txt" and extension != ".bin":
        print("Invalid file path. ")
        exit()
    # Read the embedding as a dict mapping token (str) -> vector components,
    # choosing the read mode from the actual extension instead of the old
    # fragile last-character ('n'/'t') heuristic.
    if extension == ".bin":
        embedding = pyemblib.read(emb_path,
                                  mode=pyemblib.Mode.Binary,
                                  first_n=first_n)
    else:
        embedding = pyemblib.read(emb_path,
                                  mode=pyemblib.Mode.Text,
                                  first_n=first_n)
    # Restrict to the requested vocabulary; previously the `vocab` argument
    # was accepted but silently ignored. None keeps the entire embedding,
    # which preserves the old behavior for existing callers.
    if vocab is not None:
        embedding = {word: embedding[word] for word in vocab if word in embedding}
    # Write the subset as a text embedding next to the original file.
    path_no_ext = emb_path[0:file_name_length - 4]
    new_path = path_no_ext + "_SUBSET.txt"
    pyemblib.write(embedding,
                   new_path,
                   mode=pyemblib.Mode.Text)
    return
| 32.708134 | 72 | 0.510532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,950 | 0.43135 |
2ea13d20660fbbf34a9c2a82526adf203ee02dd1 | 6,931 | py | Python | restcli/cliparser.py | jykntr/rest-cli-client | d9ebf5a6e056ba7ab0e59614d9702e373696580e | [
"MIT"
] | 1 | 2015-07-03T10:43:04.000Z | 2015-07-03T10:43:04.000Z | restcli/cliparser.py | jykntr/rest-cli-client | d9ebf5a6e056ba7ab0e59614d9702e373696580e | [
"MIT"
] | null | null | null | restcli/cliparser.py | jykntr/rest-cli-client | d9ebf5a6e056ba7ab0e59614d9702e373696580e | [
"MIT"
] | null | null | null | import argparse
from profile import Profile
# Names of the CLI option keys shared by the main parser and every request
# sub-parser; also used as keys in the parsed-arguments dictionary.
PROXY = 'proxy'    # repeatable protocol->proxy mapping option
VERIFY = 'verify'  # SSL certificate verification flag (--verify / --no-verify)
DEBUG = 'verbose'  # hidden verbose/debug flag (argparse dest is 'verbose')
class CliParser():
    """Builds the command-line interface from configured requests and profiles.

    A small pre-parser extracts the ``--profile`` option first, so that each
    saved request's variables can be split into optional arguments (the active
    profile supplies a default) and required positional arguments (no default
    available). Each saved request then becomes an argparse sub-command.
    """

    def __init__(self, requests, profiles, options):
        """Build the full parser.

        :param requests: saved request definitions; each becomes a sub-command
        :param profiles: profile objects providing variable substitutions
        :param options: global options object (supplies the default SSL
            verification setting via ``get_verify()``)
        """
        self.requests = requests
        self.profiles = profiles
        self.options = options
        self.args = None  # populated by parse_args()

        # Use a pre-parser to get options that aren't data driven by the
        # config file: the global options and the selected profile.
        preparser = argparse.ArgumentParser(add_help=False)
        preparser = self._add_global_options(preparser)
        known_args, _ = preparser.parse_known_args()

        # Now build the real parser.
        self.parser = argparse.ArgumentParser()
        # Options that can be specified with or without a request sub-command.
        self._add_global_options(self.parser)

        # The specified profile (or an empty placeholder if none was given).
        profile = self._get_profile(known_args.profile)

        # Add saved requests as sub-commands.
        subparsers = self.parser.add_subparsers(
            title='Requests',
            help='The request to execute'
        )
        for request in self.requests:
            request_parser = subparsers.add_parser(
                request.name,
                description=str(request),
                formatter_class=argparse.RawDescriptionHelpFormatter
            )
            # Record which request was chosen so the caller can tell which
            # sub-command was specified after parsing.
            request_parser.set_defaults(request=request.name)
            # Options common to all sub-commands.
            self._add_global_options(request_parser)
            self._add_http_options(request_parser)
            self._add_variable_options(request_parser, request, profile)

    def _add_http_options(self, request_parser):
        """Add HTTP request options (proxy mapping and SSL verification)."""
        options_group = request_parser.add_argument_group(
            title='Options',
            description='Options to use when making HTTP requests'
        )
        options_group.add_argument(
            '--' + PROXY,
            default=[],
            action='append',
            metavar='host:port',
            help='Maps a protocol to a proxy. For example: "http://user:pass@proxy.url.com:8080". ' +
                 'Multiple proxies can be defined for different protocols.'
        )
        no_verify_mx_group = options_group.add_mutually_exclusive_group()
        no_verify_mx_group.add_argument(
            '--' + VERIFY,
            dest=VERIFY,
            action='store_true',
            help='Verify SSL certificates.'
        )
        no_verify_mx_group.add_argument(
            '--no-' + VERIFY,
            action='store_false',
            dest=VERIFY,
            help='Do not verify SSL certificates.'
        )
        # The default verify setting comes from the global options.
        no_verify_mx_group.set_defaults(verify=self.options.get_verify())

    def _add_variable_options(self, request_parser, request, profile):
        """Add the request's variables as CLI arguments.

        Variables that have a value in the active profile become optional
        ``--name`` arguments defaulting to the profile value; the rest become
        required positional arguments.
        """
        optional_group = None  # groups are only created when first needed
        required_group = None
        for variable in request.get_variable_list():
            if variable in profile.properties:
                if not optional_group:
                    optional_group = request_parser.add_argument_group(
                        title='Optional variable arguments',
                        description='Variables that have a default value in the active profile ' +
                                    '(' + profile.name + ')'
                    )
                optional_group.add_argument(
                    '--' + variable,
                    help='Default value from profile: ' + profile.properties.get(variable),
                    default=profile.properties.get(variable)
                )
            else:
                if not required_group:
                    required_group = request_parser.add_argument_group(
                        title='Required variable arguments',
                        description='Variables that have no default value in the active profile ' +
                                    '(' + profile.name + ')'
                    )
                required_group.add_argument(variable)

    def parse_args(self):
        """Parse the command line and return the arguments as a dict.

        The repeatable ``--proxy`` values (strings like
        "http://proxy.com:8080") are converted into a protocol -> proxy-URL
        mapping stored under the 'proxy' key.
        """
        dictionary_args = vars(self.parser.parse_args())
        # Replace the list of proxy strings with a protocol->proxy dict.
        proxy_list = dictionary_args.pop(PROXY, [])
        dictionary_args[PROXY] = {}
        for proxy in proxy_list:
            # Split off the protocol; the whole original string (protocol
            # included) is kept as the mapped value.
            split_proxy = proxy.split(':', 1)
            dictionary_args[PROXY][split_proxy[0]] = proxy
        self.args = dictionary_args
        return self.args

    def get_profile(self, default):
        """Return the profile name to use, or None if --no-profile was given.

        Must be called after parse_args(). BUG FIX: parse_args() stores a
        plain dict (``vars()`` of the namespace) in self.args, so the values
        must be read by key; the original attribute access
        (``self.args.no_profile``) raised AttributeError.
        """
        if self.args['no_profile']:
            return None
        if self.args['profile'] is not None:
            return self.args['profile']
        return default

    def _get_profile_names(self):
        """Names of all configured profiles (choices for --profile)."""
        return [str(profile.name) for profile in self.profiles]

    def _get_profile(self, name):
        """Return the profile with the given name, or an empty placeholder."""
        for profile in self.profiles:
            if name == profile.name:
                return profile
        return Profile({'name': 'none'})

    def _add_global_options(self, parser):
        """Attach the profile-selection and hidden debug options to parser."""
        profiles_group = parser.add_argument_group(
            title='Profiles',
            description='Indicates which profile to use, if any, for variable substitution'
        )
        profiles_mx_group = profiles_group.add_mutually_exclusive_group()
        profiles_mx_group.add_argument(
            '--profile',
            '-p',
            choices=self._get_profile_names(),
            help='The name of the profile to use for variable substitution'
        )
        profiles_mx_group.add_argument(
            '--no-profile',
            action="store_true",
            default=False,
            help='No profile will be used for variable substitution'
        )
        parser.add_argument(
            '--' + DEBUG,
            '-d',
            action='store_true',
            help=argparse.SUPPRESS
        )
        return parser
| 38.292818 | 112 | 0.568605 | 6,831 | 0.985572 | 0 | 0 | 0 | 0 | 0 | 0 | 2,073 | 0.299091 |
2ea1906135b6dd2cc25bcc298892705479bf616f | 448 | py | Python | src/widgets/button.py | priscilafaliani/Analisis-De-Datos | 247695e3e976f042fc453d02bd9d4d722fb331ff | [
"CC0-1.0"
] | 6 | 2021-05-02T08:28:42.000Z | 2021-05-07T13:18:32.000Z | src/widgets/button.py | priscilafaliani/Analisis-De-Datos | 247695e3e976f042fc453d02bd9d4d722fb331ff | [
"CC0-1.0"
] | null | null | null | src/widgets/button.py | priscilafaliani/Analisis-De-Datos | 247695e3e976f042fc453d02bd9d4d722fb331ff | [
"CC0-1.0"
] | null | null | null | """Builds a button according to the theme of the app."""
import PySimpleGUI as sg
from src.globals import colors
def build(text, key, font, size):
    """Construct and return an sg.Button styled with the app's color theme."""
    return sg.Button(
        button_text=text,
        button_color=(colors.WHITE, colors.LIGHT_GRAY),
        mouseover_colors=(colors.WHITE, colors.ORANGE),
        key=key,
        font=font,
        size=size,
    )
2ea1bf2e9cb8105280a4f2635279518d125a4312 | 8,005 | py | Python | python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py | Li-fAngyU/Paddle | e548f65f96697830035a28f9070b40829408ccdb | [
"Apache-2.0"
] | 8 | 2016-08-15T07:02:27.000Z | 2016-08-24T09:34:00.000Z | python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py | Li-fAngyU/Paddle | e548f65f96697830035a28f9070b40829408ccdb | [
"Apache-2.0"
] | 1 | 2022-01-28T07:23:22.000Z | 2022-01-28T07:23:22.000Z | python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py | Li-fAngyU/Paddle | e548f65f96697830035a28f9070b40829408ccdb | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
def get_outputs(DOut, X, Y):
    """Reference gradients for Out = X @ Y + Bias.

    Given the upstream gradient DOut, returns (DX, DY, DBias) computed with
    plain NumPy: DX = DOut @ Y^T, DY = X^T @ DOut, DBias = column sums of DOut.
    """
    grad_x = DOut @ Y.T
    grad_y = X.T @ DOut
    grad_bias = DOut.sum(axis=0)
    return grad_x, grad_y, grad_bias
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP16(OpTest):
    """fused_gemm_epilogue_grad requesting DX, DY and DBias, FP16 inputs."""

    def setUp(self):
        # Op under test and the CUDA device it runs on.
        self.op_type = "fused_gemm_epilogue_grad"
        self.place = core.CUDAPlace(0)
        self.init_dtype_type()
        # Random inputs shifted by -0.5 so values are centred around zero.
        self.inputs = {
            'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
            'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
            'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
        }
        self.attrs = {"activation": 'none'}
        # Expected gradients computed with plain NumPy (see get_outputs).
        DX, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'],
                                    self.inputs['Y'])
        self.outputs = {'DX': DX, 'DY': DY, 'DBias': DBias}

    def init_dtype_type(self):
        # Overridden by the FP32/FP64 subclasses to vary dtype and tolerance.
        self.dtype = np.float16
        self.atol = 1e-3

    def test_check_output(self):
        # Skip when this device cannot run FP16 kernels.
        if self.dtype == np.float16 and not core.is_float16_supported(
                self.place):
            return
        self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP32(
        TestFuseGemmEpilogueGradOpDXYBiasFP16):
    """Same DX/DY/DBias check with float32 inputs and a tighter tolerance."""

    def init_dtype_type(self):
        self.dtype = np.single
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYBiasFP64(
        TestFuseGemmEpilogueGradOpDXYBiasFP16):
    """Same DX/DY/DBias check with float64 inputs and a tighter tolerance."""

    def init_dtype_type(self):
        self.dtype = np.double
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP16(OpTest):
    """fused_gemm_epilogue_grad requesting only DY and DBias, FP16 inputs."""

    def setUp(self):
        self.op_type = "fused_gemm_epilogue_grad"
        self.place = core.CUDAPlace(0)
        self.init_dtype_type()
        # Random inputs shifted by -0.5 so values are centred around zero.
        self.inputs = {
            'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
            'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
            'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
        }
        self.attrs = {"activation": 'none'}
        # Only DY and DBias are expected; the DX reference is discarded.
        _, DY, DBias = get_outputs(self.inputs['DOut'], self.inputs['X'],
                                   self.inputs['Y'])
        self.outputs = {'DY': DY, 'DBias': DBias}

    def init_dtype_type(self):
        # Overridden by the FP32/FP64 subclasses to vary dtype and tolerance.
        self.dtype = np.float16
        self.atol = 1e-3

    def test_check_output(self):
        # Skip when this device cannot run FP16 kernels.
        if self.dtype == np.float16 and not core.is_float16_supported(
                self.place):
            return
        self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP32(
        TestFuseGemmEpilogueGradOpDYBiasFP16):
    """Same DY/DBias check with float32 inputs and a tighter tolerance."""

    def init_dtype_type(self):
        self.dtype = np.single
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYBiasFP64(
        TestFuseGemmEpilogueGradOpDYBiasFP16):
    """Same DY/DBias check with float64 inputs and a tighter tolerance."""

    def init_dtype_type(self):
        self.dtype = np.double
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP16(OpTest):
    """fused_gemm_epilogue_grad requesting only DY, FP16 inputs."""

    def setUp(self):
        self.op_type = "fused_gemm_epilogue_grad"
        self.place = core.CUDAPlace(0)
        self.init_dtype_type()
        # Random inputs shifted by -0.5 so values are centred around zero.
        self.inputs = {
            'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
            'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
            'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
        }
        self.attrs = {"activation": 'none'}
        # Only DY is expected; the DX and DBias references are discarded.
        _, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'],
                               self.inputs['Y'])
        self.outputs = {'DY': DY}

    def init_dtype_type(self):
        # Overridden by the FP32/FP64 subclasses to vary dtype and tolerance.
        self.dtype = np.float16
        self.atol = 1e-3

    def test_check_output(self):
        # Skip when this device cannot run FP16 kernels.
        if self.dtype == np.float16 and not core.is_float16_supported(
                self.place):
            return
        self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP32(TestFuseGemmEpilogueGradOpDYFP16):
    """Same DY-only check with float32 inputs and a tighter tolerance."""

    def init_dtype_type(self):
        self.dtype = np.single
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDYFP64(TestFuseGemmEpilogueGradOpDYFP16):
    """Same DY-only check with float64 inputs and a tighter tolerance."""

    def init_dtype_type(self):
        self.dtype = np.double
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP16(OpTest):
    """fused_gemm_epilogue_grad requesting DX and DY (no DBias), FP16 inputs."""

    def setUp(self):
        self.op_type = "fused_gemm_epilogue_grad"
        self.place = core.CUDAPlace(0)
        self.init_dtype_type()
        # Random inputs shifted by -0.5 so values are centred around zero.
        self.inputs = {
            'DOut': np.random.random((8, 128)).astype(self.dtype) - 0.5,
            'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
            'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5
        }
        self.attrs = {"activation": 'none'}
        # DX and DY are expected; the DBias reference is discarded.
        DX, DY, _ = get_outputs(self.inputs['DOut'], self.inputs['X'],
                                self.inputs['Y'])
        self.outputs = {'DX': DX, 'DY': DY}

    def init_dtype_type(self):
        # Overridden by the FP32/FP64 subclasses to vary dtype and tolerance.
        self.dtype = np.float16
        self.atol = 1e-3

    def test_check_output(self):
        # Skip when this device cannot run FP16 kernels.
        if self.dtype == np.float16 and not core.is_float16_supported(
                self.place):
            return
        self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP32(TestFuseGemmEpilogueGradOpDXYFP16):
    """Same DX/DY check with float32 inputs and a tighter tolerance."""

    def init_dtype_type(self):
        self.dtype = np.single
        self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGradOpDXYFP64(TestFuseGemmEpilogueGradOpDXYFP16):
    """Same DX/DY check with float64 inputs and a tighter tolerance."""

    def init_dtype_type(self):
        self.dtype = np.double
        self.atol = 1e-6
if __name__ == "__main__":
    # Fixed seed so the randomly generated test inputs are reproducible.
    np.random.seed(0)
    unittest.main()
| 33.354167 | 75 | 0.6396 | 5,194 | 0.648844 | 0 | 0 | 6,910 | 0.86321 | 0 | 0 | 1,509 | 0.188507 |
2ea653e279e69de83e480c6b72197a40442f52f8 | 435 | py | Python | scripts/classtest.py | alandegenhart/neuropy | a9f7b735da78a5296f648cb3bb94c1c31843c668 | [
"MIT"
] | 1 | 2020-09-03T19:33:48.000Z | 2020-09-03T19:33:48.000Z | scripts/classtest.py | alandegenhart/neuropy | a9f7b735da78a5296f648cb3bb94c1c31843c668 | [
"MIT"
] | null | null | null | scripts/classtest.py | alandegenhart/neuropy | a9f7b735da78a5296f648cb3bb94c1c31843c668 | [
"MIT"
] | null | null | null | """Class static method test"""
class TestClass():
    """Test class"""

    def __init__(self, a=1, b=2):
        # Store the two operands used by the add methods.
        self.a = a
        self.b = b

    def add(self):
        """Sum of the instance's two operands."""
        return self.b + self.a

    @staticmethod
    def static_add(a, b):
        """Return the sum of both operands, each doubled."""
        doubled_a = a * 2
        doubled_b = b * 2
        return doubled_a + doubled_b

    def add2(self):
        """Doubled sum of the instance's operands, via static_add."""
        return self.static_add(self.a, self.b)
if __name__ == '__main__':
    # Manual smoke test: exercise the instance method and the
    # static-method-based variant.
    C = TestClass(a=1, b=2)
    print(C.add())
    print(C.add2())
| 16.730769 | 46 | 0.528736 | 305 | 0.701149 | 0 | 0 | 68 | 0.156322 | 0 | 0 | 56 | 0.128736 |