hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
19f6345ae5c1d2126796fc52c6c38646c585008d | 7,549 | py | Python | end_to_end/nlp_mlops_company_sentiment/scripts/model_deploy_v2.py | pollyrolly/amazon-sagemaker-examples | b1a56b4dc96201b769f7bbc1e207649423874586 | [
"Apache-2.0"
] | 3 | 2020-04-07T00:58:53.000Z | 2020-08-24T04:28:13.000Z | end_to_end/nlp_mlops_company_sentiment/scripts/model_deploy_v2.py | pollyrolly/amazon-sagemaker-examples | b1a56b4dc96201b769f7bbc1e207649423874586 | [
"Apache-2.0"
] | 2 | 2020-09-26T01:31:38.000Z | 2020-10-07T22:23:56.000Z | end_to_end/nlp_mlops_company_sentiment/scripts/model_deploy_v2.py | pollyrolly/amazon-sagemaker-examples | b1a56b4dc96201b769f7bbc1e207649423874586 | [
"Apache-2.0"
] | 1 | 2020-04-13T19:49:12.000Z | 2020-04-13T19:49:12.000Z | #!/usr/bin/env python
import argparse
import subprocess
import sys
import os
import json
import boto3
import botocore
import sagemaker
from botocore.exceptions import ClientError
from sagemaker import ModelPackage
from pathlib import Path
import logging
import traceback
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# parameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--initial-instance-count", type=int, default=1)
parser.add_argument("--endpoint-instance-type", type=str, default="ml.m5.xlarge")
parser.add_argument("--endpoint-name", type=str)
parser.add_argument("--model-package-group-name", type=str)
parser.add_argument("--role", type=str)
parser.add_argument("--region", type=str)
args, _ = parser.parse_known_args()
sagemaker_session = sagemaker.Session(boto3.session.Session(region_name=args.region))
sagemaker_boto_client = boto3.client("sagemaker", region_name=args.region)
def describe_model_package(model_package_arn):
"""
Describe the model version details
"""
try:
model_package = sagemaker_boto_client.describe_model_package(
ModelPackageName=model_package_arn
)
LOGGER.info("{}".format(model_package))
if len(model_package) == 0:
error_message = ("No ModelPackage found for: {}".format(model_package_arn))
LOGGER.error("{}".format(error_message))
raise Exception(error_message)
return model_package
except ClientError as e:
stacktrace = traceback.format_exc()
error_message = e.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
def get_approved_package(model_package_group_name):
"""Gets the latest approved model package for a model package group.
Args:
model_package_group_name: The model package group name.
Returns:
The SageMaker Model Package ARN.
"""
try:
# Get the latest approved model package
response = sagemaker_boto_client.list_model_packages(
ModelPackageGroupName=model_package_group_name,
ModelApprovalStatus="Approved",
SortBy="CreationTime",
MaxResults=100,
)
approved_packages = response["ModelPackageSummaryList"]
# Fetch more packages if none returned with continuation token
while len(approved_packages) == 0 and "NextToken" in response:
LOGGER.debug("Getting more packages for token: {}".format(response["NextToken"]))
response = sagemaker_boto_client.list_model_packages(
ModelPackageGroupName=model_package_group_name,
ModelApprovalStatus="Approved",
SortBy="CreationTime",
MaxResults=100,
NextToken=response["NextToken"],
)
approved_packages.extend(response["ModelPackageSummaryList"])
# Return error if no packages found
if len(approved_packages) == 0:
error_message = ("No approved ModelPackage found for ModelPackageGroup: {}".format(model_package_group_name))
LOGGER.error("{}".format(error_message))
raise Exception(error_message)
model_package = approved_packages[0]
LOGGER.info("Identified the latest approved model package: {}".format(model_package))
return model_package
except ClientError as e:
stacktrace = traceback.format_exc()
error_message = e.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
if __name__=='__main__':
try:
sagemaker_boto_client.describe_endpoint(EndpointName=args.endpoint_name)
print(f'Endpoint {args.endpoint_name} already exists...updating with new Model Version')
model_package_approved = get_approved_package(args.model_package_group_name)
model_package_version = model_package_approved["ModelPackageVersion"]
model_package = describe_model_package(model_package_approved["ModelPackageArn"])
model_name = f'{args.endpoint_name}-model-v{model_package_version}'
ep_config_name = f'{args.endpoint_name}-epc-v{model_package_version}'
# Create a model
new_model = sagemaker_boto_client.create_model(ModelName=model_name,
PrimaryContainer={
'Image': model_package["InferenceSpecification"]["Containers"][0]['Image'],
'Environment': model_package["InferenceSpecification"]["Containers"][0]['Environment']
},
ExecutionRoleArn=args.role)
# Create a new Endpoint Config
create_endpoint_config_api_response = sagemaker_boto_client.create_endpoint_config(
EndpointConfigName=ep_config_name,
ProductionVariants=[
{
'VariantName': f'AllTraffic-v{model_package_version}',
'ModelName': model_name,
'InitialInstanceCount': args.initial_instance_count,
'InstanceType': args.endpoint_instance_type
},
]
)
# Update the existing Endpoint
create_endpoint_api_response = sagemaker_boto_client.update_endpoint(
EndpointName=args.endpoint_name,
EndpointConfigName=ep_config_name
)
create_config('Y')
except ClientError as error:
# endpoint does not exist
if "Could not find endpoint" in error.response['Error']['Message']:
model_package_approved = get_approved_package(args.model_package_group_name)
model_package_arn = model_package_approved["ModelPackageArn"]
model = ModelPackage(role=args.role,
model_package_arn=model_package_arn,
sagemaker_session=sagemaker_session)
try:
model.deploy(initial_instance_count=args.initial_instance_count,
instance_type=args.endpoint_instance_type,
endpoint_name=args.endpoint_name)
create_config('Y')
except ClientError as error:
print(error.response['Error']['Message'])
create_config('N')
error_message = error.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
else:
print(error.response['Error']['Message'])
create_config('N')
error_message = error.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message) | 42.410112 | 145 | 0.617433 | #!/usr/bin/env python
import argparse
import subprocess
import sys
import os
import json
import boto3
import botocore
import sagemaker
from botocore.exceptions import ClientError
from sagemaker import ModelPackage
from pathlib import Path
import logging
import traceback
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# parameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--initial-instance-count", type=int, default=1)
parser.add_argument("--endpoint-instance-type", type=str, default="ml.m5.xlarge")
parser.add_argument("--endpoint-name", type=str)
parser.add_argument("--model-package-group-name", type=str)
parser.add_argument("--role", type=str)
parser.add_argument("--region", type=str)
args, _ = parser.parse_known_args()
sagemaker_session = sagemaker.Session(boto3.session.Session(region_name=args.region))
sagemaker_boto_client = boto3.client("sagemaker", region_name=args.region)
def create_config(flag):
model_created = { 'model_created': flag }
out_path = Path(f'/opt/ml/processing/output/success.json')
out_str = json.dumps(model_created, indent=4)
out_path.write_text(out_str, encoding='utf-8')
def describe_model_package(model_package_arn):
"""
Describe the model version details
"""
try:
model_package = sagemaker_boto_client.describe_model_package(
ModelPackageName=model_package_arn
)
LOGGER.info("{}".format(model_package))
if len(model_package) == 0:
error_message = ("No ModelPackage found for: {}".format(model_package_arn))
LOGGER.error("{}".format(error_message))
raise Exception(error_message)
return model_package
except ClientError as e:
stacktrace = traceback.format_exc()
error_message = e.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
def get_approved_package(model_package_group_name):
"""Gets the latest approved model package for a model package group.
Args:
model_package_group_name: The model package group name.
Returns:
The SageMaker Model Package ARN.
"""
try:
# Get the latest approved model package
response = sagemaker_boto_client.list_model_packages(
ModelPackageGroupName=model_package_group_name,
ModelApprovalStatus="Approved",
SortBy="CreationTime",
MaxResults=100,
)
approved_packages = response["ModelPackageSummaryList"]
# Fetch more packages if none returned with continuation token
while len(approved_packages) == 0 and "NextToken" in response:
LOGGER.debug("Getting more packages for token: {}".format(response["NextToken"]))
response = sagemaker_boto_client.list_model_packages(
ModelPackageGroupName=model_package_group_name,
ModelApprovalStatus="Approved",
SortBy="CreationTime",
MaxResults=100,
NextToken=response["NextToken"],
)
approved_packages.extend(response["ModelPackageSummaryList"])
# Return error if no packages found
if len(approved_packages) == 0:
error_message = ("No approved ModelPackage found for ModelPackageGroup: {}".format(model_package_group_name))
LOGGER.error("{}".format(error_message))
raise Exception(error_message)
model_package = approved_packages[0]
LOGGER.info("Identified the latest approved model package: {}".format(model_package))
return model_package
except ClientError as e:
stacktrace = traceback.format_exc()
error_message = e.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
if __name__=='__main__':
try:
sagemaker_boto_client.describe_endpoint(EndpointName=args.endpoint_name)
print(f'Endpoint {args.endpoint_name} already exists...updating with new Model Version')
model_package_approved = get_approved_package(args.model_package_group_name)
model_package_version = model_package_approved["ModelPackageVersion"]
model_package = describe_model_package(model_package_approved["ModelPackageArn"])
model_name = f'{args.endpoint_name}-model-v{model_package_version}'
ep_config_name = f'{args.endpoint_name}-epc-v{model_package_version}'
# Create a model
new_model = sagemaker_boto_client.create_model(ModelName=model_name,
PrimaryContainer={
'Image': model_package["InferenceSpecification"]["Containers"][0]['Image'],
'Environment': model_package["InferenceSpecification"]["Containers"][0]['Environment']
},
ExecutionRoleArn=args.role)
# Create a new Endpoint Config
create_endpoint_config_api_response = sagemaker_boto_client.create_endpoint_config(
EndpointConfigName=ep_config_name,
ProductionVariants=[
{
'VariantName': f'AllTraffic-v{model_package_version}',
'ModelName': model_name,
'InitialInstanceCount': args.initial_instance_count,
'InstanceType': args.endpoint_instance_type
},
]
)
# Update the existing Endpoint
create_endpoint_api_response = sagemaker_boto_client.update_endpoint(
EndpointName=args.endpoint_name,
EndpointConfigName=ep_config_name
)
create_config('Y')
except ClientError as error:
# endpoint does not exist
if "Could not find endpoint" in error.response['Error']['Message']:
model_package_approved = get_approved_package(args.model_package_group_name)
model_package_arn = model_package_approved["ModelPackageArn"]
model = ModelPackage(role=args.role,
model_package_arn=model_package_arn,
sagemaker_session=sagemaker_session)
try:
model.deploy(initial_instance_count=args.initial_instance_count,
instance_type=args.endpoint_instance_type,
endpoint_name=args.endpoint_name)
create_config('Y')
except ClientError as error:
print(error.response['Error']['Message'])
create_config('N')
error_message = error.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message)
else:
print(error.response['Error']['Message'])
create_config('N')
error_message = error.response["Error"]["Message"]
LOGGER.error("{}".format(stacktrace))
raise Exception(error_message) | 213 | 0 | 23 |
e83e8600cd0a53faabb2d4e6f1d8f0ae874b2c50 | 3,097 | py | Python | crm/forms.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | [
"MIT"
] | 2 | 2016-10-03T08:35:07.000Z | 2016-10-04T07:22:20.000Z | crm/forms.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | [
"MIT"
] | null | null | null | crm/forms.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | [
"MIT"
] | null | null | null | import phonenumbers
from django.forms import (
ModelForm,
HiddenInput,
ValidationError
)
from django.core.validators import validate_email
from crm.models import (
Individual,
IndividualAddress,
IndividualEmail,
IndividualPhone,
SourceType,
Source,
Campaign
)
from crm.validators import validate_address
| 22.605839 | 86 | 0.598967 | import phonenumbers
from django.forms import (
ModelForm,
HiddenInput,
ValidationError
)
from django.core.validators import validate_email
from crm.models import (
Individual,
IndividualAddress,
IndividualEmail,
IndividualPhone,
SourceType,
Source,
Campaign
)
from crm.validators import validate_address
class IndividualForm(ModelForm):
class Meta:
model = Individual
exclude = ['id', 'is_valid', 'is_cleansed']
class IndividualAddressForm(ModelForm):
class Meta:
model = IndividualAddress
exclude = ['id']
widgets = {
'individual': HiddenInput(),
'county': HiddenInput(),
'postal_code_suffix': HiddenInput(),
'administrative_area': HiddenInput(),
'final_type': HiddenInput(),
'formatted_address': HiddenInput(),
'longitude': HiddenInput(),
'latitude': HiddenInput(),
'is_valid': HiddenInput(),
'is_cleansed': HiddenInput()
}
def clean(self):
address = validate_address(self.cleaned_data)
if not address['is_valid']:
raise ValidationError('The given address is invalid.')
return address
class IndividualEmailForm(ModelForm):
class Meta:
model = IndividualEmail
exclude = ['id', 'is_cleansed']
widgets = {
'individual': HiddenInput(),
'is_valid': HiddenInput()
}
def clean(self):
self.cleaned_data['is_valid'] = True
return self.cleaned_data
def clean_email(self):
email = self.cleaned_data.get('email')
validate_email(email)
return email
class IndividualPhoneForm(ModelForm):
number = None
class Meta:
model = IndividualPhone
exclude = ['id', 'is_cleansed']
widgets = {
'type': HiddenInput(),
'individual': HiddenInput(),
'is_valid': HiddenInput()
}
def clean(self):
data = self.cleaned_data
data["type"] = IndividualPhone.get_type(
phonenumbers.number_type(self.number)
)
data['is_valid'] = True
return data
def clean_number(self):
number = self.cleaned_data.get('number')
try:
self.number = phonenumbers.parse(number, self.cleaned_data.get('country'))
except phonenumbers.NumberParseException:
raise ValidationError('We can not parse the given phone number.')
if not phonenumbers.is_valid_number(self.number):
raise ValidationError('The given phone number is not valid.')
return phonenumbers.format_number(
self.number,
phonenumbers.PhoneNumberFormat.E164
)
class SourceTypeForm(ModelForm):
class Meta:
model = SourceType
exclude = ['id']
class SourceForm(ModelForm):
class Meta:
model = Source
exclude = ['id']
class CampaignForm(ModelForm):
class Meta:
model = Campaign
exclude = ['id']
| 1,083 | 1,500 | 161 |
6397d2fcfe22565a93638e495225cca7c825d946 | 1,468 | py | Python | a4kStreaming/api.py | a4k-openproject/a4kStreaming | c9674ddfede5b322496e79ec2a265d0af192d2f8 | [
"MIT"
] | 18 | 2021-01-20T23:33:17.000Z | 2022-01-11T07:51:16.000Z | a4kStreaming/api.py | newt-sc/a4kStreaming | c9674ddfede5b322496e79ec2a265d0af192d2f8 | [
"MIT"
] | 1 | 2021-02-04T18:20:52.000Z | 2021-02-04T18:20:52.000Z | a4kStreaming/api.py | newt-sc/a4kStreaming | c9674ddfede5b322496e79ec2a265d0af192d2f8 | [
"MIT"
] | 4 | 2021-01-17T23:46:29.000Z | 2022-01-23T22:20:24.000Z | # -*- coding: utf-8 -*-
import os
import json
import importlib
api_mode_env_name = 'A4KSTREAMING_API_MODE'
| 26.214286 | 78 | 0.580381 | # -*- coding: utf-8 -*-
import os
import json
import importlib
api_mode_env_name = 'A4KSTREAMING_API_MODE'
class A4kStreamingApi(object):
def __init__(self, mocks=None):
if mocks is None:
mocks = {}
api_mode = {
'kodi': False,
'xbmc': False,
'xbmcaddon': False,
'xbmcplugin': False,
'xbmcgui': False,
'xbmcvfs': False,
}
api_mode.update(mocks)
os.environ[api_mode_env_name] = json.dumps(api_mode)
self.core = importlib.import_module('a4kStreaming.core')
def __mock_settings(self, settings):
default = self.core.kodi.addon.getSetting
def get_setting(id):
setting = settings.get(id, None)
if setting is None:
setting = default(id)
return setting
self.core.kodi.addon.getSetting = get_setting
def restore():
self.core.kodi.addon.getSetting = default
return restore
def __execute(self, action, params, settings):
restore_settings = None
try:
if settings:
restore_settings = self.__mock_settings(settings)
return getattr(self.core, action)(self.core, params)
finally:
if restore_settings:
restore_settings()
def __getattr__(self, name):
return lambda params, settings: self.__execute(name, params, settings)
| 1,220 | 9 | 130 |
60a9574c719b2b6c597a85dfd63b7682c8970f1c | 7,562 | py | Python | payloader.py | tweetz0r/XSS-LOADER-for-WINDOWS | f212f51a37ecc247488ec2d6e81306b2d843cc18 | [
"CC0-1.0"
] | 1 | 2020-04-09T08:20:54.000Z | 2020-04-09T08:20:54.000Z | payloader.py | tweetz0r/XSS-LOADER-for-WINDOWS | f212f51a37ecc247488ec2d6e81306b2d843cc18 | [
"CC0-1.0"
] | null | null | null | payloader.py | tweetz0r/XSS-LOADER-for-WINDOWS | f212f51a37ecc247488ec2d6e81306b2d843cc18 | [
"CC0-1.0"
] | 1 | 2020-04-09T10:32:24.000Z | 2020-04-09T10:32:24.000Z | import base64
import html
import sys
import urllib.parse
from time import sleep
import dorktara
import xssScan
import entry
entry.entryy()
Menu()
| 30.739837 | 115 | 0.457286 | import base64
import html
import sys
import urllib.parse
from time import sleep
import dorktara
import xssScan
import entry
class payloadss:
basic = "<script>alert(1)</script>"
div = "<div onpointerover='alert(1)'>MOVE HERE</div>"
img = "<img src=x onerror=alert('1');>"
svg = "<svg onload=alert('1')>"
body = "<body ontouchstart=alert(1)>"
class payloadsList:
with open("waf.txt", "r", encoding="utf8") as f:
aa = f.read()
with open("cloudflare.txt", "r", encoding="utf8") as f:
bb = f.read()
with open("alertt.txt", "r", encoding="utf8") as f:
cc = f.read()
with open("polyglot.txt", "r", encoding="utf8") as f:
dd = f.read()
def pylds(deger):
if deger == 1:
return secim.upper()
elif deger == 2:
return secim.replace("s", "S").replace("r", "R").replace("p", "P")
elif deger == 3:
return urllib.parse.quote(secim).replace("/", "%2F")
elif deger == 4:
return html.escape(secim)
elif deger == 5:
return secim.replace("script", "scri</script>pt>")
elif deger == 6:
return secim.encode("utf-8").hex()
elif deger == 7:
return secim.encode("utf-16")
elif deger == 8:
return secim.encode("utf-32")
elif deger == 9:
a = "\";alert('XSS');//"
return a
elif deger == 10:
return secim.replace("<", "%uff1c").replace(">", "%uff1e")
elif deger == 11:
return secim.replace("<", "¼").replace(">", "¾").replace("\"", "¢")
elif deger == 12:
a = secim.encode('ascii')
b = base64.b64encode(a)
return b.decode('ascii')
elif deger == 13:
return secim.replace("<", "+ADw-").replace(">", "+AD4-")
elif deger == 14:
return secim.replace("(", "`").replace(")", "`")
elif deger == 15:
return secim.replace("<", "%C0%BC").replace(">", "%C0%BE").replace("'", "%CA%B9").replace("(", "%CA%B9")
elif deger == 16:
return "\">" + secim
elif deger == 17:
return "</script>" + secim
elif deger == 18:
return "\">" + secim + ".gif"
elif deger == 19:
return "<!-->" + secim + "-->"
elif deger == 20:
return "<noscript><p title=\"</noscript>" + secim + "\">"
elif deger == 21:
return "<IMG \"\"\">" + secim + "\">"
elif deger == 22:
return secim.replace(" ", "^L")
elif deger == 23:
return "<!--[if gte IE 4]>" + secim + "<![endif]-->"
else:
print("Wrong Choose..!")
def islem():
x = "\nNew Payload\t:"
y = """
| 1) UPPER CASE | 16) TAG BLOCK BREAKOUT
| 2) UPPER AND LOWER CASE | 17) SCRIPT BREAKOUT
| 3) URL ENCODE | 18) FILE UPLOAD PAYLOAD
| 4) HTML ENTITY ENCODE | 19) INSIDE COMMENTS BYPASS
| 5) SPLIT PAYLOAD | 20) MUTATION PAYLOAD
| 6) HEX ENCODE | 21) MALFORMED IMG
| 7) UTF-16 ENCODE | 22) SPACE BYPASS
| 8) UTF-32 ENCODE | 23) DOWNLEVEL-HIDDEN BLOCK
| 9) DELETE TAG | 24) WAF BYPASS PAYLOADS
| 10) UNICODE ENCODE | 25) CLOUDFLARE BYPASS PAYLOADS
| 11) US-ASCII ENCODE | 26) POLYGLOT PAYLOADS
| 12) BASE64 ENCODE | 27) ALERT PAYLOADS
| 13) UTF-7 ENCODE | 28) ALL CREATE PAYLOAD
| 14) PARENTHESIS BYPASS | 29) GO BACK MAIN MENU
| 15) UTF-8 ENCODE | 30) EXIT
"""
print(y)
secim2 = input("\nPLEASE CHOOSE:")
if secim2 == "1":
print(x, pylds(1))
elif secim2 == "2":
print(x, pylds(2))
elif secim2 == "3":
print(x, pylds(3))
elif secim2 == "4":
print(x, pylds(4))
elif secim2 == "5":
print(x, pylds(5))
elif secim2 == "6":
print(x, pylds(6))
elif secim2 == "7":
print(x, pylds(7))
elif secim2 == "8":
print(x, pylds(8))
elif secim2 == "9":
print(x, pylds(9))
elif secim2 == "10":
print(x, pylds(10))
elif secim2 == "11":
print(x, pylds(11))
elif secim2 == "12":
print(x, pylds(12))
elif secim2 == "13":
print(x, pylds(13))
elif secim2 == "14":
print(x, pylds(14))
elif secim2 == "15":
print(x, pylds(15))
elif secim2 == "16":
print(x, pylds(16))
elif secim2 == "17":
print(x, pylds(17))
elif secim2 == "18":
print(x, pylds(18))
elif secim2 == "19":
print(x, pylds(19))
elif secim2 == "20":
print(x, pylds(20))
elif secim2 == "21":
print(x, pylds(21))
elif secim2 == "22":
print(x, pylds(22))
elif secim2 == "23":
print(x, pylds(23))
elif secim2 == "24":
print(x, payloadsList.aa)
elif secim2 == "25":
print(x, payloadsList.bb)
elif secim2 == "26":
print(x, payloadsList.dd)
elif secim2 == "27":
print(x, payloadsList.cc)
elif secim2 == "28":
print(pylds(1), pylds(2), pylds(3), pylds(4), pylds(5), pylds(6), pylds(7), pylds(8), pylds(9), pylds(10),
pylds(11), pylds(12), pylds(13), pylds(14), pylds(15), pylds(16), pylds(17), pylds(18), pylds(19),
pylds(20), pylds(21), pylds(22), pylds(23),
sep="\n")
elif secim2 == "29":
Menu()
elif secim2 == "30":
print("Exiting...")
sleep(3)
print("Happy Hacking(:...")
sys.exit(0)
else:
print("WRONG CHOOSE...")
entry.entryy()
def Menu():
while True:
print("-----------------------------------")
print("||| XSS-LOADER TOOLS |||")
print("-----------------------------------")
yy = ("\n"
"1) BASIC PAYLOAD\n"
"2) DIV PAYLOAD\n"
"3) IMG PAYLOAD\n"
"4) BODY PAYLOAD\n"
"5) SVG PAYLOAD\n"
"6) ENTER YOUR PAYLOAD\n"
"7) XSS SCANNER\n"
"8) XSS DORK FINDER\n"
"9) EXIT\n"
"\n")
print(yy)
global secim
secim = input("SELECT PAYLOAD TO TAG:")
if secim == "1":
print("Selected Payload\t: ", payloadss.basic)
secim = payloadss.basic
islem()
sleep(2)
elif secim == "2":
print(payloadss.div)
secim = payloadss.div
islem()
sleep(2)
elif secim == "3":
print(payloadss.img)
secim = payloadss.img
islem()
sleep(2)
elif secim == "4":
print(payloadss.body)
secim = payloadss.body
islem()
sleep(2)
elif secim == "5":
print(payloadss.svg)
secim = payloadss.svg
islem()
sleep(2)
elif secim == "6":
a = input("Payload\t:")
secim = a
islem()
sleep(2)
elif secim == "7":
xssScan.xssFind()
elif secim == "8":
dorktara.dorkFind()
elif secim == "9":
print("Exiting...")
sleep(3)
print("Happy Hacking(:...")
sys.exit()
else:
print("Please Select a Valid Option..!!!")
Menu()
| 6,723 | 541 | 125 |
2961bea66cf78972eea7711a6edd2b081934e893 | 176 | py | Python | nicos_mlz/sans1/setups/alias_lambda.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/sans1/setups/alias_lambda.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/sans1/setups/alias_lambda.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'alias for wave length'
group = 'lowlevel'
devices = dict(
wl = device('nicos.devices.generic.DeviceAlias',
devclass='nicos.core.Readable'
),
)
| 17.6 | 52 | 0.659091 | description = 'alias for wave length'
group = 'lowlevel'
devices = dict(
wl = device('nicos.devices.generic.DeviceAlias',
devclass='nicos.core.Readable'
),
)
| 0 | 0 | 0 |
23b8736d962e6f0846db0a4eb93d2a8516a0335b | 206 | py | Python | examples/ssl_client.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | 2 | 2021-12-10T10:20:08.000Z | 2022-01-06T11:15:43.000Z | examples/ssl_client.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | null | null | null | examples/ssl_client.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | null | null | null | from accessdata.client import Client
## Only required if anon auth is disallowed.
client = Client("https://localhost:4443/", None, validate=False)
client.session.cert = "/path/to/cert"
print(client.cases) | 29.428571 | 64 | 0.757282 | from accessdata.client import Client
## Only required if anon auth is disallowed.
client = Client("https://localhost:4443/", None, validate=False)
client.session.cert = "/path/to/cert"
print(client.cases) | 0 | 0 | 0 |
34fa7dbad334c39067d566ae2e26317f5d67d5ed | 954 | py | Python | examples/B_vpe_ex0.py | datavalor/fastg3 | a0d3af54905913668060abbe60786599419b5c3e | [
"BSD-3-Clause"
] | 1 | 2021-07-21T11:55:51.000Z | 2021-07-21T11:55:51.000Z | examples/B_vpe_ex0.py | datavalor/fastg3 | a0d3af54905913668060abbe60786599419b5c3e | [
"BSD-3-Clause"
] | null | null | null | examples/B_vpe_ex0.py | datavalor/fastg3 | a0d3af54905913668060abbe60786599419b5c3e | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
from pydataset import data
import sys
sys.path.insert(1, '../')
import fastg3.ncrisp as g3ncrisp
df = data("iris")
xparams = {
'Sepal.Length':{
'type': 'numerical',
'predicate': 'absolute_distance',
'params': [0.05]
},
'Sepal.Width':{
'type': 'numerical',
'predicate': 'absolute_distance',
'params': [0.05]
}
}
yparams = {
'Species':{
'type': 'categorical',
'predicate': 'equality'
}
}
if __name__ == '__main__':
# Creates an interface with C++ object; errors will be return if any parameter is wrong
VPE = g3ncrisp.create_vpe_instance(
df,
xparams,
yparams,
blocking=True, #unused as there is no equality predicate on the LHS of the FD
opti_ordering=True,
join_type="auto",
verbose=True)
# Finds all violating pairs in the form of vp list
print(VPE.enum_vps()) | 22.714286 | 91 | 0.585954 | import pandas as pd
from pydataset import data
import sys
sys.path.insert(1, '../')
import fastg3.ncrisp as g3ncrisp
df = data("iris")
xparams = {
'Sepal.Length':{
'type': 'numerical',
'predicate': 'absolute_distance',
'params': [0.05]
},
'Sepal.Width':{
'type': 'numerical',
'predicate': 'absolute_distance',
'params': [0.05]
}
}
yparams = {
'Species':{
'type': 'categorical',
'predicate': 'equality'
}
}
if __name__ == '__main__':
# Creates an interface with C++ object; errors will be return if any parameter is wrong
VPE = g3ncrisp.create_vpe_instance(
df,
xparams,
yparams,
blocking=True, #unused as there is no equality predicate on the LHS of the FD
opti_ordering=True,
join_type="auto",
verbose=True)
# Finds all violating pairs in the form of vp list
print(VPE.enum_vps()) | 0 | 0 | 0 |
2f4eb449420e50a21c879ab041fc1f34af62e2d3 | 827 | py | Python | zwierzeta/zolw.py | JLLEW/World-Simulation- | ca0d18471f2e3903405540c59bef6bca09a1fe22 | [
"MIT"
] | null | null | null | zwierzeta/zolw.py | JLLEW/World-Simulation- | ca0d18471f2e3903405540c59bef6bca09a1fe22 | [
"MIT"
] | null | null | null | zwierzeta/zolw.py | JLLEW/World-Simulation- | ca0d18471f2e3903405540c59bef6bca09a1fe22 | [
"MIT"
] | null | null | null | from zwierzeta.zwierze import Zwierze
import random
| 27.566667 | 82 | 0.561064 | from zwierzeta.zwierze import Zwierze
import random
class Zolw(Zwierze):
def __init__(self, swiat,ruch=False, x=-1, y=-1):
self.sila = 2
self.priorytet = 1
self.obecnySwiat = swiat
if x == -1:
super().__init__(swiat, 2, 1, "olive")
else:
super().__init__(swiat, 2, 1, "olive", ruch, x, y)
def klonowanie(self, swiat, x, y):
return Zolw(swiat, False, x, y)
def akcja(self):
move = random.randint(1, 100) <= 25
if move:
super().akcja()
def obrona(self, atakujacy):
if atakujacy.sila < 5:
self.obecnySwiat.plansza[atakujacy.prevY][atakujacy.prevX] = atakujacy
atakujacy.x = atakujacy.prevX
atakujacy.y = atakujacy.prevY
return True
return False
| 645 | -1 | 131 |
4b49cdc7a58020c22373357512db3e26fe036e70 | 900 | py | Python | tests/market/admin/__init__.py | ballke-dev/django-admin-confirm | 21f5a37c5ecf1fee30f95d8a2ce01207916a22f8 | [
"Apache-2.0"
] | 32 | 2020-11-09T09:41:03.000Z | 2022-03-25T14:03:53.000Z | tests/market/admin/__init__.py | ballke-dev/django-admin-confirm | 21f5a37c5ecf1fee30f95d8a2ce01207916a22f8 | [
"Apache-2.0"
] | 18 | 2020-11-26T02:05:47.000Z | 2022-03-25T01:07:54.000Z | tests/market/admin/__init__.py | ballke-dev/django-admin-confirm | 21f5a37c5ecf1fee30f95d8a2ce01207916a22f8 | [
"Apache-2.0"
] | 5 | 2021-05-20T18:13:13.000Z | 2022-03-24T09:48:45.000Z | from django.contrib import admin
from ..models import (
GeneralManager,
Item,
Inventory,
ItemSale,
Shop,
ShoppingMall,
Transaction,
Checkout,
)
from .item_admin import ItemAdmin
from .inventory_admin import InventoryAdmin
from .shop_admin import ShopAdmin
from .shoppingmall_admin import ShoppingMallAdmin
from .generalmanager_admin import GeneralManagerAdmin
from .item_sale_admin import ItemSaleAdmin
from .transaction_admin import TransactionAdmin
from .checkout_admin import CheckoutAdmin
admin.site.register(Item, ItemAdmin)
admin.site.register(Inventory, InventoryAdmin)
admin.site.register(Shop, ShopAdmin)
admin.site.register(ShoppingMall, ShoppingMallAdmin)
admin.site.register(GeneralManager, GeneralManagerAdmin)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(ItemSale, ItemSaleAdmin)
admin.site.register(Checkout, CheckoutAdmin)
| 29.032258 | 56 | 0.821111 | from django.contrib import admin
from ..models import (
GeneralManager,
Item,
Inventory,
ItemSale,
Shop,
ShoppingMall,
Transaction,
Checkout,
)
from .item_admin import ItemAdmin
from .inventory_admin import InventoryAdmin
from .shop_admin import ShopAdmin
from .shoppingmall_admin import ShoppingMallAdmin
from .generalmanager_admin import GeneralManagerAdmin
from .item_sale_admin import ItemSaleAdmin
from .transaction_admin import TransactionAdmin
from .checkout_admin import CheckoutAdmin
admin.site.register(Item, ItemAdmin)
admin.site.register(Inventory, InventoryAdmin)
admin.site.register(Shop, ShopAdmin)
admin.site.register(ShoppingMall, ShoppingMallAdmin)
admin.site.register(GeneralManager, GeneralManagerAdmin)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(ItemSale, ItemSaleAdmin)
admin.site.register(Checkout, CheckoutAdmin)
| 0 | 0 | 0 |
1a4420b9f8bc38c30271a7b81f14c615c051fb96 | 12,150 | py | Python | training_ptr_gen/decode.py | bekirufuk/pointer_summarizer | 8fc9726f9337b26339848d896a09e7e8f9456bcc | [
"Apache-2.0"
] | null | null | null | training_ptr_gen/decode.py | bekirufuk/pointer_summarizer | 8fc9726f9337b26339848d896a09e7e8f9456bcc | [
"Apache-2.0"
] | null | null | null | training_ptr_gen/decode.py | bekirufuk/pointer_summarizer | 8fc9726f9337b26339848d896a09e7e8f9456bcc | [
"Apache-2.0"
] | 2 | 2022-02-06T11:35:21.000Z | 2022-02-06T12:03:31.000Z | #Except for the pytorch part content of this file is copied from https://github.com/abisee/pointer-generator/blob/master/
from __future__ import unicode_literals, print_function, division
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import time
import argparse
from datetime import datetime
import torch
from torch.autograd import Variable
import pandas as pd
from tqdm import tqdm
from rouge import Rouge
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util import data, config
from model import Model
from data_util.utils import write_for_rouge
from train_util import get_input_from_batch
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
use_cuda = config.use_gpu and torch.cuda.is_available()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Decode script")
parser.add_argument("-m",
dest="model_file_path",
required=False,
default=None,
help="Model file for retraining (default: None).")
parser.add_argument("-d",
dest="data_folder",
required=True,
default=None,
help="Dataset name 'data_T50', 'cnn' or 'movie_quotes' (default: None).")
parser.add_argument("-l",
dest="log_file_id",
required=False,
default=datetime.now().strftime("%Y%m%d_%H%M%S"),
help="Postfix for decode log file (default: date_time).")
args = parser.parse_args()
beam_Search_processor = BeamSearch(args.model_file_path, args.data_folder, args.log_file_id)
beam_Search_processor.decode(args.log_file_id)
# rouge_1_df, rouge_2_df, rouge_l_df = beam_Search_processor.rouge_eval(beam_Search_processor._rouge_dec_dir, beam_Search_processor._rouge_ref_dir)
# beam_Search_processor.rouge_save(args.log_file_id, rouge_1_df, rouge_2_df, rouge_l_df)
| 41.467577 | 151 | 0.587984 | #Except for the pytorch part content of this file is copied from https://github.com/abisee/pointer-generator/blob/master/
from __future__ import unicode_literals, print_function, division
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import time
import argparse
from datetime import datetime
import torch
from torch.autograd import Variable
import pandas as pd
from tqdm import tqdm
from rouge import Rouge
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util import data, config
from model import Model
from data_util.utils import write_for_rouge
from train_util import get_input_from_batch
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
use_cuda = config.use_gpu and torch.cuda.is_available()
class Beam(object):
    """A single beam-search hypothesis: the token path so far plus the
    decoder state needed to extend it by one more step."""

    def __init__(self, tokens, log_probs, state, context, coverage):
        # tokens and log_probs are parallel lists: one log-probability
        # per decoded token id.
        self.tokens = tokens
        self.log_probs = log_probs
        # Decoder recurrent state, attention context vector, and the
        # coverage vector (None when coverage is disabled).
        self.state = state
        self.context = context
        self.coverage = coverage

    def extend(self, token, log_prob, state, context, coverage):
        """Return a NEW hypothesis with `token` appended; self is unchanged."""
        extended_tokens = self.tokens + [token]
        extended_log_probs = self.log_probs + [log_prob]
        return Beam(tokens=extended_tokens,
                    log_probs=extended_log_probs,
                    state=state,
                    context=context,
                    coverage=coverage)

    @property
    def latest_token(self):
        """Most recently decoded token id (input for the next step)."""
        return self.tokens[-1]

    @property
    def avg_log_prob(self):
        """Length-normalised score used to rank competing hypotheses."""
        return sum(self.log_probs) / len(self.tokens)
class BeamSearch(object):
    """Runs beam-search decoding over a dataset and evaluates the output
    with ROUGE.

    Creates a decode directory under config.log_root containing reference
    and decoded summaries, then scores decoded vs. reference files.
    """

    def __init__(self, model_file_path, data_folder, log_file_id):
        """Set up output directories, vocab, batcher and the trained model.

        model_file_path: checkpoint to load (passed to Model).
        data_folder: dataset name resolved via config.get_data_paths.
        log_file_id: suffix for the decode output directory.
        """
        # model_name = os.path.basename(model_file_path)
        self._decode_dir = os.path.join(config.log_root, 'decode_%s' % (log_file_id))
        self._rouge_ref_dir = os.path.join(self._decode_dir, 'rouge_ref')
        self._rouge_dec_dir = os.path.join(self._decode_dir, 'rouge_dec_dir')
        # Create parent before children; os.mkdir (not makedirs) relies on this order.
        for p in [self._decode_dir, self._rouge_ref_dir, self._rouge_dec_dir]:
            if not os.path.exists(p):
                os.mkdir(p)
        dp = config.get_data_paths(data_folder)
        self.vocab = Vocab(dp['vocab'], config.vocab_size)
        # batch_size == beam_size: each "batch" is one example repeated per beam.
        self.batcher = Batcher(dp['decode'], self.vocab, mode='decode', batch_size=config.beam_size, single_pass=True)
        # NOTE(review): fixed sleep — presumably lets the batcher's background
        # workers fill the queue before decoding starts; confirm.
        time.sleep(15)
        self.model = Model(model_file_path, is_eval=True)

    def sort_beams(self, beams):
        """Sort hypotheses best-first by length-normalised log-probability."""
        return sorted(beams, key=lambda h: h.avg_log_prob, reverse=True)

    def decode(self, log_file_id):
        """Decode every batch once, write ref/decoded files, then run ROUGE."""
        start = time.time()
        counter = 0
        batch = self.batcher.next_batch()
        # single_pass batcher returns None when the dataset is exhausted.
        while batch is not None:
            # Run beam search to get best Hypothesis
            best_summary = self.beam_search(batch)
            # Extract the output ids from the hypothesis and convert back to words
            # (tokens[1:] drops the START_DECODING token).
            output_ids = [int(t) for t in best_summary.tokens[1:]]
            decoded_words = data.outputids2words(output_ids, self.vocab,
                                                 (batch.art_oovs[0] if config.pointer_gen else None))
            # Remove the [STOP] token from decoded_words, if necessary
            try:
                fst_stop_idx = decoded_words.index(data.STOP_DECODING)
                decoded_words = decoded_words[:fst_stop_idx]
            except ValueError:
                # No STOP token produced: keep the full sequence.
                decoded_words = decoded_words
            original_abstract_sents = batch.original_abstracts_sents[0]
            write_for_rouge(original_abstract_sents, decoded_words, counter,
                            self._rouge_ref_dir, self._rouge_dec_dir)
            counter += 1
            if counter % config.print_interval == 0:
                print('Examples %d-%d decoded in %d sec'%(counter-config.print_interval, counter, time.time() - start))
                start = time.time()
            batch = self.batcher.next_batch()
        print("Decoder has finished reading dataset for single pass.")
        print("Now starting ROUGE eval...")
        rouge_1_df, rouge_2_df, rouge_l_df = self.rouge_eval(self._rouge_dec_dir, self._rouge_ref_dir)
        self.rouge_save(log_file_id, rouge_1_df, rouge_2_df, rouge_l_df)

    def beam_search(self, batch):
        """Return the best Beam hypothesis for a single example.

        The batch holds one example repeated beam_size times so the decoder
        can advance all hypotheses in one forward pass per step.
        """
        #batch should have only one example
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_0, coverage_t_0 = \
            get_input_from_batch(batch, use_cuda)
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
        s_t_0 = self.model.reduce_state(encoder_hidden)
        dec_h, dec_c = s_t_0 # 1 x 2*hidden_size
        dec_h = dec_h.squeeze()
        dec_c = dec_c.squeeze()
        #decoder batch preparation, it has beam_size example initially everything is repeated
        # All beams start from START_DECODING with the same encoder-derived state.
        beams = [Beam(tokens=[self.vocab.word2id(data.START_DECODING)],
                      log_probs=[0.0],
                      state=(dec_h[0], dec_c[0]),
                      context = c_t_0[0],
                      coverage=(coverage_t_0[0] if config.is_coverage else None))
                 for _ in range(config.beam_size)]
        results = []
        steps = 0
        while steps < config.max_dec_steps and len(results) < config.beam_size:
            latest_tokens = [h.latest_token for h in beams]
            # Map extended-vocab (copied OOV) ids back to UNK for decoder input.
            latest_tokens = [t if t < self.vocab.size() else self.vocab.word2id(data.UNKNOWN_TOKEN) \
                             for t in latest_tokens]
            y_t_1 = Variable(torch.LongTensor(latest_tokens))
            if use_cuda:
                y_t_1 = y_t_1.cuda()
            all_state_h =[]
            all_state_c = []
            all_context = []
            for h in beams:
                state_h, state_c = h.state
                all_state_h.append(state_h)
                all_state_c.append(state_c)
                all_context.append(h.context)
            # Re-assemble per-beam states into batched tensors for one decoder call.
            s_t_1 = (torch.stack(all_state_h, 0).unsqueeze(0), torch.stack(all_state_c, 0).unsqueeze(0))
            c_t_1 = torch.stack(all_context, 0)
            coverage_t_1 = None
            if config.is_coverage:
                all_coverage = []
                for h in beams:
                    all_coverage.append(h.coverage)
                coverage_t_1 = torch.stack(all_coverage, 0)
            final_dist, s_t, c_t, attn_dist, p_gen, coverage_t = self.model.decoder(y_t_1, s_t_1,
                                                        encoder_outputs, encoder_feature, enc_padding_mask, c_t_1,
                                                        extra_zeros, enc_batch_extend_vocab, coverage_t_1, steps)
            log_probs = torch.log(final_dist)
            # Keep 2*beam_size candidates per beam so enough survive the STOP filter.
            topk_log_probs, topk_ids = torch.topk(log_probs, config.beam_size * 2)
            dec_h, dec_c = s_t
            dec_h = dec_h.squeeze()
            dec_c = dec_c.squeeze()
            all_beams = []
            # On the first step all beams are identical, so expand only one.
            num_orig_beams = 1 if steps == 0 else len(beams)
            for i in range(num_orig_beams):
                h = beams[i]
                state_i = (dec_h[i], dec_c[i])
                context_i = c_t[i]
                coverage_i = (coverage_t[i] if config.is_coverage else None)
                for j in range(config.beam_size * 2):  # for each of the top 2*beam_size hyps:
                    new_beam = h.extend(token=topk_ids[i, j].item(),
                                        log_prob=topk_log_probs[i, j].item(),
                                        state=state_i,
                                        context=context_i,
                                        coverage=coverage_i)
                    all_beams.append(new_beam)
            beams = []
            for h in self.sort_beams(all_beams):
                if h.latest_token == self.vocab.word2id(data.STOP_DECODING):
                    # Completed hypotheses count only if long enough.
                    if steps >= config.min_dec_steps:
                        results.append(h)
                else:
                    beams.append(h)
                if len(beams) == config.beam_size or len(results) == config.beam_size:
                    break
            steps += 1
        # If nothing finished within max_dec_steps, fall back to the live beams.
        if len(results) == 0:
            results = beams
        beams_sorted = self.sort_beams(results)
        return beams_sorted[0]

    def rouge_eval(self, decoded_dir, ref_dir):
        """Score each NNNNNN_decoded.txt against NNNNNN_reference.txt.

        Returns [rouge_1_df, rouge_2_df, rouge_l_df]; each DataFrame has one
        row per file with F1/Recall/Precision columns.
        """
        rouge = Rouge()
        columns=['F1','Recall','Precision']
        rouge_l_df = pd.DataFrame(columns=columns)
        rouge_1_df = pd.DataFrame(columns=columns)
        rouge_2_df = pd.DataFrame(columns=columns)
        not_found_list = []
        file_count = len(os.listdir(ref_dir))
        print('Rouge Evaluation started for {} files..'.format(file_count))
        for i in tqdm (range(file_count), desc='Running'):
            # Files are written with zero-padded 6-digit indices (see decode()).
            index = str(i).zfill(6)
            dec_file = decoded_dir + "/" + index + '_decoded.txt'
            ref_file = ref_dir + "/" + index + '_reference.txt'
            if os.path.isfile(dec_file) and os.path.isfile(ref_file):
                with open(dec_file, 'r') as file:
                    decoded = file.read().rstrip().decode("utf8")
                with open(ref_file, 'r') as file:
                    reference = file.read().rstrip().decode("utf8")
                # If somehow reference file is empty (a rare case bug, cause of which is undetected) put a placeholder.
                if reference == '':
                    reference = '[Input can not be found]'
                score = rouge.get_scores(decoded, reference)[0]
                rouge_l_df.loc[i] = [score['rouge-l']['f'], score['rouge-l']['r'], score['rouge-l']['p']]
                rouge_1_df.loc[i] = [score['rouge-1']['f'], score['rouge-1']['r'], score['rouge-1']['p']]
                rouge_2_df.loc[i] = [score['rouge-2']['f'], score['rouge-2']['r'], score['rouge-2']['p']]
            else:
                not_found_list.append((dec_file, ref_file))
        if len(not_found_list) != 0:
            print('{} files could not be identified.'.format(len(not_found_list)))
            #print(not_found_list)
        print('Evaluation Finished..')
        return [rouge_1_df, rouge_2_df, rouge_l_df]

    def rouge_save(self, save_dir, rouge_1_df, rouge_2_df, rouge_l_df):
        """Write per-file scores as CSVs plus a mean-score summary.txt.

        NOTE(review): output goes under a relative "logs/decode_<id>" path,
        not under self._decode_dir — presumably equivalent when run from the
        project root; confirm.
        """
        save_dir = "logs/decode_"+save_dir
        if not os.path.exists(save_dir+'/rouge_scores/'):
            os.makedirs(save_dir+'/rouge_scores/')
        rouge_l_df.to_csv(save_dir+'/rouge_scores/rouge_l.csv')
        rouge_1_df.to_csv(save_dir+'/rouge_scores/rouge_1.csv')
        rouge_2_df.to_csv(save_dir+'/rouge_scores/rouge_2.csv')
        print('Rouge scores saved..')
        with open(save_dir+'/rouge_scores/summary.txt', 'w') as f:
            for df, rouge in zip([rouge_1_df, rouge_2_df,rouge_l_df], ['ROUGE-1','ROUGE-2','ROUGE-L']):
                print(rouge)
                f.write(rouge+"\n")
                for metric in rouge_l_df.columns:
                    line = "{} Mean {}".format(round(df[metric].mean(),4), metric)
                    print(line)
                    f.write(line+"\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Decode script")
parser.add_argument("-m",
dest="model_file_path",
required=False,
default=None,
help="Model file for retraining (default: None).")
parser.add_argument("-d",
dest="data_folder",
required=True,
default=None,
help="Dataset name 'data_T50', 'cnn' or 'movie_quotes' (default: None).")
parser.add_argument("-l",
dest="log_file_id",
required=False,
default=datetime.now().strftime("%Y%m%d_%H%M%S"),
help="Postfix for decode log file (default: date_time).")
args = parser.parse_args()
beam_Search_processor = BeamSearch(args.model_file_path, args.data_folder, args.log_file_id)
beam_Search_processor.decode(args.log_file_id)
# rouge_1_df, rouge_2_df, rouge_l_df = beam_Search_processor.rouge_eval(beam_Search_processor._rouge_dec_dir, beam_Search_processor._rouge_ref_dir)
# beam_Search_processor.rouge_save(args.log_file_id, rouge_1_df, rouge_2_df, rouge_l_df)
| 9,757 | 125 | 207 |
a7ae03a16305aa29791a957ed4da79bb10ff6dd2 | 1,776 | py | Python | pybuild/packages/scikitlearn2.py | wangqiang1588/qpython3-toolchain | 45b0286067d5763ee311d03ccfec6dde07615a9f | [
"WTFPL"
] | 21 | 2018-05-07T06:32:52.000Z | 2022-01-25T07:23:41.000Z | pybuild/packages/scikitlearn2.py | wangqiang1588/qpython3-toolchain | 45b0286067d5763ee311d03ccfec6dde07615a9f | [
"WTFPL"
] | 4 | 2015-04-13T06:54:20.000Z | 2015-10-18T04:51:10.000Z | pybuild/packages/scikitlearn2.py | wangqiang1588/qpython3-toolchain | 45b0286067d5763ee311d03ccfec6dde07615a9f | [
"WTFPL"
] | 12 | 2016-11-12T09:52:36.000Z | 2018-04-28T14:33:52.000Z | from ..source import GitSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
import os
| 34.153846 | 133 | 0.567005 | from ..source import GitSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
import os
class Scikitlearn2(Package):
source = GitSource('https://github.com/AIPYX/scikit-learn.git', alias='scikit-learn2', branch='qpyc-0.20.0')
patches = [
#LocalPatch('0001-cross-compile'),
]
def prepare(self):
pass
def build(self):
PY_BRANCH = os.getenv('PY_BRANCH')
PY_M_BRANCH = os.getenv('PY_M_BRANCH')
BLD = os.path.join(os.getcwd(),'build/target')
ANDROID_NDK = os.getenv("ANDROID_NDK")
UNIFIED_SYSROOT = self.env["UNIFIED_SYSROOT"]
self.run([
'python2',
'setup.py',
'build_ext',
f'-I{BLD}/python/usr/include/python{PY_BRANCH}.{PY_M_BRANCH}'\
f':{ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/4.9/include'\
f':{ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a/include'\
f':{BLD}/openblas/usr/include'\
f':{UNIFIED_SYSROOT}/include',
f'-L{BLD}/python/usr/lib'\
f':{BLD}/openblas/usr/lib'\
f':{ANDROID_NDK}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a '\
f':{ANDROID_NDK}/toolchains/renderscript/prebuilt/linux-x86_64/platform/arm'\
f':{ANDROID_NDK}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/lib/gcc/arm-linux-androideabi/4.9.x/armv7-a',
f'-lpython{PY_BRANCH}.{PY_M_BRANCH},m',
])
self.run([
'python2',
'setup.py',
'build_py',
])
self.run([
'python2',
'setup.py',
'install',
'--root',
f'{BLD}/python{PY_BRANCH}',
])
| 1,380 | 239 | 23 |
afba3aef729c63f4a29e862afdbf543860f0ef98 | 25 | py | Python | components/Layout/grids.py | zergos/pantra | e68196489a2da0f46fba2f54473762b3b0b15fa7 | [
"Apache-2.0"
] | null | null | null | components/Layout/grids.py | zergos/pantra | e68196489a2da0f46fba2f54473762b3b0b15fa7 | [
"Apache-2.0"
] | 4 | 2021-03-30T13:36:45.000Z | 2021-09-22T19:13:04.000Z | components/Layout/grids.py | zergos/pantra | e68196489a2da0f46fba2f54473762b3b0b15fa7 | [
"Apache-2.0"
] | 1 | 2021-05-29T22:46:49.000Z | 2021-05-29T22:46:49.000Z | fullscreen: bool = False
| 12.5 | 24 | 0.76 | fullscreen: bool = False
| 0 | 0 | 0 |
c63d9827e27e413d57de8eed27fbd7a102067e12 | 802 | py | Python | pyais/tests.py | reinderien/pyais | c65ad5296966b2c69da7307b8f43e991aa285cb0 | [
"MIT"
] | null | null | null | pyais/tests.py | reinderien/pyais | c65ad5296966b2c69da7307b8f43e991aa285cb0 | [
"MIT"
] | null | null | null | pyais/tests.py | reinderien/pyais | c65ad5296966b2c69da7307b8f43e991aa285cb0 | [
"MIT"
] | null | null | null | from .message import decode
MESSAGES = [
"!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C",
"!AIVDM,1,1,,B,15NG6V0P01G?cFhE`R2IU?wn28R>,0*05",
"!AIVDM,1,1,,A,15NJQiPOl=G?m:bE`Gpt<aun00S8,0*56",
"!AIVDM,1,1,,B,15NPOOPP00o?bIjE`UEv4?wF2HIU,0*31",
"!AIVDM,1,1,,A,35NVm2gP00o@5k:EbbPJnwwN25e3,0*35",
"!AIVDM,1,1,,A,B52KlJP00=l4be5ItJ6r3wVUWP06,0*7C",
"!AIVDM,2,1,1,B,53ku:202=kul=4TS@00<tq@V0<uE84LD00000017R@sEE6TE0GUDk1hP,0*57",
"!AIVDM,2,1,2,B,55Mwm;P00001L@?;SKE8uT4j0lDh8uE8pD00000l0`A276S<07gUDp3Q,0*0D"
]
| 32.08 | 83 | 0.67581 | from .message import decode
MESSAGES = [
"!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C",
"!AIVDM,1,1,,B,15NG6V0P01G?cFhE`R2IU?wn28R>,0*05",
"!AIVDM,1,1,,A,15NJQiPOl=G?m:bE`Gpt<aun00S8,0*56",
"!AIVDM,1,1,,B,15NPOOPP00o?bIjE`UEv4?wF2HIU,0*31",
"!AIVDM,1,1,,A,35NVm2gP00o@5k:EbbPJnwwN25e3,0*35",
"!AIVDM,1,1,,A,B52KlJP00=l4be5ItJ6r3wVUWP06,0*7C",
"!AIVDM,2,1,1,B,53ku:202=kul=4TS@00<tq@V0<uE84LD00000017R@sEE6TE0GUDk1hP,0*57",
"!AIVDM,2,1,2,B,55Mwm;P00001L@?;SKE8uT4j0lDh8uE8pD00000l0`A276S<07gUDp3Q,0*0D"
]
def time():
    """Benchmark decoding of randomly chosen AIS sentences and print timing."""
    import timeit
    import random

    def run_once():
        # Pick one of the 8 sample messages at random and decode it.
        decode(MESSAGES[random.randint(0, 7)])

    iterations = 8000
    elapsed_time = timeit.timeit(run_once, number=iterations)
    print(f"Decoding #{iterations} takes {elapsed_time} seconds")
| 237 | 0 | 23 |
de6061282b0926d9e3e9d1bb54e86a7e7397243c | 2,518 | py | Python | libXiami.py | isombyt/XiaQian | 9e0a1a0d0413a677adb2fb50f22b4aff1c5b92b0 | [
"MIT"
] | 28 | 2015-01-05T08:00:46.000Z | 2021-01-30T04:44:50.000Z | libXiami.py | isombyt/XiaQian | 9e0a1a0d0413a677adb2fb50f22b4aff1c5b92b0 | [
"MIT"
] | 3 | 2015-09-12T04:53:40.000Z | 2018-01-08T09:08:01.000Z | libXiami.py | isombyt/XiaQian | 9e0a1a0d0413a677adb2fb50f22b4aff1c5b92b0 | [
"MIT"
] | 5 | 2015-04-17T06:48:09.000Z | 2020-10-10T22:46:10.000Z | import urllib
import urllib2
import cookielib
import StrCookieJar
import json
import lxml.html
headers = {
"Referer": "http://www.xiami.com/member/login",
"User-Agent": 'Mozilla/5.0 (IsomByt; checker)',
}
if __name__ == "__main__":
user = User()
user.login("email","password")
print user.checkin()
| 27.369565 | 105 | 0.613582 | import urllib
import urllib2
import cookielib
import StrCookieJar
import json
import lxml.html
headers = {
"Referer": "http://www.xiami.com/member/login",
"User-Agent": 'Mozilla/5.0 (IsomByt; checker)',
}
class User:
    """Python 2 client for a xiami.com account: login, cookie persistence,
    profile lookup and daily check-in (all via urllib2 + the module-level
    ``headers``)."""

    def __init__(self):
        # urllib2 opener carrying the session cookie jar; set by login()
        # or loadCookie().
        self.opener = None
        # Cached userInfo dict; lazily populated by the `data` property.
        self._data = None

    def loadCookie(self,cookie):
        """Restore a session from a previously dumped cookie string."""
        self.cookie = StrCookieJar.StrCookieJar(cookie)
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))

    def dumpCookie(self):
        """Serialize the current session cookies to a string."""
        return self.cookie.dump()

    def login(self,email,password):
        """Log in: fetch the login form, copy its hidden inputs, then POST
        credentials; session cookies are kept in self.cookie/self.opener."""
        self.cookie = StrCookieJar.StrCookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
        login_url = "https://login.xiami.com/member/login"
        request = urllib2.Request(login_url,headers=headers)
        response = self.opener.open(request)
        data = response.read()
        dom = lxml.html.fromstring(data)
        # Collect every <input> on the form (hidden tokens included) as POST args.
        args = dict(map(lambda x:(str(x.name),unicode(x.value).encode("u8")),dom.xpath("//form//input")))
        args["email"] = email
        args["password"] = password
        request = urllib2.Request(login_url, urllib.urlencode(args), headers)
        response = self.opener.open(request)
        data = response.read()

    def __getitem__(self,key):
        """Dict-style access to the cached user-info; raises if not logged in."""
        if not self.data:
            raise BaseException("please login")
        return self.data[key]

    @property
    def data(self):
        """Lazily fetched userInfo dict (None until a successful fetch)."""
        if not self._data:
            data = self.getuserinfo()
            if data["status"]:
                self._data = data["data"]['userInfo']
        return self._data

    def getuserinfo(self):
        """Fetch and JSON-decode the account home endpoint."""
        url = "http://www.xiami.com/index/home"
        request = urllib2.Request(url,headers=headers)
        response = self.opener.open(request)
        data = response.read()
        return json.loads(data)

    @property
    def ischeckined(self):
        # "is" field of userInfo marks today's check-in state —
        # presumably a truthy flag; verify against the API response.
        if self.data and self.data["is"]:
            return True
        return False

    @property
    def islogined(self):
        """True when user info with a user_id could be fetched."""
        if self.data and self.data["user_id"]:
            return True
        return False

    def checkin(self):
        """POST the daily sign-in; returns the streak day count, or None
        when the response body is empty."""
        url = "http://www.xiami.com/task/signin"
        request = urllib2.Request(url,urllib.urlencode({}),headers)
        response = self.opener.open(request)
        days = response.read()
        if days:
            return int(days)
        return None
if __name__ == "__main__":
user = User()
user.login("email","password")
print user.checkin()
| 1,872 | 302 | 23 |
fc376a9a9df6198bf01d5714abfa5964a4318d5c | 7,956 | py | Python | scripts/premerge_checks.py | chsigg/llvm-premerge-checks | 2372e9b399ec5d602dd7bf9b914feac074cd4732 | [
"Apache-2.0"
] | null | null | null | scripts/premerge_checks.py | chsigg/llvm-premerge-checks | 2372e9b399ec5d602dd7bf9b914feac074cd4732 | [
"Apache-2.0"
] | null | null | null | scripts/premerge_checks.py | chsigg/llvm-premerge-checks | 2372e9b399ec5d602dd7bf9b914feac074cd4732 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs all check on buildkite agent.
import argparse
import json
import logging
import os
import pathlib
import re
import shutil
import sys
import time
from functools import partial
from typing import Callable
import clang_format_report
import clang_tidy_report
import run_cmake
import test_results_report
from buildkite.utils import upload_file
from exec_utils import watch_shell, if_not_matches, tee
from phabtalk.add_url_artifact import maybe_add_url_artifact
from phabtalk.phabtalk import Report, PhabTalk, Step
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Runs premerge checks8')
parser.add_argument('--log-level', type=str, default='WARNING')
parser.add_argument('--check-clang-format', action='store_true')
parser.add_argument('--check-clang-tidy', action='store_true')
parser.add_argument('--filter-output', action='store_true')
parser.add_argument('--projects', type=str, default='detect',
help="Projects to select, either a list or projects like 'clang;libc', or "
"'detect' to automatically infer proejcts from the diff, or "
"'default' to add all enabled projects")
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
build_dir = ''
step_key = os.getenv("BUILDKITE_STEP_KEY")
scripts_dir = pathlib.Path(__file__).parent.absolute()
artifacts_dir = os.path.join(os.getcwd(), 'artifacts')
os.makedirs(artifacts_dir, exist_ok=True)
report_path = f'{step_key}_result.json'
report = Report()
report.os = f'{os.getenv("BUILDKITE_AGENT_META_DATA_OS")}'
report.name = step_key
report.success = False
# Create report with failure in case something below fails.
with open(report_path, 'w') as f:
json.dump(report.__dict__, f, default=as_dict)
report.success = True
cmake = run_step('cmake', report, lambda s, r: cmake_report(args.projects, s, r))
if cmake.success:
ninja_all = run_step('ninja all', report, partial(ninja_all_report, filter_output=args.filter_output))
if ninja_all.success:
run_step('ninja check-all', report, partial(ninja_check_all_report, filter_output=args.filter_output))
if args.check_clang_tidy:
run_step('clang-tidy', report,
lambda s, r: clang_tidy_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-tidy.ignore'), s, r))
if args.check_clang_format:
run_step('clang-format', report,
lambda s, r: clang_format_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-format.ignore'), s, r))
logging.debug(report)
print('+++ Summary', flush=True)
for s in report.steps:
mark = 'OK '
if not s.success:
report.success = False
mark = 'FAIL '
msg = ''
if len(s.messages):
msg = ': ' + '\n '.join(s.messages)
print(f'{mark} {s.name}{msg}', flush=True)
print('--- Reproduce build locally', flush=True)
print(f'git clone {os.getenv("BUILDKITE_REPO")}')
print(f'git checkout {os.getenv("BUILDKITE_BRANCH")}')
for s in report.steps:
if len(s.reproduce_commands) == 0:
continue
print('\n'.join(s.reproduce_commands), flush=True)
print('', flush=True)
if not report.success:
print('^^^ +++', flush=True)
ph_target_phid = os.getenv('ph_target_phid')
ph_buildable_diff = os.getenv('ph_buildable_diff')
if ph_target_phid is not None:
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), 'https://reviews.llvm.org/api/', False)
for u in report.unit:
u['engine'] = step_key
phabtalk.update_build_status(ph_buildable_diff, ph_target_phid, True, report.success, report.lint, report.unit)
for a in report.artifacts:
url = upload_file(a['dir'], a['file'])
if url is not None:
maybe_add_url_artifact(phabtalk, ph_target_phid, url, f'{a["name"]} ({step_key})')
else:
logging.warning('No phabricator phid is specified. Will not update the build status in Phabricator')
with open(report_path, 'w') as f:
json.dump(report.__dict__, f, default=as_dict)
if not report.success:
print('Build completed with failures', flush=True)
exit(1)
| 41.65445 | 120 | 0.662267 | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs all check on buildkite agent.
import argparse
import json
import logging
import os
import pathlib
import re
import shutil
import sys
import time
from functools import partial
from typing import Callable
import clang_format_report
import clang_tidy_report
import run_cmake
import test_results_report
from buildkite.utils import upload_file
from exec_utils import watch_shell, if_not_matches, tee
from phabtalk.add_url_artifact import maybe_add_url_artifact
from phabtalk.phabtalk import Report, PhabTalk, Step
def ninja_all_report(step: Step, _: Report, filter_output: bool):
    """Run `ninja all`, teeing output to stdout and ninja-all.log.

    When filter_output is set, routine Building/Linking/... progress lines
    are suppressed from stdout (they still go to the log file).
    NOTE(review): relies on module globals `artifacts_dir`, `build_dir` and
    `report` set in __main__; the Report parameter is deliberately unused.
    """
    print('Full log will be available in Artifacts "ninja-all.log"', flush=True)
    step.reproduce_commands.append('ninja all')
    with open(f'{artifacts_dir}/ninja-all.log', 'wb') as f:
        w = sys.stdout.buffer.write
        if filter_output:
            # Drop routine progress lines from the console output only.
            r = re.compile(r'^\[.*] (Building|Linking|Linting|Copying|Generating|Creating)')
            w = partial(if_not_matches, write=sys.stdout.buffer.write, regexp=r)
        rc = watch_shell(
            partial(tee, write1=w, write2=f.write),
            partial(tee, write1=sys.stderr.buffer.write, write2=f.write),
            'ninja all', cwd=build_dir)
        logging.debug(f'ninja all: returned {rc}')
        step.set_status_from_exit_code(rc)
    if not step.success:
        # Attach the full build log to the global report on failure.
        report.add_artifact(artifacts_dir, 'ninja-all.log', 'build failed')
def ninja_check_all_report(step: Step, _: Report, filter_output: bool):
    """Run `ninja check-all`, collect test results, and report failures.

    Mirrors ninja_all_report but additionally parses test-results.xml via
    test_results_report and summarises the failure count in the artifact
    message. NOTE(review): uses module globals `artifacts_dir`, `build_dir`
    and `report` from __main__.
    """
    print('Full log will be available in Artifacts "ninja-check-all.log"', flush=True)
    step.reproduce_commands.append('ninja check-all')
    with open(f'{artifacts_dir}/ninja-check-all.log', 'wb') as f:
        w = sys.stdout.buffer.write
        if filter_output:
            # Hide progress and passing-test lines from the console.
            r = re.compile(r'^(\[.*] (Building|Linking|Generating)|(PASS|XFAIL|UNSUPPORTED):)')
            w = partial(if_not_matches, write=sys.stdout.buffer.write, regexp=r)
        rc = watch_shell(
            partial(tee, write1=w, write2=f.write),
            partial(tee, write1=sys.stderr.buffer.write, write2=f.write),
            'ninja check-all', cwd=build_dir)
        logging.debug(f'ninja check-all: returned {rc}')
        step.set_status_from_exit_code(rc)
    test_results_report.run(build_dir, 'test-results.xml', step, report)
    if not step.success:
        message = 'tests failed'
        # NOTE: `f` is re-bound here (the file handle above is already closed).
        f = report.test_stats['fail']
        if f == 1:
            message = '1 test failed'
        if f > 1:
            message = f'{f} tests failed'
        report.add_artifact(artifacts_dir, 'ninja-check-all.log', message)
def run_step(name: str, report: Report, thunk: Callable[[Step, Report], None]) -> Step:
    """Execute one named build step, time it, and append it to the report.

    The thunk receives a fresh Step (plus the report) and is expected to set
    the step's success/messages; the step is returned for chaining decisions.
    """
    started_at = time.time()
    # '--- <name>' opens a new collapsible section in the Buildkite log.
    print(f'--- {name}', flush=True)
    step = Step()
    step.name = name
    thunk(step, report)
    step.duration = time.time() - started_at
    if not step.success:
        # '^^^ +++' makes Buildkite auto-expand the section that just failed.
        print('^^^ +++', flush=True)
    report.steps.append(step)
    return step
def cmake_report(projects: str, step: Step, _: Report):
    """Configure the build with CMake and stash the resulting artifacts.

    Sets the module-global `build_dir` (consumed by the ninja steps), copies
    any generated artifact files into `artifacts_dir`, and records the exit
    status and reproduce commands on the step.
    """
    global build_dir
    cmake_result, build_dir, cmake_artifacts, commands = run_cmake.run(projects, os.getcwd())
    for file in cmake_artifacts:
        # Some expected artifacts may not exist on failure; copy what's there.
        if os.path.exists(file):
            shutil.copy2(file, artifacts_dir)
    step.set_status_from_exit_code(cmake_result)
    step.reproduce_commands = commands
def as_dict(obj):
    """JSON serialization fallback (passed as `default=` to json.dump).

    Uses obj.toJSON() when the object provides one, otherwise falls back to
    its attribute dict. The original bare `except:` also swallowed genuine
    errors raised *inside* toJSON() (and even KeyboardInterrupt); probing
    with getattr keeps the fallback behavior without masking real failures.
    """
    to_json = getattr(obj, 'toJSON', None)
    if callable(to_json):
        return to_json()
    return obj.__dict__
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Runs premerge checks8')
parser.add_argument('--log-level', type=str, default='WARNING')
parser.add_argument('--check-clang-format', action='store_true')
parser.add_argument('--check-clang-tidy', action='store_true')
parser.add_argument('--filter-output', action='store_true')
parser.add_argument('--projects', type=str, default='detect',
help="Projects to select, either a list or projects like 'clang;libc', or "
"'detect' to automatically infer proejcts from the diff, or "
"'default' to add all enabled projects")
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
build_dir = ''
step_key = os.getenv("BUILDKITE_STEP_KEY")
scripts_dir = pathlib.Path(__file__).parent.absolute()
artifacts_dir = os.path.join(os.getcwd(), 'artifacts')
os.makedirs(artifacts_dir, exist_ok=True)
report_path = f'{step_key}_result.json'
report = Report()
report.os = f'{os.getenv("BUILDKITE_AGENT_META_DATA_OS")}'
report.name = step_key
report.success = False
# Create report with failure in case something below fails.
with open(report_path, 'w') as f:
json.dump(report.__dict__, f, default=as_dict)
report.success = True
cmake = run_step('cmake', report, lambda s, r: cmake_report(args.projects, s, r))
if cmake.success:
ninja_all = run_step('ninja all', report, partial(ninja_all_report, filter_output=args.filter_output))
if ninja_all.success:
run_step('ninja check-all', report, partial(ninja_check_all_report, filter_output=args.filter_output))
if args.check_clang_tidy:
run_step('clang-tidy', report,
lambda s, r: clang_tidy_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-tidy.ignore'), s, r))
if args.check_clang_format:
run_step('clang-format', report,
lambda s, r: clang_format_report.run('HEAD~1', os.path.join(scripts_dir, 'clang-format.ignore'), s, r))
logging.debug(report)
print('+++ Summary', flush=True)
for s in report.steps:
mark = 'OK '
if not s.success:
report.success = False
mark = 'FAIL '
msg = ''
if len(s.messages):
msg = ': ' + '\n '.join(s.messages)
print(f'{mark} {s.name}{msg}', flush=True)
print('--- Reproduce build locally', flush=True)
print(f'git clone {os.getenv("BUILDKITE_REPO")}')
print(f'git checkout {os.getenv("BUILDKITE_BRANCH")}')
for s in report.steps:
if len(s.reproduce_commands) == 0:
continue
print('\n'.join(s.reproduce_commands), flush=True)
print('', flush=True)
if not report.success:
print('^^^ +++', flush=True)
ph_target_phid = os.getenv('ph_target_phid')
ph_buildable_diff = os.getenv('ph_buildable_diff')
if ph_target_phid is not None:
phabtalk = PhabTalk(os.getenv('CONDUIT_TOKEN'), 'https://reviews.llvm.org/api/', False)
for u in report.unit:
u['engine'] = step_key
phabtalk.update_build_status(ph_buildable_diff, ph_target_phid, True, report.success, report.lint, report.unit)
for a in report.artifacts:
url = upload_file(a['dir'], a['file'])
if url is not None:
maybe_add_url_artifact(phabtalk, ph_target_phid, url, f'{a["name"]} ({step_key})')
else:
logging.warning('No phabricator phid is specified. Will not update the build status in Phabricator')
with open(report_path, 'w') as f:
json.dump(report.__dict__, f, default=as_dict)
if not report.success:
print('Build completed with failures', flush=True)
exit(1)
| 2,848 | 0 | 115 |
a7799f2d05794edf1ba4dadb1f23f72cabecad86 | 598 | py | Python | sim_classes/patient.py | MichaelAllen1966/2004_simple_simpy_parallel | 95cba408a02891c5f6671482752eec9afcadafe7 | [
"MIT"
] | 1 | 2021-09-08T19:31:07.000Z | 2021-09-08T19:31:07.000Z | sim_classes/patient.py | MichaelAllen1966/2004_simple_simpy_parallel | 95cba408a02891c5f6671482752eec9afcadafe7 | [
"MIT"
] | 1 | 2021-08-23T20:43:39.000Z | 2021-08-23T20:43:39.000Z | sim_classes/patient.py | MichaelAllen1966/2004_simple_simpy_parallel | 95cba408a02891c5f6671482752eec9afcadafe7 | [
"MIT"
] | null | null | null | class Patient():
"""
Attributes
----------
los:
length of stay in hospital
priority:
priority for accessing bed (loer number = higher priority)
time_enter_queue:
time patient arrives and joins queue for bed
time_leave_queue:
time patient leaves queue and enters hospital bed
Methods
-------
__init__:
Constructor class for patient
"""
class Patient():
    """A hospital patient tracked through the bed queue.

    Attributes
    ----------
    los:
        Length of stay in hospital.
    priority:
        Priority for accessing a bed (lower number = higher priority).
    time_enter_queue:
        Time the patient arrives and joins the queue for a bed.
    time_leave_queue:
        Time the patient leaves the queue and enters a hospital bed.

    Methods
    -------
    __init__:
        Constructor class for patient.
    """

    def __init__(self, los=1, priority=2):
        """Create a patient with the given length of stay and bed priority."""
        self.los = los
        self.priority = priority
        # Queue timestamps start at zero; the simulation stamps them later.
        self.time_enter_queue = 0
        self.time_leave_queue = 0
| 141 | 0 | 27 |
7cc8f10573fbdb0dc397339bb1dd089244d269d0 | 4,235 | py | Python | EPISURGSegment/EPISURGSegment.py | fepegar/SlicerEPISURG | c0516641334f216f6bd3944015462dfcede0bc80 | [
"MIT"
] | 4 | 2021-02-23T03:31:24.000Z | 2022-03-23T16:59:08.000Z | EPISURGSegment/EPISURGSegment.py | fepegar/SlicerEPISURG | c0516641334f216f6bd3944015462dfcede0bc80 | [
"MIT"
] | null | null | null | EPISURGSegment/EPISURGSegment.py | fepegar/SlicerEPISURG | c0516641334f216f6bd3944015462dfcede0bc80 | [
"MIT"
] | 1 | 2021-02-23T03:31:25.000Z | 2021-02-23T03:31:25.000Z | import os
import unittest
import logging
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import (
ScriptedLoadableModule,
ScriptedLoadableModuleWidget,
ScriptedLoadableModuleTest,
)
from EPISURGBase import EPISURGBaseLogic # pylint: disable=import-error
class EPISURGSegment(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
class EPISURGSegmentWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
self.logic = None
self.subjects = None
class EPISURGSegmentTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear()
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_EPISURGSegment1()
def test_EPISURGSegment1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
# Get/create input data
import SampleData
registerSampleData()
inputVolume = SampleData.downloadSample('EPISURGSegment1')
self.delayDisplay('Loaded test data set')
inputScalarRange = inputVolume.GetImageData().GetScalarRange()
self.assertEqual(inputScalarRange[0], 0)
self.assertEqual(inputScalarRange[1], 695)
outputVolume = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode")
threshold = 100
# Test the module logic
logic = EPISURGSegmentLogic()
# Test algorithm with non-inverted threshold
logic.process(inputVolume, outputVolume, threshold, True)
outputScalarRange = outputVolume.GetImageData().GetScalarRange()
self.assertEqual(outputScalarRange[0], inputScalarRange[0])
self.assertEqual(outputScalarRange[1], threshold)
# Test algorithm with inverted threshold
logic.process(inputVolume, outputVolume, threshold, False)
outputScalarRange = outputVolume.GetImageData().GetScalarRange()
self.assertEqual(outputScalarRange[0], inputScalarRange[0])
self.assertEqual(outputScalarRange[1], inputScalarRange[1])
self.delayDisplay('Test passed')
| 34.430894 | 100 | 0.747816 | import os
import unittest
import logging
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import (
ScriptedLoadableModule,
ScriptedLoadableModuleWidget,
ScriptedLoadableModuleTest,
)
from EPISURGBase import EPISURGBaseLogic # pylint: disable=import-error
class EPISURGSegment(ScriptedLoadableModule):
    """Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent):
        # Register this module's metadata (title, category, help text)
        # with the Slicer module framework via the parent proxy object.
        super().__init__(parent)
        self.parent.title = "EPISURG Segment"
        self.parent.categories = ["EPISURG"]
        self.parent.dependencies = []  # no other Slicer modules required
        self.parent.helpText = """
This module can be used to segment brain resection cavities from MRI using deep learning.
See more information in the <a href="https://github.com/fepegar/resseg-ijcars">paper repository</a>.
"""
        self.parent.acknowledgementText = """This file was developed by Fernando
Perez-Garcia (University College London and King's College London).
"""
class EPISURGSegmentWidget(ScriptedLoadableModuleWidget):
    """Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent=None):
        """
        Called when the user opens the module the first time and the widget is initialized.
        """
        ScriptedLoadableModuleWidget.__init__(self, parent)
        self.logic = None     # created in setup()
        # NOTE(review): self.subjects is never assigned in this chunk —
        # presumably populated elsewhere; confirm against the full module.
        self.subjects = None
    def setup(self):
        # Build the widget: run the base setup, create the module logic,
        # then construct the (minimal) GUI.
        ScriptedLoadableModuleWidget.setup(self)
        self.logic = EPISURGSegmentLogic()
        self.makeGUI()
        # Expose the widget on the slicer namespace for scripted access.
        slicer.episurgSegment = self
    def makeGUI(self):
        # Minimal GUI: just absorb the remaining vertical space.
        self.layout.addStretch()
class EPISURGSegmentLogic(EPISURGBaseLogic):
    """Module logic; all shared behavior is inherited from EPISURGBaseLogic."""
    def __init__(self):
        super().__init__()
class EPISURGSegmentTest(ScriptedLoadableModuleTest):
    """
    This is the test case for your scripted module.
    Uses ScriptedLoadableModuleTest base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def setUp(self):
        """ Do whatever is needed to reset the state - typically a scene clear will be enough.
        """
        slicer.mrmlScene.Clear()
    def runTest(self):
        """Run as few or as many tests as needed here.
        """
        self.setUp()
        self.test_EPISURGSegment1()
    def test_EPISURGSegment1(self):
        """ Ideally you should have several levels of tests. At the lowest level
        tests should exercise the functionality of the logic with different inputs
        (both valid and invalid). At higher levels your tests should emulate the
        way the user would interact with your code and confirm that it still works
        the way you intended.
        One of the most important features of the tests is that it should alert other
        developers when their changes will have an impact on the behavior of your
        module. For example, if a developer removes a feature that you depend on,
        your test should break so they know that the feature is needed.
        """
        self.delayDisplay("Starting the test")
        # Get/create input data
        import SampleData
        # NOTE(review): registerSampleData() is not defined anywhere in this
        # chunk — confirm it exists at module level in the full source.
        registerSampleData()
        inputVolume = SampleData.downloadSample('EPISURGSegment1')
        self.delayDisplay('Loaded test data set')
        inputScalarRange = inputVolume.GetImageData().GetScalarRange()
        self.assertEqual(inputScalarRange[0], 0)
        self.assertEqual(inputScalarRange[1], 695)
        outputVolume = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode")
        threshold = 100
        # Test the module logic
        logic = EPISURGSegmentLogic()
        # Test algorithm with non-inverted threshold
        logic.process(inputVolume, outputVolume, threshold, True)
        outputScalarRange = outputVolume.GetImageData().GetScalarRange()
        self.assertEqual(outputScalarRange[0], inputScalarRange[0])
        self.assertEqual(outputScalarRange[1], threshold)
        # Test algorithm with inverted threshold
        logic.process(inputVolume, outputVolume, threshold, False)
        outputScalarRange = outputVolume.GetImageData().GetScalarRange()
        self.assertEqual(outputScalarRange[0], inputScalarRange[0])
        self.assertEqual(outputScalarRange[1], inputScalarRange[1])
        self.delayDisplay('Test passed')
45e1bc84dbc069291ff603af4ea17630a9c59768 | 7,944 | py | Python | model.py | janisstreib/sciencequiz | 94573f3b2952d52f2c9d1947161b65d4d2d67616 | [
"MIT"
] | null | null | null | model.py | janisstreib/sciencequiz | 94573f3b2952d52f2c9d1947161b65d4d2d67616 | [
"MIT"
] | null | null | null | model.py | janisstreib/sciencequiz | 94573f3b2952d52f2c9d1947161b65d4d2d67616 | [
"MIT"
] | 1 | 2019-06-24T11:32:57.000Z | 2019-06-24T11:32:57.000Z | from sciencequiz import db
import enum
import datetime
association_quiz_questions = db.Table('quiz_questions', db.Model.metadata,
db.Column('quiz_id', db.Integer, db.ForeignKey('quizzes.id')),
db.Column('question_id', db.Integer, db.ForeignKey('questions.id'))
)
| 38.376812 | 126 | 0.647407 | from sciencequiz import db
import enum
import datetime
# Association table implementing the many-to-many relation between
# quizzes and questions (see Quiz.questions / Question.quizzes).
association_quiz_questions = db.Table('quiz_questions', db.Model.metadata,
                                      db.Column('quiz_id', db.Integer, db.ForeignKey('quizzes.id')),
                                      db.Column('question_id', db.Integer, db.ForeignKey('questions.id'))
                                      )


class Category(db.Model):
    """A question category; groups related questions."""
    __tablename__ = 'categories'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), nullable=False)
    questions = db.relationship("Question", order_by="Question.id")


class QuestionType(enum.Enum):
    """Discriminator values for the polymorphic Question hierarchy."""
    choose = 1
    estimate = 2


class Question(db.Model):
    """Base question row; subclasses are distinguished by `type`
    (polymorphic_on) — see QuestionChoose / QuestionEstimate."""
    __tablename__ = 'questions'
    id = db.Column(db.Integer, primary_key=True)
    question = db.Column(db.String(500), nullable=False)
    category_id = db.Column(db.ForeignKey('categories.id'), nullable=False)
    image_file_name = db.Column(db.String(200), nullable=True)  # optional image for the question
    quizzes = db.relationship('Quiz', secondary=association_quiz_questions,
                              back_populates='questions')
    type = db.Column(db.Enum(QuestionType))
    __mapper_args__ = {
        'polymorphic_on': type
    }


class QuestionChoose(Question):
    """Multiple-choice question with a set of AnswerChoose options."""
    correct_answer_id = db.Column(db.ForeignKey('answers_choose.id'),
                                  nullable=True)  # Has to be nullable because of inheritance. Use join instead?
    answers = db.relationship('AnswerChoose', foreign_keys='[AnswerChoose.question_id]', order_by="AnswerChoose.id")
    __mapper_args__ = {
        'polymorphic_identity': QuestionType.choose
    }


class QuestionEstimate(Question):
    """Estimation question; teams guess `correct_value`."""
    correct_value = db.Column(db.Float, nullable=True)
    __mapper_args__ = {
        'polymorphic_identity': QuestionType.estimate
    }


class AnswerChoose(db.Model):
    """One selectable answer option belonging to a QuestionChoose."""
    __tablename__ = 'answers_choose'
    id = db.Column(db.Integer, primary_key=True)
    question_id = db.Column(db.ForeignKey('questions.id', ondelete='CASCADE'),
                            nullable=False)  # Can we specify this is only valid for QuestionChoose?
    question = db.relationship("QuestionChoose", foreign_keys="[AnswerChoose.question_id]")
    answer = db.Column(db.String(250), nullable=False)
class Quiz(db.Model):
    """A named quiz for a given year: an ordered collection of questions."""
    __tablename__ = 'quizzes'
    id = db.Column(db.Integer, primary_key=True)
    public = db.Column(db.Boolean, nullable=False)
    name = db.Column(db.String(250), nullable=False)
    year = db.Column(db.Integer, nullable=False)
    questions = db.relationship("Question", secondary=association_quiz_questions,
                                back_populates="quizzes", order_by="Question.id")


class DeviceToken(db.Model):
    """Unique token identifying a device (used by sessions below)."""
    __tablename__ = 'device_tokens'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), nullable=False)
    token = db.Column(db.String(250), nullable=False, unique=True)


class Team(db.Model):
    """A team of users competing in a given year."""
    __tablename__ = 'teams'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), nullable=False)
    year = db.Column(db.Integer, nullable=False)
    team_sessions = db.relationship('TeamSession')
    members = db.relationship('User')


class User(db.Model):
    """A participant account, optionally assigned to a team.

    NOTE(review): `password` is a plain String column; the commented-out
    login helper below hashed with SHA-512 — confirm how passwords are
    actually written before relying on this.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(50), nullable=False)
    display_name = db.Column(db.String(150), nullable=False)
    password = db.Column(db.String(150), nullable=False)
    email = db.Column(db.String(250), nullable=False)
    team_id = db.Column(db.ForeignKey('teams.id'), nullable=True)

    # @staticmethod
    # def login(username, password, db):
    #     m = hashlib.sha512()
    #     m.update(password.encode('utf-8'))
    #     res = db.execute("SELECT id, username, admin, display_name FROM users WHERE username=%s AND password=%s",
    #                      (username, m.hexdigest()))
    #     if len(res) == 0:
    #         return None
    #     return User(**res[0])


class SessionState(enum.Enum):
    """Lifecycle of a quiz session."""
    pending = 1
    running = 2
    paused = 3
    finished = 4
    closed = 5
class Session(db.Model):
    """A running instance of a quiz on a device, with its current state,
    current question and timing information."""
    __tablename__ = 'sessions'
    id = db.Column(db.Integer, primary_key=True)
    quiz_id = db.Column(db.ForeignKey('quizzes.id'), nullable=False)
    quiz = db.relationship("Quiz")
    team_sessions = db.relationship('TeamSession')
    device_token_id = db.Column(db.ForeignKey('device_tokens.id'))
    device_token = db.relationship("DeviceToken")
    state = db.Column(db.Enum(SessionState), nullable=False)
    current_question_id = db.Column(db.ForeignKey('questions.id'), nullable=True)
    current_question = db.relationship('Question')
    start_time = db.Column(db.DateTime, nullable=True)  # Quiz is temporarily paused if start_time is NULL
    # Accumulated pause time, added on top of start_time.
    offset = db.Column(db.Interval, nullable=False, default=datetime.timedelta())
    @classmethod
    def on_display(cls):
        '''
        Whether the session should currently be displayed in its room
        '''
        # SQL filter expression: displayed while running, finished or paused.
        return db.or_(cls.state == SessionState.running, cls.state == SessionState.finished, cls.state == SessionState.paused)
class TeamSession(db.Model):
    """Participation of one team in one session, with its submitted answers."""
    __tablename__ = 'team_sessions'
    id = db.Column(db.Integer, primary_key=True)
    team_id = db.Column(db.ForeignKey('teams.id'), nullable=False)
    team = db.relationship("Team")
    session_id = db.Column(db.ForeignKey('sessions.id'), nullable=False)
    session = db.relationship("Session")
    answers = db.relationship("TeamAnswer")
    def score(self):
        """Compute this team's score for the session.

        Scoring rules (one point each):
        - choose: the selected option is the question's correct answer;
        - estimate: this team's guess is strictly closer to the correct
          value than every other team's guess in the same session (a lone
          answer also scores).
        """
        result = 0.0
        for answer in self.answers:
            if isinstance(answer, TeamAnswerChoose):
                answer_choose = answer.answer
                if answer_choose.question.correct_answer_id == answer_choose.id:
                    result += 1.0
            elif isinstance(answer, TeamAnswerEstimate):
                correct_estimate = answer.question.correct_value
                # All other teams' estimates for the same question in this session.
                other_estimates = TeamAnswerEstimate.query.join(TeamAnswerEstimate.team_session).filter(
                    TeamSession.session_id == self.session_id,  # Same session
                    TeamAnswerEstimate.question_id == answer.question_id,  # Same question
                    TeamAnswerEstimate.id != answer.id  # Not me
                ).options(db.load_only("estimate")).all()
                other_estimate_dists = [abs(x.estimate - correct_estimate) for x in other_estimates]
                my_estimate_dist = abs(answer.estimate - correct_estimate)
                if len(other_estimate_dists) == 0:
                    # No competing estimates: the point is awarded by default.
                    result += 1.0
                else:
                    best_other_dist = min(other_estimate_dists)
                    # Strictly closer than the best competitor wins; ties score nothing.
                    if my_estimate_dist < best_other_dist:
                        result += 1.0
            else:
                # Debug trace for an unexpected answer subtype.
                print("WAT?")
        return result
class TeamAnswer(db.Model):
    """Base row for a team's answer within a session; polymorphic on `type`."""
    __tablename__ = 'team_answers'
    id = db.Column(db.Integer, primary_key=True)
    team_session_id = db.Column(db.ForeignKey('team_sessions.id'), nullable=False)
    team_session = db.relationship("TeamSession")
    type = db.Column(db.Enum(QuestionType))
    __mapper_args__ = {
        'polymorphic_on': type
    }


class TeamAnswerChoose(TeamAnswer):
    """Answer to a multiple-choice question: references the chosen option."""
    answer_id = db.Column(db.ForeignKey('answers_choose.id'), nullable=True)
    answer = db.relationship("AnswerChoose")
    __mapper_args__ = {
        'polymorphic_identity': QuestionType.choose
    }


class TeamAnswerEstimate(TeamAnswer):
    """Answer to an estimation question: stores the numeric guess."""
    question_id = db.Column(db.ForeignKey('questions.id'), nullable=True)
    question = db.relationship("Question")
    estimate = db.Column(db.Float, nullable=True)
    __mapper_args__ = {
        'polymorphic_identity': QuestionType.estimate
    }
class Display(object):
    """Plain holder for a display client's token and read/write flags."""

    def __init__(self, token, r=True, w=False):
        # Store the access token plus read (r) / write (w) permissions.
        self.token = token
        self.r = r
        self.w = w
| 1,459 | 5,673 | 417 |
7f8e0bc985c5bb8cf7a43a0da7eb24f21fd70b08 | 285 | py | Python | visdialch/encoders/__init__.py | yuleiniu/rva | 7b35689464822e0c25b273d96caedd0e65b26288 | [
"BSD-3-Clause"
] | 68 | 2019-06-04T13:53:49.000Z | 2022-01-14T11:17:05.000Z | visdialch/encoders/__init__.py | yuleiniu/rva | 7b35689464822e0c25b273d96caedd0e65b26288 | [
"BSD-3-Clause"
] | 17 | 2019-06-18T15:31:30.000Z | 2022-03-11T23:48:39.000Z | visdialch/encoders/__init__.py | yuleiniu/rva | 7b35689464822e0c25b273d96caedd0e65b26288 | [
"BSD-3-Clause"
] | 11 | 2019-08-01T07:24:18.000Z | 2021-09-18T13:13:12.000Z | from visdialch.encoders.lf import LateFusionEncoder
from visdialch.encoders.rva import RvAEncoder
| 25.909091 | 69 | 0.733333 | from visdialch.encoders.lf import LateFusionEncoder
from visdialch.encoders.rva import RvAEncoder
def Encoder(model_config, *args):
    """Factory that instantiates the encoder named by model_config["encoder"]."""
    registry = {
        "lf": LateFusionEncoder,
        "rva": RvAEncoder,
    }
    # An unknown encoder name raises KeyError, exactly like a direct lookup.
    encoder_cls = registry[model_config["encoder"]]
    return encoder_cls(model_config, *args)
| 163 | 0 | 23 |
37bad10d9dc52f3dd2ac4e87e22303e9b9c7adce | 1,627 | py | Python | azure-mgmt-logic/azure/mgmt/logic/models/x12_schema_reference.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/mgmt/logic/models/x12_schema_reference.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure/mgmt/logic/models/x12_schema_reference.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class X12SchemaReference(Model):
"""The X12 schema reference.
:param message_id: The message id.
:type message_id: str
:param sender_application_id: The sender application id.
:type sender_application_id: str
:param schema_version: The schema version.
:type schema_version: str
:param schema_name: The schema name.
:type schema_name: str
"""
_validation = {
'message_id': {'required': True},
'schema_version': {'required': True},
'schema_name': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'sender_application_id': {'key': 'senderApplicationId', 'type': 'str'},
'schema_version': {'key': 'schemaVersion', 'type': 'str'},
'schema_name': {'key': 'schemaName', 'type': 'str'},
}
| 35.369565 | 92 | 0.613399 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class X12SchemaReference(Model):
    """The X12 schema reference.
    :param message_id: The message id.
    :type message_id: str
    :param sender_application_id: The sender application id.
    :type sender_application_id: str
    :param schema_version: The schema version.
    :type schema_version: str
    :param schema_name: The schema name.
    :type schema_name: str
    """
    # msrest validation rules: these three fields are mandatory.
    _validation = {
        'message_id': {'required': True},
        'schema_version': {'required': True},
        'schema_name': {'required': True},
    }
    # Maps Python attribute names to their wire (JSON) keys.
    _attribute_map = {
        'message_id': {'key': 'messageId', 'type': 'str'},
        'sender_application_id': {'key': 'senderApplicationId', 'type': 'str'},
        'schema_version': {'key': 'schemaVersion', 'type': 'str'},
        'schema_name': {'key': 'schemaName', 'type': 'str'},
    }
    def __init__(self, message_id, schema_version, schema_name, sender_application_id=None):
        # NOTE(review): does not call super().__init__(); matches this
        # AutoRest generation — confirm against the msrest Model contract.
        self.message_id = message_id
        self.sender_application_id = sender_application_id
        self.schema_version = schema_version
        self.schema_name = schema_name
| 247 | 0 | 27 |
153f0b45ee20f480a212768bb8955a4280fe27df | 607 | py | Python | feincms_banners/admin.py | coandcouk/feincms-banners | 0404f77a3266cd5c69990c21c0badd769168abd7 | [
"BSD-3-Clause"
] | null | null | null | feincms_banners/admin.py | coandcouk/feincms-banners | 0404f77a3266cd5c69990c21c0badd769168abd7 | [
"BSD-3-Clause"
] | null | null | null | feincms_banners/admin.py | coandcouk/feincms-banners | 0404f77a3266cd5c69990c21c0badd769168abd7 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from feincms_banners import models
admin.site.register(
models.Banner,
list_display=(
'name', 'is_active', 'type', 'url', 'active_from',
'active_until', 'embeds', 'impressions', 'click_count'),
list_filter=('is_active', 'type'),
raw_id_fields=('mediafile',),
search_fields=('name', 'url', 'code'),
)
admin.site.register(
models.Click,
list_display=('timestamp', 'banner', 'ip', 'user_agent', 'referrer'),
search_fields=('banner__name', 'user_agent', 'referrer'),
)
| 27.590909 | 73 | 0.675453 | from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from feincms_banners import models
# Register Banner in the Django admin with list, filter and search options.
admin.site.register(
    models.Banner,
    list_display=(
        'name', 'is_active', 'type', 'url', 'active_from',
        'active_until', 'embeds', 'impressions', 'click_count'),
    list_filter=('is_active', 'type'),
    raw_id_fields=('mediafile',),
    search_fields=('name', 'url', 'code'),
)

# Register Click: per-click records with timestamp and client metadata.
admin.site.register(
    models.Click,
    list_display=('timestamp', 'banner', 'ip', 'user_agent', 'referrer'),
    search_fields=('banner__name', 'user_agent', 'referrer'),
)
| 0 | 0 | 0 |
b7a03b3fa064f83f50e314801498b74fcc73e6a5 | 1,070 | py | Python | week3/panda.py | sevgo/Programming101 | ac25c4d9695563b449a629c60ec77a739c9f5be3 | [
"BSD-3-Clause"
] | null | null | null | week3/panda.py | sevgo/Programming101 | ac25c4d9695563b449a629c60ec77a739c9f5be3 | [
"BSD-3-Clause"
] | 1 | 2021-09-16T05:44:31.000Z | 2021-09-16T05:44:31.000Z | week3/panda.py | sevgo/Programming101 | ac25c4d9695563b449a629c60ec77a739c9f5be3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from re import match
| 21.836735 | 51 | 0.574766 | #!/usr/bin/env python3
from re import match
class Panda:
    """A panda with a name, a validated e-mail address and a gender."""

    def __init__(self, name, mail_addr, gender):
        self.name = name
        self.gender = gender
        # _email() validates the address and raises NotValidEmail if malformed.
        self.mail = self._email(mail_addr)

    def _email(self, address):
        """Return *address* unchanged, or raise NotValidEmail if it is malformed."""
        pattern = r"[\.\w]{2,}[@]\w+[.]\w+"
        if not match(pattern, address):
            raise NotValidEmail
        return address

    def _gender(self):
        """Return the stored gender string."""
        return self.gender

    def isFemale(self):
        """True when the panda's gender is 'female'."""
        return self.gender == 'female'

    def isMale(self):
        """True when the panda's gender is 'male'."""
        return self.gender == 'male'

    def __eq__(self, other):
        """Pandas are equal when name, mail and gender all match."""
        same_name = self.name == other.name
        same_mail = self.mail == other.mail
        same_gender = self.gender == other.gender
        return same_name and same_mail and same_gender

    def __str__(self):
        return self.name

    def __hash__(self):
        # Hash follows the string representation (the name).
        return hash(str(self))
class NotValidEmail(Exception):
    """Raised when an e-mail address fails the Panda validation pattern."""
    pass
| 753 | 10 | 262 |
8fa036657efa0af9954d69e3d7891ce07e45a060 | 1,037 | py | Python | profiles/models.py | joarkm/olmonopolet-api | 5fb605f53d6fc87c441ae0f72360c524cb0d3fb7 | [
"MIT"
] | 2 | 2020-11-21T13:15:53.000Z | 2021-05-18T19:17:41.000Z | profiles/models.py | joarkm/olmonopolet-api | 5fb605f53d6fc87c441ae0f72360c524cb0d3fb7 | [
"MIT"
] | 6 | 2021-03-21T19:24:26.000Z | 2021-09-22T19:09:31.000Z | profiles/models.py | joarkm/olmonopolet-api | 5fb605f53d6fc87c441ae0f72360c524cb0d3fb7 | [
"MIT"
] | 1 | 2021-05-20T21:52:10.000Z | 2021-05-20T21:52:10.000Z | from django.db import models
from django.contrib.auth.models import User
from stores.models import Store
# Create your models here.
class Profile(models.Model):
'''
Profile associated with User model.
Automatically added from signal when User is created.
'''
user = models.OneToOneField(User, verbose_name='User',related_name='profiles',on_delete=models.CASCADE)
store = models.ForeignKey(Store, verbose_name='Store', related_name='profile_stores', on_delete=models.CASCADE, blank=True, null=True)
premium = models.BooleanField(help_text='Is user premium?', default=False)
# Untappd Fields
untappd_username = models.CharField(help_text='Untappd username', max_length=250, blank=True)
untappd_avatar_url = models.URLField(help_text='URL to Avatar used on Untappd', max_length=256, blank=True, default='')
untappd_sync_date = models.DateTimeField(help_text='Time when Profile was last synced with Untappd',blank=True, null=True)
| 47.136364 | 138 | 0.751205 | from django.db import models
from django.contrib.auth.models import User
from stores.models import Store
# Create your models here.
class Profile(models.Model):
    '''
    Profile associated with User model.
    Automatically added from signal when User is created.
    '''
    # One-to-one link to the Django auth user who owns this profile.
    user = models.OneToOneField(User, verbose_name='User',related_name='profiles',on_delete=models.CASCADE)
    # Optional store associated with the profile (nullable FK).
    store = models.ForeignKey(Store, verbose_name='Store', related_name='profile_stores', on_delete=models.CASCADE, blank=True, null=True)
    premium = models.BooleanField(help_text='Is user premium?', default=False)
    # Untappd Fields
    untappd_username = models.CharField(help_text='Untappd username', max_length=250, blank=True)
    untappd_avatar_url = models.URLField(help_text='URL to Avatar used on Untappd', max_length=256, blank=True, default='')
    untappd_sync_date = models.DateTimeField(help_text='Time when Profile was last synced with Untappd',blank=True, null=True)
    def __str__(self):
        # Represent the profile by its owner's username.
        return self.user.username
| 31 | 0 | 27 |
a4eaf3264b92bd8b8637723dba6ddc191dbf0e58 | 1,753 | py | Python | travels/management/commands/add_trips.py | adrianboratyn/TripRecommendations | d3e5a10d80c405d5ac22f028be54c8198bc10410 | [
"MIT"
] | null | null | null | travels/management/commands/add_trips.py | adrianboratyn/TripRecommendations | d3e5a10d80c405d5ac22f028be54c8198bc10410 | [
"MIT"
] | null | null | null | travels/management/commands/add_trips.py | adrianboratyn/TripRecommendations | d3e5a10d80c405d5ac22f028be54c8198bc10410 | [
"MIT"
] | 2 | 2021-06-26T13:03:22.000Z | 2021-06-27T10:47:59.000Z | import pandas as pd
import os.path
from django.core.management.base import BaseCommand, CommandError
import logging
from travels.models import Trip
# full_path = os.path.join("../files/Wycieczki.xlsx")
# trips = pd.read_excel(full_path, sheet_name='Góry')
# print(trips["Nazwa wycieczki"])
class Command(BaseCommand):
    """Management command that bulk-creates Trip rows from an Excel sheet."""
    help = "Creating trips"
    full_path = 'travels/management/files/Wycieczki.xlsx'
    # NOTE: the spreadsheet is read at import time (class-body statement);
    # a missing file therefore breaks command discovery, not just execution.
    trips = pd.read_excel(full_path, sheet_name='Zwiedzanie')
    def handle(self, *args, **options):
        """Create a Trip for every row of the sheet.

        Idempotent: uses get_or_create, so re-running the command does not
        duplicate trips. Logs each newly created trip.
        """
        for i in range(len(self.trips)):
            _, created = Trip.objects.get_or_create(title=self.trips["Nazwa wycieczki"][i],
                                                    # hotelstars=self.trips["Gwiazdki hotelu"][i],
                                                    country=self.trips["Kraj"][i],
                                                    timezone=self.trips["Strefa czasowa"][i],
                                                    landscape=self.trips["Krajobraz"][i],
                                                    type=self.trips["Rodzaj wycieczki"][i],
                                                    transport=self.trips["Rodzaj podróży"][i],
                                                    price=self.trips["Cena"][i],
                                                    duration=self.trips["Ilość dni"][i],
                                                    )
            if created:
                # Bug fix: logging.info takes a %-style format string; the
                # original passed the trip name as an argument with no
                # placeholder, making the logging module raise
                # "not all arguments converted" internally.
                logging.info("Dodano Wycieczkę %s", self.trips["Nazwa wycieczki"][i])
| 39.840909 | 98 | 0.474615 | import pandas as pd
import os.path
from django.core.management.base import BaseCommand, CommandError
import logging
from travels.models import Trip
# full_path = os.path.join("../files/Wycieczki.xlsx")
# trips = pd.read_excel(full_path, sheet_name='Góry')
# print(trips["Nazwa wycieczki"])
class Command(BaseCommand):
    """Management command that bulk-creates Trip rows from an Excel sheet."""
    help = "Creating trips"
    full_path = 'travels/management/files/Wycieczki.xlsx'
    # NOTE: the spreadsheet is read at import time (class-body statement);
    # a missing file therefore breaks command discovery, not just execution.
    trips = pd.read_excel(full_path, sheet_name='Zwiedzanie')
    def handle(self, *args, **options):
        """Create a Trip for every row of the sheet.

        Idempotent: uses get_or_create, so re-running the command does not
        duplicate trips. Logs each newly created trip.
        """
        for i in range(len(self.trips)):
            _, created = Trip.objects.get_or_create(title=self.trips["Nazwa wycieczki"][i],
                                                    # hotelstars=self.trips["Gwiazdki hotelu"][i],
                                                    country=self.trips["Kraj"][i],
                                                    timezone=self.trips["Strefa czasowa"][i],
                                                    landscape=self.trips["Krajobraz"][i],
                                                    type=self.trips["Rodzaj wycieczki"][i],
                                                    transport=self.trips["Rodzaj podróży"][i],
                                                    price=self.trips["Cena"][i],
                                                    duration=self.trips["Ilość dni"][i],
                                                    )
            if created:
                # Bug fix: logging.info takes a %-style format string; the
                # original passed the trip name as an argument with no
                # placeholder, making the logging module raise
                # "not all arguments converted" internally.
                logging.info("Dodano Wycieczkę %s", self.trips["Nazwa wycieczki"][i])
| 0 | 0 | 0 |
b03f345408be8a598b9b9ce0c717148f00a9246b | 34,199 | py | Python | mmnrm/training.py | T-Almeida/mmnrm | f67441a4e2cb0a8335b5e96f3ea9ea0a0eba080a | [
"MIT"
] | 1 | 2021-04-19T10:10:43.000Z | 2021-04-19T10:10:43.000Z | mmnrm/training.py | T-Almeida/mmnrm | f67441a4e2cb0a8335b5e96f3ea9ea0a0eba080a | [
"MIT"
] | 1 | 2020-09-30T11:34:01.000Z | 2020-09-30T11:34:01.000Z | mmnrm/training.py | T-Almeida/mmnrm | f67441a4e2cb0a8335b5e96f3ea9ea0a0eba080a | [
"MIT"
] | null | null | null | """
This file contains an abstraction for implment pairwise loss training
"""
import tensorflow as tf
from tensorflow.keras import backend as K
import time
import tempfile
import shutil
import subprocess
import os
from collections import defaultdict
from timeit import default_timer as timer
from mmnrm.utils import save_model_weights, load_model_weights, set_random_seed, merge_dicts, flat_list, index_from_list
from mmnrm.text import TREC04_merge_goldstandard_files
from mmnrm.callbacks import WandBValidationLogger
import random
import numpy as np
import pickle
import wandb
import nltk
# Create a more abstract class that uses common elements like b_size, transform_input etc...
class CrossValidationCollection(BaseCollection):
    """
    Helper class to store the folds data and build the respective Train and Test Collections
    """

def sentence_splitter_builder(tokenizer, mode=0, max_sentence_size=21):
    """
    Return a transform_inputs_fn for training and test as a tuple
    mode 0: use fixed sized window for the split
    mode 1: split around a query-document match with a fixed size
    mode 2: deeprank alike. Similar to mode 1, but group the match by q-term
    mode 3: split with ntlk sentence splitter
    mode 4: similar to 2, but uses sentence splitting instead of fix size
    """
    # NOTE(review): only the window helpers for modes 1/2 are visible here;
    # train_splitter/test_splitter are not defined anywhere in this chunk —
    # the inner function bodies appear to have been stripped. Confirm
    # against the full source before using this function.
    if mode in [1, 2]:
        half_window = max_sentence_size//2
        min_w = lambda x: max(0,x-half_window)      # clamp window start at 0
        max_w = lambda x,l: min(x+half_window,l)+1  # clamp window end at length l
    return train_splitter, test_splitter
| 39.39977 | 213 | 0.524401 | """
This file contains an abstraction for implment pairwise loss training
"""
import tensorflow as tf
from tensorflow.keras import backend as K
import time
import tempfile
import shutil
import subprocess
import os
from collections import defaultdict
from timeit import default_timer as timer
from mmnrm.utils import save_model_weights, load_model_weights, set_random_seed, merge_dicts, flat_list, index_from_list
from mmnrm.text import TREC04_merge_goldstandard_files
from mmnrm.callbacks import WandBValidationLogger
import random
import numpy as np
import pickle
import wandb
import nltk
def hinge_loss(positive_score, negative_score, *args):
    # Pairwise hinge: penalize whenever the positive document does not beat
    # the negative one by a margin of at least 1. Extra args are ignored.
    return K.mean(K.maximum(0., 1. - positive_score + negative_score))

def pairwise_cross_entropy(positive_score, negative_score, *args):
    # Softmax over the (positive, negative) pair: minimize -log P(positive wins).
    positive_exp = K.exp(positive_score)
    return K.mean(-K.log(positive_exp/(positive_exp+K.exp(negative_score))))
class BaseTraining():
    """Base class holding the common state of a training loop: the model,
    the loss function, the training collection, the optimizer and any
    callbacks. Subclasses implement train()."""

    def __init__(self,
                 model,
                 loss,
                 train_collection,
                 optimizer="adam",  # keras optimizer (name or instance)
                 callbacks=None,
                 **kwargs):
        """
        model: the Keras model to train
        loss: callable computing the training loss
        train_collection: collection yielding the training data
        optimizer: Keras optimizer name or instance (resolved via Keras)
        callbacks: list of callbacks; defaults to an empty list
        """
        super(BaseTraining, self).__init__(**kwargs)
        self.model = model
        self.loss = loss
        self.train_collection = train_collection
        self.optimizer = tf.keras.optimizers.get(optimizer)
        # Bug fix: the original signature used a mutable default
        # (callbacks=[]), which is shared across every instance; use None
        # as the sentinel and create a fresh list per instance instead.
        self.callbacks = [] if callbacks is None else callbacks

    def draw_graph(self, name, *data):
        """Trace one training step and export the graph/profile to
        TensorBoard logs under logs/func/<name>."""
        logdir = 'logs/func/'+name
        writer = tf.summary.create_file_writer(logdir)
        tf.summary.trace_on(graph=True, profiler=True)
        self.training_step(*data)
        with writer.as_default():
            tf.summary.trace_export(
                name="training_trace",
                step=0,
                profiler_outdir=logdir)

    def train(self, epoch, draw_graph=False):
        """Subclasses must implement the actual training loop."""
        raise NotImplementedError("This is an abstract class, should not be initialized")
class PairwiseTraining(BaseTraining):
    def __init__(self, loss=hinge_loss, grads_callback=None, transform_model_inputs_callback=None, **kwargs):
        # grads_callback: optional hook that post-processes the gradients
        #   before they are applied (see training_step).
        # transform_model_inputs_callback: optional hook mapping
        #   (pos_in, neg_in) -> (pos_in, neg_in, pos_label, neg_label).
        super(PairwiseTraining, self).__init__(loss=loss, **kwargs)
        self.grads_callback = grads_callback
        self.transform_model_inputs_callback = transform_model_inputs_callback
def predict_score(self, inputs):
return self.model.predict(inputs)
#output = self.model.predict(inputs)
#if isinstance(self.model.output,list):
# return output[0]
#else:
# return output
@tf.function # check if this can reutilize the computational graph for the prediction phase
def model_score(self, inputs):
print("\r[DEBUG] CALL MODEL_SCORE FUNCTION")
return self.model(inputs)
@tf.function # build a static computational graph
def training_step(self, pos_in, neg_in, custom_output=None):
print("training step")
pos_label, neg_label = None, None
if self.transform_model_inputs_callback is not None:
pos_in, neg_in, pos_label, neg_label = self.transform_model_inputs_callback(pos_in, neg_in)
# manual optimization
with tf.GradientTape() as tape:
pos_score = self.model_score(pos_in)
neg_score = self.model_score(neg_in)
if custom_output is not None:
print("DEBUG custom output")
pos_score = custom_output(pos_score)
neg_score = custom_output(neg_score)
loss = self.loss(pos_score, neg_score, pos_label, neg_label)
# using auto-diff to get the gradients
grads = tape.gradient(loss, self.model.trainable_weights)
#normalize grads???????????????????????????
if self.grads_callback is not None:
grads = self.grads_callback(grads)
#tf.print(grads)
# applying the gradients using an optimizer
#tf.print(self.model.trainable_weights[-1])
self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
return loss
def evaluate_test_set(self, test_set):
generator_Y = test_set.generator()
q_scores = defaultdict(list)
for i, _out in enumerate(generator_Y):
query_id, Y, docs_ids, _ = _out
s_time = time.time()
scores = self.model_score(Y).numpy()[:,0].tolist()
print("\rEvaluation {} | time {}".format(i, time.time()-s_time), end="\r")
q_scores[query_id].extend(list(zip(docs_ids,scores)))
# sort the rankings
for query_id in q_scores.keys():
q_scores[query_id].sort(key=lambda x:-x[1])
# evaluate
return test_set.evaluate(q_scores)
def train(self, epoch, draw_graph=False, custom_output=None, steps=None):
# create train generator
if steps is None:
steps = self.train_collection.get_steps()
generator_X = self.train_collection.generator()
positive_inputs, negative_inputs = next(generator_X)
if draw_graph:
self.draw_graph(positive_inputs, negative_inputs)
for c in self.callbacks:
c.on_train_start(self)
for e in range(epoch):
#execute callbacks
for c in self.callbacks:
c.on_epoch_start(self, e)
for s in range(steps):
#execute callbacks
for c in self.callbacks:
c.on_step_start(self, e, s)
s_time = timer()
# static TF computational graph for the traning step
loss = self.training_step(positive_inputs, negative_inputs, custom_output).numpy()
#execute callbacks
f_time = timer()-s_time
for c in self.callbacks:
c.on_step_end(self, e, s, loss, f_time)
# next batch
positive_inputs, negative_inputs = next(generator_X)
#execute callbacks
for c in self.callbacks:
c.on_epoch_end(self, e)
#execute callbacks
for c in self.callbacks:
c.on_train_end(self)
class CrossValidation(BaseTraining):
    """K-fold cross-validation driver built on :class:`PairwiseTraining`.

    For each fold it resets the model to its initial weights, trains with a
    fresh ``PairwiseTraining`` instance and logs/evaluates through a
    WandBValidationLogger. Currently requires ``wandb_config``.
    """

    def __init__(self,
                 loss=hinge_loss,
                 wandb_config=None,
                 callbacks=[],
                 **kwargs):
        super(CrossValidation, self).__init__(loss=loss, **kwargs)
        self.wandb_config = wandb_config
        self.callbacks = callbacks

    def train(self, epoch, draw_graph=False):
        print("Start the Cross validation for", self.train_collection.get_n_folds(), "folds")

        temp_dir = tempfile.mkdtemp()
        try:
            # save model init state so every fold starts identically
            save_model_weights(os.path.join(temp_dir, "temp_weights.h5"), self.model)

            # BUG FIX: remember the base run name once. The original
            # prepended "Fold_0<i>_" to the *mutated* config each fold,
            # producing names like "Fold_01_Fold_00_<name>".
            base_name = self.wandb_config["name"] if self.wandb_config is not None else None

            best_test_scores = []
            for i, collections in enumerate(self.train_collection.generator()):
                print("Prepare FOLD", i)
                train_collection, test_collection = collections

                # show baseline metrics over the previous ranking order
                pre_metrics = test_collection.evaluate_pre_rerank()
                print("Evaluation of the original ranking order")
                for n, m in pre_metrics:
                    print(n, m)

                # reset all the states
                set_random_seed()
                K.clear_session()

                # load model init state
                load_model_weights(os.path.join(temp_dir, "temp_weights.h5"), self.model)

                # create evaluation callback
                if self.wandb_config is not None:
                    self.wandb_config["name"] = "Fold_0" + str(i) + "_" + base_name
                    wandb_val_logger = WandBValidationLogger(wandb_args=self.wandb_config,
                                                            steps_per_epoch=train_collection.get_steps(),
                                                            validation_collection=test_collection)
                else:
                    raise KeyError("Please use wandb for now!!!")

                # NOTE(review): appended before training — presumably
                # ``current_best`` is a mutable holder updated by the
                # logger during training; confirm in WandBValidationLogger.
                best_test_scores.append(wandb_val_logger.current_best)

                callbacks = [wandb_val_logger] + self.callbacks

                print("Train and test FOLD", i)
                pairwise_training = PairwiseTraining(model=self.model,
                                                     train_collection=train_collection,
                                                     loss=self.loss,
                                                     optimizer=self.optimizer,
                                                     callbacks=callbacks)
                pairwise_training.train(epoch, draw_graph=draw_graph)

            x_score = sum(best_test_scores) / len(best_test_scores)
            print("X validation best score:", x_score)
            wandb_val_logger.wandb.run.summary["best_xval_" + wandb_val_logger.comparison_metric] = x_score
        except Exception as e:
            raise e  # maybe handle the exception in the future
        finally:
            # always remove the temp directory
            print("Remove {}".format(temp_dir))
            shutil.rmtree(temp_dir)
# Create a more abstract class that uses common elemetns like, b_size, transform_input etc...
class BaseCollection:
    """Shared behaviour for train/test collections.

    Provides the batch size, an optional generator-transforming hook
    (``transform_inputs_fn``), a display name, and pickle persistence of
    the configuration. Subclasses implement ``_generate``.
    """

    def __init__(self,
                 transform_inputs_fn=None,
                 b_size=64,
                 name=None,
                 **kwargs):
        self.transform_inputs_fn = transform_inputs_fn
        self.b_size = b_size
        self.name = name

    def update_query_list(self, query_list):
        # NEED REFACTOR: TEST and TRAIN collections declare query_list
        # themselves; that attribute should live on this base class.
        self.query_list = query_list
        return self

    def set_transform_inputs_fn(self, transform_inputs_fn):
        """Builder-style setter for the input-transform hook."""
        self.transform_inputs_fn = transform_inputs_fn
        return self

    def batch_size(self, b_size=32):
        """Builder-style setter for the batch size."""
        self.b_size = b_size
        return self

    def get_config(self):
        """Serializable configuration; subclasses extend this dict."""
        return {"b_size": self.b_size}

    def set_name(self, name):
        """Builder-style setter for the collection name."""
        self.name = name
        return self

    def generator(self, **kwargs):
        """Yield model inputs: the raw ``_generate`` stream, optionally
        wrapped by ``transform_inputs_fn``."""
        stream = self._generate(**kwargs)
        if self.transform_inputs_fn is not None:
            stream = self.transform_inputs_fn(stream)
        yield from stream

    def save(self, path):
        """Pickle ``get_config()`` to ``<path>.p``."""
        with open(path + ".p", "wb") as handle:
            pickle.dump(self.get_config(), handle)

    @classmethod
    def load(cls, path):
        """Rebuild an instance from a config pickled by :meth:`save`."""
        with open(path + ".p", "rb") as handle:
            config = pickle.load(handle)
        return cls(**config)
class CrossValidationCollection(BaseCollection):
    """
    Helper class to store the folds data and build the respective Train and Test Collections
    """
    def __init__(self,
                 folds_query_list,
                 folds_goldstandard,
                 folds_goldstandard_trec_file,
                 folds_query_docs,
                 trec_script_eval_path,
                 **kwargs):
        super(CrossValidationCollection, self).__init__(**kwargs)
        self.folds_query_list = folds_query_list
        self.folds_goldstandard = folds_goldstandard
        self.folds_goldstandard_trec_file = folds_goldstandard_trec_file
        self.folds_query_docs = folds_query_docs
        self.trec_script_eval_path = trec_script_eval_path
        # assert fold size

    def get_n_folds(self):
        # number of folds == number of per-fold query lists
        return len(self.folds_query_list)

    def _generate(self, **kwargs):
        # Leave-one-fold-out: fold i becomes the test set, the remaining
        # folds are merged into the training set.
        for i in range(len(self.folds_query_list)):
            # create the folds
            test_query = self.folds_query_list[i]
            test_goldstandard_trec_file = self.folds_goldstandard_trec_file[i]
            test_query_docs = self.folds_query_docs[i]

            train_query = flat_list(self.folds_query_list[:i] + self.folds_query_list[i+1:])
            train_goldstandard = merge_dicts(self.folds_goldstandard[:i] + self.folds_goldstandard[i+1:])
            train_query_docs = merge_dicts(self.folds_query_docs[:i] + self.folds_query_docs[i+1:])

            train_collection = TrainCollection(train_query, train_goldstandard, train_query_docs)
            # NOTE(review): this call passes 5 positional args but the
            # TestCollection defined in this file takes
            # (query_list, query_docs, evaluator, skipped_queries) —
            # looks written against an older signature; confirm.
            test_collection = TestCollection(test_query, test_goldstandard_trec_file, test_query_docs, self.trec_script_eval_path, train_collection.skipped_queries)

            yield train_collection, test_collection

    def get_config(self):
        # extend the base-class config with the fold data
        super_config = super().get_config()
        data_json = {
            "folds_query_list": self.folds_query_list,
            "folds_goldstandard": self.folds_goldstandard,
            "folds_goldstandard_trec_file": self.folds_goldstandard_trec_file,
            "folds_query_docs": self.folds_query_docs,
            "trec_script_eval_path": self.trec_script_eval_path
        }
        return dict(data_json, **super_config) #fast dict merge
class TestCollection(BaseCollection):
    """Queries plus candidate documents used for re-ranking evaluation."""

    def __init__(self,
                 query_list,
                 query_docs,
                 evaluator,
                 skipped_queries = [],
                 **kwargs):
        """
        query_list - must be a list with the following format :
            [
                {
                    id: <str>
                    query: <str>
                },
                ...
            ]

        query_docs - dictionary with documents to be ranked by the model
            {
                id: [{
                    id: <str>
                    text: <str>
                }],
                ...
            }
        """
        super(TestCollection, self).__init__(**kwargs)
        self.query_list = query_list
        self.query_docs = query_docs
        self.evaluator = evaluator
        self.skipped_queries = skipped_queries

        # allow the evaluator to be passed in serialized (dict) form,
        # reviving it via its own class' load()
        if isinstance(self.evaluator, dict):
            self.evaluator = self.evaluator["class"].load(**self.evaluator)

    def get_config(self):
        # extend the base-class config with this collection's data
        super_config = super().get_config()
        data_json = {
            "query_list": self.query_list,
            "query_docs": self.query_docs,
            "skipped_queries": self.skipped_queries,
            "evaluator": self.evaluator.get_config()
        }
        return dict(data_json, **super_config) #fast dict merge

    def _generate(self, **kwargs):
        # Yield (query_id, query_text, docs_batch) tuples, batching each
        # query's candidate documents by self.b_size.
        for query_data in self.query_list:
            if query_data["id"] in self.skipped_queries:
                continue
            for i in range(0, len(self.query_docs[query_data["id"]]), self.b_size):
                docs = self.query_docs[query_data["id"]][i:i+self.b_size]
                yield query_data["id"], query_data["query"], docs

    def evaluate_pre_rerank(self, output_metris=["recall_100", "map_cut_20", "ndcg_cut_20", "P_20"]):
        """
        Compute evaluation metrics over the documents order before been ranked
        """
        # fabricate descending "scores" that reproduce the incoming order
        ranked_format = {k: list(map(lambda x: (x[1]["id"], len(v)-x[0]), enumerate(v))) for k, v in self.query_docs.items()}

        metrics = self.evaluate(ranked_format)

        if isinstance(output_metris, list):
            return [(m, metrics[m]) for m in output_metris]
        else:
            return metrics

    def evaluate_oracle(self, output_metris=["recall_100", "map_cut_20", "ndcg_cut_20", "P_20"]):
        # upper-bound metrics computed by the evaluator itself
        metrics = self.evaluator.evaluate_oracle()

        if isinstance(output_metris, list):
            return [(m, metrics[m]) for m in output_metris]
        else:
            return metrics

    def evaluate(self, ranked_query_docs):
        # delegate metric computation to the configured evaluator
        return self.evaluator.evaluate(ranked_query_docs)
class TrainCollection(BaseCollection):
    """Training collection that samples (query, positive doc, negative doc)
    triples from a goldstandard of graded relevance judgements."""

    def __init__(self,
                 query_list,
                 goldstandard,
                 query_docs_subset=None,
                 use_relevance_groups=False,
                 verbose=True,
                 **kwargs):
        """
        query_list - must be a list with the following format :
            [
                {
                    id: <str>
                    query: <str>
                },
                ...
            ]

        goldstandard - must be a dictionary with the following format:
            {
                id: {
                    0: [<str:id>, <str:id>, ...],
                    1: [<str:id>, <str:id>, ...],
                    2: ...,
                    ...
                },
                ...
            }

        query_docs_subset (optional) - if a previous retrieved method were used to retrieved the TOP_K documents, this parameter
                                       can be used to ignore the collection and instead use only the TOP_K docs
            {
                id: [{
                    id: <str>
                    text: <str>
                }, ...],
                ...
            }
        """
        super(TrainCollection, self).__init__(**kwargs)
        self.query_list = query_list # [{query data}]
        self.goldstandard = goldstandard # {query_id:[relevance docs]}
        self.use_relevance_groups = use_relevance_groups
        self.verbose = verbose

        # these two are round-tripped through get_config()/load()
        if "sub_set_goldstandard" in kwargs:
            self.sub_set_goldstandard = kwargs.pop("sub_set_goldstandard")
        else:
            self.sub_set_goldstandard = None

        if "collection" in kwargs:
            self.collection = kwargs.pop("collection")
        else:
            self.collection = None

        self.skipped_queries = []

        self.__build(query_docs_subset)

    def __find_relevance_group(self, doc_id, search_gs):
        # return the relevance grade whose doc-id list contains doc_id,
        # or -1 when the document is unjudged
        for k in search_gs.keys():
            if doc_id in search_gs[k]:
                return k
        return -1

    def __build(self, query_docs_subset):
        # Restrict the goldstandard/collection to the supplied TOP_K subset
        # and drop queries with no retrievable true positives.
        if query_docs_subset is None:
            # number of samples
            return #

        self.sub_set_goldstandard = {}
        self.collection = {}

        # filter the goldstandard
        for _id, relevance in query_docs_subset.items():
            if _id not in self.goldstandard:
                self.skipped_queries.append(_id)
                continue
            # do not use queries without true positives
            # this add an overhead that can be avoided by refactor the follwing for loop!
            unique_relevants = set(sum([self.goldstandard[_id][k] for k in self.goldstandard[_id].keys() if k>0], []))
            if all([ doc["id"] not in unique_relevants for doc in relevance ]):
                self.skipped_queries.append(_id)
                continue

            self.sub_set_goldstandard[_id] = defaultdict(list)

            for doc in relevance:
                k = self.__find_relevance_group(doc["id"], self.goldstandard[_id])
                if k>0:
                    if self.use_relevance_groups:
                        self.sub_set_goldstandard[_id][k].append(doc["id"])
                    else:
                        # collapse all positive grades into group 1
                        self.sub_set_goldstandard[_id][1].append(doc["id"])
                else:
                    # default add to the less relevance group
                    self.sub_set_goldstandard[_id][0].append(doc["id"])

                #add to the collection
                self.collection[doc["id"]] = doc["text"]

        # remove the skipped queries from the data
        index_to_remove = []
        for skipped in self.skipped_queries:
            _index = index_from_list(self.query_list, lambda x: x["id"]==skipped)
            if _index>-1:
                index_to_remove.append(_index)
        index_to_remove.sort(key=lambda x:-x)
        # start removing from the tail
        for _index in index_to_remove:
            del self.query_list[_index]

        # stats
        if self.verbose:
            # NOTE(review): reading x[k] on these defaultdicts inserts
            # missing grades as empty lists as a side effect — harmless
            # for the printed stats, but mutates the data; confirm intended.
            max_keys = max(map(lambda x:max(x.keys()), self.sub_set_goldstandard.values()))
            for k in range(max_keys+1):
                print("Minimum number of relevance type({}) in the queries of the goldstandard sub set: {}".format(k, min(map(lambda x: len(x[k]), self.sub_set_goldstandard.values()))))
                print("Mean number of relevance type({}) in the queries of the goldstandard sub set: {}".format(k, sum(map(lambda x: len(x[k]), self.sub_set_goldstandard.values()))/len(self.sub_set_goldstandard)))
            print("Sub Collection size", len(self.collection))
            print("Number of skipped question, due to lack of true positives", len(self.skipped_queries))

    def __get_goldstandard(self):
        # the subset replaces the full goldstandard whenever a collection
        # was built from query_docs_subset
        if self.collection is not None:
            return self.sub_set_goldstandard
        else:
            return self.goldstandard

    def get_steps(self):
        # steps per epoch, defined by the number of positive pairs
        training_data = self.__get_goldstandard()

        # an epoch will be defined with respect to the total number of positive pairs
        total_positives = sum(map(lambda x: sum([ len(x[k]) for k in x.keys() if k>0]), training_data.values()))

        return total_positives//self.b_size

    def _generate(self, collection=None, **kwargs):
        # Infinite generator of b_size-sized (query, pos doc, neg doc)
        # batches, sampling queries uniformly and docs within grades.
        # sanity check
        assert(not(self.sub_set_goldstandard==None and collection==None))

        training_data = self.__get_goldstandard()

        # TODO this condition is dependent on the previous
        if collection is None:
            collection = self.collection

        while True:
            # TODO check if it is worthit to use numpy to vectorize these operations
            y_query = []
            y_pos_doc = []
            y_neg_doc = []

            # build $batch_size triples and yield
            query_indexes = random.sample(population=list(range(len(self.query_list))), k=self.b_size)
            for q_i in query_indexes:
                selected_query = self.query_list[q_i]
                #print(selected_query["id"])
                # select the relevance group, (only pos)
                # NOTE(review): remove(0) raises ValueError when a query has
                # no grade-0 bucket — presumably __build guarantees one;
                # confirm for goldstandards supplied without a subset.
                positive_keys=list(training_data[selected_query["id"]].keys())
                #print("positive_keys", positive_keys)
                positive_keys.remove(0)
                #print("positive_keys", positive_keys)
                if len(positive_keys)>1:
                    # sample a positive grade proportionally to its size
                    group_len = list(map(lambda x: len(training_data[selected_query["id"]][x]), positive_keys))
                    total = sum(group_len)
                    prob = list(map(lambda x: x/total, group_len))
                    #print("probs", prob)
                    relevance_group = np.random.choice(positive_keys, p=prob)
                else:
                    relevance_group = positive_keys[0]

                _pos_len = len(training_data[selected_query["id"]][relevance_group])
                pos_doc_index = random.randint(0, _pos_len-1) if _pos_len>1 else 0
                pos_doc_id = training_data[selected_query["id"]][relevance_group][pos_doc_index]
                pos_doc = collection[pos_doc_id]

                # the negative comes from the next-lower relevance grade
                _neg_len = len(training_data[selected_query["id"]][relevance_group-1])
                neg_doc_index = random.randint(0, _neg_len-1) if _neg_len>1 else 0
                neg_doc_id = training_data[selected_query["id"]][relevance_group-1][neg_doc_index]
                neg_doc = collection[neg_doc_id]

                y_query.append(selected_query["query"])
                y_pos_doc.append(pos_doc)
                y_neg_doc.append(neg_doc)

            yield (np.array(y_query), np.array(y_pos_doc), np.array(y_neg_doc))

    def get_config(self):
        # extend the base-class config with this collection's data
        super_config = super().get_config()
        data_json = {
            "query_list": self.query_list,
            "goldstandard": self.goldstandard,
            "use_relevance_groups": self.use_relevance_groups,
            "verbose": self.verbose,
            "sub_set_goldstandard": self.sub_set_goldstandard,
            "collection": self.collection,
        }
        return dict(data_json, **super_config) #fast dict merge
def sentence_splitter_builder(tokenizer, mode=0, max_sentence_size=21):
    """
    Return a transform_inputs_fn for training and test as a tuple

    mode 0: use fixed sized window for the split
    mode 1: split around a query-document match with a fixed size
    mode 2: deeprank alike. Similar to mode 1, but group the match by q-term
    mode 3: split with ntlk sentence splitter
    mode 4: similar to 2, but uses sentence splitting instead of fix size
    """
    # window helpers for the match-centred fixed-size modes (1 and 2)
    if mode in [1, 2]:
        half_window = max_sentence_size//2
        min_w = lambda x: max(0, x-half_window)
        max_w = lambda x, l: min(x+half_window, l)+1

    def train_splitter(data_generator):
        # Consume (query, pos_docs, neg_docs) string batches and yield the
        # tokenized, sentence/window-split version of the same triple.
        while True:
            # get the batch triplet
            query, pos_docs, neg_docs = next(data_generator)
            # tokenization
            query = tokenizer.texts_to_sequences(query)
            if mode not in [3, 4]: # for the mode 3 this is a preprocessing step
                pos_docs = tokenizer.texts_to_sequences(pos_docs)
                neg_docs = tokenizer.texts_to_sequences(neg_docs)
            # NOTE(review): indentation reconstructed from a whitespace-
            # stripped dump — this resampling guard may originally sit
            # inside the previous if-block; confirm against upstream repo.
            if any([ len(doc)==0 for doc in pos_docs]):
                continue # try a new resampling, NOTE THIS IS A EASY FIX PLS REDO THIS!!!!!!!
            # for obvious reasons
            new_pos_docs = []
            new_neg_docs = []

            # sentence splitting
            if mode==0:
                # fixed-size non-overlapping windows
                for i in range(len(pos_docs)):
                    new_pos_docs.append([])
                    new_neg_docs.append([])
                    for s in range(0, len(pos_docs[i]), max_sentence_size):
                        new_pos_docs[-1].append(pos_docs[i][s:s+max_sentence_size])
                    for s in range(0, len(neg_docs[i]), max_sentence_size):
                        new_neg_docs[-1].append(neg_docs[i][s:s+max_sentence_size])
            elif mode==1:
                # one flat list of windows centred on query-term matches
                for b in range(len(pos_docs)):
                    new_pos_docs.append([])
                    new_neg_docs.append([])
                    # split by exact matching
                    for t_q in query[b]:
                        # exact math for the pos_document
                        for i,t_pd in enumerate(pos_docs[b]):
                            if t_pd==t_q:
                                new_pos_docs[-1].append(pos_docs[b][min_w(i):max_w(i,len(pos_docs[b]))])
                        # exact math for the neg_document
                        for i,t_nd in enumerate(neg_docs[b]):
                            if t_nd==t_q:
                                new_neg_docs[-1].append(neg_docs[b][min_w(i):max_w(i,len(neg_docs[b]))])
            elif mode==2:
                # like mode 1, but windows grouped per query term (DeepRank)
                for b in range(len(pos_docs)):
                    new_pos_docs.append([])
                    new_neg_docs.append([])
                    # split by exact matching
                    for t_q in query[b]:
                        # entry for the query-term
                        new_pos_docs[-1].append([])
                        new_neg_docs[-1].append([])
                        # exact math for the pos_document
                        for i,t_pd in enumerate(pos_docs[b]):
                            if t_pd==t_q:
                                new_pos_docs[-1][-1].append(pos_docs[b][min_w(i):max_w(i,len(pos_docs[b]))])
                        # exact math for the neg_document
                        for i,t_nd in enumerate(neg_docs[b]):
                            if t_nd==t_q:
                                new_neg_docs[-1][-1].append(neg_docs[b][min_w(i):max_w(i,len(neg_docs[b]))])
            elif mode==3:
                # nltk sentence split first, tokenize per sentence afterwards
                for b in range(len(pos_docs)):
                    new_pos_docs.append([])
                    new_neg_docs.append([])
                    for pos_sentence in nltk.sent_tokenize(pos_docs[b]):
                        new_pos_docs[-1].append(pos_sentence)
                    for neg_sentence in nltk.sent_tokenize(neg_docs[b]):
                        new_neg_docs[-1].append(neg_sentence)
                    new_pos_docs[-1] = tokenizer.texts_to_sequences(new_pos_docs[-1])
                    new_neg_docs[-1] = tokenizer.texts_to_sequences(new_neg_docs[-1])
            elif mode==4:
                # per-query-term grouping of whole sentences containing the term
                for b in range(len(pos_docs)):
                    new_pos_docs.append([])
                    new_neg_docs.append([])

                    _temp_pos_docs = nltk.sent_tokenize(pos_docs[b])
                    _temp_pos_docs = tokenizer.texts_to_sequences(_temp_pos_docs)
                    _temp_neg_docs = nltk.sent_tokenize(neg_docs[b])
                    _temp_neg_docs = tokenizer.texts_to_sequences(_temp_neg_docs)

                    # split by exact matching
                    for t_q in query[b]:
                        # entry for the query-term
                        new_pos_docs[-1].append([])
                        new_neg_docs[-1].append([])
                        for pos_sent in _temp_pos_docs:
                            # exact math for the pos_document
                            for i,t_pd in enumerate(pos_sent):
                                if t_pd==t_q:
                                    new_pos_docs[-1][-1].append(pos_sent)
                                    break
                        for neg_sent in _temp_neg_docs:
                            for i,t_nd in enumerate(neg_sent):
                                if t_nd==t_q:
                                    new_neg_docs[-1][-1].append(neg_sent)
                                    break
            else:
                raise NotImplementedError("Missing implmentation for mode "+str(mode))

            yield query, new_pos_docs, new_neg_docs

    def test_splitter(data_generator):
        # Same splitting applied to (id, query, docs) evaluation batches;
        # each doc's "text" is replaced in place by its split representation.
        for _id, query, docs in data_generator:
            # tokenization
            tokenized_query = tokenizer.texts_to_sequences([query])[0]
            for doc in docs:
                if isinstance(doc["text"], list):
                    continue # cached tokenization
                if mode not in [3, 4]:
                    doc["text"] = tokenizer.texts_to_sequences([doc["text"]])[0]
                # sentence splitting
                new_docs = []
                if mode==0:
                    for s in range(0,len(doc["text"]), max_sentence_size):
                        new_docs.append(doc["text"][s:s+max_sentence_size])
                elif mode==1:
                    for t_q in tokenized_query:
                        for i,t_d in enumerate(doc["text"]):
                            if t_d==t_q:
                                new_docs.append(doc["text"][min_w(i):max_w(i,len(doc["text"]))])
                elif mode==2:
                    for t_q in tokenized_query:
                        new_docs.append([])
                        for i,t_d in enumerate(doc["text"]):
                            if t_d==t_q:
                                new_docs[-1].append(doc["text"][min_w(i):max_w(i,len(doc["text"]))])
                elif mode==3:
                    for s in nltk.sent_tokenize(doc["text"]):
                        new_docs.append(s)
                    new_docs = tokenizer.texts_to_sequences(new_docs)
                elif mode==4:
                    _temp_new_docs = tokenizer.texts_to_sequences(nltk.sent_tokenize(doc["text"]))
                    for t_q in tokenized_query:
                        new_docs.append([])
                        for _new_doc in _temp_new_docs:
                            for i,t_d in enumerate(_new_doc):
                                if t_d==t_q:
                                    new_docs[-1].append(_new_doc)
                                    break
                else:
                    raise NotImplementedError("Missing implmentation for mode "+str(mode))
                doc["text"] = new_docs
            yield _id, tokenized_query, docs

    return train_splitter, test_splitter
| 27,094 | 4,977 | 538 |
0af490936f95512da77a6c83f179f347d727a4bd | 26,547 | py | Python | calchipan/resolver.py | zzzeek/calchipan | 86ef380c572b9c1b8186278446a9b4952a538f97 | [
"MIT"
] | 1 | 2021-02-04T15:02:40.000Z | 2021-02-04T15:02:40.000Z | calchipan/resolver.py | zzzeek/calchipan | 86ef380c572b9c1b8186278446a9b4952a538f97 | [
"MIT"
] | null | null | null | calchipan/resolver.py | zzzeek/calchipan | 86ef380c572b9c1b8186278446a9b4952a538f97 | [
"MIT"
] | null | null | null |
"""Represent SQL tokens as Pandas operations.
"""
from sqlalchemy.sql import operators
from sqlalchemy import sql
from sqlalchemy import util
from sqlalchemy import types as sqltypes
import functools
import pandas as pd
import numpy as np
import collections
from . import dbapi
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.ext.compiler import compiles
def aggregate_fn(package=None):
    """Mark a Python function as a SQL aggregate function.

    The function should typically receive a Pandas Series object
    as an argument and return a scalar result.

    E.g.::

        from calchipan import aggregate_fn

        @aggregate_fn()
        def stddev(values):
            return values.std()

    The object is converted into a SQLAlchemy GenericFunction
    object, which can be used directly::

        stmt = select([stddev(table.c.value)])

    or via the SQLAlchemy ``func`` namespace::

        from sqlalchemy import func
        stmt = select([func.stddev(table.c.value)])

    Functions can be placed in ``func`` under particular
    "package" names using the ``package`` argument::

        @aggregate_fn(package='numpy')
        def stddev(values):
            return values.std()

    Usage via ``func`` is then::

        from sqlalchemy import func
        stmt = select([func.numpy.stddev(table.c.value)])

    An aggregate function that is called with multiple expressions
    will be passed a single argument that is a list of Series
    objects.

    """
    # BUG FIX: this chunk returned ``mark_aggregate`` without ever
    # defining it (NameError at call time); the inner decorator below
    # restores the missing implementation.
    def mark_aggregate(fn):
        # build a GenericFunction subclass named after the wrapped fn
        kwargs = {'name': fn.__name__}
        if package:
            kwargs['package'] = package
        custom_func = type("%sFunc" % fn.__name__, (GenericFunction,), kwargs)

        @compiles(custom_func, 'pandas')
        def _compile_fn(expr, compiler, **kw):
            # aggregate=True: fn receives the resolved clause list whole
            return FunctionResolver(fn,
                                    compiler.process(expr.clauses, **kw), True)
        return custom_func
    return mark_aggregate
def non_aggregate_fn(package=None):
    """Mark a Python function as a SQL non-aggregate function.

    The function should receive zero or more scalar
    Python objects as arguments and return a scalar result.

    E.g.::

        from calchipan import non_aggregate_fn

        @non_aggregate_fn()
        def add_numbers(value1, value2):
            return value1 + value2

    Usage and behavior is identical to that of :func:`.aggregate_fn`,
    except that the function is not treated as an aggregate.  Function
    expressions are also expanded out to individual positional arguments,
    whereas an aggregate always receives a single structure as an argument.

    """
    # BUG FIX: this chunk returned ``mark_non_aggregate`` without ever
    # defining it (NameError at call time); the inner decorator below
    # restores the missing implementation.
    def mark_non_aggregate(fn):
        # build a GenericFunction subclass named after the wrapped fn
        kwargs = {'name': fn.__name__}
        if package:
            kwargs['package'] = package
        custom_func = type("%sFunc" % fn.__name__, (GenericFunction,), kwargs)

        @compiles(custom_func, 'pandas')
        def _compile_fn(expr, compiler, **kw):
            # aggregate=False: arguments are expanded positionally
            return FunctionResolver(fn,
                                    compiler.process(expr.clauses, **kw), False)
        return custom_func
    return mark_non_aggregate
ResolverContext = collections.namedtuple("ResolverContext",
["cursor", "namespace", "params"])
class ColumnElementResolver(Resolver):
    """Top level class for SQL expressions."""

    # NOTE(review): the base class ``Resolver`` is not defined in this
    # (filtered) chunk — presumably defined earlier in the module.
    def resolve_expression(self, ctx, product):
        """Resolve as a column expression.

        Return value here is typically a Series or a scalar
        value.
        """
        raise NotImplementedError()
class FromResolver(Resolver):
    """Top level class for 'from' objects, things you can select rows from."""

    def resolve_dataframe(self, ctx, names=True):
        """Resolve as a dataframe.

        Return value here is a DataFrame object.
        """
        raise NotImplementedError()
def _cartesian(ctx, f1, f2):
    """produce a cartesian product.

    This is to support multiple FROM clauses against a WHERE.

    Clearly, this is a bad place to be, and a join() should be
    used instead.  But this allows the results to come back,
    at least.

    """
    df1, df2 = f1.resolve_dataframe(ctx), f2.resolve_dataframe(ctx)

    # NOTE(review): ``DerivedResolver`` and ``_cartesian_dataframe`` are
    # not defined in this (filtered) chunk — presumably defined elsewhere
    # in the module; confirm.
    return DerivedResolver(
        _cartesian_dataframe(ctx, df1, df2)
    )
| 32.493268 | 90 | 0.586808 |
"""Represent SQL tokens as Pandas operations.
"""
from sqlalchemy.sql import operators
from sqlalchemy import sql
from sqlalchemy import util
from sqlalchemy import types as sqltypes
import functools
import pandas as pd
import numpy as np
import collections
from . import dbapi
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.ext.compiler import compiles
def aggregate_fn(package=None):
    """Mark a Python function as a SQL aggregate function.

    The function should typically receive a Pandas Series object
    as an argument and return a scalar result.

    E.g.::

        from calchipan import aggregate_fn

        @aggregate_fn()
        def stddev(values):
            return values.std()

    The object is converted into a SQLAlchemy GenericFunction
    object, which can be used directly::

        stmt = select([stddev(table.c.value)])

    or via the SQLAlchemy ``func`` namespace::

        from sqlalchemy import func
        stmt = select([func.stddev(table.c.value)])

    Functions can be placed in ``func`` under particular
    "package" names using the ``package`` argument::

        @aggregate_fn(package='numpy')
        def stddev(values):
            return values.std()

    Usage via ``func`` is then::

        from sqlalchemy import func
        stmt = select([func.numpy.stddev(table.c.value)])

    An aggregate function that is called with multiple expressions
    will be passed a single argument that is a list of Series
    objects.

    """
    def mark_aggregate(fn):
        # build a GenericFunction subclass named after the wrapped fn
        kwargs = {'name': fn.__name__}
        if package:
            kwargs['package'] = package
        custom_func = type("%sFunc" % fn.__name__, (GenericFunction,), kwargs)

        # register a pandas-dialect compilation hook producing a
        # FunctionResolver in aggregate mode (single argument structure)
        @compiles(custom_func, 'pandas')
        def _compile_fn(expr, compiler, **kw):
            return FunctionResolver(fn,
                                    compiler.process(expr.clauses, **kw), True)
        return custom_func
    return mark_aggregate
def non_aggregate_fn(package=None):
    """Mark a Python function as a SQL non-aggregate function.

    The function should receive zero or more scalar
    Python objects as arguments and return a scalar result.

    E.g.::

        from calchipan import non_aggregate_fn

        @non_aggregate_fn()
        def add_numbers(value1, value2):
            return value1 + value2

    Usage and behavior is identical to that of :func:`.aggregate_fn`,
    except that the function is not treated as an aggregate.  Function
    expressions are also expanded out to individual positional arguments,
    whereas an aggregate always receives a single structure as an argument.

    """
    def mark_non_aggregate(fn):
        # build a GenericFunction subclass named after the wrapped fn
        kwargs = {'name': fn.__name__}
        if package:
            kwargs['package'] = package
        custom_func = type("%sFunc" % fn.__name__, (GenericFunction,), kwargs)

        # register a pandas-dialect compilation hook producing a
        # FunctionResolver in scalar mode (positional expansion)
        @compiles(custom_func, 'pandas')
        def _compile_fn(expr, compiler, **kw):
            return FunctionResolver(fn,
                                    compiler.process(expr.clauses, **kw), False)
        return custom_func
    return mark_non_aggregate
ResolverContext = collections.namedtuple("ResolverContext",
["cursor", "namespace", "params"])
class Resolver(object):
    """Base class for all resolvers; instances are DBAPI-level callables."""

    def __call__(self, cursor, namespace, params):
        """Resolve this expression.

        Resolvers are callables; this is called by the DBAPI."""
        return self.resolve(ResolverContext(cursor, namespace, params))

    def resolve(self, ctx):
        """Resolve this expression given a ResolverContext.

        Front end for resolution, linked to top-level __call__()."""
        raise NotImplementedError()
class NullResolver(Resolver):
    """No-op resolver; resolves to nothing (returns None)."""

    def resolve(self, ctx):
        pass
class ColumnElementResolver(Resolver):
    """Top level class for SQL expressions."""

    def resolve_expression(self, ctx, product):
        """Resolve as a column expression.

        Return value here is typically a Series or a scalar
        value.
        """
        raise NotImplementedError()
class FromResolver(Resolver):
    """Top level class for 'from' objects, things you can select rows from."""

    def resolve_dataframe(self, ctx, names=True):
        """Resolve as a dataframe.

        Return value here is a DataFrame object.
        """
        raise NotImplementedError()
class FunctionResolver(ColumnElementResolver):
    """Resolves a SQL function call backed by a plain Python callable."""

    def __init__(self, fn, expr, aggregate):
        self.fn = fn                # Python callable implementing the function
        self.expr = expr            # resolver for the argument clause list
        self.aggregate = aggregate  # True -> wrap result in a one-row Series

    def resolve_expression(self, ctx, product):
        if self.aggregate:
            # aggregate: fn receives the resolved expression(s) as a
            # single argument, result becomes a one-element Series
            q = self.fn(self.expr.resolve_expression(
                ctx, product))
            q = pd.Series([q], name="aggregate")
        else:
            # scalar: argument expressions are expanded positionally
            q = self.fn(*self.expr.resolve_expression(
                ctx, product))
        return q
class ConstantResolver(ColumnElementResolver):
    """Resolves to a fixed Python value, ignoring the row product."""

    def __init__(self, value):
        self.value = value

    def resolve_expression(self, ctx, product):
        return self.value
class LiteralResolver(ColumnElementResolver):
    """Resolves a SQL literal; uses its own id() as a unique column name."""

    def __init__(self, value):
        self.value = value
        # unique per-instance identifier used as the dataframe column name
        self.name = str(id(self))

    def resolve_expression(self, ctx, product):
        return self.value

    @property
    def df_index(self):
        # column key under which this literal appears in a dataframe
        return self.name
class ColumnResolver(ColumnElementResolver):
    """Resolves a table column reference to a dataframe Series."""

    def __init__(self, name, tablename):
        self.name = name
        self.tablename = tablename

    def resolve_expression(self, ctx, product):
        # without a product (joined/derived frame), fall back to
        # resolving the table itself from the namespace
        if product is None:
            df = TableResolver(self.tablename).resolve_dataframe(ctx)
        else:
            df = product.resolve_dataframe(ctx)
        return df[self.df_index]

    @property
    def df_index(self):
        # mangled column key: encodes both table and column name
        return "#T_%s_#C_%s" % (self.tablename, self.name)
class UnaryResolver(ColumnElementResolver):
    """Resolves a unary SQL expression.

    NOTE: operator/modifier are stored but not applied here — resolution
    simply delegates to the wrapped expression.
    """

    def __init__(self, expression, operator, modifier):
        self.operator = operator
        self.modifier = modifier
        self.expression = expression

    def resolve_expression(self, ctx, product):
        return self.expression.resolve_expression(
            ctx, product)

    @property
    def df_index(self):
        # pass through the wrapped expression's column key
        return self.expression.df_index
class LabelResolver(Resolver):
    """Resolves a labeled (AS <name>) expression; the label becomes the
    dataframe column key."""

    def __init__(self, expression, name):
        self.expression = expression
        self.name = name

    def resolve_expression(self, ctx, product):
        return self.expression.resolve_expression(ctx, product)

    @property
    def df_index(self):
        return self.name
class BinaryResolver(ColumnElementResolver):
    """Resolves a binary expression by applying ``operator`` to the
    resolved left and right operands."""

    def __init__(self, left, right, operator):
        self.left = left
        self.right = right
        self.operator = operator

    def resolve_expression(self, ctx, product):
        return self.operator(
            self.left.resolve_expression(ctx, product),
            self.right.resolve_expression(ctx, product),
        )
class ClauseListResolver(ColumnElementResolver):
    """Resolves a list of expressions joined by a common operator."""

    def __init__(self, expressions, operator):
        self.expressions = expressions
        self.operator = operator

    def resolve_expression(self, ctx, product):
        exprs = [expr.resolve_expression(ctx, product)
                 for expr in self.expressions]

        if self.operator is operators.comma_op:
            # comma lists are returned as-is (single element unwrapped)
            if len(exprs) == 1:
                return exprs[0]
            else:
                return exprs
        else:
            # fold the operator left-to-right across the resolved values
            return functools.reduce(self.operator, exprs)
class BindParamResolver(ColumnElementResolver):
    """Resolves a bound parameter by name from the statement params."""

    def __init__(self, name):
        self.name = name

    def resolve_expression(self, ctx, product):
        return ctx.params[self.name]
class DerivedResolver(FromResolver):
    """A 'from' object wrapping an already-materialized DataFrame."""

    def __init__(self, dataframe):
        self.dataframe = dataframe

    def resolve_dataframe(self, ctx, names=True):
        return self.dataframe
class TableResolver(FromResolver):
    """Resolves a named table from the namespace into a DataFrame, with
    mangled '#T_<table>_#C_<col>' column names and an optional synthetic
    autoincrement column backed by the frame's index."""

    def __init__(self, tablename, autoincrement_col=None):
        self.tablename = tablename
        self.autoincrement_col = autoincrement_col

    def resolve_dataframe(self, ctx, names=True):
        df = ctx.namespace[self.tablename]
        if names:
            # performance tests show that the rename() here is
            # not terribly expensive as long as copy=False.  Adding the
            # index as a column is much more expensive, however,
            # though is not as common of a use case.

            # the renamed dataframe can be cached, though this means
            # that all mutation operations need to clear the cache also.

            # a quicker route to having the index accessible is to
            # add an explicit copy of the index to the DataFrame outside
            # of the SQL dialect - that way it won't be copied here
            # each time.
            renamed_df = df.rename(
                columns=dict(
                    (k, "#T_%s_#C_%s" % (self.tablename, k))
                    for k in df.keys()
                ), copy=False
            )
            if self.autoincrement_col and self.autoincrement_col not in df:
                renamed_df["#T_%s_#C_%s" %
                           (self.tablename, self.autoincrement_col)] = df.index
            return renamed_df
        elif self.autoincrement_col and self.autoincrement_col not in df:
            # unmangled path still needs the synthetic id column; copy so
            # the namespace frame is not mutated
            renamed_df = df.copy()
            renamed_df[self.autoincrement_col] = df.index
            return renamed_df
        else:
            return df
class AliasResolver(FromResolver):
    """Expose another FROM element's frame under an alias name."""
    def __init__(self, table, aliasname):
        self.table = table
        self.aliasname = aliasname
    def resolve_dataframe(self, ctx, names=True):
        """Fetch the aliased frame, optionally qualifying its columns.

        Labels are rewritten to ``#T_<alias>_#C_<col>`` so that column
        lookups resolve against the alias rather than the base table.
        """
        frame = self.table.resolve_dataframe(ctx, names=False)
        if not names:
            return frame
        qualified = {
            col: "#T_%s_#C_%s" % (self.aliasname, col)
            for col in frame.keys()
        }
        return frame.rename(columns=qualified, copy=False)
class JoinResolver(FromResolver):
    """Resolve a JOIN between two FROM elements.

    Simple equality comparisons (``col == col``) are delegated to
    ``pd.merge``; any other ON criteria are applied as a boolean filter,
    over a cartesian product if necessary.
    """
    def __init__(self, left, right, onclause, isouter):
        self.left = left
        self.right = right
        self.onclause = onclause
        self.isouter = isouter
    def resolve_dataframe(self, ctx, names=True):
        """Produce the joined frame, with NaN normalized to None."""
        df1 = left = self.left.resolve_dataframe(ctx)
        df2 = self.right.resolve_dataframe(ctx)
        if self.isouter:
            # remember each left row's identity so unmatched rows can be
            # re-appended after the inner filtering step below.
            left['_cp_left_index'] = left.index
        straight_binaries, remainder = self._produce_join_expressions(df1, df2)
        df1 = self._merge_straight_binaries(ctx, df1, df2, straight_binaries)
        df1 = self._merge_remainder(ctx, left, df1, df2,
                            straight_binaries, remainder)
        return df1.where(pd.notnull(df1), None)
    def _produce_join_expressions(self, df1, df2):
        """Split the ON clause into merge-able pairs and leftover criteria."""
        straight_binaries = []
        remainder = []
        if isinstance(self.onclause, ClauseListResolver) and \
                self.onclause.operator is operators.and_:
            comparisons = self.onclause.expressions
        else:
            comparisons = [self.onclause]
        # extract comparisons like this:
        # col1 == col2 AND col3 == col4 AND ...
        # use pd.merge() for those
        for comp in comparisons:
            if isinstance(comp, BinaryResolver) and \
                    comp.operator is operators.eq and \
                    hasattr(comp.left, "df_index") and \
                    hasattr(comp.right, "df_index"):
                if comp.left.df_index in df1 and \
                        comp.right.df_index in df2:
                    straight_binaries.append(
                        (comp.left.df_index, comp.right.df_index)
                    )
                    continue
                elif comp.right.df_index in df1 and \
                        comp.left.df_index in df2:
                    straight_binaries.append(
                        (comp.right.df_index, comp.left.df_index)
                    )
                    continue
            remainder.append(comp)
        return straight_binaries, remainder
    def _merge_straight_binaries(self, ctx, df1, df2, straight_binaries):
        """Apply all ``col == col`` pairs in one pd.merge() call."""
        if straight_binaries:
            # use merge() for straight binaries.
            left_on, right_on = zip(*straight_binaries)
            df1 = df1.merge(df2, left_on=left_on, right_on=right_on,
                            how='left' if self.isouter else 'inner')
        return df1
    def _merge_remainder(self, ctx, left, df1, df2,
                        straight_binaries, remainder):
        """Apply non-equality ON criteria as a boolean row filter."""
        # for joins that aren't straight "col == col",
        # we use the ON criterion directly.
        # if we don't already have a dataframe with the full
        # left + right cols, we use a cartesian product first.
        # ideally, we'd limit the cartesian on only those columns we
        # need.
        if remainder:
            if len(remainder) > 1:
                remainder = ClauseListResolver(remainder, operators.and_)
            else:
                remainder = remainder[0]
            # TODO: performance optimization: do the cartesian product
            # here on a subset of the two dataframes, that only includes
            # those columns we need in the expression. Then reindex
            # back out to the original dataframes.
            if not straight_binaries:
                df1 = _cartesian_dataframe(ctx, df1, df2)
            expr = remainder.resolve_expression(ctx, DerivedResolver(df1))
            joined = df1[expr]
            if self.isouter:
                # for outer join, grab remaining rows from "left"
                # NOTE(review): DataFrame.ix was removed in pandas 1.0 —
                # this path assumes a legacy pandas; verify before upgrading.
                remaining_left_ids = set(df1['_cp_left_index']).\
                    difference(joined['_cp_left_index'])
                remaining = left.ix[remaining_left_ids]
                df1 = pd.concat([joined, remaining]).reset_index()
            else:
                df1 = joined
        return df1
class _ExprCol(ColumnElementResolver):
    """Wrap an expression so it can act as a named frame column.

    Used for synthetic columns such as ``_having`` and ``_order_by_N``.
    """
    def __init__(self, expr, name):
        self.expr = expr
        self.name = name
    def resolve_expression(self, ctx, product):
        # delegate straight to the wrapped expression
        return self.expr.resolve_expression(ctx, product)
    @property
    def df_index(self):
        # the synthetic column label under which results are stored
        return self.name
class BaseSelectResolver(FromResolver):
    """Shared machinery for SELECT and compound-SELECT resolution.

    Handles GROUP BY, HAVING, ORDER BY, LIMIT and OFFSET; subclasses
    implement ``_evaluate`` to supply the raw row product.
    """
    group_by = None
    order_by = None
    having = None
    limit = None
    offset = None
    @util.memoized_property
    def columns(self):
        # populated by the compiler; memoized so each instance owns its list
        return []
    def _evaluate(self, ctx, correlate=None):
        """Produce the FROM/WHERE row product; subclass responsibility."""
        raise NotImplementedError()
    def resolve(self, ctx, correlate=None):
        """Run the full SELECT pipeline and return the result DataFrame."""
        product = self._evaluate(ctx, correlate)
        if self.group_by is not None:
            df = product.resolve_dataframe(ctx)
            gp = self.group_by.resolve_expression(ctx, product)
            # each group becomes its own derived product
            groups = [DerivedResolver(gdf[1]) for gdf in df.groupby(gp)]
        else:
            groups = [product]
        frame_columns = list(self.columns)
        if self.having is not None:
            if self.group_by is None:
                raise dbapi.Error("HAVING must also have GROUP BY")
            # evaluate HAVING as a synthetic column, filtered on later
            frame_columns.append(_ExprCol(self.having, '_having'))
        if self.order_by is not None:
            # ORDER BY expressions also ride along as synthetic columns
            for idx, ob_expr in enumerate(self.order_by.expressions):
                frame_columns.append(_ExprCol(ob_expr, '_order_by_%d' % idx))
        def process_aggregates(gprod):
            """detect aggregate functions in column clauses and
            flatten results if present
            """
            cols = [
                _coerce_to_series(
                    ctx,
                    c.resolve_expression(ctx, gprod)
                ).reset_index(drop=True)
                for c in frame_columns]
            for c in cols:
                if c.name == 'aggregate':
                    break
            else:
                return cols
            # at least one aggregate present: collapse non-aggregate
            # columns to their first value so rows line up
            return [
                list(c)[0]
                if c.name != 'aggregate'
                else c
                for c in cols
            ]
        nu = _unique_name()
        names = [nu(c.name) for c in self.columns]
        # NOTE(review): DataFrame.from_items was removed in pandas 1.0;
        # this code targets a legacy pandas — confirm before upgrading.
        group_results = [
            pd.DataFrame.from_items(
                [
                    (
                        c.df_index,
                        expr
                    )
                    for c, expr
                    in zip(frame_columns, process_aggregates(gprod))
                ]
            )
            for gprod in groups
        ]
        non_empty = [g for g in group_results if len(g)]
        if not non_empty:
            # empty result
            return pd.DataFrame(columns=names)
        else:
            results = pd.concat(non_empty)
        if self.having is not None:
            results = results[results['_having'] == True]
            del results['_having']
        if self.order_by:
            cols = []
            asc = []
            for idx, ob_expr in enumerate(self.order_by.expressions):
                # DESC is flagged via a UnaryResolver modifier
                ascending = \
                    not isinstance(ob_expr, UnaryResolver) or \
                    ob_expr.modifier is not operators.desc_op
                key = '_order_by_%d' % idx
                cols.append(key)
                asc.append(ascending)
            # NOTE(review): DataFrame.sort was replaced by sort_values —
            # legacy pandas assumed here as well.
            results = results.sort(columns=cols, ascending=asc).\
                reset_index(drop=True)
            for col in cols:
                del results[col]
        results.rename(columns=dict(
            (col.df_index, name)
            for col, name in zip(self.columns, names)
        ), inplace=True)
        if self.offset is not None or self.limit is not None:
            slice_start = self.offset if self.offset is not None else 0
            if self.limit is None:
                results = results[slice_start:]
            else:
                results = results[slice_start:slice_start + self.limit]
        return results
class SelectResolver(BaseSelectResolver):
    """Resolve a plain SELECT statement, including correlated subqueries."""
    whereclause = None
    @util.memoized_property
    def dataframes(self):
        # FROM elements accumulated by the compiler
        return []
    def resolve_dataframe(self, ctx, names=True):
        # used when this SELECT appears as a FROM element (subquery)
        return self.resolve(ctx)
    def resolve_expression(self, ctx, product):
        """Evaluate this SELECT as a scalar expression, row by row."""
        # correlated subquery - resolve for every row.
        # TODO: probably *dont* need to resolve for every row
        # for an uncorrelated subquery, can detect that
        p_df = product.resolve_dataframe(ctx)
        # iterate through rows in dataframe and form one-row
        # dataframes. The ind:ind thing is the only way I could
        # figure out to achieve this, might be an easier way.
        # NOTE(review): .ix was removed in pandas 1.0 — legacy pandas.
        things = []
        for ind in p_df.index:
            row = p_df.ix[ind:ind]
            df = DerivedResolver(row)
            thing = self._evaluate(ctx, correlate=df)
            things.append(_coerce_to_scalar(ctx, thing))
        return pd.Series(things)
    def _evaluate(self, ctx, correlate=None):
        """Build the row product: FROMs x correlation, filtered by WHERE."""
        if not self.dataframes:
            # "null" dataframe
            product = DerivedResolver(pd.DataFrame(
                [{col.df_index: [1]} for col in self.columns]))
        else:
            product = self.dataframes[0]
        for df in self.dataframes[1:]:
            # multiple FROM clauses: cartesian product, filtered by WHERE
            product = _cartesian(ctx, product, df)
        if correlate:
            product = _cartesian(ctx, product, correlate)
        df = product.resolve_dataframe(ctx)
        if self.whereclause is not None:
            df = df[self.whereclause.resolve_expression(ctx, product)]
        product = DerivedResolver(df)
        if correlate:
            # correlated use: collapse the first column to a scalar
            col = self.columns[0].resolve_expression(ctx, product)
            return _coerce_to_scalar(ctx, col)
        return product
class CompoundResolver(BaseSelectResolver):
    """Resolve UNION / UNION ALL of several SELECTs."""
    keyword = None
    @util.memoized_property
    def selects(self):
        # member SELECT resolvers, populated by the compiler
        return []
    @property
    def columns(self):
        # column list is taken from the first SELECT, per SQL semantics
        return self.selects[0].columns
    def resolve_dataframe(self, ctx, names=True):
        return self.resolve(ctx)
    def _evaluate(self, ctx, correlate=None, **kw):
        """Concatenate member results; de-duplicate for plain UNION."""
        assert self.keyword in (sql.CompoundSelect.UNION,
                sql.CompoundSelect.UNION_ALL)
        evaluated = [
            sel.resolve(ctx, **kw)
            for sel in self.selects
        ]
        # normalize every member's column labels onto the first SELECT's
        for ev in evaluated:
            ev.rename(columns=dict(
                (old, new.df_index) for old, new in
                zip(ev.keys(), self.columns)
            ),
                inplace=True)
        df = pd.concat(evaluated)
        if self.keyword == sql.CompoundSelect.UNION:
            df = df.drop_duplicates()
        return DerivedResolver(df)
class CRUDResolver(Resolver):
    """Common base for INSERT / UPDATE / DELETE statement resolvers."""
    pass
class InsertResolver(CRUDResolver):
    """Execute INSERT: append one or more rows to the named DataFrame."""
    columns = ()
    values = ()
    def __init__(self, tablename, pandas_index_pk):
        self.tablename = tablename
        # when True, the frame's integer index acts as the primary key
        self.pandas_index_pk = pandas_index_pk
    def resolve(self, ctx, **kw):
        """Append row(s); supports empty, single-row, and multi-row VALUES."""
        df = ctx.namespace[self.tablename]
        if not self.values:
            # INSERT with no VALUES: append an all-default (NaN) row
            new = df.append({}, ignore_index=True)
        elif isinstance(self.values[0], list):
            # executemany-style: list of rows
            new = df.append(
                pd.DataFrame(
                    [
                        dict((c,
                            v.resolve_expression(ctx, None))
                            for c, v in zip(self.columns, row))
                        for row in self.values
                    ]
                ), ignore_index=True
            )
        else:
            # single row of values
            new = df.append(dict(
                (c, v.resolve_expression(ctx, None))
                for c, v in zip(self.columns, self.values)
            ), ignore_index=True)
        # TODO: is 'value=[None]' correct usage here?
        ctx.namespace[self.tablename] = new.fillna(value=[None])
        if self.pandas_index_pk:
            ctx.cursor.lastrowid = new.index[-1]
        else:
            ctx.cursor.lastrowid = None
class UpdateResolver(CRUDResolver):
    """Execute UPDATE: assign SET values to every WHERE-matched row."""
    values = ()
    whereclause = None
    def __init__(self, tablename, autoincrement_col):
        self.tablename = tablename
        self.autoincrement_col = autoincrement_col
    def resolve(self, ctx, **kw):
        """Apply the SET expressions row by row; sets cursor.rowcount."""
        dataframe = ctx.namespace[self.tablename]
        product = TableResolver(self.tablename,
                        autoincrement_col=self.autoincrement_col)
        df = product.resolve_dataframe(ctx)
        if self.whereclause is not None:
            df_ind = df[self.whereclause.resolve_expression(ctx, product)]
        else:
            df_ind = df
        # doing an UPDATE huh? Yeah, this is quite slow, sorry.
        # NOTE(review): relies on legacy pandas behavior (.ix and chained
        # assignment ``dataframe[k][ind] = ...``) — verify before upgrading.
        for ind in df_ind.index:
            product = DerivedResolver(df_ind.ix[ind:ind])
            for k, v in self.values:
                thing = v.resolve_expression(ctx, product)
                thing = _coerce_to_scalar(ctx, thing)
                dataframe[k][ind] = thing
        ctx.cursor.rowcount = len(df_ind)
class DeleteResolver(CRUDResolver):
    """Execute DELETE: drop every WHERE-matched row from the DataFrame."""
    whereclause = None
    def __init__(self, tablename, autoincrement_col):
        self.tablename = tablename
        self.autoincrement_col = autoincrement_col
    def resolve(self, ctx, **kw):
        """Drop matched rows, rebinding the namespace entry; sets rowcount."""
        dataframe = ctx.namespace[self.tablename]
        product = TableResolver(self.tablename,
                        autoincrement_col=self.autoincrement_col)
        df = product.resolve_dataframe(ctx)
        if self.whereclause is not None:
            df_ind = df[self.whereclause.resolve_expression(ctx, product)]
        else:
            # no WHERE clause: every row is deleted
            df_ind = df
        ctx.namespace[self.tablename] = dataframe.drop(df_ind.index)
        ctx.cursor.rowcount = len(df_ind)
class DDLResolver(Resolver):
    """Common base for CREATE TABLE / DROP TABLE resolvers."""
    pass
class CreateTableResolver(DDLResolver):
    """Execute CREATE TABLE: register an empty, typed DataFrame."""
    def __init__(self, tablename, colnames, coltypes, autoincrement_col, pandas_index_pk):
        self.tablename = tablename
        self.colnames = colnames
        self.coltypes = coltypes
        self.autoincrement_col = autoincrement_col
        # when True, the autoincrement column is represented by the
        # frame's index and is omitted from the stored columns
        self.pandas_index_pk = pandas_index_pk
    def resolve(self, ctx, **kw):
        """Create the frame, mapping SQL types to numpy dtypes."""
        if self.tablename in ctx.namespace:
            raise dbapi.Error("Dataframe '%s' already exists" % self.tablename)
        # TODO: this is a hack for now
        def get_type(type_):
            # coarse SQL-type -> dtype mapping; everything else is object
            if isinstance(type_, sqltypes.Integer):
                return np.dtype('int64')
            elif isinstance(type_, sqltypes.Float):
                return np.dtype('float64')
            else:
                return np.dtype('object')
        # NOTE(review): DataFrame.from_items was removed in pandas 1.0 —
        # legacy pandas assumed here.
        ctx.namespace[self.tablename] = pd.DataFrame.from_items([
            (c, pd.Series(dtype=get_type(typ)))
            for (c, typ) in zip(self.colnames, self.coltypes)
            if not self.pandas_index_pk
            or c != self.autoincrement_col
        ])
class DropTableResolver(DDLResolver):
    """Execute DROP TABLE: remove the named DataFrame from the namespace."""
    def __init__(self, tablename):
        self.tablename = tablename
    def resolve(self, ctx, **kw):
        """Drop the table, raising a DBAPI error if it does not exist."""
        if self.tablename in ctx.namespace:
            del ctx.namespace[self.tablename]
        else:
            raise dbapi.Error("No such dataframe '%s'" % self.tablename)
def _coerce_to_series(ctx, col):
if not isinstance(col, pd.Series):
col = pd.Series([col])
return col
def _coerce_to_scalar(ctx, col):
if isinstance(col, pd.Series):
col = col.reset_index(drop=True)
if len(col) > 1:
raise dbapi.Error("scalar expression "
"returned more than one row")
col = col[0] if col else None
return col
def _unique_name():
names = collections.defaultdict(int)
def go(name):
count = names[name]
names[name] += 1
if count:
return "%s_%d" % (name, count)
else:
return name
return go
def _cartesian(ctx, f1, f2):
    """produce a cartesian product.
    This is to support multiple FROM clauses against a WHERE.
    Clearly, this is a bad place to be, and a join() should be
    used instead. But this allows the results to come back,
    at least.

    :param f1: FROM-style resolver providing the left frame.
    :param f2: FROM-style resolver providing the right frame.
    :return: a DerivedResolver wrapping the cross-joined frame.
    """
    df1, df2 = f1.resolve_dataframe(ctx), f2.resolve_dataframe(ctx)
    return DerivedResolver(
        _cartesian_dataframe(ctx, df1, df2)
    )
def _cartesian_dataframe(ctx, df1, df2):
if '_cartesian_ones' not in df1:
df1['_cartesian_ones'] = np.ones(len(df1))
if '_cartesian_ones' not in df2:
df2['_cartesian_ones'] = np.ones(len(df2))
return df1.merge(df2, on='_cartesian_ones')
| 19,572 | 2,131 | 1,432 |
0cc714f1c62d1358fd01873a96b4a0caac960520 | 4,202 | py | Python | plyades/kepler.py | blacKitten13/plyades | 146a17c50b913f0f4abc96b7bcaa627f37955b14 | [
"MIT"
] | 25 | 2015-08-26T09:26:21.000Z | 2021-11-06T23:57:55.000Z | plyades/kepler.py | blacKitten13/plyades | 146a17c50b913f0f4abc96b7bcaa627f37955b14 | [
"MIT"
] | 2 | 2015-08-29T15:20:34.000Z | 2017-03-22T19:10:39.000Z | plyades/kepler.py | blacKitten13/plyades | 146a17c50b913f0f4abc96b7bcaa627f37955b14 | [
"MIT"
] | 6 | 2016-10-02T23:43:04.000Z | 2020-06-19T00:43:15.000Z | from __future__ import division, print_function
from scipy import optimize
import numpy as np
import plyades.util as util
import astropy.units as units
| 28.013333 | 79 | 0.536649 | from __future__ import division, print_function
from scipy import optimize
import numpy as np
import plyades.util as util
import astropy.units as units
def elements(mu, r, v):
    """Compute classical orbital elements from a Cartesian state.

    :param mu: gravitational parameter of the central body.
    :param r: position vector(s); promoted to 2-D (one row per state).
    :param v: velocity vector(s); promoted to 2-D (one row per state).
    :return: tuple ``(sma, ecc, inc, node, peri, ano)``; each entry is
        squeezed, so single-state input yields scalars.

    NOTE(review): ``peri`` and ``ano`` are taken via arccos only, which
    cannot distinguish quadrants; the customary sign checks (e.g. on the
    z-component of e, and on r·v) are absent here and ``mod2pi`` does not
    replace them — confirm against a reference (Vallado, RV2COE).
    """
    r = np.atleast_2d(r)
    v = np.atleast_2d(v)
    r_mag = util.mag(r)
    v_mag = util.mag(v)
    # specific angular momentum and node vector (k x h)
    h = util.cross(r, v)
    h_mag = util.mag(h)
    k = np.array([[0, 0, 1]]).repeat(r.shape[0], axis=0)
    n = util.cross(k, h)
    n_mag = util.mag(n)
    # specific orbital energy and eccentricity vector
    xi = v_mag ** 2 / 2 - mu / r_mag
    e = ((v_mag ** 2 - mu / r_mag) * r - v * util.dot(r, v)) / mu
    ecc = util.mag(e)
    if not (ecc == 1).any():
        sma = - mu / (2 * xi)
        p = sma * (1 - ecc ** 2)
    else:
        # parabolic case: semi-latus rectum stands in for sma
        p = h_mag ** 2 / mu
        sma = p
    inc = np.arccos(h[:, 2, np.newaxis] / h_mag)
    # node = np.arccos(n[:, 0, np.newaxis] / n_mag)
    node = np.arctan2(n[:, 1, np.newaxis]/h_mag, n[:, 0, np.newaxis]/h_mag)
    peri = np.arccos(util.dot(n, e) / (ecc * n_mag))
    ano = np.arccos(util.dot(e, r) / (ecc * r_mag))
    # Quadrant checks
    node = util.mod2pi(node)
    peri = util.mod2pi(peri)
    ano = util.mod2pi(ano)
    return (
        sma.squeeze(), ecc.squeeze(), inc.squeeze(),
        node.squeeze(), peri.squeeze(), ano.squeeze())
def print_elements(ele):
    """Pretty-print a 6-tuple of orbital elements.

    The first two entries (semi-major axis, eccentricity) are printed
    as-is; the four angular elements are converted to degrees first.
    """
    labels = ("Semi-major axis:", "Eccentricity:", "Inclination:",
        "Ascending node:", "Argument of perigee:",
        "True anomaly:")
    row = "{:<26}{:>16.5f}"
    for index, (label, value) in enumerate(zip(labels, ele)):
        if index >= 2:
            value = np.degrees(value)
        print(row.format(label, value))
def cartesian(mu, sma, ecc, inc, node, peri, ano):
    """Convert classical orbital elements to a Cartesian state.

    :param mu: gravitational parameter of the central body.
    :param sma: semi-major axis (semi-latus rectum for parabolic orbits).
    :param ecc, inc, node, peri, ano: eccentricity and the four angular
        elements (radians).
    :return: tuple ``(x, y, z, vx, vy, vz)``, each squeezed.

    NOTE(review): the parabolic branch indexes ``p[e_ix]``, which assumes
    array-valued inputs whenever any eccentricity equals 1 — scalar input
    would fail there; confirm intended input shapes.
    """
    # argument of latitude and conic parameter
    u = peri + ano
    p = sma * (1 - np.square(ecc))
    e_ix = ecc == 1
    if e_ix.any():
        p[e_ix] = sma[e_ix]
    r = p / (1 + ecc * np.cos(ano))
    # position via the standard rotation from perifocal to inertial axes
    x = r*(np.cos(node)*np.cos(u) - np.sin(node)*np.cos(inc)*np.sin(u))
    y = r*(np.sin(node)*np.cos(u) + np.cos(node)*np.cos(inc)*np.sin(u))
    z = r*np.sin(inc)*np.sin(u)
    # radial and transverse velocity components
    vr = np.sqrt(mu/p)*ecc*np.sin(ano)
    vf = np.sqrt(mu*p)/r
    vx = (
        vr*(np.cos(node)*np.cos(u) - np.sin(node)*np.cos(inc)*np.sin(u)) -
        vf*(np.cos(node)*np.sin(u) + np.sin(node)*np.cos(u)*np.cos(inc)))
    vy = (
        vr*(np.sin(node)*np.cos(u) + np.cos(node)*np.cos(inc)*np.sin(u)) -
        vf*(np.sin(node)*np.sin(u) - np.cos(node)*np.cos(u)*np.cos(inc)))
    vz = vr*np.sin(inc)*np.sin(u) + vf*np.cos(u)*np.sin(inc)
    return (
        x.squeeze(), y.squeeze(), z.squeeze(),
        vx.squeeze(), vy.squeeze(), vz.squeeze())
def period(a, mu):
    """Orbital period from Kepler's third law: T = sqrt(4*pi^2*a^3/mu)."""
    t_squared = 4 * a**3 * np.pi**2 / mu
    return np.sqrt(t_squared)
def orbital_energy(a, mu):
    """Specific orbital energy of an orbit with semi-major axis ``a``."""
    denominator = 2 * a
    return -mu / denominator
def ecc_to_true(E, e):
    """Convert eccentric anomaly ``E`` to true anomaly for eccentricity ``e``."""
    half = E / 2
    numerator = np.sqrt(1 + e) * np.sin(half)
    denominator = np.sqrt(1 - e) * np.cos(half)
    return 2 * np.arctan2(numerator, denominator)
def true_to_ecc(T, e):
    """Convert true anomaly ``T`` to eccentric anomaly for eccentricity ``e``."""
    half = T / 2
    numerator = np.sqrt(1 - e) * np.sin(half)
    denominator = np.sqrt(1 + e) * np.cos(half)
    return 2 * np.arctan2(numerator, denominator)
def ecc_to_mean(E, e):
    """Kepler's equation: mean anomaly M = E - e*sin(E).

    Supports astropy Quantities: when ``E`` carries a unit, the numeric
    result is re-attached to that unit.
    """
    unit = getattr(E, 'unit', None)
    if unit:
        return (E.value - e*np.sin(E))*unit
    return E - e*np.sin(E)
def mean_to_ecc(M, e):
    """Invert Kepler's equation numerically: find E with E - e*sin(E) = M.

    Uses Newton's method with the analytic derivative.  Quantity input is
    unwrapped before iterating and its unit re-attached afterwards (a
    unit-bearing ``M`` is assumed to come with a unit-bearing ``e``,
    mirroring the original behaviour).
    """
    unit = getattr(M, 'unit', None)
    if unit:
        M = M.value
        e = e.value
    def residual(E):
        return E - e*np.sin(E) - M
    def residual_prime(E):
        return 1 - e*np.cos(E)
    solution = optimize.newton(
        residual, M, residual_prime, args=(), tol=1e-10, maxiter=50)
    return solution*unit if unit else solution
def true_to_mean(T, e):
    """Convert true anomaly to mean anomaly via the eccentric anomaly."""
    ecc_anomaly = true_to_ecc(T, e)
    return ecc_to_mean(ecc_anomaly, e)
def mean_to_true(M, e):
    """Convert mean anomaly to true anomaly via the eccentric anomaly."""
    ecc_anomaly = mean_to_ecc(M, e)
    return ecc_to_true(ecc_anomaly, e)
def kepler(ele, dt, mu):
    """Propagate a Keplerian state by ``dt`` using Kepler's equation.

    :param ele: element tuple ``(sma, ecc, inc, node, peri, ano)`` with the
        true anomaly in the last slot.
    :param dt: time offset(s) past epoch; scalar or array-like.
    :param mu: gravitational parameter of the central body.
    :return: propagated elements; a flat 6-vector for scalar ``dt``,
        otherwise one row of elements per entry in ``dt``.
    """
    # Bug fix: the original called true2ecc/ecc2mean/mean2ecc/ecc2true,
    # which do not exist in this module (the converters are named
    # true_to_ecc etc.), so every call raised NameError.
    E0 = true_to_ecc(ele[5], ele[1])
    M0 = ecc_to_mean(E0, ele[1])
    n = 2*np.pi/period(ele[0], mu)
    M = M0 + n*dt
    if not np.isscalar(M):
        E = np.zeros(np.shape(M))
        out = np.zeros((len(M), 6))
        for i, m in enumerate(M):
            E[i] = mean_to_ecc(m, ele[1])
    else:
        out = np.zeros((1, 6))
        E = mean_to_ecc(M, ele[1])
    T = ecc_to_true(E, ele[1])
    # only the true anomaly changes under two-body propagation
    out[:, 0:5] = ele[0:5]
    out[:, 5] = T
    # Bug fix: the original compared out.shape against (6,), which can
    # never match a 2-D array, leaving its scalar-flatten branch dead.
    # Flatten for scalar dt as evidently intended.
    if np.isscalar(M):
        return out.flatten()
    else:
        return out
| 3,762 | 0 | 276 |
745727e1a86b6e0796582eda1945544ceaf05a40 | 1,101 | py | Python | mpf/core/scriptlet.py | pmansukhani/mpf | 0979965d24bcaba9423b43581c6a18b847b1b900 | [
"MIT"
] | null | null | null | mpf/core/scriptlet.py | pmansukhani/mpf | 0979965d24bcaba9423b43581c6a18b847b1b900 | [
"MIT"
] | null | null | null | mpf/core/scriptlet.py | pmansukhani/mpf | 0979965d24bcaba9423b43581c6a18b847b1b900 | [
"MIT"
] | null | null | null | """Contains the parent class for DEPRECATED Scriptlets.
This is deprecated and will be removed in config_version 6 with MPF 0.60.
Use custom code instead.
"""
from mpf.core.delays import DelayManager
from mpf.core.logging import LogMixin
class Scriptlet(LogMixin):
    """Baseclass for DEPRECATED scriptlets which are simple scripts in a machine.
    This is deprecated and will be removed in config_version 6 with MPF 0.60.
    Use custom code instead.
    """
    def __init__(self, machine, name):
        """Initialise scriptlet.

        Args:
            machine: The main machine controller instance.
            name: Name of this scriptlet; used for the logger name and repr.
        """
        super().__init__()
        self.machine = machine
        self.name = name
        self.configure_logging('Scriptlet.' + name, 'basic', 'full')
        self.delay = DelayManager(self.machine)
        self.on_load()
    def __repr__(self):
        """Return string representation."""
        return '<Scriptlet.{}>'.format(self.name)
    def on_load(self):
        """Automatically called when this Scriptlet loads.
        It's the intention that the Scriptlet writer will overwrite this method
        in the Scriptlet.
        """
        # Idiom fix: the redundant ``pass`` after the docstring was removed;
        # the docstring alone is a sufficient body.
| 28.230769 | 81 | 0.66485 | """Contains the parent class for DEPRECATED Scriptlets.
This is deprecated and will be removed in config_version 6 with MPF 0.60.
Use custom code instead.
"""
from mpf.core.delays import DelayManager
from mpf.core.logging import LogMixin
class Scriptlet(LogMixin):
    """Baseclass for DEPRECATED scriptlets which are simple scripts in a machine.
    This is deprecated and will be removed in config_version 6 with MPF 0.60.
    Use custom code instead.
    """
    def __init__(self, machine, name):
        """Initialise scriptlet.

        Args:
            machine: The main machine controller instance.
            name: Name of this scriptlet; used for the logger name and repr.
        """
        super().__init__()
        self.machine = machine
        self.name = name
        self.configure_logging('Scriptlet.' + name, 'basic', 'full')
        self.delay = DelayManager(self.machine)
        self.on_load()
    def __repr__(self):
        """Return string representation."""
        return '<Scriptlet.{}>'.format(self.name)
    def on_load(self):
        """Automatically called when this Scriptlet loads.
        It's the intention that the Scriptlet writer will overwrite this method
        in the Scriptlet.
        """
        pass
| 0 | 0 | 0 |
411c02325644c682286a87553f1732277d9df53c | 146 | py | Python | coffee/solutions/solution_1.py | mcptr/py-tut-stats | c096bde2b9668a9fbe85aa2f8251b6ecc0545a4f | [
"MIT"
] | null | null | null | coffee/solutions/solution_1.py | mcptr/py-tut-stats | c096bde2b9668a9fbe85aa2f8251b6ecc0545a4f | [
"MIT"
] | null | null | null | coffee/solutions/solution_1.py | mcptr/py-tut-stats | c096bde2b9668a9fbe85aa2f8251b6ecc0545a4f | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import data
# Fetch the "Moisture" series from the tutorial dataset (project-local
# ``data`` module), then plot it twice: first in observation order, then
# sorted ascending to visualize the distribution of values.
moisture = data.get("Moisture")
plt.plot(moisture)
plt.show()
plt.plot(sorted(moisture))
plt.show()
| 14.6 | 31 | 0.753425 | import matplotlib.pyplot as plt
import data
# Load the "Moisture" measurements via the project-local ``data`` helper.
moisture = data.get("Moisture")
# Raw series in observation order.
plt.plot(moisture)
plt.show()
# Same values sorted ascending — shows the distribution shape.
plt.plot(sorted(moisture))
plt.show()
| 0 | 0 | 0 |
a0f3807f0984681dc3708575a6ea527b3d8e34b7 | 41 | py | Python | ejercicio4.py | jzazooro/warm-up | 5dcc2a6996923ffbeba5f9cb9b34d79660ee217d | [
"Apache-2.0"
] | null | null | null | ejercicio4.py | jzazooro/warm-up | 5dcc2a6996923ffbeba5f9cb9b34d79660ee217d | [
"Apache-2.0"
] | null | null | null | ejercicio4.py | jzazooro/warm-up | 5dcc2a6996923ffbeba5f9cb9b34d79660ee217d | [
"Apache-2.0"
] | null | null | null | altura=200
altura=altura+50
print(altura) | 13.666667 | 16 | 0.829268 | altura=200
altura=altura+50
print(altura) | 0 | 0 | 0 |
2ed29407ea8f2fa563e2d88d687ea85c987dc21e | 1,074 | py | Python | code/quiver.py | Iydon/ODE_Notes | b455ccd1e36c7f55b243e95eaf0a83896f56dd5b | [
"MIT"
] | 1 | 2018-11-17T04:50:56.000Z | 2018-11-17T04:50:56.000Z | code/quiver.py | Iydon/ODE_Notes | b455ccd1e36c7f55b243e95eaf0a83896f56dd5b | [
"MIT"
] | null | null | null | code/quiver.py | Iydon/ODE_Notes | b455ccd1e36c7f55b243e95eaf0a83896f56dd5b | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Damped-pendulum-like system: x' = y, y' = -sin(x) + 0.01*y; the z
# component (cos(z)) is only used for the 3-D field plot.
x = lambda x,y,z=None: y
y = lambda x,y,z=None: -np.sin(x) + 0.01*y
z = lambda x,y,z=None: np.cos(z)
# Plot window: one full period in each direction.
xlim = [-2*np.pi, 2*np.pi]
ylim = [-2*np.pi, 2*np.pi]
zlim = [-2*np.pi, 2*np.pi]
# direction_field_2d/_3d are defined elsewhere in this module.
direction_field_2d(x, y, xlim, ylim)
direction_field_3d(x, y, z, xlim, ylim, zlim, num=[5,5,5])
| 25.571429 | 75 | 0.594041 | import numpy as np
import matplotlib.pyplot as plt
def direction_field_2d(x, y, xlim=None, ylim=None, num=None):
    """Draw the planar direction field of x' = x(x, y), y' = y(x, y).

    ``xlim``/``ylim`` default to [-10, 10] and ``num`` to a 10x10 grid of
    sample points; arrows are rendered with matplotlib's quiver plot.
    """
    if not xlim:
        xlim = [-10, 10]
    if not ylim:
        ylim = [-10, 10]
    if not num:
        num = [10, 10]
    xs = np.linspace(*xlim, num[0])
    ys = np.linspace(*ylim, num[-1])
    grid_x, grid_y = np.meshgrid(xs, ys)
    u = x(grid_x, grid_y)
    v = y(grid_x, grid_y)
    plt.quiver(grid_x, grid_y, u, v)
    plt.show()
def direction_field_3d(x, y, z, xlim=None, ylim=None, zlim=None, num=None):
    """Draw the 3-D direction field of x' = x(..), y' = y(..), z' = z(..).

    Axis limits default to [-10, 10] and ``num`` to a 10x10x10 sample grid.

    Bug fix: ``plt.gca(projection="3d")`` relied on a matplotlib API that
    was deprecated in 3.4 and removed in 3.6 (``gca`` no longer accepts a
    projection argument); the 3-D axes are now created explicitly.
    """
    xlim = xlim or [-10, 10]
    ylim = ylim or [-10, 10]
    zlim = zlim or [-10, 10]
    num = num or [10, 10, 10]
    x_ = np.linspace(*xlim, num[0])
    y_ = np.linspace(*ylim, num[1])
    z_ = np.linspace(*zlim, num[2])
    X,Y,Z = np.meshgrid(x_, y_, z_)
    Zx = x(X, Y, Z)
    Zy = y(X, Y, Z)
    Zz = z(X, Y, Z)
    ax = plt.figure().add_subplot(projection="3d")
    ax.quiver(X, Y, Z, Zx, Zy, Zz)
    plt.show()
# Demo: damped-pendulum-like system x' = y, y' = -sin(x) + 0.01*y, with
# z' = cos(z) supplying the third component for the 3-D plot.
x = lambda x,y,z=None: y
y = lambda x,y,z=None: -np.sin(x) + 0.01*y
z = lambda x,y,z=None: np.cos(z)
# One full period in every direction.
xlim = [-2*np.pi, 2*np.pi]
ylim = [-2*np.pi, 2*np.pi]
zlim = [-2*np.pi, 2*np.pi]
direction_field_2d(x, y, xlim, ylim)
direction_field_3d(x, y, z, xlim, ylim, zlim, num=[5,5,5])
| 696 | 0 | 46 |
109e2d81f972667f3a6d7e758fe4740311cef247 | 6,228 | py | Python | cloudsaga/cloudsaga.py | mbeacom/aws-cloudsaga | 1db34790bad2340d6a49c452936a7e2127679f17 | [
"Apache-2.0"
] | 241 | 2022-02-22T15:52:29.000Z | 2022-03-29T20:15:11.000Z | cloudsaga/cloudsaga.py | mbeacom/aws-cloudsaga | 1db34790bad2340d6a49c452936a7e2127679f17 | [
"Apache-2.0"
] | 2 | 2022-02-24T21:13:54.000Z | 2022-02-25T17:33:50.000Z | cloudsaga/cloudsaga.py | mbeacom/aws-cloudsaga | 1db34790bad2340d6a49c452936a7e2127679f17 | [
"Apache-2.0"
] | 9 | 2022-02-22T15:22:24.000Z | 2022-03-12T19:55:11.000Z | #!/usr/bin/env python3
#// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#// SPDX-License-Identifier: Apache-2.0
# AWS CloudSaga - Simulate security events in AWS
# Joshua "DozerCat" McKiddy - Customer Incident Response Team (CIRT) - AWS
import logging
import time
import datetime
import argparse
from datetime import timezone
from .scenarios import iam_credentials, imds_reveal, mining_bitcoin, network_changes, public_resources
# Timezone-aware timestamps: ``timestamp_date_string`` names the per-run
# log file created inside main().
current_date = datetime.datetime.now(tz=timezone.utc)
current_date_string = str(current_date)
timestamp_date = datetime.datetime.now(tz=timezone.utc).strftime("%Y-%m-%d-%H%M%S")
timestamp_date_string = str(timestamp_date)
# Root logger configuration shared by all scenario modules.
logFormatter = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=logFormatter, level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# The file handler below was moved into main() (see there); retained
# commented-out by the original author.
# output_handle = logging.FileHandler('cloudsaga_' + timestamp_date_string + '.log')
# output_handle.setLevel(logging.INFO)
# logger.addHandler(output_handle)
# formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# output_handle.setFormatter(formatter)
def banner():
    """Print the AWS CloudSaga ASCII-art banner and usage hint to stdout."""
    print('''
     ___   ____    __    ____   _______.
    /   \  \   \  /  \  /   /  /       |
   /  ^  \  \   \/    \/   /  |   (----`
  /  /_\  \  \            /    \   \
 /  _____  \  \    /\    /  .----)   |
/__/     \__\  \__/  \__/   |_______/
  ______  __        ______    __    __   _______       _______. ___       _______      ___
 /      ||  |      /  __  \  |  |  |  | |       \     /       |/   \     /  _____|    /   \
|  ,----'|  |     |  |  |  | |  |  |  | |  .--.  |   |   (----`   ^  \  |  |  __     /  ^  \
|  |     |  |     |  |  |  | |  |  |  | |  |  |  | \   \    /  /_\  \ |  | |_ |    /  /_\  \
|  `----.|  `----.|  `--' | |  `--' | |  '--'  |.----)   |   /  _____  \ |  |__| |  /  _____  \
 \______||_______| \______/  \______/ |_______/ |_______/ /__/     \__\ \______| /__/     \__\
Joshua "DozerCat" McKiddy - Team DragonCat - AWS
Type -h for help.
'''
    )
def main():
    """Main function to run the code.

    Parses the CLI flags, prints the banner, and either lists chapters,
    shows a scenario's description, or dispatches to a scenario module.
    """
    # per-run log file; the root logger itself is configured at module level
    output_handle = logging.FileHandler('cloudsaga_' + timestamp_date_string + '.log')
    output_handle.setLevel(logging.INFO)
    logger.addHandler(output_handle)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    output_handle.setFormatter(formatter)
    parser = argparse.ArgumentParser(description='AWS CloudSaga - Simulate security events in AWS')
    parser.add_argument('--scenario',help=' Perform the scenario you want to run against your AWS environment.', required=False)
    parser.add_argument('--chapters',help=' List the available scenarios within CloudSaga. Use the --about flag to read details about a specific scenario.', action='store_true', required=False)
    parser.add_argument('--about',help=' Read about a specific scenario (e.g. --about <scenario>. For a list of available scenarios, use the --chapters flag.', required=False)
    args = parser.parse_args()
    banner()
    # Bug fix: each informational branch below now returns after printing.
    # Previously execution fell through to the scenario dispatch, whose
    # ``else`` misleadingly printed "No options selected" right after a
    # chapter/about page had been shown.
    if args.chapters:
        print('''
        Chapters:
        imds-reveal: IMDS Reveal discovers instances that using IMDS v1, which are vulnerable to the IMDSv1 attack vector.
        mining-bitcoin: Uses Amazon EC2 resources to simulate creation of Bitcoin mining.
        network-changes: Uses Amazon VPC resources to simulate network changes.
        iam-credentials: Attempts to grab the IAM credential report within the AWS account.
        public-resources: Checks Amazon RDS and Amazon S3 for resources that are public, as well as creates a public RDS instance.
        ''')
        return
    elif args.about == 'imds-reveal':
        print('''
        IMDS Reveal Scenario:
        This scenario is based on the attack vector provided by IMDS version 1.
        EC2 instances using IMDS version 1 are vulnerable to server side request
        forgery (SSRF) attacks, and can be used as a pivot point for privilege
        escalation within AWS.
        Resources Checked:
        Amazon EC2
        ''')
        return
    elif args.about == 'mining-bitcoin':
        print('''
        Bitcoin Mining Scenario:
        This scenario simulates the creation of Bitcoin mining instances.
        Attackers attempt to create Bitcoin mining instances using Amazon EC2,
        in order to leverage legitimate AWS customer's resources for their own purposes.
        Resources Checked:
        Amazon EC2
        ''')
        return
    elif args.about == 'network-changes':
        print('''
        Network Changes Scenario:
        This scenario simulates the creation and modification of network resources within
        AWS. This includes creating Amazon VPCs, as well as modifications to Security Groups,
        for the purposes of compromising resources within the AWS account.
        Resources Checked:
        Amazon VPC
        Amazon EC2
        ''')
        return
    elif args.about == 'iam-credentials':
        print('''
        IAM Credentials Scenario:
        This scenario attempts to grab the IAM credential report within the AWS account.
        Resources Checked:
        Amazon IAM
        ''')
        return
    elif args.about == 'public-resources':
        print('''
        Public Resources Scenario:
        This scenario is for checking and creating public AWS resources within an AWS account.
        This includes Amazon RDS and Amazon S3.
        Resources Checked:
        Amazon RDS
        Amazon S3
        ''')
        return
    # dispatch to the requested scenario module
    if args.scenario == 'imds-reveal':
        imds_reveal.main()
    elif args.scenario == 'mining-bitcoin':
        mining_bitcoin.main()
    elif args.scenario == 'network-changes':
        network_changes.main()
    elif args.scenario == 'iam-credentials':
        iam_credentials.main()
    elif args.scenario == 'public-resources':
        public_resources.main()
    else:
        print("No options selected. Please run -h for help.")
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 38.925 | 193 | 0.599711 | #!/usr/bin/env python3
#// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#// SPDX-License-Identifier: Apache-2.0
# AWS CloudSaga - Simulate security events in AWS
# Joshua "DozerCat" McKiddy - Customer Incident Response Team (CIRT) - AWS
import logging
import time
import datetime
import argparse
from datetime import timezone
from .scenarios import iam_credentials, imds_reveal, mining_bitcoin, network_changes, public_resources
# Timezone-aware timestamps; ``timestamp_date_string`` names the per-run
# log file created inside main().
current_date = datetime.datetime.now(tz=timezone.utc)
current_date_string = str(current_date)
timestamp_date = datetime.datetime.now(tz=timezone.utc).strftime("%Y-%m-%d-%H%M%S")
timestamp_date_string = str(timestamp_date)
# Root logger configuration shared by all scenario modules.
logFormatter = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(format=logFormatter, level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# File handler moved into main(); left commented out by the author.
# output_handle = logging.FileHandler('cloudsaga_' + timestamp_date_string + '.log')
# output_handle.setLevel(logging.INFO)
# logger.addHandler(output_handle)
# formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# output_handle.setFormatter(formatter)
def banner():
    """Print the AWS CloudSaga ASCII-art banner and usage hint to stdout."""
    print('''
     ___   ____    __    ____   _______.
    /   \  \   \  /  \  /   /  /       |
   /  ^  \  \   \/    \/   /  |   (----`
  /  /_\  \  \            /    \   \
 /  _____  \  \    /\    /  .----)   |
/__/     \__\  \__/  \__/   |_______/
  ______  __        ______    __    __   _______       _______. ___       _______      ___
 /      ||  |      /  __  \  |  |  |  | |       \     /       |/   \     /  _____|    /   \
|  ,----'|  |     |  |  |  | |  |  |  | |  .--.  |   |   (----`   ^  \  |  |  __     /  ^  \
|  |     |  |     |  |  |  | |  |  |  | |  |  |  | \   \    /  /_\  \ |  | |_ |    /  /_\  \
|  `----.|  `----.|  `--' | |  `--' | |  '--'  |.----)   |   /  _____  \ |  |__| |  /  _____  \
 \______||_______| \______/  \______/ |_______/ |_______/ /__/     \__\ \______| /__/     \__\
Joshua "DozerCat" McKiddy - Team DragonCat - AWS
Type -h for help.
'''
    )
def main():
    """Entry point for AWS CloudSaga.

    Sets up per-run file logging, parses the command-line arguments, and then
    either prints the chapter list, prints the description of one scenario,
    or dispatches to the requested scenario module.
    """
    # Per-run log file, named with the import-time UTC timestamp.
    output_handle = logging.FileHandler('cloudsaga_' + timestamp_date_string + '.log')
    output_handle.setLevel(logging.INFO)
    logger.addHandler(output_handle)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    output_handle.setFormatter(formatter)

    parser = argparse.ArgumentParser(description='AWS CloudSaga - Simulate security events in AWS')
    parser.add_argument('--scenario',help=' Perform the scenario you want to run against your AWS environment.', required=False)
    parser.add_argument('--chapters',help=' List the available scenarios within CloudSaga. Use the --about flag to read details about a specific scenario.', action='store_true', required=False)
    parser.add_argument('--about',help=' Read about a specific scenario (e.g. --about <scenario>. For a list of available scenarios, use the --chapters flag.', required=False)
    args = parser.parse_args()
    banner()
    if args.chapters:
        print('''
        Chapters:
        imds-reveal: IMDS Reveal discovers instances that using IMDS v1, which are vulnerable to the IMDSv1 attack vector.
        mining-bitcoin: Uses Amazon EC2 resources to simulate creation of Bitcoin mining.
        network-changes: Uses Amazon VPC resources to simulate network changes.
        iam-credentials: Attempts to grab the IAM credential report within the AWS account.
        public-resources: Checks Amazon RDS and Amazon S3 for resources that are public, as well as creates a public RDS instance.
        ''')
        return
    if args.about:
        # BUG FIX: previously the --about branches fell through to the
        # scenario dispatch below, which then printed "No options selected.
        # Please run -h for help." right after the description (and printed
        # nothing at all for an unknown scenario name). --about is purely
        # informational, so return after printing.
        if args.about == 'imds-reveal':
            print('''
        IMDS Reveal Scenario:
        This scenario is based on the attack vector provided by IMDS version 1.
        EC2 instances using IMDS version 1 are vulnerable to server side request
        forgery (SSRF) attacks, and can be used as a pivot point for privilege
        escalation within AWS.
        Resources Checked:
        Amazon EC2
        ''')
        elif args.about == 'mining-bitcoin':
            print('''
        Bitcoin Mining Scenario:
        This scenario simulates the creation of Bitcoin mining instances.
        Attackers attempt to create Bitcoin mining instances using Amazon EC2,
        in order to leverage legitimate AWS customer's resources for their own purposes.
        Resources Checked:
        Amazon EC2
        ''')
        elif args.about == 'network-changes':
            print('''
        Network Changes Scenario:
        This scenario simulates the creation and modification of network resources within
        AWS. This includes creating Amazon VPCs, as well as modifications to Security Groups,
        for the purposes of compromising resources within the AWS account.
        Resources Checked:
        Amazon VPC
        Amazon EC2
        ''')
        elif args.about == 'iam-credentials':
            print('''
        IAM Credentials Scenario:
        This scenario attempts to grab the IAM credential report within the AWS account.
        Resources Checked:
        Amazon IAM
        ''')
        elif args.about == 'public-resources':
            print('''
        Public Resources Scenario:
        This scenario is for checking and creating public AWS resources within an AWS account.
        This includes Amazon RDS and Amazon S3.
        Resources Checked:
        Amazon RDS
        Amazon S3
        ''')
        else:
            # Robustness fix: previously an unknown scenario name printed
            # nothing here.
            print("Unknown scenario. Use the --chapters flag to list available scenarios.")
        return
    if args.scenario == 'imds-reveal':
        imds_reveal.main()
    elif args.scenario == 'mining-bitcoin':
        mining_bitcoin.main()
    elif args.scenario == 'network-changes':
        network_changes.main()
    elif args.scenario == 'iam-credentials':
        iam_credentials.main()
    elif args.scenario == 'public-resources':
        public_resources.main()
    else:
        print("No options selected. Please run -h for help.")
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| 0 | 0 | 0 |
5d9cde014b591bf086df8f14bcaaa02cd339ed38 | 9,193 | py | Python | auth0/v3/test/authentication/test_base.py | Sytten/auth0-python | 59c1942acbd9723adaf587ac4bc94c9583fe38a0 | [
"MIT"
] | null | null | null | auth0/v3/test/authentication/test_base.py | Sytten/auth0-python | 59c1942acbd9723adaf587ac4bc94c9583fe38a0 | [
"MIT"
] | null | null | null | auth0/v3/test/authentication/test_base.py | Sytten/auth0-python | 59c1942acbd9723adaf587ac4bc94c9583fe38a0 | [
"MIT"
] | null | null | null | import base64
import json
import mock
import sys
import requests
import unittest
from ...authentication.base import AuthenticationBase
from ...exceptions import Auth0Error
| 38.95339 | 84 | 0.610138 | import base64
import json
import mock
import sys
import requests
import unittest
from ...authentication.base import AuthenticationBase
from ...exceptions import Auth0Error
class TestBase(unittest.TestCase):
    """Unit tests for ``AuthenticationBase``.

    Covers the default telemetry headers, the GET/POST request plumbing
    (mocking out ``requests``), translation of error responses into
    ``Auth0Error``, and request timeouts.
    """

    def test_telemetry_enabled_by_default(self):
        # Telemetry headers (User-Agent / base64-encoded Auth0-Client JSON)
        # must be present unless explicitly disabled.
        ab = AuthenticationBase('auth0.com')

        user_agent = ab.base_headers['User-Agent']
        auth0_client_bytes = base64.b64decode(ab.base_headers['Auth0-Client'])
        auth0_client_json = auth0_client_bytes.decode('utf-8')
        auth0_client = json.loads(auth0_client_json)
        content_type = ab.base_headers['Content-Type']

        from auth0 import __version__ as auth0_version
        python_version = '{}.{}.{}'.format(sys.version_info.major,
                                           sys.version_info.minor,
                                           sys.version_info.micro)

        client_info = {
            'name': 'auth0-python',
            'version': auth0_version,
            'env': {
                'python': python_version
            }
        }

        self.assertEqual(user_agent, 'Python/{}'.format(python_version))
        self.assertEqual(auth0_client, client_info)
        self.assertEqual(content_type, 'application/json')

    def test_telemetry_disabled(self):
        # With telemetry off, only the Content-Type header remains.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        self.assertEqual(ab.base_headers, {'Content-Type': 'application/json'})

    @mock.patch('requests.post')
    def test_post(self, mock_post):
        # POST forwards url/json/headers and the default 5s timeout, and
        # parses the JSON response body.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        mock_post.return_value.status_code = 200
        mock_post.return_value.text = '{"x": "y"}'

        data = ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})

        mock_post.assert_called_with(url='the-url', json={'a': 'b'},
                                     headers={'c': 'd', 'Content-Type': 'application/json'}, timeout=5.0)

        self.assertEqual(data, {'x': 'y'})

    @mock.patch('requests.post')
    def test_post_with_defaults(self, mock_post):
        # POST with only the URL still sends the base headers and timeout.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        mock_post.return_value.status_code = 200
        mock_post.return_value.text = '{"x": "y"}'

        # Only required params are passed
        data = ab.post('the-url')

        mock_post.assert_called_with(url='the-url', json=None,
                                     headers={'Content-Type': 'application/json'}, timeout=5.0)

        self.assertEqual(data, {'x': 'y'})

    @mock.patch('requests.post')
    def test_post_includes_telemetry(self, mock_post):
        # With telemetry on, POST requests carry the telemetry headers too.
        ab = AuthenticationBase('auth0.com')

        mock_post.return_value.status_code = 200
        mock_post.return_value.text = '{"x": "y"}'

        data = ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})

        self.assertEqual(mock_post.call_count, 1)
        call_kwargs = mock_post.call_args[1]
        self.assertEqual(call_kwargs['url'], 'the-url')
        self.assertEqual(call_kwargs['json'], {'a': 'b'})
        headers = call_kwargs['headers']
        self.assertEqual(headers['c'], 'd')
        self.assertEqual(headers['Content-Type'], 'application/json')
        self.assertIn('User-Agent', headers)
        self.assertIn('Auth0-Client', headers)

        self.assertEqual(data, {'x': 'y'})

    @mock.patch('requests.post')
    def test_post_error(self, mock_post):
        # Error payloads with an "error" key map to Auth0Error fields, for
        # 4xx/5xx and even a missing status code.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = '{"error": "e0",' \
                                          '"error_description": "desc"}'

            with self.assertRaises(Auth0Error) as context:
                ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})

            self.assertEqual(context.exception.status_code, error_status)
            self.assertEqual(context.exception.error_code, 'e0')
            self.assertEqual(context.exception.message, 'desc')

    @mock.patch('requests.post')
    def test_post_error_with_code_property(self, mock_post):
        # Some endpoints report the error under "code" instead of "error".
        ab = AuthenticationBase('auth0.com', telemetry=False)

        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = '{"code": "e0",' \
                                          '"error_description": "desc"}'

            with self.assertRaises(Auth0Error) as context:
                ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})

            self.assertEqual(context.exception.status_code, error_status)
            self.assertEqual(context.exception.error_code, 'e0')
            self.assertEqual(context.exception.message, 'desc')

    @mock.patch('requests.post')
    def test_post_error_with_no_error_code(self, mock_post):
        # Missing error code falls back to the internal sentinel code.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = '{"error_description": "desc"}'

            with self.assertRaises(Auth0Error) as context:
                ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})

            self.assertEqual(context.exception.status_code, error_status)
            self.assertEqual(context.exception.error_code,
                             'a0.sdk.internal.unknown')
            self.assertEqual(context.exception.message, 'desc')

    @mock.patch('requests.post')
    def test_post_error_with_text_response(self, mock_post):
        # A non-JSON error body becomes the Auth0Error message verbatim.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = 'there has been a terrible error'

            with self.assertRaises(Auth0Error) as context:
                ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})

            self.assertEqual(context.exception.status_code, error_status)
            self.assertEqual(context.exception.error_code,
                             'a0.sdk.internal.unknown')
            self.assertEqual(context.exception.message,
                             'there has been a terrible error')

    @mock.patch('requests.post')
    def test_post_error_with_no_response_text(self, mock_post):
        # An empty error body yields an empty message, not a crash.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        for error_status in [400, 500, None]:
            mock_post.return_value.status_code = error_status
            mock_post.return_value.text = None

            with self.assertRaises(Auth0Error) as context:
                ab.post('the-url', data={'a': 'b'}, headers={'c': 'd'})

            self.assertEqual(context.exception.status_code, error_status)
            self.assertEqual(context.exception.error_code,
                             'a0.sdk.internal.unknown')
            self.assertEqual(context.exception.message, '')

    @mock.patch('requests.get')
    def test_get(self, mock_get):
        # GET forwards url/params/headers and the default 5s timeout.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        mock_get.return_value.status_code = 200
        mock_get.return_value.text = '{"x": "y"}'

        data = ab.get('the-url', params={'a': 'b'}, headers={'c': 'd'})

        mock_get.assert_called_with(url='the-url', params={'a': 'b'},
                                    headers={'c': 'd', 'Content-Type': 'application/json'}, timeout=5.0)

        self.assertEqual(data, {'x': 'y'})

    @mock.patch('requests.get')
    def test_get_with_defaults(self, mock_get):
        # GET with only the URL still sends the base headers and timeout.
        ab = AuthenticationBase('auth0.com', telemetry=False)

        mock_get.return_value.status_code = 200
        mock_get.return_value.text = '{"x": "y"}'

        # Only required params are passed
        data = ab.get('the-url')

        mock_get.assert_called_with(url='the-url', params=None,
                                    headers={'Content-Type': 'application/json'}, timeout=5.0)

        self.assertEqual(data, {'x': 'y'})

    @mock.patch('requests.get')
    def test_get_includes_telemetry(self, mock_get):
        # With telemetry on, GET requests carry the telemetry headers too.
        ab = AuthenticationBase('auth0.com')

        mock_get.return_value.status_code = 200
        mock_get.return_value.text = '{"x": "y"}'

        data = ab.get('the-url', params={'a': 'b'}, headers={'c': 'd'})

        self.assertEqual(mock_get.call_count, 1)
        call_kwargs = mock_get.call_args[1]
        self.assertEqual(call_kwargs['url'], 'the-url')
        self.assertEqual(call_kwargs['params'], {'a': 'b'})
        headers = call_kwargs['headers']
        self.assertEqual(headers['c'], 'd')
        self.assertEqual(headers['Content-Type'], 'application/json')
        self.assertIn('User-Agent', headers)
        self.assertIn('Auth0-Client', headers)

        self.assertEqual(data, {"x": "y"})

    def test_get_can_timeout(self):
        # A deliberately tiny timeout against a real host must raise.
        ab = AuthenticationBase('auth0.com', timeout=0.00001)

        with self.assertRaises(requests.exceptions.Timeout):
            ab.get('https://auth0.com', params={'a': 'b'}, headers={'c': 'd'})

    def test_post_can_timeout(self):
        # Same timeout expectation for POST.
        ab = AuthenticationBase('auth0.com', timeout=0.00001)

        with self.assertRaises(requests.exceptions.Timeout):
            ab.post('https://auth0.com', data={'a': 'b'}, headers={'c': 'd'})
| 8,211 | 786 | 23 |
4b95d8b606a83b2d29ddd85abb26585df3b98363 | 332 | py | Python | convnet/layers/__init__.py | tech-team/convnet | ee401c29349e163a95a868304e120de215158e37 | [
"MIT"
] | null | null | null | convnet/layers/__init__.py | tech-team/convnet | ee401c29349e163a95a868304e120de215158e37 | [
"MIT"
] | null | null | null | convnet/layers/__init__.py | tech-team/convnet | ee401c29349e163a95a868304e120de215158e37 | [
"MIT"
] | null | null | null | from .input_layer import InputLayer, InputLayerSettings
from .convolutional_layer import ConvolutionalLayer, ConvolutionalLayerSettings
from .pooling_layer import PoolingLayer, PoolingLayerSettings
from .relu_layer import ReluLayer, ReluLayerSettings
from .full_connected_layer import FullConnectedLayer, FullConnectedLayerSettings
| 55.333333 | 80 | 0.894578 | from .input_layer import InputLayer, InputLayerSettings
from .convolutional_layer import ConvolutionalLayer, ConvolutionalLayerSettings
from .pooling_layer import PoolingLayer, PoolingLayerSettings
from .relu_layer import ReluLayer, ReluLayerSettings
from .full_connected_layer import FullConnectedLayer, FullConnectedLayerSettings
| 0 | 0 | 0 |
d3b77e4223ee1c4ee3bec9c7994bdfb4265f5762 | 13,777 | py | Python | tor_worker/role_moderator/tasks.py | GrafeasGroup/tor_worker | b17981f45323c182c2526749250018450d034ef7 | [
"MIT"
] | 1 | 2018-03-22T17:10:00.000Z | 2018-03-22T17:10:00.000Z | tor_worker/role_moderator/tasks.py | GrafeasGroup/tor_worker | b17981f45323c182c2526749250018450d034ef7 | [
"MIT"
] | null | null | null | tor_worker/role_moderator/tasks.py | GrafeasGroup/tor_worker | b17981f45323c182c2526749250018450d034ef7 | [
"MIT"
] | null | null | null | from tor_worker import OUR_BOTS
from tor_worker.config import Config
from tor_worker.context import (
InvalidState,
is_claimable_post,
is_claimed_post_response,
is_code_of_conduct,
has_youtube_captions,
)
from tor_worker.user_interaction import (
format_bot_response as _,
message_link,
responses as bot_msg,
post_comment,
)
from tor_worker.task_base import Task, InvalidUser
from celery.utils.log import get_task_logger
from celery import (
current_app as app,
signature,
)
from praw.models import Comment
import re
import textwrap
log = get_task_logger(__name__)
# Comment phrases that should page the moderators in Slack (consumed by
# ``process_mod_intervention``). Each entry is a pre-compiled,
# case-insensitive pattern searched against the raw comment body.
MOD_SUPPORT_PHRASES = [
    re.compile('fuck', re.IGNORECASE),
    re.compile('unclaim', re.IGNORECASE),
    re.compile('undo', re.IGNORECASE),
    re.compile('(?:good|bad) bot', re.IGNORECASE),
]
@app.task(bind=True, ignore_result=True, base=Task)
def check_inbox(self):
    """
    Checks all unread messages in the inbox, routing the responses to other
    queues. This effectively transfers tasks from Reddit's inbox to our internal
    task queuing system, reducing the required API calls.
    """
    send_to_slack = signature('tor_worker.role_anyone.tasks.send_to_slack')
    send_bot_message = signature('tor_worker.role_moderator.tasks.'
                                 'send_bot_message')
    process_comment = signature('tor_worker.role_moderator.tasks.'
                                'process_comment')
    process_admin_command = signature('tor_worker.role_moderator.tasks.'
                                      'process_admin_command')

    # Oldest first, so items are handled in arrival order.
    for item in reversed(list(self.reddit.inbox.unread(limit=None))):
        # NOTE: We compare the `kind` attribute due to testing issues with
        # `isinstance()`. We can mock out the objects with MagicMock now and
        # have fewer classes loaded in this context.
        if item.kind == 't1':  # Comment
            if 'username mention' in item.subject.lower():
                log.info(f'Username mention by /u/{item.author.name}')
                send_bot_message.delay(to=item.author.name,
                                       subject='Username Call',
                                       body=_(bot_msg['mention']))
            else:
                process_comment.delay(item.id)
        elif item.kind == 't4':  # Message
            # Very rarely we may actually get a message from Reddit admins, in
            # which case there will be no author attribute
            if item.author is None:
                log.info(f'Received message from the admins: {item.subject}')
                send_to_slack.delay(
                    f'*Incoming message without an author*\n\n'
                    f'*Subject:* {item.subject}\n'
                    f'*Body:*\n\n'
                    f'{item.body}',
                    '#general'
                )
            elif item.subject and item.subject[0] == '!':
                # Subjects starting with '!' are admin commands.
                process_admin_command.delay(author=item.author.name,
                                            subject=item.subject,
                                            body=item.body,
                                            message_id=item.id)
            else:
                log.info(f'Received unhandled message from '
                         f'/u/{item.author.name}. Subject: '
                         f'{repr(item.subject)}')
                # BUG FIX: '#general' was previously concatenated onto the
                # message body (missing comma), so send_to_slack never
                # received its channel argument for this branch.
                send_to_slack.delay(
                    f'Unhandled message by [/u/{item.author.name}]'
                    f'(https://reddit.com/user/{item.author.name})'
                    f'\n\n'
                    f'*Subject:* {item.subject}'
                    f'\n\n'
                    f'{item.body}',
                    '#general'
                )
        else:  # pragma: no cover
            # There shouldn't be any other types than Message and Comment,
            # but on the off-chance there is, we'll log what it is here.
            send_to_slack.delay(
                f'Unhandled, unknown inbox item: {type(item).__name__}',
                '#botstuffs'
            )
            log.warning(f'Unhandled, unknown inbox item: {type(item).__name__}')

        item.mark_read()
@app.task(bind=True, ignore_result=True, base=Task)
def process_admin_command(self, author, subject, body, message_id):
    """
    Executes an admin command delivered via private message.

    The command name is the message subject minus its leading '!'. After a
    permission check against the /r/TranscribersOfReddit configuration, the
    mapped handler runs inline (no sub-task per command) and its return value
    is sent back as a reply to the originating message.
    """
    reply_task = signature('tor_worker.role_moderator.tasks.'
                           'send_bot_message')

    # It only makes sense to have this be scoped to /r/ToR
    tor_config = Config.subreddit('TranscribersOfReddit')

    # Lowercase and remove the initial '!'
    command_name = subject.lower()[1:]

    if not tor_config.commands.allows(command_name).by_user(author):
        log.warning(f'DENIED: {author} is not allowed to call {command_name}')
        # TODO: Send to slack
        return

    log.info(f'{author} called {command_name} with args {repr(body)}')

    handler = tor_config.commands.func(command_name)
    response = handler(author=author, body=body, svc=self)

    log.debug(f'Responding to {command_name} with {repr(body)} -> '
              f'{repr(response)}.')

    reply_task.delay(body=_(response), message_id=message_id)
@app.task(bind=True, ignore_result=True, base=Task)
def update_post_flair(self, submission_id, flair):
    """
    Sets the submission's flair to the first pre-existing flair template whose
    text matches ``flair`` case-insensitively (e.g. ``flair='unclaimed'``
    selects the styled "Unclaimed" template).

    Raises ``NotImplementedError`` when the subreddit has no template styled
    for that flair value.
    """
    submission = self.reddit.submission(submission_id)
    wanted = flair.lower()

    matching = next(
        (choice for choice in submission.flair.choices()
         if choice['flair_text'].lower() == wanted),
        None,
    )
    if matching is None:
        raise NotImplementedError(f"Unknown flair, {repr(flair)}, for post")

    # NOTE: This is hacky if we have multiple styles for the same flair.
    #   That said, we shouldn't rely on visual style if we're being
    #   truly accessible...
    submission.flair.select(
        flair_template_id=matching['flair_template_id']
    )
@app.task(bind=True, ignore_result=True, base=Task)
def send_bot_message(self, body, message_id=None, to=None,
                     subject='Just bot things...'):
    """
    Sends a message as /u/TranscribersOfReddit

    If this is intended to be a reply to an existing message:
    - fill out the ``message_id`` param with a ref to the previous message

    If no previous context:
    - fill out the ``to`` param with the author's username
    - fill out the ``subject`` param with the subject of the message

    One of these _must_ be done.
    """
    sender = self.reddit.user.me().name
    # Guard against accidentally sending under the wrong logged-in account.
    if sender != 'transcribersofreddit':
        # BUG FIX: the two fragments previously joined without a space,
        # producing "...as <sender>instead of...".
        raise InvalidUser(f'Attempting to send message as {sender} '
                          f'instead of the official ToR bot')

    if message_id:
        self.reddit.message(message_id).reply(body)
    elif to:
        self.reddit.redditor(to).message(subject, body)
    else:
        raise NotImplementedError(
            "Must give either a value for ``message_id`` or ``to``"
        )
def process_mod_intervention(comment: Comment):
    """
    Triggers an alert in Slack with a link to the comment if there is something
    offensive or in need of moderator intervention
    """
    send_to_slack = signature('tor_worker.role_anyone.tasks.send_to_slack')

    # Collect the first matching substring for every flagged pattern.
    phrases = []
    for regex in MOD_SUPPORT_PHRASES:
        match = regex.search(comment.body)
        if not match:
            continue
        phrases.append(match.group())

    # Idiomatic emptiness check (was ``len(phrases) == 0``).
    if not phrases:
        # Nothing offensive here, why did this function get triggered?
        return

    # Wrap each phrase in double-quotes (") and commas in between
    phrase = '"' + '", "'.join(phrases) + '"'

    title = 'Mod Intervention Needed'
    message = f'Detected use of {phrase} <{comment.submission.shortlink}>'

    send_to_slack.delay(
        f':rotating_light::rotating_light: {title} '
        f':rotating_light::rotating_light:\n\n'
        f'{message}',
        '#general'
    )
@app.task(bind=True, ignore_result=True, base=Task)
def process_comment(self, comment_id):
    """
    Processes a notification of comment being made, routing to other tasks as
    is deemed necessary
    """
    accept_code_of_conduct = signature('tor_worker.role_anyone.tasks.'
                                       'accept_code_of_conduct')
    unhandled_comment = signature('tor_worker.role_anyone.tasks.'
                                  'unhandled_comment')
    claim_post = signature('tor_worker.role_moderator.tasks.claim_post')

    reply = self.reddit.comment(comment_id)

    # Ignore our own bots' comments to avoid reply loops.
    if reply.author.name in OUR_BOTS:
        return

    body = reply.body.lower()

    # This should just be a filter that doesn't stop further processing
    process_mod_intervention(reply)

    # Route based on what kind of bot comment this reply targets.
    if is_code_of_conduct(reply.parent()):
        if re.search(r'\bi accept\b', body):
            accept_code_of_conduct.delay(reply.author.name)
            claim_post.delay(reply.id, verify=False, first_claim=True)
        else:
            unhandled_comment.delay(
                comment_id=reply.id,
                body=reply.body
            )
    elif is_claimable_post(reply.parent()):
        if re.search(r'\bclaim\b', body):
            claim_post.delay(reply.id)
        else:
            unhandled_comment.delay(
                comment_id=reply.id,
                body=reply.body
            )
    elif is_claimed_post_response(reply.parent()):
        # NOTE(review): 'deno' is presumably a deliberately-accepted
        # transposition typo of 'done' -- confirm before "fixing".
        if re.search(r'\b(?:done|deno)\b', body):  # pragma: no coverage
            # TODO: Fill out completed post scenario and remove pragma directive
            # mark_post_complete.delay(reply.id)
            pass
        # NOTE(review): the lookahead '(?=<^|\W)' looks like a typo of
        # '(?:^|\W)'; as written it effectively matches '!override'
        # anywhere in the body -- confirm intent before changing.
        elif re.search(r'(?=<^|\W)!override\b', body):  # pragma: no coverage
            # TODO: Fill out override scenario and remove pragma directive
            pass
        else:
            unhandled_comment.delay(
                comment_id=reply.id,
                body=reply.body
            )
@app.task(bind=True, ignore_result=True, base=Task)
def claim_post(self, comment_id, verify=True, first_claim=False):
    """
    Claims a post on behalf of the commenting volunteer.

    Bundles two steps: flipping the submission flair from ``Unclaimed`` to
    ``In Progress`` and replying to the volunteer that the claim succeeded.
    When ``verify`` is true, the volunteer must already have accepted the
    code of conduct (tracked in the ``accepted_CoC`` redis set).
    """
    flair_update = signature('tor_worker.role_moderator.tasks.'
                             'update_post_flair')
    claim = self.reddit.comment(comment_id)

    coc_accepted = self.redis.sismember('accepted_CoC', claim.author.name)
    if verify and not coc_accepted:
        raise InvalidState(f'Unable to claim a post without first accepting '
                           f'the code of conduct')

    if not is_claimable_post(claim.parent(), override=True):
        raise InvalidState(f'Unable to claim a post that is not claimable. '
                           f'https://redd.it/{claim.id}')

    flair_update.delay(claim.submission.id, 'In Progress')

    if first_claim:
        # TODO: replace with more first-time friendly of a response
        post_comment(repliable=claim, body=bot_msg['claim_success'])
    else:
        post_comment(repliable=claim, body=bot_msg['claim_success'])
@app.task(bind=True, ignore_result=True, base=Task)
def post_to_tor(self, sub, title, link, domain, post_id, media_link=None):
    """
    Posts a transcription to the /r/ToR front page

    Params:
        sub - Subreddit name that this comes from
        title - The original title of the post from the other subreddit
        link - The link to the original post from the other subreddit
        domain - The domain of the original post's linked content
        post_id - The id of the original post, used for completion tracking
        media_link - The link to the media in need of transcription
    """
    if not media_link:
        # ``log.warn`` is a deprecated alias; use ``log.warning``.
        log.warning(f'Attempting to post content with no media link. '
                    f'({sub}: [{domain}] {repr(title)})')
        return

    # If youtube transcript is found, skip posting it to /r/ToR
    if has_youtube_captions(media_link):
        log.info(f'Found youtube captions for {media_link}... skipped.')
        self.redis.sadd('complete_post_ids', post_id)
        self.redis.incr('total_posted', amount=1)
        self.redis.incr('total_new', amount=1)
        return

    update_post_flair = signature('tor_worker.role_moderator.tasks.'
                                  'update_post_flair')
    config = Config.subreddit(sub)

    # Keep the cross-post title within Reddit's length constraints.
    title = textwrap.shorten(title, width=250, placeholder='...')
    post_type = config.templates.url_type(domain)
    post_template = config.templates.content(domain)
    footer = config.templates.footer

    submission = self.reddit.subreddit('TranscribersOfReddit').submit(
        title=f'{sub} | {post_type.title()} | "{title}"',
        url=link,
    )
    update_post_flair.delay(submission.id, 'Unclaimed')

    # Add completed post to tracker
    self.redis.sadd('complete_post_ids', post_id)
    self.redis.incr('total_posted', amount=1)
    self.redis.incr('total_new', amount=1)

    # TODO: OCR job for this comment
    reply = bot_msg['intro_comment'].format(
        post_type=post_type,
        formatting=post_template,
        footer=footer,
        message_url=message_link(subject='General Questions'),
    )
    post_comment(repliable=submission, body=reply)
| 36.160105 | 80 | 0.617769 | from tor_worker import OUR_BOTS
from tor_worker.config import Config
from tor_worker.context import (
InvalidState,
is_claimable_post,
is_claimed_post_response,
is_code_of_conduct,
has_youtube_captions,
)
from tor_worker.user_interaction import (
format_bot_response as _,
message_link,
responses as bot_msg,
post_comment,
)
from tor_worker.task_base import Task, InvalidUser
from celery.utils.log import get_task_logger
from celery import (
current_app as app,
signature,
)
from praw.models import Comment
import re
import textwrap
log = get_task_logger(__name__)
MOD_SUPPORT_PHRASES = [
re.compile('fuck', re.IGNORECASE),
re.compile('unclaim', re.IGNORECASE),
re.compile('undo', re.IGNORECASE),
re.compile('(?:good|bad) bot', re.IGNORECASE),
]
@app.task(bind=True, ignore_result=True, base=Task)
def check_inbox(self):
"""
Checks all unread messages in the inbox, routing the responses to other
queues. This effectively transfers tasks from Reddit's inbox to our internal
task queuing system, reducing the required API calls.
"""
send_to_slack = signature('tor_worker.role_anyone.tasks.send_to_slack')
send_bot_message = signature('tor_worker.role_moderator.tasks.'
'send_bot_message')
process_comment = signature('tor_worker.role_moderator.tasks.'
'process_comment')
process_admin_command = signature('tor_worker.role_moderator.tasks.'
'process_admin_command')
for item in reversed(list(self.reddit.inbox.unread(limit=None))):
# NOTE: We compare the `kind` attribute due to testing issues with
# `isinstance()`. We can mock out the objects with MagicMock now and
# have fewer classes loaded in this context.
if item.kind == 't1': # Comment
if 'username mention' in item.subject.lower():
log.info(f'Username mention by /u/{item.author.name}')
send_bot_message.delay(to=item.author.name,
subject='Username Call',
body=_(bot_msg['mention']))
else:
process_comment.delay(item.id)
elif item.kind == 't4': # Message
# Very rarely we may actually get a message from Reddit admins, in
# which case there will be no author attribute
if item.author is None:
log.info(f'Received message from the admins: {item.subject}')
send_to_slack.delay(
f'*Incoming message without an author*\n\n'
f'*Subject:* {item.subject}\n'
f'*Body:*\n\n'
f'{item.body}',
'#general'
)
elif item.subject and item.subject[0] == '!':
process_admin_command.delay(author=item.author.name,
subject=item.subject,
body=item.body,
message_id=item.id)
else:
log.info(f'Received unhandled message from '
f'/u/{item.author.name}. Subject: '
f'{repr(item.subject)}')
send_to_slack.delay(
f'Unhandled message by [/u/{item.author.name}]'
f'(https://reddit.com/user/{item.author.name})'
f'\n\n'
f'*Subject:* {item.subject}'
f'\n\n'
f'{item.body}'
'#general'
)
else: # pragma: no cover
# There shouldn't be any other types than Message and Comment,
# but on the off-chance there is, we'll log what it is here.
send_to_slack.delay(
f'Unhandled, unknown inbox item: {type(item).__name__}',
'#botstuffs'
)
log.warning(f'Unhandled, unknown inbox item: {type(item).__name__}')
item.mark_read()
@app.task(bind=True, ignore_result=True, base=Task)
def process_admin_command(self, author, subject, body, message_id):
"""
This task is the basis for all other admin commands. It does not farm it out
to another task per command, rather it runs it in the existing task.
Steps:
- Check for permissions
- Retrieve associated function as a callable
- Call said function with the commands (author, body, svc)
- Send the response from the function as a reply back to the invoking
message.
"""
send_bot_message = signature('tor_worker.role_moderator.tasks.'
'send_bot_message')
# It only makes sense to have this be scoped to /r/ToR
config = Config.subreddit('TranscribersOfReddit')
command_name = subject.lower()[1:] # Lowercase and remove the initial '!'
if not config.commands.allows(command_name).by_user(author):
log.warning(f'DENIED: {author} is not allowed to call {command_name}')
# TODO: Send to slack
return
log.info(f'{author} called {command_name} with args {repr(body)}')
func = config.commands.func(command_name)
response = func(author=author, body=body, svc=self)
log.debug(f'Responding to {command_name} with {repr(body)} -> '
f'{repr(response)}.')
send_bot_message.delay(body=_(response), message_id=message_id)
@app.task(bind=True, ignore_result=True, base=Task)
def update_post_flair(self, submission_id, flair):
"""
Updates the flair of the original post to the pre-existing flair template id
given the string value of the flair. If there is no pre-existing styling for
that flair choice, task will error out with ``NotImplementedError``.
EXAMPLE:
``flair`` is "unclaimed", sets the post to "Unclaimed" with pre-existing
styling
"""
post = self.reddit.submission(submission_id)
for choice in post.flair.choices():
if choice['flair_text'].lower() == flair.lower():
# NOTE: This is hacky if we have multiple styles for the same flair.
# That said, we shouldn't rely on visual style if we're being
# truly accessible...
post.flair.select(
flair_template_id=choice['flair_template_id']
)
return
raise NotImplementedError(f"Unknown flair, {repr(flair)}, for post")
@app.task(bind=True, ignore_result=True, base=Task)
def send_bot_message(self, body, message_id=None, to=None,
subject='Just bot things...'):
"""
Sends a message as /u/TranscribersOfReddit
If this is intended to be a reply to an existing message:
- fill out the ``message_id`` param with a ref to the previous message
If no previous context:
- fill out the ``to`` param with the author's username
- fill out the ``subject`` param with the subject of the message
One of these _must_ be done.
"""
sender = self.reddit.user.me().name
if sender != 'transcribersofreddit':
raise InvalidUser(f'Attempting to send message as {sender}'
f'instead of the official ToR bot')
if message_id:
self.reddit.message(message_id).reply(body)
elif to:
self.reddit.redditor(to).message(subject, body)
else:
raise NotImplementedError(
"Must give either a value for ``message_id`` or ``to``"
)
def process_mod_intervention(comment: Comment):
"""
Triggers an alert in Slack with a link to the comment if there is something
offensive or in need of moderator intervention
"""
send_to_slack = signature('tor_worker.role_anyone.tasks.send_to_slack')
phrases = []
for regex in MOD_SUPPORT_PHRASES:
matches = regex.search(comment.body)
if not matches:
continue
phrases.append(matches.group())
if len(phrases) == 0:
# Nothing offensive here, why did this function get triggered?
return
# Wrap each phrase in double-quotes (") and commas in between
phrase = '"' + '", "'.join(phrases) + '"'
title = 'Mod Intervention Needed'
message = f'Detected use of {phrase} <{comment.submission.shortlink}>'
send_to_slack.delay(
f':rotating_light::rotating_light: {title} '
f':rotating_light::rotating_light:\n\n'
f'{message}',
'#general'
)
@app.task(bind=True, ignore_result=True, base=Task)
def process_comment(self, comment_id):
"""
Processes a notification of comment being made, routing to other tasks as
is deemed necessary
"""
accept_code_of_conduct = signature('tor_worker.role_anyone.tasks.'
'accept_code_of_conduct')
unhandled_comment = signature('tor_worker.role_anyone.tasks.'
'unhandled_comment')
claim_post = signature('tor_worker.role_moderator.tasks.claim_post')
reply = self.reddit.comment(comment_id)
if reply.author.name in OUR_BOTS:
return
body = reply.body.lower()
# This should just be a filter that doesn't stop further processing
process_mod_intervention(reply)
if is_code_of_conduct(reply.parent()):
if re.search(r'\bi accept\b', body):
accept_code_of_conduct.delay(reply.author.name)
claim_post.delay(reply.id, verify=False, first_claim=True)
else:
unhandled_comment.delay(
comment_id=reply.id,
body=reply.body
)
elif is_claimable_post(reply.parent()):
if re.search(r'\bclaim\b', body):
claim_post.delay(reply.id)
else:
unhandled_comment.delay(
comment_id=reply.id,
body=reply.body
)
elif is_claimed_post_response(reply.parent()):
if re.search(r'\b(?:done|deno)\b', body): # pragma: no coverage
# TODO: Fill out completed post scenario and remove pragma directive
# mark_post_complete.delay(reply.id)
pass
elif re.search(r'(?=<^|\W)!override\b', body): # pragma: no coverage
# TODO: Fill out override scenario and remove pragma directive
pass
else:
unhandled_comment.delay(
comment_id=reply.id,
body=reply.body
)
@app.task(bind=True, ignore_result=True, base=Task)
def claim_post(self, comment_id, verify=True, first_claim=False):
    """
    Macro for a couple tasks:
    - Update flair: ``Unclaimed`` -> ``In Progress``
    - Post response: ``Hey, you have the post!``
    """
    flair_task = signature('tor_worker.role_moderator.tasks.'
                           'update_post_flair')
    comment = self.reddit.comment(comment_id)

    # A claimer must have accepted the code of conduct, unless verification
    # is explicitly skipped (e.g. the first claim right after acceptance).
    has_accepted = self.redis.sismember('accepted_CoC', comment.author.name)
    if verify and not has_accepted:
        raise InvalidState(f'Unable to claim a post without first accepting '
                           f'the code of conduct')

    # Only posts currently in a claimable state may be claimed.
    if not is_claimable_post(comment.parent(), override=True):
        raise InvalidState(f'Unable to claim a post that is not claimable. '
                           f'https://redd.it/{comment.id}')

    flair_task.delay(comment.submission.id, 'In Progress')

    if first_claim:
        # TODO: replace with more first-time friendly of a response
        response = bot_msg['claim_success']
    else:
        response = bot_msg['claim_success']
    post_comment(repliable=comment, body=response)
@app.task(bind=True, ignore_result=True, base=Task)
def post_to_tor(self, sub, title, link, domain, post_id, media_link=None):
    """
    Posts a transcription to the /r/ToR front page

    Params:
        sub - Subreddit name that this comes from
        title - The original title of the post from the other subreddit
        link - The link to the original post from the other subreddit
        domain - The domain of the original post's linked content
        post_id - Id of the original post, used for completion tracking
        media_link - The link to the media in need of transcription
    """
    if not media_link:
        # BUGFIX: ``Logger.warn`` is a deprecated alias; use ``warning``.
        log.warning(f'Attempting to post content with no media link. '
                    f'({sub}: [{domain}] {repr(title)})')
        return
    # If youtube transcript is found, skip posting it to /r/ToR
    if has_youtube_captions(media_link):
        log.info(f'Found youtube captions for {media_link}... skipped.')
        # Still mark the post as handled so it is not re-queued.
        self.redis.sadd('complete_post_ids', post_id)
        self.redis.incr('total_posted', amount=1)
        self.redis.incr('total_new', amount=1)
        return
    update_post_flair = signature('tor_worker.role_moderator.tasks.'
                                  'update_post_flair')
    config = Config.subreddit(sub)
    # Reddit titles are capped; shorten with an ellipsis placeholder.
    title = textwrap.shorten(title, width=250, placeholder='...')
    post_type = config.templates.url_type(domain)
    post_template = config.templates.content(domain)
    footer = config.templates.footer
    submission = self.reddit.subreddit('TranscribersOfReddit').submit(
        title=f'{sub} | {post_type.title()} | "{title}"',
        url=link,
    )
    update_post_flair.delay(submission.id, 'Unclaimed')
    # Add completed post to tracker
    self.redis.sadd('complete_post_ids', post_id)
    self.redis.incr('total_posted', amount=1)
    self.redis.incr('total_new', amount=1)
    # TODO: OCR job for this comment
    reply = bot_msg['intro_comment'].format(
        post_type=post_type,
        formatting=post_template,
        footer=footer,
        message_url=message_link(subject='General Questions'),
    )
    post_comment(repliable=submission, body=reply)
| 0 | 0 | 0 |
b0a56c0fc71298f5b26bd3d7813f83c443e2275e | 4,005 | py | Python | neuromorpholib/neuromorpho/__init__.py | j6k4m8/neuromorpholib | 816eac0ae1214ba56dc378b47ab7af154efff0bf | [
"Apache-2.0"
] | 2 | 2018-08-27T17:50:02.000Z | 2018-10-18T11:47:30.000Z | neuromorpholib/neuromorpho/__init__.py | j6k4m8/neuromorpholib | 816eac0ae1214ba56dc378b47ab7af154efff0bf | [
"Apache-2.0"
] | 4 | 2021-01-26T17:49:52.000Z | 2021-12-07T19:42:24.000Z | neuromorpholib/neuromorpho/__init__.py | j6k4m8/neuromorpholib | 816eac0ae1214ba56dc378b47ab7af154efff0bf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from typing import List, Union
import requests
from .. import swc
class NeuroMorpho:
    """
    A class that manages remote queries and downloads from a NeuronMorphology
    server, such as neuromorpho.org.

    Search queries are validated against the field list advertised by the
    server's ``api/neuron/fields`` endpoint, fetched once at construction.
    """

    def __init__(self, cache_location: str = "~/.neuromorphocache/") -> None:
        """
        Construct a new NeuroMorpho.

        Arguments:
            cache_location (str): Where to store SWC files after download
        """
        # In-memory cache dict; not consulted by the methods below —
        # presumably reserved for future memoization of downloads.
        self.cache = {}
        self.cache_location = cache_location
        self.base_url = "http://neuromorpho.org/"
        # One network round-trip at construction time to learn which
        # search keys the server accepts (used by `search`).
        self._permitted_fields = self.get_json("api/neuron/fields")["Neuron Fields"]

    def url(self, ext: str = "") -> str:
        """
        Construct a URL with the base_url of this remote as prefix.

        A leading "/" on ``ext`` is stripped so the result never contains
        a doubled slash.
        """
        ext = ext.lstrip("/")
        return self.base_url + ext

    def get_json(self, ext: str) -> dict:
        """
        GET ``ext`` from the remote and decode the JSON response.

        Propagates whatever ``requests`` / JSON decoding raise on failure;
        no retries are attempted.
        """
        res = requests.get(self.url(ext))
        return res.json()

    def search(self, query: dict, page: int = 0, limit: int = None) -> List:
        """
        Search the remote for a query (dict), recursing through result pages.

        Arguments:
            query (dict): Mapping of search field to value; every key must
                be in the server's permitted-fields list or ValueError is
                raised.
            page (int): Zero-based page to start from.
            limit (int): Optional soft cap on result count (assumes a page
                size of 50 — TODO confirm against the API).

        Returns:
            List: Neuron resource dicts; [] for a page with no results.
        """
        for k, _ in query.items():
            if k not in self._permitted_fields:
                raise ValueError(
                    "Key {} is not a valid search parameter!\n".format(k)
                    + "Must be one of:\n{}".format(self._permitted_fields)
                )

        # NOTE(review): query_string is joined WITHOUT a leading separator,
        # so the [1:] below drops the first character of the first
        # "fq=..." pair, sending "q=..." instead — verify against the API.
        query_string = "&".join(["fq={}:{}".format(k, v) for k, v in query.items()])
        listing = self.get_json(
            "api/neuron/select/?" + query_string[1:] + "&page={}".format(page)
        )
        try:
            results = listing["_embedded"]["neuronResources"]
            # Progress reporting goes straight to stdout.
            print(
                "Downloading page {} for {} neurons, ending in {}".format(
                    page, len(results), results[-1]["neuron_name"]
                )
            )
            neuron_listing = results
        except KeyError:
            # Page had no embedded results: treat as end of pagination.
            return []

        # NOTE(review): ``totalPages >= page`` also holds on the last page,
        # so recursion actually terminates via the empty-page branch above
        # — confirm this is intended.
        if (
            "page" in listing
            and "totalPages" in listing["page"]
            and listing["page"]["totalPages"] >= page
        ):
            if limit is None or len(neuron_listing) < limit:
                if limit is None:
                    neuron_listing += self.search(query, page=page + 1)
                else:
                    # Each page is assumed to contain 50 results.
                    neuron_listing += self.search(
                        query, page=page + 1, limit=limit - 50
                    )
            else:
                return neuron_listing
        return neuron_listing

    def download_swc(
        self, archive: str, neuron_name: str = None, text_only: bool = False
    ) -> Union[str, "swc.NeuronMorphology"]:
        """
        Download a SWC file (or SWC string).

        Optionally convert into a NeuroMorpho object.

        Arguments:
            archive: Archive name; may instead be a result dict from
                ``search`` or an integer neuron id, both of which carry
                the archive/name pair.
            neuron_name (str): Neuron name within the archive.
            text_only (bool): When True, return raw SWC text instead of a
                parsed ``swc.NeuronMorphology``.

        Raises:
            ValueError: If the server answers with an HTML error page.
        """
        # Accept a search-result dict in place of (archive, neuron_name).
        if neuron_name is None and isinstance(archive, dict):
            return self.download_swc(
                archive["archive"], archive["neuron_name"], text_only
            )
        # Accept a bare neuron id; resolve it to archive/name first.
        if neuron_name is None and isinstance(archive, int):
            data = self.get_neuron_info(archive)
            return self.download_swc(data["archive"], data["neuron_name"], text_only)
        ext = "dableFiles/{}/CNG%20version/{}.CNG.swc".format(
            archive.lower(), neuron_name
        )
        res = requests.get(self.url(ext))
        # Missing files come back as an HTML error page with HTTP 200,
        # so sniff the body rather than the status code.
        if "<html>" in res.text:
            raise ValueError("Failed to fetch from {}.".format(ext))
        if text_only:
            return res.text
        return swc.read_swc(res.text)

    def get_neuron_info(self, neuron_name: Union[str, int]) -> dict:
        """
        Fetch a neuron's metadata record by name or numeric id.

        Mirrors http://www.neuromorpho.org/api/neuron/name/{name} (and the
        ``api/neuron/id/{id}`` variant for integer input).
        """
        if isinstance(neuron_name, int):
            return self.get_json("api/neuron/id/{}".format(neuron_name))
        else:
            return self.get_json("api/neuron/name/{}".format(neuron_name))
| 31.535433 | 85 | 0.535081 | #!/usr/bin/env python3
from typing import List, Union
import requests
from .. import swc
class NeuroMorpho:
    """
    A class that manages remote queries and downloads from a NeuronMorphology
    server, such as neuromorpho.org.

    Search queries are validated against the field list advertised by the
    server's ``api/neuron/fields`` endpoint, fetched once at construction.
    """

    def __init__(self, cache_location: str = "~/.neuromorphocache/") -> None:
        """
        Construct a new NeuroMorpho.

        Arguments:
            cache_location (str): Where to store SWC files after download
        """
        # In-memory cache dict; not consulted by the methods below —
        # presumably reserved for future memoization of downloads.
        self.cache = {}
        self.cache_location = cache_location
        self.base_url = "http://neuromorpho.org/"
        # One network round-trip at construction time to learn which
        # search keys the server accepts (used by `search`).
        self._permitted_fields = self.get_json("api/neuron/fields")["Neuron Fields"]

    def url(self, ext: str = "") -> str:
        """
        Construct a URL with the base_url of this remote as prefix.

        A leading "/" on ``ext`` is stripped so the result never contains
        a doubled slash.
        """
        ext = ext.lstrip("/")
        return self.base_url + ext

    def get_json(self, ext: str) -> dict:
        """
        GET ``ext`` from the remote and decode the JSON response.

        Propagates whatever ``requests`` / JSON decoding raise on failure;
        no retries are attempted.
        """
        res = requests.get(self.url(ext))
        return res.json()

    def search(self, query: dict, page: int = 0, limit: int = None) -> List:
        """
        Search the remote for a query (dict), recursing through result pages.

        Arguments:
            query (dict): Mapping of search field to value; every key must
                be in the server's permitted-fields list or ValueError is
                raised.
            page (int): Zero-based page to start from.
            limit (int): Optional soft cap on result count (assumes a page
                size of 50 — TODO confirm against the API).

        Returns:
            List: Neuron resource dicts; [] for a page with no results.
        """
        for k, _ in query.items():
            if k not in self._permitted_fields:
                raise ValueError(
                    "Key {} is not a valid search parameter!\n".format(k)
                    + "Must be one of:\n{}".format(self._permitted_fields)
                )

        # NOTE(review): query_string is joined WITHOUT a leading separator,
        # so the [1:] below drops the first character of the first
        # "fq=..." pair, sending "q=..." instead — verify against the API.
        query_string = "&".join(["fq={}:{}".format(k, v) for k, v in query.items()])
        listing = self.get_json(
            "api/neuron/select/?" + query_string[1:] + "&page={}".format(page)
        )
        try:
            results = listing["_embedded"]["neuronResources"]
            # Progress reporting goes straight to stdout.
            print(
                "Downloading page {} for {} neurons, ending in {}".format(
                    page, len(results), results[-1]["neuron_name"]
                )
            )
            neuron_listing = results
        except KeyError:
            # Page had no embedded results: treat as end of pagination.
            return []

        # NOTE(review): ``totalPages >= page`` also holds on the last page,
        # so recursion actually terminates via the empty-page branch above
        # — confirm this is intended.
        if (
            "page" in listing
            and "totalPages" in listing["page"]
            and listing["page"]["totalPages"] >= page
        ):
            if limit is None or len(neuron_listing) < limit:
                if limit is None:
                    neuron_listing += self.search(query, page=page + 1)
                else:
                    # Each page is assumed to contain 50 results.
                    neuron_listing += self.search(
                        query, page=page + 1, limit=limit - 50
                    )
            else:
                return neuron_listing
        return neuron_listing

    def download_swc(
        self, archive: str, neuron_name: str = None, text_only: bool = False
    ) -> Union[str, "swc.NeuronMorphology"]:
        """
        Download a SWC file (or SWC string).

        Optionally convert into a NeuroMorpho object.

        Arguments:
            archive: Archive name; may instead be a result dict from
                ``search`` or an integer neuron id, both of which carry
                the archive/name pair.
            neuron_name (str): Neuron name within the archive.
            text_only (bool): When True, return raw SWC text instead of a
                parsed ``swc.NeuronMorphology``.

        Raises:
            ValueError: If the server answers with an HTML error page.
        """
        # Accept a search-result dict in place of (archive, neuron_name).
        if neuron_name is None and isinstance(archive, dict):
            return self.download_swc(
                archive["archive"], archive["neuron_name"], text_only
            )
        # Accept a bare neuron id; resolve it to archive/name first.
        if neuron_name is None and isinstance(archive, int):
            data = self.get_neuron_info(archive)
            return self.download_swc(data["archive"], data["neuron_name"], text_only)
        ext = "dableFiles/{}/CNG%20version/{}.CNG.swc".format(
            archive.lower(), neuron_name
        )
        res = requests.get(self.url(ext))
        # Missing files come back as an HTML error page with HTTP 200,
        # so sniff the body rather than the status code.
        if "<html>" in res.text:
            raise ValueError("Failed to fetch from {}.".format(ext))
        if text_only:
            return res.text
        return swc.read_swc(res.text)

    def get_neuron_info(self, neuron_name: Union[str, int]) -> dict:
        """
        Fetch a neuron's metadata record by name or numeric id.

        Mirrors http://www.neuromorpho.org/api/neuron/name/{name} (and the
        ``api/neuron/id/{id}`` variant for integer input).
        """
        if isinstance(neuron_name, int):
            return self.get_json("api/neuron/id/{}".format(neuron_name))
        else:
            return self.get_json("api/neuron/name/{}".format(neuron_name))
| 0 | 0 | 0 |
31545a93cdcbd2697169a81aace1542cac812e3f | 3,466 | py | Python | Archive/Second_Simulator/gmm_mdp_simulator.py | AIandSocialGoodLab/learningplan | c4fbed3c109717740b16b1931d25c2ca00f3a0e8 | [
"MIT"
] | 3 | 2019-06-07T23:18:38.000Z | 2020-04-30T16:59:39.000Z | Archive/Second_Simulator/gmm_mdp_simulator.py | AIandSocialGoodLab/learningplan | c4fbed3c109717740b16b1931d25c2ca00f3a0e8 | [
"MIT"
] | null | null | null | Archive/Second_Simulator/gmm_mdp_simulator.py | AIandSocialGoodLab/learningplan | c4fbed3c109717740b16b1931d25c2ca00f3a0e8 | [
"MIT"
] | null | null | null | import numpy as np
import sklearn.mixture as mixture
import math, pickle, json, copy, random, ast
#Note: We assume that each action takes 1 time unit.
class GMMMDPSimulator():
"""
Arguments:
- gmm_pickled: Name of a txt file containing a pickled GMM
- transition_matrix: Name of a npy file containing transition matrix
- action_index: Dictionary mapping action names to indices in transition_matrix
- revealKC: Dictionary mapping action names to lists of proficiency levels to reveal
"""
#Wherever necessary, returns proficiencies in relevant KCs
#Returns None otherwise | 32.092593 | 107 | 0.746394 | import numpy as np
import sklearn.mixture as mixture
import math, pickle, json, copy, random, ast
#Note: We assume that each action takes 1 time unit.
class GMMMDPSimulator():
    """
    Simulates a student for an MDP-based learning plan.

    The initial per-KC proficiency vector is sampled from a pickled
    Gaussian mixture model; each action then moves the student through a
    per-action Markov transition matrix over encoded proficiency states.

    Arguments:
    - num_kc: Number of knowledge components (KCs)
    - num_proficiency_level: Number of discrete proficiency levels per KC
    - gmm_pickled: Name of a txt file containing a pickled GMM
    - transition_matrix: Name of a npy file containing transition matrix
    - action_index: Dictionary mapping action names to indices in transition_matrix
    - revealKC: Dictionary mapping action names to lists of proficiency levels to reveal
    """
    def __init__(self, num_kc, num_proficiency_level, gmm_pickled, transition_matrix, action_index, revealKC):
        self.num_kc = num_kc
        self.num_proficiency_level = num_proficiency_level
        # Set up gmm and transition matrix.  `with` guarantees the pickle
        # file handle is closed even if unpickling raises.
        with open(gmm_pickled, "rb") as gmm_file:
            self.initial_prof_distribution = pickle.load(gmm_file)
        self.P = np.load(transition_matrix)
        # Set up actions and proficiencies to reveal (deep-copied so later
        # caller mutations cannot corrupt the simulator's configuration).
        self.action_index = copy.deepcopy(action_index)
        self.revealed_proficiencies = copy.deepcopy(revealKC)
        self.cur_proficiency = None
        self.initial_proficiency = None
        self.reset()

    def make_valid_prof(self, l):
        """Clamp raw (possibly fractional) levels into [0, max] and round to int."""
        l = np.minimum(l, self.num_proficiency_level - 1)
        l = np.maximum(l, 0)
        return (l + 0.5).astype(int)

    def reset(self):
        """Draw a fresh initial proficiency vector from the GMM."""
        self.cur_proficiency = self.initial_prof_distribution.sample()[0][0]
        self.cur_proficiency = self.make_valid_prof(self.cur_proficiency)
        self.initial_proficiency = copy.deepcopy(self.cur_proficiency)

    def reset_prof(self, initial_proficiency):
        """Reset the episode to a caller-supplied proficiency vector."""
        self.cur_proficiency = copy.deepcopy(initial_proficiency)
        self.initial_proficiency = copy.deepcopy(self.cur_proficiency)

    def get_initial_proficiency(self):
        return self.initial_proficiency

    def get_action_set(self):
        return self.revealed_proficiencies.keys()

    # Wherever necessary, returns proficiencies in relevant KCs
    # Returns None otherwise
    def make_action(self, action_name):
        """
        Apply one action and advance the simulated student's state.

        Returns:
            - the full proficiency vector for 'Final Exam' (also resets the
              episode, modelling the terminal state)
            - a dict {kc: level} for actions that reveal proficiencies
            - None when the action reveals nothing
        """
        num_kc = self.num_kc
        num_plevels = self.num_proficiency_level

        # The following two functions are adapted from parseDatasheet.py.
        # BUGFIX: they previously referenced undefined module globals
        # NUM_PLEVELS / NUM_KC (NameError at runtime); use the instance
        # configuration instead.
        def encode_state(state):
            # Proficiency vector -> base-`num_plevels` integer.
            res = 0
            for i in range(len(state)):
                res += state[i] * num_plevels ** (len(state) - 1 - i)
            return res

        def decode_state(num):
            # Inverse of encode_state.  BUGFIX: floor division keeps the
            # quotient an integer; the original `/` produced floats in
            # Python 3, corrupting subsequent `%` results.
            res = [0] * num_kc
            for i in range(num_kc):
                res[-1 - i] = num % num_plevels
                num //= num_plevels
            return res

        # The following function is copied from simulator.py.
        def gen_random_output_index(l):
            # Sample an index from the discrete probability vector `l`.
            ACCURACY = 0.001
            if 1 - sum(l) > ACCURACY:
                print("Invalid Input for gen_random_output_index!")
                return
            p = random.random()
            index = 0
            while p > 0:
                p -= l[index]
                index += 1
            return index - 1

        if action_name == 'Prior Assessment Test':
            raise Exception("Prior Assessment Test: Not a valid action for sim 2!")
        if action_name == 'Final Exam':
            # Immediately go to terminal state and restart episode
            full_proficiencies = copy.deepcopy(self.cur_proficiency)
            self.reset()
            return full_proficiencies

        # Update current proficiency by sampling from P
        action_id = self.action_index[action_name]
        transition_prof = self.P[action_id][encode_state(self.cur_proficiency)]
        new_state_index = gen_random_output_index(transition_prof)
        new_prof = decode_state(new_state_index)
        self.cur_proficiency = new_prof

        # Reveal certain proficiencies, in the case of assessment tests.
        # NOTE(review): indexed by action *id* although the class docstring
        # says revealKC maps action *names* — confirm the key type.
        prof_list = self.revealed_proficiencies[action_id]
        if len(prof_list) == 0:
            return None
        prof_dict = {}
        for kc in prof_list:
            prof_dict[kc] = self.cur_proficiency[kc]
        return prof_dict
ccf755a4dafd0109a562f5da605b77762015f904 | 2,948 | py | Python | fido2_u2f/up_check.py | jshuo/pykey | fa332bd571cb5b885c52cd35325b03716779729a | [
"MIT"
] | null | null | null | fido2_u2f/up_check.py | jshuo/pykey | fa332bd571cb5b885c52cd35325b03716779729a | [
"MIT"
] | null | null | null | fido2_u2f/up_check.py | jshuo/pykey | fa332bd571cb5b885c52cd35325b03716779729a | [
"MIT"
] | null | null | null | from micropython import const
from time import sleep, monotonic
from board import LED1, BUTTON1
from digitalio import DigitalInOut, Direction, Pull
from wink import flash_led
from ctap_errors import CTAP2_OK, CTAP2_ERR_ACTION_TIMEOUT, CTAP2_ERR_KEEPALIVE_CANCEL
DELAY_TIME = const(10) # 10 ms
WINK_FREQ = const(10) # Hz
| 30.708333 | 86 | 0.588195 | from micropython import const
from time import sleep, monotonic
from board import LED1, BUTTON1
from digitalio import DigitalInOut, Direction, Pull
from wink import flash_led
from ctap_errors import CTAP2_OK, CTAP2_ERR_ACTION_TIMEOUT, CTAP2_ERR_KEEPALIVE_CANCEL
DELAY_TIME = const(10) # 10 ms
WINK_FREQ = const(10) # Hz
def up_check(channel, led_type=LED1):
    """
    Block until the user proves presence by pressing BUTTON1.

    The LED blinks while waiting.  Roughly every 70 ms the CTAP channel
    (if given) is checked for host cancellation and sent a keepalive with
    STATUS_UPNEEDED.

    Returns:
        CTAP2_OK                    on button press,
        CTAP2_ERR_ACTION_TIMEOUT    after 10 seconds without a press,
        CTAP2_ERR_KEEPALIVE_CANCEL  if the host cancels the request.
    """
    led = DigitalInOut(led_type)
    led.direction = Direction.OUTPUT
    # Button is active-low: pull-up enabled, pressed reads False.
    button = DigitalInOut(BUTTON1)
    button.direction = Direction.INPUT
    button.pull = Pull.UP
    MAX_TIME = const(10000)  # 10 seconds, in ms of accumulated DELAY_TIME
    counter = 0       # elapsed wait time in ms
    ka_counter = 0    # ms since the last keepalive/cancel check
    while True:
        if button.value is False:
            # Pressed: release the pins before returning.
            led.deinit()
            button.deinit()
            return CTAP2_OK
        if counter >= MAX_TIME:
            led.deinit()
            button.deinit()
            return CTAP2_ERR_ACTION_TIMEOUT
        # Derive the LED blink phase from the elapsed time.
        if ((counter * WINK_FREQ) // 2000) % 2 == 0:
            led.value = False
        else:
            led.value = True
        sleep(DELAY_TIME / 1000)
        counter += DELAY_TIME
        ka_counter += DELAY_TIME
        if ka_counter > 70:
            if channel is not None:
                if channel.is_cancelled():
                    led.deinit()
                    button.deinit()
                    return CTAP2_ERR_KEEPALIVE_CANCEL
                channel.keepalive(channel.STATUS_UPNEEDED)
            ka_counter = 0
def u2f_up_check(led_type=LED1):
    """
    Short (50 ms) user-presence poll for the U2F code path.

    Lights the LED while polling; returns CTAP2_OK on a press within the
    window, otherwise CTAP2_ERR_ACTION_TIMEOUT.  Unlike ``up_check`` there
    is no keepalive/cancel handling.
    """
    led = DigitalInOut(led_type)
    led.direction = Direction.OUTPUT
    button = DigitalInOut(BUTTON1)
    button.direction = Direction.INPUT
    # NOTE(review): no ``button.pull`` is configured here, unlike
    # ``up_check`` which uses Pull.UP — confirm the board has an external
    # pull-up, or this read may float.
    MAX_U2F_TIME = const(50)  # 50 ms polling window
    counter = 0
    led.value = False  # LED on/off polarity mirrors up_check's blink values
    while True:
        if button.value is False:
            # Pressed (active-low): restore LED state and release pins.
            led.value = True
            led.deinit()
            button.deinit()
            return CTAP2_OK
        if counter >= MAX_U2F_TIME:
            led.value = True
            led.deinit()
            button.deinit()
            return CTAP2_ERR_ACTION_TIMEOUT
        sleep(DELAY_TIME / 1000)
        counter += DELAY_TIME
class ButtonLongPressed:
    """
    Detects a continuous press of BUTTON1 lasting at least ``period`` seconds.

    ``check()`` must be polled frequently: press time only accumulates
    between calls spaced less than 0.05 s apart, so sparse polling
    silently restarts the measurement.
    """
    def __init__(self, period):
        # Seconds the button must be held before check() reports True.
        self.period = period
        # Start "long ago" so the first press never pairs with stale time.
        self.last_button_pressed = monotonic() - 10.0
        # Accumulated hold time (seconds) of the current press.
        self.button_pressed_duration = 0.0
        # flash_led(4)
    def check(self):
        """
        Poll the button once.

        Returns True when the accumulated hold time exceeds ``period``
        (and resets the accumulator); returns False when the button is
        up; returns None (falsy) while a press is still in progress.
        """
        button = DigitalInOut(BUTTON1)
        button.direction = Direction.INPUT
        button.pull = Pull.UP
        if button.value is True:
            # flash_led(2)
            # Button not pressed (active-low with pull-up).
            button.deinit()
            return False
        else:
            # flash_led(1)
            button.deinit()
            t = monotonic()
            # Only count time if the previous poll was recent enough to
            # belong to the same continuous press.
            if t - self.last_button_pressed < 0.05:
                self.button_pressed_duration += t - self.last_button_pressed
            self.last_button_pressed = t
            if self.button_pressed_duration > self.period:
                self.button_pressed_duration = 0.0
                return True
| 2,496 | 3 | 122 |
ff71ef4ea67f47f7da15038f3050a82f00ce1c4d | 1,729 | py | Python | src/sage/categories/principal_ideal_domains.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | 5 | 2015-01-04T07:15:06.000Z | 2022-03-04T15:15:18.000Z | src/sage/categories/principal_ideal_domains.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | null | null | null | src/sage/categories/principal_ideal_domains.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | 10 | 2016-09-28T13:12:40.000Z | 2022-02-12T09:28:34.000Z | r"""
Principal ideal domains
"""
#*****************************************************************************
# Copyright (C) 2008 Teresa Gomez-Diaz (CNRS) <Teresa.Gomez-Diaz@univ-mlv.fr>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category import Category
from sage.categories.category_singleton import Category_singleton
from sage.misc.cachefunc import cached_method
from sage.categories.unique_factorization_domains import UniqueFactorizationDomains
class PrincipalIdealDomains(Category_singleton):
    """
    The category of (constructive) principal ideal domains

    By constructive, we mean that a single generator can be
    constructively found for any ideal given by a finite set of
    generators. Note that this constructive definition only implies
    that finitely generated ideals are principal. It is not clear what
    we would mean by an infinitely generated ideal.

    EXAMPLES::

        sage: PrincipalIdealDomains()
        Category of principal ideal domains
        sage: PrincipalIdealDomains().super_categories()
        [Category of unique factorization domains]

    See also: http://en.wikipedia.org/wiki/Principal_ideal_domain

    TESTS::

        sage: TestSuite(PrincipalIdealDomains()).run()
    """
    def super_categories(self):
        """
        EXAMPLES::

            sage: PrincipalIdealDomains().super_categories()
            [Category of unique factorization domains]
        """
        # Every principal ideal domain is a unique factorization domain;
        # this is the only direct super category.
        return [UniqueFactorizationDomains()]
| 32.018519 | 83 | 0.650665 | r"""
Principal ideal domains
"""
#*****************************************************************************
# Copyright (C) 2008 Teresa Gomez-Diaz (CNRS) <Teresa.Gomez-Diaz@univ-mlv.fr>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category import Category
from sage.categories.category_singleton import Category_singleton
from sage.misc.cachefunc import cached_method
from sage.categories.unique_factorization_domains import UniqueFactorizationDomains
class PrincipalIdealDomains(Category_singleton):
    """
    The category of (constructive) principal ideal domains

    By constructive, we mean that a single generator can be
    constructively found for any ideal given by a finite set of
    generators. Note that this constructive definition only implies
    that finitely generated ideals are principal. It is not clear what
    we would mean by an infinitely generated ideal.

    EXAMPLES::

        sage: PrincipalIdealDomains()
        Category of principal ideal domains
        sage: PrincipalIdealDomains().super_categories()
        [Category of unique factorization domains]

    See also: http://en.wikipedia.org/wiki/Principal_ideal_domain

    TESTS::

        sage: TestSuite(PrincipalIdealDomains()).run()
    """
    def super_categories(self):
        """
        EXAMPLES::

            sage: PrincipalIdealDomains().super_categories()
            [Category of unique factorization domains]
        """
        # Every principal ideal domain is a unique factorization domain;
        # this is the only direct super category.
        return [UniqueFactorizationDomains()]

    class ParentMethods:
        # No methods are provided for parents in this category beyond
        # those inherited from the super categories.
        pass

    class ElementMethods:
        # No methods are provided for elements in this category beyond
        # those inherited from the super categories.
        pass
| 0 | 25 | 54 |
b8b4425f1750fef5a62e9413c9aedde78e0165f0 | 11,828 | py | Python | desdeo_problem/testproblems/EngineeringRealWorld.py | light-weaver/desdeo_problem | f3732bdd154ea5b6e94566d4daaf9fea67669646 | [
"MIT"
] | null | null | null | desdeo_problem/testproblems/EngineeringRealWorld.py | light-weaver/desdeo_problem | f3732bdd154ea5b6e94566d4daaf9fea67669646 | [
"MIT"
] | null | null | null | desdeo_problem/testproblems/EngineeringRealWorld.py | light-weaver/desdeo_problem | f3732bdd154ea5b6e94566d4daaf9fea67669646 | [
"MIT"
] | null | null | null | from desdeo_problem.problem.Variable import Variable
from desdeo_problem.problem.Objective import ScalarObjective
from desdeo_problem.problem.Problem import MOProblem, ProblemBase
from desdeo_problem import ScalarConstraint, problem
import numpy as np
"""
A real-world multi-objective problem suite (the RE benchmark set)
Tanabe, R. & Ishibuchi, H. (2020). An easy-to-use real-world multi-objective
optimization problem suite. Applied soft computing, 89, 106078.
https://doi.org/10.1016/j.asoc.2020.106078
https://github.com/ryojitanabe/reproblems/blob/master/reproblem_python_ver/reproblem.py
"""
def re21(var_iv: np.array = np.array([2, 2, 2, 2])) -> MOProblem:
    """ Four bar truss design problem.

    Two objectives and four variables.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [2, 2, 2, 2]. x1, x4 ∈ [a, 3a], x2, x3 ∈ [√2 a, 3a]
            and a = F / sigma

    Returns:
        MOProblem: a problem object.
    """
    # Parameters: applied force, allowed stress, Young's modulus, bar
    # length, and the derived cross-section scale a = F / sigma.
    F = 10.0
    sigma = 10.0
    E = 2.0 * 1e5
    L = 200.0
    a = F / sigma
    # Check the number of variables
    if (np.shape(np.atleast_2d(var_iv)[0]) != (4,)):
        raise RuntimeError("Number of variables must be four")
    # Lower bounds
    lb = np.array([a, np.sqrt(2) * a, np.sqrt(2) * a, a])
    # Upper bounds
    ub = np.array([3 * a, 3 * a, 3 * a, 3 * a])
    # Check the variable bounds
    if np.any(lb > var_iv) or np.any(ub < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")

    # BUGFIX: the objective evaluators f_1/f_2 were missing from this copy
    # (NameError when the objectives are evaluated); restored below.
    def f_1(x: np.ndarray) -> np.ndarray:
        # Structural volume of the four bars.
        x = np.atleast_2d(x)
        return L * ((2 * x[:, 0]) + np.sqrt(2.0) * x[:, 1] + np.sqrt(x[:, 2]) + x[:, 3])

    def f_2(x: np.ndarray) -> np.ndarray:
        # Joint displacement under load F.
        x = np.atleast_2d(x)
        return ((F * L) / E) * ((2.0 / x[:, 0]) +
                                (2.0 * np.sqrt(2.0) / x[:, 1]) - (2.0 * np.sqrt(2.0) / x[:, 2]) + (2.0 / x[:, 3]))

    objective_1 = ScalarObjective(name="minimize the structural volume", evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="minimize the joint displacement", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]
    # The four variables determine the length of four bars
    x_1 = Variable("x_1", 2 * a, a, 3 * a)
    x_2 = Variable("x_2", 2 * a, (np.sqrt(2.0) * a), 3 * a)
    x_3 = Variable("x_3", 2 * a, (np.sqrt(2.0) * a), 3 * a)
    x_4 = Variable("x_4", 2 * a, a, 3 * a)
    variables = [x_1, x_2, x_3, x_4]
    problem = MOProblem(variables=variables, objectives=objectives)
    return problem
def re22(var_iv: np.array = np.array([7.2, 10, 20])) -> MOProblem:
    """ Reinforced concrete beam design problem.

    2 objectives, 3 variables and 2 constraints.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [7.2, 10, 20]. x2 ∈ [0, 20] and x3 ∈ [0, 40].
            x1 has a pre-defined discrete value from 0.2 to 15.

    Returns:
        MOProblem: a problem object.
    """
    # Check the number of variables
    if (np.shape(np.atleast_2d(var_iv)[0]) != (3,)):
        raise RuntimeError("Number of variables must be three")
    # Lower bounds
    lb = np.array([0.2, 0, 0])
    # Upper bounds
    ub = np.array([15, 20, 40])
    # Check the variable bounds
    if np.any(lb > var_iv) or np.any(ub < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")
    # x1 pre-defined discrete values
    feasible_vals = np.array([0.20, 0.31, 0.40, 0.44, 0.60, 0.62, 0.79, 0.80, 0.88, 0.93,
                              1.0, 1.20, 1.24, 1.32, 1.40, 1.55, 1.58, 1.60, 1.76, 1.80,
                              1.86, 2.0, 2.17, 2.20, 2.37, 2.40, 2.48, 2.60, 2.64, 2.79,
                              2.80, 3.0, 3.08, 3.10, 3.16, 3.41, 3.52, 3.60, 3.72, 3.95,
                              3.96, 4.0, 4.03, 4.20, 4.34, 4.40, 4.65, 4.74, 4.80, 4.84,
                              5.0, 5.28, 5.40, 5.53, 5.72, 6.0, 6.16, 6.32, 6.60, 7.11,
                              7.20, 7.80, 7.90, 8.0, 8.40, 8.69, 9.0, 9.48, 10.27, 11.0,
                              11.06, 11.85, 12.0, 13.0, 14.0, 15.0])

    # BUGFIX: the constraint/objective evaluators were missing from this
    # copy (NameError when evaluated); restored below.
    # Constrain functions; x[:, 0] is snapped (in place) to the nearest
    # feasible discrete value before the expression is evaluated.
    def g_1(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        fv_2d = np.repeat(np.atleast_2d(feasible_vals), x.shape[0], axis=0)
        idx = np.abs(fv_2d.T - x[:, 0]).argmin(axis=0)
        x[:, 0] = feasible_vals[idx]
        return x[:, 0] * x[:, 2] - 7.735 * (x[:, 0]**2 / x[:, 1]) - 180

    def g_2(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        return 4 - x[:, 2] / x[:, 1]

    # Objective functions
    def f_1(x: np.ndarray) -> np.ndarray:
        x = np.atleast_2d(x)
        fv_2d = np.repeat(np.atleast_2d(feasible_vals), x.shape[0], axis=0)
        idx = np.abs(fv_2d.T - x[:, 0]).argmin(axis=0)
        x[:, 0] = feasible_vals[idx]
        return (29.4 * x[:, 0]) + (0.6 * x[:, 1] * x[:,2])

    def f_2(x: np.ndarray) -> np.ndarray:
        x = np.atleast_2d(x)
        sum1 = g_1(x)
        sum2 = g_2(x)
        sum1 = np.where(sum1 > 0, sum1, 0)
        sum2 = np.where(sum2 > 0, sum2, 0)
        return sum1 + sum2

    objective_1 = ScalarObjective(name="minimize the total cost of concrete and reinforcing steel of the beam",
                                  evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="the sum of the four constraint violations", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]
    cons_1 = ScalarConstraint("c_1", 3, 2, g_1)
    cons_2 = ScalarConstraint("c_2", 3, 2, g_2)
    constraints = [cons_1, cons_2]
    x_1 = Variable("the area of the reinforcement", 7.2, 0.2, 15)
    x_2 = Variable("the width of the beam", 10, 0, 20)
    x_3 = Variable("the depth of the beam", 20, 0, 40)
    variables = [x_1, x_2, x_3]
    problem = MOProblem(variables=variables, objectives=objectives, constraints=constraints)
    return problem
def re23(var_iv: np.array = np.array([50, 50, 100, 120])) -> MOProblem:
    """ Pressure vesssel design problem.

    2 objectives, 4 variables and 3 constraints.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [50, 50, 100, 120]. x1 and x2 ∈ {1, ..., 100},
            x3 ∈ [10, 200] and x4 ∈ [10, 240].
            x1 and x2 are integer multiples of 0.0625.

    Returns:
        MOProblem: a problem object.
    """
    # Check the number of variables
    if (np.shape(np.atleast_2d(var_iv)[0]) != (4,)):
        raise RuntimeError("Number of variables must be four")
    # Lower bounds
    lb = np.array([1, 1, 10, 10])
    # Upper bounds
    ub = np.array([100, 100, 200, 240])
    # Check the variable bounds
    if np.any(lb > var_iv) or np.any(ub < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")

    # BUGFIX: the constraint/objective evaluators were missing from this
    # copy (NameError when evaluated).  Reconstructed from the RE-suite
    # (Tanabe & Ishibuchi 2020, problem RE23).  x1/x2 are scaled to
    # integer multiples of 0.0625 inside the evaluators, per the docstring.
    # Constrain functions (feasible when >= 0)
    def g_1(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        x1 = 0.0625 * np.round(x[:, 0])
        return x1 - (0.0193 * x[:, 2])

    def g_2(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        x2 = 0.0625 * np.round(x[:, 1])
        return x2 - (0.00954 * x[:, 2])

    def g_3(x: np.ndarray, _ = None) -> np.ndarray:
        # Volume requirement of the cylindrical shell plus spherical heads.
        x = np.atleast_2d(x)
        return (np.pi * x[:, 2]**2 * x[:, 3]) + ((4.0 / 3.0) * np.pi * x[:, 2]**3) - 1296000

    # Objective functions
    def f_1(x: np.ndarray) -> np.ndarray:
        x = np.atleast_2d(x)
        x1 = 0.0625 * np.round(x[:, 0])
        x2 = 0.0625 * np.round(x[:, 1])
        return ((0.6224 * x1 * x[:, 2] * x[:, 3]) + (1.7781 * x2 * x[:, 2]**2)
                + (3.1661 * x1**2 * x[:, 3]) + (19.84 * x1**2 * x[:, 2]))

    def f_2(x: np.ndarray) -> np.ndarray:
        # Sum of constraint violations: the magnitude of each negative g.
        x = np.atleast_2d(x)
        viol = np.zeros(np.atleast_2d(x).shape[0])
        for g in (g_1, g_2, g_3):
            gv = g(x)
            viol = viol + np.where(gv < 0, -gv, 0)
        return viol

    objective_1 = ScalarObjective(name="minimize to total cost of a clyndrical pressure vessel", evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="the sum of the four constraint violations", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]
    cons_1 = ScalarConstraint("c_1", 4, 2, g_1)
    cons_2 = ScalarConstraint("c_2", 4, 2, g_2)
    cons_3 = ScalarConstraint("c_3", 4, 2, g_3)
    constraints = [cons_1, cons_2, cons_3]
    x_1 = Variable("the thicknesses of the shell", 50, 1, 100)
    x_2 = Variable("the the head of pressure vessel", 50, 1, 100)
    x_3 = Variable("the inner radius", 100, 10, 200)
    x_4 = Variable("the length of the cylindrical section", 120, 10, 240)
    variables = [x_1, x_2, x_3, x_4]
    problem = MOProblem(variables=variables, objectives=objectives, constraints=constraints)
    return problem
def re24(var_iv: np.array = np.array([2, 25])) -> MOProblem:
    """ Hatch cover design problem.

    2 objectives, 2 variables and 4 constraints.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [2, 25]. x1 ∈ [0.5, 4] and
            x2 ∈ [4, 50].

    Returns:
        MOProblem: a problem object.
    """
    # Check the number of variables
    if (np.shape(np.atleast_2d(var_iv)[0]) != (2,)):
        raise RuntimeError("Number of variables must be two")
    # Lower bounds
    lb = np.array([0.5, 4])
    # Upper bounds
    ub = np.array([4, 50])
    # Check the variable bounds
    if np.any(lb > var_iv) or np.any(ub < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")

    # BUGFIX: the constraint/objective evaluators were missing from this
    # copy (NameError when evaluated).  Reconstructed from the RE-suite
    # (Tanabe & Ishibuchi 2020, problem RE24) with E = 700000,
    # sigma_b_max = 700, tau_max = 450, delta_max = 1.5.
    # Constrain functions (feasible when >= 0)
    def g_1(x: np.ndarray, _ = None) -> np.ndarray:
        # Bending stress limit.
        x = np.atleast_2d(x)
        sigma_b = 4500.0 / (x[:, 0] * x[:, 1])
        return 1.0 - (sigma_b / 700.0)

    def g_2(x: np.ndarray, _ = None) -> np.ndarray:
        # Shear stress limit.
        x = np.atleast_2d(x)
        tau = 1800.0 / x[:, 1]
        return 1.0 - (tau / 450.0)

    def g_3(x: np.ndarray, _ = None) -> np.ndarray:
        # Deflection limit.
        x = np.atleast_2d(x)
        delta = (56.2 * 10**4) / (700000.0 * x[:, 0] * x[:, 1]**2)
        return 1.0 - (delta / 1.5)

    def g_4(x: np.ndarray, _ = None) -> np.ndarray:
        # Buckling: bending stress must stay below the critical stress.
        x = np.atleast_2d(x)
        sigma_b = 4500.0 / (x[:, 0] * x[:, 1])
        sigma_k = (700000.0 * x[:, 0]**2) / 100.0
        return 1.0 - (sigma_b / sigma_k)

    # Objective functions
    def f_1(x: np.ndarray) -> np.ndarray:
        # Weight of the hatch cover.
        x = np.atleast_2d(x)
        return x[:, 0] + (120.0 * x[:, 1])

    def f_2(x: np.ndarray) -> np.ndarray:
        # Sum of constraint violations: the magnitude of each negative g.
        x = np.atleast_2d(x)
        viol = np.zeros(np.atleast_2d(x).shape[0])
        for g in (g_1, g_2, g_3, g_4):
            gv = g(x)
            viol = viol + np.where(gv < 0, -gv, 0)
        return viol

    objective_1 = ScalarObjective(name="to minimize the weight of the hatch cover", evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="the sum of the four constraint violations", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]
    cons_1 = ScalarConstraint("c_1", 2, 2, g_1)
    cons_2 = ScalarConstraint("c_2", 2, 2, g_2)
    cons_3 = ScalarConstraint("c_3", 2, 2, g_3)
    cons_4 = ScalarConstraint("c_4", 2, 2, g_4)
    constraints = [cons_1, cons_2, cons_3, cons_4]
    x_1 = Variable("the flange thickness", 2, 0.5, 4)
    x_2 = Variable("the beam height", 25, 4, 50)
    variables = [x_1, x_2]
    problem = MOProblem(variables=variables, objectives=objectives, constraints=constraints)
    return problem
from desdeo_problem.problem.Objective import ScalarObjective
from desdeo_problem.problem.Problem import MOProblem, ProblemBase
from desdeo_problem import ScalarConstraint, problem
import numpy as np
"""
A real-world multi-objective problem suite (the RE benchmark set)
Tanabe, R. & Ishibuchi, H. (2020). An easy-to-use real-world multi-objective
optimization problem suite. Applied soft computing, 89, 106078.
https://doi.org/10.1016/j.asoc.2020.106078
https://github.com/ryojitanabe/reproblems/blob/master/reproblem_python_ver/reproblem.py
"""
def re21(var_iv: np.array = np.array([2, 2, 2, 2])) -> MOProblem:
    """ Four bar truss design problem.

    Two objectives and four variables.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [2, 2, 2, 2]. x1, x4 ∈ [a, 3a], x2, x3 ∈ [√2 a, 3a]
            and a = F / sigma

    Returns:
        MOProblem: a problem object.
    """
    # Problem constants: applied force, allowed stress, Young's modulus,
    # bar length, and the derived cross-section scale a = F / sigma.
    F = 10.0
    sigma = 10.0
    E = 2.0 * 1e5
    L = 200.0
    a = F / sigma

    # Validate the initial decision vector before building anything.
    if np.shape(np.atleast_2d(var_iv)[0]) != (4,):
        raise RuntimeError("Number of variables must be four")

    root2 = np.sqrt(2.0)
    lb = np.array([a, np.sqrt(2) * a, np.sqrt(2) * a, a])
    ub = np.array([3 * a, 3 * a, 3 * a, 3 * a])
    out_of_bounds = np.any(lb > var_iv) or np.any(ub < var_iv)
    if out_of_bounds:
        raise ValueError("Initial variable values need to be between lower and upper bounds")

    def volume(x: np.ndarray) -> np.ndarray:
        # Structural volume of the four bars.
        x = np.atleast_2d(x)
        return L * ((2 * x[:, 0]) + np.sqrt(2.0) * x[:, 1] + np.sqrt(x[:, 2]) + x[:, 3])

    def displacement(x: np.ndarray) -> np.ndarray:
        # Joint displacement under load F.
        x = np.atleast_2d(x)
        terms = (2.0 / x[:, 0]) + (2.0 * np.sqrt(2.0) / x[:, 1]) \
            - (2.0 * np.sqrt(2.0) / x[:, 2]) + (2.0 / x[:, 3])
        return ((F * L) / E) * terms

    objectives = [
        ScalarObjective(name="minimize the structural volume",
                        evaluator=volume, maximize=[False]),
        ScalarObjective(name="minimize the joint displacement",
                        evaluator=displacement, maximize=[False]),
    ]

    # The four variables determine the length of four bars
    variables = [
        Variable("x_1", 2 * a, a, 3 * a),
        Variable("x_2", 2 * a, root2 * a, 3 * a),
        Variable("x_3", 2 * a, root2 * a, 3 * a),
        Variable("x_4", 2 * a, a, 3 * a),
    ]

    return MOProblem(variables=variables, objectives=objectives)
def re22(var_iv: np.array = np.array([7.2, 10, 20])) -> MOProblem:
    """ Reinforced concrete beam design problem.

    2 objectives, 3 variables and 2 constraints.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [7.2, 10, 20]. x2 ∈ [0, 20] and x3 ∈ [0, 40].
            x1 has a pre-defined discrete value from 0.2 to 15.

    Returns:
        MOProblem: a problem object.
    """
    # Check the number of variables
    if (np.shape(np.atleast_2d(var_iv)[0]) != (3,)):
        raise RuntimeError("Number of variables must be three")
    # Lower bounds
    lb = np.array([0.2, 0, 0])
    # Upper bounds
    ub = np.array([15, 20, 40])
    # Check the variable bounds
    if np.any(lb > var_iv) or np.any(ub < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")
    # x1 pre-defined discrete values
    feasible_vals = np.array([0.20, 0.31, 0.40, 0.44, 0.60, 0.62, 0.79, 0.80, 0.88, 0.93,
                              1.0, 1.20, 1.24, 1.32, 1.40, 1.55, 1.58, 1.60, 1.76, 1.80,
                              1.86, 2.0, 2.17, 2.20, 2.37, 2.40, 2.48, 2.60, 2.64, 2.79,
                              2.80, 3.0, 3.08, 3.10, 3.16, 3.41, 3.52, 3.60, 3.72, 3.95,
                              3.96, 4.0, 4.03, 4.20, 4.34, 4.40, 4.65, 4.74, 4.80, 4.84,
                              5.0, 5.28, 5.40, 5.53, 5.72, 6.0, 6.16, 6.32, 6.60, 7.11,
                              7.20, 7.80, 7.90, 8.0, 8.40, 8.69, 9.0, 9.48, 10.27, 11.0,
                              11.06, 11.85, 12.0, 13.0, 14.0, 15.0])
    # Constrain functions.  NOTE: g_1 (like f_1 below) snaps x[:, 0] IN
    # PLACE to the nearest feasible discrete value before evaluating.
    def g_1(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        # Broadcast the feasible grid against each row's x1 and pick the
        # nearest discrete value.
        fv_2d = np.repeat(np.atleast_2d(feasible_vals), x.shape[0], axis=0)
        idx = np.abs(fv_2d.T - x[:, 0]).argmin(axis=0)
        x[:, 0] = feasible_vals[idx]
        return x[:, 0] * x[:, 2] - 7.735 * (x[:, 0]**2 / x[:, 1]) - 180
    def g_2(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        return 4 - x[:, 2] / x[:, 1]
    # Objective functions
    def f_1(x: np.ndarray) -> np.ndarray:
        x = np.atleast_2d(x)
        # Same in-place snapping of x1 to the discrete grid as in g_1.
        fv_2d = np.repeat(np.atleast_2d(feasible_vals), x.shape[0], axis=0)
        idx = np.abs(fv_2d.T - x[:, 0]).argmin(axis=0)
        x[:, 0] = feasible_vals[idx]
        return (29.4 * x[:, 0]) + (0.6 * x[:, 1] * x[:,2])
    def f_2(x: np.ndarray) -> np.ndarray:
        # NOTE(review): this sums the POSITIVE parts of g_1/g_2, whereas
        # the RE suite defines violations as the magnitude of NEGATIVE
        # constraint values — confirm the intended sign convention.
        x = np.atleast_2d(x)
        sum1 = g_1(x)
        sum2 = g_2(x)
        sum1 = np.where(sum1 > 0, sum1, 0)
        sum2 = np.where(sum2 > 0, sum2, 0)
        return sum1 + sum2
    objective_1 = ScalarObjective(name="minimize the total cost of concrete and reinforcing steel of the beam",
                                  evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="the sum of the four constraint violations", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]
    cons_1 = ScalarConstraint("c_1", 3, 2, g_1)
    cons_2 = ScalarConstraint("c_2", 3, 2, g_2)
    constraints = [cons_1, cons_2]
    x_1 = Variable("the area of the reinforcement", 7.2, 0.2, 15)
    x_2 = Variable("the width of the beam", 10, 0, 20)
    x_3 = Variable("the depth of the beam", 20, 0, 40)
    variables = [x_1, x_2, x_3]
    problem = MOProblem(variables=variables, objectives=objectives, constraints=constraints)
    return problem
def re23(var_iv: np.array = np.array([50, 50, 100, 120])) -> MOProblem:
    """ Pressure vesssel design problem.

    2 objectives, 4 variables and 3 constraints.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [50, 50, 100, 120]. x1 and x2 ∈ {1, ..., 100},
            x3 ∈ [10, 200] and x4 ∈ [10, 240].
            x1 and x2 are integer multiples of 0.0625.

    Returns:
        MOProblem: a problem object.
    """
    # Exactly four decision variables are expected.
    if np.shape(np.atleast_2d(var_iv)[0]) != (4,):
        raise RuntimeError("Number of variables must be four")

    # Variable bounds: [lower, upper] per variable.
    lower_bounds = np.array([1, 1, 10, 10])
    upper_bounds = np.array([100, 100, 200, 240])
    if np.any(lower_bounds > var_iv) or np.any(upper_bounds < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")

    # Constraint evaluators (feasibility margins).
    def g_1(x: np.ndarray, _ = None) -> np.ndarray:
        # Shell thickness x1 is snapped to the nearest multiple of 0.0625.
        x = np.atleast_2d(x).astype(float)
        thickness = 0.0625 * np.round(x[:, 0])
        return thickness - (0.0193 * x[:, 2])

    def g_2(x: np.ndarray, _ = None) -> np.ndarray:
        # Head thickness x2 is snapped the same way.
        x = np.atleast_2d(x).astype(float)
        head = 0.0625 * np.round(x[:, 1])
        return head - (0.00954 * x[:, 2])

    def g_3(x: np.ndarray, _ = None) -> np.ndarray:
        # Volume requirement of the vessel.
        x = np.atleast_2d(x)
        return (np.pi * x[:, 2]**2 * x[:, 3]) + ((4/3) * np.pi * x[:, 2]**3) - 1296000

    # Objective evaluators.
    def f_1(x: np.ndarray) -> np.ndarray:
        x = np.atleast_2d(x).astype(float)
        t_shell = 0.0625 * np.round(x[:, 0])
        t_head = 0.0625 * np.round(x[:, 1])
        return (
            (0.6224 * t_shell * x[:, 2] * x[:, 3]) + (1.7781 * t_head * x[:, 2]**2) +
            (3.1661 * t_shell**2 * x[:, 3]) + (19.84 * t_shell**2 * x[:, 2])
        )

    def f_2(x: np.ndarray) -> np.ndarray:
        # Accumulate the positive part of every constraint value.
        x = np.atleast_2d(x)
        total = np.zeros(x.shape[0])
        for constraint in (g_1, g_2, g_3):
            value = constraint(x)
            total = total + np.where(value > 0, value, 0)
        return total

    objective_1 = ScalarObjective(name="minimize to total cost of a clyndrical pressure vessel", evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="the sum of the four constraint violations", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]

    constraints = [
        ScalarConstraint("c_1", 4, 2, g_1),
        ScalarConstraint("c_2", 4, 2, g_2),
        ScalarConstraint("c_3", 4, 2, g_3),
    ]

    variables = [
        Variable("the thicknesses of the shell", 50, 1, 100),
        Variable("the the head of pressure vessel", 50, 1, 100),
        Variable("the inner radius", 100, 10, 200),
        Variable("the length of the cylindrical section", 120, 10, 240),
    ]

    return MOProblem(variables=variables, objectives=objectives, constraints=constraints)
def re24(var_iv: np.array = np.array([2, 25])) -> MOProblem:
    """ Hatch cover design problem.

    2 objectives, 2 variables and 4 constraints.

    Arguments:
        var_iv (np.array): Optional, initial variable values.
            Defaults are [2, 25]. x1 ∈ [0.5, 4] and
            x2 ∈ [4, 50].

    Returns:
        MOProblem: a problem object.

    Raises:
        RuntimeError: If the number of variables is not two.
        ValueError: If the initial values fall outside the variable bounds.
    """
    # Check the number of variables
    if (np.shape(np.atleast_2d(var_iv)[0]) != (2,)):
        raise RuntimeError("Number of variables must be two")

    # Lower bounds
    lb = np.array([0.5, 4])
    # Upper bounds
    ub = np.array([4, 50])

    # Check the variable bounds
    if np.any(lb > var_iv) or np.any(ub < var_iv):
        raise ValueError("Initial variable values need to be between lower and upper bounds")

    # Constrain functions (feasible when the returned value is >= 0)
    def g_1(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        return 1.0 - ((4500 / (x[:, 0] * x[:, 1])) / 700)

    def g_2(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        return 1.0 - ((1800 / x[:, 1]) / 450)

    def g_3(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        return 1.0 - (((56.2 * 10000) / (700000 * x[:, 0] * x[:, 1]**2)) / 1.5)

    def g_4(x: np.ndarray, _ = None) -> np.ndarray:
        x = np.atleast_2d(x)
        return 1.0 - ((4500 / (x[:,0] * x[:, 1])) / ((700000 * x[:, 0]**2) / 100))

    # Objective functions
    def f_1(x: np.ndarray) -> np.ndarray:
        x = np.atleast_2d(x)
        return x[:, 0] + 120 * x[:, 1]

    def f_2(x: np.ndarray) -> np.ndarray:
        # Sum of the positive parts of the four constraint values.
        x = np.atleast_2d(x)
        sum1 = g_1(x)
        sum2 = g_2(x)
        sum3 = g_3(x)
        sum4 = g_4(x)
        sum1 = np.where(sum1 > 0, sum1, 0)
        sum2 = np.where(sum2 > 0, sum2, 0)
        sum3 = np.where(sum3 > 0, sum3, 0)
        sum4 = np.where(sum4 > 0, sum4, 0)
        return sum1 + sum2 + sum3 + sum4

    objective_1 = ScalarObjective(name="to minimize the weight of the hatch cover", evaluator=f_1, maximize=[False])
    objective_2 = ScalarObjective(name="the sum of the four constraint violations", evaluator=f_2, maximize=[False])
    objectives = [objective_1, objective_2]

    cons_1 = ScalarConstraint("c_1", 2, 2, g_1)
    cons_2 = ScalarConstraint("c_2", 2, 2, g_2)
    cons_3 = ScalarConstraint("c_3", 2, 2, g_3)
    cons_4 = ScalarConstraint("c_4", 2, 2, g_4)
    constraints = [cons_1, cons_2, cons_3, cons_4]

    x_1 = Variable("the flange thickness", 2, 0.5, 4)
    x_2 = Variable("the beam height", 25, 4, 50)
    variables = [x_1, x_2]

    problem = MOProblem(variables=variables, objectives=objectives, constraints=constraints)
    # Fixed: dataset-extraction residue ("| 3,222 | 0 | 453") was fused onto
    # this return line, which evaluated `problem | 3` and crashed at runtime.
    return problem
13c7784ed035912948d91375dc21a4e18434d9c0 | 462 | py | Python | stdplugins/Exit.py | Sur-vivor/PepeBot | 7394b1d958ac789fe5f427e632d566052634e680 | [
"Apache-2.0"
] | 6 | 2020-04-21T04:52:42.000Z | 2020-06-19T09:33:02.000Z | stdplugins/Exit.py | faquario/PepeBorg | be9be790b4e2b5456536162786dbd1ead71be024 | [
"Apache-2.0"
] | 2 | 2020-05-22T14:59:05.000Z | 2020-05-28T12:04:02.000Z | stdplugins/Exit.py | prono69/LazyAF-Pepe | b78d4b9f174a65b77b6b5f4969386aa6dd3359ce | [
"Apache-2.0"
] | 3 | 2021-01-28T15:42:25.000Z | 2021-11-18T04:02:01.000Z | # For @UniBorg
"""fake exit
\n.fexit"""
from telethon import events
@borg.on(events.NewMessage(outgoing=True, pattern='^\.(f?f)exit'))
| 25.666667 | 126 | 0.649351 | # For @UniBorg
"""fake exit
\n.fexit"""
from telethon import events
@borg.on(events.NewMessage(outgoing=True, pattern='^\.(f?f)exit'))
async def timer_blankx(e):
    """Fake-exit command handler: show a short countdown, then a goodbye.

    Triggered by the decorator pattern ``^\.(f?f)exit`` on outgoing messages,
    i.e. ``.fexit`` (group(1) == 'f') or ``.ffexit`` (group(1) == 'ff').
    NOTE(review): ``asyncio`` is used below but not imported in this snippet —
    presumably injected by the bot's plugin loader; confirm.
    """
    # Text after the command prefix plus a "Processing" banner.
    # NOTE(review): [7:] drops the first 7 characters; for ".fexit" (6 chars)
    # this leaves an empty string — looks like an off-by-one, confirm intent.
    txt=e.text[7:] + '\n\n`Processing....` '
    j=1
    k=j
    # Runs exactly once (range(1)): edits the message with the counter value,
    # decrements it, then waits one second.
    for j in range(j):
        await e.edit(txt + str(k))
        k=k-1
        await asyncio.sleep(1)
    # Only the single-'f' form (".fexit") posts the goodbye message.
    if e.pattern_match.group(1) == 'f':
        await e.edit("`Legend is leaving this chat.....!` @admin `Goodbye aren't forever. It was a pleasant time with you guys..` ")
| 303 | 0 | 22 |
3b89ed5c36aad92f067f5c27a83ddeb72df5b112 | 1,737 | py | Python | constants.py | alexschwarzresearch/repo-migrate | 16a2fae37f2748f9bc3d26239aa2d972c48c022c | [
"MIT"
] | null | null | null | constants.py | alexschwarzresearch/repo-migrate | 16a2fae37f2748f9bc3d26239aa2d972c48c022c | [
"MIT"
] | null | null | null | constants.py | alexschwarzresearch/repo-migrate | 16a2fae37f2748f9bc3d26239aa2d972c48c022c | [
"MIT"
# Namespace prefix table used by the repo-migrate tooling.
# CONTEXT_DICT maps prefix -> namespace IRI; CONTEXT_TEXT renders the same
# table as SPARQL PREFIX declarations.
# Fixed: dataset-extraction residue ("] | null | null | null | ") was fused
# before the CONTEXT_DICT assignment, making this line a syntax error.
CONTEXT_DICT = {"premis": "http://www.loc.gov/premis/rdf/v1#",
                "test": "info:fedora/test/",
                "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
                "xsi": "http://www.w3.org/2001/XMLSchema-instance",
                "xmlns": "http://www.w3.org/2000/xmlns/",
                "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                "fedora": "http://fedora.info/definitions/v4/repository#",
                "xml": "http://www.w3.org/XML/1998/namespace",
                "ebucore": "http://www.ebu.ch/metadata/ontologies/ebucore/ebucore#",
                "ldp": "http://www.w3.org/ns/ldp#",
                "xs": "http://www.w3.org/2001/XMLSchema",
                "fedoraconfig": "http://fedora.info/definitions/v4/config#",
                "foaf": "http://xmlns.com/foaf/0.1/",
                "dc": "http://purl.org/dc/elements/1.1/",
                "rm": "https://repomigrate.io/schema#"}

CONTEXT_TEXT = """PREFIX premis: <http://www.loc.gov/premis/rdf/v1#>
PREFIX test: <info:fedora/test/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsi: <http://www.w3.org/2001/XMLSchema-instance>
PREFIX xmlns: <http://www.w3.org/2000/xmlns/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX fedora: <http://fedora.info/definitions/v4/repository#>
PREFIX xml: <http://www.w3.org/XML/1998/namespace>
PREFIX ebucore: <http://www.ebu.ch/metadata/ontologies/ebucore/ebucore#>
PREFIX ldp: <http://www.w3.org/ns/ldp#>
PREFIX xs: <http://www.w3.org/2001/XMLSchema>
PREFIX fedoraconfig: <http://fedora.info/definitions/v4/config#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX rm: <https://repomigrate.io/schema#>
"""
# Namespace prefix table used by the repo-migrate tooling (duplicate copy in
# this dump). CONTEXT_DICT maps prefix -> namespace IRI; CONTEXT_TEXT renders
# the same table as SPARQL PREFIX declarations.
# Fixed: dataset-extraction residue ("| 52.636364 | 84 | 0.601036 | ") was
# fused before the CONTEXT_DICT assignment, making this line a syntax error.
CONTEXT_DICT = {"premis": "http://www.loc.gov/premis/rdf/v1#",
                "test": "info:fedora/test/",
                "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
                "xsi": "http://www.w3.org/2001/XMLSchema-instance",
                "xmlns": "http://www.w3.org/2000/xmlns/",
                "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                "fedora": "http://fedora.info/definitions/v4/repository#",
                "xml": "http://www.w3.org/XML/1998/namespace",
                "ebucore": "http://www.ebu.ch/metadata/ontologies/ebucore/ebucore#",
                "ldp": "http://www.w3.org/ns/ldp#",
                "xs": "http://www.w3.org/2001/XMLSchema",
                "fedoraconfig": "http://fedora.info/definitions/v4/config#",
                "foaf": "http://xmlns.com/foaf/0.1/",
                "dc": "http://purl.org/dc/elements/1.1/",
                "rm": "https://repomigrate.io/schema#"}

CONTEXT_TEXT = """PREFIX premis: <http://www.loc.gov/premis/rdf/v1#>
PREFIX test: <info:fedora/test/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsi: <http://www.w3.org/2001/XMLSchema-instance>
PREFIX xmlns: <http://www.w3.org/2000/xmlns/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX fedora: <http://fedora.info/definitions/v4/repository#>
PREFIX xml: <http://www.w3.org/XML/1998/namespace>
PREFIX ebucore: <http://www.ebu.ch/metadata/ontologies/ebucore/ebucore#>
PREFIX ldp: <http://www.w3.org/ns/ldp#>
PREFIX xs: <http://www.w3.org/2001/XMLSchema>
PREFIX fedoraconfig: <http://fedora.info/definitions/v4/config#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX rm: <https://repomigrate.io/schema#>
"""
| 0 | 0 | 0 |
de42cf0c9553f612c0df6b14893b3a2a6ec942e9 | 2,792 | py | Python | src/code/RawTransactionsGenerator.py | faustodelatog/TFM-UOC | 06c10af41e70e5b67db7b74abdc40dffbe974cf8 | [
"Apache-2.0"
] | null | null | null | src/code/RawTransactionsGenerator.py | faustodelatog/TFM-UOC | 06c10af41e70e5b67db7b74abdc40dffbe974cf8 | [
"Apache-2.0"
] | null | null | null | src/code/RawTransactionsGenerator.py | faustodelatog/TFM-UOC | 06c10af41e70e5b67db7b74abdc40dffbe974cf8 | [
"Apache-2.0"
] | null | null | null | import os
import pandas as pd | 48.137931 | 130 | 0.65043 | import os
import pandas as pd
class RawTransactionsGenerator:
    """Build one raw-transactions table from the CSV files in ``data_path``.

    Expects four ';'-separated inputs in the directory: ``Venta.csv`` (sales),
    ``Calendarios.csv`` (calendar), ``Stock.csv`` and ``Promos.csv``.
    """

    # Populated by __init__ / load_data; class-level defaults document the
    # attributes the pipeline uses.
    data_path = None
    sales = None
    calendar = None
    stock = None
    promotions = None
    transactions = None

    def __init__(self, data_path):
        """Remember ``data_path`` and load every input file immediately."""
        self.data_path = data_path
        self.verify_data_dir()
        self.load_data()

    def verify_data_dir(self):
        """Print the directory being read and the files it contains."""
        print(f'leyendo ficheros de {self.data_path}')
        print(os.listdir(self.data_path))

    def load_data(self):
        """Read the four ';'-separated CSV inputs, then print a summary."""
        self.sales = pd.read_csv(self.data_path + '/Venta.csv', sep=';')
        self.calendar = pd.read_csv(self.data_path + '/Calendarios.csv', sep=';')
        self.stock = pd.read_csv(self.data_path + '/Stock.csv', sep=';')
        self.promotions = pd.read_csv(self.data_path + '/Promos.csv', sep=';')
        self.print_files_info()

    def print_files_info(self):
        """Print row counts and earliest dates of each loaded table."""
        print(f'Existen {self.sales.shape[0]} registros de venta desde {self.sales.fecha.min()}')
        print(f'Existen {self.calendar.shape[0]} registros de calendario desde {self.calendar.fecha.min()}')
        print(f'Existen {self.stock.shape[0]} registros de stock desde {self.stock.fecha.min()}')
        print(f'Existen {self.promotions.shape[0]} registros de promociones desde {self.promotions.fechaIni.min()}')

    def group_rows(self):
        """Collapse sales/stock/calendar to one row per (fecha, sku)."""
        self.sales = self.sales.groupby(by=['fecha', 'sku']).sum().reset_index()
        self.stock = self.stock.groupby(by=['fecha', 'sku']).sum().reset_index()
        self.calendar = self.calendar.groupby(by=['fecha', 'sku']).max().reset_index()

    def generate_transactions(self):
        """Left-join sales with stock and calendar into ``self.transactions``."""
        self.group_rows()
        txs = pd.merge(self.sales, self.stock, how='left', on=['fecha', 'sku'])
        txs = pd.merge(txs, self.calendar, how='left', on=['fecha', 'sku'])
        self.transactions = txs

    def isProm(self, t):
        """Return True when row ``t`` lies in a promotion window for its sku
        (fechaIni <= t.fecha <= fechaFin)."""
        return any((self.promotions.sku == t.sku) & (self.promotions.fechaIni <= t.fecha) & (self.promotions.fechaFin >= t.fecha))

    def add_promotions_to_transactions(self):
        """Add a 0/1 ``bolProm`` column flagging promoted transactions."""
        self.transactions['bolProm'] = self.transactions.apply(lambda t: self.isProm(t), axis=1)
        self.transactions.bolProm = self.transactions.bolProm.astype(int)
        print("transacciones en promoción: " + str(self.transactions[(self.transactions.bolProm == 1)].shape[0]))

    def save_transactions(self, output_file):
        """Normalize dates/column names and write a ';'-separated CSV.

        NOTE(review): this reads a ``date`` column although the upstream
        tables use ``fecha``, and the rename assumes an exact positional
        column order — confirm against the caller's pipeline.
        """
        self.transactions['date'] = pd.to_datetime(self.transactions.date.astype(str), format='%Y%m%d')
        self.transactions.columns = ['date', 'sku', 'units_sold', 'stock', 'is_open', 'is_holiday', 'is_prom']
        self.transactions.to_csv(output_file, index = False, sep=';')
        # Fixed: dataset-extraction residue ("| 2,339 | 402 | 23") was fused
        # onto this line, which would have raised a TypeError at runtime.
        print(f'{self.transactions.shape[0]} transacciones guardadas en: {output_file}')
87a0a674166d3b429e989903cdaa07a8e5880df9 | 944 | py | Python | example_2310/warriors_app/migrations/0002_auto_20201022_1719.py | TonikX/ITMO_ICT_WebDevelopment_Examples | d74117f9ff1352852dad1b6900b71cfdb403322b | [
"MIT"
] | null | null | null | example_2310/warriors_app/migrations/0002_auto_20201022_1719.py | TonikX/ITMO_ICT_WebDevelopment_Examples | d74117f9ff1352852dad1b6900b71cfdb403322b | [
"MIT"
] | 5 | 2020-11-03T06:57:02.000Z | 2020-11-11T22:07:32.000Z | example_2310/warriors_app/migrations/0002_auto_20201022_1719.py | TonikX/ITMO_ICT_WebDevelopment_Examples | d74117f9ff1352852dad1b6900b71cfdb403322b | [
"MIT"
] | 1 | 2021-03-17T11:14:13.000Z | 2021-03-17T11:14:13.000Z | # Generated by Django 3.1.2 on 2020-10-22 14:19
from django.db import migrations, models
import django.db.models.deletion
| 31.466667 | 160 | 0.603814 | # Generated by Django 3.1.2 on 2020-10-22 14:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters three fields of the
    # warriors_app.Warrior model created in 0001_initial.

    dependencies = [
        ('warriors_app', '0001_initial'),
    ]

    operations = [
        # 'level': integer with default 0 (verbose_name is Russian for
        # "Level").
        migrations.AlterField(
            model_name='warrior',
            name='level',
            field=models.IntegerField(default=0, verbose_name='Уровень'),
        ),
        # 'profession': optional foreign key to Profession; deleting a
        # Profession cascades to its warriors (on_delete=CASCADE).
        migrations.AlterField(
            model_name='warrior',
            name='profession',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='warriors_app.profession', verbose_name='Профессия'),
        ),
        # 'race': one-character choice field (student/developer/teamlead);
        # verbose_name is Russian for "Race" (spelled with a double 'с' in
        # the original — left untouched, as changing it alters the schema
        # state the migration records).
        migrations.AlterField(
            model_name='warrior',
            name='race',
            field=models.CharField(choices=[('s', 'student'), ('d', 'developer'), ('t', 'teamlead')], max_length=1, verbose_name='Расса'),
        ),
    ]
| 0 | 818 | 23 |
ca702d5d22ff82dd5598a89603a24ea1350587a6 | 73,238 | py | Python | bigg_models/queries.py | SBRG/bigg_models | cf16b53ab77ea699a1e132ce10a3eca1690e0aee | [
"MIT"
] | 57 | 2015-12-15T20:31:09.000Z | 2021-11-14T15:59:28.000Z | bigg_models/queries.py | SBRG/bigg_models | cf16b53ab77ea699a1e132ce10a3eca1690e0aee | [
"MIT"
] | 235 | 2015-09-30T17:51:25.000Z | 2022-01-06T19:48:11.000Z | bigg_models/queries.py | SBRG/bigg_models | cf16b53ab77ea699a1e132ce10a3eca1690e0aee | [
"MIT"
] | 18 | 2016-02-12T06:01:09.000Z | 2021-12-03T17:12:10.000Z | # -*- coding: utf-8 -*-
from bigg_models.version import (__version__ as version,
__api_version__ as api_version)
from cobradb.models import Model
from cobradb.models import *
from cobradb.model_loading import parse
from cobradb import settings
from cobradb.util import make_reaction_copy_id, ref_str_to_tuple, ref_tuple_to_str
from sqlalchemy import desc, asc, func, or_, and_, not_
from collections import defaultdict
from os.path import abspath, dirname, join, isfile, getsize
from itertools import chain
root_directory = abspath(dirname(__file__))
#-------------------------------------------------------------------------------
# Utils
#-------------------------------------------------------------------------------
def _apply_order_limit_offset(query, sort_column_object=None, sort_direction='ascending',
page=None, size=None):
"""Get model metabolites.
Arguments
---------
query: A sqlalchemy query
sort_column_object: An object or list of objects to order by, or None to not
order.
sort_direction: Either 'ascending' or 'descending'. Ignored if
sort_column_object is None.
page: The page, or None for all pages.
size: The page length, or None for all pages.
Returns
-------
An updated query.
"""
# sort
if sort_column_object is not None:
if sort_direction == 'descending':
direction_fn = desc
elif sort_direction == 'ascending':
direction_fn = asc
else:
raise ValueError('Bad sort direction %s' % sort_direction)
if type(sort_column_object) is list:
query = query.order_by(*[direction_fn(x) for x in sort_column_object])
else:
query = query.order_by(direction_fn(sort_column_object))
# limit and offset
if page is not None and size is not None:
page = int(page); size = int(size)
offset = page * size
query = query.limit(size).offset(offset)
return query
#-------------------------------------------------------------------------------
# Reactions
#-------------------------------------------------------------------------------
def get_universal_reactions_count(session):
    """Return how many universal reactions exist (rows in Reaction)."""
    reaction_query = session.query(Reaction)
    return reaction_query.count()
def get_universal_reactions(session, page=None, size=None, sort_column=None,
                            sort_direction='ascending', **kwargs):
    """Get universal reactions.

    Arguments
    ---------

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.

    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name'.

    """
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}

    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # Warn and fall back to the first column. (Fixed: the previous
            # `columns.itervalues().next()` is Python 2 only and raised
            # AttributeError on Python 3.)
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name))

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def get_model_reactions_count(model_bigg_id, session):
    """Return the number of reactions in the model with this BiGG ID."""
    reaction_query = (session
                      .query(Reaction)
                      .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
                      .join(Model, Model.id == ModelReaction.model_id)
                      .filter(Model.bigg_id == model_bigg_id))
    return reaction_query.count()
def get_model_reactions(
    model_bigg_id,
    session,
    page=None,
    size=None,
    sort_column=None,
    sort_direction='ascending',
    **kwargs
):
    """Get model reactions.

    Arguments
    ---------

    model_bigg_id: The bigg id of the model to retrieve reactions.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.

    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.

    """
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}

    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # Warn and fall back to the first column. (Fixed: the previous
            # `columns.itervalues().next()` is Python 2 only and raised
            # AttributeError on Python 3.)
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name, Model.bigg_id, Model.organism)
             .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
             .join(Model, Model.id == ModelReaction.model_id)
             .filter(Model.bigg_id == model_bigg_id))

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3]}
            for x in query]
def get_model_reaction(model_bigg_id, reaction_bigg_id, session):
    """Get details about this reaction in the given model. Returns multiple
    results when the reaction appears in the model multiple times.

    Raises NotFoundError when the reaction is not present in the model.
    """
    # One row per copy of the reaction in this model; column order matters
    # because rows are consumed positionally (result_db[N]) below.
    model_reaction_db = (session
                         .query(Reaction.bigg_id,
                                Reaction.name,
                                ModelReaction.id,
                                ModelReaction.gene_reaction_rule,
                                ModelReaction.lower_bound,
                                ModelReaction.upper_bound,
                                ModelReaction.objective_coefficient,
                                Reaction.pseudoreaction,
                                ModelReaction.copy_number,
                                ModelReaction.subsystem)
                         .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
                         .join(Model, Model.id == ModelReaction.model_id)
                         .filter(Model.bigg_id == model_bigg_id)
                         .filter(Reaction.bigg_id == reaction_bigg_id))
    db_count = model_reaction_db.count()
    if db_count == 0:
        raise NotFoundError('Reaction %s not found in model %s' %(reaction_bigg_id, model_bigg_id))

    # metabolites
    metabolite_db = _get_metabolite_list_for_reaction(reaction_bigg_id, session)

    # models: all other models that also contain this reaction
    model_db = get_model_list_for_reaction(reaction_bigg_id, session)
    model_result = [x for x in model_db if x != model_bigg_id]

    # database_links
    db_link_results = _get_db_links_for_model_reaction(reaction_bigg_id, session)

    # old identifiers
    old_id_results = _get_old_ids_for_model_reaction(model_bigg_id, reaction_bigg_id, session)

    # escher maps
    escher_maps = get_escher_maps_for_reaction(reaction_bigg_id, model_bigg_id,
                                               session)

    # Build one result entry per copy of the reaction in the model.
    result_list = []
    for result_db in model_reaction_db:
        gene_db = _get_gene_list_for_model_reaction(result_db[2], session)
        # Human-readable equation, using the bounds to pick the arrow.
        reaction_string = build_reaction_string(metabolite_db,
                                                result_db[4],
                                                result_db[5],
                                                False)
        # When the reaction occurs more than once, exported IDs are suffixed
        # with the copy number to keep them unique.
        exported_reaction_id = (make_reaction_copy_id(reaction_bigg_id, result_db[8])
                                if db_count > 1 else reaction_bigg_id)
        result_list.append({
            'gene_reaction_rule': result_db[3],
            'lower_bound': result_db[4],
            'upper_bound': result_db[5],
            'objective_coefficient': result_db[6],
            'genes': gene_db,
            'copy_number': result_db[8],
            'subsystem': result_db[9],
            'exported_reaction_id': exported_reaction_id,
            'reaction_string': reaction_string,
        })

    # Shared (copy-independent) fields come from the first row.
    return {
        'count': len(result_list),
        'bigg_id': reaction_bigg_id,
        'name': model_reaction_db[0][1],
        'pseudoreaction': model_reaction_db[0][7],
        'model_bigg_id': model_bigg_id,
        'metabolites': metabolite_db,
        'database_links': db_link_results,
        'old_identifiers': old_id_results,
        'other_models_with_reaction': model_result,
        'escher_maps': escher_maps,
        'results': result_list
    }
#-------------------------------------------------------------------------------
# Metabolites
#-------------------------------------------------------------------------------
def get_universal_metabolites(session, page=None, size=None, sort_column=None,
                              sort_direction='ascending', **kwargs):
    """Get universal metabolites.

    Arguments
    ---------

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.

    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name'.

    """
    # get the sort column
    columns = {'bigg_id': func.lower(Component.bigg_id),
               'name': func.lower(Component.name)}

    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # Warn and fall back to the first column. (Fixed: the previous
            # `columns.itervalues().next()` is Python 2 only and raised
            # AttributeError on Python 3.)
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(Component.bigg_id, Component.name))

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def get_model_metabolites_count(model_bigg_id, session):
    """Return the number of metabolites in the model with this BiGG ID."""
    metabolite_query = (session
                        .query(Component)
                        .join(CompartmentalizedComponent,
                              CompartmentalizedComponent.component_id == Component.id)
                        .join(ModelCompartmentalizedComponent,
                              ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
                        .join(Model,
                              Model.id == ModelCompartmentalizedComponent.model_id)
                        .filter(Model.bigg_id == model_bigg_id))
    return metabolite_query.count()
def get_model_metabolites(model_bigg_id, session, page=None, size=None, sort_column=None,
                          sort_direction='ascending', **kwargs):
    """Get model metabolites.

    Arguments
    ---------

    model_bigg_id: The bigg id of the model to retrieve metabolites.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'name', 'model_bigg_id', and 'organism'.

    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name', 'compartment_bigg_id',
    'model_bigg_id', and 'organism'.

    """
    # get the sort column; 'bigg_id' sorts by metabolite then compartment
    columns = {'bigg_id': [func.lower(Component.bigg_id), func.lower(Compartment.bigg_id)],
               'name': func.lower(Component.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}

    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # Warn and fall back to the first column. (Fixed: the previous
            # `columns.itervalues().next()` is Python 2 only and raised
            # AttributeError on Python 3.)
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(
                 Component.bigg_id,
                 Component.name,
                 Model.bigg_id,
                 Model.organism,
                 Compartment.bigg_id,
             )
             .join(CompartmentalizedComponent,
                   CompartmentalizedComponent.component_id == Component.id)
             .join(ModelCompartmentalizedComponent,
                   ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
             .join(Model,
                   Model.id == ModelCompartmentalizedComponent.model_id)
             .join(Compartment, Compartment.id == CompartmentalizedComponent.compartment_id)
             .filter(Model.bigg_id == model_bigg_id))

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3], 'compartment_bigg_id': x[4]}
            for x in query]
#-------------------------------------------------------------------------------
# Models
#-------------------------------------------------------------------------------
def get_models_count(session, multistrain_off, **kwargs):
    """Count the models, optionally excluding multi-strain models."""
    model_query = session.query(Model)
    if not multistrain_off:
        return model_query.count()
    filtered_query = _add_multistrain_filter(session, model_query, Model)
    return filtered_query.count()
def get_models(
    session,
    page=None,
    size=None,
    sort_column=None,
    sort_direction='ascending',
    multistrain_off=False,
):
    """Get models and number of components.

    Arguments
    ---------

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'organism', 'metabolite_count', 'reaction_count', and 'gene_count'.

    sort_direction: Either 'ascending' or 'descending'.

    multistrain_off: If True, exclude multi-strain models from the results.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'organism', 'metabolite_count',
    'reaction_count', and 'gene_count'.

    """
    # get the sort column
    columns = {'bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism),
               'metabolite_count': ModelCount.metabolite_count,
               'reaction_count': ModelCount.reaction_count,
               'gene_count': ModelCount.gene_count}

    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # Warn and fall back to the first column. (Fixed: the previous
            # `columns.itervalues().next()` is Python 2 only and raised
            # AttributeError on Python 3.)
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))

    # set up the query; ModelCount holds the precomputed component counts
    query = (session
             .query(Model.bigg_id, Model.organism, ModelCount.metabolite_count,
                    ModelCount.reaction_count, ModelCount.gene_count)
             .join(ModelCount, ModelCount.model_id == Model.id))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object,
                                      sort_direction, page, size)

    return [{
        'bigg_id': x[0],
        'organism': x[1],
        'metabolite_count': x[2],
        'reaction_count': x[3],
        'gene_count': x[4],
    } for x in query]
def get_model_list(session):
    """Return a sorted list of all model BiGG IDs, for advanced search."""
    model_id_query = (session
                      .query(Model.bigg_id)
                      .order_by(Model.bigg_id)
                      )
    # Fixed: the local variable previously shadowed the builtin `list`.
    model_ids = [x[0] for x in model_id_query]
    # Sort again in Python so the result is deterministic regardless of the
    # database collation.
    model_ids.sort()
    return model_ids
def get_model_json_string(model_bigg_id):
    """Get the model JSON for download.

    Reads ``<model_dump_directory>/<model_bigg_id>.json`` and returns its
    contents as a string. Raises NotFoundError when the file is missing.
    """
    path = join(settings.model_dump_directory,
                model_bigg_id + '.json')
    try:
        with open(path, 'r') as f:
            data = f.read()
    except IOError as e:
        # Fixed: exceptions have no `.message` attribute on Python 3, so the
        # old `e.message` raised AttributeError instead of NotFoundError.
        raise NotFoundError(str(e)) from e
    return data
#-------------------------------------------------------------------------------
# Genes
#-------------------------------------------------------------------------------
def get_model_genes_count(model_bigg_id, session):
    """Return the number of genes in the model with this BiGG ID."""
    gene_query = (session.query(Gene)
                  .join(ModelGene)
                  .join(Model)
                  .filter(Model.bigg_id == model_bigg_id))
    return gene_query.count()
def get_model_genes(model_bigg_id, session, page=None, size=None,
                    sort_column=None, sort_direction='ascending', **kwargs):
    """Retrieve the genes of one model, optionally sorted and paged.

    Arguments
    ---------

    model_bigg_id: The bigg id of the model to retrieve genes.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.

    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.

    """
    # Map user-facing sort names to case-insensitive sort expressions.
    columns = {'bigg_id': func.lower(Gene.bigg_id),
               'name': func.lower(Gene.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}

    sort_column_object = None
    if sort_column is not None:
        if sort_column not in columns:
            raise ValueError('Bad sort_column name: %s' % sort_column)
        sort_column_object = columns[sort_column]

    # Genes of the requested model only.
    query = (session
             .query(Gene.bigg_id, Gene.name, Model.bigg_id, Model.organism)
             .join(ModelGene, ModelGene.gene_id == Gene.id)
             .join(Model, Model.id == ModelGene.model_id)
             .filter(Model.bigg_id == model_bigg_id))

    # Apply ordering and paging.
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    keys = ('bigg_id', 'name', 'model_bigg_id', 'organism')
    return [dict(zip(keys, row)) for row in query]
#---------------------------------------------------------------------
# Genomes
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# old IDs
#---------------------------------------------------------------------
def _compile_db_links(results):
"""Return links for the results that have a url_prefix."""
links = {}
sources = defaultdict(list)
for data_source_bigg_id, data_source_name, url_prefix, synonym in results:
if url_prefix is None:
continue
link = url_prefix + synonym
sources[data_source_name].append({'link': link, 'id': synonym})
return dict(sources)
#-----------
# Utilities
#-----------
# Escher maps
#-------
# Genes
#-------
#-------------------------------------------------------------------------------
# Search
#-------------------------------------------------------------------------------
# Similarity thresholds for the search queries below: a candidate must score
# at least this high with func.similarity() against the search string to be
# returned.
name_sim_cutoff = 0.3
bigg_id_sim_cutoff = 0.2
# NOTE(review): 1.0 presumably means gene BiGG IDs must match exactly —
# confirm against the gene search query (not visible in this chunk).
gene_bigg_id_sim_cutoff = 1.0
organism_sim_cutoff = 0.1
def search_for_universal_reactions_count(
        query_string,
        session,
        multistrain_off,
):
    """Return the number of universal reactions matching query_string."""
    # fuzzy match on the BiGG ID, or on a non-empty reaction name
    bigg_id_match = func.similarity(Reaction.bigg_id, query_string) >= bigg_id_sim_cutoff
    name_match = and_(func.similarity(Reaction.name, query_string) >= name_sim_cutoff,
                      Reaction.name != '')
    query = (session
             .query(Reaction.bigg_id, Reaction.name)
             .filter(or_(bigg_id_match, name_match)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Reaction)
    return query.count()
def search_for_universal_reactions(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for universal reactions.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.
    sort_direction: Either 'ascending' or 'descending'.
    multistrain_off: If True, exclude results only found in multi-strain models.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(Reaction.bigg_id, query_string)
    sim_name = func.similarity(Reaction.name, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only and raises
            # AttributeError on Python 3; fall back to the first column
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Reaction.name != ''))))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Reaction)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def search_for_reactions(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        limit_models=None,
):
    """Search for model reactions.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.
    limit_models: search for results in only this array of model BiGG IDs.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(Reaction.bigg_id, query_string)
    sim_name = func.similarity(Reaction.name, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Reaction.bigg_id, Model.bigg_id, Model.organism, Reaction.name)
             .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
             .join(Model, Model.id == ModelReaction.model_id)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Reaction.name != ''))))
    # fix: restrict to the requested models BEFORE applying LIMIT/OFFSET --
    # SQLAlchemy disallows filter() after limit(), and even when allowed the
    # pagination would apply to the unfiltered result set
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'model_bigg_id': x[1], 'organism': x[2], 'name': x[3]}
            for x in query]
def reaction_with_hash(hash, session):
    """Return the universal reaction whose reaction_hash equals *hash*.

    Raises NotFoundError if no reaction has that hash.
    """
    found = (session
             .query(Reaction.bigg_id, Reaction.name)
             .filter(Reaction.reaction_hash == hash)
             .first())
    if found is None:
        raise NotFoundError
    bigg_id, name = found
    return {'bigg_id': bigg_id, 'model_bigg_id': 'universal', 'name': name}
def search_for_universal_metabolites_count(
        query_string,
        session,
        multistrain_off,
):
    """Return the number of universal metabolites matching query_string."""
    # fuzzy match on the BiGG ID, or on a non-empty metabolite name
    bigg_id_match = func.similarity(Component.bigg_id, query_string) >= bigg_id_sim_cutoff
    name_match = and_(func.similarity(Component.name, query_string) >= name_sim_cutoff,
                      Component.name != '')
    query = (session
             .query(Component.bigg_id, Component.name)
             .filter(or_(bigg_id_match, name_match)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Component)
    return query.count()
def search_for_universal_metabolites(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for universal Metabolites.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.
    sort_direction: Either 'ascending' or 'descending'.
    multistrain_off: If True, exclude results only found in multi-strain models.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(Component.bigg_id, query_string)
    sim_name = func.similarity(Component.name, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Component.bigg_id),
               'name': func.lower(Component.name)}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Component.bigg_id, Component.name)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Component.name != ''))))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Component)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def search_for_metabolites(query_string, session, page=None, size=None,
                           sort_column=None, sort_direction='ascending',
                           limit_models=None, strict=False):
    """Search for model metabolites.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.
    limit_models: search for results in only this array of model BiGG IDs.
    strict: if True, then only look for exact matches to the BiGG ID, with the
    compartment.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(Component.bigg_id, query_string)
    sim_name = func.similarity(Component.name, query_string)
    # get the sort column
    columns = {'bigg_id': [func.lower(Component.bigg_id), func.lower(Compartment.bigg_id)],
               'name': func.lower(Component.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}
    # fix: removed a dead duplicate of this sort-column block whose result
    # was immediately overwritten below
    if sort_column is None:
        if strict:
            # just sort by bigg ID
            sort_column_object = columns['bigg_id']
            sort_direction = 'ascending'
        else:
            # sort by most similar
            sort_column_object = func.greatest(sim_name, sim_bigg_id)
            sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Component.bigg_id, Compartment.bigg_id, Model.bigg_id,
                    Model.organism, Component.name)
             .join(CompartmentalizedComponent,
                   CompartmentalizedComponent.component_id == Component.id)
             .join(Compartment,
                   Compartment.id == CompartmentalizedComponent.compartment_id)
             .join(ModelCompartmentalizedComponent,
                   ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
             .join(Model, Model.id == ModelCompartmentalizedComponent.model_id))
    # whether to allow fuzzy search
    if strict:
        try:
            metabolite_bigg_id, compartment_bigg_id = parse.split_compartment(query_string)
        except Exception:
            return []
        query = (query
                 .filter(Component.bigg_id == metabolite_bigg_id)
                 .filter(Compartment.bigg_id == compartment_bigg_id))
    else:
        query = (query
                 .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                             and_(sim_name >= name_sim_cutoff,
                                  Component.name != ''))))
    # fix: restrict to the requested models BEFORE applying LIMIT/OFFSET --
    # SQLAlchemy disallows filter() after limit(), and the pagination must
    # apply to the filtered result set
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'compartment_bigg_id': x[1], 'model_bigg_id': x[2],
             'organism': x[3], 'name': x[4]}
            for x in query]
def search_for_genes_count(
        query_string,
        session,
        limit_models=None,
        multistrain_off=False,
):
    """Return the number of genes matching query_string, optionally
    restricted to the models in limit_models."""
    # fuzzy match on the gene BiGG ID, or on a non-empty gene name
    sim_bigg_id = func.similarity(Gene.bigg_id, query_string)
    bigg_id_match = sim_bigg_id >= gene_bigg_id_sim_cutoff
    name_match = and_(func.similarity(Gene.name, query_string) >= name_sim_cutoff,
                      Gene.name != '')
    query = (session
             .query(Gene.bigg_id, Model.bigg_id, Gene.name, sim_bigg_id, Model.organism)
             .join(ModelGene, ModelGene.gene_id == Gene.id)
             .join(Model, Model.id == ModelGene.model_id)
             .filter(or_(bigg_id_match, name_match)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Gene)
    # limit the models
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))
    return query.count()
def search_for_genes(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        limit_models=None,
        multistrain_off=False,
):
    """Search for genes.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.
    limit_models: search for results in only this array of model BiGG IDs.
    multistrain_off: If True, exclude results only found in multi-strain models.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(GenomeRegion.bigg_id, query_string)
    sim_name = func.similarity(Gene.name, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Gene.bigg_id),
               'name': func.lower(Gene.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(GenomeRegion.bigg_id, Gene.name, Model.bigg_id, Model.organism)
             .join(Gene)
             .join(ModelGene)
             .join(Model)
             .filter(or_(sim_bigg_id >= gene_bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Gene.name != ''))))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Gene)
    # fix: restrict to the requested models BEFORE applying LIMIT/OFFSET --
    # SQLAlchemy disallows filter() after limit(), and the pagination must
    # apply to the filtered result set
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3]}
            for x in query]
def search_for_models_count(query_string, session, multistrain_off):
    """Return the number of models matching query_string by BiGG ID or
    organism."""
    bigg_id_match = func.similarity(Model.bigg_id, query_string) >= bigg_id_sim_cutoff
    organism_match = func.similarity(Model.organism, query_string) >= organism_sim_cutoff
    query = (session
             .query(Model.bigg_id, ModelCount, Model.organism)
             .join(ModelCount)
             .filter(or_(bigg_id_match, organism_match)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)
    return query.count()
def search_for_models(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for models.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'organism', 'metabolite_count', 'reaction_count', and 'gene_count'.
    sort_direction: Either 'ascending' or 'descending'.
    multistrain_off: If True, exclude multi-strain models from the results.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'organism', 'metabolite_count',
    'reaction_count', and 'gene_count'.
    """
    # models by bigg_id
    sim_bigg_id = func.similarity(Model.bigg_id, query_string)
    sim_organism = func.similarity(Model.organism, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism),
               'metabolite_count': ModelCount.metabolite_count,
               'reaction_count': ModelCount.reaction_count,
               'gene_count': ModelCount.gene_count}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_organism)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Model.bigg_id, Model.organism, ModelCount.metabolite_count,
                    ModelCount.reaction_count, ModelCount.gene_count)
             .join(ModelCount)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         sim_organism >= organism_sim_cutoff)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object,
                                      sort_direction, page, size)
    return [{'bigg_id': x[0], 'organism': x[1], 'metabolite_count': x[2],
             'reaction_count': x[3], 'gene_count': x[4]}
            for x in query]
def search_ids_fast(query_string, session, limit=None):
    """Prefix search across gene, reaction, metabolite, model, and organism
    identifiers and names; used for autocomplete."""
    pattern = query_string + '%'
    gene_q = (session
              .query(Gene.bigg_id)
              .join(ModelGene)
              .filter(Gene.bigg_id.ilike(pattern)))
    gene_name_q = (session
                   .query(Gene.name)
                   .join(ModelGene)
                   .filter(Gene.name.ilike(pattern)))
    # the remaining sources need no joins
    other_queries = [
        session.query(Reaction.bigg_id).filter(Reaction.bigg_id.ilike(pattern)),
        session.query(Reaction.name).filter(Reaction.name.ilike(pattern)),
        session.query(Component.bigg_id).filter(Component.bigg_id.ilike(pattern)),
        session.query(Component.name).filter(Component.name.ilike(pattern)),
        session.query(Model.bigg_id).filter(Model.bigg_id.ilike(pattern)),
        session.query(Model.organism).filter(Model.organism.ilike(pattern)),
    ]
    combined = gene_q.union(gene_name_q, *other_queries)
    if limit is not None:
        combined = combined.limit(limit)
    return [row[0] for row in combined]
# advanced search by external database ID
# version
| 36.582418 | 122 | 0.583399 | # -*- coding: utf-8 -*-
from bigg_models.version import (__version__ as version,
__api_version__ as api_version)
from cobradb.models import Model
from cobradb.models import *
from cobradb.model_loading import parse
from cobradb import settings
from cobradb.util import make_reaction_copy_id, ref_str_to_tuple, ref_tuple_to_str
from sqlalchemy import desc, asc, func, or_, and_, not_
from collections import defaultdict
from os.path import abspath, dirname, join, isfile, getsize
from itertools import chain
root_directory = abspath(dirname(__file__))
#-------------------------------------------------------------------------------
# Utils
#-------------------------------------------------------------------------------
class NotFoundError(Exception):
    """Raised when a requested database object does not exist."""
    pass
class RedirectError(Exception):
    """Raised with the current BiGG ID when a deprecated ID is requested."""
    pass
def _shorten_name(name, l=100):
if name is None:
return None
if len(name) > l:
return name[:l] + '...'
else:
return name
def _apply_order_limit_offset(query, sort_column_object=None, sort_direction='ascending',
page=None, size=None):
"""Get model metabolites.
Arguments
---------
query: A sqlalchemy query
sort_column_object: An object or list of objects to order by, or None to not
order.
sort_direction: Either 'ascending' or 'descending'. Ignored if
sort_column_object is None.
page: The page, or None for all pages.
size: The page length, or None for all pages.
Returns
-------
An updated query.
"""
# sort
if sort_column_object is not None:
if sort_direction == 'descending':
direction_fn = desc
elif sort_direction == 'ascending':
direction_fn = asc
else:
raise ValueError('Bad sort direction %s' % sort_direction)
if type(sort_column_object) is list:
query = query.order_by(*[direction_fn(x) for x in sort_column_object])
else:
query = query.order_by(direction_fn(sort_column_object))
# limit and offset
if page is not None and size is not None:
page = int(page); size = int(size)
offset = page * size
query = query.limit(size).offset(offset)
return query
def _add_pub_filter(query):
    """Exclude models published under PMIDs 24277855 and 27667363.

    Joins the query against Publication via PublicationModel; the query must
    already involve Model. Used by _add_multistrain_filter, so these two
    PMIDs presumably identify the multi-strain studies -- confirm.
    """
    return (query
            .join(PublicationModel, PublicationModel.model_id == Model.id)
            .join(Publication, Publication.id == PublicationModel.publication_id)
            .filter(not_(and_(
                Publication.reference_id.in_(['24277855', '27667363']),
                Publication.reference_type == 'pmid',
            ))))
def _add_multistrain_filter(session, query, from_class):
    """Restrict *query* to rows not exclusive to the multi-strain studies.

    Arguments
    ---------
    session: An ome session object.
    query: The query to filter.
    from_class: The mapped class the query selects from. Must be Reaction,
    Component, Model, or Gene.

    Raises
    ------
    ValueError: if from_class is not one of the supported classes.
    """
    if from_class is Reaction:
        # keep reactions that appear in at least one non-multistrain model
        return query.filter(Reaction.id.in_(_add_pub_filter(
            session.query(Reaction.id)
            .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
            .join(Model, Model.id == ModelReaction.model_id)
        )))
    elif from_class is Component:
        # keep components that appear in at least one non-multistrain model
        return query.filter(Component.id.in_(_add_pub_filter(
            session.query(Component.id)
            .join(CompartmentalizedComponent,
                  CompartmentalizedComponent.component_id == Component.id)
            .join(ModelCompartmentalizedComponent,
                  ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
            .join(Model, Model.id == ModelCompartmentalizedComponent.model_id)
        )))
    elif from_class is Model or from_class is Gene:
        # these queries already involve Model, so filter directly
        return _add_pub_filter(query)
    else:
        # fix: was a bare `raise Exception` with no message
        raise ValueError('Unsupported class for multistrain filter: %s'
                         % from_class)
#-------------------------------------------------------------------------------
# Reactions
#-------------------------------------------------------------------------------
def get_universal_reactions_count(session):
    """Return the number of universal reactions."""
    reaction_query = session.query(Reaction)
    return reaction_query.count()
def get_universal_reactions(session, page=None, size=None, sort_column=None,
                            sort_direction='ascending', **kwargs):
    """Get universal reactions.

    Arguments
    ---------
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name'.
    """
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only and raises
            # AttributeError on Python 3; fall back to the first column
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def get_model_reactions_count(model_bigg_id, session):
    """Return the number of reactions in the given model."""
    query = (session
             .query(Reaction)
             .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
             .join(Model, Model.id == ModelReaction.model_id)
             .filter(Model.bigg_id == model_bigg_id))
    return query.count()
def get_model_reactions(
        model_bigg_id,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        **kwargs
):
    """Get model reactions.

    Arguments
    ---------
    model_bigg_id: The bigg id of the model to retrieve reactions.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name, Model.bigg_id, Model.organism)
             .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
             .join(Model, Model.id == ModelReaction.model_id)
             .filter(Model.bigg_id == model_bigg_id))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3]}
            for x in query]
def _get_metabolite_list_for_reaction(reaction_id, session):
    """Return the metabolites participating in a reaction.

    Returns a list of dicts with keys 'bigg_id', 'stoichiometry',
    'compartment_bigg_id', and 'name'.
    """
    result_db = (session
                 .query(Component.bigg_id,
                        ReactionMatrix.stoichiometry,
                        Compartment.bigg_id,
                        Component.name)
                 # Component -> CompartmentalizedComponent -> ReactionMatrix
                 .join(CompartmentalizedComponent,
                       CompartmentalizedComponent.component_id == Component.id)
                 .join(ReactionMatrix,
                       ReactionMatrix.compartmentalized_component_id == CompartmentalizedComponent.id)
                 # -> Reaction
                 .join(Reaction,
                       Reaction.id == ReactionMatrix.reaction_id)
                 # -> Compartment
                 .join(Compartment,
                       Compartment.id == CompartmentalizedComponent.compartment_id)
                 # filter by the reaction BiGG ID
                 .filter(Reaction.bigg_id == reaction_id)
                 .all())
    return [{'bigg_id': x[0], 'stoichiometry': x[1], 'compartment_bigg_id': x[2],
             'name': x[3]} for x in result_db]
def get_reaction_and_models(reaction_bigg_id, session):
    """Return universal details for a reaction and the models containing it.

    Raises RedirectError (carrying the current BiGG ID) when
    reaction_bigg_id is a deprecated ID, and NotFoundError when no reaction
    matches at all.
    """
    result_db = (session
                 .query(Reaction.bigg_id,
                        Reaction.name,
                        Reaction.pseudoreaction,
                        Model.bigg_id,
                        Model.organism)
                 .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
                 .join(Model, Model.id == ModelReaction.model_id)
                 .filter(Reaction.bigg_id == reaction_bigg_id)
                 .distinct()
                 .all())
    if len(result_db) == 0:
        # Look for a result with a deprecated ID
        res_db = (session
                  .query(DeprecatedID, Reaction)
                  .filter(DeprecatedID.type == 'reaction')
                  .filter(DeprecatedID.deprecated_id == reaction_bigg_id)
                  .join(Reaction, Reaction.id == DeprecatedID.ome_id)
                  .first())
        if res_db:
            raise RedirectError(res_db[1].bigg_id)
        else:
            raise NotFoundError('No Reaction found with BiGG ID ' + reaction_bigg_id)
    db_link_results = _get_db_links_for_reaction(reaction_bigg_id, session)
    old_id_results = _get_old_ids_for_reaction(reaction_bigg_id, session)
    # metabolites
    metabolite_db = _get_metabolite_list_for_reaction(reaction_bigg_id, session)
    # display string; -1000/1000 appear to be placeholder bounds for the
    # universal (model-independent) view -- confirm against build_reaction_string
    reaction_string = build_reaction_string(metabolite_db, -1000, 1000, False)
    return {
        'bigg_id': result_db[0][0],
        'name': result_db[0][1],
        'pseudoreaction': result_db[0][2],
        'database_links': db_link_results,
        'old_identifiers': old_id_results,
        'metabolites': metabolite_db,
        'reaction_string': reaction_string,
        'models_containing_reaction': [{'bigg_id': x[3], 'organism': x[4]}
                                       for x in result_db],
    }
def get_reactions_for_model(model_bigg_id, session):
    """Return every reaction in the model as dicts with keys 'bigg_id',
    'name', and 'organism'."""
    rows = (session
            .query(Reaction.bigg_id, Reaction.name, Model.organism)
            .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
            .join(Model, Model.id == ModelReaction.model_id)
            .filter(Model.bigg_id == model_bigg_id)
            .all())
    return [{'bigg_id': bigg_id, 'name': name, 'organism': organism}
            for bigg_id, name, organism in rows]
def _get_gene_list_for_model_reaction(model_reaction_id, session):
    """Return the genes for a ModelReaction as dicts with 'bigg_id' and
    'name'."""
    rows = (session
            .query(Gene.bigg_id, Gene.name)
            .join(ModelGene)
            .join(GeneReactionMatrix)
            .filter(GeneReactionMatrix.model_reaction_id == model_reaction_id))
    return [{'bigg_id': gene_bigg_id, 'name': gene_name}
            for gene_bigg_id, gene_name in rows]
def get_model_reaction(model_bigg_id, reaction_bigg_id, session):
    """Get details about this reaction in the given model. Returns multiple
    results when the reaction appears in the model multiple times.

    Raises NotFoundError when the reaction is not present in the model.
    """
    model_reaction_db = (session
                         .query(Reaction.bigg_id,
                                Reaction.name,
                                ModelReaction.id,
                                ModelReaction.gene_reaction_rule,
                                ModelReaction.lower_bound,
                                ModelReaction.upper_bound,
                                ModelReaction.objective_coefficient,
                                Reaction.pseudoreaction,
                                ModelReaction.copy_number,
                                ModelReaction.subsystem)
                         .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
                         .join(Model, Model.id == ModelReaction.model_id)
                         .filter(Model.bigg_id == model_bigg_id)
                         .filter(Reaction.bigg_id == reaction_bigg_id))
    db_count = model_reaction_db.count()
    if db_count == 0:
        raise NotFoundError('Reaction %s not found in model %s' %(reaction_bigg_id, model_bigg_id))
    # metabolites
    metabolite_db = _get_metabolite_list_for_reaction(reaction_bigg_id, session)
    # models
    model_db = get_model_list_for_reaction(reaction_bigg_id, session)
    # NOTE(review): model_db contains dicts ({'bigg_id': ...}), so
    # `x != model_bigg_id` is always True and the current model is never
    # actually excluded here -- confirm whether x['bigg_id'] was intended
    model_result = [x for x in model_db if x != model_bigg_id]
    # database_links
    db_link_results = _get_db_links_for_model_reaction(reaction_bigg_id, session)
    # old identifiers
    old_id_results = _get_old_ids_for_model_reaction(model_bigg_id, reaction_bigg_id, session)
    # escher maps
    escher_maps = get_escher_maps_for_reaction(reaction_bigg_id, model_bigg_id,
                                               session)
    result_list = []
    for result_db in model_reaction_db:
        # genes for this particular copy of the reaction
        gene_db = _get_gene_list_for_model_reaction(result_db[2], session)
        reaction_string = build_reaction_string(metabolite_db,
                                                result_db[4],
                                                result_db[5],
                                                False)
        # when the reaction has multiple copies, disambiguate the exported ID
        # with the copy number
        exported_reaction_id = (make_reaction_copy_id(reaction_bigg_id, result_db[8])
                                if db_count > 1 else reaction_bigg_id)
        result_list.append({
            'gene_reaction_rule': result_db[3],
            'lower_bound': result_db[4],
            'upper_bound': result_db[5],
            'objective_coefficient': result_db[6],
            'genes': gene_db,
            'copy_number': result_db[8],
            'subsystem': result_db[9],
            'exported_reaction_id': exported_reaction_id,
            'reaction_string': reaction_string,
        })
    return {
        'count': len(result_list),
        'bigg_id': reaction_bigg_id,
        'name': model_reaction_db[0][1],
        'pseudoreaction': model_reaction_db[0][7],
        'model_bigg_id': model_bigg_id,
        'metabolites': metabolite_db,
        'database_links': db_link_results,
        'old_identifiers': old_id_results,
        'other_models_with_reaction': model_result,
        'escher_maps': escher_maps,
        'results': result_list
    }
def get_reaction(reaction_bigg_id, session):
    """Return the Reaction row with the given BiGG ID, or None."""
    query = session.query(Reaction).filter(Reaction.bigg_id == reaction_bigg_id)
    return query.first()
#-------------------------------------------------------------------------------
# Metabolites
#-------------------------------------------------------------------------------
def get_universal_metabolites_count(session):
    """Return the number of universal metabolites."""
    metabolite_query = session.query(Component)
    return metabolite_query.count()
def get_universal_metabolites(session, page=None, size=None, sort_column=None,
                              sort_direction='ascending', **kwargs):
    """Get universal metabolites.

    Arguments
    ---------
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name'.
    """
    # get the sort column
    columns = {'bigg_id': func.lower(Component.bigg_id),
               'name': func.lower(Component.name)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Component.bigg_id, Component.name))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def get_model_metabolites_count(model_bigg_id, session):
    """Return the number of metabolites in the given model."""
    query = (session
             .query(Component)
             .join(CompartmentalizedComponent,
                   CompartmentalizedComponent.component_id == Component.id)
             .join(ModelCompartmentalizedComponent,
                   ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
             .join(Model,
                   Model.id == ModelCompartmentalizedComponent.model_id)
             .filter(Model.bigg_id == model_bigg_id))
    return query.count()
def get_model_metabolites(model_bigg_id, session, page=None, size=None, sort_column=None,
                          sort_direction='ascending', **kwargs):
    """Get model metabolites.

    Arguments
    ---------
    model_bigg_id: The bigg id of the model to retrieve metabolites.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'name', 'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'compartment_bigg_id',
    'model_bigg_id', and 'organism'.
    """
    # get the sort column
    columns = {'bigg_id': [func.lower(Component.bigg_id), func.lower(Compartment.bigg_id)],
               'name': func.lower(Component.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(
                 Component.bigg_id,
                 Component.name,
                 Model.bigg_id,
                 Model.organism,
                 Compartment.bigg_id,
             )
             .join(CompartmentalizedComponent,
                   CompartmentalizedComponent.component_id == Component.id)
             .join(ModelCompartmentalizedComponent,
                   ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
             .join(Model,
                   Model.id == ModelCompartmentalizedComponent.model_id)
             .join(Compartment, Compartment.id == CompartmentalizedComponent.compartment_id)
             .filter(Model.bigg_id == model_bigg_id))
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3], 'compartment_bigg_id': x[4]}
            for x in query]
#-------------------------------------------------------------------------------
# Models
#-------------------------------------------------------------------------------
def get_models_count(session, multistrain_off, **kwargs):
    """Return the number of models in the database."""
    model_query = session.query(Model)
    if multistrain_off:
        model_query = _add_multistrain_filter(session, model_query, Model)
    return model_query.count()
def get_models(
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Get models and number of components.

    Arguments
    ---------
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'organism', 'metabolite_count', 'reaction_count', and 'gene_count'.
    sort_direction: Either 'ascending' or 'descending'.
    multistrain_off: If True, exclude multi-strain models.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'organism', 'metabolite_count',
    'reaction_count', and 'gene_count'.
    """
    # get the sort column
    columns = {'bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism),
               'metabolite_count': ModelCount.metabolite_count,
               'reaction_count': ModelCount.reaction_count,
               'gene_count': ModelCount.gene_count}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            # fix: dict.itervalues().next() is Python 2 only; use the
            # first column as the fallback
            print('Bad sort_column name: %s' % sort_column)
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Model.bigg_id, Model.organism, ModelCount.metabolite_count,
                    ModelCount.reaction_count, ModelCount.gene_count)
             .join(ModelCount, ModelCount.model_id == Model.id))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object,
                                      sort_direction, page, size)
    return [{
        'bigg_id': x[0],
        'organism': x[1],
        'metabolite_count': x[2],
        'reaction_count': x[3],
        'gene_count': x[4],
    } for x in query]
def get_model_list_for_reaction(reaction_bigg_id, session):
    """Return the BiGG IDs of all models that contain the reaction."""
    rows = (session
            .query(Model.bigg_id)
            .join(ModelReaction, ModelReaction.model_id == Model.id)
            .join(Reaction, Reaction.id == ModelReaction.reaction_id)
            .filter(Reaction.bigg_id == reaction_bigg_id)
            .distinct()
            .all())
    return [{'bigg_id': model_bigg_id} for (model_bigg_id,) in rows]
def get_model_list_for_metabolite(metabolite_bigg_id, session):
    """Return (model, compartment) pairs where the metabolite occurs."""
    rows = (session
            .query(Model.bigg_id, Compartment.bigg_id)
            .join(ModelCompartmentalizedComponent)
            .join(CompartmentalizedComponent)
            .join(Compartment)
            .join(Component)
            .filter(Component.bigg_id == metabolite_bigg_id)
            )
    return [{'bigg_id': model_id, 'compartment_bigg_id': compartment_id}
            for model_id, compartment_id in rows]
def get_model_and_counts(model_bigg_id, session, static_model_dir=None,
                         static_multistrain_dir=None):
    """Return summary details for one model.

    Includes component counts, genome reference, publication reference,
    Escher maps, and (optionally) sizes of the downloadable files.

    Arguments
    ---------

    model_bigg_id: The BiGG ID of the model.

    session: An ome session object.

    static_model_dir: If given, look up xml/mat/json download file sizes in
    this directory and add '<ext>_size' keys to the result.

    static_multistrain_dir: If given, also look up the multistrain zip here.

    Raises NotFoundError when the model does not exist.
    """
    # row is (Model, ModelCount, Genome-or-None, reference_type, reference_id)
    model_db = (session
                .query(Model, ModelCount, Genome, Publication.reference_type,
                       Publication.reference_id)
                .join(ModelCount, ModelCount.model_id == Model.id)
                .outerjoin(Genome, Genome.id == Model.genome_id)
                .outerjoin(PublicationModel, PublicationModel.model_id == Model.id)
                .outerjoin(Publication, Publication.id == PublicationModel.publication_id)
                .filter(Model.bigg_id == model_bigg_id)
                .first())
    if model_db is None:
        raise NotFoundError('No Model found with BiGG ID ' + model_bigg_id)
    # genome ref
    if model_db[2] is None:
        # Genome is outer-joined, so models without a genome get None here
        genome_ref_string = genome_name = None
    else:
        genome_name = model_db[2].accession_value
        genome_ref_string = ref_tuple_to_str(model_db[2].accession_type,
                                             genome_name)

    escher_maps = get_escher_maps_for_model(model_db[0].id, session)

    result = {
        'model_bigg_id': model_db[0].bigg_id,
        'published_filename': model_db[0].published_filename,
        'organism': getattr(model_db[2], 'organism', None),
        'genome_name': genome_name,
        'genome_ref_string': genome_ref_string,
        'metabolite_count': model_db[1].metabolite_count,
        'reaction_count': model_db[1].reaction_count,
        'gene_count': model_db[1].gene_count,
        'reference_type': model_db[3],
        'reference_id': model_db[4],
        'escher_maps': escher_maps,
        'last_updated': session.query(DatabaseVersion).first().date_time.strftime('%b %d, %Y'),
    }
    if static_model_dir:
        # get filesizes
        for ext in ('xml', 'xml_gz', 'mat', 'mat_gz', 'json', 'json_gz', 'multistrain'):
            if ext == 'multistrain':
                if not static_multistrain_dir:
                    continue
                fpath = join(static_multistrain_dir, model_bigg_id + '_multistrain.zip')
            else:
                # e.g. ext 'xml_gz' maps to file '<model>.xml.gz'
                fpath = join(static_model_dir,
                             model_bigg_id + "." + ext.replace("_", "."))
            byte_size = getsize(fpath) if isfile(fpath) else 0
            # human-readable size; missing files (byte_size == 0) add no key
            if byte_size > 1048576:
                result[ext + "_size"] = "%.1f MB" % (byte_size / 1048576.)
            elif byte_size > 1024:
                result[ext + "_size"] = "%.1f kB" % (byte_size / 1024.)
            elif byte_size > 0:
                result[ext + "_size"] = "%d B" % (byte_size)
    return result
def get_model_list(session):
    """Return a sorted list of all model BiGG IDs, for advanced search."""
    model_list = (session
                  .query(Model.bigg_id)
                  .order_by(Model.bigg_id)
                  )
    # The original bound the result to a local named ``list``, shadowing the
    # builtin. sorted() keeps the explicit Python-side ordering guarantee.
    return sorted(x[0] for x in model_list)
def get_model_json_string(model_bigg_id):
    """Get the model JSON for download.

    Raises NotFoundError if the dump file cannot be read.
    """
    path = join(settings.model_dump_directory,
                model_bigg_id + '.json')
    try:
        with open(path, 'r') as f:
            data = f.read()
    except IOError as e:
        # BUGFIX: exceptions have no .message attribute on Python 3
        raise NotFoundError(str(e))
    return data
#-------------------------------------------------------------------------------
# Genes
#-------------------------------------------------------------------------------
def get_model_genes_count(model_bigg_id, session):
    """Return how many genes the given model contains."""
    query = (session.query(Gene)
             .join(ModelGene)
             .join(Model)
             .filter(Model.bigg_id == model_bigg_id))
    return query.count()
def get_model_genes(model_bigg_id, session, page=None, size=None,
                    sort_column=None, sort_direction='ascending', **kwargs):
    """List the genes of a model, with optional sorting and pagination.

    Arguments
    ---------

    model_bigg_id: The BiGG ID of the model whose genes to list.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: One of 'bigg_id', 'name', 'model_bigg_id', 'organism';
    raises ValueError for anything else.

    sort_direction: Either 'ascending' or 'descending'.

    Returns
    -------

    A list of dicts with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # resolve the requested sort column (case-insensitive sort via lower())
    sortable = {'bigg_id': func.lower(Gene.bigg_id),
                'name': func.lower(Gene.name),
                'model_bigg_id': func.lower(Model.bigg_id),
                'organism': func.lower(Model.organism)}
    if sort_column is None:
        sort_column_object = None
    else:
        try:
            sort_column_object = sortable[sort_column]
        except KeyError:
            raise ValueError('Bad sort_column name: %s' % sort_column)

    query = (session
             .query(Gene.bigg_id, Gene.name, Model.bigg_id, Model.organism)
             .join(ModelGene, ModelGene.gene_id == Gene.id)
             .join(Model, Model.id == ModelGene.model_id)
             .filter(Model.bigg_id == model_bigg_id))
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    keys = ('bigg_id', 'name', 'model_bigg_id', 'organism')
    return [dict(zip(keys, row)) for row in query]
def get_model_gene(gene_bigg_id, model_bigg_id, session):
    """Return full details for one gene within one model.

    Includes chromosome position, sequences, genome reference, the reactions
    whose gene-reaction rules involve the gene, external database links, and
    old identifiers.

    Raises NotFoundError when the gene is not present in the model.
    """
    # tuple indices: 0 bigg_id, 1 name, 2 leftpos, 3 rightpos, 4 model bigg_id,
    # 5 strand, 6 chromosome accession, 7-8 genome accession (type, value),
    # 9 mapped_to_genbank, 10 dna_sequence, 11 protein_sequence
    result_db = (session
                 .query(Gene.bigg_id,
                        Gene.name,
                        Gene.leftpos,
                        Gene.rightpos,
                        Model.bigg_id,
                        Gene.strand,
                        Chromosome.ncbi_accession,
                        Genome.accession_type,
                        Genome.accession_value,
                        Gene.mapped_to_genbank,
                        Gene.dna_sequence,
                        Gene.protein_sequence)
                 .join(ModelGene, ModelGene.gene_id == Gene.id)
                 .join(Model, Model.id == ModelGene.model_id)
                 .outerjoin(Genome, Genome.id == Model.genome_id)
                 .outerjoin(Chromosome, Chromosome.id == Gene.chromosome_id)
                 .filter(Gene.bigg_id == gene_bigg_id)
                 .filter(Model.bigg_id == model_bigg_id)
                 .first())
    if result_db is None:
        raise NotFoundError('Gene %s not found in model %s' %
                            (gene_bigg_id, model_bigg_id))
    # reactions in this model whose gene-reaction rule includes this gene
    reaction_db = (session
                   .query(Reaction.bigg_id,
                          ModelReaction.gene_reaction_rule,
                          Reaction.name)
                   .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
                   .join(Model, Model.id == ModelReaction.model_id)
                   .join(GeneReactionMatrix, GeneReactionMatrix.model_reaction_id == ModelReaction.id)
                   .join(ModelGene, ModelGene.id == GeneReactionMatrix.model_gene_id)
                   .join(Gene, Gene.id == ModelGene.gene_id)
                   .filter(Model.bigg_id == model_bigg_id)
                   .filter(Gene.bigg_id == gene_bigg_id)
                   )
    reaction_results = [{'bigg_id': r[0], 'gene_reaction_rule': r[1],
                         'name': r[2]} for r in reaction_db]
    synonym_db = _get_db_links_for_model_gene(gene_bigg_id, session)
    old_id_results = _get_old_ids_for_model_gene(gene_bigg_id, model_bigg_id,
                                                 session)
    return {
        'bigg_id': result_db[0],
        'name': result_db[1],
        'leftpos': result_db[2],
        'rightpos': result_db[3],
        'model_bigg_id': result_db[4],
        'strand': result_db[5],
        'chromosome_ncbi_accession': result_db[6],
        'genome_ref_string': ref_tuple_to_str(result_db[7], result_db[8]),
        'genome_name': result_db[8],
        'mapped_to_genbank': result_db[9],
        'dna_sequence': result_db[10],
        'protein_sequence': result_db[11],
        'reactions': reaction_results,
        'database_links': synonym_db,
        'old_identifiers': old_id_results
    }
def get_metabolite(met_bigg_id, session):
    """Return details for a universal metabolite.

    Raises RedirectError (carrying the current BiGG ID) when met_bigg_id is a
    deprecated identifier, and NotFoundError when it is unknown entirely.
    """
    result_db = (session
                 .query(Component.bigg_id,
                        Component.name)
                 .filter(Component.bigg_id == met_bigg_id)
                 .first())
    if result_db is None:
        # Look for a result with a deprecated ID
        res_db = (session
                  .query(DeprecatedID, Component)
                  .filter(DeprecatedID.type == 'component')
                  .filter(DeprecatedID.deprecated_id == met_bigg_id)
                  .join(Component, Component.id == DeprecatedID.ome_id)
                  .first())
        if res_db:
            raise RedirectError(res_db[1].bigg_id)
        else:
            raise NotFoundError('No Component found with BiGG ID ' + met_bigg_id)
    # all (compartment, model) occurrences with their formula and charge
    comp_comp_db = (session
                    .query(Compartment.bigg_id,
                           Model.bigg_id,
                           Model.organism,
                           ModelCompartmentalizedComponent.formula,
                           ModelCompartmentalizedComponent.charge)
                    .join(CompartmentalizedComponent,
                          CompartmentalizedComponent.compartment_id == Compartment.id)
                    .join(ModelCompartmentalizedComponent,
                          ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
                    .join(Model, Model.id == ModelCompartmentalizedComponent.model_id)
                    .join(Component, Component.id == CompartmentalizedComponent.component_id)
                    .filter(Component.bigg_id == met_bigg_id))

    # deduplicate formulae/charges across models, dropping NULLs
    formulae = list({y for y in (x[3] for x in comp_comp_db) if y is not None})
    charges = list({y for y in (x[4] for x in comp_comp_db) if y is not None})

    # database links and old ids
    db_link_results = _get_db_links_for_metabolite(met_bigg_id, session)
    old_id_results = _get_old_ids_for_metabolite(met_bigg_id, session)

    return {
        'bigg_id': result_db[0],
        'name': result_db[1],
        'formulae': formulae,
        'charges': charges,
        'database_links': db_link_results,
        'old_identifiers': old_id_results,
        'compartments_in_models': [{'bigg_id': c[0], 'model_bigg_id': c[1], 'organism': c[2]}
                                   for c in comp_comp_db]
    }
def get_model_comp_metabolite(met_bigg_id, compartment_bigg_id, model_bigg_id, session):
    """Return details for a metabolite in a specific compartment of a model.

    Includes the reactions it participates in, Escher maps, database links,
    old identifiers, and the other models containing the metabolite.

    Raises NotFoundError when the (metabolite, compartment, model) triple
    does not exist.
    """
    # tuple indices: 0 met bigg_id, 1 met name, 2 compartment bigg_id,
    # 3 compartment name, 4 model bigg_id, 5 formula, 6 charge,
    # 7 compartmentalized-component id, 8 model id (7 and 8 feed the
    # reaction query below)
    result_db = (session
                 .query(Component.bigg_id,
                        Component.name,
                        Compartment.bigg_id,
                        Compartment.name,
                        Model.bigg_id,
                        ModelCompartmentalizedComponent.formula,
                        ModelCompartmentalizedComponent.charge,
                        CompartmentalizedComponent.id,
                        Model.id)
                 .join(CompartmentalizedComponent,
                       CompartmentalizedComponent.component_id == Component.id)
                 .join(Compartment,
                       Compartment.id == CompartmentalizedComponent.compartment_id)
                 .join(ModelCompartmentalizedComponent,
                       ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
                 .join(Model, Model.id == ModelCompartmentalizedComponent.model_id)
                 .filter(Component.bigg_id == met_bigg_id)
                 .filter(Compartment.bigg_id == compartment_bigg_id)
                 .filter(Model.bigg_id == model_bigg_id)
                 .first())
    if result_db is None:
        raise NotFoundError("Component %s in compartment %s not in model %s" %
                            (met_bigg_id, compartment_bigg_id, model_bigg_id))
    # reactions in this model that use this compartmentalized metabolite
    reactions_db = (session
                    .query(Reaction.bigg_id, Reaction.name, Model.bigg_id)
                    .join(ReactionMatrix)
                    .join(ModelReaction)
                    .join(Model)
                    .filter(ReactionMatrix.compartmentalized_component_id == result_db[7])
                    .filter(Model.id == result_db[8])
                    .distinct())
    model_db = get_model_list_for_metabolite(met_bigg_id, session)
    escher_maps = get_escher_maps_for_metabolite(met_bigg_id,
                                                 compartment_bigg_id,
                                                 model_bigg_id, session)
    # exclude the current model from the "other models" list
    model_result = [x for x in model_db if x['bigg_id'] != model_bigg_id]

    db_link_results = _get_db_links_for_model_comp_metabolite(met_bigg_id,
                                                              session)

    old_id_results = _get_old_ids_for_model_comp_metabolite(met_bigg_id,
                                                            compartment_bigg_id,
                                                            model_bigg_id,
                                                            session)
    return {'bigg_id': result_db[0],
            'name': result_db[1],
            'compartment_bigg_id': result_db[2],
            'compartment_name': result_db[3],
            'model_bigg_id': result_db[4],
            'formula': result_db[5],
            'charge': result_db[6],
            'database_links': db_link_results,
            'old_identifiers': old_id_results,
            'reactions': [{'bigg_id': r[0], 'name': r[1], 'model_bigg_id': r[2]}
                          for r in reactions_db],
            'escher_maps': escher_maps,
            'other_models_with_metabolite': model_result}
def get_gene_list_for_model(model_bigg_id, session):
    """List all genes of a model with their names and organism."""
    rows = (session
            .query(Gene.bigg_id, Gene.name, Model.organism, Model.bigg_id)
            .join(ModelGene, ModelGene.gene_id == Gene.id)
            .join(Model, Model.id == ModelGene.model_id)
            .filter(Model.bigg_id == model_bigg_id)
            )
    keys = ('bigg_id', 'name', 'organism', 'model_bigg_id')
    return [dict(zip(keys, row)) for row in rows]
#---------------------------------------------------------------------
# Genomes
#---------------------------------------------------------------------
def get_genome_list(session):
    """List every genome with its accession and reference string."""
    return [
        {
            'name': genome.accession_value,
            'genome_ref_string': ref_tuple_to_str(genome.accession_type,
                                                  genome.accession_value),
            'organism': genome.organism,
        }
        for genome in session.query(Genome)
    ]
def get_genome_and_models(genome_ref_string, session):
    """Return details for a genome plus its models and chromosomes.

    Arguments
    ---------

    genome_ref_string: A reference string like 'ncbi_assembly:GCF_...'.

    session: An ome session object.

    Raises NotFoundError when no genome matches the reference string.
    """
    accession_type, accession_value = ref_str_to_tuple(genome_ref_string)
    genome_db = (session
                 .query(Genome)
                 .filter(Genome.accession_type == accession_type)
                 .filter(Genome.accession_value == accession_value)
                 .first())
    if genome_db is None:
        # BUGFIX: previously fell through to AttributeError on genome_db.id
        raise NotFoundError('No genome found with reference ' + genome_ref_string)
    models_db = (session
                 .query(Model)
                 .filter(Model.genome_id == genome_db.id))
    chromosomes_db = (session
                      .query(Chromosome)
                      .filter(Chromosome.genome_id == genome_db.id))

    return {'name': genome_db.accession_value,
            'genome_ref_string': ref_tuple_to_str(genome_db.accession_type,
                                                  genome_db.accession_value),
            'organism': genome_db.organism,
            'models': [x.bigg_id for x in models_db],
            'chromosomes': [x.ncbi_accession for x in chromosomes_db]}
#---------------------------------------------------------------------
# old IDs
#---------------------------------------------------------------------
def _compile_db_links(results):
"""Return links for the results that have a url_prefix."""
links = {}
sources = defaultdict(list)
for data_source_bigg_id, data_source_name, url_prefix, synonym in results:
if url_prefix is None:
continue
link = url_prefix + synonym
sources[data_source_name].append({'link': link, 'id': synonym})
return dict(sources)
def _get_db_links_for_reaction(reaction_bigg_id, session):
    """Collect external database links for a universal reaction."""
    rows = (session
            .query(DataSource.bigg_id, DataSource.name,
                   DataSource.url_prefix, Synonym.synonym)
            .join(Synonym)
            .join(Reaction, Reaction.id == Synonym.ome_id)
            .filter(Synonym.type == 'reaction')
            .filter(Reaction.bigg_id == reaction_bigg_id))
    return _compile_db_links(rows)
def _get_old_ids_for_reaction(reaction_bigg_id, session):
    """Return the distinct deprecated identifiers for the reaction."""
    rows = (session
            .query(Synonym.synonym)
            .join(OldIDSynonym)
            .join(ModelReaction, ModelReaction.id == OldIDSynonym.ome_id)
            .filter(OldIDSynonym.type == 'model_reaction')
            .join(Reaction)
            .filter(Reaction.bigg_id == reaction_bigg_id)
            .distinct())
    return [synonym for (synonym,) in rows]
def _get_db_links_for_model_reaction(reaction_bigg_id, session):
    """Model reactions share the universal reaction's database links."""
    return _get_db_links_for_reaction(reaction_bigg_id, session)
def _get_old_ids_for_model_reaction(model_bigg_id, reaction_bigg_id, session):
    """Return deprecated identifiers for a reaction within one model."""
    rows = (session
            .query(Synonym.synonym)
            .join(OldIDSynonym)
            .join(ModelReaction, ModelReaction.id == OldIDSynonym.ome_id)
            .filter(OldIDSynonym.type == 'model_reaction')
            .join(Reaction)
            .join(Model)
            .filter(Reaction.bigg_id == reaction_bigg_id)
            .filter(Model.bigg_id == model_bigg_id)
            .distinct())
    return [synonym for (synonym,) in rows]
def _get_db_links_for_model_gene(gene_bigg_id, session):
    """Collect external database links for a gene."""
    rows = (session
            .query(DataSource.bigg_id, DataSource.name,
                   DataSource.url_prefix, Synonym.synonym)
            .join(Synonym)
            .join(Gene, Gene.id == Synonym.ome_id)
            .filter(Synonym.type == 'gene')
            .filter(Gene.bigg_id == gene_bigg_id))
    return _compile_db_links(rows)
def _get_old_ids_for_model_gene(gene_bigg_id, model_bigg_id, session):
    """Return deprecated identifiers for a gene within one model."""
    rows = (session
            .query(Synonym.synonym)
            .join(OldIDSynonym)
            .join(ModelGene, ModelGene.id == OldIDSynonym.ome_id)
            .filter(OldIDSynonym.type == 'model_gene')
            .join(Gene)
            .join(Model)
            .filter(Gene.bigg_id == gene_bigg_id)
            .filter(Model.bigg_id == model_bigg_id)
            .distinct())
    return [synonym for (synonym,) in rows]
def _get_db_links_for_metabolite(met_bigg_id, session):
    """Collect external database links for a metabolite.

    Synonyms can be attached either to the bare Component or to a
    CompartmentalizedComponent, so both queries are run and their rows are
    chained before compiling the links.
    """
    # synonyms recorded on the component itself
    result_db_1 = (session
                   .query(DataSource.bigg_id, DataSource.name, DataSource.url_prefix, Synonym.synonym)
                   .join(Synonym)
                   .join(Component, Component.id == Synonym.ome_id)
                   .filter(Synonym.type == 'component')
                   .filter(Component.bigg_id == met_bigg_id))
    # synonyms recorded on any compartmentalized form of the component
    result_db_2 = (session
                   .query(DataSource.bigg_id, DataSource.name, DataSource.url_prefix, Synonym.synonym)
                   .join(Synonym)
                   .join(CompartmentalizedComponent,
                         CompartmentalizedComponent.id == Synonym.ome_id)
                   .filter(Synonym.type == 'compartmentalized_component')
                   .join(Component,
                         Component.id == CompartmentalizedComponent.component_id)
                   .join(Compartment,
                         Compartment.id == CompartmentalizedComponent.compartment_id)
                   .filter(Component.bigg_id == met_bigg_id))
    return _compile_db_links(chain(result_db_1, result_db_2))
def _get_old_ids_for_metabolite(met_bigg_id, session):
    """Return distinct deprecated identifiers for a universal metabolite."""
    rows = (session
            .query(Synonym.synonym)
            .join(OldIDSynonym)
            .join(ModelCompartmentalizedComponent,
                  ModelCompartmentalizedComponent.id == OldIDSynonym.ome_id)
            .filter(OldIDSynonym.type == 'model_compartmentalized_component')
            .filter(Synonym.type == 'component')
            .join(CompartmentalizedComponent)
            .join(Component)
            .filter(Component.bigg_id == met_bigg_id)
            .distinct())
    return [synonym for (synonym,) in rows]
def _get_db_links_for_model_comp_metabolite(met_bigg_id, session):
    """Compartmentalized metabolites share the universal metabolite links."""
    return _get_db_links_for_metabolite(met_bigg_id, session)
def _get_old_ids_for_model_comp_metabolite(met_bigg_id, compartment_bigg_id,
                                           model_bigg_id, session):
    """Return distinct deprecated identifiers for a metabolite restricted to
    one compartment of one model."""
    rows = (session
            .query(Synonym.synonym)
            .join(OldIDSynonym)
            .join(ModelCompartmentalizedComponent,
                  ModelCompartmentalizedComponent.id == OldIDSynonym.ome_id)
            .filter(OldIDSynonym.type == 'model_compartmentalized_component')
            # unlike _get_old_ids_for_metabolite, match compartment-level
            # synonyms and also filter by compartment and model
            .filter(Synonym.type == 'compartmentalized_component')
            .join(CompartmentalizedComponent)
            .join(Compartment)
            .join(Component)
            .join(Model)
            .filter(Component.bigg_id == met_bigg_id)
            .filter(Compartment.bigg_id == compartment_bigg_id)
            .filter(Model.bigg_id == model_bigg_id)
            .distinct())
    return [x[0] for x in rows]
#-----------
# Utilities
#-----------
def build_reaction_string(metabolite_list, lower_bound, upper_bound,
                          universal=False, html=True):
    """Build a human-readable reaction string.

    Arguments
    ---------

    metabolite_list: A list of dicts with keys 'stoichiometry', 'bigg_id',
    and 'compartment_bigg_id'. Negative stoichiometries are reactants,
    positive are products; zero entries are skipped.

    lower_bound / upper_bound: Flux bounds; they determine the arrow
    direction (left, right, or reversible).

    universal: If True, always draw the reversible arrow.

    html: If True, use unicode arrows; otherwise use ASCII arrows.

    Returns
    -------

    A string like 'a_c + 2 b_c <-> c_c'.
    """
    def _term(met):
        # '2 h2o_c', or just 'h2o_c' when the coefficient magnitude is 1
        name = met['bigg_id'] + '_' + met['compartment_bigg_id']
        if abs(float(met['stoichiometry'])) == 1:
            return name
        return '{} {}'.format(abs(met['stoichiometry']), name)

    # Build each side with join instead of repeated concatenation + strip.
    reactants = ' + '.join(_term(m) for m in metabolite_list
                           if float(m['stoichiometry']) < 0)
    products = ' + '.join(_term(m) for m in metabolite_list
                          if float(m['stoichiometry']) > 0)

    both_arrow = ' &#8652; ' if html else ' <-> '
    left_arrow = ' &#x2190; ' if html else ' <-- '
    right_arrow = ' &#x2192; ' if html else ' --> '

    # Exchange reactions (single metabolite) and universal reactions are
    # always drawn reversible; otherwise the bounds pick the direction.
    if len(metabolite_list) == 1 or universal is True:
        arrow = both_arrow
    elif lower_bound < 0 and upper_bound <= 0:
        arrow = left_arrow
    elif lower_bound >= 0 and upper_bound > 0:
        arrow = right_arrow
    else:
        arrow = both_arrow
    return reactants + arrow + products
# Escher maps
def get_escher_maps_for_model(model_id, session):
    """List the Escher maps drawn for the model (no element IDs)."""
    maps = (session
            .query(EscherMap)
            .filter(EscherMap.model_id == model_id)
            )
    return [{'map_name': m.map_name, 'element_id': None} for m in maps]
def get_escher_maps_for_reaction(reaction_bigg_id, model_bigg_id, session):
    """Return Escher maps that contain the reaction in the given model.

    Each result has the map name and the element id of the reaction on that
    map; maps are ordered by descending priority.
    """
    result_db = (session
                 .query(EscherMap.map_name, EscherMapMatrix.escher_map_element_id)
                 .join(EscherMapMatrix,
                       EscherMapMatrix.escher_map_id == EscherMap.id)
                 .join(ModelReaction,
                       ModelReaction.id == EscherMapMatrix.ome_id)
                 .filter(EscherMapMatrix.type == 'model_reaction')
                 .join(Model,
                       Model.id == ModelReaction.model_id)
                 .join(Reaction,
                       Reaction.id == ModelReaction.reaction_id)
                 .filter(Reaction.bigg_id == reaction_bigg_id)
                 .filter(Model.bigg_id == model_bigg_id)
                 .order_by(EscherMap.priority.desc())
                 )
    return [{'map_name': x[0], 'element_id': x[1]} for x in result_db]
def get_escher_maps_for_metabolite(metabolite_bigg_id, compartment_bigg_id,
                                   model_bigg_id, session):
    """Return Escher maps that contain the compartmentalized metabolite.

    Each result has the map name and the element id of the metabolite node
    on that map; maps are ordered by descending priority.
    """
    result_db = (session
                 .query(EscherMap.map_name, EscherMapMatrix.escher_map_element_id)
                 .join(EscherMapMatrix,
                       EscherMapMatrix.escher_map_id == EscherMap.id)
                 .join(ModelCompartmentalizedComponent,
                       ModelCompartmentalizedComponent.id == EscherMapMatrix.ome_id)
                 .filter(EscherMapMatrix.type == 'model_compartmentalized_component')
                 .join(Model,
                       Model.id == ModelCompartmentalizedComponent.model_id)
                 .join(CompartmentalizedComponent,
                       CompartmentalizedComponent.id == ModelCompartmentalizedComponent.compartmentalized_component_id)
                 .join(Component,
                       Component.id == CompartmentalizedComponent.component_id)
                 .join(Compartment,
                       Compartment.id == CompartmentalizedComponent.compartment_id)
                 .filter(Component.bigg_id == metabolite_bigg_id)
                 .filter(Compartment.bigg_id == compartment_bigg_id)
                 .filter(Model.bigg_id == model_bigg_id)
                 .order_by(EscherMap.priority.desc())
                 )
    return [{'map_name': x[0], 'element_id': x[1]} for x in result_db]
def json_for_map(map_name, session):
    """Return the stored JSON for an Escher map, decoded to text.

    Raises NotFoundError when the map does not exist.
    """
    row = (session
           .query(EscherMap.map_data)
           .filter(EscherMap.map_name == map_name)
           .first())
    if row is None:
        raise NotFoundError('Could not find Escher map %s' % map_name)
    return row[0].decode('utf8')
#-------
# Genes
#-------
def sequences_for_reaction(reaction_bigg_id, session):
    """Return DNA/protein sequences for genes associated with the reaction.

    Genes with no stored DNA sequence are skipped. Each result also carries
    the gene BiGG ID, genome accession, and model BiGG ID.
    """
    # (removed a stray debug print of reaction_bigg_id)
    res = (session.query(Gene.dna_sequence, Gene.protein_sequence, Gene.bigg_id,
                         Genome.accession_value, Model.bigg_id)
           .join(Chromosome, Chromosome.id == Gene.chromosome_id)
           .join(Genome, Genome.id == Chromosome.genome_id)
           .join(ModelGene, ModelGene.gene_id == Gene.id)
           .join(Model, Model.id == ModelGene.model_id)
           .join(GeneReactionMatrix, GeneReactionMatrix.model_gene_id == ModelGene.id)
           .join(ModelReaction, ModelReaction.id == GeneReactionMatrix.model_reaction_id)
           .join(Reaction, Reaction.id == ModelReaction.reaction_id)
           .filter(Reaction.bigg_id == reaction_bigg_id)
           # isnot(None) is the explicit SQLAlchemy form of "IS NOT NULL"
           .filter(Gene.dna_sequence.isnot(None)))
    return [{
        'dna_sequence': x[0],
        'protein_sequence': x[1],
        'gene_bigg_id': x[2],
        'genome_accession_value': x[3],
        'model_bigg_id': x[4],
    } for x in res.all()]
#-------------------------------------------------------------------------------
# Search
#-------------------------------------------------------------------------------
# Trigram-similarity cutoffs for fuzzy search. A candidate row is kept when
# func.similarity(column, query_string) is at least the cutoff.
name_sim_cutoff = 0.3
bigg_id_sim_cutoff = 0.2
gene_bigg_id_sim_cutoff = 1.0  # gene BiGG IDs must match exactly
organism_sim_cutoff = 0.1
def search_for_universal_reactions_count(
        query_string,
        session,
        multistrain_off,
):
    """Count the search results."""
    # fuzzy trigram matches against both the BiGG ID and the name
    bigg_id_match = func.similarity(Reaction.bigg_id, query_string) >= bigg_id_sim_cutoff
    name_match = and_(func.similarity(Reaction.name, query_string) >= name_sim_cutoff,
                      Reaction.name != '')
    query = (session
             .query(Reaction.bigg_id, Reaction.name)
             .filter(or_(bigg_id_match, name_match)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Reaction)
    return query.count()
def search_for_universal_reactions(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for universal reactions.

    Arguments
    ---------

    query_string: The string to search for.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.

    sort_direction: Either 'ascending' or 'descending'.

    multistrain_off: If True, exclude multi-strain models.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name'.

    """
    # similarity functions
    sim_bigg_id = func.similarity(Reaction.bigg_id, query_string)
    sim_name = func.similarity(Reaction.name, query_string)

    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}

    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Python 3 fix: columns.itervalues().next() raised AttributeError
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(Reaction.bigg_id, Reaction.name)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Reaction.name != ''))))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Reaction)

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def search_for_reactions(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        limit_models=None,
):
    """Search for model reactions.

    Arguments
    ---------

    query_string: The string to search for.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.

    sort_direction: Either 'ascending' or 'descending'.

    limit_models: search for results in only this array of model BiGG IDs.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.

    """
    # similarity functions
    sim_bigg_id = func.similarity(Reaction.bigg_id, query_string)
    sim_name = func.similarity(Reaction.name, query_string)

    # get the sort column
    columns = {'bigg_id': func.lower(Reaction.bigg_id),
               'name': func.lower(Reaction.name)}

    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Python 3 fix: columns.itervalues().next() raised AttributeError
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(Reaction.bigg_id, Model.bigg_id, Model.organism, Reaction.name)
             .join(ModelReaction, ModelReaction.reaction_id == Reaction.id)
             .join(Model, Model.id == ModelReaction.model_id)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Reaction.name != ''))))

    # BUGFIX: limit the models BEFORE applying LIMIT/OFFSET. Filtering after
    # pagination produced short pages and SQLAlchemy rejects filter() on a
    # query that already has LIMIT/OFFSET applied.
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    return [{'bigg_id': x[0], 'model_bigg_id': x[1], 'organism': x[2], 'name': x[3]}
            for x in query]
def reaction_with_hash(hash, session):
    """Find the reaction with the given hash.

    Raises NotFoundError when no reaction matches.
    """
    row = (session
           .query(Reaction.bigg_id, Reaction.name)
           .filter(Reaction.reaction_hash == hash)
           .first())
    if row is None:
        raise NotFoundError
    bigg_id, name = row
    return {'bigg_id': bigg_id, 'model_bigg_id': 'universal', 'name': name}
def search_for_universal_metabolites_count(
        query_string,
        session,
        multistrain_off,
):
    """Count the search results."""
    # fuzzy trigram matches against both the BiGG ID and the name
    bigg_id_match = func.similarity(Component.bigg_id, query_string) >= bigg_id_sim_cutoff
    name_match = and_(func.similarity(Component.name, query_string) >= name_sim_cutoff,
                      Component.name != '')
    query = (session
             .query(Component.bigg_id, Component.name)
             .filter(or_(bigg_id_match, name_match)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Component)
    return query.count()
def search_for_universal_metabolites(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for universal Metabolites.

    Arguments
    ---------

    query_string: The string to search for.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name'.

    sort_direction: Either 'ascending' or 'descending'.

    multistrain_off: If True, exclude multi-strain models.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name'.

    """
    # similarity functions
    sim_bigg_id = func.similarity(Component.bigg_id, query_string)
    sim_name = func.similarity(Component.name, query_string)

    # get the sort column
    columns = {'bigg_id': func.lower(Component.bigg_id),
               'name': func.lower(Component.name)}

    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Python 3 fix: columns.itervalues().next() raised AttributeError
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(Component.bigg_id, Component.name)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Component.name != ''))))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Component)

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    return [{'bigg_id': x[0], 'name': x[1]} for x in query]
def search_for_metabolites(query_string, session, page=None, size=None,
                           sort_column=None, sort_direction='ascending',
                           limit_models=None, strict=False):
    """Search for model metabolites.

    Arguments
    ---------

    query_string: The string to search for.

    session: An ome session object.

    page: The page, or None for all pages.

    size: The page length, or None for all pages.

    sort_column: The name of the column to sort. Must be one of 'bigg_id', 'name',
    'model_bigg_id', and 'organism'.

    sort_direction: Either 'ascending' or 'descending'.

    limit_models: search for results in only this array of model BiGG IDs.

    strict: if True, then only look for exact matches to the BiGG ID, with the
    compartment.

    Returns
    -------

    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.

    """
    # similarity functions
    sim_bigg_id = func.similarity(Component.bigg_id, query_string)
    sim_name = func.similarity(Component.name, query_string)

    # get the sort column; 'bigg_id' sorts by metabolite then compartment
    columns = {'bigg_id': [func.lower(Component.bigg_id), func.lower(Compartment.bigg_id)],
               'name': func.lower(Component.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}

    # NOTE: a duplicated, immediately-overwritten sort-column lookup block
    # was removed here; this is the only place the sort column is resolved.
    if sort_column is None:
        if strict:
            # just sort by bigg ID
            sort_column_object = columns['bigg_id']
            sort_direction = 'ascending'
        else:
            # sort by most similar
            sort_column_object = func.greatest(sim_name, sim_bigg_id)
            sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # Python 3 fix: columns.itervalues().next() raised AttributeError
            sort_column_object = next(iter(columns.values()))

    # set up the query
    query = (session
             .query(Component.bigg_id, Compartment.bigg_id, Model.bigg_id,
                    Model.organism, Component.name)
             .join(CompartmentalizedComponent,
                   CompartmentalizedComponent.component_id == Component.id)
             .join(Compartment,
                   Compartment.id == CompartmentalizedComponent.compartment_id)
             .join(ModelCompartmentalizedComponent,
                   ModelCompartmentalizedComponent.compartmentalized_component_id == CompartmentalizedComponent.id)
             .join(Model, Model.id == ModelCompartmentalizedComponent.model_id))

    # whether to allow fuzzy search
    if strict:
        try:
            metabolite_bigg_id, compartment_bigg_id = parse.split_compartment(query_string)
        except Exception:
            return []
        query = (query
                 .filter(Component.bigg_id == metabolite_bigg_id)
                 .filter(Compartment.bigg_id == compartment_bigg_id))
    else:
        query = (query
                 .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                             and_(sim_name >= name_sim_cutoff,
                                  Component.name != ''))))

    # BUGFIX: restrict to certain models BEFORE applying LIMIT/OFFSET, so
    # pages are filtered correctly and SQLAlchemy does not reject the filter.
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))

    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)

    return [{'bigg_id': x[0], 'compartment_bigg_id': x[1], 'model_bigg_id': x[2],
             'organism': x[3], 'name': x[4]}
            for x in query]
def search_for_genes_count(
        query_string,
        session,
        limit_models=None,
        multistrain_off=False,
):
    """Count the search results."""
    # fuzzy trigram matches against the gene BiGG ID and the gene name
    sim_bigg_id = func.similarity(Gene.bigg_id, query_string)
    sim_name = func.similarity(Gene.name, query_string)

    bigg_id_match = sim_bigg_id >= gene_bigg_id_sim_cutoff
    name_match = and_(sim_name >= name_sim_cutoff, Gene.name != '')

    query = (session
             .query(Gene.bigg_id, Model.bigg_id, Gene.name, sim_bigg_id, Model.organism)
             .join(ModelGene, ModelGene.gene_id == Gene.id)
             .join(Model, Model.id == ModelGene.model_id)
             .filter(or_(bigg_id_match, name_match)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Gene)

    # limit the models
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))

    return query.count()
def search_for_genes(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        limit_models=None,
        multistrain_off=False,
):
    """Search for genes.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'name', 'model_bigg_id', and 'organism'.
    sort_direction: Either 'ascending' or 'descending'.
    limit_models: search for results in only this array of model BiGG IDs.
    multistrain_off: if True, apply the multistrain filter.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'name', 'model_bigg_id', and
    'organism'.
    """
    # similarity functions
    sim_bigg_id = func.similarity(GenomeRegion.bigg_id, query_string)
    sim_name = func.similarity(Gene.name, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Gene.bigg_id),
               'name': func.lower(Gene.name),
               'model_bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism)}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_name)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # BUG FIX: dict.itervalues() is Python 2 only and raises
            # AttributeError on Python 3; fall back to the first column.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(GenomeRegion.bigg_id, Gene.name, Model.bigg_id, Model.organism)
             .join(Gene)
             .join(ModelGene)
             .join(Model)
             .filter(or_(sim_bigg_id >= gene_bigg_id_sim_cutoff,
                         and_(sim_name >= name_sim_cutoff,
                              Gene.name != ''))))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Gene)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object, sort_direction,
                                      page, size)
    # limit the models
    if limit_models:
        query = query.filter(Model.bigg_id.in_(limit_models))
    return [{'bigg_id': x[0], 'name': x[1], 'model_bigg_id': x[2], 'organism': x[3]}
            for x in query]
def search_for_models_count(query_string, session, multistrain_off):
    """Return the number of model search hits for *query_string*."""
    # Trigram similarity against the model BiGG ID and the organism name
    id_score = func.similarity(Model.bigg_id, query_string)
    organism_score = func.similarity(Model.organism, query_string)
    # Keep models that are similar enough on either dimension
    query = (session
             .query(Model.bigg_id, ModelCount, Model.organism)
             .join(ModelCount)
             .filter(or_(id_score >= bigg_id_sim_cutoff,
                         organism_score >= organism_sim_cutoff)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)
    return query.count()
def search_for_models(
        query_string,
        session,
        page=None,
        size=None,
        sort_column=None,
        sort_direction='ascending',
        multistrain_off=False,
):
    """Search for models.

    Arguments
    ---------
    query_string: The string to search for.
    session: An ome session object.
    page: The page, or None for all pages.
    size: The page length, or None for all pages.
    sort_column: The name of the column to sort. Must be one of 'bigg_id',
    'organism', 'metabolite_count', 'reaction_count', and 'gene_count'.
    sort_direction: Either 'ascending' or 'descending'.
    multistrain_off: if True, apply the multistrain filter.

    Returns
    -------
    A list of objects with keys 'bigg_id', 'organism', 'metabolite_count',
    'reaction_count', and 'gene_count'.
    """
    # models by bigg_id
    sim_bigg_id = func.similarity(Model.bigg_id, query_string)
    sim_organism = func.similarity(Model.organism, query_string)
    # get the sort column
    columns = {'bigg_id': func.lower(Model.bigg_id),
               'organism': func.lower(Model.organism),
               'metabolite_count': ModelCount.metabolite_count,
               'reaction_count': ModelCount.reaction_count,
               'gene_count': ModelCount.gene_count}
    if sort_column is None:
        # sort by the greater similarity
        sort_column_object = func.greatest(sim_bigg_id, sim_organism)
        sort_direction = 'descending'
    else:
        try:
            sort_column_object = columns[sort_column]
        except KeyError:
            print('Bad sort_column name: %s' % sort_column)
            # BUG FIX: dict.itervalues() is Python 2 only and raises
            # AttributeError on Python 3; fall back to the first column.
            sort_column_object = next(iter(columns.values()))
    # set up the query
    query = (session
             .query(Model.bigg_id, Model.organism, ModelCount.metabolite_count,
                    ModelCount.reaction_count, ModelCount.gene_count)
             .join(ModelCount)
             .filter(or_(sim_bigg_id >= bigg_id_sim_cutoff,
                         sim_organism >= organism_sim_cutoff)))
    if multistrain_off:
        query = _add_multistrain_filter(session, query, Model)
    # order and limit
    query = _apply_order_limit_offset(query, sort_column_object,
                                      sort_direction, page, size)
    return [{'bigg_id': x[0], 'organism': x[1], 'metabolite_count': x[2],
             'reaction_count': x[3], 'gene_count': x[4]}
            for x in query]
def search_ids_fast(query_string, session, limit=None):
    """Prefix-match IDs and names across all entity types (autocomplete helper)."""
    pattern = query_string + '%'
    # One prefix query per searchable column; genes are restricted to those
    # that actually appear in a model (via the ModelGene join).
    subqueries = [
        session.query(Gene.bigg_id).join(ModelGene).filter(Gene.bigg_id.ilike(pattern)),
        session.query(Gene.name).join(ModelGene).filter(Gene.name.ilike(pattern)),
        session.query(Reaction.bigg_id).filter(Reaction.bigg_id.ilike(pattern)),
        session.query(Reaction.name).filter(Reaction.name.ilike(pattern)),
        session.query(Component.bigg_id).filter(Component.bigg_id.ilike(pattern)),
        session.query(Component.name).filter(Component.name.ilike(pattern)),
        session.query(Model.bigg_id).filter(Model.bigg_id.ilike(pattern)),
        session.query(Model.organism).filter(Model.organism.ilike(pattern)),
    ]
    # Combine everything into a single result set
    query = subqueries[0].union(*subqueries[1:])
    if limit is not None:
        query = query.limit(limit)
    return [row[0] for row in query]
# advanced search by external database ID
def get_database_sources(session):
    """Return (bigg_id, name) pairs for all named external data sources.

    Used to populate the advanced-search source picker; sorted by name.
    """
    sources = (session
               .query(DataSource.bigg_id, DataSource.name)
               .filter(DataSource.name != None)
               .distinct()
               .order_by(DataSource.name))
    return [(source_bigg_id, source_name) for source_bigg_id, source_name in sources]
def get_metabolites_for_database_id(session, query, database_source):
    """Find universal metabolites whose synonym in *database_source* equals *query*.

    The (stripped) identifier is matched against synonyms attached either to
    the universal component itself or to one of its compartmentalized forms.
    """
    external_id = query.strip()
    # Synonyms attached directly to the universal component
    met_db = (session
              .query(Component.bigg_id, Component.name)
              .join(Synonym, Synonym.ome_id == Component.id)
              .filter(Synonym.type == 'component')
              .join(DataSource, DataSource.id == Synonym.data_source_id)
              .filter(DataSource.bigg_id == database_source)
              .filter(Synonym.synonym == external_id))
    # Synonyms attached to a compartmentalized form of the component
    comp_comp_db = (session
                    .query(Component.bigg_id, Component.name)
                    .join(CompartmentalizedComponent)
                    .join(Synonym, Synonym.ome_id == CompartmentalizedComponent.id)
                    .filter(Synonym.type == 'compartmentalized_component')
                    .join(DataSource, DataSource.id == Synonym.data_source_id)
                    .filter(DataSource.bigg_id == database_source)
                    .filter(Synonym.synonym == external_id))
    return [{'bigg_id': met_bigg_id, 'model_bigg_id': 'universal', 'name': met_name}
            for met_bigg_id, met_name in chain(met_db, comp_comp_db)]
def get_reactions_for_database_id(session, query, database_source):
    """Find universal reactions whose synonym in *database_source* equals *query*."""
    matches = (session
               .query(Reaction.bigg_id, Reaction.name)
               .join(Synonym, Synonym.ome_id == Reaction.id)
               .filter(Synonym.type == 'reaction')
               .join(DataSource, DataSource.id == Synonym.data_source_id)
               .filter(DataSource.bigg_id == database_source)
               .filter(Synonym.synonym == query.strip()))
    return [{'bigg_id': reaction_bigg_id, 'model_bigg_id': 'universal', 'name': reaction_name}
            for reaction_bigg_id, reaction_name in matches]
def get_genes_for_database_id(session, query, database_source):
    """Find genes (with their model) whose synonym in *database_source* equals *query*."""
    matches = (session
               .query(Gene.bigg_id, Model.bigg_id, Gene.name)
               .join(Synonym, Synonym.ome_id == Gene.id)
               .filter(Synonym.type == 'gene')
               .join(DataSource)
               .join(ModelGene)
               .join(Model)
               .filter(DataSource.bigg_id == database_source)
               .filter(Synonym.synonym == query.strip()))
    return [{'bigg_id': gene_bigg_id, 'model_bigg_id': model_bigg_id, 'name': gene_name}
            for gene_bigg_id, model_bigg_id, gene_name in matches]
# version
def database_version(session):
    """Return the database's last-update timestamp plus code and API versions."""
    last_updated = session.query(DatabaseVersion).first().date_time
    return {
        'last_updated': str(last_updated),
        'bigg_models_version': version,
        'api_version': api_version,
    }
| 32,279 | 38 | 942 |
6c63c564de7de2faff6ba53762b90aa1d9901077 | 2,999 | py | Python | hypergbm/cuml/_search_space.py | oaksharks/HyperGBM | be9442ccf9513727c1123972945c6fa5e8eb63ff | [
"Apache-2.0"
] | null | null | null | hypergbm/cuml/_search_space.py | oaksharks/HyperGBM | be9442ccf9513727c1123972945c6fa5e8eb63ff | [
"Apache-2.0"
] | null | null | null | hypergbm/cuml/_search_space.py | oaksharks/HyperGBM | be9442ccf9513727c1123972945c6fa5e8eb63ff | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
"""
"""
from hypergbm.estimators import detect_lgbm_gpu
from hypergbm.search_space import GeneralSearchSpaceGenerator
from hypernets.core import Choice
from hypernets.pipeline.base import DataFrameMapper
from hypernets.tabular.cuml_ex import CumlToolBox
from hypernets.utils import logging
from . import _estimators as es
from . import _ops as ops
from ..cfg import HyperGBMCfg as cfg
logger = logging.get_logger(__name__)
search_space_general = \
CumlGeneralSearchSpaceGenerator(enable_lightgbm=cfg.estimator_lightgbm_enabled,
enable_xgb=cfg.estimator_xgboost_enabled,
enable_catboost=cfg.estimator_catboost_enabled,
enable_histgb=cfg.estimator_histgb_enabled,
n_estimators=200)
| 36.13253 | 113 | 0.675892 | # -*- coding:utf-8 -*-
"""
"""
from hypergbm.estimators import detect_lgbm_gpu
from hypergbm.search_space import GeneralSearchSpaceGenerator
from hypernets.core import Choice
from hypernets.pipeline.base import DataFrameMapper
from hypernets.tabular.cuml_ex import CumlToolBox
from hypernets.utils import logging
from . import _estimators as es
from . import _ops as ops
from ..cfg import HyperGBMCfg as cfg
logger = logging.get_logger(__name__)
class CumlDataFrameMapper(DataFrameMapper):
    """DataFrameMapper that instantiates the cuML (GPU) mapper implementation.

    Overrides the factory hook so that the mapper class is looked up in
    CumlToolBox's transformer registry instead of the default (CPU) one.
    """
    @staticmethod
    def _create_dataframe_mapper(features, **kwargs):
        # Look up the cuML DataFrameMapper transformer registered in the toolbox
        dfm_cls = CumlToolBox.transformers['DataFrameMapper']
        return dfm_cls(features=features, **kwargs)
class CumlGeneralSearchSpaceGenerator(GeneralSearchSpaceGenerator):
    """Search-space generator wired for cuML/GPU estimators.

    Swaps in the cuML estimator classes and extends the default estimator
    init kwargs with GPU-specific settings (LightGBM device, XGBoost
    gpu_hist, CatBoost GPU task type).
    """
    # cuML-backed estimator classes used in place of the CPU defaults
    lightgbm_estimator_cls = es.LightGBMCumlEstimator
    xgboost_estimator_cls = es.XGBoostCumlEstimator
    catboost_estimator_cls = es.CatBoostCumlEstimator
    histgb_estimator_cls = es.HistGBCumlEstimator
    def create_preprocessor(self, hyper_input, options):
        """Build the preprocessing DataFrameMapper for *hyper_input*.

        Pops pipeline-mode options (falling back to the HyperGBM config
        defaults) and combines a numeric and a categorical pipeline into a
        single CumlDataFrameMapper. NOTE: mutates *options* via pop().
        """
        cat_pipeline_mode = options.pop('cat_pipeline_mode', cfg.category_pipeline_mode)
        num_pipeline_mode = options.pop('num_pipeline_mode', cfg.numeric_pipeline_mode)
        dataframe_mapper_default = options.pop('dataframe_mapper_default', False)
        # Choose the numeric pipeline flavour
        if num_pipeline_mode == 'simple':
            num_pipeline = ops.numeric_pipeline_simple()(hyper_input)
        else:
            num_pipeline = ops.numeric_pipeline_complex()(hyper_input)
        # Choose the categorical pipeline flavour
        if cat_pipeline_mode == 'simple':
            cat_pipeline = ops.categorical_pipeline_simple()(hyper_input)
        else:
            cat_pipeline = ops.categorical_pipeline_complex()(hyper_input)
        # Remaining object-dtype columns are cast to int after mapping
        column_object = CumlToolBox.column_selector.column_object
        dfm = CumlDataFrameMapper(default=dataframe_mapper_default, input_df=True, df_out=True,
                                  df_out_dtype_transforms=[(column_object, 'int')])([num_pipeline, cat_pipeline])
        return dfm
    @property
    def default_lightgbm_init_kwargs(self):
        """LightGBM init kwargs, adding GPU device settings when a GPU build is detected."""
        r = super().default_lightgbm_init_kwargs
        if detect_lgbm_gpu():
            r = {**r,
                 'device': 'GPU',
                 # smaller max_bin choices recommended for LightGBM GPU training
                 'max_bin': Choice([63, 127]),
                 }
        return r
    @property
    def default_xgb_init_kwargs(self):
        """XGBoost init kwargs, forcing the GPU histogram tree method."""
        return {**super().default_xgb_init_kwargs,
                'tree_method': 'gpu_hist',
                }
    @property
    def default_catboost_init_kwargs(self):
        """CatBoost init kwargs, forcing GPU training."""
        return {**super().default_catboost_init_kwargs,
                'task_type': 'GPU',
                }
search_space_general = \
CumlGeneralSearchSpaceGenerator(enable_lightgbm=cfg.estimator_lightgbm_enabled,
enable_xgb=cfg.estimator_xgboost_enabled,
enable_catboost=cfg.estimator_catboost_enabled,
enable_histgb=cfg.estimator_histgb_enabled,
n_estimators=200)
| 1,624 | 472 | 46 |
44935be3f4980d40aeada2a925a0b2833a874792 | 973 | py | Python | 40-Base-of-Numbers/test_intToBase.py | PawelZabinski/ocr-code-challenges-files | 24d30de694a00f2190790003778c6d65b8b2554b | [
"MIT"
] | null | null | null | 40-Base-of-Numbers/test_intToBase.py | PawelZabinski/ocr-code-challenges-files | 24d30de694a00f2190790003778c6d65b8b2554b | [
"MIT"
] | null | null | null | 40-Base-of-Numbers/test_intToBase.py | PawelZabinski/ocr-code-challenges-files | 24d30de694a00f2190790003778c6d65b8b2554b | [
"MIT"
] | null | null | null | import unittest
from utils import intToBase
if __name__ == '__main__':
unittest.main() | 29.484848 | 51 | 0.692703 | import unittest
from utils import intToBase
class TestIntToBase(unittest.TestCase):
    """Unit tests for utils.intToBase (integer -> base-N string conversion)."""
    def test_raisesValueErrorOnInvalidInput(self):
        """Invalid bases (e.g. 1 or 40) raise ValueError."""
        with self.assertRaises(ValueError):
            intToBase(34, 1)
        with self.assertRaises(ValueError):
            intToBase(23, 40)
    def test_returns0WhenInputValueIsZero(self):
        """Zero converts to '0' regardless of base."""
        self.assertEqual(intToBase(0, 2), '0')
        self.assertEqual(intToBase(0, 16), '0')
        self.assertEqual(intToBase(0, 36), '0')
    def test_denaryToBase2(self):
        """Decimal to binary conversion."""
        self.assertEqual(intToBase(30, 2), '11110')
        self.assertEqual(intToBase(255, 2), '11111111')
    def test_denaryToBase16(self):
        """Decimal to hexadecimal conversion (uppercase digits)."""
        self.assertEqual(intToBase(30, 16), '1E')
        self.assertEqual(intToBase(255, 16), 'FF')
        self.assertEqual(intToBase(32, 16), '20')
    def test_denaryToBase32(self):
        """Decimal to base-32 conversion (single-digit results)."""
        self.assertEqual(intToBase(31, 32), 'V')
        self.assertEqual(intToBase(25, 32), 'P')
        self.assertEqual(intToBase(4, 32), '4')
if __name__ == '__main__':
unittest.main() | 710 | 18 | 156 |
113e609db11774c5366ff10622055c5928de1a52 | 19,233 | py | Python | src/tbscm/utils/data.py | muhlbach/tbscm | b6d668d2d18f72a082f4d914cc73767f8047ef2c | [
"MIT"
] | null | null | null | src/tbscm/utils/data.py | muhlbach/tbscm | b6d668d2d18f72a082f4d914cc73767f8047ef2c | [
"MIT"
] | null | null | null | src/tbscm/utils/data.py | muhlbach/tbscm | b6d668d2d18f72a082f4d914cc73767f8047ef2c | [
"MIT"
] | null | null | null | #------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
# Standard
import numpy as np
import pandas as pd
import cvxpy as cp
from sklearn.preprocessing import PolynomialFeatures
from statsmodels.tools.tools import add_constant
from scipy.stats import norm
# User
from .exceptions import WrongInputException
###############################################################################
# Main
###############################################################################
#------------------------------------------------------------------------------
# Tools
#------------------------------------------------------------------------------
def convert_normal_to_uniform(x, mu="infer", sigma="infer", lower_bound=0, upper_bound=1, n_digits_round=2):
    """Map (assumed) normal draws *x* to uniform draws on [lower_bound, upper_bound].

    Uses the probability integral transform: F(x) is uniform on [0, 1] when F
    is the CDF of the generating distribution; the CDF values are then rescaled
    to the requested interval.
    See link: https://math.stackexchange.com/questions/2343952/how-to-transform-gaussiannormal-distribution-to-uniform-distribution

    mu/sigma: distribution parameters, or "infer" to estimate them column-wise
    from x (rounded to n_digits_round).
    """
    # Convert to np and break link
    x = np.array(x.copy())
    if mu=="infer":
        mu = np.mean(x, axis=0).round(n_digits_round)
    if sigma=="infer":
        sigma = np.sqrt(np.var(x, axis=0)).round(n_digits_round)
    # Get CDF (uniform on [0,1] under the assumed normal)
    x_cdf = norm.cdf(x=x, loc=mu, scale=sigma)
    # BUG FIX: shift by +lower_bound (was "- lower_bound", which maps onto
    # [-lower_bound, upper_bound - 2*lower_bound] whenever lower_bound != 0)
    x_uni = lower_bound + (upper_bound-lower_bound)*x_cdf
    return x_uni
#------------------------------------------------------------------------------
# Generate X data
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Generate f_star = E[Y|X=x]
#------------------------------------------------------------------------------
def _solve_meta_problem(A,B,w):
    """
    Solve diag(X @ A') = B @ w for X such that X_ij>=0 and sum_j(X_ij)==1 for all i

    Parameters
    ----------
    A : np.ndarray
        Matrix whose rows are combined by the weight matrix X (X has A's shape).
    B : np.ndarray
        Matrix multiplied by the (vectorized) weights w.
    w : scalar, str, or np.ndarray
        Target weights; passed through _vectorize_beta, so e.g. "uniform"
        yields equal weights over B's columns.

    Returns
    -------
    np.ndarray
        The optimal weight matrix X. NOTE(review): cvxpy leaves X.value as
        None if the solve fails -- callers assume success.
    """
    # Vectorize weights
    w = _vectorize_beta(beta=w,x=B)
    # Set up variable to solve for
    X = cp.Variable(shape=(A.shape))
    # Set up constraints: non-negative entries, each row summing to one
    constraints = [X >= 0,
                   X @ np.ones(shape=(A.shape[1],)) == 1
                   ]
    # Set up objective function: least squares between diag(X A') and B w
    objective = cp.Minimize(cp.sum_squares(cp.diag(X @ A.T) - B @ w))
    # Instantiate
    problem = cp.Problem(objective=objective, constraints=constraints)
    # Solve (No need to specify solver because by default CVXPY calls the solver most specialized to the problem type)
    problem.solve(verbose=False)
    return X.value
def _vectorize_beta(beta,x):
"""
Turn supplied beta into an appropriate shape
"""
if isinstance(beta, (int, float, np.integer)):
beta = np.repeat(a=beta, repeats=x.shape[1])
elif isinstance(beta, np.ndarray):
if len(beta)<x.shape[1]:
beta = np.tile(A=beta, reps=int(np.ceil(x.shape[1]/len(beta))))
# Shorten potentially
beta = beta[:x.shape[1]]
elif isinstance(beta, str):
if beta=="uniform":
beta = np.repeat(a=1/x.shape[1], repeats=x.shape[1])
else:
raise WrongInputException(input_name="beta",
provided_input=beta,
allowed_inputs=[int, float, str, np.ndarray, np.integer])
# Make sure beta has the right dimensions
beta = beta.reshape(-1,)
if x.shape[1]!=beta.shape[0]:
raise Exception(f"Beta is {beta.shape}-dim vector, but X is {x.shape}-dim matrix")
return beta
def generate_linear_data(x,
                         beta=1,
                         beta_handling="default",
                         include_intercept=False,
                         expand=False,
                         degree=2,
                         interaction_only=False,
                         enforce_limits=False,
                         tol_fstar=100,
                         **kwargs):
    """
    Generate the conditional mean f_star = E[Y|X=x] as a (possibly expanded)
    linear function of x.

    Parameters
    ----------
    x : np.array or pd.DataFrame
        Exogeneous data
    beta : int, list-type or array, optional
        Coefficients to be multiplied to x. The default is 1.
    beta_handling : str, optional
        How to handle beta. The default is "default".
        if "default", use x'beta
        if "structural", make it look like some beta was multiplied to x, where in fact we use clever weights
        if "split_order", apply one beta per polynomial order (requires expand=True)
    include_intercept : bool, optional
        Add intercept/bias term to x. The default is False.
    expand : bool, optional
        Add higher-order terms of x. The default is False.
    degree : int, optional
        Degree of higher-order terms if expand==True. The default is 2.
    interaction_only : bool, optional
        Whether to focus on interactions when expand==True or also higher order polynomials. The default is False.
    enforce_limits : bool, optional
        Clip f_star elementwise to each observation's [min(x), max(x)]. The default is False.
    tol_fstar : float, optional
        Tolerance when beta_handling="structural". The default is 100.

    Returns
    -------
    f_star : np.array
        Conditional mean of Y
    """
    BETA_HANDLING_ALLOWED = ["default", "structural", "split_order"]
    # Convert to np and break link
    x = np.array(x.copy())
    # Per-observation extrema of X (used for clipping below)
    if enforce_limits:
        x_min, x_max = np.min(x, axis=1), np.max(x, axis=1)
    # Series expansion of X
    if expand:
        if degree<2:
            raise Exception(f"When polynomial features are generated (expand=True), 'degree' must be >=2. It is curently {degree}")
        # Instantiate
        polynomialfeatures = PolynomialFeatures(degree=degree, interaction_only=interaction_only, include_bias=False, order='C')
        # Expand x; drop the leading x.shape[1] columns, which replicate x itself
        x_poly = polynomialfeatures.fit_transform(x)[:,x.shape[1]:]
        # Concatenate
        x_all = np.concatenate((x,x_poly), axis=1)
    else:
        x_all = x
    # Include a constant in X
    if include_intercept:
        x = add_constant(data=x, prepend=True, has_constant='skip')
    # Different ways of generating beta and fstar
    if beta_handling=="default":
        # Make beta a conformable vector
        beta = _vectorize_beta(beta=beta,x=x_all)
        # Generate fstar=E[y|X=x]
        f_star = x_all @ beta
    elif beta_handling=="structural":
        # Construct Y=f_star such that
        # f_star = diag(WX')=X_all*beta_uniform, with W summing to one per row and all non-negative.
        # Get tricky weight matrix, solving diag(WX')=X_all*beta_uniform
        weights = _solve_meta_problem(A=x, B=x_all, w="uniform")
        # Generate fstar=E[y|X=x]
        f_star = np.diagonal(weights @ x.T)
        # Fact check this
        f_star_check = x_all @ _vectorize_beta(beta="uniform",x=x_all)
        # BUG FIX: compare summed *absolute* deviations; a signed sum lets
        # positive and negative errors cancel and can mask a bad solution
        if np.sum(np.abs(f_star-f_star_check)) > tol_fstar:
            raise Exception("Trickiness didn't work as differences are above tolerance")
    elif beta_handling=="split_order":
        # Apply a different beta to each order, e.g. X*b1 + X^2*b2 + X^3*b3 with beta=[b1,b2,b3]
        if isinstance(beta, (int, float, str, np.integer)):
            raise Exception("Whenever 'beta_handling'='split_order', then 'beta' cannot be either (int, float, str)")
        elif len(beta)!=degree:
            raise Exception(f"beta is of length {len(beta)}, but MUST be of length {degree}")
        if not expand:
            raise Exception("Whenever 'beta_handling'='split_order', then 'expand' must be True")
        # First-order beta
        beta_first_order = _vectorize_beta(beta=beta[0],x=x)
        # Higher-order betas, built degree by degree
        beta_higher_order = np.empty(shape=(0,))
        # Tracks how many higher-order columns have been consumed so far
        higher_order_col = 0
        for higher_order in range(2,degree+1):
            # Instantiate
            poly_temp = PolynomialFeatures(degree=higher_order, interaction_only=interaction_only, include_bias=False, order='C')
            # Columns added by *this* degree only
            x_poly_temp = poly_temp.fit_transform(x)[:,x.shape[1]+higher_order_col:]
            # Generate temporary betas for this degree of the expansion
            beta_higher_order_temp = _vectorize_beta(beta=beta[higher_order-1],x=x_poly_temp)
            # Append betas
            beta_higher_order = np.append(arr=beta_higher_order, values=beta_higher_order_temp)
            # Advance the column counter that governs which columns to match in X
            higher_order_col += x_poly_temp.shape[1]
        # Generate fstar=E[y|X=x]
        f_star = x @ beta_first_order + x_poly @ beta_higher_order
    else:
        raise WrongInputException(input_name="beta_handling",
                                  provided_input=beta_handling,
                                  allowed_inputs=BETA_HANDLING_ALLOWED)
    # Reshape for conformity
    f_star = f_star.reshape(-1,)
    if enforce_limits:
        # Clip elementwise to each row's own range of regressor values
        f_star = np.where(f_star<x_min, x_min, f_star)
        f_star = np.where(f_star>x_max, x_max, f_star)
    return f_star
#------------------------------------------------------------------------------
# Simulate data
#------------------------------------------------------------------------------ | 35.354779 | 140 | 0.510581 | #------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
# Standard
import numpy as np
import pandas as pd
import cvxpy as cp
from sklearn.preprocessing import PolynomialFeatures
from statsmodels.tools.tools import add_constant
from scipy.stats import norm
# User
from .exceptions import WrongInputException
###############################################################################
# Main
###############################################################################
#------------------------------------------------------------------------------
# Tools
#------------------------------------------------------------------------------
def get_colnames(x,prefix="X"):
    """Build column labels for array *x*: prefix1..prefixP, or [prefix] if 1-d."""
    try:
        n_cols = x.shape[1]
    except IndexError:
        # 1-d input: a single unnumbered label
        return [prefix]
    return [f"{prefix}{j}" for j in range(1, n_cols + 1)]
def convert_to_dict_series(Yobs=None,Ytrue=None,Y0=None,Y1=None,W=None):
    """Wrap the non-None arguments in named pandas Series, keyed by argument name."""
    candidates = {"Yobs": Yobs, "Ytrue": Ytrue, "Y0": Y0, "Y1": Y1, "W": W}
    # Skip arguments left as None; each Series carries its argument name
    return {name: pd.Series(values, name=name)
            for name, values in candidates.items() if values is not None}
def convert_to_dict_df(X=None):
    """Wrap the non-None arguments in DataFrames with generated column names."""
    candidates = {"X": X}
    # Skip arguments left as None; columns are prefixed by the argument name
    return {name: pd.DataFrame(value, columns=get_colnames(x=value, prefix=name))
            for name, value in candidates.items() if value is not None}
def convert_normal_to_uniform(x, mu="infer", sigma="infer", lower_bound=0, upper_bound=1, n_digits_round=2):
    """Map (assumed) normal draws *x* to uniform draws on [lower_bound, upper_bound].

    Uses the probability integral transform: F(x) is uniform on [0, 1] when F
    is the CDF of the generating distribution; the CDF values are then rescaled
    to the requested interval.
    See link: https://math.stackexchange.com/questions/2343952/how-to-transform-gaussiannormal-distribution-to-uniform-distribution

    mu/sigma: distribution parameters, or "infer" to estimate them column-wise
    from x (rounded to n_digits_round).
    """
    # Convert to np and break link
    x = np.array(x.copy())
    if mu=="infer":
        mu = np.mean(x, axis=0).round(n_digits_round)
    if sigma=="infer":
        sigma = np.sqrt(np.var(x, axis=0)).round(n_digits_round)
    # Get CDF (uniform on [0,1] under the assumed normal)
    x_cdf = norm.cdf(x=x, loc=mu, scale=sigma)
    # BUG FIX: shift by +lower_bound (was "- lower_bound", which maps onto
    # [-lower_bound, upper_bound - 2*lower_bound] whenever lower_bound != 0)
    x_uni = lower_bound + (upper_bound-lower_bound)*x_cdf
    return x_uni
#------------------------------------------------------------------------------
# Generate X data
#------------------------------------------------------------------------------
def multivariate_normal(N,p,mu,sigma,covariance,lower_limit=None, upper_limit=None):
    """Draw N observations from a p-dim normal with common mean, variance, and covariance.

    Every coordinate has mean *mu* and variance *sigma*^2; every off-diagonal
    covariance equals *covariance*. If limits are given, rejection-sample until
    N rows lie entirely within [lower_limit, upper_limit].

    Returns a pd.DataFrame of shape (N, p).
    """
    if (lower_limit is None) and (upper_limit is None):
        # Covariance matrix: sigma^2 on the diagonal, `covariance` elsewhere
        cov_diag = np.diag(np.repeat(a=sigma**2, repeats=p))
        cov_off_diag = np.ones(shape=(p,p)) * covariance
        np.fill_diagonal(a=cov_off_diag, val=0)
        cov_mat = cov_diag + cov_off_diag
        X = pd.DataFrame(np.random.multivariate_normal(mean=np.repeat(a=mu, repeats=p),
                                                       cov=cov_mat,
                                                       size=N))
    else:
        # Missing limits default to +/- infinity
        if lower_limit is None:
            lower_limit = -np.inf
        if upper_limit is None:
            upper_limit = np.inf
        if lower_limit>=upper_limit:
            raise Exception(f"Lower limit (= {lower_limit}) cannot exceed upper limit (= {upper_limit})")
        # Rejection sampling: keep drawing until N valid rows are collected
        valid_N = 0
        X = pd.DataFrame()
        while valid_N<N:
            # Generate temporary data without limits
            X_temp = multivariate_normal(N=N,p=p,mu=mu,sigma=sigma,covariance=covariance,lower_limit=None,upper_limit=None)
            # Drop rows with any coordinate outside the limits
            invalid_idx = (X_temp < lower_limit).any(axis=1) | (X_temp > upper_limit).any(axis=1)
            X_temp = X_temp.loc[~invalid_idx,:]
            # BUG FIX: DataFrame.append was removed in pandas 2.0 -> use pd.concat
            X = pd.concat([X, X_temp], ignore_index=True)
            valid_N = len(X)
        X = X.iloc[0:N,:]
    return X
def generate_ar_process(T,
                        p,
                        AR_lags,
                        AR_coefs,
                        burnin=50,
                        intercept=0,
                        mu0=0,
                        sigma0=1,
                        coef_on_error=1,
                        **kwargs):
    """Simulate T observations of p parallel AR(AR_lags) processes.

    Parameters
    ----------
    T : int
        Number of observations returned (after discarding the burn-in).
    p : int
        Number of series. NOTE(review): p >= 2 appears required, since the
        burn-in draw uses np.cov(errors.T), which is 0-d for p=1 -- confirm.
    AR_lags : int
        Autoregressive order.
    AR_coefs : array-like
        Coefficients [phi_1, ..., phi_AR_lags] on lags 1..AR_lags.
    burnin : int
        Initial draws discarded to remove dependence on the starting values.
    intercept, mu0, sigma0, coef_on_error : scalars
        Process intercept, mean/std of the initial values, and scale on the
        error term.
    **kwargs
        'errors' : optional (T, p) innovations; drawn N(1, I) otherwise.

    Returns
    -------
    np.ndarray of shape (T, p).
    """
    # Flip so the coefficients line up with rows ordered oldest -> newest below
    AR_coefs = np.flip(AR_coefs).reshape(-1,1)
    # Innovations for the kept sample (mean-one normals unless supplied)
    errors = kwargs.get('errors', np.random.multivariate_normal(mean=np.ones(p),
                                                                cov=np.identity(p),
                                                                size=T))
    # Innovations for the burn-in period, matched to the sample moments
    # (debug print of the error correlation matrix removed)
    errors_burnin = np.random.multivariate_normal(mean=np.mean(errors,axis=0),
                                                  cov=np.cov(errors.T),
                                                  size=burnin)
    errors_all = np.concatenate((errors_burnin,errors))
    # Generate initial value(s)
    X = mu0 + sigma0 * np.random.randn(AR_lags,p)
    # Simulate AR(p) with burn-in included
    for b in range(burnin+T):
        # BUG FIX: recurse on the *most recent* AR_lags rows (X[-AR_lags:,:]),
        # not the initial rows (X[0:AR_lags,:]) -- the original made every new
        # value a function of the start values, so no AR dynamics emerged.
        # The flipped coefficients match this oldest-to-newest row order.
        X = np.concatenate((X,
                            intercept + AR_coefs.T @ X[-AR_lags:,:] + coef_on_error * errors_all[b,0:p]),
                           axis=0)
    # Return only the last T observations (we have removed the dependency on the initial draws)
    return X[-T:,]
def generate_cross_sectional_data(N,
                                  p,
                                  distribution="normal",
                                  mu=None,
                                  sigma=None,
                                  covariance=None,
                                  lower_bound=None,
                                  upper_bound=None,
                                  dtype="np.darray",
                                  **kwargs):
    """Draw an (N, p) cross-section of covariates from a normal or uniform law.

    Normal draws require mu/sigma/covariance (bounds act as truncation limits);
    uniform draws require lower_bound/upper_bound. The result is returned as an
    ndarray or a DataFrame depending on *dtype*.
    """
    DISTRIBUTION_ALLOWED = ["normal", "uniform"]
    if distribution=="normal":
        if (mu is None) or (sigma is None) or (covariance is None):
            raise Exception("When 'distribution'=='normal', both 'mu', 'sigma', and 'covariance' must be provided and neither can be None")
        # Truncated/untruncated multivariate normal draw
        X = multivariate_normal(N=N,
                                p=p,
                                mu=mu,
                                sigma=sigma,
                                covariance=covariance,
                                lower_limit=lower_bound,
                                upper_limit=upper_bound)
    elif distribution=="uniform":
        if (lower_bound is None) or (upper_bound is None):
            raise Exception("When 'distribution'=='uniform', both 'lower_bound' and 'upper_bound' must be provided and neither can be None")
        # Independent uniform draws on [lower_bound, upper_bound]
        X = np.random.uniform(low=lower_bound,
                              high=upper_bound,
                              size=(N,p))
    else:
        raise WrongInputException(input_name="distribution",
                                  provided_input=distribution,
                                  allowed_inputs=DISTRIBUTION_ALLOWED)
    # Coerce to the requested container type
    if dtype=="np.darray":
        X = np.array(X)
    elif dtype=="pd.DataFrame":
        if not isinstance(X, pd.DataFrame):
            X = pd.DataFrame(X)
    return X
def generate_errors(N=1000, p=5, mu=0, sigma=1, cov_X=0.25, cov_X_y=0.5):
    """Draw N error vectors of dimension p+1 (p regressors plus one outcome).

    Each coordinate has variance sigma^2; regressor pairs share covariance
    cov_X and each regressor covaries with the outcome by cov_X_y.
    """
    # Number of dimensions including y
    n_dim = p + 1
    # Start from the off-diagonal structure: cov_X among regressors ...
    cov_mat = np.full(shape=(n_dim, n_dim), fill_value=cov_X, dtype=float)
    # ... cov_X_y between each regressor and the outcome (last row/column) ...
    cov_mat[p, :] = cov_mat[:, p] = cov_X_y
    # ... and sigma^2 on the diagonal
    np.fill_diagonal(a=cov_mat, val=sigma**2)
    # Draw the errors
    return np.random.multivariate_normal(mean=np.repeat(a=mu, repeats=n_dim),
                                         cov=cov_mat,
                                         size=N)
#------------------------------------------------------------------------------
# Generate f_star = E[Y|X=x]
#------------------------------------------------------------------------------
def _solve_meta_problem(A,B,w):
    """
    Solve diag(X @ A') = B @ w for X such that X_ij>=0 and sum_j(X_ij)==1 for all i

    Parameters
    ----------
    A : np.ndarray
        Matrix whose rows are combined by the weight matrix X (X has A's shape).
    B : np.ndarray
        Matrix multiplied by the (vectorized) weights w.
    w : scalar, str, or np.ndarray
        Target weights; passed through _vectorize_beta, so e.g. "uniform"
        yields equal weights over B's columns.

    Returns
    -------
    np.ndarray
        The optimal weight matrix X. NOTE(review): cvxpy leaves X.value as
        None if the solve fails -- callers assume success.
    """
    # Vectorize weights
    w = _vectorize_beta(beta=w,x=B)
    # Set up variable to solve for
    X = cp.Variable(shape=(A.shape))
    # Set up constraints: non-negative entries, each row summing to one
    constraints = [X >= 0,
                   X @ np.ones(shape=(A.shape[1],)) == 1
                   ]
    # Set up objective function: least squares between diag(X A') and B w
    objective = cp.Minimize(cp.sum_squares(cp.diag(X @ A.T) - B @ w))
    # Instantiate
    problem = cp.Problem(objective=objective, constraints=constraints)
    # Solve (No need to specify solver because by default CVXPY calls the solver most specialized to the problem type)
    problem.solve(verbose=False)
    return X.value
def _vectorize_beta(beta,x):
"""
Turn supplied beta into an appropriate shape
"""
if isinstance(beta, (int, float, np.integer)):
beta = np.repeat(a=beta, repeats=x.shape[1])
elif isinstance(beta, np.ndarray):
if len(beta)<x.shape[1]:
beta = np.tile(A=beta, reps=int(np.ceil(x.shape[1]/len(beta))))
# Shorten potentially
beta = beta[:x.shape[1]]
elif isinstance(beta, str):
if beta=="uniform":
beta = np.repeat(a=1/x.shape[1], repeats=x.shape[1])
else:
raise WrongInputException(input_name="beta",
provided_input=beta,
allowed_inputs=[int, float, str, np.ndarray, np.integer])
# Make sure beta has the right dimensions
beta = beta.reshape(-1,)
if x.shape[1]!=beta.shape[0]:
raise Exception(f"Beta is {beta.shape}-dim vector, but X is {x.shape}-dim matrix")
return beta
def generate_linear_data(x,
                         beta=1,
                         beta_handling="default",
                         include_intercept=False,
                         expand=False,
                         degree=2,
                         interaction_only=False,
                         enforce_limits=False,
                         tol_fstar=100,
                         **kwargs):
    """
    Generate the conditional mean f_star = E[Y|X=x] as a (possibly expanded)
    linear function of x.

    Parameters
    ----------
    x : np.array or pd.DataFrame
        Exogeneous data
    beta : int, list-type or array, optional
        Coefficients to be multiplied to x. The default is 1.
    beta_handling : str, optional
        How to handle beta. The default is "default".
        if "default", use x'beta
        if "structural", make it look like some beta was multiplied to x, where in fact we use clever weights
        if "split_order", apply one beta per polynomial order (requires expand=True)
    include_intercept : bool, optional
        Add intercept/bias term to x. The default is False.
    expand : bool, optional
        Add higher-order terms of x. The default is False.
    degree : int, optional
        Degree of higher-order terms if expand==True. The default is 2.
    interaction_only : bool, optional
        Whether to focus on interactions when expand==True or also higher order polynomials. The default is False.
    enforce_limits : bool, optional
        Clip f_star elementwise to each observation's [min(x), max(x)]. The default is False.
    tol_fstar : float, optional
        Tolerance when beta_handling="structural". The default is 100.

    Returns
    -------
    f_star : np.array
        Conditional mean of Y
    """
    BETA_HANDLING_ALLOWED = ["default", "structural", "split_order"]
    # Convert to np and break link
    x = np.array(x.copy())
    # Per-observation extrema of X (used for clipping below)
    if enforce_limits:
        x_min, x_max = np.min(x, axis=1), np.max(x, axis=1)
    # Series expansion of X
    if expand:
        if degree<2:
            raise Exception(f"When polynomial features are generated (expand=True), 'degree' must be >=2. It is curently {degree}")
        # Instantiate
        polynomialfeatures = PolynomialFeatures(degree=degree, interaction_only=interaction_only, include_bias=False, order='C')
        # Expand x; drop the leading x.shape[1] columns, which replicate x itself
        x_poly = polynomialfeatures.fit_transform(x)[:,x.shape[1]:]
        # Concatenate
        x_all = np.concatenate((x,x_poly), axis=1)
    else:
        x_all = x
    # Include a constant in X
    if include_intercept:
        x = add_constant(data=x, prepend=True, has_constant='skip')
    # Different ways of generating beta and fstar
    if beta_handling=="default":
        # Make beta a conformable vector
        beta = _vectorize_beta(beta=beta,x=x_all)
        # Generate fstar=E[y|X=x]
        f_star = x_all @ beta
    elif beta_handling=="structural":
        # Construct Y=f_star such that
        # f_star = diag(WX')=X_all*beta_uniform, with W summing to one per row and all non-negative.
        # Get tricky weight matrix, solving diag(WX')=X_all*beta_uniform
        weights = _solve_meta_problem(A=x, B=x_all, w="uniform")
        # Generate fstar=E[y|X=x]
        f_star = np.diagonal(weights @ x.T)
        # Fact check this
        f_star_check = x_all @ _vectorize_beta(beta="uniform",x=x_all)
        # BUG FIX: compare summed *absolute* deviations; a signed sum lets
        # positive and negative errors cancel and can mask a bad solution
        if np.sum(np.abs(f_star-f_star_check)) > tol_fstar:
            raise Exception("Trickiness didn't work as differences are above tolerance")
    elif beta_handling=="split_order":
        # Apply a different beta to each order, e.g. X*b1 + X^2*b2 + X^3*b3 with beta=[b1,b2,b3]
        if isinstance(beta, (int, float, str, np.integer)):
            raise Exception("Whenever 'beta_handling'='split_order', then 'beta' cannot be either (int, float, str)")
        elif len(beta)!=degree:
            raise Exception(f"beta is of length {len(beta)}, but MUST be of length {degree}")
        if not expand:
            raise Exception("Whenever 'beta_handling'='split_order', then 'expand' must be True")
        # First-order beta
        beta_first_order = _vectorize_beta(beta=beta[0],x=x)
        # Higher-order betas, built degree by degree
        beta_higher_order = np.empty(shape=(0,))
        # Tracks how many higher-order columns have been consumed so far
        higher_order_col = 0
        for higher_order in range(2,degree+1):
            # Instantiate
            poly_temp = PolynomialFeatures(degree=higher_order, interaction_only=interaction_only, include_bias=False, order='C')
            # Columns added by *this* degree only
            x_poly_temp = poly_temp.fit_transform(x)[:,x.shape[1]+higher_order_col:]
            # Generate temporary betas for this degree of the expansion
            beta_higher_order_temp = _vectorize_beta(beta=beta[higher_order-1],x=x_poly_temp)
            # Append betas
            beta_higher_order = np.append(arr=beta_higher_order, values=beta_higher_order_temp)
            # Advance the column counter that governs which columns to match in X
            higher_order_col += x_poly_temp.shape[1]
        # Generate fstar=E[y|X=x]
        f_star = x @ beta_first_order + x_poly @ beta_higher_order
    else:
        raise WrongInputException(input_name="beta_handling",
                                  provided_input=beta_handling,
                                  allowed_inputs=BETA_HANDLING_ALLOWED)
    # Reshape for conformity
    f_star = f_star.reshape(-1,)
    if enforce_limits:
        # Clip elementwise to each row's own range of regressor values
        f_star = np.where(f_star<x_min, x_min, f_star)
        f_star = np.where(f_star>x_max, x_max, f_star)
    return f_star
def generate_friedman_data_1(x, **kwargs):
    """Evaluate a Friedman-style benchmark response surface E[y|X=x].

    NOTE(review): this formula (exponential + sigmoid + linear terms) does not
    match the classic "Friedman #1" function from the literature — that one is
    the sine/quadratic surface implemented in ``generate_friedman_data_2``
    below. Confirm the numbering is intentional.

    Requires at least 5 columns in ``x``; any extra columns are ignored.
    Returns a 1-D array of conditional means, one per row of ``x``.
    """
    # Work on a NumPy copy so the caller's object is never mutated.
    x = np.array(x, copy=True)
    if x.shape[1] < 5:
        raise Exception(f"Friedman 1 requires at least 5 regresors, but only {x.shape[1]} are provided in x")
    # Unpack the five regressors used by the surface.
    x0, x1, x2, x3, x4 = (x[:, j] for j in range(5))
    f_star = 0.1*np.exp(4*x0) + 4/(1+np.exp(-20*(x1-0.5))) + 3*x2 + 2*x3 + 1*x4
    # Flatten for conformity with the other data generators.
    return f_star.reshape(-1,)
def generate_friedman_data_2(x, **kwargs):
    """Evaluate a Friedman benchmark response surface E[y|X=x].

    NOTE(review): despite the name, this is the well-known
    ``10*sin(pi*x1*x2) + 20*(x3-0.5)^2 + 10*x4 + 5*x5`` surface usually
    referred to as "Friedman 1" in the literature — confirm the numbering
    is intentional.

    Requires at least 5 columns in ``x``; any extra columns are ignored.
    Returns a 1-D array of conditional means, one per row of ``x``.
    """
    # Work on a NumPy copy so the caller's object is never mutated.
    x = np.array(x, copy=True)
    if x.shape[1] < 5:
        raise Exception(f"Friedman 2 requires at least 5 regresors, but only {x.shape[1]} are provided in x")
    # Unpack the five regressors used by the surface.
    x0, x1, x2, x3, x4 = (x[:, j] for j in range(5))
    f_star = 10*np.sin(np.pi*x0*x1) + 20*(x2-0.5)**2 + 10*x3 + 5*x4
    # Flatten for conformity with the other data generators.
    return f_star.reshape(-1,)
#------------------------------------------------------------------------------
# Simulate data
#------------------------------------------------------------------------------
def simulate_data(f,
                  T0=500,
                  T1=50,
                  X_type="cross_section",
                  X_dist="normal",
                  X_dim=5,
                  X_mean=0,
                  X_std=1,
                  X_covariance=0,
                  ate=1,
                  eps_mean=0,
                  eps_std=1,
                  eps_cov_X=0,
                  eps_cov_X_y=0,
                  **kwargs):
    """Simulate outcome data with a treatment switch after T0 periods.

    Args:
        f: conditional-mean function; called as ``f(x=X, **kwargs)`` and
            expected to return a 1-D array of length T0+T1.
        T0, T1: number of pre-/post-treatment observations.
        X_type: "AR" for autoregressive covariates, "cross_section" for
            i.i.d. draws.
        X_dist, X_dim, X_mean, X_std, X_covariance: covariate distribution
            parameters (passed to the respective generator).
        ate: average treatment effect added to post-treatment outcomes.
        eps_mean, eps_std, eps_cov_X, eps_cov_X_y: error distribution
            parameters.
        **kwargs: forwarded to the covariate generator and to ``f``.

    Returns:
        pd.DataFrame with columns Y (observed outcome), W (treatment
        indicator), X0..X{p-1} (covariates), Ystar (noise-free outcome),
        and U (error term).

    Raises:
        ValueError: if ``X_type`` is not one of "AR"/"cross_section".
    """
    # Total number of time periods
    T = T0 + T1
    # Step 1: generate errors first, because they may be used in the
    # generation of X (via eps_cov_X / eps_cov_X_y).
    errors = generate_errors(N=T, p=X_dim, mu=eps_mean, sigma=eps_std, cov_X=eps_cov_X, cov_X_y=eps_cov_X_y)
    # Step 2: generate covariates
    if X_type == "AR":
        X = generate_ar_process(
            T=T,
            p=X_dim,
            errors=errors,
            **kwargs
        )
    elif X_type == "cross_section":
        X = generate_cross_sectional_data(N=T,
                                          p=X_dim,
                                          mu=X_mean,
                                          sigma=X_std,
                                          covariance=X_covariance,
                                          distribution=X_dist,
                                          **kwargs
                                          )
    else:
        # Previously an unknown X_type fell through silently and crashed
        # later with a NameError on X; fail fast with a clear message.
        raise ValueError(f"X_type must be 'AR' or 'cross_section', got {X_type!r}")
    # Treatment indicator: 0 for the first T0 periods, 1 for the last T1.
    W = np.repeat((0, 1), (T0, T1))
    # Noise-free outcome Ystar = E[y|X]
    Ystar = f(x=X, **kwargs)
    # Observed outcome: baseline + treatment effect + idiosyncratic error
    # (last column of the error matrix is the outcome error).
    Y = Ystar + ate*W + errors[:, -1]
    # Collect everything in a single frame.
    df = pd.concat(objs=[pd.Series(data=Y, name="Y"),
                         pd.Series(data=W, name="W"),
                         pd.DataFrame(data=X, columns=[f"X{d}" for d in range(X.shape[1])]),
                         pd.Series(data=Ystar, name="Ystar"),
                         pd.Series(data=errors[:, -1], name="U"),
                         ],
                   axis=1)
    return df
73fa3f6c48dd4eb5fa2c1c2c1f58bda42bbea3ea | 5,152 | py | Python | ntk/widgets/frame.py | njNafir/ntk | 05dc8c89751ba91f9ff6220a9850266f5185f451 | [
"MIT"
] | 3 | 2021-01-05T04:31:32.000Z | 2021-01-07T04:33:48.000Z | ntk/widgets/frame.py | njNafir/ntk | 05dc8c89751ba91f9ff6220a9850266f5185f451 | [
"MIT"
] | 1 | 2021-12-29T17:15:01.000Z | 2021-12-29T17:15:01.000Z | ntk/widgets/frame.py | njNafir/ntk | 05dc8c89751ba91f9ff6220a9850266f5185f451 | [
"MIT"
] | 2 | 2021-01-05T07:45:24.000Z | 2021-01-07T04:33:52.000Z | # Import Tkinter Frame to use it and modify default
from tkinter import Frame as tkFrame
# Import all util from ntk.utils
from ntk.utils import *
# frame class can be called once the root tk is defined
# only one must required field is root which is the any of widget
# other params can be set to get different type of design
# but this is also preferred design to use in your window
# every design is custom and can be set twice
# Frame instance will contain the base tkinter frame instance just with modified styles and methods
# init method is getting all of your arguments
# and keyword arguments
# and passing related
# and unknown params
# and args to tkinter frame
# so if it cause an error most probably it's getting from tkinter frame object
# see your all arguments and keywords is supporting by Frame or tkinter frame
| 45.192982 | 116 | 0.504464 | # Import Tkinter Frame to use it and modify default
from tkinter import Frame as tkFrame
# Import all util from ntk.utils
from ntk.utils import *
class Frame(tkFrame):
    """A ``tkinter.Frame`` subclass with ntk styling and layout defaults.

    Colors are resolved through the ntk ``color()`` helper (bootstrap-style
    names such as ``"bg-white"``), and the widget grids itself into *root*
    during construction, also configuring the parent's row/column weights so
    the frame auto-scales with the window.

    NOTE(review): ``color`` comes from the star import of ``ntk.utils`` —
    behavior of unknown color names depends on that helper.
    """
    # frame class can be called once the root tk is defined
    # only one must required field is root which is the any of widget
    # other params can be set to get different type of design
    # but this is also preferred design to use in your window
    # every design is custom and can be set twice
    # Frame instance will contain the base tkinter frame instance just with modified styles and methods
    # init method is getting all of your arguments
    # and keyword arguments
    # and passing related
    # and unknown params
    # and args to tkinter frame
    # so if it cause an error most probably it's getting from tkinter frame object
    # see your all arguments and keywords is supporting by Frame or tkinter frame
    def __init__(self,
                 root, # root is a master window to place this frame into it
                 bg="bg-white", # background color, default is bootstrap referenced white
                 bd=0, # border width
                 colormap=None, # color map
                 class_=False, # class name to inherit styles and methods
                 container=0, # container
                 cursor="arrow", # mouse cursor style arrow hand2 etc
                 height=64, # frame height
                 highlightbackground="bg-light", # background color when frame is highlighted,
                 # default is bootstrap referenced light
                 highlightcolor="bg-dark", # foreground color when frame is highlighted,
                 # default is bootstrap referenced dark
                 highlightthickness=0, # thickness width when frame is highlighted
                 row=0, # grid row position
                 column=0, # grid column position
                 padx=0, # grid padding left and right
                 pady=0, # grid padding top and bottom
                 relief="flat", # relief style flat groove etc
                 sticky="w", # grid sticky position
                 takefocus=0, # set frame can take focus or not
                 visual=0, # visual
                 width=128, # frame width
                 gridrow=1, # grid row configure row weight
                 gridcolumn=1, # grid column configure column weight
                 *args, **kwargs # extra arguments and keyword arguments
                 ):
        """Create, style, and grid the frame into *root* in one step."""
        super(Frame, self).__init__(
            root,
            class_=class_ if class_ else "Frame", # pass class if has else pass "Frame"
            # which is tkinter default
            visual=visual,
            container=container,
            colormap=colormap,
            highlightbackground=color(highlightbackground), # background color when frame is
            # highlighted
            highlightcolor=color(highlightcolor), # foreground color when frame is
            # highlighted
            highlightthickness=highlightthickness,
            bg=color(bg), # color of background
            borderwidth=bd,
            cursor=cursor,
            height=height,
            relief=relief,
            takefocus=takefocus,
            # width=width
            *args, **kwargs
        )
        # setting width by configure frame
        # because somehow passing it into super method
        # raising a error
        self.config(width=width)
        # frame grid configure automatically
        # so you don't need to call it again
        # but you can pass grid params in frame class
        self.grid(
            row=row, # grid row position
            column=column, # grid column position
            padx=padx, # grid padding left and right
            pady=pady, # grid padding top and bottom
            sticky=sticky # grid sticky position
        )
        # set grid column configure
        # so it will auto scale when
        # main window scaled
        root.grid_columnconfigure(
            column, # column configure column position
            weight=gridcolumn # grid weight of column
        )
        # set grid row configure
        # so it will auto scale when
        # main window scaled
        root.grid_rowconfigure(
            row, # row configure row position
            weight=gridrow # grid weight of row
        )
| 4,213 | 0 | 50 |
fc178f5f4dd1d6d7e04ff5f73e3e91fe2d56a918 | 2,959 | py | Python | src/robotide/widgets/popupmenu.py | jnhyperion/RIDE | 2fa78c32640e9c314ebc41f0f7b7d2024d71de8b | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-09-01T06:50:17.000Z | 2020-09-01T06:50:17.000Z | src/robotide/widgets/popupmenu.py | jnhyperion/RIDE | 2fa78c32640e9c314ebc41f0f7b7d2024d71de8b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/widgets/popupmenu.py | jnhyperion/RIDE | 2fa78c32640e9c314ebc41f0f7b7d2024d71de8b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
| 29.009804 | 89 | 0.649206 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
class PopupCreator(object):
    """Builds popup menus from a fixed item list plus pluggable hooks.

    External hooks are callables taking the context *data* and returning a
    list of extra menu items to append after a separator.
    """

    def __init__(self):
        self._external_hooks = []

    def add_hook(self, hook):
        """Register a hook that contributes extra menu items."""
        self._external_hooks.append(hook)

    def remove_hook(self, hook):
        """Unregister a previously added hook."""
        self._external_hooks.remove(hook)

    def _get_all_actions(self, fixed_menu_items, data):
        # Append hook-provided items (if any) after a separator.
        extra = self._get_external_menu_items(data)
        if extra:
            fixed_menu_items.add_separator()
            for entry in extra:
                fixed_menu_items.add_menu_item(entry)
        return fixed_menu_items

    def _get_external_menu_items(self, data):
        # Flatten the lists returned by every registered hook.
        return [entry for hook in self._external_hooks for entry in hook(data)]

    def show(self, parent, fixed_menu_items, data):
        """Display the assembled popup menu on *parent*."""
        PopupMenu(parent, self._get_all_actions(fixed_menu_items, data))
class PopupMenu(wx.Menu):
    """A wx context menu built from menu items and shown immediately.

    The menu is populated, popped up on *parent* during construction, and
    destroyed as soon as the user dismisses it.
    """
    def __init__(self, parent, menu_items):
        wx.Menu.__init__(self)
        for item in menu_items:
            if item.is_separator():
                self.AppendSeparator()
            else:
                self._add_item(item)
        # Blocks until the menu is dismissed, then clean up.
        parent.PopupMenu(self)
        self.Destroy()
    def _add_item(self, item):
        # Bind a fresh wx id to this item's callback.
        id_ = wx.NewIdRef()
        self.Append(id_, item.name)
        self.Bind(wx.EVT_MENU, item.callable, id=id_)
class PopupMenuItems(object):
    """Ordered collection of popup menu entries.

    Items may be added as ready-made objects via :meth:`add_menu_item` or
    created from label strings passed to the constructor.
    """
    def __init__(self, parent=None, menu_names=None):
        """Create the collection, turning each name into a PopupMenuItem.

        Note: the previous mutable default argument (``menu_names=[]``)
        was replaced with ``None`` to avoid the shared-default pitfall;
        semantics for all existing callers are unchanged.
        """
        self._items = []
        for item in (menu_names if menu_names is not None else []):
            self.add_menu_item(PopupMenuItem(item, parent=parent))
    def __iter__(self):
        return iter(self._items)
    def add_menu_item(self, item):
        """Append a menu item to the collection."""
        self._items.append(item)
    def add_separator(self):
        """Append a separator entry ('---')."""
        self.add_menu_item(PopupMenuItem('---'))
class PopupMenuItem(object):
    """A single popup menu entry: a label plus the callback to invoke."""

    def __init__(self, name, callable=None, parent=None):
        self.name = name
        self.callable = self._get_callable(name, callable, parent)

    def _get_callable(self, name, callable, parent):
        """Resolve the callback for this entry.

        An explicit *callable* wins; separators get ``None``; otherwise an
        ``On<Label>`` handler is looked up on *parent*, derived from the
        label text before any tab character, title-cased, spaces removed.
        """
        if callable:
            return callable
        if name == '---':
            return None
        label = name.split('\t')[0].title()
        handler_name = ''.join(ch for ch in label if not ch.isspace())
        return getattr(parent, 'On'+handler_name)

    def is_separator(self):
        """True if this entry is the '---' separator."""
        return self.name == '---'
| 1,777 | 25 | 497 |
4c6304a7a31fc6801d176feb32e9e99ee19825c8 | 5,293 | py | Python | tests/python/unittest/test_target_codegen_rocm.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 9 | 2019-12-17T08:03:54.000Z | 2022-01-19T02:34:23.000Z | tests/python/unittest/test_target_codegen_rocm.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 2 | 2020-06-18T21:15:42.000Z | 2020-06-24T17:38:37.000Z | tests/python/unittest/test_target_codegen_rocm.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 3 | 2020-10-04T20:30:18.000Z | 2022-01-24T18:03:52.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
import unittest
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
if __name__ == "__main__":
test_rocm_cross_thread_reduction()
test_rocm_inf_nan()
test_rocm_reduction_binding()
test_rocm_copy()
test_rocm_vectorize_add()
| 38.355072 | 112 | 0.623087 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
import unittest
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_cross_thread_reduction():
    """Row-sum a 128x128 matrix on ROCm via rfactor + cross-thread
    reduction, and check the result against NumPy."""
    # based on the reduction tutorial
    n = te.size_var("n")
    m = te.size_var("m")
    A = te.placeholder((n, m), name='A')
    k = te.reduce_axis((0, m), "k")
    B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
    s = te.create_schedule(B.op)
    ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
    BF = s.rfactor(B, ki)
    xo, xi = s[B].split(s[B].op.axis[0], factor=32)
    s[B].bind(xo, bx)
    s[B].bind(xi, ty)
    s[B].bind(s[B].op.reduce_axis[0], tx)
    s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
    # Only thread 0 writes the final reduced value.
    s[B].set_store_predicate(tx.var.equal(0))
    frocm = tvm.build(s, [A, B], "rocm")
    nn = 128
    ctx = tvm.rocm(0)
    a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), ctx)
    b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), ctx)
    frocm(a, b)
    tvm.testing.assert_allclose(
        b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-4)
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_inf_nan():
    """Check that kernels emitting inf/nan constants compile and run on
    ROCm for both float32 and float64 (output values are not checked)."""
    def check_inf_nan(ctx, n, value, dtype):
        # Trivial kernel whose output is the constant *value*.
        A = te.placeholder((n,), name='A', dtype=dtype)
        inf_value = tvm.tir.const(value, dtype=dtype)
        C = te.compute((n,), lambda i: inf_value, name='C')
        s = te.create_schedule(C.op)
        s[C].bind(s[C].op.axis[0], tx)
        fun = tvm.build(s, [A, C], "rocm")
        a = tvm.nd.empty((n,), A.dtype, ctx)
        c = tvm.nd.empty((n,), A.dtype, ctx)
        # Only need to test compiling here
        fun(a, c)
    ctx = tvm.rocm(0)
    check_inf_nan(ctx, 1, -float('inf'), 'float32')
    check_inf_nan(ctx, 1, -float('inf'), 'float64')
    check_inf_nan(ctx, 1, float('inf'), 'float32')
    check_inf_nan(ctx, 1, float('inf'), 'float64')
    check_inf_nan(ctx, 1, float('nan'), 'float32')
    check_inf_nan(ctx, 1, float('nan'), 'float64')
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_reduction_binding():
    """Schedule a reduction with reordered axes and bind the outer spatial
    axis to blockIdx.x (schedule construction only, no execution)."""
    k = te.reduce_axis((0, 32), 'k')
    A = te.placeholder((96, 32), name='A')
    B = te.compute( (96,), lambda m:
                    te.sum(A[m, k], axis=k),
                    name='B')
    s = te.create_schedule(B.op)
    s[B].reorder(B.op.reduce_axis[0], B.op.axis[0])
    mo, _ = s[B].split(B.op.axis[0], 32)
    s[B].bind(mo, bx)
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_copy():
    """Round-trip random host arrays of assorted dtypes/sizes through ROCm
    device memory and verify the data is unchanged."""
    def check_rocm(dtype, n):
        A = te.placeholder((n,), name='A', dtype=dtype)
        ctx = tvm.rocm(0)
        a_np = np.random.uniform(size=(n,)).astype(A.dtype)
        a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(a_np)
        b_np = a.asnumpy()
        tvm.testing.assert_allclose(a_np, b_np)
        tvm.testing.assert_allclose(a_np, a.asnumpy())
    for _ in range(100):
        dtype = np.random.choice(["float32", "float16", "int8", "int32"])
        logN = np.random.randint(1, 15)
        peturb = np.random.uniform(low=0.5, high=1.5)
        # Random size around a power of two, between ~1 and ~24K elements.
        check_rocm(dtype, int(peturb * (2 ** logN)))
@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..")
def test_rocm_vectorize_add():
    """Compile and run a lane-typed (vectorized) elementwise add on ROCm
    and check the result against NumPy."""
    num_thread = 8
    def check_rocm(dtype, n, lanes):
        # Vector type, e.g. "float32x2" for 2 lanes.
        A = te.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes))
        B = te.compute((n,), lambda i: A[i]+tvm.tir.const(1, A.dtype), name='B')
        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
        s[B].bind(xo, bx)
        s[B].bind(xi, tx)
        fun = tvm.build(s, [A, B], "rocm")
        ctx = tvm.rocm(0)
        a = tvm.nd.empty((n,), A.dtype, ctx).copyfrom(
            np.random.uniform(size=(n, lanes)))
        c = tvm.nd.empty((n,), B.dtype, ctx)
        fun(a, c)
        tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1)
    check_rocm("float32", 64, 2)
    check_rocm("float16", 64, 2)
if __name__ == "__main__":
test_rocm_cross_thread_reduction()
test_rocm_inf_nan()
test_rocm_reduction_binding()
test_rocm_copy()
test_rocm_vectorize_add()
| 3,447 | 0 | 110 |
9e202bf296952a87e8bd13621aef31c44d2e8513 | 972 | py | Python | app/__init__.py | FaiZaman/BreakingBoundrio | 2127c67542f65f46c5d6e41ab22f6f1438b90e84 | [
"MIT"
] | null | null | null | app/__init__.py | FaiZaman/BreakingBoundrio | 2127c67542f65f46c5d6e41ab22f6f1438b90e84 | [
"MIT"
] | null | null | null | app/__init__.py | FaiZaman/BreakingBoundrio | 2127c67542f65f46c5d6e41ab22f6f1438b90e84 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_migrate import Migrate
from config import Config
from app.main import bp as main_bp
from app.auth import bp as auth_bp
from app.account import bp as acc_bp
from app.interface import bp as interface_bp
from app.questions import bp as questions_bp
from app.db import db
from app.auth import login_manager
migrate = Migrate()
| 27.771429 | 84 | 0.76749 | from flask import Flask
from flask_migrate import Migrate
from config import Config
from app.main import bp as main_bp
from app.auth import bp as auth_bp
from app.account import bp as acc_bp
from app.interface import bp as interface_bp
from app.questions import bp as questions_bp
from app.db import db
from app.auth import login_manager
migrate = Migrate()
def create_app(config_class=Config):
    """Flask application factory.

    Builds the app, registers all blueprints, and binds the shared
    extensions (db, login manager, migrations).

    Args:
        config_class: configuration object/class handed to
            ``app.config.from_object``; defaults to ``Config``.

    Returns:
        The configured ``Flask`` application instance.
    """
    # Setup flask app and configure from config object
    app = Flask(__name__, template_folder='../templates', static_folder='../static')
    app.config.from_object(config_class)
    # Register blueprints
    app.register_blueprint(main_bp)
    app.register_blueprint(auth_bp)
    app.register_blueprint(acc_bp)
    app.register_blueprint(interface_bp, url_prefix='/interface')
    app.register_blueprint(questions_bp, url_prefix='/questions')
    # Initialise objects
    db.init_app(app)
    login_manager.init_app(app)
    migrate.init_app(app, db)
    return app
| 587 | 0 | 23 |
99d1a724c12e650c4eb466cbefc05505a9bb5d8d | 116 | py | Python | fintoc/__init__.py | KnowYourselves/fintoc-python | 7b61850db6bb029aafd6fbf8e37b46e1188474a9 | [
"BSD-3-Clause"
] | 80 | 2020-05-10T13:41:26.000Z | 2022-01-14T14:20:40.000Z | fintoc/__init__.py | nmassardot/fintoc-python | 5560e1f06ede0ff155d4274d3d8cf91e40e53710 | [
"BSD-3-Clause"
] | 23 | 2020-05-27T22:48:06.000Z | 2022-01-04T13:40:09.000Z | fintoc/__init__.py | nmassardot/fintoc-python | 5560e1f06ede0ff155d4274d3d8cf91e40e53710 | [
"BSD-3-Clause"
] | 8 | 2020-09-22T16:13:32.000Z | 2021-12-11T19:58:58.000Z | """
Init file for the Fintoc Python SDK.
"""
from fintoc.core import Fintoc
from fintoc.version import __version__
| 16.571429 | 38 | 0.767241 | """
Init file for the Fintoc Python SDK.
"""
from fintoc.core import Fintoc
from fintoc.version import __version__
| 0 | 0 | 0 |
729d56e724f94e9f641359a7e0bde4602f676368 | 1,671 | py | Python | tests/common_testing.py | michele-arrival/pytorch3d | f358b9b14dbc1414c588f308b35f55705d777873 | [
"BSD-3-Clause"
] | 2 | 2020-02-08T07:08:45.000Z | 2020-02-19T16:31:06.000Z | tests/common_testing.py | michele-arrival/pytorch3d | f358b9b14dbc1414c588f308b35f55705d777873 | [
"BSD-3-Clause"
] | null | null | null | tests/common_testing.py | michele-arrival/pytorch3d | f358b9b14dbc1414c588f308b35f55705d777873 | [
"BSD-3-Clause"
] | 1 | 2020-12-12T20:42:33.000Z | 2020-12-12T20:42:33.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
| 29.315789 | 78 | 0.582286 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
class TestCaseMixin(unittest.TestCase):
    """Extra assertion helpers for tensor/array-heavy test cases."""

    def assertSeparate(self, tensor1, tensor2) -> None:
        """
        Verify that tensor1 and tensor2 have their data in distinct locations.
        """
        ptr1 = tensor1.storage().data_ptr()
        ptr2 = tensor2.storage().data_ptr()
        self.assertNotEqual(ptr1, ptr2)

    def assertAllSeparate(self, tensor_list) -> None:
        """
        Verify that all tensors in tensor_list have their data in
        distinct locations.
        """
        pointers = [t.storage().data_ptr() for t in tensor_list]
        # Duplicated pointers would make the multiset differ from its set.
        self.assertCountEqual(pointers, set(pointers))

    def assertClose(
        self,
        input,
        other,
        *,
        rtol: float = 1e-05,
        atol: float = 1e-08,
        equal_nan: bool = False
    ) -> None:
        """
        Verify that two tensors or arrays are the same shape and close.

        Args:
            input, other: two tensors or two arrays.
            rtol, atol, equal_nan: as for torch.allclose.

        Note:
            Optional arguments here are all keyword-only, to avoid confusion
            with msg arguments on other assert functions.
        """
        self.assertEqual(np.shape(input), np.shape(other))
        # Dispatch on the type of *input*: torch tensors use torch.allclose,
        # everything else goes through numpy.
        allclose = torch.allclose if torch.is_tensor(input) else np.allclose
        self.assertTrue(
            allclose(input, other, rtol=rtol, atol=atol, equal_nan=equal_nan)
        )
| 0 | 1,502 | 23 |
adbbe98799dffd625e9c99385cc643e2678bbae3 | 2,093 | py | Python | backend/app/routes/app_router.py | Elguetajj/threat-intelligence | 29c017fb7fc3f9d46297eae0f5d2581f7ad80ab2 | [
"MIT"
] | null | null | null | backend/app/routes/app_router.py | Elguetajj/threat-intelligence | 29c017fb7fc3f9d46297eae0f5d2581f7ad80ab2 | [
"MIT"
] | null | null | null | backend/app/routes/app_router.py | Elguetajj/threat-intelligence | 29c017fb7fc3f9d46297eae0f5d2581f7ad80ab2 | [
"MIT"
] | null | null | null | from fastapi import APIRouter, Depends, HTTPException
from fastapi_utils.cbv import cbv
from sqlalchemy.orm import Session
from controllers.app_controller import get_all_apps, create_app, get_app_info_by_id, update_app_info, delete_app_info
from models.db import get_db
from controllers.exceptions import AppInfoException
from models.schemas import App, CreateAndUpdateApp, PaginatedAppInfo
router = APIRouter()
@cbv(router)
# API endpoint to get info of a particular app
@router.get("/apps/{app_id}", response_model=App)
# API to update a existing app info
@router.put("/apps/{app_id}", response_model=App)
# API to delete a app info from the data base
@router.delete("/apps/{app_id}") | 32.703125 | 117 | 0.71333 | from fastapi import APIRouter, Depends, HTTPException
from fastapi_utils.cbv import cbv
from sqlalchemy.orm import Session
from controllers.app_controller import get_all_apps, create_app, get_app_info_by_id, update_app_info, delete_app_info
from models.db import get_db
from controllers.exceptions import AppInfoException
from models.schemas import App, CreateAndUpdateApp, PaginatedAppInfo
router = APIRouter()
@cbv(router)
class Apps:
    """Class-based view bundling the collection-level /apps endpoints.

    The fastapi_utils ``@cbv`` decorator injects the shared ``session``
    dependency into every route method on this class.
    """
    # DB session injected per-request by FastAPI's dependency system.
    session: Session = Depends(get_db)
    # API to get the list of app info
    @router.get("/apps", response_model=PaginatedAppInfo)
    def list_apps(self, limit: int = 10, offset: int = 0):
        """Return a limit/offset-paginated list of apps."""
        apps_list = get_all_apps(self.session, limit, offset)
        response = {"limit": limit, "offset": offset, "data": apps_list}
        return response
    # API endpoint to add a app info to the database
    @router.post("/apps")
    def add_app(self, app_info: CreateAndUpdateApp):
        """Create a new app row; domain errors become HTTP errors."""
        try:
            app_info = create_app(self.session, app_info)
            return app_info
        except AppInfoException as cie:
            raise HTTPException(**cie.__dict__)
# API endpoint to get info of a particular app
@router.get("/apps/{app_id}", response_model=App)
def get_app_info(app_id: int, session: Session = Depends(get_db)):
    """Return a single app by id; domain errors become HTTP errors."""
    try:
        app_info = get_app_info_by_id(session, app_id)
        return app_info
    except AppInfoException as cie:
        raise HTTPException(**cie.__dict__)
# API to update a existing app info
@router.put("/apps/{app_id}", response_model=App)
def update_app(app_id: int, new_info: CreateAndUpdateApp, session: Session = Depends(get_db)):
    """Update an existing app by id; domain errors become HTTP errors."""
    try:
        app_info = update_app_info(session, app_id, new_info)
        return app_info
    except AppInfoException as cie:
        raise HTTPException(**cie.__dict__)
# API to delete a app info from the data base
@router.delete("/apps/{app_id}")
def delete_app(app_id: int, session: Session = Depends(get_db)):
    """Delete an app by id; domain errors become HTTP errors."""
    try:
        return delete_app_info(session, app_id)
    except AppInfoException as cie:
        raise HTTPException(**cie.__dict__)
55d216721bbc2658604e4c1423c52916cf18518e | 3,097 | py | Python | examples/slice.py | pierky/mrtparse | eb8d5556eca7fc9a9d41ad88d6fcd88462c58bd7 | [
"Apache-2.0"
] | null | null | null | examples/slice.py | pierky/mrtparse | eb8d5556eca7fc9a9d41ad88d6fcd88462c58bd7 | [
"Apache-2.0"
] | null | null | null | examples/slice.py | pierky/mrtparse | eb8d5556eca7fc9a9d41ad88d6fcd88462c58bd7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''
slice.py - This script slices MRT format data.
Copyright (C) 2016 greenHippo, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
Tetsumune KISO <t2mune@gmail.com>
Yoshiyuki YAMAUCHI <info@greenhippo.co.jp>
Nobuhiro ITOU <js333123@gmail.com>
'''
from mrtparse import *
import argparse, time, gzip, bz2, re
from datetime import datetime
if __name__ == '__main__':
main()
| 30.362745 | 72 | 0.622215 | #!/usr/bin/env python
'''
slice.py - This script slices MRT format data.
Copyright (C) 2016 greenHippo, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
Tetsumune KISO <t2mune@gmail.com>
Yoshiyuki YAMAUCHI <info@greenhippo.co.jp>
Nobuhiro ITOU <js333123@gmail.com>
'''
from mrtparse import *
import argparse, time, gzip, bz2, re
from datetime import datetime
def parse_args():
    """Build the command-line parser and parse ``sys.argv``.

    Returns an ``argparse.Namespace`` with path_to_file, start_time,
    end_time, interval, and compress_type attributes.
    """
    cli = argparse.ArgumentParser(
        description='This script slices MRT format data.')
    cli.add_argument(
        'path_to_file',
        help='specify path to MRT format file')
    cli.add_argument(
        '-s', type=str, metavar='START_TIME', dest='start_time',
        help='specify start time in format YYYY-MM-DD HH:MM:SS')
    cli.add_argument(
        '-e', type=str, metavar='END_TIME', dest='end_time',
        help='specify end time in format YYYY-MM-DD HH:MM:SS')
    cli.add_argument(
        '-i', type=int, metavar='INTERVAL', dest='interval',
        help='specify interval in seconds')
    cli.add_argument(
        '-c', type=str, choices=['gz', 'bz2'], dest='compress_type',
        help='specify compress type (gz, bz2)')
    return cli.parse_args()
def conv_unixtime(t):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string to a Unix timestamp.

    Uses the local timezone (time.mktime). Returns ``None`` when *t* is
    ``None`` (or any non-string); exits the program with an error message
    when the string is malformed.
    """
    try:
        parsed = datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
        stamp = int(time.mktime(parsed.timetuple()))
    except TypeError:
        # t was None (no CLI option given): propagate as "no bound".
        stamp = None
    except ValueError:
        print('error: invalid time \'%s\'' % t)
        exit(1)
    return stamp
def file_open(f, t, c):
    """Open a writable output file named after *f* plus a timestamp.

    Any trailing .gz/.bz2 extension is stripped from *f*, the timestamp *t*
    is appended as YYYYMMDD-HHMMSS, and the file is opened plain, gzip, or
    bz2 depending on *c* (None, 'gz', or 'bz2'). Other values of *c*
    return None, mirroring the original fall-through behavior.
    """
    base = re.sub(r'.gz$|.bz2$', '', f)
    stamp = datetime.fromtimestamp(t).strftime('%Y%m%d-%H%M%S')
    if c is None:
        return open('%s-%s' % (base, stamp), 'wb')
    if c == 'gz':
        return gzip.GzipFile('%s-%s.%s' % (base, stamp, c), 'wb')
    if c == 'bz2':
        return bz2.BZ2File('%s-%s.%s' % (base, stamp, c), 'wb')
def slice_mrt(args):
    """Split the MRT file named in *args* into time-sliced output files.

    Records before ``args.start_time`` are skipped, the first record at or
    after ``args.end_time`` stops processing, and when ``args.interval`` is
    set a new output file is started every *interval* seconds.
    """
    t = start_time = conv_unixtime(args.start_time)
    end_time = conv_unixtime(args.end_time)
    interval = args.interval
    if t is None:
        # No explicit start time: use the timestamp of the first MRT record.
        d = Reader(args.path_to_file)
        m = d.next()
        t = m.mrt.ts
    f = file_open(args.path_to_file, t, args.compress_type)
    d = Reader(args.path_to_file)
    for m in d:
        m = m.mrt
        if start_time and (m.ts < start_time):
            continue
        if end_time and (m.ts >= end_time):
            break
        if interval and (m.ts >= t + interval):
            # Rotate to a new output file for the next time slice.
            f.close()
            t += interval
            f = file_open(args.path_to_file, t, args.compress_type)
        f.write(m.buf)
    f.close()
def main():
    """Entry point: parse the CLI arguments and slice the MRT file."""
    args = parse_args()
    slice_mrt(args)
if __name__ == '__main__':
main()
| 2,079 | 0 | 115 |
fc35680459621f37de93d9ee060917b0efa48cd9 | 1,684 | py | Python | examples/browser-remote/error_image.py | shaperilio/sony_camera_api | ff1c071f9c41903e585aed7d868cbe7a4350e9db | [
"MIT"
] | null | null | null | examples/browser-remote/error_image.py | shaperilio/sony_camera_api | ff1c071f9c41903e585aed7d868cbe7a4350e9db | [
"MIT"
] | null | null | null | examples/browser-remote/error_image.py | shaperilio/sony_camera_api | ff1c071f9c41903e585aed7d868cbe7a4350e9db | [
"MIT"
] | null | null | null | from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from io import BytesIO
import time
# Wraps text so it fits within max_width. | 34.367347 | 75 | 0.570665 | from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from io import BytesIO
import time
def wrap(msg, max_width=70):
    """Greedily word-wrap *msg* so each line fits within *max_width*.

    Splits on single spaces and joins lines with newlines. A word longer
    than the limit is emitted on its own line unmodified. Note the greedy
    accumulator keeps a trailing space at the end of each normal line —
    preserved deliberately for backward compatibility.
    """
    wrapped = []
    current = ''
    for word in msg.split(' '):
        candidate = current + word + ' '
        if len(candidate) > max_width:
            if not current:
                # Single word exceeding the limit: force it onto its own line.
                wrapped.append(word)
                current = ''
            else:
                # Line is full: flush it and start the next with this word.
                wrapped.append(current)
                current = word + ' '
        else:
            current = candidate
    wrapped.append(current)
    return '\n'.join(wrapped)
def get_jpeg_for_message(msg):
    """Render *msg*, prefixed with the current timestamp, onto a black
    640x480 image and return the JPEG-encoded bytes.

    Font lookup falls back across platforms: Menlo (mac), Arial (windows),
    DejaVu Sans (ubuntu) — the last lookup is allowed to raise.
    """
    img = Image.new('RGB', (640, 480))
    draw = ImageDraw.Draw(img)
    try:
        font = ImageFont.truetype("Menlo.ttc", 15) # mac
    except:
        try:
            font = ImageFont.truetype("arial.ttf", 15) # windows
        except:
            font = ImageFont.truetype("DejaVuSans.ttf", 15) # ubuntu
    # Prepend the timestamp and wrap the body so it fits the image width.
    msg = time.strftime('%Y-%m-%d %H:%M:%S') + '\n' + wrap(msg)
    draw.text((10, 10),msg,(255,255,255),font=font)
    # BytesIO cleverness from https://stackoverflow.com/a/14921165/149506
    result = BytesIO()
    img.save(result, format='JPEG')
    return result.getvalue()
ad98af37685e9aaef7b2a4b87c3e60887ad8c4c2 | 2,063 | py | Python | rundeck-libext/cache/py-winrm-plugin-2.0.13/common.py | mrcit94/mrc-ansible-automation | 018320410e1f046c0a86cfb39a6df0e1c2e7b0b1 | [
"MIT"
] | null | null | null | rundeck-libext/cache/py-winrm-plugin-2.0.13/common.py | mrcit94/mrc-ansible-automation | 018320410e1f046c0a86cfb39a6df0e1c2e7b0b1 | [
"MIT"
] | null | null | null | rundeck-libext/cache/py-winrm-plugin-2.0.13/common.py | mrcit94/mrc-ansible-automation | 018320410e1f046c0a86cfb39a6df0e1c2e7b0b1 | [
"MIT"
] | null | null | null | import re
| 30.791045 | 134 | 0.653417 | import re
def check_is_file(destination):
    """Return True if ``destination`` looks like a Windows file path, i.e. a
    ``C:``-style drive prefix followed by a name with a 3-letter extension.

    Note: only the ``c``/``C`` drive letter is recognized by this pattern.
    """
    regex = r"((?:(?:[cC]:))[^\.]+\.[A-Za-z]{3})"
    # re.search is enough: we only need to know whether a match exists,
    # not enumerate them (the old loop just set a flag per match).
    return re.search(regex, destination, re.MULTILINE) is not None
def get_file(destination):
    """Return the final path component (file name) of ``destination``.

    Handles Windows (``\\``) and POSIX (``/``) separators; a path with no
    separator is returned unchanged, and a trailing separator yields ''.
    """
    separator = "\\" if "\\" in destination else "/"
    # The last component after the final separator is the file name.
    return destination.split(separator)[-1]
def removeSimpleQuotes(command):
    """Strip single quotes from quoted segments of ``command``, re-quoting
    paths that contain spaces with double quotes (delegates to
    cleanSimpleQuoteCommand)."""
    return cleanSimpleQuoteCommand(command)
def isAPathThatRequiresDoubleQuotes(candidate):
    """Return True when ``candidate`` is a single-quoted Windows path that
    contains whitespace (e.g. ``'C:\\Program Files'``) and therefore must be
    wrapped in double quotes; False for strings holding multiple absolute
    paths or a path followed by an option (``/x`` or ``-x``)."""
    # Two absolute paths in one string, e.g. 'C:\windows C:\tmp...' -> no.
    if re.match('\'[a-zA-Z]:\\\\.*\s[a-zA-Z]:\\\\.*', candidate):
        return False
    # Path followed by a Windows-style option, e.g. 'C:\Windows /w...' -> no.
    if re.match('\'[a-zA-Z]:\\\\.*\s/.+', candidate):
        return False
    # Path followed by a Unix-style option, e.g. 'C:\Windows -v' -> no.
    if re.match('\'[a-zA-Z]:\\\\.*\s-.+', candidate):
        return False
    # Single quoted path containing whitespace -> needs double quotes.
    return re.match('\'[a-zA-Z]:\\\\.*\s', candidate) is not None
def cleanSimpleQuoteCommand(command):
    """Rewrite every single-quoted segment in ``command`` via the
    conditionalReplace callback: quotes are stripped, and quoted paths that
    contain spaces are re-wrapped in double quotes. The command is padded
    with spaces so segments at either end still match the trailing-\\s regex."""
    result = re.sub(r'(\'.+?\')\s', conditionalReplace, ' '+command+' ' )
    return result
def conditionalReplace( aMatch ) :
    """re.sub callback: given a match whose group 1 is a single-quoted
    segment, return it with the quotes stripped, wrapped in double quotes
    when it is a path containing whitespace, and a trailing space appended."""
    result = ''
    capturedGroup = aMatch.group(1)
    capturedGroup = capturedGroup.strip()
    # Drop the surrounding single quotes.
    result = capturedGroup[1:(len(capturedGroup)-1)]
    if isAPathThatRequiresDoubleQuotes(capturedGroup):
        result = '"' + result + '"'
    return result+' '
| 1,913 | 0 | 138 |
7fa40b7449647d173c65b0c3ef5701ec409def2d | 2,484 | py | Python | project/plugins/slack/__init__.py | Signiant/External-user-provisioning | 986311234a3fcfd05e18d899865d5890a09b1690 | [
"MIT"
] | null | null | null | project/plugins/slack/__init__.py | Signiant/External-user-provisioning | 986311234a3fcfd05e18d899865d5890a09b1690 | [
"MIT"
] | 10 | 2018-05-02T12:42:24.000Z | 2018-10-12T17:05:38.000Z | project/plugins/slack/__init__.py | Signiant/External-user-provisioning | 986311234a3fcfd05e18d899865d5890a09b1690 | [
"MIT"
] | 1 | 2018-03-21T08:49:31.000Z | 2018-03-21T08:49:31.000Z | import json
import requests
from project.user_provision import getJsonResponse
from project.plugin import getApiToken, inviteMessage, removalMessage
| 40.721311 | 156 | 0.609903 | import json
import requests
from project.user_provision import getJsonResponse
from project.plugin import getApiToken, inviteMessage, removalMessage
def inviteUser(email,configMap,allPermissions, plugin_tag, name):
    """Slack provisioning: no API call is made; the user receives the invite
    instructions by email. Returns the standard plugin JSON response with
    done=True."""
    instruction = inviteMessage(configMap, plugin_tag)
    return getJsonResponse('Slack', email, 'Slack: Instruction sent in email.\n', instruction, True)
def removeUser(email,configMap,allPermissions, plugin_tag):
    """Deactivate the Slack account matching ``email``.

    Flow: fetch the team id (team.info), resolve the user's Slack id from the
    email (auth.findUser), then POST users.admin.setInactive to disable the
    account. ``done`` is True only when the setInactive request was issued
    without raising. Returns the standard plugin JSON response.
    """
    instruction = ''
    log = ''
    done = False
    # Local part of the address, used only for log messages.
    userName = email.split('@', 1)[0]
    #get team id
    team = requests.get("https://slack.com/api/team.info?token=" + getApiToken(configMap,plugin_tag) )
    my_json = team.content.decode('utf8')
    data = json.loads(my_json)
    if 'error' in data:
        # Map the Slack API error string to a human-readable log line.
        if 'invalid_auth' in team.text:
            log = plugin_tag + ' error: for user: ' + userName + '. Wrong api token'
        if 'not_authed' in team.text:
            log = plugin_tag + ' error: for user: ' + userName + '. No api token provided'
        if 'fatal_error' in team.text:
            log = plugin_tag + ' unexpected error: for user: ' + userName + '. could not be removed'
    elif 'team' not in data:
        log = plugin_tag + ' error: for user: ' + userName + '. Is not in the team'
    else:
        teamId=data['team']['id']
        #get user id
        userId= requests.get("https://slack.com/api/auth.findUser?token=" + getApiToken(configMap,plugin_tag)+"&email="+email+"&team="+teamId )
        if 'user_not_found' in userId.text:
            log = plugin_tag + ' error: for user: ' + userName + ' was not found. Delete failed. '
            print(log)
        else:
            my_json = userId.content.decode('utf8')
            data = json.loads(my_json)
            slackUserID = data['user_id']
            try:
                #disable user (undocumented admin endpoint; requires an admin token)
                user = requests.post("https://slack.com/api/users.admin.setInactive" + "?token=" + getApiToken(configMap,plugin_tag) + "&user="+slackUserID)
                log = plugin_tag + ": username " + userName + " has been deactivated\n"
                instruction = log
                print(log)
                done = True
            except Exception as error:
                # NOTE(review): broad catch — any request failure is reported
                # as "could not be removed"; the response body is not checked.
                log = 'Slack: Remove from slack error: '+ userName+' could not be removed'
                instruction = email+' was not found or is already inactive.'
                print(log)
    return getJsonResponse('Slack', email, log, instruction, done)
| 2,288 | 0 | 46 |
1752fde9a877f72278e07b52712ad990ea585232 | 10,739 | py | Python | tact/lib.py | jonchang/tact | bc0a240819716630b4ef258688c88ef2f7f4fe50 | [
"MIT"
] | 2 | 2020-02-13T21:17:18.000Z | 2020-08-15T03:08:22.000Z | tact/lib.py | jonchang/tact | bc0a240819716630b4ef258688c88ef2f7f4fe50 | [
"MIT"
] | 49 | 2019-04-02T03:09:17.000Z | 2022-02-22T07:51:40.000Z | tact/lib.py | jonchang/tact | bc0a240819716630b4ef258688c88ef2f7f4fe50 | [
"MIT"
] | 3 | 2019-12-19T16:41:10.000Z | 2020-12-09T02:32:32.000Z | # -*- coding: utf-8 -*-
"""Functions to handle various numerical operations, including optimization."""
from __future__ import division
import random
import sys
from decimal import Decimal as D
from math import exp
from math import log
import numpy as np
from scipy.optimize import minimize, minimize_scalar, dual_annealing
# Raise on overflow
np.seterr(all="raise")
def get_bd(r, a):
    """
    Converts turnover and relative extinction to birth and death rates.

    Args:
        r (float): turnover or net diversification (birth - death)
        a (float): relative extinction (death / birth)

    Returns:
        (float, float): birth, death
    """
    # From b - d = r and d / b = a:  b = r / (1 - a),  d = a * r / (1 - a).
    birth = r / (1 - a)
    death = a * r / (1 - a)
    return birth, death
def get_ra(b, d):
    """
    Converts birth and death to turnover and relative extinction rates.

    Args:
        b (float): birth rate
        d (float): extinction rate

    Returns:
        (float, float): turnover, relative extinction
    """
    turnover = b - d
    rel_extinction = d / b
    return (turnover, rel_extinction)
def wrapped_lik_constant(x, sampling, ages):
    """
    Wrapper for birth-death likelihood to make optimizing more convenient.

    Args:
        x (float, float): turnover, relative extinction
        sampling (float): sampling fraction (0, 1]
        ages (list): vector of node ages

    Returns:
        float: a likelihood
    """
    turnover, rel_extinction = x
    return lik_constant(get_bd(turnover, rel_extinction), sampling, ages)
def wrapped_lik_constant_yule(x, sampling, ages):
    """
    Wrapper for Yule likelihood to make optimizing more convenient.

    Args:
        x (float): birth rate
        sampling (float): sampling fraction (0, 1]
        ages (list): vector of node ages

    Returns:
        float: a likelihood
    """
    # Yule process: death rate is fixed at zero.
    return lik_constant((x, 0.0), sampling, ages)
def two_step_optim(func, x0, bounds, args):
    """
    Conduct a two-step function optimization, first by using the fast L-BFGS-B method,
    and if that fails, use simulated annealing.

    Args:
        func (callable): function to optimize
        x0 (tuple): initial conditions
        bounds (tuple): boundary conditions
        args (list): additional arguments to pass to `func`

    Returns:
        tuple: optimized parameter values
    """
    try:
        fast = minimize(func, x0=x0, bounds=bounds, args=args, method="L-BFGS-B")
        if fast["success"]:
            return fast["x"].tolist()
    except FloatingPointError:
        # Numerical trouble in the gradient-based search: fall through to
        # the slower but more robust annealer.
        pass
    slow = dual_annealing(func, x0=x0, bounds=bounds, args=args)
    if slow["success"]:
        return slow["x"].tolist()
    raise Exception(f"Optimization failed: {slow['message']} (code {slow['status']})")
def optim_bd(ages, sampling, min_bound=1e-9):
    """
    Optimizes birth and death parameters given a vector of splitting times and sampling fraction.

    Args:
        ages (list): vector of node ages
        sampling (float): sampling fraction (0, 1]
        min_bound (float): minimum birth rate

    Returns:
        float, float: birth and death rates
    """
    if max(ages) < 0.000001:
        # Degenerate tree (all splits essentially at the present): use a
        # small default initial turnover instead of dividing by ~zero below.
        init_r = 1e-3
    else:
        # Magallon-Sanderson crown estimator
        init_r = (log((len(ages) + 1) / sampling) - log(2)) / max(ages)
        init_r = max(1e-3, init_r)
    # Optimize in (turnover, relative extinction) space: r in
    # [min_bound, 100], a in [0, 1 - min_bound].
    bounds = ((min_bound, 100), (0, 1 - min_bound))
    result = two_step_optim(wrapped_lik_constant, x0=(init_r, min_bound), bounds=bounds, args=(sampling, ages))
    return get_bd(*result)
def optim_yule(ages, sampling, min_bound=1e-9):
    """
    Optimizes birth parameter under a Yule model, given a vector of splitting times and sampling fraction.

    Args:
        ages (list): vector of node ages
        sampling (float): sampling fraction (0, 1]
        min_bound (float): minimum birth rate

    Returns:
        float, float: birth and death rates (where death is always 0)

    Raises:
        Exception: if the bounded scalar optimization does not converge.
    """
    bounds = (min_bound, 100)
    # Single free parameter (birth), so bounded scalar optimization suffices.
    result = minimize_scalar(wrapped_lik_constant_yule, bounds=bounds, args=(sampling, ages), method="Bounded")
    if result["success"]:
        return (result["x"], 0.0)
    raise Exception(f"Optimization failed: {result['message']} (code {result['status']})")
def p0_exact(t, l, m, rho):  # noqa: E741
    "Exact version of `p0` using Decimal math."
    t, l, m, rho = D(t), D(l), D(m), D(rho)  # noqa: E741
    ert = (-(l - m) * t).exp()
    return D(1) - rho * (l - m) / (rho * l + (l * (D(1) - rho) - m) * ert)
def p1_exact(t, l, m, rho):  # noqa: E741
    """Exact version of `p1` using Decimal math."""
    t, l, m, rho = D(t), D(l), D(m), D(rho)  # noqa: E741
    ert = (-(l - m) * t).exp()
    num = rho * (l - m) ** D(2) * ert
    denom = (rho * l + (l * (1 - rho) - m) * ert) ** D(2)
    return num / denom
def p1_orig(t, l, m, rho):  # noqa: E741
    """Original version of `p1`, here for testing and comparison purposes."""
    try:
        decay = np.exp(-(l - m) * t)
        num = rho * (l - m) ** 2 * decay
        denom = (rho * l + (l * (1 - rho) - m) * decay) ** 2
        res = num / denom
    except (OverflowError, FloatingPointError):
        # Fall back to exact Decimal arithmetic on numeric failure.
        res = float(p1_exact(t, l, m, rho))
    # Never return exactly zero: callers take log(p1).
    return res if res != 0.0 else sys.float_info.min
def p1(t, l, m, rho):  # noqa: E741
    """
    Optimized version of `p1_orig` using common subexpression elimination and strength reduction
    from exponentiation to multiplication.
    """
    try:
        ert = np.exp(-(l - m) * t, dtype=np.float64)
        num = rho * (l - m) ** 2 * ert
        denom = (rho * l + (l * (1 - rho) - m) * ert) ** 2
        res = num / denom
    except (OverflowError, FloatingPointError):
        # Fall back to exact Decimal arithmetic on numeric failure.
        res = float(p1_exact(t, l, m, rho))
    # Never return exactly zero: callers take log(p1).
    return res if res != 0.0 else sys.float_info.min
def intp1_exact(t, l, m):  # noqa: E741
    """Exact version of `intp1` using Decimal math."""
    t, l, m = D(t), D(l), D(m)  # noqa: E741
    ert = (-(l - m) * t).exp()
    return (D(1) - ert) / (l - m * ert)
def lik_constant(vec, rho, t, root=1, survival=1, p1=p1):
    """
    Calculates the likelihood of a constant-rate birth-death process, conditioned
    on the waiting times of a phylogenetic tree and degree of incomplete sampling.

    Based off of the R function `TreePar::LikConstant` written by Tanja Stadler.

    T. Stadler. On incomplete sampling under birth-death models and connections
    to the sampling-based coalescent. Jour. Theo. Biol. 261: 58-66, 2009.

    Args:
        vec (float, float): two element tuple of birth and death
        rho (float): sampling fraction
        t (list): vector of waiting times
        root (bool): include the root or not? (default: 1)
        survival (bool): assume survival of the process? (default: 1)
        p1 (callable): probability function applied per waiting time;
            injectable for testing/benchmarking (default: module `p1`)

    Note:
        ``t`` is sorted in place (descending) — callers should not rely on
        its original ordering.

    Returns:
        float: a likelihood (negated log-likelihood, minimizer-friendly)
    """
    l = vec[0]  # noqa: E741
    m = vec[1]
    t.sort(reverse=True)
    # Root contribution: the oldest waiting time enters (root + 1) times.
    lik = (root + 1) * log(p1(t[0], l, m, rho))
    for tt in t[1:]:
        lik += log(l) + log(p1(tt, l, m, rho))
    if survival == 1:
        # Condition on survival of the process to the present.
        lik -= (root + 1) * log(1 - p0(t[0], l, m, rho))
    return -lik
def crown_capture_probability(n, k):
    """
    Calculate the probability that a sample of `k` taxa from a clade
    of `n` total taxa includes a root node, under a Yule process.

    This equation is taken from:

    Sanderson, M. J. 1996. How many taxa must be sampled to identify
    the root node of a large clade? Systematic Biology 45:168-173

    Args:
        n (int): total number of taxa
        k (int): sampled taxa

    Returns:
        float: probability
    """
    if n < k:
        raise Exception(f"n must be greater than or equal to k (n={n}, k={k})")
    if n == 1 and k == 1:
        return 0  # not technically correct but it works for our purposes
    miss_term = 2 * (n - k) / ((n - 1) * (k + 1))
    return 1 - miss_term
# TODO: This could probably be optimized
def get_new_times(ages, birth, death, missing, told=None, tyoung=None):
    """
    Simulates new speciation events in an incomplete phylogeny assuming a
    constant-rate birth-death process.

    Adapted from the R function `TreeSim::corsim` written by Tanja Stadler.

    N. Cusimano, T. Stadler, S. Renner. A new method for handling missing
    species in diversification analysis applicable to randomly or
    non-randomly sampled phylogenies. Syst. Biol., 61(5): 785-792, 2012.

    Args:
        ages (list): vector of waiting times (sorted in place, descending —
            callers should not rely on the original ordering)
        birth (float): birth rate
        death (float): death rate
        missing (int): number of missing taxa to simulate
        told (float): maximum simulated age (default: `max(ages)`)
        tyoung (float): minimum simulated age bound (default: `0`)

    Returns:
        list: vector of simulated waiting times.
    """
    if told is None:
        told = max(ages)
    if len(ages) > 0:
        # Tolerate float fuzz: only reject ages genuinely above `told`.
        if max(ages) > told and abs(max(ages) - told) > sys.float_info.epsilon:
            raise Exception("Zero or negative branch lengths detected in backbone phylogeny")
    if tyoung is None:
        tyoung = 0
    ages.sort(reverse=True)
    # Restrict to the window [tyoung, told] and pad with the window bounds.
    times = [x for x in ages if told >= x >= tyoung]
    times = [told] + times + [tyoung]
    ranks = range(0, len(times))
    only_new = []
    while missing > 0:
        if len(ranks) > 2:
            # Weight each interval between consecutive times by
            # rank * (difference of integrated p1 mass), then sample an
            # interval from the resulting cumulative distribution.
            distrranks = []
            for i in range(1, len(ranks)):
                temp = ranks[i] * (intp1(times[i - 1], birth, death) - intp1(times[i], birth, death))
                distrranks.append(temp)
            try:
                dsum = sum(distrranks)
                distrranks = [x / dsum for x in distrranks]
                for i in range(1, len(distrranks)):
                    # Running cumulative sum for inverse-CDF sampling.
                    distrranks[i] = distrranks[i] + distrranks[i - 1]
                r = random.uniform(0, 1)
                addrank = min([idx for idx, x in enumerate(distrranks) if x > r])
            except ZeroDivisionError:
                addrank = 0
            except ValueError:
                # No cumulative weight exceeded r (empty candidate list).
                addrank = 0
        else:
            addrank = 0
        # Invert the CDF within the chosen interval to draw the new age.
        r = random.uniform(0, 1)
        const = intp1(times[addrank], birth, death) - intp1(times[addrank + 1], birth, death)
        try:
            temp = intp1(times[addrank + 1], birth, death) / const
        except ZeroDivisionError:
            temp = 0.0
        xnew = 1 / (death - birth) * log((1 - (r + temp) * const * birth) / (1 - (r + temp) * const * death))
        only_new.append(xnew)
        missing -= 1
    only_new.sort(reverse=True)
    return only_new
| 31.218023 | 111 | 0.58823 | # -*- coding: utf-8 -*-
"""Functions to handle various numerical operations, including optimization."""
from __future__ import division
import random
import sys
from decimal import Decimal as D
from math import exp
from math import log
import numpy as np
from scipy.optimize import minimize, minimize_scalar, dual_annealing
# Raise on overflow
np.seterr(all="raise")
def get_bd(r, a):
    """
    Converts turnover and relative extinction to birth and death rates.

    Args:
        r (float): turnover or net diversification (birth - death)
        a (float): relative extinction (death / birth)

    Returns:
        (float, float): birth, death
    """
    # From b - d = r and d / b = a:  b = r / (1 - a),  d = a * r / (1 - a).
    birth = r / (1 - a)
    death = a * r / (1 - a)
    return birth, death
def get_ra(b, d):
    """
    Converts birth and death to turnover and relative extinction rates.

    Args:
        b (float): birth rate
        d (float): extinction rate

    Returns:
        (float, float): turnover, relative extinction
    """
    turnover = b - d
    rel_extinction = d / b
    return (turnover, rel_extinction)
def wrapped_lik_constant(x, sampling, ages):
    """
    Wrapper for birth-death likelihood to make optimizing more convenient.

    Args:
        x (float, float): turnover, relative extinction
        sampling (float): sampling fraction (0, 1]
        ages (list): vector of node ages

    Returns:
        float: a likelihood
    """
    turnover, rel_extinction = x
    return lik_constant(get_bd(turnover, rel_extinction), sampling, ages)
def wrapped_lik_constant_yule(x, sampling, ages):
    """
    Wrapper for Yule likelihood to make optimizing more convenient.

    Args:
        x (float): birth rate
        sampling (float): sampling fraction (0, 1]
        ages (list): vector of node ages

    Returns:
        float: a likelihood
    """
    # Yule process: death rate is fixed at zero.
    return lik_constant((x, 0.0), sampling, ages)
def two_step_optim(func, x0, bounds, args):
    """
    Conduct a two-step function optimization, first by using the fast L-BFGS-B method,
    and if that fails, use simulated annealing.

    Args:
        func (callable): function to optimize
        x0 (tuple): initial conditions
        bounds (tuple): boundary conditions
        args (list): additional arguments to pass to `func`

    Returns:
        tuple: optimized parameter values
    """
    try:
        fast = minimize(func, x0=x0, bounds=bounds, args=args, method="L-BFGS-B")
        if fast["success"]:
            return fast["x"].tolist()
    except FloatingPointError:
        # Numerical trouble in the gradient-based search: fall through to
        # the slower but more robust annealer.
        pass
    slow = dual_annealing(func, x0=x0, bounds=bounds, args=args)
    if slow["success"]:
        return slow["x"].tolist()
    raise Exception(f"Optimization failed: {slow['message']} (code {slow['status']})")
def optim_bd(ages, sampling, min_bound=1e-9):
    """
    Optimizes birth and death parameters given a vector of splitting times and sampling fraction.

    Args:
        ages (list): vector of node ages
        sampling (float): sampling fraction (0, 1]
        min_bound (float): minimum birth rate

    Returns:
        float, float: birth and death rates
    """
    if max(ages) < 0.000001:
        # Degenerate tree (all splits essentially at the present): use a
        # small default initial turnover instead of dividing by ~zero below.
        init_r = 1e-3
    else:
        # Magallon-Sanderson crown estimator
        init_r = (log((len(ages) + 1) / sampling) - log(2)) / max(ages)
        init_r = max(1e-3, init_r)
    # Optimize in (turnover, relative extinction) space: r in
    # [min_bound, 100], a in [0, 1 - min_bound].
    bounds = ((min_bound, 100), (0, 1 - min_bound))
    result = two_step_optim(wrapped_lik_constant, x0=(init_r, min_bound), bounds=bounds, args=(sampling, ages))
    return get_bd(*result)
def optim_yule(ages, sampling, min_bound=1e-9):
    """
    Optimizes birth parameter under a Yule model, given a vector of splitting times and sampling fraction.

    Args:
        ages (list): vector of node ages
        sampling (float): sampling fraction (0, 1]
        min_bound (float): minimum birth rate

    Returns:
        float, float: birth and death rates (where death is always 0)

    Raises:
        Exception: if the bounded scalar optimization does not converge.
    """
    bounds = (min_bound, 100)
    # Single free parameter (birth), so bounded scalar optimization suffices.
    result = minimize_scalar(wrapped_lik_constant_yule, bounds=bounds, args=(sampling, ages), method="Bounded")
    if result["success"]:
        return (result["x"], 0.0)
    raise Exception(f"Optimization failed: {result['message']} (code {result['status']})")
def p0_exact(t, l, m, rho):  # noqa: E741
    "Exact version of `p0` using Decimal math."
    t, l, m, rho = D(t), D(l), D(m), D(rho)  # noqa: E741
    ert = (-(l - m) * t).exp()
    return D(1) - rho * (l - m) / (rho * l + (l * (D(1) - rho) - m) * ert)
def p0(t, l, m, rho):  # noqa: E741
    """Float counterpart to `p0_exact`.

    Computed with fast float math; falls back to the exact Decimal
    implementation when the arithmetic over/underflows.
    """
    try:
        return 1 - rho * (l - m) / (rho * l + (l * (1 - rho) - m) * exp(-(l - m) * t))
    except (OverflowError, FloatingPointError):
        # math.exp raises OverflowError (not FloatingPointError) on large
        # arguments, so the old `except FloatingPointError` never fired.
        return float(p0_exact(t, l, m, rho))
def p1_exact(t, l, m, rho):  # noqa: E741
    """Exact version of `p1` using Decimal math."""
    t, l, m, rho = D(t), D(l), D(m), D(rho)  # noqa: E741
    ert = (-(l - m) * t).exp()
    num = rho * (l - m) ** D(2) * ert
    denom = (rho * l + (l * (1 - rho) - m) * ert) ** D(2)
    return num / denom
def p1_orig(t, l, m, rho):  # noqa: E741
    """Original version of `p1`, here for testing and comparison purposes."""
    try:
        decay = np.exp(-(l - m) * t)
        num = rho * (l - m) ** 2 * decay
        denom = (rho * l + (l * (1 - rho) - m) * decay) ** 2
        res = num / denom
    except (OverflowError, FloatingPointError):
        # Fall back to exact Decimal arithmetic on numeric failure.
        res = float(p1_exact(t, l, m, rho))
    # Never return exactly zero: callers take log(p1).
    return res if res != 0.0 else sys.float_info.min
def p1(t, l, m, rho):  # noqa: E741
    """
    Optimized version of `p1_orig` using common subexpression elimination and strength reduction
    from exponentiation to multiplication.
    """
    try:
        ert = np.exp(-(l - m) * t, dtype=np.float64)
        num = rho * (l - m) ** 2 * ert
        denom = (rho * l + (l * (1 - rho) - m) * ert) ** 2
        res = num / denom
    except (OverflowError, FloatingPointError):
        # Fall back to exact Decimal arithmetic on numeric failure.
        res = float(p1_exact(t, l, m, rho))
    # Never return exactly zero: callers take log(p1).
    return res if res != 0.0 else sys.float_info.min
def intp1_exact(t, l, m):  # noqa: E741
    """Exact version of `intp1` using Decimal math."""
    t, l, m = D(t), D(l), D(m)  # noqa: E741
    ert = (-(l - m) * t).exp()
    return (D(1) - ert) / (l - m * ert)
def intp1(t, l, m):  # noqa: E741
    """Float counterpart to `intp1_exact`; falls back to the exact Decimal
    implementation when math.exp overflows float range."""
    try:
        return (1 - exp(-(l - m) * t)) / (l - m * exp(-(l - m) * t))
    except OverflowError:
        return float(intp1_exact(t, l, m))
def lik_constant(vec, rho, t, root=1, survival=1, p1=p1):
    """
    Calculates the likelihood of a constant-rate birth-death process, conditioned
    on the waiting times of a phylogenetic tree and degree of incomplete sampling.

    Based off of the R function `TreePar::LikConstant` written by Tanja Stadler.

    T. Stadler. On incomplete sampling under birth-death models and connections
    to the sampling-based coalescent. Jour. Theo. Biol. 261: 58-66, 2009.

    Args:
        vec (float, float): two element tuple of birth and death
        rho (float): sampling fraction
        t (list): vector of waiting times
        root (bool): include the root or not? (default: 1)
        survival (bool): assume survival of the process? (default: 1)
        p1 (callable): probability function applied per waiting time;
            injectable for testing/benchmarking (default: module `p1`)

    Note:
        ``t`` is sorted in place (descending) — callers should not rely on
        its original ordering.

    Returns:
        float: a likelihood (negated log-likelihood, minimizer-friendly)
    """
    l = vec[0]  # noqa: E741
    m = vec[1]
    t.sort(reverse=True)
    # Root contribution: the oldest waiting time enters (root + 1) times.
    lik = (root + 1) * log(p1(t[0], l, m, rho))
    for tt in t[1:]:
        lik += log(l) + log(p1(tt, l, m, rho))
    if survival == 1:
        # Condition on survival of the process to the present.
        lik -= (root + 1) * log(1 - p0(t[0], l, m, rho))
    return -lik
def crown_capture_probability(n, k):
    """
    Calculate the probability that a sample of `k` taxa from a clade
    of `n` total taxa includes a root node, under a Yule process.

    This equation is taken from:

    Sanderson, M. J. 1996. How many taxa must be sampled to identify
    the root node of a large clade? Systematic Biology 45:168-173

    Args:
        n (int): total number of taxa
        k (int): sampled taxa

    Returns:
        float: probability
    """
    if n < k:
        raise Exception(f"n must be greater than or equal to k (n={n}, k={k})")
    if n == 1 and k == 1:
        return 0  # not technically correct but it works for our purposes
    miss_term = 2 * (n - k) / ((n - 1) * (k + 1))
    return 1 - miss_term
# TODO: This could probably be optimized
def get_new_times(ages, birth, death, missing, told=None, tyoung=None):
    """
    Simulates new speciation events in an incomplete phylogeny assuming a
    constant-rate birth-death process.

    Adapted from the R function `TreeSim::corsim` written by Tanja Stadler.

    N. Cusimano, T. Stadler, S. Renner. A new method for handling missing
    species in diversification analysis applicable to randomly or
    non-randomly sampled phylogenies. Syst. Biol., 61(5): 785-792, 2012.

    Args:
        ages (list): vector of waiting times (sorted in place, descending —
            callers should not rely on the original ordering)
        birth (float): birth rate
        death (float): death rate
        missing (int): number of missing taxa to simulate
        told (float): maximum simulated age (default: `max(ages)`)
        tyoung (float): minimum simulated age bound (default: `0`)

    Returns:
        list: vector of simulated waiting times.
    """
    if told is None:
        told = max(ages)
    if len(ages) > 0:
        # Tolerate float fuzz: only reject ages genuinely above `told`.
        if max(ages) > told and abs(max(ages) - told) > sys.float_info.epsilon:
            raise Exception("Zero or negative branch lengths detected in backbone phylogeny")
    if tyoung is None:
        tyoung = 0
    ages.sort(reverse=True)
    # Restrict to the window [tyoung, told] and pad with the window bounds.
    times = [x for x in ages if told >= x >= tyoung]
    times = [told] + times + [tyoung]
    ranks = range(0, len(times))
    only_new = []
    while missing > 0:
        if len(ranks) > 2:
            # Weight each interval between consecutive times by
            # rank * (difference of integrated p1 mass), then sample an
            # interval from the resulting cumulative distribution.
            distrranks = []
            for i in range(1, len(ranks)):
                temp = ranks[i] * (intp1(times[i - 1], birth, death) - intp1(times[i], birth, death))
                distrranks.append(temp)
            try:
                dsum = sum(distrranks)
                distrranks = [x / dsum for x in distrranks]
                for i in range(1, len(distrranks)):
                    # Running cumulative sum for inverse-CDF sampling.
                    distrranks[i] = distrranks[i] + distrranks[i - 1]
                r = random.uniform(0, 1)
                addrank = min([idx for idx, x in enumerate(distrranks) if x > r])
            except ZeroDivisionError:
                addrank = 0
            except ValueError:
                # No cumulative weight exceeded r (empty candidate list).
                addrank = 0
        else:
            addrank = 0
        # Invert the CDF within the chosen interval to draw the new age.
        r = random.uniform(0, 1)
        const = intp1(times[addrank], birth, death) - intp1(times[addrank + 1], birth, death)
        try:
            temp = intp1(times[addrank + 1], birth, death) / const
        except ZeroDivisionError:
            temp = 0.0
        xnew = 1 / (death - birth) * log((1 - (r + temp) * const * birth) / (1 - (r + temp) * const * death))
        only_new.append(xnew)
        missing -= 1
    only_new.sort(reverse=True)
    return only_new
| 345 | 0 | 46 |
0871ebce7047ae5d4623ac096a3c5fa668f368a1 | 6,826 | py | Python | diktya/func_api_helpers.py | BioroboticsLab/diktyo | 1a81dc36005c4b44021fa69285f25fc0229115b8 | [
"Apache-2.0"
] | 3 | 2016-03-19T15:59:30.000Z | 2016-05-25T12:11:25.000Z | diktya/func_api_helpers.py | BioroboticsLab/diktyo | 1a81dc36005c4b44021fa69285f25fc0229115b8 | [
"Apache-2.0"
] | 2 | 2016-08-11T15:27:35.000Z | 2017-04-26T10:05:42.000Z | diktya/func_api_helpers.py | BioroboticsLab/diktyo | 1a81dc36005c4b44021fa69285f25fc0229115b8 | [
"Apache-2.0"
] | 1 | 2016-05-25T12:10:33.000Z | 2016-05-25T12:10:33.000Z | # Copyright 2015 Leon Sixt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras.engine.topology import merge
from keras.layers.core import Activation
from keras.utils.layer_utils import layer_from_config
from contextlib import contextmanager
from collections import OrderedDict
import h5py
import json
@contextmanager
def trainable(model, trainable):
    """
    Temporarily sets every layer of ``model`` to the given ``trainable``
    value, restoring each layer's previous state on normal exit.

    .. warning::

       Be aware, that the keras ``Model.compile`` method is lazy.
       You might want to call ``Model._make_train_function`` to force a compilation.

    Args:
        model: keras model
        trainable (bool): set layer.trainable to this value

    Example:

    .. code:: python

        model = Model(x, y)
        with trainable(model, False):
            # layers of model are now not trainable
            z = model(y)
            [...]
        # now the layers of `model` are trainable again
    """
    saved = [layer.trainable for layer in model.layers]
    for layer in model.layers:
        layer.trainable = trainable
    yield
    # NOTE: restoration happens only on normal exit (matches original behavior).
    for layer, was in zip(model.layers, saved):
        layer.trainable = was
def get_layer(keras_tensor):
    """
    Returns the corresponding layer to a keras tensor (the layer recorded
    as the first element of the tensor's ``_keras_history``).
    """
    return keras_tensor._keras_history[0]
def sequential(layers, ns=None, trainable=True):
    """
    The functional flexible counter part to the keras Sequential model.

    Args:
        layers (list): Can be a arbitrary nested list of layers.
            The layers will be called sequentially. Can contain ``None``'s
        ns (optional str): Namespace prefix of the layers
        trainable (optional bool): set the layer's trainable attribute to this value.

    Returns:
        A function that takes a tensor as input, applies all the layers, and
        returns the output tensor.

    **Simple example:**

    Call a list of layers.

    .. code:: python

        x = Input(shape=(32,))
        y = sequential([
            Dense(10),
            LeakyReLU(0.4),
            Dense(10, activation='sigmoid'),
        ])(x)
        m = Model(x, y)

    **Advanced example:**

    Use a function to construct reoccuring blocks. The ``conv`` functions
    returns a nested list of layers. This allows one to nicely combine and stack
    different building blocks function.

    .. code:: python

        def conv(n, depth=2, f=3, activation='relu'):
            layers = [
                [
                    Convolution2D(n, f, f, border_mode='same'),
                    BatchNormalization(),
                    Activation(activation)
                ] for _ in range(depth)
            ]
            return layers + [MaxPooling2D()]

        x = Input(shape=(32,))
        y = sequential([
            conv(32),
            conv(64),
            conv(128),
            Flatten(),
            Dense(10, activation='sigmoid'),
        ])(x, ns='classifier')
        m = Model(x, y)
    """
    # The original body referenced `flatten` and `call` without defining
    # them, which raised NameError at runtime; both helpers are restored.
    def flatten(xs):
        # Depth-first walk over arbitrarily nested lists, skipping None.
        for x in xs:
            try:
                for f in flatten(x):
                    if f is not None:
                        yield f
            except TypeError:
                if x is not None:
                    yield x

    for i, l in enumerate(flatten(layers)):
        if not hasattr(l, 'name'):
            continue
        if ns is not None:
            if '.' not in l.name:
                name = type(l).__name__.lower()
                name = "{:02}_{}".format(i, name)
                l.name = ns + '.' + name
        l.trainable = trainable

    def call(input):
        # Apply every (flattened) layer in order to the input tensor.
        x = input
        for l in flatten(layers):
            x = l(x)
        return x
    return call
def concat(tensors, axis=1, **kwargs):
    """
    Wrapper around keras merge function.

    Args:
        tensors: list of keras tensors
        axis: concat on this axis
        kwargs: passed to the merge function

    Returns:
        The concatenated tensor (a non-list/tuple input, or a single-element
        list, is returned unchanged/unwrapped without calling merge).
    """
    # Deliberately an exact type check (not isinstance), matching the
    # original semantics for list/tuple subclasses.
    if type(tensors) not in (list, tuple):
        return tensors
    if len(tensors) == 1:
        return tensors[0]
    return merge(tensors, mode='concat', concat_axis=axis, **kwargs)
def rename_layer(keras_tensor, name):
    """
    Renames the layer of the ``keras_tensor`` (the layer that produced it,
    per the tensor's ``_keras_history``).
    """
    keras_tensor._keras_history[0].name = name
def name_tensor(keras_tensor, name):
    """
    Add a linear (identity) activation layer with this ``name`` that does
    nothing. Useful to mark a tensor so it can be found by name later.
    """
    return Activation('linear', name=name)(keras_tensor)
def keras_copy(obj):
    """
    Copies a keras object by using the ``get_config`` method: a new instance
    of the same class is built from the config, with any 'name' key removed
    so keras assigns a fresh name.
    """
    config = obj.get_config()
    config.pop('name', None)
    return type(obj)(**config)
def save_model(model, fname, overwrite=False, attrs={}):
    """
    Saves the weights and the config of ``model`` in the HDF5 file ``fname``.
    The model config is saved as: ``f.attrs["model"] = model.to_json().encode('utf-8')``,
    where ``f`` is the HDF5 file.

    Args:
        model: keras model to save
        fname (str): path of the HDF5 file
        overwrite (bool): passed through to ``model.save_weights``
        attrs (dict): extra top-level HDF5 attributes to store; must not
            contain the reserved key ``'layer_names'``.
            NOTE(review): mutable default is safe here — never mutated.
    """
    assert 'layer_names' not in attrs
    model.save_weights(fname, overwrite)
    f = h5py.File(fname, 'r+')
    f.attrs['model'] = model.to_json().encode('utf-8')
    for k, v in attrs.items():
        if type(v) == str:
            # h5py attribute storage: encode str values explicitly.
            v = v.encode('utf-8')
        f.attrs[k] = v
    f.close()
def load_model(fname, custom_objects={}):
    """
    Loads the model and weights from ``fname``. Counterpart to :py:func:`save_model`.

    Args:
        fname (str): path to an HDF5 file written by :py:func:`save_model`
        custom_objects (dict): passed to keras' ``layer_from_config`` to
            resolve non-standard layer classes.

    Returns:
        The reconstructed keras model with its weights loaded.
    """
    json_config = get_hdf5_attr(fname, 'model').decode('utf-8')
    config = json.loads(json_config)
    model = layer_from_config(config, custom_objects)
    model.load_weights(fname)
    return model
def get_hdf5_attr(fname, attr_name, default=None):
    """
    Returns the toplevel attribute ``attr_name`` of the hdf5 file ``fname``.
    If ``default`` is not None and the attribute is not present, then
    ``default`` is returned.

    Note:
        When the attribute is missing and ``default`` is None, the
        ``f.attrs[attr_name]`` lookup raises (h5py KeyError).
    """
    with h5py.File(fname, 'r') as f:
        if attr_name not in f.attrs and default is not None:
            return default
        else:
            return f.attrs[attr_name]
| 27.087302 | 89 | 0.595078 | # Copyright 2015 Leon Sixt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras.engine.topology import merge
from keras.layers.core import Activation
from keras.utils.layer_utils import layer_from_config
from contextlib import contextmanager
from collections import OrderedDict
import h5py
import json
@contextmanager
def trainable(model, trainable):
    """
    Temporarily sets every layer of ``model`` to the given ``trainable``
    value, restoring each layer's previous state on normal exit.

    .. warning::

       Be aware, that the keras ``Model.compile`` method is lazy.
       You might want to call ``Model._make_train_function`` to force a compilation.

    Args:
        model: keras model
        trainable (bool): set layer.trainable to this value

    Example:

    .. code:: python

        model = Model(x, y)
        with trainable(model, False):
            # layers of model are now not trainable
            z = model(y)
            [...]
        # now the layers of `model` are trainable again
    """
    saved = [layer.trainable for layer in model.layers]
    for layer in model.layers:
        layer.trainable = trainable
    yield
    # NOTE: restoration happens only on normal exit (matches original behavior).
    for layer, was in zip(model.layers, saved):
        layer.trainable = was
def get_layer(keras_tensor):
    """
    Returns the corresponding layer to a keras tensor (the layer recorded
    as the first element of the tensor's ``_keras_history``).
    """
    return keras_tensor._keras_history[0]
def sequential(layers, ns=None, trainable=True):
    """
    The functional flexible counter part to the keras Sequential model.

    Args:
        layers (list): Can be a arbitrary nested list of layers.
            The layers will be called sequentially. Can contain ``None``'s
        ns (optional str): Namespace prefix of the layers
        trainable (optional bool): set the layer's trainable attribute to this value.

    Returns:
        A function that takes a tensor as input, applies all the layers, and
        returns the output tensor.

    **Simple example:**

    Call a list of layers.

    .. code:: python

        x = Input(shape=(32,))
        y = sequential([
            Dense(10),
            LeakyReLU(0.4),
            Dense(10, activation='sigmoid'),
        ])(x)
        m = Model(x, y)

    **Advanced example:**

    Use a function to construct reoccuring blocks. The ``conv`` functions
    returns a nested list of layers. This allows one to nicely combine and stack
    different building blocks function.

    .. code:: python

        def conv(n, depth=2, f=3, activation='relu'):
            layers = [
                [
                    Convolution2D(n, f, f, border_mode='same'),
                    BatchNormalization(),
                    Activation(activation)
                ] for _ in range(depth)
            ]
            return layers + [MaxPooling2D()]

        x = Input(shape=(32,))
        y = sequential([
            conv(32),
            conv(64),
            conv(128),
            Flatten(),
            Dense(10, activation='sigmoid'),
        ])(x, ns='classifier')
        m = Model(x, y)
    """
    def flatten(xs):
        # Depth-first walk over arbitrarily nested lists, skipping None.
        # Non-iterable entries (the layers themselves) raise TypeError and
        # are yielded directly.
        for x in xs:
            try:
                for f in flatten(x):
                    if f is not None:
                        yield f
            except TypeError:
                if x is not None:
                    yield x
    # Namespace-prefix layer names (only once: skip names already dotted)
    # and propagate the trainable flag to every named layer.
    for i, l in enumerate(flatten(layers)):
        if not hasattr(l, 'name'):
            continue
        if ns is not None:
            if '.' not in l.name:
                name = type(l).__name__.lower()
                name = "{:02}_{}".format(i, name)
                l.name = ns + '.' + name
        l.trainable = trainable
    def call(input):
        # Apply every (flattened) layer in order to the input tensor.
        x = input
        for l in flatten(layers):
            x = l(x)
        return x
    return call
def concat(tensors, axis=1, **kwargs):
    """
    Wrapper around the keras ``merge`` function for concatenation.

    Args:
        tensors: a list/tuple of keras tensors, or a single tensor
        axis: axis along which to concatenate
        kwargs: forwarded to ``merge``

    Returns:
        The concatenated tensor. A non-sequence input, or a one-element
        sequence, is returned (unwrapped) without calling ``merge``.
    """
    # Exact-type check kept on purpose: subclasses of list/tuple are passed
    # through untouched, exactly as before.
    is_plain_sequence = type(tensors) in (list, tuple)
    if not is_plain_sequence:
        return tensors
    if len(tensors) == 1:
        return tensors[0]
    return merge(tensors, mode='concat', concat_axis=axis, **kwargs)
def rename_layer(keras_tensor, name):
    """
    Renames the layer that produced ``keras_tensor`` to ``name``.
    """
    get_layer(keras_tensor).name = name
def name_tensor(keras_tensor, name):
    """
    Attach a no-op identity layer called ``name`` to ``keras_tensor``.

    Useful to mark a tensor so it can be located later by layer name.
    """
    identity = Activation('linear', name=name)
    return identity(keras_tensor)
def keras_copy(obj):
    """
    Copies a keras object via the ``get_config`` round-trip.

    Any ``name`` entry is dropped from the config so the copy does not
    collide with the original's layer name.
    """
    config = obj.get_config()
    config.pop('name', None)
    return type(obj)(**config)
def predict_wrapper(func, names):
    """
    Wraps a prediction function so its sequence of outputs is returned as
    an ``OrderedDict`` keyed by ``names``.

    Args:
        func: callable returning a sequence of outputs
        names: labels for the outputs, in the same order

    Returns:
        A callable with the same signature as ``func`` whose return value
        is an ``OrderedDict`` mapping ``names`` to the corresponding outputs.
    """
    import functools  # local import: the file's import header is outside this excerpt

    @functools.wraps(func)  # preserve func's name/docstring on the wrapper
    def wrapper(*args, **kwargs):
        out = func(*args, **kwargs)
        return OrderedDict(zip(names, out))
    return wrapper
def save_model(model, fname, overwrite=False, attrs=None):
    """
    Saves the weights and the config of ``model`` in the HDF5 file ``fname``.
    The model config is saved as: ``f.attrs["model"] = model.to_json().encode('utf-8')``,
    where ``f`` is the HDF5 file.

    Args:
        model: keras model to persist
        fname: path of the HDF5 file to write
        overwrite (bool): forwarded to ``model.save_weights``
        attrs (dict): extra top-level HDF5 attributes to store; must not
            contain the reserved key ``layer_names``. ``str`` values are
            utf-8 encoded before being stored.
    """
    if attrs is None:  # avoid the shared mutable-default pitfall
        attrs = {}
    assert 'layer_names' not in attrs
    model.save_weights(fname, overwrite)
    # Context manager ensures the file handle is closed even if an
    # attribute write fails.
    with h5py.File(fname, 'r+') as f:
        f.attrs['model'] = model.to_json().encode('utf-8')
        for k, v in attrs.items():
            if isinstance(v, str):
                v = v.encode('utf-8')
            f.attrs[k] = v
def load_model(fname, custom_objects=None):
    """
    Loads the model and weights from ``fname``. Counterpart to :py:func:`save_model`.

    Args:
        fname: HDF5 file previously written by :py:func:`save_model`
        custom_objects (dict): mapping of names to custom classes/functions,
            forwarded to ``layer_from_config`` for deserialization.

    Returns:
        The reconstructed keras model with its weights loaded.
    """
    if custom_objects is None:  # avoid the shared mutable-default pitfall
        custom_objects = {}
    json_config = get_hdf5_attr(fname, 'model').decode('utf-8')
    config = json.loads(json_config)
    model = layer_from_config(config, custom_objects)
    model.load_weights(fname)
    return model
def get_hdf5_attr(fname, attr_name, default=None):
    """
    Returns the toplevel attribute ``attr_name`` of the hdf5 file ``fname``.

    If ``default`` is not None and the attribute is missing, ``default`` is
    returned instead. (With ``default=None`` a missing attribute raises a
    ``KeyError`` from h5py, as before.)
    """
    with h5py.File(fname, 'r') as f:
        attr_missing = attr_name not in f.attrs
        if attr_missing and default is not None:
            return default
        return f.attrs[attr_name]
| 462 | 0 | 76 |
173584f4773c3d83d5d90972da22ae212b3cab80 | 179 | py | Python | miprometheus/__init__.py | tsjayram/mi-prometheus | cf163d9e246c3ae3c100045e58924148b2f81c39 | [
"Apache-2.0"
] | null | null | null | miprometheus/__init__.py | tsjayram/mi-prometheus | cf163d9e246c3ae3c100045e58924148b2f81c39 | [
"Apache-2.0"
] | null | null | null | miprometheus/__init__.py | tsjayram/mi-prometheus | cf163d9e246c3ae3c100045e58924148b2f81c39 | [
"Apache-2.0"
] | null | null | null | from .grid_workers import *
from .helpers import *
from .models import *
from .models.controllers import *
from .problems import *
from .utils import *
from .workers import *
| 14.916667 | 33 | 0.73743 | from .grid_workers import *
from .helpers import *
from .models import *
from .models.controllers import *
from .problems import *
from .utils import *
from .workers import *
| 0 | 0 | 0 |
7cf48f0776fa78574eee0509c098dc53bf646f2c | 475 | py | Python | libs/__init__.py | Liquidator2048/umbrel-crypto-display | 7ab2afaa1f52898e94cb03a2d0b1fbca4e842689 | [
"MIT"
] | null | null | null | libs/__init__.py | Liquidator2048/umbrel-crypto-display | 7ab2afaa1f52898e94cb03a2d0b1fbca4e842689 | [
"MIT"
] | null | null | null | libs/__init__.py | Liquidator2048/umbrel-crypto-display | 7ab2afaa1f52898e94cb03a2d0b1fbca4e842689 | [
"MIT"
] | null | null | null | try:
from coingecko_api import *
from config import *
from framebuffer import *
from utils import *
from fbi import *
from ifb import *
from script_interfaces import *
from http_client import *
except ImportError:
from .coingecko_api import *
from .config import *
from .framebuffer import *
from .utils import *
from .fbi import *
from .ifb import *
from .script_interfaces import *
from .http_client import *
| 25 | 36 | 0.669474 | try:
from coingecko_api import *
from config import *
from framebuffer import *
from utils import *
from fbi import *
from ifb import *
from script_interfaces import *
from http_client import *
except ImportError:
from .coingecko_api import *
from .config import *
from .framebuffer import *
from .utils import *
from .fbi import *
from .ifb import *
from .script_interfaces import *
from .http_client import *
| 0 | 0 | 0 |
3d3366e9d494493312e67aed10c324fe922df889 | 11,373 | py | Python | benlib/analyze_between_subjects_using_afni_proc_results.py | csea-lab/csea-lab | b61fb92f36f434f4d652ad366b5d046482493e6b | [
"MIT"
] | 4 | 2020-07-16T10:57:23.000Z | 2022-01-19T03:58:36.000Z | benlib/analyze_between_subjects_using_afni_proc_results.py | csea-lab/csea-lab | b61fb92f36f434f4d652ad366b5d046482493e6b | [
"MIT"
] | null | null | null | benlib/analyze_between_subjects_using_afni_proc_results.py | csea-lab/csea-lab | b61fb92f36f434f4d652ad366b5d046482493e6b | [
"MIT"
] | 2 | 2021-05-24T14:33:17.000Z | 2022-01-19T03:58:57.000Z | #!/usr/bin/env python3
"""
Tirelessly runs a 2nd level analysis using the results of our afni_proc.py execution script.
Created 12/17/2020 by Benjamin Velie.
veliebm@gmail.com
"""
# Import pedantic and boring standard Python libraries.
from datetime import datetime
import argparse
from pathlib import Path
import json
from shutil import copy2
# Import exciting and rejeuvenating CSEA custom libraries.
from reference import subject_id_of, the_path_that_matches, task_name_of
from afni import AFNI, subbrick_labels_of
class SecondLevel():
    """
    This class runs a second level analysis on subjects for whom you've already run a first-level analysis.

    NOTE(review): this excerpt does not show an ``__init__``; the attributes
    read below (``self.dirs``, ``self.paths``, ``self.subject_ids``,
    ``self.bids_dir``, ``self.start_time``, ``self.end_time``) must be
    populated elsewhere -- confirm against the full source.
    """
    def __repr__(self):
        """
        Defines how the class represents itself internally as a string.
        To learn more, consider reading https://docs.python.org/3/reference/datamodel.html#basic-customization
        """
        return f"SecondLevel(subject_ids={self.subject_ids}, bids_dir='{self.bids_dir}')"
    def ttest(self):
        """
        Run AFNI's 3dttest++ on the outfiles of each subject. Also concatenates 3dttest++ outfiles together.
        3dttest++ info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dttest++_sphx.html#ahelp-3dttest
        3dTcat info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dTcat_sphx.html#ahelp-3dtcat

        Returns a dict: keys = sub-brick labels (plus "concatenated_results"),
        values = the corresponding AFNI run objects.
        """
        working_directory = self.dirs["output"] / "3dttest++"
        # Gather the labels of the subbricks we want to include.
        representative_dataset = list(self.paths.values())[0]["deconvolve_outfile"]
        labels = subbrick_labels_of(representative_dataset)
        # For each relevant subbrick for each subject, run 3dttest++.
        # Only sub-bricks whose label contains "_Coef" are tested.
        results = {}
        for label in labels:
            if "_Coef" in label:
                # Build arguments to pass to the program.
                # -zskip 100%: skip voxels that are zero in all datasets.
                args = r"-zskip 100% -setA ttest".split()
                for subject_id in self.subject_ids:
                    args += [f"sub-{subject_id}"] + [f'{self.paths[subject_id]["deconvolve_outfile"]}[{label}]']
                # Run program. Store path to outfile as an attribute of the AFNI object.
                label_working_directory = working_directory / f"subbrick-{label}"
                results[label] = AFNI(program="3dttest++", args=args, working_directory=label_working_directory)
                results[label].outfile = the_path_that_matches("*.HEAD", in_directory=label_working_directory)
        # Concatenate outfiles into some rockin' time series :)
        outfiles = [result.outfile for result in results.values() if result.program == "3dttest++"]
        results["concatenated_results"] = self.concatenate(paths_to_datasets=outfiles, parent_working_directory=working_directory)
        # Copy the MNI template to each directory so we can use it in the AFNI viewer.
        directories = [path for path in working_directory.glob("*") if path.is_dir()]
        for directory in directories:
            self.download_TT_N27_brain_into(directory)
        # Return the results as a dictionary. Keys = subbrick labels, values = 3dttest++ results.
        return results
    def mema(self):
        """
        Runs AFNI's 3dMEMA 2nd-level analysis. Also concatenates the results together using 3dTcat.
        3dMEMA info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dMEMA_sphx.html#ahelp-3dmema
        How to gather specific sub-briks from the 3dREMLfit outfile: https://afni.nimh.nih.gov/pub/dist/doc/program_help/common_options.html

        Returns a dict: keys = sub-brick labels (plus "concatenated_results"),
        values = the corresponding AFNI run objects.
        """
        working_directory = self.dirs["output"] / "3dMEMA"
        # Gather the labels of the sub-bricks we want to include.
        representative_dataset = list(self.paths.values())[0]["reml_outfile"]
        labels = subbrick_labels_of(representative_dataset)
        # For each relevant subbrick for each subject, run 3dMEMA.
        # Only sub-bricks whose label contains "_Coef" are tested; the code
        # below assumes the matching t-stat sub-brick directly follows the
        # coefficient sub-brick (indices i and i+1) -- standard 3dREMLfit
        # output ordering.
        results = {}
        for i, label in enumerate(labels):
            if "_Coef" in label:
                # Create base arguments to pass to program.
                args = (f"""
                    -prefix memamemamema
                    -jobs 5
                    -verb 1
                    -missing_data 0
                    -set activation-vs-0
                """).split()
                # Append our 3dREMLfit outfiles to the command.
                for subject_id in self.subject_ids:
                    args += [
                        subject_id,
                        f'{self.paths[subject_id]["reml_outfile"]}[{i}]',  # Append a beta sub-brick to the command
                        f'{self.paths[subject_id]["reml_outfile"]}[{i+1}]',  # Append a Tstat sub-brick to the command
                    ]
                # Run program. Store path to outfile as an attribute of the AFNI object.
                label_working_directory = working_directory / f"subbrick-{label}"
                results[label] = AFNI(program="3dMEMA", args=args, working_directory=label_working_directory)
                results[label].outfile = the_path_that_matches("*.HEAD", in_directory=label_working_directory)
        # Concatenate outfiles into some rockin' time series :)
        outfiles = [result.outfile for result in results.values() if result.program == "3dMEMA"]
        results["concatenated_results"] = self.concatenate(paths_to_datasets=outfiles, parent_working_directory=working_directory)
        # Copy the MNI template to each directory so we can use it in the AFNI viewer.
        directories = [path for path in working_directory.glob("*") if path.is_dir()]
        for directory in directories:
            self.download_TT_N27_brain_into(directory)
        # Return the results as a dictionary. Keys = subbrick labels, values = 3dMEMA results.
        return results
    def write_report(self):
        """
        Writes files containing info about the analysis to help us stay sane.
        """
        # Store workflow info into a dict.
        workflow_info = {
            "Start time": str(self.start_time),
            "End time": str(self.end_time),
            "Time to complete workflow": str(self.end_time - self.start_time),
            "Subject IDs included": self.subject_ids
        }
        # Write the workflow dict to a json file.
        output_json_path = self.dirs["output"] / "workflow_info.json"
        print(f"Writing {output_json_path}")
        with open(output_json_path, "w") as json_file:
            json.dump(workflow_info, json_file, indent="\t")
    def concatenate(self, paths_to_datasets: list, parent_working_directory: Path):
        """
        Runs 3dTcat to neatly organize all subbricks from the datasets you specify.

        Returns a dict mapping each sub-brick label to its 3dTcat AFNI run.
        """
        subbrick_labels = subbrick_labels_of(paths_to_datasets[0])
        results = {}
        for label in subbrick_labels:
            # -tr 2: declare a 2 s repetition time for the output time series.
            tcat_args = "-tr 2".split()
            for path in paths_to_datasets:
                tcat_args += [f"{path}[{label}]"]
            results[label] = AFNI(program="3dTcat", args=tcat_args, working_directory=parent_working_directory/f"{label}_concatenated")
        return results
    def download_TT_N27_brain_into(self, directory):
        """
        Copies the TT_N27+tlrc brain into the target directory.

        Despite the name, nothing is downloaded: the template is copied from
        the local AFNI installation (~/abin).
        """
        home_dir = Path.home()
        copy2(src=home_dir/"abin/TT_N27+tlrc.BRIK.gz", dst=directory)
        copy2(src=home_dir/"abin/TT_N27+tlrc.HEAD", dst=directory)
if __name__ == "__main__":
    """
    This section of the script only runs when you run the script directly from the shell.
    It contains the parser that parses arguments from the command line.
    """
    parser = argparse.ArgumentParser(description="Runs a 2nd-level analysis on subjects for whom you have already run a 1st-level analysis. You must specify the path to the raw BIDS dataset you ran your 1st-level analysis on. You must also specify whether to analyze EITHER a list of specific subjects OR all subjects. Finally, you must specify the title of the directory containing your 1st-level analysis results.", fromfile_prefix_chars="@")
    parser.add_argument("--bids_dir", required=True, help="<Mandatory> Path to the root of the BIDS directory. Example: '--bids_dir /readwrite/contrascan/bids_attempt-2'")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--subjects", metavar="SUBJECT_ID", nargs="+", help="<Mandatory> Analyze a list of specific subject IDs. Example: '--subjects 107 108 110'")
    group.add_argument("--all", action="store_true", help="<Mandatory> Analyze all subjects. Example: '--all'")
    group.add_argument("--all_except", metavar="SUBJECT_ID", nargs="+", help="<Mandatory> Analyze all subjects but exclude those specified here. Example: '--all_except 109 111'")
    # Parse args from the command line and create an empty list to store the subject ids we picked.
    args = parser.parse_args()
    subject_ids = []
    # Option 1: Process all subjects.
    if args.all or args.all_except:
        bids_root = Path(args.bids_dir)
        # BUGFIX: args.all_except is None when --all was used (the options are
        # mutually exclusive); normalize to an empty exclusion list so the
        # membership test below cannot raise "TypeError: argument of type
        # 'NoneType' is not iterable".
        excluded = args.all_except or []
        for subject_dir in bids_root.glob("sub-*"):
            if subject_id_of(subject_dir) not in excluded:
                subject_ids.append(subject_id_of(subject_dir))
    # Option 2: Process specific subjects.
    else:
        subject_ids = args.subjects
    # Launch the second level analysis on the subjects we picked.
    SecondLevel(
        subject_ids=subject_ids,
        bids_dir=args.bids_dir,
    )
| 45.310757 | 444 | 0.660863 | #!/usr/bin/env python3
"""
Tirelessly runs a 2nd level analysis using the results of our afni_proc.py execution script.
Created 12/17/2020 by Benjamin Velie.
veliebm@gmail.com
"""
# Import pedantic and boring standard Python libraries.
from datetime import datetime
import argparse
from pathlib import Path
import json
from shutil import copy2
# Import exciting and rejeuvenating CSEA custom libraries.
from reference import subject_id_of, the_path_that_matches, task_name_of
from afni import AFNI, subbrick_labels_of
class SecondLevel():
    """
    This class runs a second level analysis on subjects for whom you've already run a first-level analysis.
    """
    def __init__(self, subject_ids, bids_dir):
        """
        Build all paths, run both 2nd-level regressions (3dttest++ and
        3dMEMA), then write a summary report.

        Args:
            subject_ids: list of subject ID strings to include
            bids_dir: path to the root of the raw BIDS dataset
        """
        # Track when the program begins running.
        self.start_time = datetime.now()
        # Store input parameters.
        self.subject_ids = subject_ids
        self.bids_dir = bids_dir
        # Tell the user what this class looks like internally.
        print(f"Executing {self.__repr__()}")
        # Store in self.dirs paths to directories we need.
        self.dirs = {}
        self.dirs["bids_root"] = Path(self.bids_dir)     # Location of the raw BIDS dataset.
        self.dirs["firstlevel"] = self.dirs["bids_root"] / "derivatives/analysis_level-1/preprocessing_AND_deconvolution_with_only_afniproc"     # Location of the results of our first-level analyses.
        self.dirs["output"] = self.dirs["bids_root"] / "derivatives/analysis_level-2/based_on_preprocessing_AND_deconvolution_with_only_afniproc"        # Location where we'll store the results of this second-level analysis.
        # Gather into a dict of dicts all the paths we'll use. Sort by subject ID.
        self.paths = {}
        for subject_id in self.subject_ids:
            self.paths[subject_id] = {}
            self.paths[subject_id]["1st_level_results"] = self.dirs["firstlevel"]/f"sub-{subject_id}/{subject_id}.results"
            self.paths[subject_id]["deconvolve_outfile"] = the_path_that_matches(f"stats.{subject_id}+tlrc.HEAD", in_directory=self.paths[subject_id]["1st_level_results"])
            self.paths[subject_id]["reml_outfile"] = the_path_that_matches(f"stats.{subject_id}_REML+tlrc.BRIK", in_directory=self.paths[subject_id]["1st_level_results"])
        # Run our regressions.
        self.results = {}
        self.results["3dttest++"] = self.ttest()
        self.results["3dMEMA"] = self.mema()
        # Record end time and write our report.
        self.end_time = datetime.now()
        self.write_report()
    def __repr__(self):
        """
        Defines how the class represents itself internally as a string.
        To learn more, consider reading https://docs.python.org/3/reference/datamodel.html#basic-customization
        """
        return f"SecondLevel(subject_ids={self.subject_ids}, bids_dir='{self.bids_dir}')"
    def ttest(self):
        """
        Run AFNI's 3dttest++ on the outfiles of each subject. Also concatenates 3dttest++ outfiles together.
        3dttest++ info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dttest++_sphx.html#ahelp-3dttest
        3dTcat info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dTcat_sphx.html#ahelp-3dtcat

        Returns a dict: keys = sub-brick labels (plus "concatenated_results"),
        values = the corresponding AFNI run objects.
        """
        working_directory = self.dirs["output"] / "3dttest++"
        # Gather the labels of the subbricks we want to include.
        representative_dataset = list(self.paths.values())[0]["deconvolve_outfile"]
        labels = subbrick_labels_of(representative_dataset)
        # For each relevant subbrick for each subject, run 3dttest++.
        # Only sub-bricks whose label contains "_Coef" are tested.
        results = {}
        for label in labels:
            if "_Coef" in label:
                # Build arguments to pass to the program.
                # -zskip 100%: skip voxels that are zero in all datasets.
                args = r"-zskip 100% -setA ttest".split()
                for subject_id in self.subject_ids:
                    args += [f"sub-{subject_id}"] + [f'{self.paths[subject_id]["deconvolve_outfile"]}[{label}]']
                # Run program. Store path to outfile as an attribute of the AFNI object.
                label_working_directory = working_directory / f"subbrick-{label}"
                results[label] = AFNI(program="3dttest++", args=args, working_directory=label_working_directory)
                results[label].outfile = the_path_that_matches("*.HEAD", in_directory=label_working_directory)
        # Concatenate outfiles into some rockin' time series :)
        outfiles = [result.outfile for result in results.values() if result.program == "3dttest++"]
        results["concatenated_results"] = self.concatenate(paths_to_datasets=outfiles, parent_working_directory=working_directory)
        # Copy the MNI template to each directory so we can use it in the AFNI viewer.
        directories = [path for path in working_directory.glob("*") if path.is_dir()]
        for directory in directories:
            self.download_TT_N27_brain_into(directory)
        # Return the results as a dictionary. Keys = subbrick labels, values = 3dttest++ results.
        return results
    def mema(self):
        """
        Runs AFNI's 3dMEMA 2nd-level analysis. Also concatenates the results together using 3dTcat.
        3dMEMA info: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/programs/3dMEMA_sphx.html#ahelp-3dmema
        How to gather specific sub-briks from the 3dREMLfit outfile: https://afni.nimh.nih.gov/pub/dist/doc/program_help/common_options.html

        Returns a dict: keys = sub-brick labels (plus "concatenated_results"),
        values = the corresponding AFNI run objects.
        """
        working_directory = self.dirs["output"] / "3dMEMA"
        # Gather the labels of the sub-bricks we want to include.
        representative_dataset = list(self.paths.values())[0]["reml_outfile"]
        labels = subbrick_labels_of(representative_dataset)
        # For each relevant subbrick for each subject, run 3dMEMA.
        # Only sub-bricks whose label contains "_Coef" are tested; the code
        # below assumes the matching t-stat sub-brick directly follows the
        # coefficient sub-brick (indices i and i+1) -- standard 3dREMLfit
        # output ordering.
        results = {}
        for i, label in enumerate(labels):
            if "_Coef" in label:
                # Create base arguments to pass to program.
                args = (f"""
                    -prefix memamemamema
                    -jobs 5
                    -verb 1
                    -missing_data 0
                    -set activation-vs-0
                """).split()
                # Append our 3dREMLfit outfiles to the command.
                for subject_id in self.subject_ids:
                    args += [
                        subject_id,
                        f'{self.paths[subject_id]["reml_outfile"]}[{i}]',  # Append a beta sub-brick to the command
                        f'{self.paths[subject_id]["reml_outfile"]}[{i+1}]',  # Append a Tstat sub-brick to the command
                    ]
                # Run program. Store path to outfile as an attribute of the AFNI object.
                label_working_directory = working_directory / f"subbrick-{label}"
                results[label] = AFNI(program="3dMEMA", args=args, working_directory=label_working_directory)
                results[label].outfile = the_path_that_matches("*.HEAD", in_directory=label_working_directory)
        # Concatenate outfiles into some rockin' time series :)
        outfiles = [result.outfile for result in results.values() if result.program == "3dMEMA"]
        results["concatenated_results"] = self.concatenate(paths_to_datasets=outfiles, parent_working_directory=working_directory)
        # Copy the MNI template to each directory so we can use it in the AFNI viewer.
        directories = [path for path in working_directory.glob("*") if path.is_dir()]
        for directory in directories:
            self.download_TT_N27_brain_into(directory)
        # Return the results as a dictionary. Keys = subbrick labels, values = 3dMEMA results.
        return results
    def write_report(self):
        """
        Writes files containing info about the analysis to help us stay sane.
        """
        # Store workflow info into a dict.
        workflow_info = {
            "Start time": str(self.start_time),
            "End time": str(self.end_time),
            "Time to complete workflow": str(self.end_time - self.start_time),
            "Subject IDs included": self.subject_ids
        }
        # Write the workflow dict to a json file.
        output_json_path = self.dirs["output"] / "workflow_info.json"
        print(f"Writing {output_json_path}")
        with open(output_json_path, "w") as json_file:
            json.dump(workflow_info, json_file, indent="\t")
    def concatenate(self, paths_to_datasets: list, parent_working_directory: Path):
        """
        Runs 3dTcat to neatly organize all subbricks from the datasets you specify.

        Returns a dict mapping each sub-brick label to its 3dTcat AFNI run.
        """
        subbrick_labels = subbrick_labels_of(paths_to_datasets[0])
        results = {}
        for label in subbrick_labels:
            # -tr 2: declare a 2 s repetition time for the output time series.
            tcat_args = "-tr 2".split()
            for path in paths_to_datasets:
                tcat_args += [f"{path}[{label}]"]
            results[label] = AFNI(program="3dTcat", args=tcat_args, working_directory=parent_working_directory/f"{label}_concatenated")
        return results
    def download_TT_N27_brain_into(self, directory):
        """
        Copies the TT_N27+tlrc brain into the target directory.

        Despite the name, nothing is downloaded: the template is copied from
        the local AFNI installation (~/abin).
        """
        home_dir = Path.home()
        copy2(src=home_dir/"abin/TT_N27+tlrc.BRIK.gz", dst=directory)
        copy2(src=home_dir/"abin/TT_N27+tlrc.HEAD", dst=directory)
if __name__ == "__main__":
    """
    This section of the script only runs when you run the script directly from the shell.
    It contains the parser that parses arguments from the command line.
    """
    parser = argparse.ArgumentParser(description="Runs a 2nd-level analysis on subjects for whom you have already run a 1st-level analysis. You must specify the path to the raw BIDS dataset you ran your 1st-level analysis on. You must also specify whether to analyze EITHER a list of specific subjects OR all subjects. Finally, you must specify the title of the directory containing your 1st-level analysis results.", fromfile_prefix_chars="@")
    parser.add_argument("--bids_dir", required=True, help="<Mandatory> Path to the root of the BIDS directory. Example: '--bids_dir /readwrite/contrascan/bids_attempt-2'")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--subjects", metavar="SUBJECT_ID", nargs="+", help="<Mandatory> Analyze a list of specific subject IDs. Example: '--subjects 107 108 110'")
    group.add_argument("--all", action="store_true", help="<Mandatory> Analyze all subjects. Example: '--all'")
    group.add_argument("--all_except", metavar="SUBJECT_ID", nargs="+", help="<Mandatory> Analyze all subjects but exclude those specified here. Example: '--all_except 109 111'")
    # Parse args from the command line and create an empty list to store the subject ids we picked.
    args = parser.parse_args()
    subject_ids = []
    # Option 1: Process all subjects.
    if args.all or args.all_except:
        bids_root = Path(args.bids_dir)
        # BUGFIX: args.all_except is None when --all was used (the options are
        # mutually exclusive); normalize to an empty exclusion list so the
        # membership test below cannot raise "TypeError: argument of type
        # 'NoneType' is not iterable".
        excluded = args.all_except or []
        for subject_dir in bids_root.glob("sub-*"):
            if subject_id_of(subject_dir) not in excluded:
                subject_ids.append(subject_id_of(subject_dir))
    # Option 2: Process specific subjects.
    else:
        subject_ids = args.subjects
    # Launch the second level analysis on the subjects we picked.
    SecondLevel(
        subject_ids=subject_ids,
        bids_dir=args.bids_dir,
    )
| 1,852 | 0 | 27 |
ba6527ade5b80db6d32e10ee1ea1105d48b14279 | 1,535 | py | Python | gnmap2csv.py | bashexplode/gnmap2csv | c3ed62f326a24cb196c96105f4dd115771c89d70 | [
"BSD-3-Clause"
] | null | null | null | gnmap2csv.py | bashexplode/gnmap2csv | c3ed62f326a24cb196c96105f4dd115771c89d70 | [
"BSD-3-Clause"
] | null | null | null | gnmap2csv.py | bashexplode/gnmap2csv | c3ed62f326a24cb196c96105f4dd115771c89d70 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Scripted by Jesse Nebling (@bashexplode)
# Works with both masscan and nmap results
import re
import csv
import argparse

parser = argparse.ArgumentParser(description='Convert GNMap file to CSV by IP address | open ports')
parser.add_argument('inputfile')
parser.add_argument('outputfile')
args = parser.parse_args()

# host -> {"Ports": [port-string, ...]} accumulated over every .gnmap line.
hostports = {}
with open(args.inputfile) as gnmap_file:
    for line in gnmap_file:
        try:
            if "Ports:" not in line:
                continue
            # nmap .gnmap lines carry a leading "Timestamp:" field before the
            # "Host:" field; masscan lines start directly with "Host:".
            if "Timestamp" in line:
                host = line.split('\t')[1].split()[1]
            else:
                host = line.split('\t')[0].split()[1]
            portslist = re.findall(r'(\d*)/open/', line)
            # setdefault keeps one de-duplicated port list per host
            # (previously the first line's ports were stored without
            # de-duplication while later lines were de-duplicated).
            known_ports = hostports.setdefault(host, {}).setdefault("Ports", [])
            for port in portslist:
                if port not in known_ports:
                    known_ports.append(port)
        except IndexError:
            # Malformed / unexpected line layout: skip it, best-effort as before.
            pass

# Write one row per host with its open ports sorted numerically.
# The context manager guarantees the CSV is flushed and closed.
with open(args.outputfile, 'a+', newline='') as csv_file:
    writer = csv.writer(csv_file, delimiter=',')
    for host, data in hostports.items():
        ports = sorted(map(int, data["Ports"]))
        writer.writerow([host, "; ".join(map(str, ports))])
| 34.111111 | 100 | 0.551792 | #!/usr/bin/python
# Scripted by Jesse Nebling (@bashexplode)
# Works with both masscan and nmap results
import re
import csv
import argparse
parser = argparse.ArgumentParser(description='Convert GNMap file to CSV by IP address | open ports')
parser.add_argument('inputfile')
parser.add_argument('outputfile')
args = parser.parse_args()
# NOTE(review): the output file handle is never explicitly closed; it is only
# released when the interpreter exits.
writer = csv.writer(open(args.outputfile, 'a+', newline=''), delimiter=',')
# host -> {"Ports": [port-string, ...]} accumulated over every .gnmap line.
hostports = {}
for line in open(args.inputfile):
    try:
        if "Ports:" in line:
            host = ""
            # nmap .gnmap lines carry a leading "Timestamp:" field before the
            # "Host:" field; masscan lines start directly with "Host:".
            if "Timestamp" in line:
                host = line.split('\t')[1].split()[1]
            else:
                host = line.split('\t')[0].split()[1]
            if host not in hostports.keys():
                hostports[host] = {}
            # First sighting of this host: store the whole port list as-is.
            if "Ports" not in hostports[host].keys():
                portslist = re.findall(r'(\d*)/open/',line)
                hostports[host]["Ports"] = portslist
            # Later sightings: append only ports we have not seen yet.
            else:
                portslist = re.findall(r'(\d*)/open/',line)
                for port in portslist:
                    if port not in hostports[host]["Ports"]:
                        hostports[host]["Ports"].append(port)
        # hostname = re.findall(r'Timestamp: [0-9]+\tHost: (.+?)\(.+\)\tPorts:', line)[0]
    # NOTE(review): bare except silently swallows every error (it exists to
    # skip malformed lines, but it also hides real bugs).
    except:
        pass
# Emit one CSV row per host: "<host>,<port; port; ...>" sorted numerically.
for host in hostports.keys():
    ports = list(map(int, hostports[host]["Ports"]))
    ports.sort()
    ports = list(map(str, ports))
    outputlist = [host, "; ".join(ports)]
    writer.writerow(outputlist)
| 0 | 0 | 0 |
9c70eace4d6b1abb0542f624f23d9adb7e3f85c9 | 1,814 | py | Python | src/numtoword/num2word_EN_GB.py | guildenstern70/pyfab | 34d786fec17192591ac3c5f73913a9b04311695a | [
"MIT"
] | null | null | null | src/numtoword/num2word_EN_GB.py | guildenstern70/pyfab | 34d786fec17192591ac3c5f73913a9b04311695a | [
"MIT"
] | 7 | 2015-12-18T15:37:07.000Z | 2021-06-10T23:35:04.000Z | src/numtoword/num2word_EN_GB.py | guildenstern70/pyfab | 34d786fec17192591ac3c5f73913a9b04311695a | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Module: num2word_EN_GB.py
Requires: num2word_EN.py
Version: 1.0
Author:
Taro Ogawa (tso@users.sourceforge.org)
Copyright:
Copyright (c) 2003, Taro Ogawa. All Rights Reserved.
Licence:
This module is distributed under the Lesser General Public Licence.
http://www.opensource.org/licenses/lgpl-license.php
Data from:
http://www.uni-bonn.de/~manfear/large.php
Usage:
from num2word_EN import n2w, to_card, to_ord, to_ordnum
to_card(1234567890)
n2w.is_title = True
to_card(1234567890)
to_ord(1234567890)
to_ordnum(1234567890)
to_year(1976)
to_currency(pounds*100 + pence)
to_currency((pounds,pence))
History:
1.0: Split from num2word_EN with the addition of to_currency()
"""
from num2word_EN import Num2Word_EN
# NOTE(review): Num2Word_EN_GB and main() are not defined in this excerpt --
# confirm they exist in the full source before running this module directly.
# Module-level singleton plus convenience aliases (see module docstring usage).
n2w = Num2Word_EN_GB()
to_card = n2w.to_cardinal
to_ord = n2w.to_ordinal
to_ordnum = n2w.to_ordinal_num
to_year = n2w.to_year
if __name__ == "__main__":
    main()
| 27.484848 | 141 | 0.679713 | # coding=utf-8
"""
Module: num2word_EN_GB.py
Requires: num2word_EN.py
Version: 1.0
Author:
Taro Ogawa (tso@users.sourceforge.org)
Copyright:
Copyright (c) 2003, Taro Ogawa. All Rights Reserved.
Licence:
This module is distributed under the Lesser General Public Licence.
http://www.opensource.org/licenses/lgpl-license.php
Data from:
http://www.uni-bonn.de/~manfear/large.php
Usage:
from num2word_EN import n2w, to_card, to_ord, to_ordnum
to_card(1234567890)
n2w.is_title = True
to_card(1234567890)
to_ord(1234567890)
to_ordnum(1234567890)
to_year(1976)
to_currency(pounds*100 + pence)
to_currency((pounds,pence))
History:
1.0: Split from num2word_EN with the addition of to_currency()
"""
from num2word_EN import Num2Word_EN
class Num2Word_EN_GB(Num2Word_EN):
    """British-English variant: renders currency values as pounds and pence."""
    def to_currency(self, val, longval=True):
        """Convert ``val`` (pence, or a (pounds, pence) tuple) to words."""
        options = dict(hightxt="pound/s", lowtxt="pence",
                       jointxt="and", longval=longval)
        return self.to_splitnum(val, **options)
# Module-level singleton plus convenience aliases (see module docstring usage).
n2w = Num2Word_EN_GB()
to_card = n2w.to_cardinal
to_ord = n2w.to_ordinal
to_ordnum = n2w.to_ordinal_num
to_year = n2w.to_year
def main():
    """Smoke-test driver: prints cardinal/currency/year renderings.

    NOTE(review): uses Python 2 ``print`` statements -- this module predates
    Python 3 and will not run unmodified under it.
    """
    # Exercise a spread of cardinals, including negatives and floats.
    for val in [ 1, 11, 12, 21, 31, 33, 71, 80, 81, 91, 99, 100, 101, 102, 155,
             180, 300, 308, 832, 1000, 1001, 1061, 1100, 1500, 1701, 3000,
             8280, 8291, 150000, 500000, 1000000, 2000000, 2000001,
             -21212121211221211111, -2.121212, -1.0000100]:
        n2w.test(val)
    # Stress test with a very large integer.
    n2w.test(1325325436067876801768700107601001012212132143210473207540327057320957032975032975093275093275093270957329057320975093272950730)
    # Currency and year renderings for representative values.
    for val in [1,120,1000,1120,1800, 1976,2000,2010,2099,2171]:
        print val, "is", n2w.to_currency(val)
        print val, "is", n2w.to_year(val)
if __name__ == "__main__":
main()
| 746 | 13 | 76 |
1abba296701a1ac6511e3cfd5363a84cbc122ab4 | 2,599 | py | Python | benchmarks/engines/busyring/neuron/parameters.py | bcumming/nbench | d9f96c79657d661b4b48f7ab7e3fdd86f7e90ceb | [
"BSD-3-Clause"
] | 2 | 2019-03-07T14:49:21.000Z | 2019-06-20T13:10:58.000Z | benchmarks/engines/busyring/neuron/parameters.py | bcumming/nbench | d9f96c79657d661b4b48f7ab7e3fdd86f7e90ceb | [
"BSD-3-Clause"
] | 68 | 2019-01-21T15:13:13.000Z | 2021-02-05T15:36:41.000Z | benchmarks/engines/busyring/neuron/parameters.py | eth-cscs/nsuite | d9f96c79657d661b4b48f7ab7e3fdd86f7e90ceb | [
"BSD-3-Clause"
] | 8 | 2019-02-01T16:23:49.000Z | 2021-11-23T08:50:00.000Z | import json
| 36.605634 | 102 | 0.477107 | import json
def from_json(o, key):
if key in o:
return o[key]
else:
raise Exception(str('parameter "'+ key+ '" not in input file'))
class cell_parameters:
def __repr__(self):
s = "cell parameters\n" \
" depth : {0:10d}\n" \
" branch prob : [{1:5.2f} : {2:5.2f}]\n" \
" compartments : [{3:5d} : {4:5d}]\n" \
" lengths : [{5:5.1f} : {6:5.1f}]\n" \
" connections : {7:5.1f}\n" \
.format(self.max_depth,
self.branch_probs[0], self.branch_probs[1],
self.compartments[0], self.compartments[1],
self.lengths[0], self.lengths[1],
self.synapses)
return s
def __init__(self, data=None):
if data:
self.max_depth = from_json(data, 'depth')
self.branch_probs = from_json(data, 'branch-probs')
self.compartments = from_json(data, 'compartments')
self.lengths = from_json(data, 'lengths')
self.synapses = from_json(data, 'synapses')
else:
self.max_depth = 5
self.branch_probs = [1.0, 0.5]
self.compartments = [20, 2]
self.lengths = [200, 20]
self.synapses = 1
class model_parameters:
def __repr__(self):
s = "parameters\n" \
" name : {0:>10s}\n" \
" cells : {1:10d}\n" \
" ring size : {2:10d}\n" \
" duration : {3:10.0f} ms\n" \
" min delay : {4:10.0f} ms\n" \
" dt : {5:10.0f} ms\n" \
.format(self.name, self.num_cells, self.ring_size, self.duration, self.min_delay, self.dt)
s+= str(self.cell)
return s
def __init__(self, filename=None):
self.name = 'default'
self.num_cells = 20
self.duration = 100
self.dt = 0.025
self.min_delay = 10
self.ring_size = 10
self.cell = cell_parameters()
if filename:
with open(filename) as f:
data = json.load(f)
self.name = from_json(data, 'name')
self.num_cells = from_json(data, 'num-cells')
self.ring_size = from_json(data, 'ring-size')
self.duration = from_json(data, 'duration')
self.dt = from_json(data, 'dt')
self.min_delay = from_json(data, 'min-delay')
self.cell = cell_parameters(data)
| 2,408 | 3 | 175 |
a0b33308819084aaf45759f296409f9035f471a5 | 1,432 | py | Python | lcd/mpcLCD.py | JKarthaus/RadioRaspi | e095caa8aa0c7ae76d1697c181cbcc2f58940228 | [
"Apache-2.0"
] | null | null | null | lcd/mpcLCD.py | JKarthaus/RadioRaspi | e095caa8aa0c7ae76d1697c181cbcc2f58940228 | [
"Apache-2.0"
] | 2 | 2018-11-16T20:19:03.000Z | 2018-12-17T20:13:13.000Z | lcd/mpcLCD.py | JKarthaus/RadioRaspi | e095caa8aa0c7ae76d1697c181cbcc2f58940228 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import lcddriver
import time
import os
import signal
if __name__ == "__main__":
lcd = lcddriver.lcd()
killer = GracefulKiller()
print "MPC - parser up and running"
while True :
if killer.kill_now:
print "Service Shutdown requestet..."
break
titel,interpret = parse_mpc()
writeToLCD(titel,interpret)
# print parse_mpc()
time.sleep(3) | 20.169014 | 55 | 0.583101 | #!/usr/bin/python
import lcddriver
import time
import os
import signal
class GracefulKiller:
    """Flips ``kill_now`` to True when SIGINT or SIGTERM arrives, so the
    main loop can shut down cleanly instead of dying mid-update."""

    # Class-level default; the handler sets an instance attribute.
    kill_now = False

    def __init__(self):
        # Route both termination signals to the same handler.
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        # Standard signal-handler signature: (signal number, stack frame).
        self.kill_now = True
def parse_mpc():
    """Read `mpc` output and return a (title, interpret) pair for the LCD.

    Returns ("Radio", "Raspi") when nothing is playing or paused; when
    paused, the interpret row reads "Pausiert".
    """
    #raw = os.popen('simmpc.sh').read()
    raw = os.popen('mpc').read()
    # Use `in` rather than `.find(...) > 0`: find() returns the index, so a
    # match at position 0 was previously treated as "not found".
    playing = "playing" in raw
    paused = "paused" in raw
    if not playing and not paused:
        return "Radio", "Raspi"
    lines = raw.splitlines()
    first = lines[0] if lines else ""
    # Split only on the FIRST dash so titles that themselves contain '-'
    # stay intact; the old split("-")[1] raised IndexError when the line
    # had no dash at all.
    interpret, sep, title = first.partition("-")
    if not sep:
        # No "interpret - title" structure: show the whole line as title.
        interpret, title = "", first
    if paused:
        interpret = "Pausiert"
    return title, interpret
def writeToLCD(row1,row2):
    """Clear the display and write one string to each of the two LCD rows."""
    # NOTE(review): relies on the module-global `lcd`, which is only created
    # inside the __main__ guard below — calling this from an importing
    # module would raise NameError; confirm intended script-only use.
    lcd.lcd_clear()
    lcd.lcd_display_string(row1, 1)
    lcd.lcd_display_string(row2, 2)
if __name__ == "__main__":
    # Service loop (Python 2 syntax): poll mpc every 3 s and mirror the
    # current track on the LCD until GracefulKiller observes SIGINT/SIGTERM.
    lcd = lcddriver.lcd()
    killer = GracefulKiller()
    print "MPC - parser up and running"
    while True :
        if killer.kill_now:
            # A termination signal was caught; leave the loop cleanly.
            print "Service Shutdown requestet..."
            break
        titel,interpret = parse_mpc()
        writeToLCD(titel,interpret)
        # print parse_mpc()
        time.sleep(3)
8292a4fcd418ff13a790c01a607285c863a0dd0d | 638 | py | Python | triage/models.py | carlosmaniero/iwannacontrib | 3c95331f556cc34499948a4743565f63c4df0adb | [
"MIT"
] | null | null | null | triage/models.py | carlosmaniero/iwannacontrib | 3c95331f556cc34499948a4743565f63c4df0adb | [
"MIT"
] | 3 | 2020-08-25T15:08:00.000Z | 2020-08-25T15:11:33.000Z | triage/models.py | carlosmaniero/letmecontib | 3c95331f556cc34499948a4743565f63c4df0adb | [
"MIT"
] | null | null | null | from django.db import models
COMPLEXITY_LEVEL = [
(1, 'Very Easy'),
(2, 'Easy'),
(3, 'Medium'),
(4, 'Hard'),
(5, 'Very Hard'),
]
| 21.266667 | 73 | 0.659875 | from django.db import models
class ProgrammingLanguage(models.Model):
    """A programming language that issues can be tagged with and filtered by."""

    # Unique display name, e.g. "Python".
    name = models.CharField(max_length=200, null=False, unique=True)

    def __str__(self):
        return self.name

    @property
    def link(self):
        """Search-page URL fragment pre-filtered to this language."""
        return f'/?language={self.name}&rate=all#search-section-form'

    @staticmethod
    def get_other_default_language():
        """Return the catch-all "Other" language, creating it if missing."""
        return ProgrammingLanguage.objects.get_or_create(name='Other')[0]
# Django field choices for issue difficulty:
# (stored integer value, human-readable label).
COMPLEXITY_LEVEL = [
    (1, 'Very Easy'),
    (2, 'Easy'),
    (3, 'Medium'),
    (4, 'Hard'),
    (5, 'Very Hard'),
]
class IssueRate(models.Model):
rate = models.IntegerField(choices=COMPLEXITY_LEVEL)
| 172 | 267 | 46 |
52a31f8ae544054871b70a733d52e4ecee72df0a | 5,363 | py | Python | tests/loaders_test.py | andrewrreed/few-shot-text-classification | 48b4b52858e97a3a6f4bfb496874d787927f7725 | [
"Apache-2.0"
] | 1 | 2021-07-28T20:16:34.000Z | 2021-07-28T20:16:34.000Z | tests/loaders_test.py | andrewrreed/few-shot-text-classification | 48b4b52858e97a3a6f4bfb496874d787927f7725 | [
"Apache-2.0"
] | null | null | null | tests/loaders_test.py | andrewrreed/few-shot-text-classification | 48b4b52858e97a3a6f4bfb496874d787927f7725 | [
"Apache-2.0"
] | null | null | null | # ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import unittest
from typing import Any
import mock
from parameterized import parameterized
from fewshot.data.loaders import load_or_cache_sbert_embeddings
from fewshot.utils import fewshot_filename
class AnyObj(object):
"""Equal to anything"""
| 39.433824 | 81 | 0.660451 | # ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import unittest
from typing import Any
import mock
from parameterized import parameterized
from fewshot.data.loaders import load_or_cache_sbert_embeddings
from fewshot.utils import fewshot_filename
class AnyObj(object):
    """Test matcher that compares equal to any value.

    Used as a "don't care" placeholder in mock call assertions.
    """

    def __eq__(self, other: Any) -> bool:
        # Unconditionally equal, whatever the comparand is.
        return True
class TestStringMethods(unittest.TestCase):
    """Unit tests for fewshot.data.loaders.load_or_cache_sbert_embeddings."""

    @parameterized.expand([
        ["test_amazon", "amazon", "amazon"],
        ["test_agnews", "agnews", "agnews"],
        ["test_lower_case", "aMaZoN", "amazon"],
    ])
    @mock.patch("fewshot.data.loaders.get_transformer_embeddings")
    @mock.patch("fewshot.data.loaders.load_transformer_model_and_tokenizer")
    @mock.patch("fewshot.data.loaders._load_amazon_products_dataset")
    @mock.patch("fewshot.data.loaders._load_agnews_dataset")
    @mock.patch("os.path.exists")
    def test_load_or_cache_sbert_embeddings_picks_right_dataset(
            self,
            test_name,
            input_data_name,
            target_data_name,
            mock_exists,
            mock_load_agnews,
            mock_load_amazon,
            mock_model_tokenizer,
            mock_get_embeddings,
    ):
        """Embeddings are computed from the dataset matching the given name
        (case-insensitively), and written to the expected cache file."""
        # Test-level constants
        FAKE_DIR = "FAKE_DIR"
        AMAZON_WORDS = ["amazon", "words"]
        AGNEWS_WORDS = ["agnews", "words"]
        OUTPUT = 123  # Doesn't resemble actual output.

        # Mock values: no cache hit, canned datasets and embeddings.
        mock_exists.return_value = False
        mock_load_amazon.return_value = AMAZON_WORDS
        mock_load_agnews.return_value = AGNEWS_WORDS
        # Don't use these return values because we mock.
        mock_model_tokenizer.return_value = (None, None)
        mock_get_embeddings.return_value = OUTPUT

        # Call load_or_cache_sbert_embeddings
        self.assertEqual(
            load_or_cache_sbert_embeddings(FAKE_DIR, input_data_name), OUTPUT)

        # Expect functions are called with expected values.
        expected_filename = fewshot_filename(FAKE_DIR,
                                             f"{target_data_name}_embeddings.pt")
        mock_exists.assert_called_once_with(expected_filename)
        if target_data_name == "amazon":
            mock_get_embeddings.assert_called_once_with(
                AMAZON_WORDS, AnyObj(), AnyObj(),
                output_filename=expected_filename,
            )
        if target_data_name == "agnews":
            mock_get_embeddings.assert_called_once_with(
                AGNEWS_WORDS, AnyObj(), AnyObj(),
                output_filename=expected_filename,
            )

    # BUG FIX: this method previously reused the name of the test above,
    # shadowing it at class-creation time so the parameterized test never
    # ran. It now carries its own, accurate name.
    @mock.patch("os.path.exists")
    def test_load_or_cache_sbert_embeddings_rejects_unknown_dataset(
            self, mock_exists):
        """An unexpected dataset name raises ValueError after the cache miss."""
        # Test-level constants
        FAKE_DIR = "FAKE_DIR"
        bad_name = "bad_name"

        # Mock value: force a cache miss so the name check is reached.
        mock_exists.return_value = False

        # Call load_or_cache_sbert_embeddings
        with self.assertRaisesRegex(ValueError,
                                    f"Unexpected dataset name: {bad_name}"):
            load_or_cache_sbert_embeddings(FAKE_DIR, bad_name)

        # Expect functions are called with expected values.
        mock_exists.assert_called_once_with(
            fewshot_filename(FAKE_DIR, f"{bad_name}_embeddings.pt"))
| 2,400 | 593 | 50 |
0a28cfdcae7293f9e7e2a643cd5e781e09edca73 | 511 | py | Python | forms.py | CarlottaCharlie/giornatamica | fedcf5012eb080071075ff2dea68e5e08e268218 | [
"MIT"
] | null | null | null | forms.py | CarlottaCharlie/giornatamica | fedcf5012eb080071075ff2dea68e5e08e268218 | [
"MIT"
] | null | null | null | forms.py | CarlottaCharlie/giornatamica | fedcf5012eb080071075ff2dea68e5e08e268218 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField,TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo,Length
from models import User
from flask import request
| 39.307692 | 87 | 0.792564 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField,TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo,Length
from models import User
from flask import request
class LoginForm(FlaskForm):
    """Login form with username, password and a "remember me" toggle.

    Field labels are user-facing Italian strings ('Nome utente' = username,
    'Mantieni accesso' = keep me signed in, 'Accedi' = log in).
    """
    username = StringField('Nome utente', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Mantieni accesso')
    submit = SubmitField('Accedi')
4840a643392cf6c045ab631deda9ef296f72ceb2 | 956 | py | Python | src/plotting/utils/functions.py | bradley-erickson/project1 | 075f6784fbb5893da93c279a64b590630f859e09 | [
"MIT"
] | null | null | null | src/plotting/utils/functions.py | bradley-erickson/project1 | 075f6784fbb5893da93c279a64b590630f859e09 | [
"MIT"
] | 30 | 2021-09-03T21:46:54.000Z | 2021-09-22T18:36:10.000Z | src/plotting/utils/functions.py | bradley-erickson/project1 | 075f6784fbb5893da93c279a64b590630f859e09 | [
"MIT"
] | 11 | 2021-09-26T16:09:42.000Z | 2021-11-03T03:25:26.000Z | def fetch_columns_options(data, table=False):
"""Handle creating column options based on the data
Parameters
----------
data: dict
data from stored dcc.Store component
table: bool (def. False)
Flag for returning table list
Returns
----------
options: list of dict
Options for each of the dropdowns in the form of
{'label': 'Example', 'value': 'example'}
"""
if table:
return [{'name': i, 'id': i} for i in data[0]]
else:
return [{'label': i, 'value': i} for i in data[0]]
def validate_store_data(data):
    """Report whether the stored dcc.Store payload contains dataframe data.

    Parameters
    ----------
    data: dict
        Payload from a stored dcc.Store component (may be None/empty).

    Returns
    ----------
    data_in: bool
        True only when ``data`` is truthy, has a 'df' key, and that key
        is not None.
    """
    return bool(data and 'df' in data and data['df'] is not None)
def fetch_columns_options(data, table=False):
    """Build column options for dropdowns or a DataTable from stored data.

    Parameters
    ----------
    data: list
        Records from a stored dcc.Store component; column names are taken
        by iterating the first record (its keys, for a dict).
    table: bool (def. False)
        When True, emit DataTable-style column dicts instead of dropdown
        options.

    Returns
    ----------
    options: list of dict
        Per column: {'name': col, 'id': col} when ``table`` is True,
        otherwise {'label': col, 'value': col}.
    """
    columns = data[0]
    first_key, second_key = ('name', 'id') if table else ('label', 'value')
    return [{first_key: col, second_key: col} for col in columns]
def validate_store_data(data):
    """Report whether the stored dcc.Store payload contains dataframe data.

    Parameters
    ----------
    data: dict
        Payload from a stored dcc.Store component (may be None/empty).

    Returns
    ----------
    data_in: bool
        True only when ``data`` is truthy, has a 'df' key, and that key
        is not None.
    """
    return bool(data and 'df' in data and data['df'] is not None)
| 0 | 0 | 0 |
d905c77b2033987a620df0d5c7b728ff1e610232 | 5,726 | py | Python | pyblp/results/simulation_results.py | yusukeaoki1223/pyblp | 71cea45251f3772ef8f3dc62c7e47b9820308396 | [
"MIT"
] | 1 | 2019-02-10T14:25:08.000Z | 2019-02-10T14:25:08.000Z | pyblp/results/simulation_results.py | yusukeaoki1223/pyblp | 71cea45251f3772ef8f3dc62c7e47b9820308396 | [
"MIT"
] | null | null | null | pyblp/results/simulation_results.py | yusukeaoki1223/pyblp | 71cea45251f3772ef8f3dc62c7e47b9820308396 | [
"MIT"
] | null | null | null | """Economy-level structuring of BLP simulation results."""
from typing import Dict, Hashable, Optional, Sequence, TYPE_CHECKING, Union
import numpy as np
from ..configurations.formulation import Formulation
from ..configurations.integration import Integration
from ..utilities.basics import Array, Mapping, RecArray, StringRepresentation, TableFormatter, format_seconds
# only import objects that create import cycles when checking types
if TYPE_CHECKING:
from ..economies.problem import Problem # noqa
from ..economies.simulation import Simulation # noqa
class SimulationResults(StringRepresentation):
    """Results of a solved simulation of synthetic BLP data.

    The :meth:`SimulationResults.to_problem` method can be used to convert the full set of simulated data and configured
    information into a :class:`Problem`.

    Attributes
    ----------
    simulation: `Simulation`
        :class:`Simulation` that created these results.
    product_data : `recarray`
        Simulated :attr:`Simulation.product_data` that are updated with synthetic prices and shares.
    computation_time : `float`
        Number of seconds it took to compute synthetic prices and shares.
    fp_converged : `ndarray`
        Flags for convergence of the iteration routine used to compute synthetic prices in each market. Flags are in
        the same order as :attr:`Simulation.unique_market_ids`.
    fp_iterations : `ndarray`
        Number of major iterations completed by the iteration routine used to compute synthetic prices in each market.
        Counts are in the same order as :attr:`Simulation.unique_market_ids`.
    contraction_evaluations : `ndarray`
        Number of times the contraction used to compute synthetic prices was evaluated in each market. Counts are in the
        same order as :attr:`Simulation.unique_market_ids`.

    Examples
    --------
        - :doc:`Tutorial </tutorial>`

    """

    simulation: 'Simulation'
    product_data: RecArray
    computation_time: float
    fp_converged: Array
    fp_iterations: Array
    contraction_evaluations: Array

    def __init__(
            self, simulation: 'Simulation', prices: Array, shares: Array, start_time: float, end_time: float,
            converged_mapping: Dict[Hashable, bool], iteration_mapping: Dict[Hashable, int],
            evaluation_mapping: Dict[Hashable, int]) -> None:
        """Structure simulation results."""
        self.simulation = simulation
        self.product_data = simulation.product_data.copy()
        self.product_data.prices = prices
        self.product_data.shares = shares
        self.computation_time = end_time - start_time
        # BUG FIX: the deprecated alias np.int was removed in NumPy 1.24 and
        # raised AttributeError; the builtin int is exactly what it aliased.
        self.fp_converged = np.array([converged_mapping[t] for t in simulation.unique_market_ids], dtype=int)
        self.fp_iterations = np.array([iteration_mapping[t] for t in simulation.unique_market_ids], dtype=int)
        self.contraction_evaluations = np.array(
            [evaluation_mapping[t] for t in simulation.unique_market_ids], dtype=int
        )

    def __str__(self) -> str:
        """Format simulation results as a string."""
        header = [("Computation", "Time"), ("Fixed Point", "Iterations"), ("Contraction", "Evaluations")]
        widths = [max(len(k1), len(k2)) for k1, k2 in header]
        formatter = TableFormatter(widths)
        return "\n".join([
            "Simulation Results Summary:",
            formatter.line(),
            formatter([k[0] for k in header]),
            formatter([k[1] for k in header], underline=True),
            formatter([
                format_seconds(self.computation_time),
                self.fp_iterations.sum(),
                self.contraction_evaluations.sum()
            ]),
            formatter.line()
        ])

    def to_problem(
            self, product_formulations: Optional[Union[Formulation, Sequence[Optional[Formulation]]]] = None,
            product_data: Optional[Mapping] = None, agent_formulation: Optional[Formulation] = None,
            agent_data: Optional[Mapping] = None, integration: Optional[Integration] = None) -> 'Problem':
        """Convert the solved simulation into a problem.

        Parameters are the same as those of :class:`Problem`. By default, the structure of the problem will be the same
        as that of the solved simulation.

        Parameters
        ----------
        product_formulations : `Formulation or tuple of Formulation, optional`
            By default, :attr:`Simulation.product_formulations`.
        product_data : `structured array-like, optional`
            By default, :attr:`SimulationResults.product_data`.
        agent_formulation : `Formulation, optional`
            By default, :attr:`Simulation.agent_formulation`.
        agent_data : `structured array-like, optional`
            By default, :attr:`Simulation.agent_data`.
        integration : `Integration, optional`
            By default, this is unspecified.

        Returns
        -------
        `Problem`
            A BLP problem.

        Examples
        --------
            - :doc:`Tutorial </tutorial>`

        """
        from ..economies.problem import Problem  # noqa
        # Fill in every unspecified argument from the solved simulation.
        if product_formulations is None:
            product_formulations = self.simulation.product_formulations
        if product_data is None:
            product_data = self.product_data
        if agent_formulation is None:
            agent_formulation = self.simulation.agent_formulation
        if agent_data is None:
            agent_data = self.simulation.agent_data
        assert product_formulations is not None and product_data is not None
        return Problem(product_formulations, product_data, agent_formulation, agent_data, integration)
| 43.378788 | 120 | 0.670451 | """Economy-level structuring of BLP simulation results."""
from typing import Dict, Hashable, Optional, Sequence, TYPE_CHECKING, Union
import numpy as np
from ..configurations.formulation import Formulation
from ..configurations.integration import Integration
from ..utilities.basics import Array, Mapping, RecArray, StringRepresentation, TableFormatter, format_seconds
# only import objects that create import cycles when checking types
if TYPE_CHECKING:
from ..economies.problem import Problem # noqa
from ..economies.simulation import Simulation # noqa
class SimulationResults(StringRepresentation):
    """Results of a solved simulation of synthetic BLP data.

    The :meth:`SimulationResults.to_problem` method can be used to convert the full set of simulated data and configured
    information into a :class:`Problem`.

    Attributes
    ----------
    simulation: `Simulation`
        :class:`Simulation` that created these results.
    product_data : `recarray`
        Simulated :attr:`Simulation.product_data` that are updated with synthetic prices and shares.
    computation_time : `float`
        Number of seconds it took to compute synthetic prices and shares.
    fp_converged : `ndarray`
        Flags for convergence of the iteration routine used to compute synthetic prices in each market. Flags are in
        the same order as :attr:`Simulation.unique_market_ids`.
    fp_iterations : `ndarray`
        Number of major iterations completed by the iteration routine used to compute synthetic prices in each market.
        Counts are in the same order as :attr:`Simulation.unique_market_ids`.
    contraction_evaluations : `ndarray`
        Number of times the contraction used to compute synthetic prices was evaluated in each market. Counts are in the
        same order as :attr:`Simulation.unique_market_ids`.

    Examples
    --------
        - :doc:`Tutorial </tutorial>`

    """

    simulation: 'Simulation'
    product_data: RecArray
    computation_time: float
    fp_converged: Array
    fp_iterations: Array
    contraction_evaluations: Array

    def __init__(
            self, simulation: 'Simulation', prices: Array, shares: Array, start_time: float, end_time: float,
            converged_mapping: Dict[Hashable, bool], iteration_mapping: Dict[Hashable, int],
            evaluation_mapping: Dict[Hashable, int]) -> None:
        """Structure simulation results."""
        self.simulation = simulation
        self.product_data = simulation.product_data.copy()
        self.product_data.prices = prices
        self.product_data.shares = shares
        self.computation_time = end_time - start_time
        # BUG FIX: the deprecated alias np.int was removed in NumPy 1.24 and
        # raised AttributeError; the builtin int is exactly what it aliased.
        self.fp_converged = np.array([converged_mapping[t] for t in simulation.unique_market_ids], dtype=int)
        self.fp_iterations = np.array([iteration_mapping[t] for t in simulation.unique_market_ids], dtype=int)
        self.contraction_evaluations = np.array(
            [evaluation_mapping[t] for t in simulation.unique_market_ids], dtype=int
        )

    def __str__(self) -> str:
        """Format simulation results as a string."""
        header = [("Computation", "Time"), ("Fixed Point", "Iterations"), ("Contraction", "Evaluations")]
        widths = [max(len(k1), len(k2)) for k1, k2 in header]
        formatter = TableFormatter(widths)
        return "\n".join([
            "Simulation Results Summary:",
            formatter.line(),
            formatter([k[0] for k in header]),
            formatter([k[1] for k in header], underline=True),
            formatter([
                format_seconds(self.computation_time),
                self.fp_iterations.sum(),
                self.contraction_evaluations.sum()
            ]),
            formatter.line()
        ])

    def to_problem(
            self, product_formulations: Optional[Union[Formulation, Sequence[Optional[Formulation]]]] = None,
            product_data: Optional[Mapping] = None, agent_formulation: Optional[Formulation] = None,
            agent_data: Optional[Mapping] = None, integration: Optional[Integration] = None) -> 'Problem':
        """Convert the solved simulation into a problem.

        Parameters are the same as those of :class:`Problem`. By default, the structure of the problem will be the same
        as that of the solved simulation.

        Parameters
        ----------
        product_formulations : `Formulation or tuple of Formulation, optional`
            By default, :attr:`Simulation.product_formulations`.
        product_data : `structured array-like, optional`
            By default, :attr:`SimulationResults.product_data`.
        agent_formulation : `Formulation, optional`
            By default, :attr:`Simulation.agent_formulation`.
        agent_data : `structured array-like, optional`
            By default, :attr:`Simulation.agent_data`.
        integration : `Integration, optional`
            By default, this is unspecified.

        Returns
        -------
        `Problem`
            A BLP problem.

        Examples
        --------
            - :doc:`Tutorial </tutorial>`

        """
        from ..economies.problem import Problem  # noqa
        # Fill in every unspecified argument from the solved simulation.
        if product_formulations is None:
            product_formulations = self.simulation.product_formulations
        if product_data is None:
            product_data = self.product_data
        if agent_formulation is None:
            agent_formulation = self.simulation.agent_formulation
        if agent_data is None:
            agent_data = self.simulation.agent_data
        assert product_formulations is not None and product_data is not None
        return Problem(product_formulations, product_data, agent_formulation, agent_data, integration)
| 0 | 0 | 0 |
d84162e92aa09e55d2d278436f2fa0316180671c | 641 | py | Python | examples/upload_avi.py | gertvdijk/peony-twitter | b68411ce66c1f0fca916e25f826a2f7fe6ded432 | [
"MIT"
] | 79 | 2016-08-30T07:18:07.000Z | 2022-03-24T11:02:48.000Z | examples/upload_avi.py | gertvdijk/peony-twitter | b68411ce66c1f0fca916e25f826a2f7fe6ded432 | [
"MIT"
] | 42 | 2016-11-07T13:38:14.000Z | 2022-02-11T11:59:48.000Z | examples/upload_avi.py | gertvdijk/peony-twitter | b68411ce66c1f0fca916e25f826a2f7fe6ded432 | [
"MIT"
] | 13 | 2016-10-05T20:53:42.000Z | 2022-01-26T09:24:37.000Z | #!/usr/bin/env python3
import asyncio
import base64
from urllib.parse import urlparse
try:
from . import peony, api
except (SystemError, ImportError):
from __init__ import peony
import api
client = peony.PeonyClient(**api.keys)
if __name__ == '__main__':
main()
| 19.424242 | 68 | 0.680187 | #!/usr/bin/env python3
import asyncio
import base64
from urllib.parse import urlparse
try:
from . import peony, api
except (SystemError, ImportError):
from __init__ import peony
import api
client = peony.PeonyClient(**api.keys)
async def set_avi(path):
    """Upload the image at ``path`` as the authenticated account's avatar.

    The file is read synchronously, base64-encoded, and posted to Twitter's
    account/update_profile_image endpoint via the module-level `client`.
    """
    with open(path, 'rb') as avi:
        avib64 = base64.b64encode(avi.read()).decode('utf-8')
    await client.api.account.update_profile_image.post(image=avib64)
def main():
    """Prompt for an image path and upload it as the profile avatar."""
    path = input("avi: ")
    # urlparse().path lets users paste file:// URLs; stray quotes and spaces
    # (e.g. from drag-and-drop into a terminal) are stripped.
    path = urlparse(path).path.strip(" \"'")
    # NOTE(review): get_event_loop() is deprecated in newer Pythons, but the
    # module-level peony client presumably binds to this loop at import —
    # confirm before migrating to asyncio.run().
    loop = asyncio.get_event_loop()
    loop.run_until_complete(set_avi(path))
if __name__ == '__main__':
main()
| 310 | 0 | 46 |
07960c1add95fc35673265f47f87b1cd29a9ef86 | 16,578 | py | Python | pennylane/wires.py | DanielPolatajko/pennylane | d603e810a4d34d727a436d852c540fdc0fe21a85 | [
"Apache-2.0"
] | 1 | 2021-02-18T02:14:27.000Z | 2021-02-18T02:14:27.000Z | pennylane/wires.py | DanielPolatajko/pennylane | d603e810a4d34d727a436d852c540fdc0fe21a85 | [
"Apache-2.0"
] | null | null | null | pennylane/wires.py | DanielPolatajko/pennylane | d603e810a4d34d727a436d852c540fdc0fe21a85 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the :class:`Wires` class, which takes care of wire bookkeeping.
"""
from collections.abc import Sequence, Iterable
import functools
import numpy as np
class WireError(Exception):
    """Exception raised by a :class:`~.pennylane.wires.Wires` object when it is unable to process wires."""
def _process(wires):
    """Convert ``wires`` into a tuple of unique wire labels.

    An iterable input is interpreted element-wise as wire labels, with one
    exception: strings always stand for a single label, so users can address
    wires as e.g. ``"ancilla"``. A non-iterable input is wrapped as a single
    label. Any hashable object can be a label; ``hash`` decides uniqueness,
    so ``0`` and ``0.`` denote the same wire. Note that, unlike plain numpy
    arrays, ``pennylane.numpy`` 0-dim arrays are hashable.
    """
    if isinstance(wires, str):
        # A string is deliberately NOT iterated over; it is one label.
        return (wires,)

    try:
        # tuple() doubles as the "is this iterable?" probe; it also rejects
        # 0-dim numpy arrays, which pass isinstance(..., Iterable) but
        # cannot actually be iterated.
        labels = tuple(wires)
    except TypeError:
        # Non-iterable: treat as a single label, which must be hashable so
        # that uniqueness of wires can be established.
        try:
            hash(wires)
        except TypeError as e:
            if str(e).startswith("unhashable"):
                raise WireError(
                    f"Wires must be hashable; got object of type {type(wires)}."
                ) from e
        return (wires,)

    try:
        # The set is needed for the uniqueness check, and building it also
        # verifies that every element is hashable.
        distinct = set(wires)
    except TypeError as e:
        if str(e).startswith("unhashable"):
            raise WireError(f"Wires must be hashable; got {wires}.") from e

    if len(distinct) != len(labels):
        raise WireError(f"Wires must be unique; got {wires}.")

    return labels
class Wires(Sequence):
r"""
A bookkeeping class for wires, which are ordered collections of unique objects.
If the input `wires` can be iterated over, it is interpreted as a sequence of wire labels that have to be
unique and hashable. Else it is interpreted as a single wire label that has to be hashable. The
only exception are strings which are interpreted as wire labels.
The hash function of a wire label is considered the source of truth when deciding whether
two wire labels are the same or not.
Indexing an instance of this class will return a wire label.
Args:
wires (Any): the wire label(s)
"""
def __getitem__(self, idx):
"""Method to support indexing. Returns a Wires object if index is a slice, or a label if index is an integer."""
if isinstance(idx, slice):
return Wires(self._labels[idx])
return self._labels[idx]
def __len__(self):
"""Method to support ``len()``."""
return len(self._labels)
def __contains__(self, item):
"""Method checking if Wires object contains an object."""
return item in self._labels
def __repr__(self):
"""Method defining the string representation of this class."""
return "<Wires = {}>".format(list(self._labels))
def __eq__(self, other):
"""Method to support the '==' operator. This will also implicitly define the '!=' operator."""
# The order is respected in comparison, so that ``assert Wires([0, 1]) != Wires([1,0])``
if isinstance(other, Wires):
return self._labels == other.labels
return self._labels == other
def __hash__(self):
"""Implements the hash function."""
return hash(self._labels)
def __add__(self, other):
"""Defines the addition to return a Wires object containing all wires of the two terms.
Args:
other (Iterable[Number,str], Number, Wires): object to add from the right
Returns:
Wires: all wires appearing in either object
**Example**
>>> wires1 = Wires([4, 0, 1])
>>> wires2 = Wires([1, 2])
>>> wires1 + wires2
Wires([4, 0, 1, 2])
"""
other = Wires(other)
return Wires.all_wires([self, other])
def __radd__(self, other):
"""Defines addition according to __add__ if the left object has no addition defined.
Args:
other (Iterable[Number,str], Number, Wires): object to add from the left
Returns:
Wires: all wires appearing in either object
"""
other = Wires(other)
return Wires.all_wires([other, self])
def __array__(self):
"""Defines a numpy array representation of the Wires object.
Returns:
ndarray: array representing Wires object
"""
return np.array(self._labels)
@property
def labels(self):
"""Get a tuple of the labels of this Wires object."""
return self._labels
def toarray(self):
"""Returns a numpy array representation of the Wires object.
Returns:
ndarray: array representing Wires object
"""
return np.array(self._labels)
def tolist(self):
"""Returns a list representation of the Wires object.
Returns:
List: list of wire labels
"""
return list(self._labels)
def toset(self):
"""Returns a set representation of the Wires object.
Returns:
Set: set of wire labels
"""
return set(self.labels)
def index(self, wire):
"""Overwrites a Sequence's ``index()`` function which returns the index of ``wire``.
Args:
wire (Any): Object whose index is to be found. If this is a Wires object of length 1, look for the object
representing the wire.
Returns:
int: index of the input
"""
if isinstance(wire, Wires):
if len(wire) != 1:
raise WireError("Can only retrieve index of a Wires object of length 1.")
wire = wire[0]
try:
return self._labels.index(wire)
except ValueError as e:
raise WireError("Wire with label {} not found in {}.".format(wire, self)) from e
def indices(self, wires):
"""
Return the indices of the wires in this Wires object.
Args:
wires (Iterable[Number, str], Number, str, Wires): Wire(s) whose indices are to be found
Returns:
List: index list
**Example**
>>> wires1 = Wires([4, 0, 1])
>>> wires2 = Wires([1, 4])
>>> wires1.indices(wires2)
[2, 0]
>>> wires1.indices([1, 4])
[2, 0]
"""
if not isinstance(wires, Iterable):
return [self.index(wires)]
return [self.index(w) for w in wires]
def map(self, wire_map):
"""Returns a new Wires object with different labels, using the rule defined in mapping.
Args:
wire_map (dict): Dictionary containing all wire labels used in this object as keys, and unique
new labels as their values
**Example**
>>> wires = Wires(['a', 'b', 'c'])
>>> wire_map = {'a': 4, 'b':2, 'c': 3}
>>> wires.map(wire_map)
<Wires = [4, 2, 3]>
"""
# Make sure wire_map has `Wires` keys and values so that the `in` operator always works
for w in self:
if w not in wire_map:
raise WireError(
"No mapping for wire label {} specified in wire map {}.".format(w, wire_map)
)
new_wires = [wire_map[w] for w in self]
try:
new_wires = Wires(new_wires)
except WireError as e:
raise WireError(
"Failed to implement wire map {}. Make sure that the new labels are unique and "
"valid wire labels.".format(wire_map)
) from e
return new_wires
def subset(self, indices, periodic_boundary=False):
"""
Returns a new Wires object which is a subset of this Wires object. The wires of the new
object are the wires at positions specified by 'indices'. Also accepts a single index as input.
Args:
indices (List[int] or int): indices or index of the wires we want to select
periodic_boundary (bool): controls periodic boundary conditions in the indexing
Returns:
Wires: subset of wires
**Example**
>>> wires = Wires([4, 0, 1, 5, 6])
>>> wires.subset([2, 3, 0])
<Wires = [1, 5, 4]>
>>> wires.subset(1)
<Wires = [0]>
If ``periodic_boundary`` is True, the modulo of the number of wires of an index is used instead of an index,
so that ``wires.subset(i) == wires.subset(i % n_wires)`` where ``n_wires`` is the number of wires of this
object.
>>> wires = Wires([4, 0, 1, 5, 6])
>>> wires.subset([5, 1, 7], periodic_boundary=True)
<Wires = [4, 0, 1]>
"""
if isinstance(indices, int):
indices = [indices]
if periodic_boundary:
# replace indices by their modulo
indices = [i % len(self._labels) for i in indices]
for i in indices:
if i > len(self._labels):
raise WireError(
"Cannot subset wire at index {} from {} wires.".format(i, len(self._labels))
)
subset = tuple(self._labels[i] for i in indices)
return Wires(subset, _override=True)
def select_random(self, n_samples, seed=None):
"""
Returns a randomly sampled subset of Wires of length 'n_samples'.
Args:
n_samples (int): number of subsampled wires
seed (int): optional random seed used for selecting the wires
Returns:
Wires: random subset of wires
"""
if n_samples > len(self._labels):
raise WireError(
"Cannot sample {} wires from {} wires.".format(n_samples, len(self._labels))
)
if seed is not None:
np.random.seed(seed)
indices = np.random.choice(len(self._labels), size=n_samples, replace=False)
subset = tuple(self[i] for i in indices)
return Wires(subset, _override=True)
@staticmethod
def shared_wires(list_of_wires):
"""Return only the wires that appear in each Wires object in the list.
This is similar to a set intersection method, but keeps the order of wires as they appear in the list.
Args:
list_of_wires (List[Wires]): list of Wires objects
Returns:
Wires: shared wires
**Example**
>>> wires1 = Wires([4, 0, 1])
>>> wires2 = Wires([3, 0, 4])
>>> wires3 = Wires([4, 0])
>>> Wires.shared_wires([wires1, wires2, wires3])
<Wires = [4, 0]>
>>> Wires.shared_wires([wires2, wires1, wires3])
<Wires = [0, 4]>
"""
for wires in list_of_wires:
if not isinstance(wires, Wires):
raise WireError(
"Expected a Wires object; got {} of type {}.".format(wires, type(wires))
)
first_wires_obj = list_of_wires[0]
sets_of_wires = [wire.toset() for wire in list_of_wires]
# find the intersection of the labels of all wires in O(n) time.
intersecting_wires = functools.reduce(lambda a, b: a & b, sets_of_wires)
shared = []
# only need to iterate through the first object,
# since any wire not in this object will also not be shared
for wire in list_of_wires[0]:
if wire in intersecting_wires:
shared.append(wire)
return Wires(tuple(shared), _override=True)
@staticmethod
def all_wires(list_of_wires, sort=False):
"""Return the wires that appear in any of the Wires objects in the list.
This is similar to a set combine method, but keeps the order of wires as they appear in the list.
Args:
list_of_wires (List[Wires]): List of Wires objects
sort (bool): Toggle for sorting the combined wire labels. The sorting is based on
value if all keys are int, else labels' str representations are used.
Returns:
Wires: combined wires
**Example**
>>> wires1 = Wires([4, 0, 1])
>>> wires2 = Wires([3, 0, 4])
>>> wires3 = Wires([5, 3])
>>> list_of_wires = [wires1, wires2, wires3]
>>> Wires.all_wires(list_of_wires)
<Wires = [4, 0, 1, 3, 5]>
"""
combined = []
seen_labels = set()
for wires in list_of_wires:
if not isinstance(wires, Wires):
raise WireError(
"Expected a Wires object; got {} of type {}".format(wires, type(wires))
)
extension = [label for label in wires.labels if label not in seen_labels]
combined.extend(extension)
seen_labels.update(extension)
if sort:
if all([isinstance(w, int) for w in combined]):
combined = sorted(combined)
else:
combined = sorted(combined, key=str)
return Wires(tuple(combined), _override=True)
@staticmethod
def unique_wires(list_of_wires):
"""Return the wires that are unique to any Wire object in the list.
Args:
list_of_wires (List[Wires]): list of Wires objects
Returns:
Wires: unique wires
**Example**
>>> wires1 = Wires([4, 0, 1])
>>> wires2 = Wires([0, 2, 3])
>>> wires3 = Wires([5, 3])
>>> Wires.unique_wires([wires1, wires2, wires3])
<Wires = [4, 1, 2, 5]>
"""
for wires in list_of_wires:
if not isinstance(wires, Wires):
raise WireError(
"Expected a Wires object; got {} of type {}.".format(wires, type(wires))
)
label_sets = [wire.toset() for wire in list_of_wires]
seen_ever = set()
seen_once = set()
# Find unique set in O(n) time.
for labels in label_sets:
# (seen_once ^ labels) finds all of the unique labels seen once
# (seen_ever - seen_once) is the set of labels already seen more than once
# Subtracting these two sets makes a set of labels only seen once so far.
seen_once = (seen_once ^ labels) - (seen_ever - seen_once)
# Update seen labels with all new seen labels
seen_ever.update(labels)
# Get unique values in order they appear.
unique = []
for wires in list_of_wires:
for wire in wires.tolist():
# check that wire is only contained in one of the Wires objects
if wire in seen_once:
unique.append(wire)
return Wires(tuple(unique), _override=True)
| 34.181443 | 120 | 0.589215 | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the :class:`Wires` class, which takes care of wire bookkeeping.
"""
from collections.abc import Sequence, Iterable
import functools
import numpy as np
# Raised throughout this module whenever wire input cannot be validated
# (non-hashable labels, duplicates, out-of-range indices, missing mappings).
class WireError(Exception):
    """Exception raised by a :class:`~.pennylane.wires.Wire` object when it is unable to process wires."""
def _process(wires):
    """Converts the input to a tuple of wire labels.
    If `wires` can be iterated over, its elements are interpreted as wire labels
    and turned into a tuple. Otherwise, `wires` is interpreted as a single wire label.
    The only exception to this are strings, which are always interpreted as a single
    wire label, so users can address wires with labels such as `"ancilla"`.
    Any type can be a wire label, as long as it is hashable. We need this to establish
    the uniqueness of two labels. For example, `0` and `0.` are interpreted as
    the same wire label because `hash(0.) == hash(0)` evaluates to true.
    Note that opposed to numpy arrays, `pennylane.numpy` 0-dim array are hashable.

    Returns:
        tuple: the validated tuple of wire labels

    Raises:
        WireError: if a label is not hashable, or labels are not unique
    """
    if isinstance(wires, str):
        # Interpret string as a non-iterable object.
        # This is the only exception to the logic
        # of considering the elements of iterables as wire labels.
        wires = [wires]
    try:
        # Use tuple conversion as a check for whether `wires` can be iterated over.
        # Note, this is not the same as `isinstance(wires, Iterable)` which would
        # pass for 0-dim numpy arrays that cannot be iterated over.
        tuple_of_wires = tuple(wires)
    except TypeError:
        # if not iterable, interpret as single wire label
        try:
            hash(wires)
        except TypeError as e:
            # if object is not hashable, cannot identify unique wires
            if str(e).startswith("unhashable"):
                raise WireError(
                    "Wires must be hashable; got object of type {}.".format(type(wires))
                ) from e
        # Wrap the single label into a 1-tuple.
        return (wires,)
    try:
        # We need the set for the uniqueness check,
        # so we can use it for hashability check of iterables.
        set_of_wires = set(wires)
    except TypeError as e:
        if str(e).startswith("unhashable"):
            raise WireError("Wires must be hashable; got {}.".format(wires)) from e
        # NOTE(review): a TypeError whose message does not start with "unhashable"
        # is swallowed here, and `set_of_wires` stays unbound — the `len()` call
        # below would then raise NameError. Presumably unreachable; confirm upstream.
    if len(set_of_wires) != len(tuple_of_wires):
        raise WireError("Wires must be unique; got {}.".format(wires))
    return tuple_of_wires
class Wires(Sequence):
    r"""
    A bookkeeping class for wires, which are ordered collections of unique objects.

    If the input ``wires`` can be iterated over, it is interpreted as a sequence of wire
    labels that have to be unique and hashable. Else it is interpreted as a single wire
    label that has to be hashable. The only exception are strings, which are always
    interpreted as a single wire label.

    The hash function of a wire label is considered the source of truth when deciding
    whether two wire labels are the same or not.

    Indexing an instance of this class will return a wire label.

    Args:
        wires (Any): the wire label(s)
    """

    def __init__(self, wires, _override=False):
        # ``_override=True`` is an internal fast path used by methods that already
        # hold a validated tuple of unique, hashable labels; it skips ``_process``.
        if _override:
            self._labels = wires
        else:
            self._labels = _process(wires)

    def __getitem__(self, idx):
        """Support indexing; return a Wires object for a slice, or a label for an integer."""
        if isinstance(idx, slice):
            return Wires(self._labels[idx])
        return self._labels[idx]

    def __len__(self):
        """Support ``len()``; return the number of wires."""
        return len(self._labels)

    def contains_wires(self, wires):
        """Return True if ``wires`` is a Wires object whose labels are all contained
        in this object; False for any non-Wires input."""
        if isinstance(wires, Wires):
            return set(wires.labels).issubset(set(self._labels))
        return False

    def __contains__(self, item):
        """Check whether this Wires object contains the label ``item``."""
        return item in self._labels

    def __repr__(self):
        """String representation, e.g. ``<Wires = [4, 0, 1]>``."""
        return "<Wires = {}>".format(list(self._labels))

    def __eq__(self, other):
        """Support '=='. This also implicitly defines '!='.

        The order is respected, so that ``Wires([0, 1]) != Wires([1, 0])``.
        """
        if isinstance(other, Wires):
            return self._labels == other.labels
        return self._labels == other

    def __hash__(self):
        """Hash based on the ordered tuple of labels."""
        return hash(self._labels)

    def __add__(self, other):
        """Addition returns a Wires object containing all wires of the two terms.

        Args:
            other (Iterable[Number,str], Number, Wires): object to add from the right

        Returns:
            Wires: all wires appearing in either object

        **Example**

        >>> Wires([4, 0, 1]) + Wires([1, 2])
        Wires([4, 0, 1, 2])
        """
        other = Wires(other)
        return Wires.all_wires([self, other])

    def __radd__(self, other):
        """Addition according to __add__ if the left object has no addition defined.

        Args:
            other (Iterable[Number,str], Number, Wires): object to add from the left

        Returns:
            Wires: all wires appearing in either object
        """
        other = Wires(other)
        return Wires.all_wires([other, self])

    def __array__(self):
        """Numpy array representation of the Wires object.

        Returns:
            ndarray: array representing the Wires object
        """
        return np.array(self._labels)

    @property
    def labels(self):
        """Get a tuple of the labels of this Wires object."""
        return self._labels

    def toarray(self):
        """Return a numpy array representation of the Wires object."""
        return np.array(self._labels)

    def tolist(self):
        """Return a list representation of the Wires object."""
        return list(self._labels)

    def toset(self):
        """Return a set representation of the Wires object."""
        return set(self.labels)

    def index(self, wire):
        """Overwrite Sequence's ``index()``; return the index of ``wire``.

        Args:
            wire (Any): object whose index is to be found; a Wires object of
                length 1 is unwrapped to its single label first

        Returns:
            int: index of the input

        Raises:
            WireError: if ``wire`` is a Wires object of length != 1, or the label
                is not present in this object
        """
        if isinstance(wire, Wires):
            if len(wire) != 1:
                raise WireError("Can only retrieve index of a Wires object of length 1.")
            wire = wire[0]
        try:
            return self._labels.index(wire)
        except ValueError as e:
            raise WireError("Wire with label {} not found in {}.".format(wire, self)) from e

    def indices(self, wires):
        """Return the indices of the given wires in this Wires object.

        Args:
            wires (Iterable[Number, str], Number, str, Wires): wire(s) whose
                indices are to be found

        Returns:
            List: index list

        **Example**

        >>> Wires([4, 0, 1]).indices(Wires([1, 4]))
        [2, 0]
        """
        if not isinstance(wires, Iterable):
            return [self.index(wires)]
        return [self.index(w) for w in wires]

    def map(self, wire_map):
        """Return a new Wires object with different labels, using the rule in ``wire_map``.

        Args:
            wire_map (dict): dictionary containing all wire labels used in this
                object as keys, and unique new labels as their values

        Raises:
            WireError: if a label of this object is missing from ``wire_map``, or
                if the mapped labels are not unique/valid

        **Example**

        >>> Wires(['a', 'b', 'c']).map({'a': 4, 'b': 2, 'c': 3})
        <Wires = [4, 2, 3]>
        """
        for w in self:
            if w not in wire_map:
                raise WireError(
                    "No mapping for wire label {} specified in wire map {}.".format(w, wire_map)
                )
        new_wires = [wire_map[w] for w in self]
        try:
            new_wires = Wires(new_wires)
        except WireError as e:
            raise WireError(
                "Failed to implement wire map {}. Make sure that the new labels are unique and "
                "valid wire labels.".format(wire_map)
            ) from e
        return new_wires

    def subset(self, indices, periodic_boundary=False):
        """Return a new Wires object with the wires at the positions ``indices``.

        Also accepts a single index as input.

        Args:
            indices (List[int] or int): indices or index of the wires to select
            periodic_boundary (bool): if True, indices are taken modulo the number
                of wires, so that ``wires.subset(i) == wires.subset(i % n_wires)``

        Returns:
            Wires: subset of wires

        Raises:
            WireError: if an index is out of range

        **Example**

        >>> wires = Wires([4, 0, 1, 5, 6])
        >>> wires.subset([2, 3, 0])
        <Wires = [1, 5, 4]>
        >>> wires.subset(1)
        <Wires = [0]>
        >>> wires.subset([5, 1, 7], periodic_boundary=True)
        <Wires = [4, 0, 1]>
        """
        if isinstance(indices, int):
            indices = [indices]

        if periodic_boundary:
            # replace indices by their modulo
            indices = [i % len(self._labels) for i in indices]

        for i in indices:
            # Bug fix: use >= so that ``i == len(labels)`` raises a descriptive
            # WireError instead of leaking an IndexError from the lookup below.
            if i >= len(self._labels):
                raise WireError(
                    "Cannot subset wire at index {} from {} wires.".format(i, len(self._labels))
                )

        subset = tuple(self._labels[i] for i in indices)
        return Wires(subset, _override=True)

    def select_random(self, n_samples, seed=None):
        """Return a randomly sampled subset of Wires of length ``n_samples``.

        Args:
            n_samples (int): number of subsampled wires
            seed (int): optional random seed used for selecting the wires

        Returns:
            Wires: random subset of wires

        Raises:
            WireError: if ``n_samples`` exceeds the number of wires
        """
        if n_samples > len(self._labels):
            raise WireError(
                "Cannot sample {} wires from {} wires.".format(n_samples, len(self._labels))
            )
        if seed is not None:
            # NOTE(review): this seeds numpy's *global* RNG, which also affects callers.
            np.random.seed(seed)
        # Sample positions without replacement, keeping the sampled order.
        indices = np.random.choice(len(self._labels), size=n_samples, replace=False)
        subset = tuple(self[i] for i in indices)
        return Wires(subset, _override=True)

    @staticmethod
    def shared_wires(list_of_wires):
        """Return only the wires that appear in each Wires object in the list.

        This is similar to a set intersection, but the wires keep the order they
        have in the first element of ``list_of_wires``.

        Args:
            list_of_wires (List[Wires]): list of Wires objects

        Returns:
            Wires: shared wires

        **Example**

        >>> wires1 = Wires([4, 0, 1])
        >>> wires2 = Wires([3, 0, 4])
        >>> wires3 = Wires([4, 0])
        >>> Wires.shared_wires([wires1, wires2, wires3])
        <Wires = [4, 0]>
        """
        for wires in list_of_wires:
            if not isinstance(wires, Wires):
                raise WireError(
                    "Expected a Wires object; got {} of type {}.".format(wires, type(wires))
                )
        # (The unused ``first_wires_obj`` local from the previous version was removed.)
        sets_of_wires = [wire.toset() for wire in list_of_wires]
        # find the intersection of the labels of all wires in O(n) time.
        intersecting_wires = functools.reduce(lambda a, b: a & b, sets_of_wires)
        shared = []
        # only need to iterate through the first object,
        # since any wire not in this object will also not be shared
        for wire in list_of_wires[0]:
            if wire in intersecting_wires:
                shared.append(wire)
        return Wires(tuple(shared), _override=True)

    @staticmethod
    def all_wires(list_of_wires, sort=False):
        """Return the wires that appear in any of the Wires objects in the list.

        This is similar to a set union, but keeps the order in which wires first appear.

        Args:
            list_of_wires (List[Wires]): List of Wires objects
            sort (bool): Toggle for sorting the combined wire labels. The sorting is
                based on value if all keys are int, else labels' str representations
                are used.

        Returns:
            Wires: combined wires

        **Example**

        >>> Wires.all_wires([Wires([4, 0, 1]), Wires([3, 0, 4]), Wires([5, 3])])
        <Wires = [4, 0, 1, 3, 5]>
        """
        combined = []
        seen_labels = set()
        for wires in list_of_wires:
            if not isinstance(wires, Wires):
                raise WireError(
                    "Expected a Wires object; got {} of type {}".format(wires, type(wires))
                )
            extension = [label for label in wires.labels if label not in seen_labels]
            combined.extend(extension)
            seen_labels.update(extension)
        if sort:
            # Generator form instead of a throwaway list inside all().
            if all(isinstance(w, int) for w in combined):
                combined = sorted(combined)
            else:
                combined = sorted(combined, key=str)
        return Wires(tuple(combined), _override=True)

    @staticmethod
    def unique_wires(list_of_wires):
        """Return the wires that are unique to exactly one Wires object in the list.

        Args:
            list_of_wires (List[Wires]): list of Wires objects

        Returns:
            Wires: unique wires

        **Example**

        >>> Wires.unique_wires([Wires([4, 0, 1]), Wires([0, 2, 3]), Wires([5, 3])])
        <Wires = [4, 1, 2, 5]>
        """
        for wires in list_of_wires:
            if not isinstance(wires, Wires):
                raise WireError(
                    "Expected a Wires object; got {} of type {}.".format(wires, type(wires))
                )
        label_sets = [wire.toset() for wire in list_of_wires]
        seen_ever = set()
        seen_once = set()
        # Find unique set in O(n) time.
        for labels in label_sets:
            # (seen_once ^ labels) finds all of the unique labels seen once
            # (seen_ever - seen_once) is the set of labels already seen more than once
            # Subtracting these two sets makes a set of labels only seen once so far.
            seen_once = (seen_once ^ labels) - (seen_ever - seen_once)
            # Update seen labels with all new seen labels
            seen_ever.update(labels)
        # Get unique values in order they appear.
        unique = []
        for wires in list_of_wires:
            for wire in wires.tolist():
                # check that wire is only contained in one of the Wires objects
                if wire in seen_once:
                    unique.append(wire)
        return Wires(tuple(unique), _override=True)
| 268 | 0 | 54 |
f0ab3b983ce824a58913e440bc9cee06283a5abe | 70 | py | Python | cytomine-datamining/algorithms/pyxit/__init__.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | 19 | 2016-03-15T19:48:36.000Z | 2021-05-25T14:38:12.000Z | cytomine-datamining/algorithms/pyxit/__init__.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | 1 | 2017-03-20T16:41:48.000Z | 2017-03-22T15:46:55.000Z | cytomine-datamining/algorithms/pyxit/__init__.py | Cytomine-ULiege/Cytomine-python-datamining | 16db995f175e8972b8731a8df9391625e1920288 | [
"Apache-2.0"
] | 14 | 2016-09-02T19:51:51.000Z | 2022-02-14T18:29:29.000Z | from .estimator import PyxitClassifier
__all__ = ["PyxitClassifier"]
| 17.5 | 38 | 0.8 | from .estimator import PyxitClassifier
__all__ = ["PyxitClassifier"]
| 0 | 0 | 0 |
4ca4784975f3a34d5d112c5bbc2228abd9df4008 | 3,621 | py | Python | src/compas_fab/utilities/numbers.py | yck011522/compas_fab | db7c8e54184dbbad9be5a818cf7ff814c95cf162 | [
"MIT"
] | null | null | null | src/compas_fab/utilities/numbers.py | yck011522/compas_fab | db7c8e54184dbbad9be5a818cf7ff814c95cf162 | [
"MIT"
] | null | null | null | src/compas_fab/utilities/numbers.py | yck011522/compas_fab | db7c8e54184dbbad9be5a818cf7ff814c95cf162 | [
"MIT"
] | null | null | null | import math
__all__ = [
'map_range',
'range_geometric_row',
'arange',
'diffs',
'allclose',
'argsort',
'argmin',
'argmax',
'clamp',
]
def map_range(value, from_min, from_max, to_min, to_max):
    """Linearly interpolate ``value`` from [from_min, from_max] into [to_min, to_max]."""
    # Normalized position of ``value`` inside the source interval.
    position = (value - from_min) / float(from_max - from_min)
    return to_min + position * (to_max - to_min)
def range_geometric_row(number, d, r=1.1):
    """Split ``number`` into ``d`` parts forming a geometric row.

    The parts [n0, n1, ...] sum to ``number`` and satisfy n1 = n0 / r,
    n2 = n1 / r, and so on.
    """
    if r <= 0:
        raise ValueError("r must be > 0")
    # Closed form for the first term of the geometric series.
    first = number / ((1 - (1 / r)**d) / (1 - 1 / r))
    row = [first]
    while len(row) < d:
        row.append(row[-1] / r)
    return row
def arange(start, stop, step):
    """Return evenly spaced values within a given interval.

    The function is similar to NumPy's *arange* function.

    Args:
        start: first value of the sequence
        stop: exclusive end of the sequence
        step: spacing between values; its sign must point from start towards stop

    Returns:
        list: values start, start + step, ... up to (excluding) stop

    Raises:
        ValueError: if the sign of ``step`` moves away from ``stop``
    """
    if math.fabs(stop - (start + step)) > math.fabs(stop - start):
        raise ValueError("Please check the sign of step.")
    # Renamed from ``len`` to avoid shadowing the builtin.
    count = int(math.ceil((stop - start) / float(step)))
    return [start + i * step for i in range(count)]
def diffs(l1, l2):
    """Return the element-wise absolute differences between two lists.

    Raises
    ------
    ValueError
        If 2 lists of different length are passed.
    """
    if len(l1) != len(l2):
        raise ValueError("Pass 2 lists of equal length.")
    return [math.fabs(x - y) for x, y in zip(l1, l2)]
def allclose(l1, l2, tol=1e-05):
    """Return True if two lists are element-wise equal within a tolerance.

    The function is similar to NumPy's *allclose* function.
    """
    # Every pairwise difference must stay within the tolerance.
    return all(math.fabs(a - b) <= tol for a, b in zip(l1, l2))
def argsort(numbers):
    """Return the indices that would sort ``numbers`` (stable).

    The function is similar to NumPy's *argsort* function.

    Note
    ----
    For a large list of numbers reconsider using NumPy's *argsort* function,
    since this function might take too long.
    """
    # Stable sort of positions keyed by the value at each position.
    return sorted(range(len(numbers)), key=lambda i: numbers[i])
def argmin(numbers):
    """Return the index of the minimum value in ``numbers``.

    The function is similar to NumPy's *argmin* function.

    Note
    ----
    For a large list of numbers reconsider using NumPy's *argmin* function,
    since this performs a full sort internally.
    """
    order = argsort(numbers)
    return order[0]
def argmax(numbers):
    """Return the index of the maximum value in ``numbers``.

    The function is similar to NumPy's *argmax* function.

    Note
    ----
    For a large list of numbers reconsider using NumPy's *argmax* function,
    since this performs a full sort internally.
    """
    order = argsort(numbers)
    return order[-1]
def clamp(value, min_value, max_value):
    """Clamp ``value`` into the closed interval [min_value, max_value].

    Args:
        value: number to clamp
        min_value: lower bound of the interval
        max_value: upper bound of the interval

    Returns:
        float: the value limited to the interval

    Raises:
        ValueError: if ``min_value`` exceeds ``max_value``
    """
    if min_value > max_value:
        # Bug fix: the previous message stated the condition backwards
        # ("min_value must be bigger than max_value").
        raise ValueError("min_value must not be greater than max_value")
    return float(min(max(value, min_value), max_value))
if __name__ == "__main__":
    # Smoke-test demonstrations printed when the module is run as a script.
    print(map_range(2, 0, 10, 0, 100))
    print(arange(3, -4, -0.2))
    print(argsort([34, 1, 7, 2, 100]))
    print(clamp(5, 1, 4))
    print(clamp(0, 1, 4))
    print(clamp(3, 1, 4))
| 26.23913 | 80 | 0.624137 | import math
__all__ = [
'map_range',
'range_geometric_row',
'arange',
'diffs',
'allclose',
'argsort',
'argmin',
'argmax',
'clamp',
]
def map_range(value, from_min, from_max, to_min, to_max):
"""Performs a linear interpolation of a value within the range of [from_min,
from_max] to another range of [to_min, to_max].
"""
from_range = from_max - from_min
to_range = to_max - to_min
value_scaled = (value - from_min) / float(from_range)
return to_min + (value_scaled * to_range)
def range_geometric_row(number, d, r=1.1):
"""Returns a list of numbers with a certain relation to each other.
The function divides one number into a list of d numbers [n0, n1, ...], such
that their sum is number and the relation between the numbers is defined
with n1 = n0 / r, n2 = n1 / r, n3 = n2 / r, ...
"""
if r <= 0:
raise ValueError("r must be > 0")
n0 = number / ((1 - (1 / r)**d) / (1 - 1 / r))
numbers = [n0]
for i in range(d - 1):
numbers.append(numbers[-1] / r)
return numbers
def arange(start, stop, step):
"""Returns evenly spaced values within a given interval.
The function is similar to NumPy's *arange* function.
"""
if math.fabs(stop - (start + step)) > math.fabs(stop - start):
raise ValueError("Please check the sign of step.")
len = int(math.ceil((stop - start) / float(step)))
return [start + i * step for i in range(len)]
def diffs(l1, l2):
"""Returns the element-wise differences between two lists.
Raises
------
ValueError
If 2 lists of different length are passed.
"""
if len(l1) != len(l2):
raise ValueError("Pass 2 lists of equal length.")
return [math.fabs(a - b) for a, b in zip(l1, l2)]
def allclose(l1, l2, tol=1e-05):
"""Returns True if two lists are element-wise equal within a tolerance.
The function is similar to NumPy's *allclose* function.
"""
for a, b in zip(l1, l2):
if math.fabs(a - b) > tol:
return False
return True
def argsort(numbers):
"""Returns the indices that would sort an array of numbers.
The function is similar to NumPy's *argsort* function.
Note
----
For a large list of numbers reconsider using NumPy's *argsort* function,
since this function might take too long.
"""
return [i[0] for i in sorted(enumerate(numbers), key=lambda x:x[1])]
def argmin(numbers):
"""Returns the index of the minimum value in numbers.
The function is similar to NumPy's *argmin* function.
Note
----
For a large list of numbers reconsider using NumPy's *argmin* function,
since this function might take too long.
"""
return argsort(numbers)[0]
def argmax(numbers):
"""Returns the index of the maximum value in numbers.
The function is similar to NumPy's *argmax* function.
Note
----
For a large list of numbers reconsider using NumPy's *argmax* function,
since this function might take too long.
"""
return argsort(numbers)[-1]
def clamp(value, min_value, max_value):
"""Clamps a value witin the bound [min_value, max_value]
Returns
-------
float
"""
if min_value > max_value:
raise ValueError("min_value must be bigger than max_value")
return float(min(max(value, min_value), max_value))
if __name__ == "__main__":
print(map_range(2, 0, 10, 0, 100))
print(arange(3, -4, -0.2))
print(argsort([34, 1, 7, 2, 100]))
print(clamp(5, 1, 4))
print(clamp(0, 1, 4))
print(clamp(3, 1, 4))
| 0 | 0 | 0 |
bc920a9296bc6651eae9bbb483a0c953708be546 | 170 | py | Python | ummon/features/__init__.py | matherm/ummon3 | 08476d21ce17cc95180525d48202a1690dfc8a08 | [
"BSD-3-Clause"
] | 1 | 2022-02-10T06:47:13.000Z | 2022-02-10T06:47:13.000Z | ummon/features/__init__.py | matherm/ummon3 | 08476d21ce17cc95180525d48202a1690dfc8a08 | [
"BSD-3-Clause"
] | null | null | null | ummon/features/__init__.py | matherm/ummon3 | 08476d21ce17cc95180525d48202a1690dfc8a08 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from .featurecache import *
from .psTMColorfeatures import *
from .psTMfeatures import *
from .swEVMfeatures import *
from .vgg19features import * | 28.333333 | 32 | 0.747059 | # -*- coding: utf-8 -*-
from .featurecache import *
from .psTMColorfeatures import *
from .psTMfeatures import *
from .swEVMfeatures import *
from .vgg19features import * | 0 | 0 | 0 |
5951af0895394d39717a2843e60300b400cc99c2 | 8,612 | py | Python | crossval/plot.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
] | 2 | 2019-04-26T19:40:31.000Z | 2019-10-12T15:18:29.000Z | crossval/plot.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
] | null | null | null | crossval/plot.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.metrics import check_scoring, roc_curve
from sklearn.model_selection import check_cv
from joblib import Parallel, delayed
from scipy import interp, stats
import matplotlib.pyplot as plt
import seaborn as sns
from .results import check_cvs
from ._curve import *
__all__ = [
'plot_learing_curve',
'plot_roc_auc',
'plot_ttest',
]
def plot_learing_curve(result, X, y, groups=None, max_iter=0, step=1,
                       mode='mean', train_score=False, n_jobs=None):
    """Plot learning curve for boosting estimators.
    Currently supported:
        - LGBMClassifier, LGBMRegressor
        - CatBoostClassifier, CatBoostRegressor

    NOTE(review): the public name contains a typo ("learing") — kept for
    backward compatibility with existing callers.

    Parameters
    ----------
    result : dict
        Cross-validation results, returned by <crossval> function.
        Must contain 'estimator', 'scorer' and 'cv' keys.
    X : DataFrame, shape [n_samples, n_features]
        The data to fit, score and calculate out-of-fold predictions.
        Must be the same as during cross-validation fit.
    y : Series, shape [n_samples]
        The target variable to try to predict.
        Must be the same as during cross-validation fit.
    groups : None
        Group labels for the samples used while splitting the dataset into
        train/test set.
        Must be the same as during cross-validation fit.
    max_iter : int (default=0)
        Maximum number of trees. 0 means all.
    step : int (default=1)
        If greater than 1, plot score only for trees with indices:
        step-1, 2*step-1, 3*step-1 & etc (zero-based indices).
        Larger step speeds up prediction.
    mode : {'mean', 'fold', 'both'} (default='mean')
        - 'mean' : plot average score and std (default)
        - 'fold' : plot score of each fold
        - 'both' : plot both
    train_score : bool (default=False)
        Whether to plot learning curve for training scores.
        If False, speeds up prediction.
    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    Returns
    -------
    trn_scores : ndarray, shape (n_folds, n_stages)
        Train scores learning curve for each fold.
        If train_score is False, return None.
    val_scores : ndarray, shape (n_folds, n_stages)
        Validation scores learning curve for each fold.
    """
    estimators = result['estimator']
    scorer = result['scorer']
    cv = result['cv']
    modes = ('mean', 'fold', 'both')
    # NOTE(review): assert is stripped under ``python -O``; a ValueError would be safer.
    assert mode in modes, f'<mode> must be from {modes}. Found {mode}'
    # Estimator Name — dispatch the staged-prediction generator on the class name.
    estimator = estimators[0]
    name = estimator.__class__.__name__
    if name.startswith('CatBoost'):
        generator = _cat_staged_predict
        if max_iter == 0:
            max_iter = min([e.tree_count_ for e in estimators])
    elif name.startswith('LGB'):
        generator = _lgb_staged_predict
        if max_iter == 0:
            max_iter = min([e.booster_.num_trees() for e in estimators])
    elif name.startswith('XGB'):
        raise NotImplementedError('XGBoost currently does not supported')
        # NOTE(review): the two statements below are unreachable — the raise
        # above exits first. Kept as a placeholder for future XGBoost support.
        generator = _xgb_staged_predict
        if max_iter == 0:
            max_iter = min([e.n_estimators for e in estimators])
    else:
        raise NotImplementedError('Only LGBM and CatBoost currently supported')
    # Estimator Type
    if estimator._estimator_type == 'classifier':
        predictor = _StagedClassifier()
    elif estimator._estimator_type == 'regressor':
        predictor = _StagedRegressor()
    # NOTE(review): if the estimator is neither a classifier nor a regressor,
    # ``predictor`` stays unbound and the Parallel call below raises NameError.
    # Predict in Parallel — one job per cross-validation fold.
    stages = np.arange(step, max_iter+step, step)
    folds = cv.split(X, y, groups)
    scores = Parallel(n_jobs=n_jobs)(
        delayed(_get_scores)(estimator, generator, predictor, trn, val, X, y,
                             scorer, max_iter, step, train_score)
        for (trn, val), estimator in zip(folds, estimators)
    )
    # scores is a list of (train_curve, valid_curve) pairs, one per fold.
    trn_scores = np.array([s[0] for s in scores])
    val_scores = np.array([s[1] for s in scores])
    # Learning Curve(s)
    plt.figure()
    if not train_score:
        # Matches the documented contract: first return value is None in this case.
        trn_scores = None
    else:
        avg = trn_scores.mean(axis=0)
        std = trn_scores.std(axis=0)
        if mode in ['mean', 'both']:
            plt.fill_between(stages, avg-std, avg+std, alpha=.1, color='b')
            plt.plot(stages, avg, label='train score', color='b')
        if mode in ['fold', 'both']:
            for scores in trn_scores:
                plt.plot(stages, scores, '--', color='b', lw=0.5, alpha=0.5)
    if True:
        # NOTE(review): ``if True`` mirrors the train branch above; validation
        # scores are always plotted.
        avg = val_scores.mean(axis=0)
        std = val_scores.std(axis=0)
        if mode in ['mean', 'both']:
            plt.fill_between(stages, avg-std, avg+std, alpha=.1, color='y')
            plt.plot(stages, avg, label='valid score', color='y')
        if mode in ['fold', 'both']:
            for scores in val_scores:
                plt.plot(stages, scores, '--', color='y', lw=0.5, alpha=0.5)
    plt.legend()
    plt.show()
    return trn_scores, val_scores
| 28.328947 | 82 | 0.601602 | import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.metrics import check_scoring, roc_curve
from sklearn.model_selection import check_cv
from joblib import Parallel, delayed
from scipy import interp, stats
import matplotlib.pyplot as plt
import seaborn as sns
from .results import check_cvs
from ._curve import *
# Public plotting API of this module.
# NOTE(review): 'plot_learing_curve' [sic] is the established exported name;
# renaming it would break downstream imports.
__all__ = [
    'plot_learing_curve',
    'plot_roc_auc',
    'plot_ttest',
]
def plot_learing_curve(result, X, y, groups=None, max_iter=0, step=1,
                       mode='mean', train_score=False, n_jobs=None):
    """Plot learning curve for boosting estimators.

    Currently supported:
        - LGBMClassifier, LGBMRegressor
        - CatBoostClassifier, CatBoostRegressor

    Parameters
    ----------
    result : dict
        Cross-validation results, returned by <crossval> function.
        Must contain 'estimator', 'scorer' and 'cv' keys.
    X : DataFrame, shape [n_samples, n_features]
        The data to fit, score and calculate out-of-fold predictions.
        Must be the same as during cross-validation fit.
    y : Series, shape [n_samples]
        The target variable to try to predict.
        Must be the same as during cross-validation fit.
    groups : None
        Group labels for the samples used while splitting the dataset into
        train/test set. Must be the same as during cross-validation fit.
    max_iter : int (default=0)
        Maximum number of trees. 0 means all.
    step : int (default=1)
        If greater than 1, plot score only for trees with indices:
        step-1, 2*step-1, 3*step-1 & etc (zero-based indices).
        Larger step speeds up prediction.
    mode : {'mean', 'fold', 'both'} (default='mean')
        - 'mean' : plot average score and std (default)
        - 'fold' : plot score of each fold
        - 'both' : plot both
    train_score : bool (default=False)
        Whether to plot learning curve for training scores.
        If False, speeds up prediction.
    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    Returns
    -------
    trn_scores : ndarray, shape (n_folds, n_stages)
        Train scores learning curve for each fold.
        If train_score is False, return None.
    val_scores : ndarray, shape (n_folds, n_stages)
        Validation scores learning curve for each fold.
    """
    estimators = result['estimator']
    scorer = result['scorer']
    cv = result['cv']

    modes = ('mean', 'fold', 'both')
    assert mode in modes, f'<mode> must be from {modes}. Found {mode}'

    # Pick the staged-prediction generator matching the boosting flavour.
    estimator = estimators[0]
    name = estimator.__class__.__name__
    if name.startswith('CatBoost'):
        generator = _cat_staged_predict
        if max_iter == 0:
            max_iter = min([e.tree_count_ for e in estimators])
    elif name.startswith('LGB'):
        generator = _lgb_staged_predict
        if max_iter == 0:
            max_iter = min([e.booster_.num_trees() for e in estimators])
    elif name.startswith('XGB'):
        # Fixed: removed the dead statements that used to follow this raise
        # (they could never execute).
        raise NotImplementedError('XGBoost currently does not supported')
    else:
        raise NotImplementedError('Only LGBM and CatBoost currently supported')
    # Wrap staged predictions in a scoreable estimator shim.
    if estimator._estimator_type == 'classifier':
        predictor = _StagedClassifier()
    elif estimator._estimator_type == 'regressor':
        predictor = _StagedRegressor()
    # Score each fold at every <step>-th boosting stage, in parallel.
    stages = np.arange(step, max_iter+step, step)
    folds = cv.split(X, y, groups)
    scores = Parallel(n_jobs=n_jobs)(
        delayed(_get_scores)(estimator, generator, predictor, trn, val, X, y,
                             scorer, max_iter, step, train_score)
        for (trn, val), estimator in zip(folds, estimators)
    )
    trn_scores = np.array([s[0] for s in scores])
    val_scores = np.array([s[1] for s in scores])

    def _draw(fold_scores, color, series_label):
        # Mean +/- std band and/or one dashed curve per fold, per <mode>.
        if mode in ('mean', 'both'):
            avg = fold_scores.mean(axis=0)
            std = fold_scores.std(axis=0)
            plt.fill_between(stages, avg-std, avg+std, alpha=.1, color=color)
            plt.plot(stages, avg, label=series_label, color=color)
        if mode in ('fold', 'both'):
            for fold_curve in fold_scores:
                plt.plot(stages, fold_curve, '--', color=color, lw=0.5, alpha=0.5)

    # Learning curve(s). Fixed: replaced the dead "if True:" scaffolding and
    # the copy-pasted train/valid plotting with a single helper.
    plt.figure()
    if not train_score:
        trn_scores = None
    else:
        _draw(trn_scores, 'b', 'train score')
    _draw(val_scores, 'y', 'valid score')
    plt.legend()
    plt.show()
    return trn_scores, val_scores
def plot_ttest(resultA, resultB, score='val_score', label='label', cuped=False):
    """Paired t-test between two cross-validation results, with diagnostic plots.

    Parameters
    ----------
    resultA, resultB : dict
        Cross-validation results; each must contain the <score> key holding
        per-fold scores of equal length.
    score : str (default='val_score')
        Key of the per-fold score arrays to compare.
    label : str (default='label')
        Optional key giving a display name for each result.
    cuped : bool (default=False)
        If True, apply a CUPED-style adjustment to B's scores using A as the
        covariate before testing.

    Returns
    -------
    t, p : float
        Paired t-statistic and two-sided p-value.
    """
    # Check input. Fixed: the assertion messages referenced an undefined name
    # <key>, so a failing assert raised NameError instead of AssertionError.
    assert score in resultA, f"<resultA> has no '{score}'"
    assert score in resultB, f"<resultB> has no '{score}'"
    a = resultA[score]
    b = resultB[score]
    assert len(a) == len(b), 'Both scores must be of the same size'
    n = len(a)
    # Check labels
    labels = ['0', '1']
    if label in resultA: labels[0] = resultA[label]
    if label in resultB: labels[1] = resultB[label]
    # CUPED adjustment. Fixed: re-bind instead of "b -= ..." so the caller's
    # array in resultB is not mutated in place.
    # NOTE(review): the covariate is centred with np.mean(b); classic CUPED
    # centres with np.mean(a) -- confirm this is intended.
    if cuped:
        theta = np.cov(a, b)[0, 1] / np.var(a)
        b = b - (a - np.mean(b)) * theta
    # Paired t-test
    t, p = stats.ttest_rel(a, b)
    # Diagnostics: box plot, paired fold values, distributions, t density.
    _, axes = plt.subplots(2, 2)
    # Box plot of both score samples
    ax = axes[0, 0]
    sns.boxplot(labels, [a, b], linewidth=2.0, ax=ax)
    ax.grid(alpha=0.2)
    # Per-fold paired values; white line connects the two means
    ax = axes[1, 0]
    for x, y in zip(a, b):
        ax.plot(labels, [x, y], 'o-', color='b', alpha=0.8)
    ax.plot(labels, [np.mean(a), np.mean(b)], 'o-', color='w')
    ax.grid(alpha=0.2)
    # Score distributions
    ax = axes[0, 1]
    sns.distplot(a, 10, label=labels[0], ax=ax)
    sns.distplot(b, 10, label=labels[1], ax=ax)
    ax.grid(alpha=0.2)
    ax.legend()
    # Student-t density with df = n-1, shaded on both sides of the observed t
    ax = axes[1, 1]
    x_abs = max(5, abs(t))
    x_min, x_max = -x_abs, +x_abs
    xx = np.arange(t, x_max, 0.001)
    yy = stats.t.pdf(xx, n-1)
    ax.plot(xx, yy, color='gray')
    ax.fill_between(xx, yy, color='gray', alpha=0.2)
    xx = np.arange(x_min, t, 0.001)
    yy = stats.t.pdf(xx, n-1)
    ax.plot(xx, yy, color='r')
    ax.fill_between(xx, yy, color='r', alpha=0.2)
    ax.legend(['t-value = {:.4f}'.format(t),
               'p-value = {:.4f}'.format(p)])
    ax.grid(alpha=0.2)
    return t, p
def plot_roc_auc(results, X, y, groups=None, labels=None, colors=None, steps=200):
    """Plot per-fold and mean ROC curves (with a +/-1 std band) for several
    cross-validation results on a common FPR grid.

    Parameters
    ----------
    results : list of dict
        CV results; each must contain a fitted 'estimator' list and share the
        same 'cv' splitter (validated by check_cvs).
    X, y, groups : data used during the cross-validation fit.
    labels : list or None -- one display label per result (default: indices).
    colors : list or None -- one color per result (default: rcParams cycle).
    steps : int -- number of FPR grid points for interpolation.
    """
    # Check input (check_cvs validates that all results share one CV scheme)
    cv = check_cvs(results, X, y, groups)
    msg = "<labels> must be of same len as <results>"
    if labels:
        assert len(labels) == len(results), msg
    else:
        labels = list(range(len(results)))
    msg = "<colors> must be of same len as <results>"
    if colors:
        assert len(colors) == len(results), msg
    else:
        colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    msg = "Each <result> must have 'estimator' key"
    for result in results:
        assert 'estimator' in result, msg
    # Out-of-fold ROC curves, interpolated onto a common FPR grid
    avg_fpr = np.linspace(0, 1, steps)
    curves = defaultdict(list)
    # NOTE(review): this overwrites the check_cvs() return value -- presumably
    # both refer to the same splitter; confirm against check_cvs' contract.
    cv = results[0]['cv']
    for i, (_, oof) in enumerate(cv.split(X, y, groups)):
        X_oof = X.iloc[oof]
        y_oof = y.iloc[oof]
        for j, result in enumerate(results):
            y_pred = result['estimator'][i].predict_proba(X_oof)[:, 1]
            fpr, tpr, _ = roc_curve(y_oof, y_pred)
            # np.interp replaces the long-deprecated scipy.interp alias.
            tpr = np.interp(avg_fpr, fpr, tpr)
            tpr[0] = 0.0
            curves[labels[j]].append(tpr)
    # Plot. Fixed: the validated (possibly user-supplied) <colors> list was
    # being overwritten with the default cycle, so the argument was ignored.
    colors = dict(zip(labels, colors))
    plt.figure()
    for label, tprs in curves.items():
        c = colors[label]
        for tpr in tprs:
            plt.plot(avg_fpr, tpr, c=c, alpha=0.2)
        avg_tpr = np.mean(tprs, axis=0)
        plt.plot(avg_fpr, avg_tpr, c=c, label=label)
        std_tpr = np.std(tprs, axis=0)
        tpr_upper = np.minimum(avg_tpr + std_tpr, 1)
        tpr_lower = np.maximum(avg_tpr - std_tpr, 0)
        plt.fill_between(avg_fpr, tpr_lower, tpr_upper, color=c, alpha=.1)
    plt.legend(loc='lower right')
    plt.show()
| 3,461 | 0 | 46 |
38adf2e32cb29942bdd74a4209a9981ed1ff5a77 | 1,640 | py | Python | Code/PyTorch/1_TwoLayerNet/data_preprocessing_all.py | volmen3/Bachelors_Thesis | 4b5e7f1d79a3533e4b6e9efb27f931e2f541992f | [
"MIT"
] | null | null | null | Code/PyTorch/1_TwoLayerNet/data_preprocessing_all.py | volmen3/Bachelors_Thesis | 4b5e7f1d79a3533e4b6e9efb27f931e2f541992f | [
"MIT"
] | null | null | null | Code/PyTorch/1_TwoLayerNet/data_preprocessing_all.py | volmen3/Bachelors_Thesis | 4b5e7f1d79a3533e4b6e9efb27f931e2f541992f | [
"MIT"
] | null | null | null | """
Last change: 10.06.2018
"""
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
print("### data_preprocessing_all is running directly ###")
main()
else:
print("### data_preprocessing_all is running from the import ###")
| 34.893617 | 89 | 0.67439 | """
Last change: 10.06.2018
"""
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Load input_data.csv and split it into feature matrix and label vector.

    CSV columns per sample (one row per sample before the transpose):
        [phi [deg], theta [deg],
         d_phi [rad/s], d_theta [rad/s], d_psi [rad/s],
         dd_x [m/s^2], dd_y [m/s^2], dd_z [m/s^2],
         label (0/1)]

    Returns
    -------
    input_data_matrix : ndarray, shape (8, n_samples)
        One row per measurement channel.
    y : ndarray, shape (1, n_samples)
        Binary labels (row 8 of the transposed data).
    """
    # Fixes: np.loadtxt accepts a filename directly -- the previous
    # open("input_data.csv", "rb") handle was never closed (resource leak);
    # the unused `rows`/`cols` locals were removed. Behavior is unchanged.
    input_data_matrix = np.loadtxt("input_data.csv", delimiter=",")
    # Transpose so each row is one channel and each column one sample.
    input_data_matrix = input_data_matrix.T
    # Row 8 holds the labels; expose it as shape (1, n_samples).
    y = input_data_matrix[8, :]
    y = np.reshape(y, newshape=(1, len(y)))
    # Drop the label row, leaving only the 8 input channels.
    input_data_matrix = np.delete(input_data_matrix, 8, axis=0)
    return input_data_matrix, y
# Quick smoke test when executed directly; importing only logs a notice.
if __name__ == '__main__':
    print("### data_preprocessing_all is running directly ###")
    main()
else:
    print("### data_preprocessing_all is running from the import ###")
| 1,349 | 0 | 23 |
8d09ce32d304afe2006db29e4f5b8a0c97e40bfe | 719 | py | Python | src/collective/solr/tests/test_doctests.py | IMIO/collective.solr | 844219eb3968b34d2b83a7bd5f59340d676d149e | [
"ZPL-1.1"
] | 15 | 2015-04-13T14:54:47.000Z | 2022-01-17T09:18:00.000Z | src/collective/solr/tests/test_doctests.py | IMIO/collective.solr | 844219eb3968b34d2b83a7bd5f59340d676d149e | [
"ZPL-1.1"
] | 198 | 2015-01-30T15:29:32.000Z | 2022-03-22T10:39:31.000Z | src/collective/solr/tests/test_doctests.py | adrianschulz/collective.solr | 2d76fe01a02174d383fdc335d38ee52afa8bfa27 | [
"ZPL-1.1"
] | 34 | 2015-02-24T09:23:31.000Z | 2022-03-01T02:31:39.000Z | # -*- coding: utf-8 -*-
from collective.solr.testing import LEGACY_COLLECTIVE_SOLR_FUNCTIONAL_TESTING
from plone.testing import layered
from unittest import TestSuite
import doctest
optionflags = (
doctest.ELLIPSIS
| doctest.NORMALIZE_WHITESPACE
| doctest.REPORT_ONLY_FIRST_FAILURE
| doctest.IGNORE_EXCEPTION_DETAIL
)
| 28.76 | 83 | 0.72879 | # -*- coding: utf-8 -*-
from collective.solr.testing import LEGACY_COLLECTIVE_SOLR_FUNCTIONAL_TESTING
from plone.testing import layered
from unittest import TestSuite
import doctest
# Shared doctest option flags: tolerate "..." ellipses and whitespace
# differences, report only the first failure per file, and ignore exception
# detail text (which varies across Python versions).
optionflags = (
    doctest.ELLIPSIS
    | doctest.NORMALIZE_WHITESPACE
    | doctest.REPORT_ONLY_FIRST_FAILURE
    | doctest.IGNORE_EXCEPTION_DETAIL
)
def test_suite():
    """Assemble the functional-layer doctest suites for this package."""
    suite = TestSuite()
    for doc_name in ("errors.txt", "configlet.txt"):
        file_suite = doctest.DocFileSuite(
            doc_name, package="collective.solr.tests", optionflags=optionflags
        )
        # Each doctest file runs inside the legacy functional testing layer.
        suite.addTest(
            layered(file_suite, layer=LEGACY_COLLECTIVE_SOLR_FUNCTIONAL_TESTING)
        )
    return suite
| 360 | 0 | 23 |
a1ddfb7228e80ed204b7d55f8256eb8c4275cd1e | 1,408 | py | Python | mmdet/datasets/weighted_editdistance.py | asenina/mmdetection | 951b23a7ecee7fa79caf7f80d71491b7f555a261 | [
"Apache-2.0"
] | 4 | 2020-01-19T08:00:31.000Z | 2020-02-14T03:25:45.000Z | mmdet/datasets/weighted_editdistance.py | asenina/mmdetection | 951b23a7ecee7fa79caf7f80d71491b7f555a261 | [
"Apache-2.0"
] | 3 | 2021-03-12T12:06:37.000Z | 2021-07-28T11:21:33.000Z | mmdet/datasets/weighted_editdistance.py | asenina/mmdetection | 951b23a7ecee7fa79caf7f80d71491b7f555a261 | [
"Apache-2.0"
] | 1 | 2020-04-21T01:44:04.000Z | 2020-04-21T01:44:04.000Z | # based on https://github.com/MhLiao/MaskTextSpotter/blob/master/evaluation/icdar2015/e2e/weighted_editdistance.py
# MIT license
import numpy as np
| 32 | 117 | 0.59375 | # based on https://github.com/MhLiao/MaskTextSpotter/blob/master/evaluation/icdar2015/e2e/weighted_editdistance.py
# MIT license
import numpy as np
def weighted_edit_distance(word1, word2, scores):
    """Edit distance between word1 and word2 with per-character weighted costs.

    dp[i, j] holds the cost of matching the first j characters of word1
    against the first i characters of word2; delete/insert/replace costs come
    from the character score matrix via the ed_* helpers.
    """
    n_cols = len(word1)
    n_rows = len(word2)
    dp = np.zeros((n_rows + 1, n_cols + 1))
    dp[0, :] = np.arange(n_cols + 1)  # deleting a prefix of word1
    dp[:, 0] = np.arange(n_rows + 1)  # inserting a prefix of word2
    for i in range(1, n_rows + 1):
        for j in range(1, n_cols + 1):
            del_cost = ed_delete_cost(j - 1, word1, scores)
            ins_cost = ed_insert_cost(j - 1, word1, scores)
            if word1[j - 1] == word2[i - 1]:
                rep_cost = 0
            else:
                rep_cost = ed_replace_cost(j - 1, i - 1, word1, word2, scores)
            dp[i, j] = min(dp[i - 1, j] + ins_cost,
                           dp[i, j - 1] + del_cost,
                           dp[i - 1, j - 1] + rep_cost)
    return dp[n_rows, n_cols]
def ed_delete_cost(j, word1, scores):
    """Cost of deleting word1[j]: the score of that character at position j."""
    return scores[char2num(word1[j])][j]
def ed_insert_cost(i, word1, scores):
    """Cost of inserting at position i of word1: average of the scores of the
    two neighbouring characters, or the last character's score at the end."""
    current = char2num(word1[i])
    if i >= len(word1) - 1:
        # Last position: no right neighbour to average with.
        return scores[current][i]
    following = char2num(word1[i + 1])
    return (scores[current][i] + scores[following][i + 1]) / 2
def ed_replace_cost(i, j, word1, word2, scores):
    """Cost of substituting word2[j] for word1[i], based on the score ratio of
    the two characters at word1's position i (clamped at 0)."""
    src = char2num(word1[i])
    dst = char2num(word2[j])
    ratio = scores[dst][i] / scores[src][i]
    return max(1 - ratio * 5, 0)
def char2num(char):
    """Map a character (case-insensitive) to its class index in the 36-row
    score matrix: A-Z -> 0..25, 0-9 -> 26..35, unknown -> -1."""
    classes = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    return classes.find(char.upper())
| 1,142 | 0 | 115 |
226868301b69f2a737a3185d705cfeef49167588 | 223 | py | Python | literal/literal/celery.py | spanickroon/Text-From-Photo-Django-API | e1ef79c90a443cc3e606dec9e1c531aa5943ca59 | [
"MIT"
] | null | null | null | literal/literal/celery.py | spanickroon/Text-From-Photo-Django-API | e1ef79c90a443cc3e606dec9e1c531aa5943ca59 | [
"MIT"
] | null | null | null | literal/literal/celery.py | spanickroon/Text-From-Photo-Django-API | e1ef79c90a443cc3e606dec9e1c531aa5943ca59 | [
"MIT"
] | 1 | 2021-06-08T18:06:21.000Z | 2021-06-08T18:06:21.000Z | import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "literal.settings")
app = Celery("literal")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
| 22.3 | 67 | 0.793722 | import os
from celery import Celery
# Point Celery at the Django settings module before configuration is read.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "literal.settings")
app = Celery("literal")  # Celery application for the "literal" project
# Load every CELERY_*-prefixed setting from Django's settings module.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Discover tasks.py modules in all installed Django apps.
app.autodiscover_tasks()
| 0 | 0 | 0 |
a9db6fdbc1cec3cd59d5f33eb972b998a9a73a5b | 1,853 | py | Python | celebA/interpolate.py | rahhul/GANs | cec9e2f81528099407b8a9d3dce2f1cf85e449be | [
"MIT"
] | null | null | null | celebA/interpolate.py | rahhul/GANs | cec9e2f81528099407b8a9d3dce2f1cf85e449be | [
"MIT"
] | null | null | null | celebA/interpolate.py | rahhul/GANs | cec9e2f81528099407b8a9d3dce2f1cf85e449be | [
"MIT"
] | null | null | null | # python3
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
def slerp(val, low, high):
"""Spherical Linear Interpolation"""
omega = np.arccos(np.clip(np.dot(low/np.linalg.norm(high),
high/np.linalg.norm(high)), -1, 1))
so = np.sin(omega)
if so == 0:
# L'Hopital's rule/LERP
return (1.0 - val) * low + val * high
return np.sin((1.0 - val) * omega) / so * low + \
np.sin(val * omega) / so * high
# generate random points in latent space
def latent_points_interpolate(latent_dim: int, n_samples: int) -> np.ndarray:
""" Draw random points feom a normal distribution"""
# TODO: insert random seed
# np.random.seed(42)
z = np.random.randn(latent_dim * n_samples)
# reshape
z = z.reshape(n_samples, latent_dim)
# interpolate
Z = linear_interpolation(z[0], z[1])
return Z
# plot generated images
# RUN EXAMPLE
# load model
model = load_model('saved_models/model_40.h5')
n = 20
results = None
# generate poitns in latent space and interpolate
for i in range(0, n, 2):
interpolated_points = latent_points_interpolate(100, n)
X = model.predict(interpolated_points)
X = (X + 1) / 2.0
if results is None:
results = X
else:
results = np.vstack((results, X))
plot_faces(results, 10)
| 25.736111 | 77 | 0.623314 | # python3
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
def slerp(val, low, high):
    """Spherical Linear Interpolation between vectors ``low`` and ``high``.

    Parameters
    ----------
    val : float in [0, 1] -- interpolation ratio (0 -> low, 1 -> high).
    low, high : ndarray -- endpoint vectors.
    """
    # Fixed: <low> was normalised by the norm of <high> instead of its own
    # norm, which distorted the angle whenever the two vectors had different
    # magnitudes.
    omega = np.arccos(np.clip(np.dot(low/np.linalg.norm(low),
                                     high/np.linalg.norm(high)), -1, 1))
    so = np.sin(omega)
    if so == 0:
        # Parallel endpoints: L'Hopital's rule -> plain linear interpolation.
        return (1.0 - val) * low + val * high
    return np.sin((1.0 - val) * omega) / so * low + \
        np.sin(val * omega) / so * high
def linear_interpolation(p1: int, p2: int, n_steps: int = 10) -> np.ndarray:
    """Return n_steps interpolated points between p1 and p2.

    NOTE(review): despite the name, every step is produced by slerp(), i.e.
    spherical rather than linear interpolation.
    """
    ratios = np.linspace(0, 1, n_steps)
    return np.asarray([slerp(ratio, p1, p2) for ratio in ratios])
# generate random points in latent space
def latent_points_interpolate(latent_dim: int, n_samples: int) -> np.ndarray:
    """Draw random latent points from a standard normal distribution and
    return the interpolation path between the first two of them."""
    # TODO: insert a random seed here for reproducibility (np.random.seed).
    # randn(n_samples, latent_dim) consumes the RNG stream in the same order
    # as randn(latent_dim * n_samples).reshape(...), so results are identical.
    latent = np.random.randn(n_samples, latent_dim)
    return linear_interpolation(latent[0], latent[1])
# plot generated images
def plot_faces(examples, n):
    """Display the first n*n images of <examples> in an n x n grid, axes off."""
    cells = n * n
    for cell in range(cells):
        plt.subplot(n, n, cell + 1)
        plt.axis('off')
        plt.imshow(examples[cell, :, :])
    plt.show()
# RUN EXAMPLE
# Load the generator checkpoint and render a sheet of interpolated faces.
model = load_model('saved_models/model_40.h5')
n = 20
results = None  # accumulated decoded images, stacked across iterations
# Generate points in latent space, interpolate, and decode with the generator.
for i in range(0, n, 2):  # NOTE(review): loop index unused; each pass draws fresh random points
    interpolated_points = latent_points_interpolate(100, n)
    X = model.predict(interpolated_points)
    X = (X + 1) / 2.0  # map pixel values from [-1, 1] to [0, 1]
    if results is None:
        results = X
    else:
        results = np.vstack((results, X))
plot_faces(results, 10)
| 424 | 0 | 45 |
bed94a4405938358952fd4580ac18f459bec2d08 | 3,379 | py | Python | covid_phylo/src/ncbi.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | [
"MIT"
] | 1 | 2021-03-22T17:05:52.000Z | 2021-03-22T17:05:52.000Z | covid_phylo/src/ncbi.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | [
"MIT"
] | 6 | 2020-06-06T01:51:21.000Z | 2022-01-13T02:39:02.000Z | covid_phylo/src/ncbi.py | mrubio-chavarria/covidMonitor | 8d59b17dbff46a781527de181f22b115565e5c2d | [
"MIT"
] | null | null | null | import config
from pprint import pprint
import time
import shelve
import io, sys
import requests
from Bio import SeqIO
ENTREZ_COVID_SEARCH_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=nucleotide&term=txid2697049[Organism:noexp]&retmode=json&retmax=10000'
ENTREZ_NUCL_DOWNLOAD_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nucleotide&id={uids}&retmode=text&rettype={format}'
# update_progress() : Displays or updates a console progress bar
## Accepts a float between 0 and 1. Any int will be converted to a float.
## A value under 0 represents a 'halt'.
## A value at 1 or bigger represents 100%
| 34.131313 | 159 | 0.719444 | import config
from pprint import pprint
import time
import shelve
import io, sys
import requests
from Bio import SeqIO
ENTREZ_COVID_SEARCH_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=nucleotide&term=txid2697049[Organism:noexp]&retmode=json&retmax=10000'
ENTREZ_NUCL_DOWNLOAD_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nucleotide&id={uids}&retmode=text&rettype={format}'
# update_progress() : Displays or updates a console progress bar
## Accepts a float between 0 and 1. Any int will be converted to a float.
## A value under 0 represents a 'halt'.
## A value at 1 or bigger represents 100%
def update_progress(progress):
    """Render a single-line console progress bar for record retrieval.

    Accepts a float in [0, 1] (ints are converted). Non-numeric input resets
    to 0 with an error status, values below 0 print a halt status, and values
    at or above 1 are clamped to 100% with "Done".
    """
    bar_width = 30  # number of cells in the bar; tweak to resize it
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
        status = "error: progress var must be float\n"
    if progress < 0:
        progress = 0
        status = "Halt...\n"
    if progress >= 1:
        progress = 1
        status = "Done\n"
    filled = int(round(bar_width * progress))
    bar = "#" * filled + "-" * (bar_width - filled)
    text = "\r\rRetrieving records: [%s] %.1f%% %s" % (bar, progress * 100, status)
    sys.stdout.write(text)
    sys.stdout.flush()
def _get_raw_sequence(uid, cache_dir=None, format='gb'):
    """Fetch one nucleotide record from Entrez, with an optional shelve cache.

    Parameters
    ----------
    uid : str -- Entrez nucleotide UID.
    cache_dir : pathlib.Path or None -- directory holding the shelve cache.
    format : str -- Entrez rettype (e.g. 'gb').

    Raises
    ------
    RuntimeError when the download response is not HTTP 200.
    """
    shelf = None
    if cache_dir is not None:
        shelve_path = cache_dir / config.RAW_SEQUENCE_SHELVE_FNAME
        shelf = shelve.open(str(shelve_path))
    try:
        if shelf is not None and uid in shelf:
            return shelf[uid]
        nucl_download_url = ENTREZ_NUCL_DOWNLOAD_URL.format(uids=uid, format=format)
        response = requests.get(nucl_download_url)
        if response.status_code != 200:
            msg = 'Something went wrong downloading the nucleotide sequences. '
            # Fixed: the status code was never interpolated (missing f-prefix).
            msg += f'response status: {response.status_code}'
            raise RuntimeError(msg)
        raw_seq = response.text
        if shelf is not None:
            shelf[uid] = raw_seq
        return raw_seq
    finally:
        # Fixed: the shelve file was opened but never closed (resource leak;
        # writes were only flushed implicitly at interpreter exit).
        if shelf is not None:
            shelf.close()
def get_all_covid_nucleotide_seqs(cache_dir=None):
    """Search Entrez for all SARS-CoV-2 nucleotide records and download them.

    Parameters
    ----------
    cache_dir : pathlib.Path or None
        Directory for the raw-sequence shelve cache (created if missing).

    Returns
    -------
    dict with 'request_timestamp' (epoch seconds) and 'seqrecords'
    (list of records parsed from GenBank format).

    Raises
    ------
    RuntimeError if the Entrez search request fails.
    NotImplementedError if the result set is larger than one response page.
    """
    if cache_dir is not None:
        cache_dir.mkdir(exist_ok=True)
    response = requests.get(ENTREZ_COVID_SEARCH_URL)
    if response.status_code != 200:
        msg = 'Something went wrong searching for the SARS-CoV-2 nucleotide sequences. '
        # Fixed: the status code was never interpolated (missing f-prefix).
        msg += f'response status: {response.status_code}'
        raise RuntimeError(msg)
    ncbi_search_result = response.json()['esearchresult']
    n_seqs_found = int(ncbi_search_result['count'])
    print('found ' + str(n_seqs_found) + ' sequences')
    uids = ncbi_search_result['idlist']
    if n_seqs_found > len(uids):
        msg = 'Some sequences were not retrieved, you should implement the search with usehistory'
        raise NotImplementedError(msg)
    seq_records = []
    for uid in uids:
        raw_seq = _get_raw_sequence(uid, cache_dir=cache_dir, format='gb')
        update_progress((len(seq_records) + 1) / n_seqs_found)
        # Each response holds exactly one GenBank record.
        fhand = io.StringIO(raw_seq)
        record = list(SeqIO.parse(fhand, 'gb'))[0]
        seq_records.append(record)
    search_result = {'request_timestamp': time.time(),
                     'seqrecords': seq_records
                     }
    return search_result
| 2,663 | 0 | 68 |
aa4132b9e2e4a4f857d41a1bb2070b7f38ba83de | 6,936 | py | Python | Python Projects/Hotel Reservation System/main.py | lazydinoz/HackFest21 | 84bfbfbb2c75a6511226a87d2e947984db878ba1 | [
"MIT"
] | 1 | 2021-11-12T10:51:19.000Z | 2021-11-12T10:51:19.000Z | Python Projects/Hotel Reservation System/main.py | lazydinoz/HackFest21 | 84bfbfbb2c75a6511226a87d2e947984db878ba1 | [
"MIT"
] | null | null | null | Python Projects/Hotel Reservation System/main.py | lazydinoz/HackFest21 | 84bfbfbb2c75a6511226a87d2e947984db878ba1 | [
"MIT"
] | null | null | null | # Python-MySQL based Hotel Reservation System.
import mysql.connector as sql
# Creating some variables.
mydb = sql.connect(host="localhost", user="root", passwd="", db="python")
mycursor = mydb.cursor()
# MySQL Structure:
# DB : python
# Table Name: hotel
# cust_id
# cust_name
# address
# roomno
# mobileno
# check_in
# check_out
# adv_payment
# room_type
if __name__ == "__main__":
main() | 32.716981 | 231 | 0.559256 | # Python-MySQL based Hotel Reservation System.
import mysql.connector as sql
# Module-level connection and cursor shared by every CRUD helper below.
# NOTE(review): hard-coded localhost/root credentials with an empty password
# -- acceptable for a local demo only, never for production.
mydb = sql.connect(host="localhost", user="root", passwd="", db="python")
mycursor = mydb.cursor()
# MySQL Structure:
# DB : python
# Table Name: hotel
#   Columns (in SELECT order, indices 0..8):
#   cust_id
#   cust_name
#   address
#   roomno
#   mobileno
#   check_in
#   check_out
#   adv_payment
#   room_type
def add(cust_id, cust_name, address, roomno, mobileno, check_in, check_out, adv_payment, room_type):
    """Insert a new reservation row and return a status message.

    Fixes: parameterized query instead of str.format() interpolation (the
    old query was injectable via any text field); bare except narrowed.
    """
    query = "INSERT INTO Hotel values(%s, %s, %s, %s, %s, %s, %s, %s, %s)"
    try:
        mycursor.execute(query, (cust_id, cust_name, address, roomno, mobileno,
                                 check_in, check_out, adv_payment, room_type))
        mydb.commit()
        return "Data inserted sucessfully."
    except Exception:
        return "Error occured while inserting values in database."
def display(cust_id):
    """Return a formatted summary of the reservation for cust_id, or
    "No user found" when the id is unknown.

    Fixes: parameterized query (SQL injection via the old f-string); removed
    the dead `data = mycursor.execute(...)` assignment (execute returns None,
    the rows come from fetchall()).
    """
    query = "SELECT * FROM Hotel WHERE cust_id=%s"
    mycursor.execute(query, (cust_id,))
    data = mycursor.fetchall()
    if data:
        # Returns on the first matching row (cust_id is expected unique).
        for i in data:
            cust_name = i[1]
            address = i[2]
            roomno = i[3]
            mobileno = i[4]
            check_in = i[5]
            check_out = i[6]
            adv_payment = i[7]
            room_type = i[8]
            string = f"""Customer ID: {cust_id}
Customer Name: {cust_name}
Address: {address}
Room Number: {roomno}
Customer Mobile Number: {mobileno}
Check In: {check_in}
Check Out: {check_out}
Advance Payment: {adv_payment}
Room Type: {room_type}"""
            return string
    else:
        return "No user found"
def update(cust_id, cust_name, address, roomno, mobileno, check_in, check_out, adv_payment, room_type):
    """Update the reservation row for cust_id and return a status message.

    Fixes: parameterized query (the old f-string was injectable AND left
    string values such as room_type unquoted, which is invalid SQL);
    `mydb.commit` was referenced without parentheses, so the transaction was
    never committed; bare except narrowed.
    """
    query = ("UPDATE Hotel set cust_name=%s, address=%s, roomno=%s, mobileno=%s, "
             "check_in=%s, check_out=%s, adv_payment=%s, room_type=%s WHERE cust_id=%s")
    try:
        mycursor.execute(query, (cust_name, address, roomno, mobileno,
                                 check_in, check_out, adv_payment, room_type, cust_id))
        mydb.commit()
        return "Data sucessfully updated."
    except Exception:
        return "Error occured while updating values in database."
def delete(cust_id):
    """Delete the reservation row for cust_id and return a status message.

    Fixes: parameterized query (SQL injection); bare except narrowed.
    """
    query = "DELETE FROM Hotel WHERE cust_id=%s"
    try:
        mycursor.execute(query, (cust_id,))
        mydb.commit()
        return "Data deleted sucessfully"
    except Exception:
        return "Error ocurred while deleting data."
def generate(cust_id):
    """Build and return the invoice text for cust_id.

    Asks on stdin for the number of days stayed; the rate depends on room
    type (suite 1500/day, delux 1000/day, otherwise 500/day) and the advance
    payment is subtracted from the total.

    Fixes: parameterized query (SQL injection); unknown cust_id now returns
    "No user found" instead of crashing with a NameError on unset locals;
    local `time` renamed to `days` (it shadowed the stdlib module name).
    """
    query = "SELECT * FROM Hotel WHERE cust_id=%s"
    mycursor.execute(query, (cust_id,))
    data = mycursor.fetchall()
    if not data:
        return "No user found"
    # Last matching row wins (cust_id is expected unique).
    for i in data:
        cust_name = i[1]
        address = i[2]
        roomno = i[3]
        mobileno = i[4]
        check_in = i[5]
        check_out = i[6]
        adv_payment = i[7]
        room_type = i[8]
    days = int(input("For how many days did he/she stayed: "))
    if room_type == "suite":
        amount = 1500 * days
    elif room_type == "delux":
        amount = 1000 * days
    else:
        amount = 500 * days
    price = amount - adv_payment
    string = f"""=====LaKhWaN's Hotel======
==========================
Automated generated Innovoice.
==========================
Customer ID: {cust_id}
Customer Name: {cust_name}
Customer Address: {address}
Room No.: {roomno}
Customer Mobile No.: {mobileno}
Check In: {check_in}
Check Out: {check_out}
Advance Payment: {adv_payment}
Room Type: {room_type}
==========================
Total Price: {price}
==========================
Thanks for visiting LaKhWaN's Hotel
Hoping to see you soon.
"""
    return string
def main():
    """Interactive console menu: dispatch to the CRUD helpers / invoice
    generator based on a numeric choice read from stdin.

    NOTE(review): an unknown menu number falls through silently, and a
    non-numeric answer to any int(input(...)) raises ValueError.
    """
    print("===============================================")
    print("")
    print("Welcome to Hotel Reservation Program.")
    print("What would you like to do?\n")
    print("")
    print("===============================================")
    print("1. Insert data.")
    print("2. Update data.")
    print("3. Display data.")
    print("4. Delete data.")
    print("5. Generate Innovoice.")
    print("===============================================")
    print("Enter here: ", end="")
    todo = int(input())
    if todo == 1:
        # Collect all fields for a new reservation, then insert it.
        cust_id = int(input("Enter customer id here: "))
        cust_name = input("Enter customer name here: ")
        address = input("Enter customer address here: ")
        roomno = int(input("Enter room number here: "))
        mobileno = int(input("Enter customer mobile number here: "))
        check_in = input("Enter check in date here (YYYY-MM-DD): ")
        check_out = input("Enter check out date here (YYYY-MM-DD): ")
        adv_payment = int(input("Enter advance payment here: "))
        # Loop until a valid room-type number (1-3) is chosen.
        while True:
            print("Enter room type here: ")
            room_type = int(
                input("1. Suite(1500/day)\n2. Delux (1000/day)\n3. Standard (500/day)\nEnter: "))
            if room_type == 1:
                room_type = "suite"
                break
            elif room_type == 2:
                room_type = "delux"
                break
            elif room_type == 3:
                room_type = "standard"
                break
            else:
                print("Invalid.")
        print(add(cust_id, cust_name, address, roomno, mobileno,
                  check_in, check_out, adv_payment, room_type))
    elif todo == 2:
        # Show the current row, then collect replacement values.
        cust_id = int(input("Enter the customer id here: "))
        print("Current data:")
        print(display(cust_id))
        cust_name = input("Enter updated customer name here: ")
        address = input("Enter updated customer address here: ")
        roomno = int(input("Enter updated room number here: "))
        mobileno = int(input("Enter updated customer mobile number here: "))
        check_in = input("Enter updated check in date here (YYYY-MM-DD): ")
        check_out = input("Enter updated check out date here (YYYY-MM-DD): ")
        adv_payment = int(input("Enter updated advance payment here: "))
        # Loop until a valid room-type number (1-3) is chosen.
        while True:
            print("Enter updated room type here: ")
            room_type = int(
                input("1. Suite(1500/day)\n2. Delux (1000/day)\n3. Standard (500/day)\nEnter: "))
            if room_type == 1:
                room_type = "suite"
                break
            elif room_type == 2:
                room_type = "delux"
                break
            elif room_type == 3:
                room_type = "standard"
                break
            else:
                print("Invalid.")
        print(update(cust_id, cust_name, address, roomno, mobileno,check_in, check_out, adv_payment, room_type))
    elif todo==3:
        cust_id = int(input("Enter the customer id here: "))
        print(display(cust_id))
    elif todo==4:
        cust_id = int(input("Enter the customer id here: "))
        print(delete(cust_id))
    elif todo==5:
        cust_id = int(input("Enter the customer id here: "))
        print(generate(cust_id))
if __name__ == "__main__":
main() | 6,395 | 0 | 138 |
8ea30f0c7a02b521ad176f96697e27c7c6a83370 | 2,199 | py | Python | source/utils/binning_stats.py | kant/nbis-meta | 45c4f76d520a5a94ccf885c9c9a5a331190036aa | [
"MIT"
] | null | null | null | source/utils/binning_stats.py | kant/nbis-meta | 45c4f76d520a5a94ccf885c9c9a5a331190036aa | [
"MIT"
] | null | null | null | source/utils/binning_stats.py | kant/nbis-meta | 45c4f76d520a5a94ccf885c9c9a5a331190036aa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from Bio.SeqIO import parse
from argparse import ArgumentParser
import pandas as pd
from glob import glob
from os.path import join as opj
from os.path import basename
import numpy as np
import sys
if __name__ == '__main__':
main()
| 28.934211 | 111 | 0.606185 | #!/usr/bin/env python
from Bio.SeqIO import parse
from argparse import ArgumentParser
import pandas as pd
from glob import glob
from os.path import join as opj
from os.path import basename
import numpy as np
import sys
def n50(lengths):
cumulative = 0
size = sum(lengths)
for l in sorted(lengths):
cumulative += l
if float(cumulative) / size >= 0.5:
return l
def bin_stats(f):
size = 0
gc = 0
contig_lengths = []
for record in parse(f, "fasta"):
l = len(record)
seq = record.seq.upper()
g = seq.count("G")
c = seq.count("C")
gc += g + c
size += l
contig_lengths.append(l)
gc_f = round(float(gc) / size * 100, 2)
size_mb = size / 1000000
mean_l = round(np.mean(contig_lengths), 2)
median_l = round(np.median(contig_lengths), 2)
min_l = np.min(contig_lengths)
max_l = np.max(contig_lengths)
n50_l = n50(contig_lengths)
return {'bp': size, 'GC': gc_f, 'Mbp': round(size_mb, 2), 'mean_contig': mean_l, 'median_contig': median_l,
'min_contig': min_l, 'max_contig': max_l, 'n50': n50_l, 'contigs': len(contig_lengths)}
def calculate_bin_stats(files, suffix):
    """Compute per-file assembly stats, keyed by filename without <suffix>.

    Fix: the original used str.rstrip(suffix), which strips any run of
    trailing characters drawn from the suffix's character set rather than the
    literal suffix (e.g. 'data.fa'.rstrip('.fa') -> 'dat'). Now the suffix is
    removed only when the name actually ends with it.
    """
    stats = {}
    for f in files:
        name = basename(f)
        if suffix and name.endswith(suffix):
            name = name[:-len(suffix)]
        stats[name] = bin_stats(f)
    return stats
def main():
    """CLI entry point: print a tab-separated table of per-bin assembly
    statistics for all fasta files in a directory."""
    parser = ArgumentParser()
    parser.add_argument("dir", type=str,
                        help="Directory with genome bins")
    parser.add_argument("--suffix", type=str, default=".fa",
                        help="Suffix for fasta files. Defaults to '.fa'")
    args = parser.parse_args()
    # Collect every fasta file carrying the requested suffix.
    files = glob(opj(args.dir, "*{}".format(args.suffix)))
    stats = calculate_bin_stats(files, args.suffix)
    if len(stats) == 0:
        sys.stderr.write("No bins found\n")
        return
    # Fixed column order for the report; rows sorted largest bin first.
    cols = ["bp", "Mbp", "GC", "contigs", "n50", "mean_contig", "median_contig", "min_contig", "max_contig"]
    df = pd.DataFrame(stats).T[cols]
    df.index.name = "bin"
    df.sort_values("bp", ascending=False, inplace=True)
    df.to_csv(sys.stdout, sep="\t", index=True, header=True)
if __name__ == '__main__':
    main()
| 1,843 | 0 | 92 |
14f99359ce367418e0d0b87b3f07eeccf825108b | 7,212 | py | Python | rhasspy/apps/rhasspy-app-weather/app.py | vigonotion/smarthome | f76afd29493492b1b2f0562a5af492369cd68f5c | [
"Apache-2.0"
] | null | null | null | rhasspy/apps/rhasspy-app-weather/app.py | vigonotion/smarthome | f76afd29493492b1b2f0562a5af492369cd68f5c | [
"Apache-2.0"
] | null | null | null | rhasspy/apps/rhasspy-app-weather/app.py | vigonotion/smarthome | f76afd29493492b1b2f0562a5af492369cd68f5c | [
"Apache-2.0"
] | null | null | null | """Example app to react to an intent to tell you the time."""
import random
import logging
from datetime import datetime
import os
from pyowm import OWM
from pyowm.commons.exceptions import PyOWMError
from pyowm.utils import config
from pyowm.utils import timestamps
from rhasspyhermes.intent import Slot
from rhasspyhermes.nlu import NluIntent
from rhasspyhermes_app import EndSession, HermesApp
_LOGGER = logging.getLogger("WeatherApp")
# MQTT broker connection settings, taken from the environment.
host=os.getenv("MQTT_HOST", "localhost")
port=int(os.getenv("MQTT_PORT", "1883"))
username=os.getenv("MQTT_USERNAME")
password=os.getenv("MQTT_PASSWORD")
# OpenWeatherMap API key and fallback "lat,lon" location string.
owm_key=os.getenv("OWM_KEY")
owm_default_geolocation=os.getenv("OWM_DEFAULT_GEOLOCATION", "52.5065133,13.1445612")
app = HermesApp("WeatherApp", host=host, port=port, username=username, password=password)
config_dict = config.get_default_config()
# Request German weather descriptions from OWM.
config_dict['language'] = 'de'
owm = OWM(owm_key, config_dict)
mgr = owm.weather_manager()
city_id_registry = owm.city_id_registry()
def get_slot(intent: NluIntent, slot_name: str, default=None):
    """Return ``(value, raw_value)`` for the first slot named *slot_name*.

    Falls back to ``(default, None)`` when the intent carries no such slot.
    """
    for candidate in intent.slots:
        if candidate.slot_name == slot_name:
            return (candidate.value.get("value", default), candidate.raw_value)
    return default, None
@app.on_intent("GetTemperature")
async def get_temperature_intent(intent: NluIntent):
    """Answer a GetTemperature intent with the current temperature.

    Reads the optional "geolocation" slot (a "lat,lon" string), queries
    OpenWeatherMap and ends the dialogue session with a spoken German
    summary; on any OWM failure a generic apology is spoken instead.
    """
    raw_geolocation, raw_value = get_slot(intent, "geolocation", owm_default_geolocation)
    geolocation = raw_geolocation.split(",")
    # raw_value is the literal text the user said; fall back to a placeholder
    # when the default location was used.
    poi = raw_value.title() if raw_value else "Default Location"
    _LOGGER.info(f"GetTemperature: {poi} ({geolocation})")
    try:
        weather = mgr.one_call(lat=float(geolocation[0]), lon=float(geolocation[1]))
        temperature_forecast = weather.forecast_daily[0].temperature('celsius')
        temperature = weather.current.temperature('celsius')
        _LOGGER.info("Temperature: %s", temperature)
        # -999 marks "value missing" so the optional sentences below are skipped.
        temp_current = round(temperature.get("temp"))
        temp_max = round(temperature_forecast.get("max", -999))
        temp_min = round(temperature_forecast.get("min", -999))
        temp_feels_like = round(temperature.get("feels_like", -999))
        text_temp = f"In {poi} beträgt die Temperatur aktuell {temp_current} °C." if raw_geolocation != owm_default_geolocation else f"Aktuell sind es {temp_current} °C."
        if temp_feels_like != -999 and temp_feels_like != temp_current:
            text_temp += f" Es fühlt sich an wie {temp_feels_like} °C."
        if temp_min != -999 and temp_min != temp_current:
            text_temp += f" Die Tiefsttemperatur beträgt {temp_min} °C."
        if temp_max != -999 and temp_max != temp_current:
            text_temp += f" Die Höchsttemperatur beträgt {temp_max} °C."
        return EndSession(text_temp)
    except PyOWMError as e:
        _LOGGER.exception("Could not get current temperature.", exc_info=e)
        # Plain string: the previous f-string had no placeholders.
        return EndSession("Etwas ist schiefgelaufen.")
def relative_date_to_str(relative_date: int) -> str:
    """Convert a relative day offset to a human readable German phrase.

    0 means today, positive values lie in the future, negative in the past.
    """
    mapping = {
        -2: "vorgestern",
        -1: "gestern",
        0: "heute",
        1: "morgen",
        2: "übermorgen"
    }
    if relative_date in mapping:
        return mapping[relative_date]
    if relative_date < 0:
        # Bug fix: use the absolute value ("vor 3 Tagen", not "vor -3 Tagen").
        return f"vor {-relative_date} Tagen"
    # Lowercase "in" so the phrase can be embedded mid-sentence
    # ("Wetter in 5 Tagen ...").
    return f"in {relative_date} Tagen"
def relative_time_to_str(relative_time: int) -> str:
    """Convert an hour of the day to a German time-of-day word.

    Unknown hours fall back to a literal clock time ("um 13:00 Uhr").
    """
    named_hours = (
        (0, "nacht"),
        (6, "früh"),
        (9, "morgen"),
        (11, "vormittag"),
        (12, "mittag"),
        (15, "nachmittag"),
        (18, "abend"),
        (22, "spät"),
    )
    for hour, label in named_hours:
        if hour == relative_time:
            return label
    return f"um {relative_time}:00 Uhr"
@app.on_intent("GetWeatherForecast")
async def get_weather_intent(intent: NluIntent):
    """Answer a GetWeatherForecast intent with a spoken German summary.

    Uses the optional "geolocation" and "relative_date" slots. Forecasts are
    available at most 7 days ahead and never for the past; both cases end the
    session with an explanatory message.
    """
    # Sample phrasings (German) the generated answers are modelled on:
    # In H betr temp momentan 3 bei bew himmel. heute nacht höchstwahrscheinlich regenschauer bei tiefst 1 grad
    # Hier ist der Wetterb für morgen in HE höchstwahr gibt es Schnee bei einer Höchsttemperatur von 4 und Tiefsttemperat von 2
    # Sonntag 1C und wechselnd bewölkt usw...
    # Morgen gibt es in Hamburg vereinzelte Schauer bei Temperaturen zwischen 2 und 4 Grad.
    # Morgen wird es in Berlin schneien bei Temperat...
    # In {poi} beträgt die Temperatur {temp} °C bei {condition}. Heute Nacht höchstwahrscheinlich {condition_forecast_night} bei einer Tiefsttemperatur von {} °C.
    # Hier ist der Wetterbericht für
    raw_geolocation, raw_value = get_slot(intent, "geolocation", owm_default_geolocation)
    geolocation = raw_geolocation.split(",")
    # NOTE(review): relative_time and absolute_date are read but not yet used
    # below — presumably reserved for future phrasing; confirm before removing.
    relative_time, _ = get_slot(intent, "relative_time")
    relative_date, _ = get_slot(intent, "relative_date")
    absolute_date, _ = get_slot(intent, "absolute_date")
    poi = raw_value.title() if raw_value else "Default Location"
    _LOGGER.info(f"GetWeatherForecast: {poi} ({geolocation})")
    try:
        weather = mgr.one_call(lat=float(geolocation[0]), lon=float(geolocation[1]))
        # Default to today's forecast; replaced below if a relative date was given.
        forecast_data = weather.forecast_daily[0]
        if relative_date:
            rel = int(relative_date)
            if rel < 0:
                return EndSession(random.choice(["Ich kann leider keine historischen Wetterberichte abrufen.", "Historische Wetterberichte werden zurzeit nicht unterstützt.", "Wetterdaten aus der Vergangenheit sind aktuell nicht verfügbar."]))
            elif rel > 6:
                return EndSession(random.choice(["Wetterdaten sind nur bis maximal 7 Tage in der Zukunft verfügbar.", "Der Wetterbericht kann nur für maximal eine Woche im Voraus abgefragt werden."]))
            forecast_data = weather.forecast_daily[rel]
        temperature = forecast_data.temperature('celsius')
        _LOGGER.info("Temperature: %s", temperature)
        condition = forecast_data.detailed_status
        # -999 marks "value missing" so the optional sentences below are skipped.
        temp_current = round(temperature.get("day"))
        temp_max = round(temperature.get("max", -999))
        temp_min = round(temperature.get("min", -999))
        temp_feels_like = round(temperature.get("feels_like_day", -999))
        is_default_location = raw_geolocation == owm_default_geolocation
        if relative_date:
            poi_data = f" in {poi}" if not is_default_location else ""
            text_temp = f"Wetter {relative_date_to_str(int(relative_date))}{poi_data}: {condition} bei Temperaturen zwischen {temp_min} und {temp_max} Grad."
        else:
            poi_data = f"In {poi} ist es {condition.lower()}" if not is_default_location else condition
            text_temp = f"{poi_data} bei aktuell {temp_current} Grad. Es fühlt sich an wie {temp_feels_like} Grad."
            if temp_min != -999 and temp_min != temp_current:
                text_temp += f" Die Tiefsttemperatur beträgt {temp_min} Grad."
            if temp_max != -999 and temp_max != temp_current:
                text_temp += f" Die Höchsttemperatur beträgt {temp_max} Grad."
        return EndSession(text_temp)
    except PyOWMError as e:
        _LOGGER.exception("Could not get current temperature.", exc_info=e)
        return EndSession(f"Etwas ist schiefgelaufen.")
_LOGGER.info(f"Starting app {app.client_name}.")
app.run() | 37.5625 | 243 | 0.687327 | """Example app to react to an intent to tell you the time."""
import random
import logging
from datetime import datetime
import os
from pyowm import OWM
from pyowm.commons.exceptions import PyOWMError
from pyowm.utils import config
from pyowm.utils import timestamps
from rhasspyhermes.intent import Slot
from rhasspyhermes.nlu import NluIntent
from rhasspyhermes_app import EndSession, HermesApp
_LOGGER = logging.getLogger("WeatherApp")
host=os.getenv("MQTT_HOST", "localhost")
port=int(os.getenv("MQTT_PORT", "1883"))
username=os.getenv("MQTT_USERNAME")
password=os.getenv("MQTT_PASSWORD")
owm_key=os.getenv("OWM_KEY")
owm_default_geolocation=os.getenv("OWM_DEFAULT_GEOLOCATION", "52.5065133,13.1445612")
app = HermesApp("WeatherApp", host=host, port=port, username=username, password=password)
config_dict = config.get_default_config()
config_dict['language'] = 'de'
owm = OWM(owm_key, config_dict)
mgr = owm.weather_manager()
city_id_registry = owm.city_id_registry()
def get_slot(intent: NluIntent, slot_name: str, default=None):
    """Return ``(value, raw_value)`` for the first slot named *slot_name*.

    Falls back to ``(default, None)`` when the intent carries no such slot.
    """
    for candidate in intent.slots:
        if candidate.slot_name == slot_name:
            return (candidate.value.get("value", default), candidate.raw_value)
    return default, None
@app.on_intent("GetTemperature")
async def get_temperature_intent(intent: NluIntent):
    """Answer a GetTemperature intent with the current temperature.

    Reads the optional "geolocation" slot (a "lat,lon" string), queries
    OpenWeatherMap and ends the dialogue session with a spoken German
    summary; on any OWM failure a generic apology is spoken instead.
    """
    raw_geolocation, raw_value = get_slot(intent, "geolocation", owm_default_geolocation)
    geolocation = raw_geolocation.split(",")
    # raw_value is the literal text the user said; fall back to a placeholder
    # when the default location was used.
    poi = raw_value.title() if raw_value else "Default Location"
    _LOGGER.info(f"GetTemperature: {poi} ({geolocation})")
    try:
        weather = mgr.one_call(lat=float(geolocation[0]), lon=float(geolocation[1]))
        temperature_forecast = weather.forecast_daily[0].temperature('celsius')
        temperature = weather.current.temperature('celsius')
        _LOGGER.info("Temperature: %s", temperature)
        # -999 marks "value missing" so the optional sentences below are skipped.
        temp_current = round(temperature.get("temp"))
        temp_max = round(temperature_forecast.get("max", -999))
        temp_min = round(temperature_forecast.get("min", -999))
        temp_feels_like = round(temperature.get("feels_like", -999))
        text_temp = f"In {poi} beträgt die Temperatur aktuell {temp_current} °C." if raw_geolocation != owm_default_geolocation else f"Aktuell sind es {temp_current} °C."
        if temp_feels_like != -999 and temp_feels_like != temp_current:
            text_temp += f" Es fühlt sich an wie {temp_feels_like} °C."
        if temp_min != -999 and temp_min != temp_current:
            text_temp += f" Die Tiefsttemperatur beträgt {temp_min} °C."
        if temp_max != -999 and temp_max != temp_current:
            text_temp += f" Die Höchsttemperatur beträgt {temp_max} °C."
        return EndSession(text_temp)
    except PyOWMError as e:
        _LOGGER.exception("Could not get current temperature.", exc_info=e)
        # Plain string: the previous f-string had no placeholders.
        return EndSession("Etwas ist schiefgelaufen.")
def relative_date_to_str(relative_date: int) -> str:
    """Convert a relative day offset to a human readable German phrase.

    0 means today, positive values lie in the future, negative in the past.
    """
    mapping = {
        -2: "vorgestern",
        -1: "gestern",
        0: "heute",
        1: "morgen",
        2: "übermorgen"
    }
    if relative_date in mapping:
        return mapping[relative_date]
    if relative_date < 0:
        # Bug fix: use the absolute value ("vor 3 Tagen", not "vor -3 Tagen").
        return f"vor {-relative_date} Tagen"
    # Lowercase "in" so the phrase can be embedded mid-sentence
    # ("Wetter in 5 Tagen ...").
    return f"in {relative_date} Tagen"
def relative_time_to_str(relative_time: int) -> str:
    """Convert an hour of the day to a German time-of-day word.

    Unknown hours fall back to a literal clock time ("um 13:00 Uhr").
    """
    named_hours = (
        (0, "nacht"),
        (6, "früh"),
        (9, "morgen"),
        (11, "vormittag"),
        (12, "mittag"),
        (15, "nachmittag"),
        (18, "abend"),
        (22, "spät"),
    )
    for hour, label in named_hours:
        if hour == relative_time:
            return label
    return f"um {relative_time}:00 Uhr"
@app.on_intent("GetWeatherForecast")
async def get_weather_intent(intent: NluIntent):
    """Answer a GetWeatherForecast intent with a spoken German summary.

    Uses the optional "geolocation" and "relative_date" slots. Forecasts are
    available at most 7 days ahead and never for the past; both cases end the
    session with an explanatory message.
    """
    # Sample phrasings (German) the generated answers are modelled on:
    # In H betr temp momentan 3 bei bew himmel. heute nacht höchstwahrscheinlich regenschauer bei tiefst 1 grad
    # Hier ist der Wetterb für morgen in HE höchstwahr gibt es Schnee bei einer Höchsttemperatur von 4 und Tiefsttemperat von 2
    # Sonntag 1C und wechselnd bewölkt usw...
    # Morgen gibt es in Hamburg vereinzelte Schauer bei Temperaturen zwischen 2 und 4 Grad.
    # Morgen wird es in Berlin schneien bei Temperat...
    # In {poi} beträgt die Temperatur {temp} °C bei {condition}. Heute Nacht höchstwahrscheinlich {condition_forecast_night} bei einer Tiefsttemperatur von {} °C.
    # Hier ist der Wetterbericht für
    raw_geolocation, raw_value = get_slot(intent, "geolocation", owm_default_geolocation)
    geolocation = raw_geolocation.split(",")
    # NOTE(review): relative_time and absolute_date are read but not yet used
    # below — presumably reserved for future phrasing; confirm before removing.
    relative_time, _ = get_slot(intent, "relative_time")
    relative_date, _ = get_slot(intent, "relative_date")
    absolute_date, _ = get_slot(intent, "absolute_date")
    poi = raw_value.title() if raw_value else "Default Location"
    _LOGGER.info(f"GetWeatherForecast: {poi} ({geolocation})")
    try:
        weather = mgr.one_call(lat=float(geolocation[0]), lon=float(geolocation[1]))
        # Default to today's forecast; replaced below if a relative date was given.
        forecast_data = weather.forecast_daily[0]
        if relative_date:
            rel = int(relative_date)
            if rel < 0:
                return EndSession(random.choice(["Ich kann leider keine historischen Wetterberichte abrufen.", "Historische Wetterberichte werden zurzeit nicht unterstützt.", "Wetterdaten aus der Vergangenheit sind aktuell nicht verfügbar."]))
            elif rel > 6:
                return EndSession(random.choice(["Wetterdaten sind nur bis maximal 7 Tage in der Zukunft verfügbar.", "Der Wetterbericht kann nur für maximal eine Woche im Voraus abgefragt werden."]))
            forecast_data = weather.forecast_daily[rel]
        temperature = forecast_data.temperature('celsius')
        _LOGGER.info("Temperature: %s", temperature)
        condition = forecast_data.detailed_status
        # -999 marks "value missing" so the optional sentences below are skipped.
        temp_current = round(temperature.get("day"))
        temp_max = round(temperature.get("max", -999))
        temp_min = round(temperature.get("min", -999))
        temp_feels_like = round(temperature.get("feels_like_day", -999))
        is_default_location = raw_geolocation == owm_default_geolocation
        if relative_date:
            poi_data = f" in {poi}" if not is_default_location else ""
            text_temp = f"Wetter {relative_date_to_str(int(relative_date))}{poi_data}: {condition} bei Temperaturen zwischen {temp_min} und {temp_max} Grad."
        else:
            poi_data = f"In {poi} ist es {condition.lower()}" if not is_default_location else condition
            text_temp = f"{poi_data} bei aktuell {temp_current} Grad. Es fühlt sich an wie {temp_feels_like} Grad."
            if temp_min != -999 and temp_min != temp_current:
                text_temp += f" Die Tiefsttemperatur beträgt {temp_min} Grad."
            if temp_max != -999 and temp_max != temp_current:
                text_temp += f" Die Höchsttemperatur beträgt {temp_max} Grad."
        return EndSession(text_temp)
    except PyOWMError as e:
        _LOGGER.exception("Could not get current temperature.", exc_info=e)
        return EndSession(f"Etwas ist schiefgelaufen.")
_LOGGER.info(f"Starting app {app.client_name}.")
app.run() | 0 | 0 | 0 |
d6c34318c7db3e90cbc88eba815aecb24b40245b | 2,719 | py | Python | editor.py | Poggergg/verrus2 | c15962bf7bbe43f4d7ec29d2efbbe65466433429 | [
"MIT"
] | 1 | 2021-10-31T20:20:42.000Z | 2021-10-31T20:20:42.000Z | editor.py | Poggergg/verrus2 | c15962bf7bbe43f4d7ec29d2efbbe65466433429 | [
"MIT"
] | null | null | null | editor.py | Poggergg/verrus2 | c15962bf7bbe43f4d7ec29d2efbbe65466433429 | [
"MIT"
] | null | null | null |
@client.command()
@commands.has_permissions(kick_members=True)
@client.command()
@commands.has_permissions(kick_members=True) | 29.236559 | 151 | 0.679294 | def convert(time):
pos = ["s","m","h","d"]
time_dict = {"s" : 1, "m" : 60, "h" : 3600, "d": 3600*24}
unit = time[-1]
if unit not in pos:
return -1
try:
val = int(time[:-1])
except:
return -2
return val * time_dict[unit]
@client.command()
@commands.has_permissions(kick_members=True)
async def giveaway(ctx):
    """Interactively set up and run a giveaway (channel, duration, prize)."""
    await ctx.send("Let's start with this giveaway! Answer these questions within 15 seconds!")
    questions = ["Which channel should it be hosted in?", "What should be the duration of the giveaway? (s|m|h|d)", "What is the prize of the giveaway?"]
    answers = []
    def check(m):
        # Only accept replies from the command author in the same channel.
        return m.author == ctx.author and m.channel == ctx.channel
    for i in questions:
        await ctx.send(i)
        try:
            # Bug fix: the event name was misspelled 'messsage', which never
            # fires in discord.py, so every question always timed out.
            msg = await client.wait_for('message', timeout=15.0, check=check)
        except asyncio.TimeoutError:
            await ctx.send('You didn\'t answer in time, please be quicker next time!')
            return
        else:
            answers.append(msg.content)
    try:
        # Channel mentions look like <#1234567890>; strip "<#" and ">".
        c_id = int(answers[0][2:-1])
    except ValueError:
        await ctx.send(f"You didn't mention a channel properly. Do it like this {ctx.channel.mention} next time.")
        return
    channel = client.get_channel(c_id)
    time = convert(answers[1])
    if time == -1:
        await ctx.send(f"You didn't answer with a proper unit. Use (s|m|h|d) next time!")
        return
    elif time == -2:
        await ctx.send(f"The time just be an integer. Please enter an integer next time.")
        return
    prize = answers[2]
    await ctx.send(f"The giveaway will be in {channel.mention} and will last {answers[1]} seconds!")
    # Bug fix: discord.Embed (discord.embed raised AttributeError).
    embed = discord.Embed(title = "Giveaway!", description = f"{prize}", color = ctx.author.color)
    embed.add_field(name = "Hosted by:", value = ctx.author.mention)
    embed.set_footer(text = f"Ends {answers[1]} from now!")
    my_msg = await channel.send(embed = embed)
    await my_msg.add_reaction("🎉")
    await asyncio.sleep(time)
    # Re-fetch the message so the reaction list is up to date.
    new_msg = await channel.fetch_message(my_msg.id)
    users = await new_msg.reactions[0].users().flatten()
    # The bot's own reaction must not win.
    users.pop(users.index(client.user))
    winner = random.choice(users)
    await channel.send(f"Congratulations! {winner.mention} won the prize: {prize}!")
@client.command()
@commands.has_permissions(kick_members=True)
async def reroll(ctx, channel : discord.TextChannel, id_ : int):
    """Pick a new winner for a finished giveaway message."""
    try:
        new_msg = await channel.fetch_message(id_)
    except Exception:
        await ctx.send("The ID that was entered was incorrect, make sure you have entered the correct giveaway message ID.")
        # Bug fix: without this return, execution fell through and
        # `new_msg` below raised NameError.
        return
    users = await new_msg.reactions[0].users().flatten()
    # The bot's own reaction must not win.
    users.pop(users.index(client.user))
    winner = random.choice(users)
    await channel.send(f"Congratulations the new winner is: {winner.mention} for the giveaway rerolled!")
08169585af6b67a892b804b9525b9c55a21fa2eb | 5,436 | py | Python | vsutils.py | jeremypoulter/vsutils | ec855ae839fa58dafa92546d4e503c994240b2d6 | [
"Apache-2.0"
] | 2 | 2016-07-25T22:25:25.000Z | 2020-11-28T16:42:04.000Z | vsutils.py | jeremypoulter/vsutils | ec855ae839fa58dafa92546d4e503c994240b2d6 | [
"Apache-2.0"
] | null | null | null | vsutils.py | jeremypoulter/vsutils | ec855ae839fa58dafa92546d4e503c994240b2d6 | [
"Apache-2.0"
] | 1 | 2020-03-30T15:44:33.000Z | 2020-03-30T15:44:33.000Z | import vapoursynth as vs
import math
import functools
import sys
| 46.067797 | 259 | 0.706402 | import vapoursynth as vs
import math
import functools
import sys
class vsutils(object):
    """Helper utilities for common VapourSynth clip operations:
    overlays, subtitles, fades, colorbar test patterns and frame/time
    overlays."""
    def __init__(self):
        # Shared VapourSynth core used by all helpers.
        self.core = vs.get_core()
    def Overlay(self, bottom, top, x, y):
        """Composite `top` onto `bottom` with its top-left corner at (x, y)."""
        # crop the top clip if it extends past the bottom clip's edges
        if x + top.width > bottom.width:
            top = top.std.CropRel(right=((x + top.width) - bottom.width))
        if y + top.height > bottom.height:
            # Bug fix: vertical overflow must be cropped from the bottom edge;
            # the original cropped the right edge here.
            top = top.std.CropRel(bottom=((y + top.height) - bottom.height))
        # create a mask for the overlay (white where the top clip goes)
        mask = self.core.std.BlankClip(clip=top, format=vs.GRAY8, color=255).std.AddBorders(x, bottom.width - (x + top.width), y, bottom.height - (y + top.height), color=0)
        # pad the top clip to the full size of the bottom clip
        top = top.std.AddBorders(x, bottom.width - (x + top.width), y, bottom.height - (y + top.height))
        # return the merged clip
        return self.core.std.MaskedMerge(bottom, top, mask)
    def Subtitle(self, clip, message, x, y, font="sans-serif", size=20, align=7, primary_colour="00FFFFFF", secondary_colour="00000000FF", outline_colour="00000000", back_colour="00000000"):
        """Render `message` at (x, y) using ASS subtitle styling.

        Colours are ASS "&H..." hex strings.
        NOTE(review): secondary_colour's default has 10 hex digits where ASS
        colours use 8 — confirm intended value.
        """
        return clip.assvapour.Subtitle(
            "{\\pos("+str(x)+","+str(y)+")}{\\an"+str(align)+"}" + message,
            style=font+","+str(size)+",&H"+primary_colour+",&H"+secondary_colour+",&H"+outline_colour+",&H"+back_colour+",0,0,0,0,100,100,0,0,1,2,0,7,10,10,10,1",
            blend=True)
    def FadeEachFrame(self, clipa, clipb, n, number_frames):
        """Per-frame merge callback: weight of clipb grows linearly with n."""
        weight = (n+1)/(number_frames+1)
        return self.core.std.Merge(clipa, clipb, weight=[weight, weight])
    def FadeIn(self, clip, duration):
        """Fade `clip` in from black over `duration` seconds."""
        fps = clip.fps_num/clip.fps_den
        number_frames = math.ceil(duration * fps)
        return self.CrossFade(self.core.std.BlankClip(clip, length=number_frames), clip, duration)
    def FadeOut(self, clip, duration):
        """Fade `clip` out to black over `duration` seconds."""
        fps = clip.fps_num/clip.fps_den
        number_frames = math.ceil(duration * fps)
        return self.CrossFade(clip, self.core.std.BlankClip(clip, length=number_frames), duration)
    def CrossFade(self, clip1, clip2, duration):
        """Cross-fade from the end of `clip1` into the start of `clip2`."""
        fps = clip1.fps_num/clip1.fps_den
        number_frames = math.floor(duration * fps) - 2
        # Overlap region: the tail of clip1 is blended with the head of clip2.
        clip1_start_frame = clip1.num_frames - (number_frames + 1)
        clip1_end_frame = clip1.num_frames - 1
        clip2_start_frame = 1
        clip2_end_frame = number_frames + 1
        a=clip1[0:clip1_start_frame]
        b1=clip1[clip1_start_frame:clip1_end_frame]
        b2=clip2[clip2_start_frame:clip2_end_frame]
        b=self.core.std.FrameEval(b1, functools.partial(self.FadeEachFrame, clipa=b1, clipb=b2, number_frames=number_frames))
        c=clip2[clip2_end_frame:clip2.num_frames]
        return a+b+c
    def Colorbars(self, width=640, height=480, fpsnum=25, fpsden=1, format=vs.RGB24, duration=600):
        """Generate an SMPTE-style colorbar test pattern clip."""
        length = round((duration*fpsnum)/fpsden)
        colorbars = self.core.std.BlankClip(width=width, height=height, fpsnum=fpsnum, fpsden=fpsden, format=vs.RGB24, color=[16,16,16], length=length)
        # Bar geometry: 7 wide bars on top, a thin middle strip, bottom row.
        top_width = math.ceil(width / 7.0)
        bottom_width = math.ceil(width / 6.0)
        bottom_width_small = math.ceil(top_width/3)
        top_height = math.ceil(height * 2 / 3.0)
        bottom_height = math.ceil(height / 4.0)
        mid_height = height - top_height - bottom_height
        # 75% colour bars (RGB values in limited range).
        top_colors = [
            [180, 180, 180],
            [180, 180, 16],
            [16, 180, 180],
            [16, 180, 16],
            [180, 16, 180],
            [180, 16, 16],
            [16, 16, 180]
        ]
        for (i, color) in enumerate(top_colors):
            colorbars = self.Overlay(colorbars, self.core.std.BlankClip(width=top_width, height=top_height, fpsnum=fpsnum, fpsden=fpsden, format=vs.RGB24, color=color, length=length), i * top_width, 0)
        mid_colors = [
            [16, 16, 180],
            [16, 16, 16],
            [180, 16, 180],
            [16, 16, 16],
            [16, 180, 180],
            [16, 16, 16],
            [180, 180, 180]
        ]
        for (i, color) in enumerate(mid_colors):
            colorbars = self.Overlay(colorbars, self.core.std.BlankClip(width=top_width, height=mid_height, fpsnum=fpsnum, fpsden=fpsden, format=vs.RGB24, color=color, length=length), i * top_width, top_height)
        bottom_colors = [
            [0, 58, 98],
            [235, 235, 235],
            [75, 15, 126]
        ]
        for (i, color) in enumerate(bottom_colors):
            colorbars = self.Overlay(colorbars, self.core.std.BlankClip(width=bottom_width, height=bottom_height, fpsnum=fpsnum, fpsden=fpsden, format=vs.RGB24, color=color, length=length), i * bottom_width, top_height + mid_height)
        # Small PLUGE-style patches at the bottom right.
        colorbars = self.Overlay(colorbars, self.core.std.BlankClip(width=bottom_width_small, height=bottom_height, fpsnum=fpsnum, fpsden=fpsden, format=vs.RGB24, color=[0,0,0], length=length), 5 * top_width, top_height + mid_height)
        colorbars = self.Overlay(colorbars, self.core.std.BlankClip(width=bottom_width_small, height=bottom_height, fpsnum=fpsnum, fpsden=fpsden, format=vs.RGB24, color=[25,25,25], length=length), (5 * top_width) + (bottom_width_small * 2), top_height + mid_height)
        # Convert to the caller's requested format at the end.
        return colorbars.resize.Point(format=format, matrix_s="709")
    def GetFrameTime(self, clip, frame_number):
        """Return `frame_number`'s timestamp as "M:SS.mmm"."""
        clip_fps = clip.fps_num / clip.fps_den
        all_in_seconds = frame_number / clip_fps
        minutes = math.floor(all_in_seconds / 60)
        seconds = math.floor(all_in_seconds) % 60
        milliseconds = math.floor((all_in_seconds - math.floor(all_in_seconds)) * 1000)
        return "{:1.0f}:{:02.0f}.{:03.0f}".format(minutes, seconds, milliseconds)
    def TimeEachFrame(self, clip, n, x, y, align):
        """Per-frame callback: burn "frame - timestamp" into frame n."""
        time = self.GetFrameTime(clip, n)
        frame = str(n)
        text = frame+" - "+time
        clip = self.Subtitle(clip, text, x, y, align=align)
        return clip
    def ShowFrameAndTime(self, clip, x=0, y=0, align=7):
        """Overlay each frame's number and timestamp onto the clip."""
        return clip.std.FrameEval(functools.partial(self.TimeEachFrame, clip=clip, x=x, y=y, align=align))
| 5,075 | 1 | 295 |
97e4facd18b9e8549012062092702b2831d3843e | 5,848 | py | Python | mediagoblin/media_types/__init__.py | Piratas/biblioteca | 1770feba67173a3beb43853eed33586d47abeac1 | [
"CC0-1.0"
] | null | null | null | mediagoblin/media_types/__init__.py | Piratas/biblioteca | 1770feba67173a3beb43853eed33586d47abeac1 | [
"CC0-1.0"
] | 1 | 2018-01-06T18:12:02.000Z | 2018-01-06T18:27:31.000Z | mediagoblin/media_types/__init__.py | Piratas/biblioteca | 1770feba67173a3beb43853eed33586d47abeac1 | [
"CC0-1.0"
] | 1 | 2019-05-13T15:04:10.000Z | 2019-05-13T15:04:10.000Z | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import shutil
import tempfile
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__)
class TypeNotFound(FileTypeNotSupported):
'''Raised if no mediagoblin plugin supporting this file type was found'''
pass
class MissingComponents(FileTypeNotSupported):
'''Raised if plugin found, but it can't process the file for some reason'''
pass
class MediaManagerBase(object):
"Base class for all media managers"
# Please override in actual media managers
media_fetch_order = None
@staticmethod
def sniff_media_contents(media_file, filename):
'''
Check media contents using 'expensive' scanning. For example, for video it
is checking the contents using gstreamer
:param media_file: file-like object with 'name' attribute
:param filename: expected filename of the media
'''
media_type = hook_handle('sniff_handler', media_file, filename)
if media_type:
_log.info('{0} accepts the file'.format(media_type))
return media_type, hook_handle(('media_manager', media_type))
else:
_log.debug('{0} did not accept the file'.format(media_type))
raise FileTypeNotSupported(
# TODO: Provide information on which file types are supported
_(u'Sorry, I don\'t support that file type :('))
def get_media_type_and_manager(filename):
    '''
    Try to find the media type based on the file name, extension
    specifically. This is used as a speedup, the sniffing functionality
    then falls back on more in-depth bitsniffing of the source file.

    This hook is deprecated, 'type_match_handler' should be used instead
    '''
    if os.path.basename(filename).find('.') > 0:
        # Get the file extension
        ext = os.path.splitext(filename)[1].lower()
        # Omit the dot from the extension and match it against
        # the media manager. Call the hook once and reuse the result
        # (the original invoked the hook twice for the same lookup).
        result = hook_handle('get_media_type_and_manager', ext[1:])
        if result:
            return result
    else:
        _log.info('File {0} has no file extension, let\'s hope the sniffers get it.'.format(
            filename))
    raise TypeNotFound(
        _(u'Sorry, I don\'t support that file type :('))
def type_match_handler(media_file, filename):
    '''Check media file by name and then by content

    Try to find the media type based on the file name, extension
    specifically. After that, if media type is one of supported ones, check the
    contents of the file
    '''
    if os.path.basename(filename).find('.') > 0:
        # Get the file extension
        ext = os.path.splitext(filename)[1].lower()
        # Omit the dot from the extension and match it against
        # the media manager
        hook_result = hook_handle('type_match_handler', ext[1:])
        if hook_result:
            _log.info('Info about file found, checking further')
            MEDIA_TYPE, Manager, sniffer = hook_result
            if not sniffer:
                _log.debug('sniffer is None, plugin trusts the extension')
                return MEDIA_TYPE, Manager
            _log.info('checking the contents with sniffer')
            try:
                sniffer(media_file)
                _log.info('checked, found')
                return MEDIA_TYPE, Manager
            except Exception as e:
                _log.info('sniffer says it will not accept the file')
                _log.debug(e)
                # Re-raise the sniffer's own exception so the caller sees
                # the specific reason the content check failed.
                raise
        else:
            _log.info('No plugins handled extension {0}'.format(ext))
    else:
        _log.info('File {0} has no known file extension, let\'s hope '
                  'the sniffers get it.'.format(filename))
    # Fall-through for both "unknown extension" and "no plugin matched".
    raise TypeNotFound(_(u'Sorry, I don\'t support that file type :('))
def sniff_media(media_file, filename):
    '''
    Iterate through the enabled media types and find those suited
    for a certain file.

    Tries, in order: content+extension matching (type_match_handler), the
    deprecated extension-only lookup, then full content sniffing.
    '''
    # copy the contents to a .name-enabled temporary file for further checks
    # TODO: there are cases when copying is not required
    tmp_media_file = tempfile.NamedTemporaryFile()
    shutil.copyfileobj(media_file, tmp_media_file)
    # Rewind the *source* stream so callers can still read it from the start.
    # NOTE(review): tmp_media_file is left positioned at EOF after the copy;
    # sniffers presumably seek before reading -- confirm.
    media_file.seek(0)
    try:
        return type_match_handler(tmp_media_file, filename)
    except TypeNotFound as e:
        _log.info('No plugins using two-step checking found')
        # keep trying, using old `get_media_type_and_manager`
        try:
            return get_media_type_and_manager(filename)
        except TypeNotFound as e:
            # again, no luck. Do it expensive way
            _log.info('No media handler found by file extension')
            _log.info('Doing it the expensive way...')
            return sniff_media_contents(tmp_media_file, filename)
| 35.877301 | 92 | 0.679378 | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import shutil
import tempfile
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__)
class FileTypeNotSupported(Exception):
    '''Base error for files that cannot be handled by any media type.'''
    pass
class TypeNotFound(FileTypeNotSupported):
    '''Raised if no mediagoblin plugin supporting this file type was found'''
    pass
class MissingComponents(FileTypeNotSupported):
    '''Raised if plugin found, but it can't process the file for some reason'''
    pass
class MediaManagerBase(object):
    "Base class for all media managers"
    # Please override in actual media managers
    media_fetch_order = None
    @staticmethod
    def sniff_handler(*args, **kwargs):
        # Default sniffer rejects everything; media-type plugins override it.
        return False
    def __init__(self, entry):
        # Store the entry this manager operates on.
        self.entry = entry
    def __getitem__(self, i):
        # Dict-style access: manager["attr"] -> manager.attr
        return getattr(self, i)
    def __contains__(self, i):
        # Membership test: "attr" in manager -> hasattr(manager, "attr")
        return hasattr(self, i)
def sniff_media_contents(media_file, filename):
    '''
    Check media contents using 'expensive' scanning. For example, for video it
    is checking the contents using gstreamer

    :param media_file: file-like object with 'name' attribute
    :param filename: expected filename of the media
    '''
    media_type = hook_handle('sniff_handler', media_file, filename)
    if not media_type:
        # No plugin claimed the file; reject it.
        _log.debug('{0} did not accept the file'.format(media_type))
        raise FileTypeNotSupported(
            # TODO: Provide information on which file types are supported
            _(u'Sorry, I don\'t support that file type :('))
    _log.info('{0} accepts the file'.format(media_type))
    return media_type, hook_handle(('media_manager', media_type))
def get_media_type_and_manager(filename):
    '''
    Try to find the media type based on the file name, extension
    specifically. This is used as a speedup, the sniffing functionality
    then falls back on more in-depth bitsniffing of the source file.

    This hook is deprecated, 'type_match_handler' should be used instead
    '''
    if os.path.basename(filename).find('.') > 0:
        # Get the file extension
        ext = os.path.splitext(filename)[1].lower()
        # Omit the dot from the extension and match it against
        # the media manager. Call the hook once and reuse the result
        # (the original invoked the hook twice for the same lookup).
        result = hook_handle('get_media_type_and_manager', ext[1:])
        if result:
            return result
    else:
        _log.info('File {0} has no file extension, let\'s hope the sniffers get it.'.format(
            filename))
    raise TypeNotFound(
        _(u'Sorry, I don\'t support that file type :('))
def type_match_handler(media_file, filename):
    '''Check media file by name and then by content

    Try to find the media type based on the file name, extension
    specifically. After that, if media type is one of supported ones, check the
    contents of the file
    '''
    if os.path.basename(filename).find('.') > 0:
        # Get the file extension
        ext = os.path.splitext(filename)[1].lower()
        # Omit the dot from the extension and match it against
        # the media manager
        hook_result = hook_handle('type_match_handler', ext[1:])
        if hook_result:
            _log.info('Info about file found, checking further')
            MEDIA_TYPE, Manager, sniffer = hook_result
            if not sniffer:
                _log.debug('sniffer is None, plugin trusts the extension')
                return MEDIA_TYPE, Manager
            _log.info('checking the contents with sniffer')
            try:
                sniffer(media_file)
                _log.info('checked, found')
                return MEDIA_TYPE, Manager
            except Exception as e:
                _log.info('sniffer says it will not accept the file')
                _log.debug(e)
                # Re-raise the sniffer's own exception so the caller sees
                # the specific reason the content check failed.
                raise
        else:
            _log.info('No plugins handled extension {0}'.format(ext))
    else:
        _log.info('File {0} has no known file extension, let\'s hope '
                  'the sniffers get it.'.format(filename))
    # Fall-through for both "unknown extension" and "no plugin matched".
    raise TypeNotFound(_(u'Sorry, I don\'t support that file type :('))
def sniff_media(media_file, filename):
    '''
    Iterate through the enabled media types and find those suited
    for a certain file.

    Tries, in order: content+extension matching (type_match_handler), the
    deprecated extension-only lookup, then full content sniffing.
    '''
    # copy the contents to a .name-enabled temporary file for further checks
    # TODO: there are cases when copying is not required
    tmp_media_file = tempfile.NamedTemporaryFile()
    shutil.copyfileobj(media_file, tmp_media_file)
    # Rewind the *source* stream so callers can still read it from the start.
    # NOTE(review): tmp_media_file is left positioned at EOF after the copy;
    # sniffers presumably seek before reading -- confirm.
    media_file.seek(0)
    try:
        return type_match_handler(tmp_media_file, filename)
    except TypeNotFound as e:
        _log.info('No plugins using two-step checking found')
        # keep trying, using old `get_media_type_and_manager`
        try:
            return get_media_type_and_manager(filename)
        except TypeNotFound as e:
            # again, no luck. Do it expensive way
            _log.info('No media handler found by file extension')
            _log.info('Doing it the expensive way...')
            return sniff_media_contents(tmp_media_file, filename)
| 140 | 26 | 130 |
b728e80eaed69698b5241319e605f40f86b51911 | 813 | py | Python | killer/app/killer.py | GeorgeNiece/docker-experiments | acd736ce28be03e1aceebbfd8da1a6d316659b51 | [
"MIT"
] | 1 | 2019-09-10T01:24:31.000Z | 2019-09-10T01:24:31.000Z | killer/app/killer.py | reselbob/dockerdemos | f6bb4b15f78000040e2c92aa41baccb666f595d8 | [
"MIT"
] | 4 | 2020-09-11T05:54:14.000Z | 2020-09-11T05:55:10.000Z | killer/app/killer.py | GeorgeNiece/docker-experiments | acd736ce28be03e1aceebbfd8da1a6d316659b51 | [
"MIT"
] | 1 | 2019-11-30T16:37:21.000Z | 2019-11-30T16:37:21.000Z | import time
import datetime
import os
from signal import signal, SIGINT, SIGTERM
from sys import exit
app_name = os.environ.get('APP_NAME', 'UNKNOWN')
if __name__ == '__main__':
# Tell Python to run the handler() function when SIGINT is received
signal(SIGINT, handler)
signal(SIGTERM, handler)
print('Running App {} at {}').format(app_name, datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
while True:
dt = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
message = '{} pulse {}'.format(app_name, dt)
print message
time.sleep(1)
| 31.269231 | 105 | 0.649446 | import time
import datetime
import os
from signal import signal, SIGINT, SIGTERM
from sys import exit
app_name = os.environ.get('APP_NAME', 'UNKNOWN')
def handler(signal_received, frame):
    # SIGINT/SIGTERM handler (registered in the __main__ block below):
    # print a timestamped shutdown message for this app, then exit 0.
    # NOTE: `print message` is Python 2 statement syntax -- this script
    # does not run under Python 3 as written.
    dt = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    message = '{} is stopping at {} signal: {}'.format(app_name, dt, signal_received)
    print message
    exit(0)
if __name__ == '__main__':
    # Tell Python to run the handler() function when SIGINT is received
    signal(SIGINT, handler)
    signal(SIGTERM, handler)
    # NOTE(review): Python 2 parse -- the print statement's expression is
    # ('Running App {} at {}').format(...), so the string is formatted
    # before printing. Under Python 3 this same line would call .format()
    # on print()'s None return value and raise AttributeError.
    print('Running App {} at {}').format(app_name, datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
    while True:
        # Emit one heartbeat line per second until a signal stops us.
        dt = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        message = '{} pulse {}'.format(app_name, dt)
        print message
        time.sleep(1)
| 194 | 0 | 23 |
2f3577427ecb13f9276f1f0f093427ff3d293b4a | 216 | py | Python | abusehelper/core/tests/test_events.py | AbuseSA/abusehelper | 3e953632d20317c6bfe7eeb987ea9104d8f2a957 | [
"MIT"
] | 117 | 2015-11-30T09:52:52.000Z | 2021-11-24T23:58:13.000Z | abusehelper/core/tests/test_events.py | AbuseSA/abusehelper | 3e953632d20317c6bfe7eeb987ea9104d8f2a957 | [
"MIT"
] | 57 | 2015-12-08T10:06:57.000Z | 2018-03-28T11:13:11.000Z | abusehelper/core/tests/test_events.py | AbuseSA/abusehelper | 3e953632d20317c6bfe7eeb987ea9104d8f2a957 | [
"MIT"
] | 29 | 2016-02-08T08:24:30.000Z | 2022-03-31T13:53:15.000Z | import pickle
import unittest
from .. import events
| 19.636364 | 58 | 0.671296 | import pickle
import unittest
from .. import events
class TestEvent(unittest.TestCase):
    """Sanity checks for events.Event serialization."""

    def test_pickling(self):
        original = events.Event({"a": "b"})
        round_tripped = pickle.loads(pickle.dumps(original))
        self.assertEqual(original, round_tripped)
| 99 | 14 | 49 |
c97a0a476b61f7702aa6c4af4cd84290787ef039 | 627 | py | Python | tests/integration_with_db/test_para_helper.py | lizschley/number_six | a427202397822fca1f49d43d138c24fffdbe95da | [
"MIT"
] | 1 | 2020-07-14T20:13:05.000Z | 2020-07-14T20:13:05.000Z | tests/integration_with_db/test_para_helper.py | lizschley/number_six | a427202397822fca1f49d43d138c24fffdbe95da | [
"MIT"
] | 3 | 2021-04-06T20:40:08.000Z | 2021-06-03T21:54:21.000Z | tests/integration_with_db/test_para_helper.py | lizschley/number_six | a427202397822fca1f49d43d138c24fffdbe95da | [
"MIT"
] | null | null | null | '''Tests for methods in helpers/import_common_class/paragraph_helpers.py
These are integration tests requiring a db. Will do later'''
# pylint: disable=missing-function-docstring
import helpers.no_import_common_class.paragraph_helpers as para_helper
import testing.data.list_constants as list_data
# Todo: Implement when I start doing db update integration tests
# Todo: delete or instantiate with db after refactoring
# Todo: This can be one of first db tests... need existing data
| 29.857143 | 72 | 0.795853 | '''Tests for methods in helpers/import_common_class/paragraph_helpers.py
These are integration tests requiring a db. Will do later'''
# pylint: disable=missing-function-docstring
import helpers.no_import_common_class.paragraph_helpers as para_helper
import testing.data.list_constants as list_data
# Todo: Implement when I start doing db update integration tests
def test_paragraph_json_to_db():
    """Placeholder for a db-backed test of para_helper's JSON-to-database import."""
    pass
# Todo: delete or instantiate with db after refactoring
def test_paragraph_view_input():
    """Placeholder: to be deleted or rewritten against the db after refactoring."""
    pass
# Todo: This can be one of first db tests... need existing data
def test_get_initial_classifications():
    """Placeholder for an early db integration test; needs existing classification data."""
    pass
| 67 | 0 | 66 |
25b814aa25b1e447faa8fd399cd957104f79284e | 1,574 | py | Python | backends/tests/unit/core/mailers_tests.py | sltk/crmint | bc8417bc4ed225faa5caa88daca48f1f12f2ac94 | [
"Apache-2.0"
] | null | null | null | backends/tests/unit/core/mailers_tests.py | sltk/crmint | bc8417bc4ed225faa5caa88daca48f1f12f2ac94 | [
"Apache-2.0"
] | null | null | null | backends/tests/unit/core/mailers_tests.py | sltk/crmint | bc8417bc4ed225faa5caa88daca48f1f12f2ac94 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import testbed
from core import mailers
from core import models
from tests import utils
| 34.217391 | 79 | 0.757306 | # Copyright 2018 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import testbed
from core import mailers
from core import models
from tests import utils
class TestNotificationMailer(utils.ModelTestCase):
  """Exercises NotificationMailer against the App Engine testbed mail stub."""

  def setUp(self):
    super(TestNotificationMailer, self).setUp()
    self.testbed = testbed.Testbed()
    self.testbed.activate()
    # Stub only the services this test touches: mail and memcache.
    self.testbed.init_mail_stub()
    self.testbed.init_memcache_stub()
    self.mailer = mailers.NotificationMailer()
    self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)

  def tearDown(self):
    super(TestNotificationMailer, self).tearDown()
    self.testbed.deactivate()

  def test_mail_has_been_sent(self):
    recipient = 'john@lenon.com'
    pipeline = models.Pipeline()
    pipeline.assign_attributes(dict(emails_for_notifications=recipient))
    self.mailer.finished_pipeline(pipeline)
    sent = self.mail_stub.get_sent_messages(to=recipient)
    self.assertEqual(1, len(sent))
    self.assertEqual(recipient, sent[0].to)
| 754 | 29 | 98 |
7513d68a24bfddd2fb575f859a53b707010c0272 | 3,852 | py | Python | dev/results/half_wing_swept_45_deg/machline_iterator.py | usuaero/MachLine | dacc984e8430bcbff2773ff325bb471503ff47fc | [
"MIT"
] | 2 | 2022-02-17T21:41:11.000Z | 2022-03-07T23:11:43.000Z | dev/results/half_wing_swept_45_deg/machline_iterator.py | usuaero/MachLine | dacc984e8430bcbff2773ff325bb471503ff47fc | [
"MIT"
] | 1 | 2022-01-26T02:04:43.000Z | 2022-02-07T17:07:23.000Z | dev/results/half_wing_swept_45_deg/machline_iterator.py | usuaero/MachLine | dacc984e8430bcbff2773ff325bb471503ff47fc | [
"MIT"
] | null | null | null | # This script is to run automate running machline for the Weber and Brebner results
import numpy as np
import json
import subprocess
import time
import multiprocessing as mp
import os
# Record and print the time required to run MachLine
start_time = time.time()
## Main
input_conditions = "Swept_half_wing_conditions_input.json"
json_string = open(input_conditions).read()
json_vals = json.loads(json_string)
# Identify values to pass from input conditions file
Nodes_input = json_vals["geometry"]["nodes"]
AoA_list_input = json_vals["geometry"]["AoA list"]
freestream_velocity = json_vals["flow conditions"]["freestream velocity"]
formulation_input = json_vals["solver"]["formulation"]
# Identify number of CPU available to work with
# n_processors = mp.cpu_count()
n_processors = 8
Arguments = []
# Change the working directory to the main MachLine directory for execution
os.chdir("../../../")
# Call the machline iterator with the desired inputs
with mp.Pool(n_processors) as pool:
for form in formulation_input:
for AoA in AoA_list_input:
for node in Nodes_input:
Arguments.append((AoA, node, form, freestream_velocity))
pool.starmap(mach_iter, Arguments)
pool.join()
# mach_iter(AoA_list_input, Nodes_input, formulation_input, freestream_velocity)
print("MachLine Iterator executed successfully in %s seconds" % "{:.4f}".format(time.time()-start_time)) | 31.317073 | 151 | 0.632918 | # This script is to run automate running machline for the Weber and Brebner results
import numpy as np
import json
import subprocess
import time
import multiprocessing as mp
import os
# Record and print the time required to run MachLine
start_time = time.time()
def mach_iter(AoA, Node, formulation, freestream):
    """Write one MachLine input deck for the given case and run the solver on it.

    Parameters:
        AoA (str)         -- angle of attack in degrees
        Node (str)        -- mesh node count used to pick the mesh file
        formulation (str) -- solver formulation name (e.g. "source-free")
        freestream (float)-- freestream speed

    Writes the JSON input under half_wing_A_swept_inputs/ and invokes
    ./machline.exe on it; the working directory must be the MachLine root.
    """
    # File names use an underscore where the solver option uses a hyphen.
    formulation_adjusted = "source_free" if formulation == "source-free" else formulation

    # Resolve the freestream velocity components for this angle of attack.
    AoA_rad = float(AoA)*np.pi/180
    vx = freestream * np.cos(AoA_rad)
    vz = freestream * np.sin(AoA_rad)

    # Directory layout shared by every case in this study.
    filebase = "dev/results/half_wing_swept_45_deg/"
    output_filebase = (filebase + "MachLine_Results/" + AoA + "_degrees_AoA/half_wing_A_"
                       + Node + "_nodes_" + AoA + "_deg_AoA_" + formulation_adjusted)

    # Assemble the MachLine input deck section by section (key order is
    # preserved in the dumped JSON).
    flow_section = {
        "freestream_velocity": [
            vx,
            0.0,
            vz
        ]
    }
    geometry_section = {
        "file": filebase + "half_wing_A_meshes/half_wing_A_" + Node + "_nodes.vtk",
        "mirror_about": "xz",
        "singularity_order": {
            "doublet": 1,
            "source": 0
        },
        "wake_model": {
            "wake_shedding_angle": 90.0,
            "trefftz_distance": 10000.0,
            "N_panels": 1
        },
        "reference": {
            "area": 1.0
        }
    }
    solver_section = {
        "formulation": formulation,
        "control_point_offset": 1.1e-05
    }
    output_section = {
        "body_file": output_filebase + "_formulation.vtk",
        "wake_file": output_filebase + "_formulation_wake.vtk",
        "control_point_file": output_filebase + "_control_points.vtk",
        "report_file": "../../report.txt"
    }
    case_input = {
        "flow": flow_section,
        "geometry": geometry_section,
        "solver": solver_section,
        "post_processing": {},
        "output": output_section
    }

    # Write the deck where MachLine expects it, then launch the solver.
    inputfile = filebase + 'half_wing_A_swept_inputs/' + AoA + "_deg_angle_of_attack_input.json"
    with open(inputfile, "w") as output_file:
        json.dump(case_input, output_file, indent=4)
    print("\n***", Node, "node input file saved successfully ***\n")
    subprocess.call(["./machline.exe", inputfile])
## Main
input_conditions = "Swept_half_wing_conditions_input.json"
json_string = open(input_conditions).read()
json_vals = json.loads(json_string)
# Identify values to pass from input conditions file
Nodes_input = json_vals["geometry"]["nodes"]
AoA_list_input = json_vals["geometry"]["AoA list"]
freestream_velocity = json_vals["flow conditions"]["freestream velocity"]
formulation_input = json_vals["solver"]["formulation"]
# Identify number of CPU available to work with
# n_processors = mp.cpu_count()
n_processors = 8
Arguments = []
# Change the working directory to the main MachLine directory for execution
os.chdir("../../../")
# Call the machline iterator with the desired inputs
with mp.Pool(n_processors) as pool:
for form in formulation_input:
for AoA in AoA_list_input:
for node in Nodes_input:
Arguments.append((AoA, node, form, freestream_velocity))
pool.starmap(mach_iter, Arguments)
pool.join()
# mach_iter(AoA_list_input, Nodes_input, formulation_input, freestream_velocity)
print("MachLine Iterator executed successfully in %s seconds" % "{:.4f}".format(time.time()-start_time)) | 2,402 | 0 | 23 |
468a5d220234e8e41827a0feef74281eff91ec8d | 12,232 | py | Python | lib/ansible/modules/cloud/alicloud/ali_slb_vsg.py | able8/ansible-provider | 9d1450e7ccd7a58d333cd03603a528bc7989a24d | [
"Apache-2.0"
] | null | null | null | lib/ansible/modules/cloud/alicloud/ali_slb_vsg.py | able8/ansible-provider | 9d1450e7ccd7a58d333cd03603a528bc7989a24d | [
"Apache-2.0"
] | null | null | null | lib/ansible/modules/cloud/alicloud/ali_slb_vsg.py | able8/ansible-provider | 9d1450e7ccd7a58d333cd03603a528bc7989a24d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ali_slb_vsg
version_added: "2.8"
short_description: Create, Delete VServerGroup and Modify its name or backend servers.
description:
- Create and delete a VServer group
- Add or remove backend servers or network interfaces to/from the VServer group
options:
state:
description:
- Create and delete a VServer group.
default: 'present'
choices: ['present', 'absent']
load_balancer_id:
description:
- The Server Load Balancer instance ID.
This is used in combination with C(name) to determine if a VServer group already exists.
required: True
aliases: ['lb_id']
vserver_group_name:
description:
- Virtual server group name.
This is used in conjunction with the C(load_balancer_id) to ensure idempotence.
required: True
aliases: [ 'group_name', 'name' ]
backend_servers:
description:
- List of that need to be added or.
- List of hash/dictionaries backend servers or network interfaces to add in this group (see example).
If none are supplied, no backend servers will be enabled. Each server has several keys and refer to
https://www.alibabacloud.com/help/doc-detail/35215.htm. Each key should be format as under_score.
Currently the valid keys including "server_id", "port", "weight" and "type".
purge_backend_servers:
description:
- Purge existing backend servers or ENIs on VServer group that are not found in backend_servers.
- If True, existing servers or ENIs will be purged from the resource to match exactly what is defined by
I(backend_servers). If the I(backend_servers) is not set then servers will not be modified.
- If True, it means you have to specify all the desired backend servers or ENIs on each task affecting a VServer group.
default: False
type: bool
vserver_group_id:
description:
- (Deprecated) Virtual server group id.
aliases: [ 'group_id' ]
multi_ok:
description:
- By default the module will not create another Load Balancer if there is another Load Balancer
with the same I(name). Specify this as true if you want duplicate Load Balancers created.
default: False
type: bool
requirements:
- "python >= 2.6"
- "footmark >= 1.9.0"
extends_documentation_fragment:
- alicloud
author:
- "He Guimin (@xiaozhu36)"
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
- name: Create VServer Group in SLB
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
- name: Add backend servers to vserver group
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
backend_servers:
- instance_id: 'i-f2n3cn34c'
port: 8080
weight: 100
type: ecs
- instance_id: 'eni-n34cjf4vd'
port: 8081
weight: 100
type: eni
- name: Purge backend servers from vserver group
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
backend_servers:
- instance_id: 'eni-f2n3cn34c'
port: 8080
weight: 100
type: eni
- instance_id: 'eni-n34cjf4vd'
port: 8081
weight: 100
type: eni
purge_backend_servers: True
- name: Delete VServer Group in SLB
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
state: absent
'''
RETURN = '''
vserver_group:
description:
- info about the virtual server group that was created or deleted.
returned: on present
type: complex
contains:
address:
description: The IP address of the loal balancer
returned: always
type: string
sample: "47.94.26.126"
backend_servers:
description: The load balancer's backend servers
returned: always
type: complex
contains:
port:
description: The backend server port
returned: always
type: int
sample: 22
server_id:
description: The backend server id
returned: always
type: string
sample: "i-vqunci342"
type:
description: The backend server type, ecs or eni
returned: always
type: string
sample: "ecs"
weight:
description: The backend server weight
returned: always
type: int
sample: 100
id:
description: The ID of the virtual server group was created. Same as vserver_group_id.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_id:
description: The ID of the virtual server group was created.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_name:
description: The name of the virtual server group was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
name:
description: The name of the virtual server group was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
tags:
description: The load balancer tags
returned: always
type: complex
sample: {}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, slb_connect
HAS_FOOTMARK = False
try:
from footmark.exception import SLBResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
VALID_SERVER_PARAMS = ["server_id", "port", "weight", "type"]
if __name__ == '__main__':
main()
| 34.456338 | 127 | 0.606279 | #!/usr/bin/python
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ali_slb_vsg
version_added: "2.8"
short_description: Create, Delete VServerGroup and Modify its name or backend servers.
description:
- Create and delete a VServer group
- Add or remove backend servers or network interfaces to/from the VServer group
options:
state:
description:
- Create and delete a VServer group.
default: 'present'
choices: ['present', 'absent']
load_balancer_id:
description:
- The Server Load Balancer instance ID.
This is used in combination with C(name) to determine if a VServer group already exists.
required: True
aliases: ['lb_id']
vserver_group_name:
description:
- Virtual server group name.
This is used in conjunction with the C(load_balancer_id) to ensure idempotence.
required: True
aliases: [ 'group_name', 'name' ]
backend_servers:
description:
- List of that need to be added or.
- List of hash/dictionaries backend servers or network interfaces to add in this group (see example).
If none are supplied, no backend servers will be enabled. Each server has several keys and refer to
https://www.alibabacloud.com/help/doc-detail/35215.htm. Each key should be format as under_score.
Currently the valid keys including "server_id", "port", "weight" and "type".
purge_backend_servers:
description:
- Purge existing backend servers or ENIs on VServer group that are not found in backend_servers.
- If True, existing servers or ENIs will be purged from the resource to match exactly what is defined by
I(backend_servers). If the I(backend_servers) is not set then servers will not be modified.
- If True, it means you have to specify all the desired backend servers or ENIs on each task affecting a VServer group.
default: False
type: bool
vserver_group_id:
description:
- (Deprecated) Virtual server group id.
aliases: [ 'group_id' ]
multi_ok:
description:
- By default the module will not create another Load Balancer if there is another Load Balancer
with the same I(name). Specify this as true if you want duplicate Load Balancers created.
default: False
type: bool
requirements:
- "python >= 2.6"
- "footmark >= 1.9.0"
extends_documentation_fragment:
- alicloud
author:
- "He Guimin (@xiaozhu36)"
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
- name: Create VServer Group in SLB
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
- name: Add backend servers to vserver group
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
backend_servers:
- instance_id: 'i-f2n3cn34c'
port: 8080
weight: 100
type: ecs
- instance_id: 'eni-n34cjf4vd'
port: 8081
weight: 100
type: eni
- name: Purge backend servers from vserver group
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
backend_servers:
- instance_id: 'eni-f2n3cn34c'
port: 8080
weight: 100
type: eni
- instance_id: 'eni-n34cjf4vd'
port: 8081
weight: 100
type: eni
purge_backend_servers: True
- name: Delete VServer Group in SLB
ali_slb_vsg:
load_balancer_id: 'lb-cnqnc234'
name: 'ansible-vsg'
state: absent
'''
RETURN = '''
vserver_group:
description:
- info about the virtual server group that was created or deleted.
returned: on present
type: complex
contains:
address:
description: The IP address of the loal balancer
returned: always
type: string
sample: "47.94.26.126"
backend_servers:
description: The load balancer's backend servers
returned: always
type: complex
contains:
port:
description: The backend server port
returned: always
type: int
sample: 22
server_id:
description: The backend server id
returned: always
type: string
sample: "i-vqunci342"
type:
description: The backend server type, ecs or eni
returned: always
type: string
sample: "ecs"
weight:
description: The backend server weight
returned: always
type: int
sample: 100
id:
description: The ID of the virtual server group was created. Same as vserver_group_id.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_id:
description: The ID of the virtual server group was created.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_name:
description: The name of the virtual server group was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
name:
description: The name of the virtual server group was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
tags:
description: The load balancer tags
returned: always
type: complex
sample: {}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, slb_connect
HAS_FOOTMARK = False
try:
from footmark.exception import SLBResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
VALID_SERVER_PARAMS = ["server_id", "port", "weight", "type"]
def check_backend_servers(module, servers):
    """Abort via module.fail_json when a backend-server dict carries an unsupported key."""
    for server in servers:
        for key in server:
            if key in VALID_SERVER_PARAMS:
                continue
            module.fail_json(msg='Invalid backend server key {0}. Valid keys: {1}.'.format(key, VALID_SERVER_PARAMS))
def format_backend_servers(servers):
    """Convert snake_case backend-server keys to the CamelCase form the SLB API expects.

    Returns a new list of dicts; a falsy input yields an empty list.
    """
    formatted = []
    for entry in (servers or []):
        camel = {}
        for key, value in entry.items():
            camel_key = "".join(part[0].upper() + part[1:] for part in str(key).split("_"))
            camel[camel_key] = value
        formatted.append(camel)
    return formatted
def filter_backend_servers(existing, inputting):
    """Partition backend servers into (old, new, removed) relative to the desired set.

    Parameters:
        existing  -- server dicts currently attached to the VServer group
        inputting -- server dicts the user asked for (desired state)

    Returns a 3-tuple of lists of server dicts:
        old     -- servers present in both sets, taken from `inputting` so the
                   user-supplied port/weight/type override the current config
        new     -- servers only in `inputting` (to be added)
        removed -- servers only in `existing` (candidates for purging)

    Bug fix: the previous implementation tracked matched ids as one-element
    lists (`oldList.append([s['server_id']])`), so the `key not in oldList`
    membership test never matched and every server present in both sets was
    appended to `old` twice -- once from each source, with possibly
    conflicting port/weight values.
    """
    existing_ids = set(s['server_id'] for s in existing)
    inputting_ids = set(s['server_id'] for s in inputting)
    old = [s for s in inputting if s['server_id'] in existing_ids]
    new = [s for s in inputting if s['server_id'] not in existing_ids]
    removed = [s for s in existing if s['server_id'] not in inputting_ids]
    return old, new, removed
def main():
    """Entry point: ensure the requested VServer group state on an Alibaba SLB.

    Finds (or creates) the group named ``vserver_group_name`` on the given
    load balancer, reconciles its backend servers against the
    ``backend_servers`` parameter, and exits through AnsibleModule with the
    resulting group (or ``{}`` on deletion).
    """
    argument_spec = ecs_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        load_balancer_id=dict(type='str', required=True, aliases=['lb_id']),
        vserver_group_name=dict(type='str', required=True, aliases=['group_name', 'name']),
        backend_servers=dict(type='list'),
        vserver_group_id=dict(type='str', aliases=['group_id']),
        purge_backend_servers=dict(type='bool', default=False),
        multi_ok=dict(type='bool', default=False)
    ))
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=([
                               ('state', 'present', ['backend_servers'])
                           ])
                           )
    if HAS_FOOTMARK is False:
        module.fail_json(msg='footmark required for the module ali_slb_vsg.')
    slb = slb_connect(module)
    state = module.params['state']
    lb_id = module.params['load_balancer_id']
    vsg_name = module.params['vserver_group_name']
    changed = False
    matching = None
    # Idempotence: unless multi_ok is set, locate the one existing group with
    # this name on the load balancer; more than one same-named group is fatal.
    if not module.params['multi_ok']:
        try:
            matching_vsgs = []
            for group in slb.describe_vserver_groups(**{'load_balancer_id': lb_id}):
                if group.name != vsg_name:
                    continue
                matching_vsgs.append(group)
            if len(matching_vsgs) == 1:
                matching = matching_vsgs[0]
            elif len(matching_vsgs) > 1:
                module.fail_json(msg='Currently there are {0} virtual server groups that have the same name {1}. '
                                     'If you would like to create anyway '
                                     'please pass True to the multi_ok param.'.format(len(matching_vsgs), vsg_name))
        except Exception as e:
            module.fail_json(msg=str("Unable to describe vserver group attribute, error:{0}".format(e)))
    # Deletion path: remove the group if it exists, then exit either way.
    if state == 'absent':
        if matching:
            try:
                changed = matching.delete()
            except Exception as e:
                module.fail_json(msg=str("Unable to delete vserver group, error: {0}".format(e)))
        module.exit_json(changed=changed, vserver_group={})
    # Presence path: validate the requested backend servers up front.
    backend_servers = module.params['backend_servers']
    check_backend_servers(module, backend_servers)
    if not matching:
        try:
            params = module.params
            # Only the first 20 entries are sent at creation time --
            # presumably the API's per-call cap; verify against SLB docs.
            params['backend_servers'] = format_backend_servers(backend_servers[:20])
            matching = slb.create_vserver_group(**params)
            changed = True
        except Exception as e:
            module.fail_json(msg=str("Unable to create vserver group error:{0}".format(e)))
    # Reconcile: modify overlapping servers, add new ones, and (only when
    # purge_backend_servers is set) remove servers absent from the request.
    if backend_servers:
        old, new, removed = filter_backend_servers(matching.backend_servers['backend_server'], backend_servers)
        if old:
            try:
                if matching.modify(backend_servers=old):
                    changed = True
            except Exception as e:
                module.fail_json(msg='Modify backend servers failed: {0}'.format(e))
        if new:
            try:
                if matching.add(backend_servers=new):
                    changed = True
            except Exception as e:
                module.fail_json(msg='Add backend servers failed: {0}'.format(e))
        if module.params['purge_backend_servers'] and removed:
            try:
                if matching.remove(backend_servers=removed):
                    changed = True
            except Exception as e:
                module.fail_json(msg='Remove backend servers failed: {0}'.format(e))
    # Re-read the group so the returned facts reflect the final state.
    module.exit_json(changed=changed, vserver_group=matching.get().read())
if __name__ == '__main__':
main()
| 5,031 | 0 | 92 |
16bf0e3677e5cf3c81f9e6afc25f05e942df2770 | 7,318 | py | Python | html_visual.py | kris314/deep-text-recognition-benchmark | 741dd9abc8b7b2f29ba088b308f0e8c1483153d9 | [
"Apache-2.0"
] | null | null | null | html_visual.py | kris314/deep-text-recognition-benchmark | 741dd9abc8b7b2f29ba088b308f0e8c1483153d9 | [
"Apache-2.0"
] | null | null | null | html_visual.py | kris314/deep-text-recognition-benchmark | 741dd9abc8b7b2f29ba088b308f0e8c1483153d9 | [
"Apache-2.0"
] | null | null | null | import dominate
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
import os
class HTML:
    """This HTML class allows us to save images and write texts into a single HTML file.
    It consists of functions such as <add_header> (add a text header to the HTML file),
    <add_images> (add rows of images to the HTML file), and <save> (save the HTML to the disk).
    It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
    """

    def __init__(self, web_dir, title, refresh=0):
        """Initialize the HTML classes

        Parameters:
            web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html
            title (str)   -- the webpage name
            refresh (int) -- how often the website refresh itself; if 0; no refreshing
        """
        self.title = title
        self.web_dir = web_dir
        # Images live directly in the web directory (no separate images/ subfolder).
        self.img_dir = os.path.join(self.web_dir)
        if not os.path.exists(self.web_dir):
            os.makedirs(self.web_dir)
        if not os.path.exists(self.img_dir):
            os.makedirs(self.img_dir)
        self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                meta(http_equiv="refresh", content=str(refresh))

    def get_image_dir(self):
        """Return the directory that stores images"""
        return self.img_dir

    def add_header(self, text):
        """Insert a header to the HTML file

        Parameters:
            text (str) -- the header text
        """
        with self.doc:
            h3(text)

    def _add_image_cell(self, im, txt, width):
        """Append one <td> holding an image and its caption to the current row.

        Must be called inside an active dominate <tr> context.
        """
        with td(style="word-wrap: break-word;", halign="center", valign="top"):
            with p():
                img(style="width:%dpx" % width, src=im)
                br()
                p(txt)

    def add_images(self, ims, txts, width=400, realFlag=False):
        """add rows of images to the HTML file

        Parameters:
            ims (list)      -- one entry per table row; each entry is a list of image paths (columns 0-7)
            txts (list)     -- captions, parallel in structure to ims
            width (int)     -- displayed width of every image, in pixels
            realFlag (bool) -- when True, skip columns 2 and 3 of every row
        """
        self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
        self.doc.add(self.t)
        with self.t:
            for im, txt in zip(ims, txts):
                with tr():
                    # Columns 0-1 always appear; 2-3 only when realFlag is
                    # False; 4-7 only when the row has that many entries.
                    # (Replaces eight copy-pasted <td> blocks.)
                    cols = [0, 1]
                    if not realFlag:
                        cols += [2, 3]
                    cols += [i for i in (4, 5, 6, 7) if len(im) > i]
                    for i in cols:
                        self._add_image_cell(im[i], txt[i], width)

    def save(self):
        """save the current content to the HTML file"""
        html_file = '%s/index.html' % self.web_dir
        # Context manager closes the handle even if render/write fails
        # (the old version leaked the handle on error).
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
if __name__ == '__main__':  # we show an example usage here.
    html = HTML('web/', 'test_html')
    html.add_header('hello world')
    ims, txts = [], []
    for n in range(4):
        ims.append('image_%d.png' % n)
        txts.append('text_%d' % n)
    # add_images() takes one list of image paths and one list of captions per
    # table row (signature: ims, txts, width=400, realFlag=False).  The old
    # call passed a third "links" list, which landed in the numeric `width`
    # parameter and raised a TypeError at the "%dpx" format.
    html.add_images([ims], [txts])
    html.save()
| 47.830065 | 157 | 0.441651 | import dominate
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
import os
class HTML:
    """This HTML class allows us to save images and write texts into a single HTML file.

    It consists of functions such as <add_header> (add a text header to the HTML file),
    <add_images> (add rows of images to the HTML file), and <save> (save the HTML to disk).
    It is based on 'dominate', a Python library for creating and manipulating HTML
    documents using a DOM API.
    """

    def __init__(self, web_dir, title, refresh=0):
        """Initialize the HTML class.

        Parameters:
            web_dir (str) -- directory that stores the webpage; the HTML file is
                             created at <web_dir>/index.html
            title (str)   -- the webpage name
            refresh (int) -- how often the website refreshes itself; 0 = never
        """
        self.title = title
        self.web_dir = web_dir
        # Historically images lived in a separate 'images' sub-directory; this
        # version keeps them next to index.html, hence the bare join.
        self.img_dir = os.path.join(self.web_dir)
        os.makedirs(self.web_dir, exist_ok=True)
        os.makedirs(self.img_dir, exist_ok=True)

        self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                meta(http_equiv="refresh", content=str(refresh))

    def get_image_dir(self):
        """Return the directory that stores images."""
        return self.img_dir

    def add_header(self, text):
        """Insert a header into the HTML file.

        Parameters:
            text (str) -- the header text
        """
        with self.doc:
            h3(text)

    def add_images(self, ims, txts, width=400, realFlag=False):
        """Add rows of images to the HTML file.

        Parameters:
            ims (list)      -- one entry per table row; each entry is an indexable
                               collection of image paths/URLs
            txts (list)     -- one entry per table row; each entry holds the caption
                               for the image at the same index
            width (int)     -- pixel width applied to every rendered image
            realFlag (bool) -- when True, the cells at indices 2 and 3 are skipped
        """
        self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
        self.doc.add(self.t)
        with self.t:
            for im, txt in zip(ims, txts):
                with tr():
                    # Columns 0 and 1 are always rendered; 2 and 3 only when
                    # realFlag is off; 4-7 whenever the row provides them.
                    cells = [0, 1]
                    if not realFlag:
                        cells += [2, 3]
                    cells += [k for k in (4, 5, 6, 7) if len(im) > k]
                    for k in cells:
                        self._add_image_cell(im[k], txt[k], width)

    def _add_image_cell(self, image_src, caption, width):
        """Render one table cell containing a single image and its caption."""
        with td(style="word-wrap: break-word;", halign="center", valign="top"):
            with p():
                img(style="width:%dpx" % width, src=image_src)
                br()
                p(caption)

    def save(self):
        """Write the current document content to the HTML file on disk."""
        html_file = '%s/index.html' % self.web_dir
        # The context manager closes the file even if render() raises.
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
if __name__ == '__main__':  # we show an example usage here.
    html = HTML('web/', 'test_html')
    html.add_header('hello world')
    ims, txts = [], []
    for n in range(4):
        ims.append('image_%d.png' % n)
        txts.append('text_%d' % n)
    # add_images() expects one list of paths and one list of captions per
    # table row (signature: ims, txts, width=400, realFlag=False).  Passing a
    # third "links" list filled the numeric `width` parameter and crashed with
    # a TypeError in the "%dpx" format; wrap the flat lists into a single row.
    html.add_images([ims], [txts])
    html.save()
| 0 | 0 | 0 |
dcfdc122a070b85ec9e3790ba2d899697bfe5d4d | 679 | py | Python | examples/bs4demo/utils.py | aDENTinTIME/djangocms-cascade | c38c1c5ad052dbe233b50fb833ad8e9a919014f2 | [
"MIT"
] | 139 | 2015-01-08T22:27:06.000Z | 2021-08-19T03:36:58.000Z | examples/bs4demo/utils.py | aDENTinTIME/djangocms-cascade | c38c1c5ad052dbe233b50fb833ad8e9a919014f2 | [
"MIT"
] | 286 | 2015-01-02T14:15:14.000Z | 2022-03-22T11:00:12.000Z | examples/bs4demo/utils.py | aDENTinTIME/djangocms-cascade | c38c1c5ad052dbe233b50fb833ad8e9a919014f2 | [
"MIT"
] | 91 | 2015-01-16T15:06:23.000Z | 2022-03-23T23:36:54.000Z |
def find_django_migrations_module(module_name):
    """Try to locate <module_name>.migrations_django (without importing it).

    Returns "<module_name>.migrations_django" when that submodule exists,
    otherwise "<module_name>.migrations" (the Django >= 1.7 default).
    For details why:
    https://docs.djangoproject.com/en/1.7/topics/migrations/#libraries-third-party-apps
    """
    # importlib replaces the legacy `imp` module, which was deprecated in
    # Python 3.4 and removed in Python 3.12.
    import importlib.util
    try:
        spec = importlib.util.find_spec(module_name + '.migrations_django')
    except ImportError:
        # The parent module itself cannot be located; use the default name.
        return module_name + '.migrations'
    if spec is not None:
        return module_name + '.migrations_django'
    return module_name + '.migrations'  # conforms to Django 1.7 defaults
| 42.4375 | 89 | 0.714286 |
def find_django_migrations_module(module_name):
    """Try to locate <module_name>.migrations_django (without importing it).

    Appends either ".migrations_django" (when that submodule can be found) or
    ".migrations" to module_name.
    For details why:
    https://docs.djangoproject.com/en/1.7/topics/migrations/#libraries-third-party-apps
    """
    # The `imp` module used previously was removed in Python 3.12; the
    # importlib machinery offers the same check without loading the submodule.
    import importlib.util
    try:
        if importlib.util.find_spec(module_name + '.migrations_django') is not None:
            return module_name + '.migrations_django'
    except ImportError:
        # Parent module not importable — fall through to the default below.
        pass
    return module_name + '.migrations'  # conforms to Django 1.7 defaults
| 0 | 0 | 0 |
897b2f761f942793b82d028620d46839e3e65517 | 1,960 | py | Python | tools/game_utils.py | JakubPetriska/poker-agent-kit | 12c28711c91447c708719454d1fbd224fa03189e | [
"MIT"
] | 19 | 2018-09-21T15:27:09.000Z | 2022-03-09T03:55:21.000Z | tools/game_utils.py | JakubPetriska/poker-agent-kit | 12c28711c91447c708719454d1fbd224fa03189e | [
"MIT"
] | 6 | 2018-05-09T17:09:58.000Z | 2019-07-09T15:15:05.000Z | tools/game_utils.py | JakubPetriska/poker-cfr | 12c28711c91447c708719454d1fbd224fa03189e | [
"MIT"
] | 2 | 2018-09-11T02:49:57.000Z | 2018-11-17T00:29:38.000Z | from math import isclose
import numpy as np
import scipy.misc
import scipy.special
from tools.walk_trees import walk_trees
from tools.game_tree.nodes import ActionNode
| 32.666667 | 84 | 0.668878 | from math import isclose
import numpy as np
import scipy.misc
import scipy.special
from tools.walk_trees import walk_trees
from tools.game_tree.nodes import ActionNode
def get_num_hole_card_combinations(game):
    """Return the number of possible hole-card deals across all players.

    Counts the ways to choose the total hole cards from the deck, times the
    ordered assignments of those cards to the players' hands.
    """
    num_players = game.get_num_players()
    num_hole_cards = game.get_num_hole_cards()
    num_cards = game.get_num_suits() * game.get_num_ranks()
    num_total_hole_cards = num_players * num_hole_cards
    # scipy.misc.comb was deprecated and then removed (SciPy >= 1.3); the
    # combinatorics helpers live in scipy.special (already imported above).
    return scipy.special.comb(num_cards, num_total_hole_cards, exact=True) \
        * scipy.special.perm(num_total_hole_cards, num_total_hole_cards, exact=True)
def is_correct_strategy(strategy_tree):
    """Check that every action node of the tree holds a valid strategy.

    A node's strategy is valid when its probabilities sum to 1 and every
    action the node does not offer carries probability 0.
    """
    valid = True

    def check_node(node):
        nonlocal valid
        if not isinstance(node, ActionNode):
            return
        if not isclose(np.sum(node.strategy), 1):
            valid = False
        # Probability mass on an action the node lacks invalidates it.
        if any(node.strategy[a] != 0
               for a in range(3) if a not in node.children):
            valid = False

    walk_trees(check_node, strategy_tree)
    return valid
def copy_strategy(dst, src):
    """Copy each action node's strategy from the *src* tree into *dst* in place."""
    def visit(dst_node, src_node):
        if isinstance(dst_node, ActionNode):
            np.copyto(dst_node.strategy, src_node.strategy)
        # NOTE(review): mirrors the original return value; presumably
        # walk_trees uses it to continue the traversal — confirm in
        # tools.walk_trees.
        return [src_node.children[key] for key in src_node.children]

    walk_trees(visit, dst, src)
def is_strategies_equal(first, second):
    """Return True when both trees carry numerically identical strategies."""
    equal = True

    def compare(node_a, node_b):
        nonlocal equal
        if isinstance(node_a, ActionNode):
            if not all(isclose(node_a.strategy[action], node_b.strategy[action])
                       for action in range(3)):
                equal = False

    walk_trees(compare, first, second)
    return equal
def get_big_blind_size(game):
    """Return the largest blind posted by any player (None when there are no players)."""
    # max(..., default=None) preserves the original behavior for zero players;
    # the old loop also compared with `== None`, which PEP 8 discourages in
    # favor of identity checks.
    return max(
        (game.get_blind(i) for i in range(game.get_num_players())),
        default=None)
| 1,672 | 0 | 115 |