| hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| fdfd37283d7f866b32b90513f2b90bfba42aac56 | 4,765 | py | Python | 4_Code/BasicScripts/MainCode/GAN_VAE_MainScript.py | ninadkgaikwad/GAN_VAE_Project | eb1970a44517c086e77df2fdf8bf6366fb6108ef | ["MIT"] | null | null | null | 4_Code/BasicScripts/MainCode/GAN_VAE_MainScript.py | ninadkgaikwad/GAN_VAE_Project | eb1970a44517c086e77df2fdf8bf6366fb6108ef | ["MIT"] | null | null | null | 4_Code/BasicScripts/MainCode/GAN_VAE_MainScript.py | ninadkgaikwad/GAN_VAE_Project | eb1970a44517c086e77df2fdf8bf6366fb6108ef | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: ninad k gaikwad
GAN and VAE on MNIST Data - Main File
"""
############################ Initial SET UP ##################################
## Importing Modules
import datapreprocessing
import ganbuilder
import vaebuilder
import trainingplotter
import time
import tensorflow as tf
## Choose Architecture
# DenseGAN = 1
# DenseGAN_Deep = 2
# CNNGAN = 3
# DenseVAE= 4
# CNNVAE= 5
Architecture = 3
## Setting up Hyperparameters - Data
Buffer_Size = 60000 # Total = 60000
Batch_Size = 128
TestData_Size = 1000 # Total = 10000
## Setting up HyperParameters - Training
Epochs = 50
LearningRate=1e-4
## Setting up HyperParameters - GAN
GAN_Noise_Dimension = 100
## Setting up HyperParameters - VAE
VAE_LatentVar_Dimension = 2
############################ Data Preprocessing ###############################
TrainingData, TestingData = datapreprocessing.DataPreprocessor(Architecture, Batch_Size, Buffer_Size, TestData_Size )
############################## Model Creation #################################
if (Architecture <= 3): # Model is GAN
Generator_Model , Discriminator_Model = ganbuilder.GANModel_Create(Architecture, GAN_Noise_Dimension)
else : # Model is VAE
Decoder_Model, Encoder_Model = vaebuilder.VAEModel_Create(Architecture, VAE_LatentVar_Dimension)
############################ Test Data Creation ###############################
if (Architecture <= 3): # Model is GAN
Test_Sample= tf.random.normal([16, GAN_Noise_Dimension])
else : # Model is VAE
for Test_Batch in TestingData.take(1):
Test_Sample = Test_Batch[0:16, :, :, :]
######################### Initializing Optimizers #############################
if (Architecture <= 3): # Model is GAN
# Generator Optimizer
Generator_Optimizer = tf.keras.optimizers.Adam(LearningRate)
# Discriminator Optimizer
Discriminator_Optimizer = tf.keras.optimizers.Adam(LearningRate)
else : # Model is VAE
VAE_Optimizer = tf.keras.optimizers.Adam(LearningRate)
############################## Training Model #################################
if (Architecture <= 3): # Model is GAN
epoch_num = 0
epoch_store = []
gen_loss_store = []
disc_loss_store = []
time_epoch_store = []
for epoch in range(Epochs):
epoch_num = epoch_num+1
epoch_store.append(epoch_num)
start_time = time.time()
for Image_Batch in TrainingData:
gen_loss, disc_loss, Generator_Model1, Discriminator_Model1 = ganbuilder.GAN_Training_Step(Generator_Model, Discriminator_Model, Generator_Optimizer, Discriminator_Optimizer, Batch_Size, GAN_Noise_Dimension, Image_Batch)
Generator_Model = Generator_Model1
Discriminator_Model = Discriminator_Model1
gen_loss_store.append(gen_loss.numpy())
disc_loss_store.append(disc_loss.numpy())
trainingplotter.Plot_Training_GAN(Generator_Model, epoch + 1, Test_Sample, gen_loss_store, disc_loss_store, epoch_store)
end_time = time.time()
time_epoch = end_time - start_time
time_epoch_store.append(time_epoch)
time_epochs_total = sum(time_epoch_store)
print ('Time for epoch {} is {} sec'.format(epoch + 1, time_epoch))
print ('Total time = {} sec'.format(time_epochs_total))
print ('Gen Loss = {} ; Disc Loss {} '.format(gen_loss, disc_loss))
else : # Model is VAE
epoch_num = 0
epoch_store = []
loss_store = []
time_epoch_store = []
for epoch in range(Epochs ):
epoch_num = epoch_num+1
epoch_store.append(epoch_num)
start_time = time.time()
for Image_Batch in TrainingData:
VAE_Loss, Encoder_Model1, Decoder_Model1= vaebuilder.VAE_Training_Step(Encoder_Model, Decoder_Model, VAE_Optimizer, Image_Batch )
Decoder_Model = Decoder_Model1
Encoder_Model = Encoder_Model1
end_time = time.time()
loss_store.append(VAE_Loss.numpy())
time_epoch = end_time - start_time
time_epoch_store.append(time_epoch)
time_epochs_total = sum(time_epoch_store)
print ('Time for epoch {} is {} sec'.format(epoch + 1, time_epoch))
print ('Total time = {} sec'.format(time_epochs_total))
print ('VAE Loss = {} '.format(VAE_Loss))
| 26.325967 | 232 | 0.577964 |
| 022e0e06f270298e22c410bf7262e0d8e2f14f34 | 13,525 | py | Python | src/azure-cli/azure/cli/command_modules/backup/custom_help.py | psignoret/azure-cli | 1a4a043750315f9a7f2894b4287126089978b615 | ["MIT"] | 1 | 2020-01-29T10:19:40.000Z | 2020-01-29T10:19:40.000Z | src/azure-cli/azure/cli/command_modules/backup/custom_help.py | psignoret/azure-cli | 1a4a043750315f9a7f2894b4287126089978b615 | ["MIT"] | 2 | 2021-01-15T09:24:07.000Z | 2021-01-15T09:30:10.000Z | src/azure-cli/azure/cli/command_modules/backup/custom_help.py | psignoret/azure-cli | 1a4a043750315f9a7f2894b4287126089978b615 | ["MIT"] | 1 | 2020-11-12T01:49:27.000Z | 2020-11-12T01:49:27.000Z |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
import json
import re
import os
from datetime import datetime, timedelta
from six.moves.urllib.parse import urlparse # pylint: disable=import-error
from knack.log import get_logger
from msrest.paging import Paged
from msrestazure.tools import parse_resource_id, is_valid_resource_id
from azure.mgmt.recoveryservicesbackup.models import OperationStatusValues, JobStatus
from azure.cli.core.util import CLIError, sdk_no_wait
from azure.cli.command_modules.backup._client_factory import (
job_details_cf, protection_container_refresh_operation_results_cf,
backup_operation_statuses_cf, protection_container_operation_results_cf)
logger = get_logger(__name__)
fabric_name = "Azure"
os_windows = 'Windows'
os_linux = 'Linux'
password_offset = 33
password_length = 15
backup_management_type_map = {"AzureVM": "AzureIaasVM", "AzureWorkload": "AzureWorkLoad",
"AzureStorage": "AzureStorage"}
# Client Utilities
def is_native_name(name):
return ";" in name
def is_id(identity):
return "/" in identity
def is_sql(resource_type):
return resource_type.lower() == 'sqldatabase'
def is_hana(resource_type):
return resource_type.lower() == 'saphanadatabase'
def is_wl_container(name):
return 'vmappcontainer' in name.lower()
def is_range_valid(start_date, end_date):
if start_date > end_date:
raise CLIError("""Start date must be earlier than end date.""")
def get_resource_id(resource_id):
return "/".join(resource_id.split('/')[3:])
def get_containers(client, container_type, status, resource_group_name, vault_name, container_name=None):
filter_dict = {
'backupManagementType': container_type,
'status': status
}
if container_name and not is_native_name(container_name):
filter_dict['friendlyName'] = container_name
filter_string = get_filter_string(filter_dict)
paged_containers = client.list(vault_name, resource_group_name, filter_string)
containers = get_list_from_paged_response(paged_containers)
if container_name and is_native_name(container_name):
return [container for container in containers if container.name == container_name]
return containers
def get_resource_name_and_rg(resource_group_name, name_or_id):
if is_valid_resource_id(name_or_id):
id_parts = parse_resource_id(name_or_id)
name = id_parts['name']
resource_group = id_parts['resource_group']
else:
name = name_or_id
resource_group = resource_group_name
return name, resource_group
def validate_container(container):
validate_object(container, "Container not found. Please provide a valid container_name.")
def validate_item(item):
validate_object(item, "Item not found. Please provide a valid item_name.")
def validate_policy(policy):
validate_object(policy, "Policy not found. Please provide a valid policy_name.")
def validate_object(obj, error_message):
if obj is None:
raise ValueError(error_message)
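# Illustrative example (names are hypothetical): with logical_name 'mydb' and
# path 'F:\data\mydb.mdf', get_target_path below appends 'mydb_<unix timestamp>.mdf'
# to the path of the data directory entry whose type matches resource_type.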
def get_target_path(resource_type, path, logical_name, data_directory_paths):
for filepath in data_directory_paths:
if filepath.type == resource_type:
data_directory_path = filepath
file_type = path.split('\\')[-1].split('.')[1]
file_name = logical_name + '_' + str(int(time.time())) + '.' + file_type
return data_directory_path.path + file_name
# Tracking Utilities
# pylint: disable=inconsistent-return-statements
def track_backup_ilr(cli_ctx, result, vault_name, resource_group):
operation_status = track_backup_operation(cli_ctx, resource_group, result, vault_name)
if operation_status.properties:
recovery_target = operation_status.properties.recovery_target
return recovery_target.client_scripts
# pylint: disable=inconsistent-return-statements
def track_backup_job(cli_ctx, result, vault_name, resource_group):
job_details_client = job_details_cf(cli_ctx)
operation_status = track_backup_operation(cli_ctx, resource_group, result, vault_name)
if operation_status.properties:
job_id = operation_status.properties.job_id
job_details = job_details_client.get(vault_name, resource_group, job_id)
return job_details
def track_backup_operation(cli_ctx, resource_group, result, vault_name):
backup_operation_statuses_client = backup_operation_statuses_cf(cli_ctx)
operation_id = get_operation_id_from_header(result.response.headers['Azure-AsyncOperation'])
operation_status = backup_operation_statuses_client.get(vault_name, resource_group, operation_id)
while operation_status.status == OperationStatusValues.in_progress.value:
time.sleep(1)
operation_status = backup_operation_statuses_client.get(vault_name, resource_group, operation_id)
return operation_status
def track_refresh_operation(cli_ctx, result, vault_name, resource_group):
protection_container_refresh_operation_results_client = protection_container_refresh_operation_results_cf(cli_ctx)
operation_id = get_operation_id_from_header(result.response.headers['Location'])
result = sdk_no_wait(True, protection_container_refresh_operation_results_client.get,
vault_name, resource_group, fabric_name, operation_id)
while result.response.status_code == 202:
time.sleep(1)
result = sdk_no_wait(True, protection_container_refresh_operation_results_client.get,
vault_name, resource_group, fabric_name, operation_id)
def track_register_operation(cli_ctx, result, vault_name, resource_group, container_name):
protection_container_operation_results_client = protection_container_operation_results_cf(cli_ctx)
operation_id = get_operation_id_from_header(result.response.headers['Location'])
result = sdk_no_wait(True, protection_container_operation_results_client.get,
vault_name, resource_group, fabric_name, container_name, operation_id)
while result.response.status_code == 202:
time.sleep(1)
result = sdk_no_wait(True, protection_container_operation_results_client.get,
vault_name, resource_group, fabric_name, container_name, operation_id)
def track_inquiry_operation(cli_ctx, result, vault_name, resource_group, container_name):
protection_container_operation_results_client = protection_container_operation_results_cf(cli_ctx)
operation_id = get_operation_id_from_header(result.response.headers['Location'])
result = sdk_no_wait(True, protection_container_operation_results_client.get,
vault_name, resource_group, fabric_name, container_name, operation_id)
while result.response.status_code == 202:
time.sleep(1)
result = sdk_no_wait(True, protection_container_operation_results_client.get,
vault_name, resource_group, fabric_name, container_name, operation_id)
def job_in_progress(job_status):
return job_status in [JobStatus.in_progress.value, JobStatus.cancelling.value]
# List Utilities
def get_list_from_paged_response(obj_list):
return list(obj_list) if isinstance(obj_list, Paged) else obj_list
def get_none_one_or_many(obj_list):
if not obj_list:
return None
if len(obj_list) == 1:
return obj_list[0]
return obj_list
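# get_filter_string below renders a dict as an OData-style filter with keys in sorted order,
# e.g. {'friendlyName': 'myvm', 'status': 'Registered'} (illustrative values) becomes
# "friendlyName eq 'myvm' and status eq 'Registered'".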
def get_filter_string(filter_dict):
filter_list = []
for k, v in sorted(filter_dict.items()):
filter_segment = None
if isinstance(v, str):
filter_segment = "{} eq '{}'".format(k, v)
elif isinstance(v, datetime):
filter_segment = "{} eq '{}'".format(k, v.strftime('%Y-%m-%d %I:%M:%S %p')) # yyyy-MM-dd hh:mm:ss tt
elif isinstance(v, bool):
filter_segment = "{} eq '{}'".format(k, str(v))
if filter_segment is not None:
filter_list.append(filter_segment)
filter_string = " and ".join(filter_list)
return None if not filter_string else filter_string
def get_query_dates(end_date, start_date):
query_start_date = None
query_end_date = None
if start_date and end_date:
query_start_date = start_date
query_end_date = end_date
elif not start_date and end_date:
query_end_date = end_date
query_start_date = query_end_date - timedelta(days=30)
elif start_date and not end_date:
query_start_date = start_date
query_end_date = query_start_date + timedelta(days=30)
return query_end_date, query_start_date
# JSON Utilities
def get_container_from_json(client, container):
return get_object_from_json(client, container, 'ProtectionContainerResource')
def get_vault_from_json(client, vault):
return get_object_from_json(client, vault, 'Vault')
def get_vm_from_json(client, vm):
return get_object_from_json(client, vm, 'VirtualMachine')
def get_policy_from_json(client, policy):
return get_object_from_json(client, policy, 'ProtectionPolicyResource')
def get_item_from_json(client, item):
return get_object_from_json(client, item, 'ProtectedItemResource')
def get_protectable_item_from_json(client, item):
return get_object_from_json(client, item, 'WorkloadProtectableItemResource')
def get_job_from_json(client, job):
return get_object_from_json(client, job, 'JobResource')
def get_recovery_point_from_json(client, recovery_point):
return get_object_from_json(client, recovery_point, 'RecoveryPointResource')
def get_or_read_json(json_or_file):
json_obj = None
if is_json(json_or_file):
json_obj = json.loads(json_or_file)
elif os.path.exists(json_or_file):
with open(json_or_file) as f:
json_obj = json.load(f)
if json_obj is None:
raise ValueError(
"""
The variable passed should be in valid JSON format and be supplied by az backup CLI commands.
Make sure that you use output of relevant 'az backup show' commands and the --out is 'json'
(use -o json for explicit JSON output) while assigning value to this variable.
Take care to edit only the values and not the keys within the JSON file or string.
""")
return json_obj
def get_object_from_json(client, json_or_file, class_name):
# Determine if input is json or file
json_obj = get_or_read_json(json_or_file)
# Deserialize json to object
param = client._deserialize(class_name, json_obj) # pylint: disable=protected-access
if param is None:
raise ValueError(
"""
The variable passed should be in valid JSON format and be supplied by az backup CLI commands.
Make sure that you use output of relevant 'az backup show' commands and the --out is 'json'
(use -o json for explicit JSON output) while assigning value to this variable.
Take care to edit only the values and not the keys within the JSON file or string.
""")
return param
def is_json(content):
try:
json.loads(content)
except ValueError:
return False
return True
# ID Utilities
def get_protection_container_uri_from_id(arm_id):
m = re.search('(?<=protectionContainers/)[^/]+', arm_id)
return m.group(0)
def get_protectable_item_uri_from_id(arm_id):
m = re.search('(?<=protectableItems/)[^/]+', arm_id)
return m.group(0)
def get_protected_item_uri_from_id(arm_id):
m = re.search('(?<=protectedItems/)[^/]+', arm_id)
return m.group(0)
def get_vm_name_from_vm_id(arm_id):
m = re.search('(?<=virtualMachines/)[^/]+', arm_id)
return m.group(0)
def get_resource_group_from_id(arm_id):
m = re.search('(?<=resourceGroups/)[^/]+', arm_id)
return m.group(0)
def get_operation_id_from_header(header):
parse_object = urlparse(header)
return parse_object.path.split("/")[-1]
def get_vault_from_arm_id(arm_id):
m = re.search('(?<=vaults/)[^/]+', arm_id)
return m.group(0)
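# validate_and_extract_container_type below resolves the backup management type either from
# the explicit backup_management_type argument or from the prefix of a native container name
# (the part before the first ';', e.g. 'IaasVMContainer' maps to 'AzureIaasVM').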
def validate_and_extract_container_type(container_name, backup_management_type):
if not is_native_name(container_name) and backup_management_type is None:
raise CLIError("""backup management type required""")
if backup_management_type is not None:
if backup_management_type in backup_management_type_map.values():
return backup_management_type
return backup_management_type_map[backup_management_type]
container_type = container_name.split(";")[0]
container_type_mappings = {"IaasVMContainer": "AzureIaasVM", "StorageContainer": "AzureStorage",
"VMAppContainer": "AzureWorkload"}
if container_type in container_type_mappings:
return container_type_mappings[container_type]
return None
def validate_update_policy_request(existing_policy, new_policy):
existing_backup_management_type = existing_policy.properties.backup_management_type
new_backup_management_type = new_policy.properties.backup_management_type
if existing_backup_management_type != new_backup_management_type:
raise CLIError("BackupManagementType cannot be different than the existing type.")
| 35.498688 | 118 | 0.725545 |
| 4fc56b72dce0919f8d842c98ff284a1feb46fa53 | 461 | py | Python | botoy/decorators/_from_botself.py | yuban10703/botoy | 892a170caea08362c22f1d909545a8c7a962ba3b | ["MIT"] | 32 | 2020-10-11T15:18:59.000Z | 2021-04-11T10:39:07.000Z | botoy/decorators/_from_botself.py | BrandTime/botoy | 8b4b1172dfdcac00ff9f17157538cf7e5732aae6 | ["MIT"] | 17 | 2020-10-12T15:56:19.000Z | 2021-04-03T01:53:05.000Z | botoy/decorators/_from_botself.py | BrandTime/botoy | 8b4b1172dfdcac00ff9f17157538cf7e5732aae6 | ["MIT"] | 7 | 2020-10-12T23:55:57.000Z | 2021-04-15T16:14:46.000Z |
from ..model import FriendMsg, GroupMsg
def from_botself(func=None):
"""只处理机器人自身的消息 GroupMsg, FriendMsg"""
if func is None:
return from_botself
def inner(ctx):
assert isinstance(ctx, (GroupMsg, FriendMsg))
if isinstance(ctx, GroupMsg):
userid = ctx.FromUserId
else:
userid = ctx.FromUin
if userid == ctx.CurrentQQ:
return func(ctx)
return None
return inner
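# Usage sketch (receiver name is illustrative): decorate a message receiver so it only fires
# for messages sent by the bot account itself.
#
# @from_botself
# def handle_own_message(ctx):
#     ...  # runs only when the sender's QQ equals ctx.CurrentQQ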
| 23.05 | 53 | 0.598698 |
| c2dbffee0eeb9647598a5376bee3834f0c9ccff5 | 4,420 | py | Python | sjb/actions/clonerefs.py | brenton/aos-cd-jobs | 34e427bb7091c52791bc93a34f062e57dc005082 | ["Apache-2.0"] | 45 | 2017-05-09T15:49:06.000Z | 2021-11-07T19:48:35.000Z | sjb/actions/clonerefs.py | brenton/aos-cd-jobs | 34e427bb7091c52791bc93a34f062e57dc005082 | ["Apache-2.0"] | 1,313 | 2017-01-19T13:40:43.000Z | 2022-03-30T14:25:44.000Z | sjb/actions/clonerefs.py | brenton/aos-cd-jobs | 34e427bb7091c52791bc93a34f062e57dc005082 | ["Apache-2.0"] | 165 | 2017-01-17T22:19:04.000Z | 2022-03-02T12:15:13.000Z |
from __future__ import absolute_import, print_function, unicode_literals
from jinja2 import Template
from .forward_parameter import ForwardParametersAction
from .interface import Action
from .named_shell_task import render_task
from .script import ScriptAction
_PARAMETER_TEMPLATE = Template(""" <hudson.model.StringParameterDefinition>
<name>{{ name }}</name>
<description>{{ description }}</description>
<defaultValue></defaultValue>
</hudson.model.StringParameterDefinition>""")
_GCS_UPLOAD = """docker run -e JOB_SPEC="${JOB_SPEC}" -v /data:/data:z registry.ci.openshift.org/ci/initupload:latest --clone-log=/data/clone.json --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin
"""
_CLONEREFS_ACTION_TEMPLATE = Template("""JOB_SPEC="$( jq --compact-output '.buildid |= "'"${BUILD_NUMBER}"'"' <<<"${JOB_SPEC}" )"
for image in 'registry.ci.openshift.org/ci/clonerefs:latest' 'registry.ci.openshift.org/ci/initupload:latest'; do
for (( i = 0; i < 5; i++ )); do
if docker pull "${image}"; then
break
fi
done
done
clonerefs_args=${CLONEREFS_ARGS:-{% for repo in repos %}--repo={{repo}} {% endfor %}}
docker run -v /data:/data:z registry.ci.openshift.org/ci/clonerefs:latest --src-root=/data --log=/data/clone.json ${PULL_REFS:+--repo=${REPO_OWNER},${REPO_NAME}=${PULL_REFS}} ${clonerefs_args}
{{upload_to_gcs_step}}
sudo chmod -R a+rwX /data
sudo chown -R origin:origin-git /data
""")
class ClonerefsAction(Action):
"""
A ClonerefsAction generates a build step that
synchronizes repositories on the remote host
"""
def __init__(self, repos):
self.repos = repos
def generate_parameters(self):
return [
_PARAMETER_TEMPLATE.render(name='JOB_SPEC', description='JSON form of job specification.'),
_PARAMETER_TEMPLATE.render(name='buildId', description='Unique build number for each run.'),
_PARAMETER_TEMPLATE.render(name='BUILD_ID', description='Unique build number for each run.'),
_PARAMETER_TEMPLATE.render(name='REPO_OWNER', description='GitHub org that triggered the job.'),
_PARAMETER_TEMPLATE.render(name='REPO_NAME', description='GitHub repo that triggered the job.'),
_PARAMETER_TEMPLATE.render(name='PULL_BASE_REF', description='Ref name of the base branch.'),
_PARAMETER_TEMPLATE.render(name='PULL_BASE_SHA', description='Git SHA of the base branch.'),
_PARAMETER_TEMPLATE.render(name='PULL_REFS', description='All refs to test.'),
_PARAMETER_TEMPLATE.render(name='PULL_NUMBER', description='Pull request number.'),
_PARAMETER_TEMPLATE.render(name='PULL_PULL_SHA', description='Pull request head SHA.'),
_PARAMETER_TEMPLATE.render(name='CLONEREFS_ARGS', description='Extra arguments to pass to clonerefs.'),
]
def generate_build_steps(self):
steps = []
upload_to_gcs_step = ""
if self.output_format == "xml":
steps = [render_task(
title="FORWARD GCS CREDENTIALS TO REMOTE HOST",
command="""for (( i = 0; i < 10; i++ )); do
if scp -F ${WORKSPACE}/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json; then
break
fi
done""",
output_format=self.output_format
)
]
upload_to_gcs_step = _GCS_UPLOAD
forward_action = ForwardParametersAction(
parameters=['JOB_SPEC', 'buildId', 'BUILD_ID', 'REPO_OWNER', 'REPO_NAME', 'PULL_BASE_REF', 'PULL_BASE_SHA',
'PULL_REFS', 'PULL_NUMBER', 'PULL_PULL_SHA', 'JOB_SPEC', 'BUILD_NUMBER', 'CLONEREFS_ARGS']
)
forward_action.output_format = self.output_format
steps += forward_action.generate_build_steps()
script_action = ScriptAction(
repository=None,
title="SYNC REPOSITORIES",
script=_CLONEREFS_ACTION_TEMPLATE.render(repos=self.repos, upload_to_gcs_step=upload_to_gcs_step),
timeout=None,
output_format=self.output_format
)
steps += script_action.generate_build_steps()
return steps
| 47.021277 | 308 | 0.664932 |
| a60a54823f2fca971925e2f361f01f1672fe6ea9 | 2,136 | py | Python | cloudshell/networking/arista/command_actions/enable_disable_snmp_actions.py | QualiSystems/cloudshell-networking-arista- | 011ff605244a98bb488fec985bd0e053af9855d0 | ["Apache-2.0"] | null | null | null | cloudshell/networking/arista/command_actions/enable_disable_snmp_actions.py | QualiSystems/cloudshell-networking-arista- | 011ff605244a98bb488fec985bd0e053af9855d0 | ["Apache-2.0"] | 9 | 2018-04-03T12:02:29.000Z | 2021-07-08T09:07:29.000Z | cloudshell/networking/arista/command_actions/enable_disable_snmp_actions.py | QualiSystems/cloudshell-networking-arista- | 011ff605244a98bb488fec985bd0e053af9855d0 | ["Apache-2.0"] | 2 | 2017-02-08T23:52:21.000Z | 2018-07-04T15:33:36.000Z |
import re
from cloudshell.cli.command_template.command_template_executor import (
CommandTemplateExecutor,
)
from cloudshell.networking.arista.command_templates.arista_configuration_templates import ( # noqa: E501
DISABLE_SNMP,
ENABLE_SNMP,
ENABLE_VRF_FOR_SNMP,
SHOW_SNMP,
SHOW_SNMP_COMMUNITY,
)
class EnableDisableSnmpActions(object):
def __init__(self, cli_service, logger):
"""Reboot actions.
:param cli_service: config mode cli service
:type cli_service: cloudshell.cli.cli_service_impl.CliService
:param logger:
:type logger: Logger
"""
self._cli_service = cli_service
self._logger = logger
def is_configured(self, snmp_community):
"""Check snmp community configured."""
output = CommandTemplateExecutor(
self._cli_service, SHOW_SNMP_COMMUNITY
).execute_command()
return snmp_community in output
def enable_snmp(self, snmp_community):
"""Enable snmp on the device."""
return CommandTemplateExecutor(self._cli_service, ENABLE_SNMP).execute_command(
snmp_community=snmp_community
)
def disable_snmp(self, snmp_community):
"""Disable SNMP."""
return CommandTemplateExecutor(self._cli_service, DISABLE_SNMP).execute_command(
snmp_community=snmp_community
)
def is_configured_vrf(self, vrf_name):
"""Check that vrf name is enabled for SNMP agent.
:param str vrf_name:
:rtype: bool
"""
output = CommandTemplateExecutor(
self._cli_service,
SHOW_SNMP,
).execute_command()
vrfs = re.search(r"^SNMP agent enabled in VRFs:\s(.+)$", output, re.MULTILINE)
vrfs = map(str.strip, vrfs.group(1).split(","))
return vrf_name in vrfs
def enable_vrf_for_snmp_server(self, vrf_name):
"""Enable vrf for SNMP server.
:param str vrf_name:
"""
return CommandTemplateExecutor(
self._cli_service,
ENABLE_VRF_FOR_SNMP,
).execute_command(vrf_name=vrf_name)
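# Note: is_configured_vrf parses the output of "show snmp" and expects a line such as
# "SNMP agent enabled in VRFs: default, mgmt" (VRF names here are illustrative); the names
# are split on commas before the membership check.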
| 29.666667 | 105 | 0.656367 |
| b259061e9170fd864d512941537113e4373cfd02 | 2,375 | py | Python | tools/distrib/python/grpcio_tools/_parallel_compile_patch.py | arghyadip01/grpc | 9e10bfc8a096ef91a327e22f84f10c0fabff4417 | ["Apache-2.0"] | 36,552 | 2015-02-26T17:30:13.000Z | 2022-03-31T22:41:33.000Z | tools/distrib/python/grpcio_tools/_parallel_compile_patch.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | ["Apache-2.0"] | 23,536 | 2015-02-26T17:50:56.000Z | 2022-03-31T23:39:42.000Z | tools/distrib/python/grpcio_tools/_parallel_compile_patch.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | ["Apache-2.0"] | 11,050 | 2015-02-26T17:22:10.000Z | 2022-03-31T10:12:35.000Z |
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patches the compile() to allow enable parallel compilation of C/C++.
build_ext has lots of C/C++ files and normally them one by one.
Enabling parallel build helps a lot.
"""
import distutils.ccompiler
import os
try:
BUILD_EXT_COMPILER_JOBS = int(
os.environ['GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS'])
except KeyError:
import multiprocessing
BUILD_EXT_COMPILER_JOBS = multiprocessing.cpu_count()
# monkey-patch for parallel compilation
def _parallel_compile(self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
# setup the same way as distutils.ccompiler.CCompiler
# https://github.com/python/cpython/blob/31368a4f0e531c19affe2a1becd25fc316bc7501/Lib/distutils/ccompiler.py#L564
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
def _compile_single_file(obj):
try:
src, ext = build[obj]
except KeyError:
return
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# run compilation of individual files in parallel
import multiprocessing.pool
multiprocessing.pool.ThreadPool(BUILD_EXT_COMPILER_JOBS).map(
_compile_single_file, objects)
return objects
def monkeypatch_compile_maybe():
"""Monkeypatching is dumb, but the build speed gain is worth it."""
if BUILD_EXT_COMPILER_JOBS > 1:
distutils.ccompiler.CCompiler.compile = _parallel_compile
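# Typical use (a sketch, not part of this module): a setup.py imports this module and calls
# monkeypatch_compile_maybe() before build_ext runs, so extension sources are compiled by a
# thread pool of BUILD_EXT_COMPILER_JOBS workers instead of one at a time.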
| 36.538462 | 117 | 0.693474 |
| d58021a089f6a8908be8c186e662e5cef86fae80 | 9,091 | py | Python | TransMagic/patch-TransMagic103b.exe-English.py | beardedfoo/wonderwitch-english-translation | 0bd6f067767644a596e88c2bcfa24ba7f8c02a32 | ["Apache-2.0"] | 3 | 2020-03-31T02:29:49.000Z | 2021-02-22T12:04:50.000Z | TransMagic/patch-TransMagic103b.exe-English.py | beardedfoo/wonderwitch-english-translation | 0bd6f067767644a596e88c2bcfa24ba7f8c02a32 | ["Apache-2.0"] | null | null | null | TransMagic/patch-TransMagic103b.exe-English.py | beardedfoo/wonderwitch-english-translation | 0bd6f067767644a596e88c2bcfa24ba7f8c02a32 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# coding=utf-8
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
import sys
from io import BytesIO
DEFAULT_ENCODING_IN='shiftjis'
DEFAULT_ENCODING_OUT='utf-8'
def patch(s, location, original, replacement, encoding_in=DEFAULT_ENCODING_IN, encoding_out=DEFAULT_ENCODING_OUT, pad=None):
# Encode the expected and replacement text as bytes
original_bytes = original.encode(encoding_in)
replacement_bytes = replacement.encode(encoding_out)
# Add any specified padding as necessary
if pad != None:
if len(pad.encode(encoding_out)) != 1:
raise ValueError('only single-byte padding is supported')
while len(replacement_bytes) < len(original_bytes):
replacement_bytes += pad.encode(encoding_out)
# Check the existing value is correct at this location
s.seek(location)
existing = s.read(len(original_bytes))
if existing != original_bytes:
raise ValueError('unexpected value @ {:X} found "{}" instead'.format(location, existing))
# Ensure the replacement fits
if len(replacement_bytes) != len(original_bytes):
raise ValueError('replacement value does not match original size @ {:X}'.format(location))
# Write the patch
s.seek(location)
s.write(replacement_bytes)
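# Illustrative example (offset is hypothetical): patch(exe, 0x1234, "メモ", "Memo", pad='\0')
# checks that the Shift-JIS bytes of "メモ" are present at offset 0x1234, then overwrites them
# with UTF-8 "Memo", NUL-padded up to the length of the original bytes.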
def multipatch(s, locations, original, replacement, encoding_in=DEFAULT_ENCODING_IN, encoding_out=DEFAULT_ENCODING_OUT, pad=None):
for l in locations:
patch(s, l, original, replacement, encoding_in=encoding_in, encoding_out=encoding_out, pad=pad)
def main():
# Read in the original exe
with open('C:\\WWitch\\beta\\TransMagic103b.exe', 'rb') as f:
exe = BytesIO(f.read())
# /rom0/ (プログラムエリア)
patch(exe, 0xdd030, "/rom0/ (プログラムエリア)", "/rom0/ (program)", pad='\0')
# /ram0/ (ワークエリア)
patch(exe, 0xdd04b, "/ram0/ (ワークエリア)", "/ram0/ (work)", pad='\0')
# カートリッジ
multipatch(exe, [0xdefce, 0xdcd1b], "カートリッジ", "Cartridge", pad='\0')
# WonderWitchに接続されていません
multipatch(
exe,
[0xb89de, 0xb89fe, 0xb8a1e, 0xb8a3e, 0xb8bc4, 0xb8c97,
0xb8d64, 0xb8efa, 0xb8f29, 0xb8f49, 0xb902d, 0xb9151,
0xb924d, 0xb9456, 0xb9c15, 0xb9cf8, 0xb9d3f],
"WonderWitchに接続されていません",
"WonderWitch connection error", pad='\0')
# プロトコルログ
patch(exe, 0xdea5a, "プロトコルログ", "Protocol Log", pad='\0')
# バイト
# Using a space pad here as these could occur mid-string
multipatch(exe, [0xb9e1e, 0xb9e2d, 0xb9e39, 0xb9e48, 0xb9e4f], "バイト", " bytes", pad=' ')
# マイコンピュータ
patch(exe, 0xdd0a6, "マイコンピュータ", "My Computer", pad='\0')
# 名前
multipatch(exe, [0xdd10e, 0xdcd80, 0xe0ecf], "名前", "Name")
# 接続中
# This should be "connection" etc., but that's too big
multipatch(exe, [0xb8d89, 0xd9499], "接続中", "Link", pad='\0')
# ファイル(&F)
patch(exe, 0xdec8d, "ファイル(&F)", "&File", pad='\0')
# カートリッジ(&C)
patch(exe, 0xdeea1, "カートリッジ(&C)", "&Cartridge", pad='\0')
# ヘルプ(&H)
patch(exe, 0xdf0ee, "ヘルプ(&H)", "&Help", pad='\0')
# 接続(&C)
patch(exe, 0xdecb6, "接続(&C)", "&Connect", pad='\0')
# 切断(&D)
# Should be "Disconnect", but that's too big
patch(exe, 0xdecf5, "切断(&D)", "Close", pad='\0')
# プロトコルログ保存(&S)
patch(exe, 0xded4e, "プロトコルログ保存(&S)", "&Save Log", pad='\0')
# シリアルポート設定...
patch(exe, 0xdee04, "シリアルポート設定...", "Serial Settings", pad='\0')
# プロトコルログクリア
patch(exe, 0xded9c, "プロトコルログクリア", "Clear Log", pad='\0')
# エリア整頓(&F)
patch(exe, 0xdeecd, "エリア整頓(&F)", "Defragment", pad='\0')
# エリア整頓
patch(exe, 0xe04a7, "エリア整頓", "Defragment")
# 終了(&X)
patch(exe, 0xdee68, "終了(&X)", "E&xit", pad='\0')
# エリア全クリア(&A)
patch(exe, 0xdef27, "エリア全クリア(&A)", "Format", pad='\0')
# エリア全クリア
patch(exe, 0xe04df, "エリア全クリア", "Format", pad='\0')
# 時刻設定...
patch(exe, 0xdef82, "時刻設定...", "Time/Date", pad='\0')
# アップグレード...
patch(exe, 0xdf069, "アップグレード...", "Upgrade", pad='\0')
# バックアップ...
patch(exe, 0xdf0ad, "バックアップ...", "Backup", pad='\0')
# TransMagicについて...
patch(exe, 0xdf11d, "TransMagicについて...", "About TransMagic", pad='\0')
# アイコンの整列
multipatch(exe, [0xdfe3b, 0xe02ff], "アイコンの整列", "Arrange Icons", pad='\0')
# 名前順
multipatch(exe, [0xdfe6e, 0xe0333], "名前順", "ByName", pad='\0')
# サイズ順
multipatch(exe, [0xdfeb8, 0xe037f], "サイズ順", "BySize", pad='\0')
# 日付順
multipatch(exe, [0xdff04, 0xe03cd], "日付順", "ByDate", pad='\0')
# 最新の情報に更新
multipatch(exe, [0xdff6e, 0xe0440], "最新の情報に更新", "Refresh", pad='\0')
# フォルダ作成...
patch(exe, 0xdffdf, "フォルダ作成...", "New Folder", pad='\0')
# 開く(&O)
patch(exe, 0xe0056, "開く(&O)", "&Open", pad='\0')
# WonderWitchへ送信
patch(exe, 0xe00b8, "WonderWitchへ送信", "Send To WWitch", pad='\0')
# 削除(&D)
patch(exe, 0xe0127, "削除(&D)", "&Delete", pad='\0')
# 名前の変更(&M)
patch(exe, 0xe016b, "名前の変更(&M)", "Rena&me", pad='\0')
# 空き容量
patch(exe, 0xb89b3, "空き容量", "Free: ", pad='\0')
# 名称未設定.txt
patch(exe, 0xb8f1a, "名称未設定.txt", "wwitch-log.txt", pad='\0')
# テキストファイル
# This occurs mid-string, so we must not null terminate. Pad with space.
patch(exe, 0xe020f, "テキストファイル", "Text Document", pad=' ')
# リッチテキストファイル
# This occurs mid-string, so we must not null terminate. Pad with space.
patch(exe, 0xe022d, "リッチテキストファイル", "Rich Text Document", pad=' ')
# WonderWitchをモニタモードで立ち上げて
# Recv System しておいてください。
#
# OSをアップグレードします。よろしいですか?
patch(exe, 0xb9a6c, "WonderWitchをモニタモードで立ち上げて", "Launch WonderWitch in monitor mode.\r\n", pad=' ')
patch(exe, 0xb9a93, "Recv System しておいてください。\r\n", "Do not poweroff WonderWitch!\r\n", pad=' ')
patch(exe, 0xb9ab7, "OSをアップグレードします。よろしいですか?", "Proceed with OS upgrade?", pad=' ')
# 切断中
# TODO: Improve the following translation. translate.google.com says this means "cutting", but that doesn't make sense.
patch(exe, 0xdec34, "切断中", "Cut?", pad='\0')
# 接続
# This should be connection, but that doesn't fit.
patch(exe, 0xdebb1, "接続", "Link")
# 通信ポート:
patch(exe, 0xd921e, "通信ポート:", "Port:", pad='\0')
# 通信速度
patch(exe, 0xd9262, "通信速度", "Speed:", pad='\0')
# キャンセル
multipatch(exe, [0xd9414, 0xd95f9, 0xd9af2, 0xe0a8b, 0xe0cbf, 0xe1175, 0xe178c], "キャンセル", "Cancel", pad='\0')
# フォルダを選択してください
patch(exe, 0xb8995, "フォルダを選択してください", "Select Folder:", pad='\0')
# 通信設定
patch(exe, 0xd90e7, "通信設定", "Serial", pad='\0')
# %sを全クリアします。エリア内のファイルは全て消去されます。
# よろしいですか?
patch(exe, 0xb9104, "%sを全クリアします。エリア内のファイルは全て消去されます。", "All files on %s will be deleted.", pad=' ')
patch(exe, 0xb9140, "よろしいですか?", "Proceed?", pad='\0')
# WonderWitchに接続しています・・・
patch(exe, 0xd9599, "WonderWitchに接続しています・・・", "Connecting to WonderWitch...", pad='\0')
# フォルダ作成
patch(exe, 0xd968e, "フォルダ作成", "New Folder", pad='\0')
# 作成
patch(exe, 0xd982b, "作成", "Make", pad='\0')
# キャンセル
patch(exe, 0xd9891, "キャンセル", "Cancel", pad='\0')
# 時刻設定
patch(exe, 0xd995f, "時刻設定", "Set Time", pad='\0')
# カートリッジ時刻
patch(exe, 0xd9b54, "カートリッジ時刻", "Cartridge Time", pad='\0')
# 設定時刻
patch(exe, 0xd9c5e, "設定時刻", "Set Time", pad='\0')
# 作成するフォルダの名前の入力してください。
patch(exe, 0xd97c6, "作成するフォルダの名前の入力してください。", "Please enter folder name", pad='\0')
# サイズ
multipatch(exe, [0xdcdb2, 0xdd140], "サイズ", "Size", pad='\0')
# 日付
multipatch(exe, [0xdcdcc, 0xdd15a], "日付", "Date", pad='\0')
# 属性
patch(exe, 0xdcde4, "属性", "Attr", pad='\0')
# 説明
multipatch(exe, [0xdcdf4, 0xe0f0d], "説明", "Info", pad='\0')
# 通信ポートのオープンに失敗しました。
multipatch(exe, [0xb9651, 0xb8d90], "通信ポートのオープンに失敗しました。", "Failed to open comm port", pad='\0')
# TODO: This doesn't work
# Set the dialog font/charset to a western one instead of Japanese
# patch(exe, 0xdd177, "SHIFTJIS_CHARSET", "ANSI_CHARSET", pad='\0')
# patch(exe, 0xdd1ba, "MS Pゴシック", "MS PGothic", pad='\0')
# WonderWitchをモニタモードで立ち上げて
# Send System しておいてください。
#
# OSをバックアップします。よろしいですか?
patch(exe, 0xb9885, "WonderWitchをモニタモードで立ち上げて\r\n", "Launch WonderWitch in monitor mode.\r\n", pad=' ')
patch(exe, 0xb98ac, "Send System しておいてください。\r\n", "Do not poweroff WonderWitch!\r\n", pad=' ')
patch(exe, 0xb98d0, "OSをバックアップします。よろしいですか?", "Proceed with OS backup?", pad='\0')
# パケット数:
patch(exe, 0xe0a40, "パケット数:", "Packets: ", pad='\0')
# ファイル転送中・・・
patch(exe, 0xe0b1a, "ファイル転送中・・・", "Transferring file", pad='\0')
# 転送中
patch(exe, 0xe0c48, "転送中", "Xfer..", pad='\0')
# TODO: This causes a crash on program launch
# 以下の情報で送信します
# patch(exe, 0xe0d9d, "以下の情報で送信します", "Send the following:", pad=' ')
# FreyaOSを転送しています
patch(exe, 0xe08d2, "FreyaOSを転送しています", "FreyaOS Upgrade/Backup", pad=' ')
# Write a new exe
with open('C:\\WWitch\\beta\\TransMagic103b-EnglishPatched.exe', 'wb') as f:
exe.seek(0)
f.write(exe.read())
return 0
if __name__ == '__main__':
main()
| 30.712838 | 130 | 0.609724 |
| 43cb0ccb3615bd7b82d7e287a24483b8a68bdcf0 | 1,010 | py | Python | apps/delete_memo.py | dgu-dna/dna-bot | 81777a894e92cdee0acc20f9abae5fe30ee03b45 | ["MIT"] | null | null | null | apps/delete_memo.py | dgu-dna/dna-bot | 81777a894e92cdee0acc20f9abae5fe30ee03b45 | ["MIT"] | null | null | null | apps/delete_memo.py | dgu-dna/dna-bot | 81777a894e92cdee0acc20f9abae5fe30ee03b45 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from apps.decorators import on_command
from apps.slackutils import isNumber
from time import localtime, strftime
import json
CACHE_DEFAULT_URL = './apps/memo_cache/memo_cache.json'
@on_command(['!메모삭제', '!ㅁㅁㅅㅈ', '!aatw'])
def run(robot, channel, tokens, user, command):
'''Deletes memos.'''
token_count = len(tokens)
user = str(user)
if token_count < 1:
return channel, '사용법) !메모삭제 <메모 번호>'
del_line = []
for num in tokens:
if(isNumber(num)):
del_line.append(int(num))
del_line.sort(reverse=True)
jdat = json.loads(open(CACHE_DEFAULT_URL).read())
if del_line[0] > len(jdat[user]):
return channel, '그건 안댐;'
for line in del_line:
del jdat[user][line - 1]
with open(CACHE_DEFAULT_URL, 'w') as fp:
json.dump(jdat, fp, indent=4)
del_line = map(lambda s: str(s), del_line)
msg = '<' + ', '.join(sorted(del_line)) + '> 메모를 삭제 했음.'
return channel, msg
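# Example: "!메모삭제 2 5" deletes the 2nd and 5th memos stored for the calling user in
# memo_cache.json (memo numbers are 1-based).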
| 31.5625 | 60 | 0.635644 |
| 363188b5004ec25039959c0c84743eb8a924487b | 4,650 | py | Python | personal_context_builder/wenet_analysis_models.py | InternetOfUs/personal-context-builder | 89e7388d622bc0efbf708542566fdcdca667a4e5 | ["Apache-2.0"] | null | null | null | personal_context_builder/wenet_analysis_models.py | InternetOfUs/personal-context-builder | 89e7388d622bc0efbf708542566fdcdca667a4e5 | ["Apache-2.0"] | null | null | null | personal_context_builder/wenet_analysis_models.py | InternetOfUs/personal-context-builder | 89e7388d622bc0efbf708542566fdcdca667a4e5 | ["Apache-2.0"] | null | null | null |
"""
Models (in the ML sense) used to create user profiles.
Copyright (c) 2021 Idiap Research Institute, https://www.idiap.ch/
Written by William Droz <william.droz@idiap.ch>,
"""
import pickle
from functools import partial
from os.path import join
from typing import Callable, List, Optional
import numpy as np
from gensim.corpora import Dictionary # type: ignore
from sklearn.decomposition import LatentDirichletAllocation # type: ignore
from personal_context_builder import config
from personal_context_builder.gensim_hdp import HdpTransformer # type: ignore
class BaseModel(object):
def predict(self, *args, **kwargs):
raise NotImplementedError("not implemented")
def transform(self, *args, **kwargs):
raise NotImplementedError("not implemented")
def fit(self, *args, **kwargs):
raise NotImplementedError("not implemented")
class BaseModelWrapper(BaseModel):
def __init__(self, model_class: Optional[Callable] = None, name: str = "unamed"):
self._model_class = model_class
self._name = name
if self._model_class is not None:
self._model_instance = self._model_class()
else:
self._model_instance = None
def transform(self, *args, **kwargs):
if self._model_instance is None:
raise Exception("model has to be loaded of trained before transform")
return self._model_instance.transform(*args, **kwargs)
def predict(self, *args, **kwargs):
if self._model_instance is None:
raise Exception("model has to be loaded of trained before predict")
return self._model_instance.predict(*args, **kwargs)
def fit(self, *args, **kwargs):
self._model_instance.fit(*args, **kwargs)
def save(
self,
filename: str = config.PCB_GENERIC_MODEL_NAME,
dump_fct: Callable = pickle.dump,
):
"""save this current instance of BaseModelWrapper
Args:
filename: file that will be used to store the instance
dump_fct: function to use to dump the instance into a file
"""
location = join(config.PCB_DATA_FOLDER, filename)
with open(location, "wb") as f:
dump_fct(self.__dict__, f)
@classmethod
def load(
cls,
filename: str = config.PCB_GENERIC_MODEL_NAME,
load_fct: Callable = pickle.load,
):
"""Create a instance of BaseModelWrapper from a previously saved file
Args:
filename: file that contain the saved BaseModelWrapper instance
load_fct: function to use to load the instance from a file
Return:
An instance of BaseModelWrapper
"""
location = join(config.PCB_DATA_FOLDER, filename)
with open(location, "rb") as f:
wrapper = cls()
wrapper.__dict__ = load_fct(f)
return wrapper
class SimpleLDA(BaseModelWrapper):
"""Simple LDA over all the users, with 15 topics"""
def __init__(
self,
name: str = "simple_lda",
n_components: int = 15,
random_state: int = 0,
n_jobs: int = -1,
**kwargs
):
my_lda = partial(
LatentDirichletAllocation,
n_components=n_components,
random_state=random_state,
n_jobs=n_jobs,
**kwargs
)
super().__init__(my_lda, name)
def predict(self, *args, **kwargs):
return super().transform(*args, **kwargs)
class SimpleBOW(BaseModelWrapper):
"""Bag-of-words approach, compute the mean of all days"""
def __init__(self, name: str = "simple_bow"):
super().__init__(None, name)
def transform(self, *args, **kwargs):
return self.predict(*args, **kwargs)
def predict(self, *args, **kwargs):
X = args[0]
return np.mean(X, axis=0)
def fit(self, *args, **kwargs):
pass
class SimpleHDP(BaseModelWrapper):
"""Bag-of-words approach, compute the mean of all days"""
def __init__(self, name: str = "simple_hdp"):
super().__init__(None, name)
self._gensim_dict = None
def to_bow_format(self, X: List):
if self._gensim_dict is not None:
return [self._gensim_dict.doc2bow(x) for x in X]
def predict(self, X, *args, **kwargs):
bow_format = self.to_bow_format(X)
return super().transform(bow_format, *args, **kwargs)
def fit(self, X, *args, **kwargs):
self._gensim_dict = Dictionary(X)
self._model_instance = HdpTransformer(id2word=self._gensim_dict)
bow_format = self.to_bow_format(X)
super().fit(bow_format, *args, **kwargs)
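# Usage sketch (filename is illustrative):
#   model = SimpleLDA()
#   model.fit(X)                        # X: array of per-day feature vectors
#   model.save("simple_lda.p")
#   restored = SimpleLDA.load("simple_lda.p")
#   topic_mixtures = restored.predict(X)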
| 31.208054 | 85 | 0.634194 |
| edfb2c801d5e42a16a9df5b2f07e0039e408d080 | 7,512 | py | Python | tempest/tests/lib/cmd/test_check_uuid.py | cityofships/tempest | 59aa6811a3664d88b8939603b8e974644fbe21fa | ["Apache-2.0"] | 254 | 2015-01-05T19:22:52.000Z | 2022-03-29T08:14:54.000Z | tempest/tests/lib/cmd/test_check_uuid.py | cityofships/tempest | 59aa6811a3664d88b8939603b8e974644fbe21fa | ["Apache-2.0"] | 13 | 2015-03-02T15:53:04.000Z | 2022-02-16T02:28:14.000Z | tempest/tests/lib/cmd/test_check_uuid.py | cityofships/tempest | 59aa6811a3664d88b8939603b8e974644fbe21fa | ["Apache-2.0"] | 367 | 2015-01-07T15:05:39.000Z | 2022-03-04T09:50:35.000Z |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import importlib
import os
import shutil
import sys
import tempfile
from unittest import mock
from tempest.lib.cmd import check_uuid
from tempest.tests import base
class TestCLInterface(base.TestCase):
CODE = "import unittest\n" \
"class TestClass(unittest.TestCase):\n" \
" def test_tests(self):\n" \
" pass"
def setUp(self):
super(TestCLInterface, self).setUp()
self.directory = tempfile.mkdtemp(prefix='check-uuid', dir=".")
self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True)
init_file = open(self.directory + "/__init__.py", "w")
init_file.close()
self.tests_file = self.directory + "/tests.py"
with open(self.tests_file, "w") as fake_file:
fake_file.write(TestCLInterface.CODE)
fake_file.close()
def test_fix_argument_no(self):
sys.argv = [sys.argv[0]] + ["--package",
os.path.relpath(self.directory)]
self.assertRaises(SystemExit, check_uuid.run)
with open(self.tests_file, "r") as f:
self.assertTrue(TestCLInterface.CODE == f.read())
def test_fix_argument_yes(self):
sys.argv = [sys.argv[0]] + ["--fix", "--package",
os.path.relpath(self.directory)]
check_uuid.run()
with open(self.tests_file, "r") as f:
self.assertTrue(TestCLInterface.CODE != f.read())
class TestSourcePatcher(base.TestCase):
def test_add_patch(self):
patcher = check_uuid.SourcePatcher()
fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
file_contents = 'first_line\nsecond_line'
fake_file.write(file_contents)
fake_file.close()
patcher.add_patch(fake_file.name, 'patch', 2)
source_file = patcher.source_files[fake_file.name]
self.assertEqual(1, len(patcher.patches))
(patch_id, patch), = patcher.patches.items()
self.assertEqual(patcher._quote('patch\n'), patch)
self.assertEqual('first_line\n{%s:s}second_line' % patch_id,
patcher._unquote(source_file))
def test_apply_patches(self):
fake_file = tempfile.NamedTemporaryFile("w+t")
patcher = check_uuid.SourcePatcher()
patcher.patches = {'fake-uuid': patcher._quote('patch\n')}
patcher.source_files = {
fake_file.name: patcher._quote('first_line\n') +
'{fake-uuid:s}second_line'}
with mock.patch('sys.stdout'):
patcher.apply_patches()
lines = fake_file.read().split('\n')
fake_file.close()
self.assertEqual(['first_line', 'patch', 'second_line'], lines)
self.assertFalse(patcher.patches)
self.assertFalse(patcher.source_files)
class TestTestChecker(base.TestCase):
IMPORT_LINE = "from tempest.lib import decorators\n"
def _test_add_uuid_to_test(self, source_file):
class Fake_test_node():
lineno = 1
col_offset = 4
patcher = check_uuid.SourcePatcher()
checker = check_uuid.TestChecker(importlib.import_module('tempest'))
fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
fake_file.write(source_file)
fake_file.close()
checker._add_uuid_to_test(patcher, Fake_test_node(), fake_file.name)
self.assertEqual(1, len(patcher.patches))
self.assertEqual(1, len(patcher.source_files))
(patch_id, patch), = patcher.patches.items()
changed_source_file, = patcher.source_files.values()
self.assertEqual('{%s:s}%s' % (patch_id, patcher._quote(source_file)),
changed_source_file)
expected_patch_start = patcher._quote(
' ' + check_uuid.DECORATOR_TEMPLATE.split('(')[0])
self.assertTrue(patch.startswith(expected_patch_start))
def test_add_uuid_to_test_def(self):
source_file = (" def test_test():\n"
" pass")
self._test_add_uuid_to_test(source_file)
def test_add_uuid_to_test_decorator(self):
source_file = (" @decorators.idempotent_id\n"
" def test_test():\n"
" pass")
self._test_add_uuid_to_test(source_file)
@staticmethod
def get_mocked_ast_object(lineno, col_offset, module, name, object_type):
ast_object = mock.Mock(spec=object_type)
name_obj = mock.Mock()
ast_object.lineno = lineno
ast_object.col_offset = col_offset
name_obj.name = name
ast_object.module = module
ast_object.names = [name_obj]
return ast_object
def test_add_import_for_test_uuid_no_tempest(self):
patcher = check_uuid.SourcePatcher()
checker = check_uuid.TestChecker(importlib.import_module('tempest'))
fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
source_code = "from unittest import mock\n"
fake_file.write(source_code)
fake_file.close()
class Fake_src_parsed():
body = [TestTestChecker.get_mocked_ast_object(
1, 4, 'unittest', 'mock', ast.ImportFrom)]
checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
fake_file.name)
patcher.apply_patches()
with open(fake_file.name, "r") as f:
expected_result = source_code + '\n' + TestTestChecker.IMPORT_LINE
self.assertTrue(expected_result == f.read())
def test_add_import_for_test_uuid_tempest(self):
patcher = check_uuid.SourcePatcher()
checker = check_uuid.TestChecker(importlib.import_module('tempest'))
fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
source_code = "from tempest import a_fake_module\n"
fake_file.write(source_code)
fake_file.close()
class Fake_src_parsed:
body = [TestTestChecker.get_mocked_ast_object(
1, 4, 'tempest', 'a_fake_module', ast.ImportFrom)]
checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
fake_file.name)
patcher.apply_patches()
with open(fake_file.name, "r") as f:
expected_result = source_code + TestTestChecker.IMPORT_LINE
self.assertTrue(expected_result == f.read())
def test_add_import_no_import(self):
patcher = check_uuid.SourcePatcher()
patcher.add_patch = mock.Mock()
checker = check_uuid.TestChecker(importlib.import_module('tempest'))
fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
fake_file.close()
class Fake_src_parsed:
body = []
checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
fake_file.name)
self.assertTrue(not patcher.add_patch.called)
| 38.523077 | 78 | 0.637913 |
| 1408769738e5678ea7c6af23aa307efaa00d59e8 | 17,252 | py | Python | apps/deepstream-ssd-parser/deepstream_ssd_parser.py | gbakke1/deepstream_python_apps | 1a7e7bc104e216f8bc3c21b3787ac96ebc623058 | ["MIT"] | 4 | 2021-02-19T06:39:16.000Z | 2021-11-20T22:06:33.000Z | apps/deepstream-ssd-parser/deepstream_ssd_parser.py | gbakke1/deepstream_python_apps | 1a7e7bc104e216f8bc3c21b3787ac96ebc623058 | ["MIT"] | 1 | 2020-07-02T17:22:40.000Z | 2020-07-24T11:56:07.000Z | apps/deepstream-ssd-parser/deepstream_ssd_parser.py | gbakke1/deepstream_python_apps | 1a7e7bc104e216f8bc3c21b3787ac96ebc623058 | ["MIT"] | 1 | 2021-02-25T15:17:12.000Z | 2021-02-25T15:17:12.000Z |
#!/usr/bin/env python3
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
""" Example of deepstream using SSD neural network and parsing SSD's outputs. """
import sys
import io
sys.path.append("../")
import gi
gi.require_version("Gst", "1.0")
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from ssd_parser import nvds_infer_parse_custom_tf_ssd, DetectionParam, NmsParam, BoxSizeParam
import pyds
CLASS_NB = 91
ACCURACY_ALL_CLASS = 0.5
UNTRACKED_OBJECT_ID = 0xffffffffffffffff
IMAGE_HEIGHT = 1080
IMAGE_WIDTH = 1920
MIN_BOX_WIDTH = 32
MIN_BOX_HEIGHT = 32
TOP_K = 20
IOU_THRESHOLD = 0.3
OUTPUT_VIDEO_NAME = "./out.mp4"
def get_label_names_from_file(filepath):
""" Read a label file and convert it to string list """
f = io.open(filepath, "r")
labels = f.readlines()
labels = [elm[:-1] for elm in labels]
f.close()
return labels
def make_elm_or_print_err(factoryname, name, printedname, detail=""):
""" Creates an element with Gst Element Factory make.
Return the element if successfully created, otherwise print
to stderr and return None.
"""
print("Creating", printedname)
elm = Gst.ElementFactory.make(factoryname, name)
if not elm:
sys.stderr.write("Unable to create " + printedname + " \n")
if detail:
sys.stderr.write(detail)
return elm
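# Usage sketch (factory and element names are illustrative):
#   source = make_elm_or_print_err("filesrc", "file-source", "Source")
# returns the created GStreamer element, or None after printing an error to stderr.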
def osd_sink_pad_buffer_probe(pad, info, u_data):
frame_number = 0
# Initializing object counter with 0.
obj_counter = dict(enumerate([0] * CLASS_NB))
num_rects = 0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number = frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
l_obj = frame_meta.obj_meta_list
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
try:
l_obj = l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
id_dict = {
val: index
for index, val in enumerate(get_label_names_from_file("labels.txt"))
}
disp_string = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}"
py_nvosd_text_params.display_text = disp_string.format(
frame_number,
num_rects,
obj_counter[id_dict["car"]],
obj_counter[id_dict["person"]],
)
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
# Font , font-color and font-size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
print(pyds.get_string(py_nvosd_text_params.display_text))
pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
try:
l_frame = l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
def add_obj_meta_to_frame(frame_object, batch_meta, frame_meta, label_names):
""" Inserts an object into the metadata """
# this is a good place to insert objects into the metadata.
# Here's an example of inserting a single object.
obj_meta = pyds.nvds_acquire_obj_meta_from_pool(batch_meta)
# Set bbox properties. These are in input resolution.
rect_params = obj_meta.rect_params
rect_params.left = int(IMAGE_WIDTH * frame_object.left)
rect_params.top = int(IMAGE_HEIGHT * frame_object.top)
rect_params.width = int(IMAGE_WIDTH * frame_object.width)
rect_params.height = int(IMAGE_HEIGHT * frame_object.height)
    # Semi-transparent yellow background (disabled here: has_bg_color is 0)
rect_params.has_bg_color = 0
rect_params.bg_color.set(1, 1, 0, 0.4)
# Red border of width 3
rect_params.border_width = 3
rect_params.border_color.set(1, 0, 0, 1)
# Set object info including class, detection confidence, etc.
obj_meta.confidence = frame_object.detectionConfidence
obj_meta.class_id = frame_object.classId
# There is no tracking ID upon detection. The tracker will
# assign an ID.
obj_meta.object_id = UNTRACKED_OBJECT_ID
lbl_id = frame_object.classId
if lbl_id >= len(label_names):
lbl_id = 0
# Set the object classification label.
obj_meta.obj_label = label_names[lbl_id]
# Set display text for the object.
txt_params = obj_meta.text_params
if txt_params.display_text:
pyds.free_buffer(txt_params.display_text)
txt_params.x_offset = int(rect_params.left)
txt_params.y_offset = max(0, int(rect_params.top) - 10)
txt_params.display_text = (
label_names[lbl_id] + " " + "{:04.3f}".format(frame_object.detectionConfidence)
)
# Font , font-color and font-size
txt_params.font_params.font_name = "Serif"
txt_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
txt_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
txt_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
txt_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
    # Insert the object into the current frame's metadata.
    # This object has no parent.
pyds.nvds_add_obj_meta_to_frame(frame_meta, obj_meta, None)
def pgie_src_pad_buffer_probe(pad, info, u_data):
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
detection_params = DetectionParam(CLASS_NB, ACCURACY_ALL_CLASS)
box_size_param = BoxSizeParam(IMAGE_HEIGHT, IMAGE_WIDTH,
MIN_BOX_WIDTH, MIN_BOX_HEIGHT)
nms_param = NmsParam(TOP_K, IOU_THRESHOLD)
label_names = get_label_names_from_file("labels.txt")
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
l_user = frame_meta.frame_user_meta_list
while l_user is not None:
try:
# Note that l_user.data needs a cast to pyds.NvDsUserMeta
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
user_meta = pyds.NvDsUserMeta.cast(l_user.data)
except StopIteration:
break
            if (
                user_meta.base_meta.meta_type
                != pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META
            ):
                # Skip non-tensor metadata, advancing the iterator first so the
                # loop cannot spin forever on the same user-meta item.
                try:
                    l_user = l_user.next
                except StopIteration:
                    break
                continue
tensor_meta = pyds.NvDsInferTensorMeta.cast(user_meta.user_meta_data)
# Boxes in the tensor meta should be in network resolution which is
# found in tensor_meta.network_info. Use this info to scale boxes to
# the input frame resolution.
layers_info = []
for i in range(tensor_meta.num_output_layers):
layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
layers_info.append(layer)
frame_object_list = nvds_infer_parse_custom_tf_ssd(
layers_info, detection_params, box_size_param, nms_param
)
try:
l_user = l_user.next
except StopIteration:
break
for frame_object in frame_object_list:
add_obj_meta_to_frame(frame_object, batch_meta, frame_meta, label_names)
try:
l_frame = l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
def main(args):
# Check input arguments
if len(args) != 2:
sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
sys.exit(1)
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
# Source element for reading from the file
source = make_elm_or_print_err("filesrc", "file-source", "Source")
# Since the data format in the input file is elementary h264 stream,
# we need a h264parser
h264parser = make_elm_or_print_err("h264parse", "h264-parser", "H264Parser")
# Use nvdec_h264 for hardware accelerated decode on GPU
decoder = make_elm_or_print_err("nvv4l2decoder", "nvv4l2-decoder", "Decoder")
# Create nvstreammux instance to form batches from one or more sources.
streammux = make_elm_or_print_err("nvstreammux", "Stream-muxer", "NvStreamMux")
# Use nvinferserver to run inferencing on decoder's output,
# behaviour of inferencing is set through config file
pgie = make_elm_or_print_err("nvinferserver", "primary-inference", "Nvinferserver")
# Use convertor to convert from NV12 to RGBA as required by nvosd
nvvidconv = make_elm_or_print_err("nvvideoconvert", "convertor", "Nvvidconv")
# Create OSD to draw on the converted RGBA buffer
nvosd = make_elm_or_print_err("nvdsosd", "onscreendisplay", "OSD (nvosd)")
# Finally encode and save the osd output
queue = make_elm_or_print_err("queue", "queue", "Queue")
nvvidconv2 = make_elm_or_print_err("nvvideoconvert", "convertor2", "Converter 2 (nvvidconv2)")
capsfilter = make_elm_or_print_err("capsfilter", "capsfilter", "capsfilter")
caps = Gst.Caps.from_string("video/x-raw, format=I420")
capsfilter.set_property("caps", caps)
# On Jetson, there is a problem with the encoder failing to initialize
# due to limitation on TLS usage. To work around this, preload libgomp.
# Add a reminder here in case the user forgets.
preload_reminder = "If the following error is encountered:\n" + \
"/usr/lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block\n" + \
"Preload the offending library:\n" + \
"export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n"
encoder = make_elm_or_print_err("avenc_mpeg4", "encoder", "Encoder", preload_reminder)
encoder.set_property("bitrate", 2000000)
codeparser = make_elm_or_print_err("mpeg4videoparse", "mpeg4-parser", 'Code Parser')
container = make_elm_or_print_err("qtmux", "qtmux", "Container")
sink = make_elm_or_print_err("filesink", "filesink", "Sink")
sink.set_property("location", OUTPUT_VIDEO_NAME)
sink.set_property("sync", 0)
sink.set_property("async", 0)
print("Playing file %s " % args[1])
source.set_property("location", args[1])
streammux.set_property("width", IMAGE_WIDTH)
streammux.set_property("height", IMAGE_HEIGHT)
streammux.set_property("batch-size", 1)
streammux.set_property("batched-push-timeout", 4000000)
pgie.set_property("config-file-path", "dstest_ssd_nopostprocess.txt")
print("Adding elements to Pipeline \n")
pipeline.add(source)
pipeline.add(h264parser)
pipeline.add(decoder)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(queue)
pipeline.add(nvvidconv2)
pipeline.add(capsfilter)
pipeline.add(encoder)
pipeline.add(codeparser)
pipeline.add(container)
pipeline.add(sink)
    # We link the elements together:
    # file-source -> h264-parser -> nvv4l2-decoder -> streammux ->
    # nvinferserver -> nvvidconv -> nvosd -> queue -> nvvidconv2 ->
    # capsfilter -> encoder -> mpeg4-parser -> qtmux -> filesink
print("Linking elements in the Pipeline \n")
source.link(h264parser)
h264parser.link(decoder)
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = decoder.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of decoder \n")
srcpad.link(sinkpad)
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
nvosd.link(queue)
queue.link(nvvidconv2)
nvvidconv2.link(capsfilter)
capsfilter.link(encoder)
encoder.link(codeparser)
codeparser.link(container)
container.link(sink)
    # Create an event loop and feed GStreamer bus messages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", bus_call, loop)
# Add a probe on the primary-infer source pad to get inference output tensors
pgiesrcpad = pgie.get_static_pad("src")
if not pgiesrcpad:
sys.stderr.write(" Unable to get src pad of primary infer \n")
pgiesrcpad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)
    # Add a probe to be informed of the generated metadata. We attach it to the
    # sink pad of the osd element, since by that point the buffer will have
    # received all the metadata.
osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
sys.stderr.write(" Unable to get sink pad of nvosd \n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
# start play back and listen to events
print("Starting pipeline \n")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
# cleanup
pipeline.set_state(Gst.State.NULL)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| avg_line_length: 37.586057 | max_line_length: 114 | alphanum_fraction: 0.675052 |
| hexsha: 863aa7a92842cc734b76b4031dfee42409f5956e | size: 7,105 | ext: py | lang: Python | path: std/captum/16.py | repo: quantapix/qnarre.com | head_hexsha: f51d5945c20ef8182c4aa11f1b407d064c190c70 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
import numpy as np
from os import path
import matplotlib.pyplot as plt
import sklearn
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import torch
import torch.nn as nn
import torch.optim as optim
from captum.attr import LayerConductance, LayerActivation, LayerIntegratedGradients
from captum.attr import IntegratedGradients, DeepLift, GradientShap, NoiseTunnel, FeatureAblation
boston = load_boston()
feature_names = boston.feature_names
X = boston.data
y = boston.target
torch.manual_seed(1234)
np.random.seed(1234)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
fig, axs = plt.subplots(nrows=3, ncols=5, figsize=(30, 20))
for i, (ax, col) in enumerate(zip(axs.flat, feature_names)):
x = X[:, i]
pf = np.polyfit(x, y, 1)
p = np.poly1d(pf)
ax.plot(x, y, "o")
ax.plot(x, p(x), "r--")
ax.set_title(col + " vs Prices")
ax.set_xlabel(col)
ax.set_ylabel("Prices")
X_train = torch.tensor(X_train).float()
y_train = torch.tensor(y_train).view(-1, 1).float()
X_test = torch.tensor(X_test).float()
y_test = torch.tensor(y_test).view(-1, 1).float()
datasets = torch.utils.data.TensorDataset(X_train, y_train)
train_iter = torch.utils.data.DataLoader(datasets, batch_size=10, shuffle=True)
batch_size = 50
num_epochs = 200
learning_rate = 0.0001
size_hidden1 = 100
size_hidden2 = 50
size_hidden3 = 10
size_hidden4 = 1
class BostonModel(nn.Module):
def __init__(self):
super().__init__()
self.lin1 = nn.Linear(13, size_hidden1)
self.relu1 = nn.ReLU()
self.lin2 = nn.Linear(size_hidden1, size_hidden2)
self.relu2 = nn.ReLU()
self.lin3 = nn.Linear(size_hidden2, size_hidden3)
self.relu3 = nn.ReLU()
self.lin4 = nn.Linear(size_hidden3, size_hidden4)
def forward(self, input):
return self.lin4(self.relu3(self.lin3(self.relu2(self.lin2(self.relu1(self.lin1(input)))))))
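# BostonModel is a plain four-layer MLP: 13 input features -> 100 -> 50 -> 10 -> 1
# predicted price, with ReLU activations between the hidden layers.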
model = BostonModel()
model.train()
criterion = nn.MSELoss(reduction="sum")
def train(model_inp, num_epochs=num_epochs):
optimizer = torch.optim.RMSprop(model_inp.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
running_loss = 0.0
for inputs, labels in train_iter:
outputs = model_inp(inputs)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
running_loss += loss.item()
optimizer.step()
if epoch % 20 == 0:
print(
"Epoch [%d]/[%d] running accumulative loss across all batches: %.3f"
% (epoch + 1, num_epochs, running_loss)
)
running_loss = 0.0
def train_load_save_model(model_obj, model_path):
if path.isfile(model_path):
print("Loading pre-trained model from: {}".format(model_path))
model_obj.load_state_dict(torch.load(model_path))
else:
train(model_obj)
print("Finished training the model. Saving the model to the path: {}".format(model_path))
torch.save(model_obj.state_dict(), model_path)
SAVED_MODEL_PATH = ".save/boston_model.pt"
train_load_save_model(model, SAVED_MODEL_PATH)
model.eval()
outputs = model(X_test)
err = np.sqrt(mean_squared_error(outputs.detach().numpy(), y_test.detach().numpy()))
print("model err: ", err)
ig = IntegratedGradients(model)
ig_nt = NoiseTunnel(ig)
dl = DeepLift(model)
gs = GradientShap(model)
fa = FeatureAblation(model)
ig_attr_test = ig.attribute(X_test, n_steps=50)
ig_nt_attr_test = ig_nt.attribute(X_test)
dl_attr_test = dl.attribute(X_test)
gs_attr_test = gs.attribute(X_test, X_train)
fa_attr_test = fa.attribute(X_test)
x_axis_data = np.arange(X_test.shape[1])
x_axis_data_labels = list(map(lambda idx: feature_names[idx], x_axis_data))
ig_attr_test_sum = ig_attr_test.detach().numpy().sum(0)
ig_attr_test_norm_sum = ig_attr_test_sum / np.linalg.norm(ig_attr_test_sum, ord=1)
ig_nt_attr_test_sum = ig_nt_attr_test.detach().numpy().sum(0)
ig_nt_attr_test_norm_sum = ig_nt_attr_test_sum / np.linalg.norm(ig_nt_attr_test_sum, ord=1)
dl_attr_test_sum = dl_attr_test.detach().numpy().sum(0)
dl_attr_test_norm_sum = dl_attr_test_sum / np.linalg.norm(dl_attr_test_sum, ord=1)
gs_attr_test_sum = gs_attr_test.detach().numpy().sum(0)
gs_attr_test_norm_sum = gs_attr_test_sum / np.linalg.norm(gs_attr_test_sum, ord=1)
fa_attr_test_sum = fa_attr_test.detach().numpy().sum(0)
fa_attr_test_norm_sum = fa_attr_test_sum / np.linalg.norm(fa_attr_test_sum, ord=1)
lin_weight = model.lin1.weight[0].detach().numpy()
y_axis_lin_weight = lin_weight / np.linalg.norm(lin_weight, ord=1)
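# Each attribution vector is summed over the test samples and L1-normalized so
# that the different attribution methods (and the raw first-layer weights) can
# be compared on a common scale in the bar chart below.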
width = 0.14
legends = ["Int Grads", "Int Grads w/SmoothGrad", "DeepLift", "GradientSHAP", "Feature Ablation", "Weights"]
plt.figure(figsize=(20, 10))
ax = plt.subplot()
ax.set_title("Comparing input feature importances across multiple algorithms and learned weights")
ax.set_ylabel("Attributions")
FONT_SIZE = 16
plt.rc("font", size=FONT_SIZE)
plt.rc("axes", titlesize=FONT_SIZE)
plt.rc("axes", labelsize=FONT_SIZE)
plt.rc("legend", fontsize=FONT_SIZE - 4)
ax.bar(x_axis_data, ig_attr_test_norm_sum, width, align="center", alpha=0.8, color="#eb5e7c")
ax.bar(x_axis_data + width, ig_nt_attr_test_norm_sum, width, align="center", alpha=0.7, color="#A90000")
ax.bar(x_axis_data + 2 * width, dl_attr_test_norm_sum, width, align="center", alpha=0.6, color="#34b8e0")
ax.bar(x_axis_data + 3 * width, gs_attr_test_norm_sum, width, align="center", alpha=0.8, color="#4260f5")
ax.bar(x_axis_data + 4 * width, fa_attr_test_norm_sum, width, align="center", alpha=1.0, color="#49ba81")
ax.bar(x_axis_data + 5 * width, y_axis_lin_weight, width, align="center", alpha=1.0, color="grey")
ax.autoscale_view()
plt.tight_layout()
ax.set_xticks(x_axis_data + 0.5)
ax.set_xticklabels(x_axis_data_labels)
plt.legend(legends, loc=3)
plt.show()
lc = LayerConductance(model, model.lin4)
lc_attr_test = lc.attribute(X_test, n_steps=100, attribute_to_layer_input=True)
lc_attr_test = lc_attr_test[0]
lin4_weight = model.lin4.weight
plt.figure(figsize=(15, 8))
x_axis_data = np.arange(lc_attr_test.shape[1])
y_axis_lc_attr_test = lc_attr_test.mean(0).detach().numpy()
y_axis_lc_attr_test = y_axis_lc_attr_test / np.linalg.norm(y_axis_lc_attr_test, ord=1)
y_axis_lin4_weight = lin4_weight[0].detach().numpy()
y_axis_lin4_weight = y_axis_lin4_weight / np.linalg.norm(y_axis_lin4_weight, ord=1)
width = 0.25
legends = ["Attributions", "Weights"]
x_axis_labels = ["Neuron {}".format(i) for i in range(len(y_axis_lin4_weight))]
ax = plt.subplot()
ax.set_title("Aggregated neuron importances and learned weights in the last linear layer of the model")
ax.bar(x_axis_data + width, y_axis_lc_attr_test, width, align="center", alpha=0.5, color="red")
ax.bar(x_axis_data + 2 * width, y_axis_lin4_weight, width, align="center", alpha=0.5, color="green")
plt.legend(legends, loc=2, prop={"size": 20})
ax.autoscale_view()
plt.tight_layout()
ax.set_xticks(x_axis_data + 0.5)
ax.set_xticklabels(x_axis_labels)
plt.show()
| avg_line_length: 33.356808 | max_line_length: 108 | alphanum_fraction: 0.727516 |
| hexsha: 927fdc2e950e89d336dbf4a88642a6e2065e7893 | size: 6,295 | ext: py | lang: Python | path: test/test_graph_http.py | repo: cclauss/rdflib | head_hexsha: 1729243621d5679ad4c8c156332e91f4b078dda5 | licenses: ["BSD-3-Clause"] | stars: 1,424 (2015-01-04T13:10:22.000Z to 2022-03-29T15:12:38.000Z) | issues: 1,148 (2015-01-01T18:26:18.000Z to 2022-03-31T21:51:53.000Z) | forks: 459 (2015-01-03T14:41:34.000Z to 2022-03-14T22:06:47.000Z) |
from rdflib import Graph, Namespace
from http.server import BaseHTTPRequestHandler
from urllib.error import HTTPError
from .testutils import SimpleHTTPMock, MockHTTPResponse, ctx_http_server, GraphHelper
import unittest
"""
Test that correct content negotiation headers are passed
by graph.parse
"""
xmltestdoc = """<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF
xmlns="http://example.org/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
>
<rdf:Description rdf:about="http://example.org/a">
<b rdf:resource="http://example.org/c"/>
</rdf:Description>
</rdf:RDF>
"""
n3testdoc = """@prefix : <http://example.org/> .
:a :b :c .
"""
nttestdoc = "<http://example.org/a> <http://example.org/b> <http://example.org/c> .\n"
class ContentNegotiationHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200, "OK")
# fun fun fun parsing accept header.
acs = self.headers["Accept"].split(",")
acq = [x.split(";") for x in acs if ";" in x]
acn = [(x, "q=1") for x in acs if ";" not in x]
acs = [(x[0], float(x[1].strip()[2:])) for x in acq + acn]
ac = sorted(acs, key=lambda x: x[1])
ct = ac[-1]
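        # ct is the (mediatype, q) pair with the highest q value.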
if "application/rdf+xml" in ct:
rct = "application/rdf+xml"
content = xmltestdoc
elif "text/n3" in ct:
rct = "text/n3"
content = n3testdoc
elif "text/plain" in ct:
rct = "text/plain"
content = nttestdoc
self.send_header("Content-type", rct)
self.end_headers()
self.wfile.write(content.encode("utf-8"))
def log_message(self, *args):
pass
class TestGraphHTTP(unittest.TestCase):
def test_content_negotiation(self) -> None:
EG = Namespace("http://example.org/")
expected = Graph()
expected.add((EG["a"], EG["b"], EG["c"]))
expected_triples = GraphHelper.triple_set(expected)
with ctx_http_server(ContentNegotiationHandler) as server:
(host, port) = server.server_address
url = f"http://{host}:{port}/foo"
for format in ("xml", "n3", "nt"):
graph = Graph()
graph.parse(url, format=format)
self.assertEqual(expected_triples, GraphHelper.triple_set(graph))
def test_source(self) -> None:
EG = Namespace("http://example.org/")
expected = Graph()
expected.add((EG["a"], EG["b"], EG["c"]))
expected_triples = GraphHelper.triple_set(expected)
httpmock = SimpleHTTPMock()
with ctx_http_server(httpmock.Handler) as server:
(host, port) = server.server_address
url = f"http://{host}:{port}/"
httpmock.do_get_responses.append(
MockHTTPResponse(
200,
"OK",
f"<{EG['a']}> <{EG['b']}> <{EG['c']}>.".encode(),
{"Content-Type": ["text/turtle"]},
)
)
graph = Graph()
graph.parse(source=url)
self.assertEqual(expected_triples, GraphHelper.triple_set(graph))
def test_3xx(self) -> None:
EG = Namespace("http://example.com/")
expected = Graph()
expected.add((EG["a"], EG["b"], EG["c"]))
expected_triples = GraphHelper.triple_set(expected)
httpmock = SimpleHTTPMock()
with ctx_http_server(httpmock.Handler) as server:
(host, port) = server.server_address
url = f"http://{host}:{port}/"
for idx in range(3):
httpmock.do_get_responses.append(
MockHTTPResponse(
302, "FOUND", "".encode(), {"Location": [f"{url}loc/302/{idx}"]}
)
)
for idx in range(3):
httpmock.do_get_responses.append(
MockHTTPResponse(
303,
"See Other",
"".encode(),
{"Location": [f"{url}loc/303/{idx}"]},
)
)
for idx in range(3):
httpmock.do_get_responses.append(
MockHTTPResponse(
308,
"Permanent Redirect",
"".encode(),
{"Location": [f"{url}loc/308/{idx}"]},
)
)
httpmock.do_get_responses.append(
MockHTTPResponse(
200,
"OK",
f"<{EG['a']}> <{EG['b']}> <{EG['c']}>.".encode(),
{"Content-Type": ["text/turtle"]},
)
)
graph = Graph()
graph.parse(location=url, format="turtle")
self.assertEqual(expected_triples, GraphHelper.triple_set(graph))
httpmock.do_get_mock.assert_called()
assert len(httpmock.do_get_requests) == 10
for request in httpmock.do_get_requests:
self.assertRegex(request.headers.get("Accept"), "text/turtle")
request_paths = [request.path for request in httpmock.do_get_requests]
self.assertEqual(
request_paths,
[
"/",
"/loc/302/0",
"/loc/302/1",
"/loc/302/2",
"/loc/303/0",
"/loc/303/1",
"/loc/303/2",
"/loc/308/0",
"/loc/308/1",
"/loc/308/2",
],
)
def test_5xx(self):
httpmock = SimpleHTTPMock()
with ctx_http_server(httpmock.Handler) as server:
(host, port) = server.server_address
url = f"http://{host}:{port}/"
response = MockHTTPResponse(500, "Internal Server Error", "".encode(), {})
httpmock.do_get_responses.append(response)
graph = Graph()
with self.assertRaises(HTTPError) as raised:
graph.parse(location=url, format="turtle")
self.assertEqual(raised.exception.code, 500)
if __name__ == "__main__":
unittest.main()
| avg_line_length: 32.786458 | max_line_length: 88 | alphanum_fraction: 0.504051 |
| hexsha: 3992889bf04f68baa698ac949ec43712ab3834eb | size: 273 | ext: py | lang: Python | path: shoppinglyx/urls.py | repo: ruh-iziki-orz/Book-Store | head_hexsha: 025576bb2c23ec385c76a5df4ce4f92b4243538b | licenses: ["MIT"] | stars: null | issues: null | forks: 2 (2021-07-02T15:49:42.000Z to 2021-11-23T06:30:45.000Z) |
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('app.urls')),
path('', include('django.contrib.auth.urls')),
# path('accounts/', include('django.contrib.auth.urls')),
]
| avg_line_length: 27.3 | max_line_length: 61 | alphanum_fraction: 0.666667 |
| hexsha: a0fae1b4ea81df9e41f62d3c91cfd64c60d286b3 | size: 3,386 | ext: py | lang: Python | path: domino/routes.py | repo: imarchenko/python-domino | head_hexsha: 930e8a5f485d47a0017ceb7cac6cf720da986a48 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null |
class _Routes:
def __init__(self, host, owner_username, project_name):
self.host = host
self._owner_username = owner_username
self._project_name = project_name
# URL builders
def _build_project_url(self):
return self.host + '/v1/projects/' + \
self._owner_username + '/' + self._project_name
def _build_project_url_private_api(self):
return self.host + '/u/' + self._owner_username + \
'/' + self._project_name
def _build_old_project_url(self):
# TODO refactor once these API endpoints are supported in REST API
return self.host + '/' \
+ self._owner_username + '/' + self._project_name
def _build_models_url(self):
return self.host + '/v1/models'
# Project URLs
def runs_list(self):
return self._build_project_url() + '/runs'
def runs_start(self):
return self._build_project_url() + '/runs'
def run_stop(self, runId):
return self._build_project_url_private_api() + '/run/stop/' + runId
def runs_status(self, runId):
return self._build_project_url() + '/runs/' + runId
def runs_stdout(self, runId):
return self._build_project_url() + '/run/' + runId + '/stdout'
def files_list(self, commitId, path):
return self._build_project_url() + '/files/' + commitId + '/' + path
def files_upload(self, path):
return self._build_project_url() + path
def commits_list(self):
return self._build_project_url() + '/commits'
def blobs_get(self, key):
return self._build_project_url() + '/blobs/' + key
def fork_project(self):
return self._build_project_url_private_api() + '/fork'
def collaborators_get(self):
return self._build_old_project_url() + '/collaborators'
def collaborators_add(self):
return self._build_old_project_url() + '/addCollaborator'
def collaborators_remove(self):
return self._build_old_project_url() + '/removeCollaborator'
# API Endpoint URLs
def _build_endpoint_url(self):
return self.host + '/v1/' + \
self._owner_username + '/' + self._project_name + '/endpoint'
def endpoint(self):
return self._build_endpoint_url()
def endpoint_state(self):
return self._build_endpoint_url() + '/state'
def endpoint_publish(self):
return self._build_endpoint_url() + '/publishRelease'
# Model Manager URLs
def models_list(self):
return self._build_project_url() + '/models'
def model_publish(self):
return self._build_models_url()
def model_versions_get(self, model_id):
return self._build_models_url() + '/' + model_id + '/versions'
def model_version_publish(self, model_id):
return self._build_models_url() + '/' + model_id + '/versions'
def publish_ui_legacy(self):
return self._build_project_url_private_api() + '/endpoints'
def publish_ui(self):
return self._build_project_url_private_api() + '/endpoints/modelManager'
# Environment URLs
def environments_list(self):
return self.host + '/v1/environments'
# Deployment URLs
def deployment_version(self):
return self.host + '/version'
# App URLs
def app_publish(self):
return self._build_project_url_private_api() + '/nb/startSession'
| avg_line_length: 31.351852 | max_line_length: 80 | alphanum_fraction: 0.652688 |
| hexsha: 547d49dbab4685c1e514cd6e3f8f9bdc8209caf3 | size: 2,112 | ext: py | lang: Python | path: CaRM_HD189733/scripts/probfun_GPs_slope.py | repo: EduardoCristo/CaRM | head_hexsha: 9ccc87bfbf33b38f2ffab1fb95fbdf2b2d4606e4 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
from .priors import *
from .aromefit import *
import numpy as np
# from .constants import parguess#,cpar,prior_type,prior_int
from copy import deepcopy as dpcy
import matplotlib.pyplot as plt
import time
import sys
import scripts.globalvar as gb
import os
import time
#import george
#from george import kernels
import celerite
from celerite import terms
def lnlike(par, t, rv, yerr,tout,rvout,yerrout):
f,slope,sigw,ln_a,ln_tau=fitmodel(t, par)
fout,_,_,_,_=fitmodel(tout, par)
kernel = terms.RealTerm(ln_a,ln_tau)
gp = celerite.GP(kernel)
gp.compute(t,np.sqrt((np.array(yerr)**2. + np.exp(2.*sigw))))
mdif=rv-f
return(gp.log_likelihood(mdif)+lnlin(slope,tout,rvout-fout,yerrout,sigw))
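# lnlike combines a celerite GP log-likelihood (RealTerm kernel, an exponentially
# decaying covariance parametrised by ln_a and ln_tau) on the model residuals
# with a linear-slope likelihood (lnlin) evaluated on the out-of-transit points.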
def lnlin(slope, t, rv, yerr,sigw):
#print(slope)
f=slope*t
model = np.array(f)
inv_sigma2 = 1.0/(np.array(yerr)**2. + np.exp(2.*sigw))
return -0.5*(np.sum((rv-model)**2.*inv_sigma2 - np.log(inv_sigma2)))
def lnprior(par, prior_type, prior_interval):
pfun=0
if str(prior_type)=="U":
pfun+=Uprior(par,prior_interval)
elif str(prior_type)=="G":
pfun+=Gprior(par,prior_interval)
else:
None
return(pfun)
def lnprob(par):
rv, ph, sigrv,guessdict,pardict,priordict,odict,dlen,outoftransitph,outoftransitrv,outoftransitsigrv=dpcy(gb.interpar)
tempdict=dict()
c=0
for key in odict:
tempdict[key]=par[c]
c+=1
fprob=0
for k in range(dlen):
lnprior_val=0
intdict=dpcy(guessdict[k])
for j in odict:
try:
lnprior_val+= lnprior(tempdict[j],pardict[k][str(j)],priordict[k][str(j)])
intdict[str(j)]=dpcy(tempdict[j])
except:
None
if np.isfinite(lnprior_val)==False:
return(-np.inf)
else:
None
ll=lnlike(intdict, ph[k], rv[k], sigrv[k],outoftransitph[k],outoftransitrv[k],outoftransitsigrv[k])
fprob+=lnprior_val+ll
if np.isnan(fprob) == 1:
return(-np.inf)
return(fprob)
| avg_line_length: 25.756098 | max_line_length: 122 | alphanum_fraction: 0.629261 |
| hexsha: e7898102518d066921d7ad9026528d25425d1202 | size: 2,635 | ext: py | lang: Python | path: tests/test_haproxy.py | repo: ccDev-Labs/splunk-connect-for-syslog | head_hexsha: 2b30c711b4e53135444b485623bfc610ac2f19e2 | licenses: ["BSD-2-Clause", "CC0-1.0"] | stars: null | issues: null | forks: null |
# Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
import datetime
import random
import pytz
import pytest
from jinja2 import Environment, environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
env = Environment()
haproxy_testdata = [
r"{{ mark }}{{ bsd }} {{ host }} haproxy[{{ pid }}]: 10.0.0.0:1000 [something]",
]
@pytest.mark.parametrize("event", haproxy_testdata)
def test_haproxy(record_property, setup_wordlist, setup_splunk, setup_sc4s, event):
host = "{}-{}".format(
random.choice(setup_wordlist), random.choice(setup_wordlist)
)
pid = random.randint(1000, 32000)
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions
epoch = epoch[:-7]
mt = env.from_string(event + "\n")
message = mt.render(mark="<111>", bsd=bsd, host=host, pid=pid)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netlb host={{ host }} sourcetype="haproxy:tcp"'
)
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
haproxy_testdata_splunk = [
r"{{ mark }}{{ bsd }} {{ host }} haproxy[{{ pid }}]: client_ip=10.0.0.0 client_port=1000",
]
@pytest.mark.parametrize("event", haproxy_testdata_splunk)
def test_haproxy_splunk(record_property, setup_wordlist, setup_splunk, setup_sc4s, event):
host = "{}-{}".format(
random.choice(setup_wordlist), random.choice(setup_wordlist)
)
pid = random.randint(1000, 32000)
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions
epoch = epoch[:-7]
mt = env.from_string(event + "\n")
message = mt.render(mark="<111>", bsd=bsd, host=host, pid=pid)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string(
'search _time={{ epoch }} index=netlb host={{ host }} sourcetype="haproxy:splunk:http"'
)
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
| avg_line_length: 29.277778 | max_line_length: 95 | alphanum_fraction: 0.680835 |
| hexsha: 8100573682df6731edc77c6d7ddcf47c170926dc | size: 383 | ext: py | lang: Python | path: 237-delete-node-in-a-linked-list/237-delete-node-in-a-linked-list.py | repo: Dawit-Getachew/A2SV_Practice | head_hexsha: 2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
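        # Illustrative example: given 4 -> 5 -> 1 -> 9 with node pointing at 5,
        # copying the next value (1) into it and bypassing the old next node
        # leaves the list 4 -> 1 -> 9.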
| avg_line_length: 27.357143 | max_line_length: 74 | alphanum_fraction: 0.569191 |
| hexsha: b319dcc1d74a11363fc4350b8b00ee98234dec6a | size: 8,652 | ext: py | lang: Python | path: yolo3/utils.py | repo: NewYinbao/keras_yolov3 | head_hexsha: e84449faa467b9e540d4e53af6b9ac8f0c031ebb | licenses: ["MIT"] | stars: 4 (2020-01-10T09:35:09.000Z to 2020-04-25T00:52:16.000Z) | issues: null | forks: 2 (2019-11-30T13:11:23.000Z to 2020-04-25T00:52:20.000Z) |
"""Miscellaneous utility functions."""
from functools import reduce
import cv2
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
# from yolo3.model import preprocess_true_boxes
# from model import preprocess_true_boxes
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
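# Illustrative usage: compose(f, g) applies f first and g second, so
# compose(lambda x: x + 1, lambda x: x * 2)(3) == 8.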
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
'''Preprocess true boxes to training input format
Parameters
----------
true_boxes: array, shape=(m, T, 5)
Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
input_shape: array-like, hw, multiples of 32
anchors: array, shape=(N, 2), wh
num_classes: integer
Returns
-------
y_true: list of array, shape like yolo_outputs, xywh are reletive value
'''
assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
num_layers = len(anchors)//3 # default setting
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]
m = true_boxes.shape[0]
grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
dtype='float32') for l in range(num_layers)]
# Expand dim to apply broadcasting.
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
valid_mask = boxes_wh[..., 0]>0
for b in range(m):
# Discard zero rows.
wh = boxes_wh[b, valid_mask[b]]
if len(wh)==0: continue
# Expand dim to apply broadcasting.
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# Find best anchor for each true box
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
for l in range(num_layers):
if n in anchor_mask[l]:
i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[b,t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5+c] = 1
return y_true
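# For example, with input_shape = (416, 416), 9 anchors and num_classes = C,
# y_true is a list of three arrays shaped (m, 13, 13, 3, 5 + C),
# (m, 26, 26, 3, 5 + C) and (m, 52, 52, 3, 5 + C).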
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
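# For example, a 640x480 image letterboxed to size (416, 416) is resized to
# 416x312 and pasted centred on a grey (128, 128, 128) canvas.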
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.1, hue=.1, sat=1.5, val=1.5, proc_img=True):
'''random preprocessing for real-time data augmentation'''
line = annotation_line.split()
# rgb
image = Image.open(line[0])
# image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(float,box.split(',')))) for box in line[1:]])
if not random:
# resize image
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
image_data=0
if proc_img:
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
# image.show()
image_data = np.array(new_image)/255.
# image.show()
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
box[:, [0,2]] = box[:, [0,2]]*scale + dx
box[:, [1,3]] = box[:, [1,3]]*scale + dy
box_data[:len(box)] = box
return image_data, box_data
# resize image
    # aspect-ratio jitter (stretch) factor
    new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
    # scale factor
    scale = rand(.5, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = rgb_to_hsv(np.array(image)/255.)
x[..., 0] += hue
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x>1] = 1
x[x<0] = 0
image_data = hsv_to_rgb(x) # numpy array, 0 to 1
# image_data = x
# image_data = image_data.swapaxes(1,0)
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box_wo = box[:, 2] - box[:, 0]
box_ho = box[:, 3] - box[:, 1]
box_wo[box_wo<15] = 15
box_ho[box_ho<15] = 15
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>0.3*box_wo, box_h>0.3*box_ho)] # discard invalid box
if len(box)>max_boxes: box = box[:max_boxes]
box_data[:len(box)] = box
return image_data, box_data
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [int(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
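# For example, an anchors file containing "10,13, 16,30, 33,23, 30,61, 62,45,
# 59,119, 116,90, 156,198, 373,326" is parsed into a (9, 2) array of
# (width, height) pairs.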
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=1)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true], np.zeros(batch_size)
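# The generator yields [image_data, *y_true] as the model inputs and an
# all-zeros array as the dummy target, since in this setup the YOLO loss is
# typically computed inside the model itself rather than by Keras.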
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
| avg_line_length: 34.887097 | max_line_length: 129 | alphanum_fraction: 0.580097 |
| hexsha: f932e6fe7b48ddcff1bf6887ae1ff8bdf53e5bb4 | size: 38,535 | ext: py | lang: Python | path: tests/test_api.py | repo: satyavijay/flask-restful | head_hexsha: f815437784273e5e16f43d1253d59e585cf411f6 | licenses: ["BSD-3-Clause"] | stars: 1 (2018-05-05T12:16:09.000Z to 2018-05-05T12:16:09.000Z) | issues: null | forks: null |
import unittest
import json
from flask import Flask, Blueprint, redirect, views, abort as flask_abort
from flask.signals import got_request_exception, signals_available
try:
from mock import Mock
except:
# python3
from unittest.mock import Mock
import flask
import werkzeug
from werkzeug.exceptions import HTTPException, Unauthorized, BadRequest, NotFound, _aborter
from werkzeug.http import quote_etag, unquote_etag
from flask_restful.utils import http_status_message, unpack
import flask_restful
import flask_restful.fields
from flask_restful import OrderedDict
from json import dumps, loads, JSONEncoder
#noinspection PyUnresolvedReferences
from nose.tools import assert_equals, assert_true, assert_false # you need it for tests in form of continuations
import six
def check_unpack(expected, value):
assert_equals(expected, value)
def test_unpack():
yield check_unpack, ("hey", 200, {}), unpack("hey")
yield check_unpack, (("hey",), 200, {}), unpack(("hey",))
yield check_unpack, ("hey", 201, {}), unpack(("hey", 201))
yield check_unpack, ("hey", 201, "foo"), unpack(("hey", 201, "foo"))
yield check_unpack, (["hey", 201], 200, {}), unpack(["hey", 201])
# Add a dummy Resource to verify that the app is properly set.
class HelloWorld(flask_restful.Resource):
def get(self):
return {}
class BadMojoError(HTTPException):
pass
# Resource that always errors out
class HelloBomb(flask_restful.Resource):
def get(self):
raise BadMojoError("It burns..")
class APITestCase(unittest.TestCase):
def test_http_code(self):
self.assertEquals(http_status_message(200), 'OK')
self.assertEquals(http_status_message(404), 'Not Found')
def test_unauthorized_no_challenge_by_default(self):
app = Flask(__name__)
api = flask_restful.Api(app)
response = Mock()
response.headers = {}
with app.test_request_context('/foo'):
response = api.unauthorized(response)
assert_false('WWW-Authenticate' in response.headers)
def test_unauthorized(self):
app = Flask(__name__)
api = flask_restful.Api(app, serve_challenge_on_401=True)
response = Mock()
response.headers = {}
with app.test_request_context('/foo'):
response = api.unauthorized(response)
self.assertEquals(response.headers['WWW-Authenticate'],
'Basic realm="flask-restful"')
def test_unauthorized_custom_realm(self):
app = Flask(__name__)
app.config['HTTP_BASIC_AUTH_REALM'] = 'Foo'
api = flask_restful.Api(app, serve_challenge_on_401=True)
response = Mock()
response.headers = {}
with app.test_request_context('/foo'):
response = api.unauthorized(response)
self.assertEquals(response.headers['WWW-Authenticate'], 'Basic realm="Foo"')
def test_handle_error_401_no_challenge_by_default(self):
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context('/foo'):
resp = api.handle_error(Unauthorized())
self.assertEquals(resp.status_code, 401)
            assert_false('WWW-Authenticate' in resp.headers)
    def test_handle_error_401_sends_challenge_default_realm(self):
app = Flask(__name__)
api = flask_restful.Api(app, serve_challenge_on_401=True)
exception = HTTPException()
exception.code = 401
exception.data = {'foo': 'bar'}
with app.test_request_context('/foo'):
resp = api.handle_error(exception)
self.assertEquals(resp.status_code, 401)
self.assertEquals(resp.headers['WWW-Authenticate'],
'Basic realm="flask-restful"')
    def test_handle_error_401_sends_challenge_configured_realm(self):
app = Flask(__name__)
app.config['HTTP_BASIC_AUTH_REALM'] = 'test-realm'
api = flask_restful.Api(app, serve_challenge_on_401=True)
with app.test_request_context('/foo'):
resp = api.handle_error(Unauthorized())
self.assertEquals(resp.status_code, 401)
self.assertEquals(resp.headers['WWW-Authenticate'],
'Basic realm="test-realm"')
def test_handle_error_does_not_swallow_exceptions(self):
app = Flask(__name__)
api = flask_restful.Api(app)
exception = BadRequest('x')
with app.test_request_context('/foo'):
resp = api.handle_error(exception)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.get_data(), b'{"message": "x"}\n')
def test_handle_error_does_not_swallow_custom_exceptions(self):
app = Flask(__name__)
errors = {'BadMojoError': {'status': 409, 'message': 'go away'}}
api = flask_restful.Api(app, errors=errors)
api.add_resource(HelloBomb, '/bomb')
app = app.test_client()
resp = app.get('/bomb')
self.assertEquals(resp.status_code, 409)
self.assertEquals(resp.content_type, api.default_mediatype)
resp_dict = json.loads(resp.data.decode())
self.assertEqual(resp_dict.get('status'), 409)
self.assertEqual(resp_dict.get('message'), 'go away')
def test_marshal(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
marshal_dict = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
output = flask_restful.marshal(marshal_dict, fields)
self.assertEquals(output, {'foo': 'bar'})
def test_marshal_with_envelope(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
marshal_dict = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
output = flask_restful.marshal(marshal_dict, fields, envelope='hey')
self.assertEquals(output, {'hey': {'foo': 'bar'}})
def test_marshal_decorator(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
@flask_restful.marshal_with(fields)
def try_me():
return OrderedDict([('foo', 'bar'), ('bat', 'baz')])
self.assertEquals(try_me(), {'foo': 'bar'})
def test_marshal_decorator_with_envelope(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
@flask_restful.marshal_with(fields, envelope='hey')
def try_me():
return OrderedDict([('foo', 'bar'), ('bat', 'baz')])
self.assertEquals(try_me(), {'hey': {'foo': 'bar'}})
def test_marshal_decorator_tuple(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
@flask_restful.marshal_with(fields)
def try_me():
return OrderedDict([('foo', 'bar'), ('bat', 'baz')]), 200, {'X-test': 123}
self.assertEquals(try_me(), ({'foo': 'bar'}, 200, {'X-test': 123}))
def test_marshal_decorator_tuple_with_envelope(self):
fields = OrderedDict([('foo', flask_restful.fields.Raw)])
@flask_restful.marshal_with(fields, envelope='hey')
def try_me():
return OrderedDict([('foo', 'bar'), ('bat', 'baz')]), 200, {'X-test': 123}
self.assertEquals(try_me(), ({'hey': {'foo': 'bar'}}, 200, {'X-test': 123}))
def test_marshal_field_decorator(self):
field = flask_restful.fields.Raw
@flask_restful.marshal_with_field(field)
def try_me():
return 'foo'
self.assertEquals(try_me(), 'foo')
def test_marshal_field_decorator_tuple(self):
field = flask_restful.fields.Raw
@flask_restful.marshal_with_field(field)
def try_me():
return 'foo', 200, {'X-test': 123}
self.assertEquals(('foo', 200, {'X-test': 123}), try_me())
def test_marshal_field(self):
fields = OrderedDict({'foo': flask_restful.fields.Raw()})
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
output = flask_restful.marshal(marshal_fields, fields)
self.assertEquals(output, {'foo': 'bar'})
def test_marshal_tuple(self):
fields = OrderedDict({'foo': flask_restful.fields.Raw})
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
output = flask_restful.marshal((marshal_fields,), fields)
self.assertEquals(output, [{'foo': 'bar'}])
def test_marshal_tuple_with_envelope(self):
fields = OrderedDict({'foo': flask_restful.fields.Raw})
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
output = flask_restful.marshal((marshal_fields,), fields, envelope='hey')
self.assertEquals(output, {'hey': [{'foo': 'bar'}]})
def test_marshal_nested(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested({
'fye': flask_restful.fields.String,
}))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', {'fye': 'fum'})])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', 'fum')]))])
self.assertEquals(output, expected)
def test_marshal_nested_with_non_null(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.String)
]), allow_null=False))
])
marshal_fields = [OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])]
output = flask_restful.marshal(marshal_fields, fields)
expected = [OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', None), ('blah', None)]))])]
self.assertEquals(output, expected)
def test_marshal_nested_with_null(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.String)
]), allow_null=True))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'bar'), ('fee', None)])
self.assertEquals(output, expected)
def test_allow_null_presents_data(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.String)
]), allow_null=True))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', {'blah': 'cool'})])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', None), ('blah', 'cool')]))])
self.assertEquals(output, expected)
def test_marshal_nested_property(self):
class TestObject(object):
@property
def fee(self):
return {'blah': 'cool'}
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.Nested(
OrderedDict([
('fye', flask_restful.fields.String),
('blah', flask_restful.fields.String)
]), allow_null=True))
])
obj = TestObject()
obj.foo = 'bar'
obj.bat = 'baz'
output = flask_restful.marshal([obj], fields)
expected = [OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', None), ('blah', 'cool')]))])]
self.assertEquals(output, expected)
def test_marshal_list(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.List(flask_restful.fields.String))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', ['fye', 'fum'])])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'bar'), ('fee', (['fye', 'fum']))])
self.assertEquals(output, expected)
def test_marshal_list_of_nesteds(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.List(flask_restful.fields.Nested({
'fye': flask_restful.fields.String
})))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', {'fye': 'fum'})])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'bar'), ('fee', [OrderedDict([('fye', 'fum')])])])
self.assertEquals(output, expected)
def test_marshal_list_of_lists(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('fee', flask_restful.fields.List(flask_restful.fields.List(
flask_restful.fields.String)))
])
marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', [['fye'], ['fum']])])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'bar'), ('fee', [['fye'], ['fum']])])
self.assertEquals(output, expected)
def test_marshal_nested_dict(self):
fields = OrderedDict([
('foo', flask_restful.fields.Raw),
('bar', OrderedDict([
('a', flask_restful.fields.Raw),
('b', flask_restful.fields.Raw),
])),
])
marshal_fields = OrderedDict([('foo', 'foo-val'), ('bar', 'bar-val'), ('bat', 'bat-val'),
('a', 1), ('b', 2), ('c', 3)])
output = flask_restful.marshal(marshal_fields, fields)
expected = OrderedDict([('foo', 'foo-val'), ('bar', OrderedDict([('a', 1), ('b', 2)]))])
self.assertEquals(output, expected)
def test_api_representation(self):
app = Mock()
api = flask_restful.Api(app)
@api.representation('foo')
def foo():
pass
self.assertEquals(api.representations['foo'], foo)
def test_api_base(self):
app = Mock()
app.configure_mock(**{'record.side_effect': AttributeError})
api = flask_restful.Api(app)
self.assertEquals(api.urls, {})
self.assertEquals(api.prefix, '')
self.assertEquals(api.default_mediatype, 'application/json')
def test_api_delayed_initialization(self):
app = Flask(__name__)
api = flask_restful.Api()
api.add_resource(HelloWorld, '/', endpoint="hello")
api.init_app(app)
with app.test_client() as client:
self.assertEquals(client.get('/').status_code, 200)
def test_api_prefix(self):
app = Mock()
app.configure_mock(**{'record.side_effect': AttributeError})
api = flask_restful.Api(app, prefix='/foo')
self.assertEquals(api.prefix, '/foo')
def test_handle_server_error(self):
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context("/foo"):
resp = api.handle_error(Exception())
self.assertEquals(resp.status_code, 500)
self.assertEquals(resp.data.decode(), dumps({
"message": "Internal Server Error"
}) + "\n")
def test_handle_error_with_code(self):
app = Flask(__name__)
api = flask_restful.Api(app, serve_challenge_on_401=True)
exception = Exception()
exception.code = "Not an integer"
exception.data = {'foo': 'bar'}
with app.test_request_context("/foo"):
resp = api.handle_error(exception)
self.assertEquals(resp.status_code, 500)
self.assertEquals(resp.data.decode(), dumps({"foo": "bar"}) + "\n")
def test_handle_auth(self):
app = Flask(__name__)
api = flask_restful.Api(app, serve_challenge_on_401=True)
with app.test_request_context("/foo"):
resp = api.handle_error(Unauthorized())
self.assertEquals(resp.status_code, 401)
expected_data = dumps({'message': Unauthorized.description}) + "\n"
self.assertEquals(resp.data.decode(), expected_data)
self.assertTrue('WWW-Authenticate' in resp.headers)
def test_handle_api_error(self):
app = Flask(__name__)
api = flask_restful.Api(app)
class Test(flask_restful.Resource):
def get(self):
flask.abort(404)
api.add_resource(Test(), '/api', endpoint='api')
app = app.test_client()
resp = app.get("/api")
assert_equals(resp.status_code, 404)
assert_equals('application/json', resp.headers['Content-Type'])
data = loads(resp.data.decode())
assert_true('message' in data)
def test_handle_non_api_error(self):
app = Flask(__name__)
flask_restful.Api(app)
app = app.test_client()
resp = app.get("/foo")
self.assertEquals(resp.status_code, 404)
self.assertEquals('text/html', resp.headers['Content-Type'])
def test_non_api_error_404_catchall(self):
app = Flask(__name__)
api = flask_restful.Api(app, catch_all_404s=True)
app = app.test_client()
resp = app.get("/foo")
self.assertEquals(api.default_mediatype, resp.headers['Content-Type'])
def test_handle_error_signal(self):
if not signals_available:
# This test requires the blinker lib to run.
print("Can't test signals without signal support")
return
app = Flask(__name__)
api = flask_restful.Api(app)
exception = BadRequest()
recorded = []
def record(sender, exception):
recorded.append(exception)
got_request_exception.connect(record, app)
try:
with app.test_request_context("/foo"):
api.handle_error(exception)
self.assertEquals(len(recorded), 1)
self.assertTrue(exception is recorded[0])
finally:
got_request_exception.disconnect(record, app)
def test_handle_error(self):
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context("/foo"):
resp = api.handle_error(BadRequest())
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.data.decode(), dumps({
'message': BadRequest.description,
}) + "\n")
def test_handle_smart_errors(self):
app = Flask(__name__)
api = flask_restful.Api(app)
view = flask_restful.Resource
api.add_resource(view, '/foo', endpoint='bor')
api.add_resource(view, '/fee', endpoint='bir')
api.add_resource(view, '/fii', endpoint='ber')
with app.test_request_context("/faaaaa"):
resp = api.handle_error(NotFound())
self.assertEquals(resp.status_code, 404)
self.assertEquals(resp.data.decode(), dumps({
"message": NotFound.description,
}) + "\n")
with app.test_request_context("/fOo"):
resp = api.handle_error(NotFound())
self.assertEquals(resp.status_code, 404)
self.assertTrue('did you mean /foo ?' in resp.data.decode())
app.config['ERROR_404_HELP'] = False
with app.test_request_context("/fOo"):
resp = api.handle_error(NotFound())
self.assertEquals(resp.status_code, 404)
self.assertEquals(resp.data.decode(), dumps({
"message": NotFound.description
}) + "\n")
def test_error_router_falls_back_to_original(self):
"""Verify that if an exception occurs in the Flask-RESTful error handler,
the error_router will call the original flask error handler instead.
"""
app = Flask(__name__)
api = flask_restful.Api(app)
app.handle_exception = Mock()
api.handle_error = Mock(side_effect=Exception())
api._has_fr_route = Mock(return_value=True)
exception = Mock(spec=HTTPException)
with app.test_request_context('/foo'):
api.error_router(exception, app.handle_exception)
self.assertTrue(app.handle_exception.called_with(exception))
def test_media_types(self):
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context("/foo", headers={
'Accept': 'application/json'
}):
self.assertEquals(api.mediatypes(), ['application/json'])
def test_media_types_method(self):
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context("/foo", headers={
'Accept': 'application/xml; q=.5'
}):
self.assertEquals(api.mediatypes_method()(Mock()),
['application/xml', 'application/json'])
def test_media_types_q(self):
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context("/foo", headers={
'Accept': 'application/json; q=1, application/xml; q=.5'
}):
self.assertEquals(api.mediatypes(),
['application/json', 'application/xml'])
def test_decorator(self):
def return_zero(func):
return 0
app = Mock(flask.Flask)
app.view_functions = {}
view = Mock()
api = flask_restful.Api(app)
api.decorators.append(return_zero)
api.output = Mock()
api.add_resource(view, '/foo', endpoint='bar')
app.add_url_rule.assert_called_with('/foo', view_func=0)
def test_add_resource_endpoint(self):
app = Mock()
app.view_functions = {}
view = Mock()
api = flask_restful.Api(app)
api.output = Mock()
api.add_resource(view, '/foo', endpoint='bar')
view.as_view.assert_called_with('bar')
def test_add_two_conflicting_resources_on_same_endpoint(self):
app = Flask(__name__)
api = flask_restful.Api(app)
class Foo1(flask_restful.Resource):
def get(self):
return 'foo1'
class Foo2(flask_restful.Resource):
def get(self):
return 'foo2'
api.add_resource(Foo1, '/foo', endpoint='bar')
self.assertRaises(ValueError, api.add_resource, Foo2, '/foo/toto', endpoint='bar')
def test_add_the_same_resource_on_same_endpoint(self):
app = Flask(__name__)
api = flask_restful.Api(app)
class Foo1(flask_restful.Resource):
def get(self):
return 'foo1'
api.add_resource(Foo1, '/foo', endpoint='bar')
api.add_resource(Foo1, '/foo/toto', endpoint='blah')
with app.test_client() as client:
foo1 = client.get('/foo')
self.assertEquals(foo1.data, b'"foo1"\n')
foo2 = client.get('/foo/toto')
self.assertEquals(foo2.data, b'"foo1"\n')
def test_add_resource(self):
app = Mock(flask.Flask)
app.view_functions = {}
api = flask_restful.Api(app)
api.output = Mock()
api.add_resource(views.MethodView, '/foo')
app.add_url_rule.assert_called_with('/foo',
view_func=api.output())
def test_resource_decorator(self):
app = Mock(flask.Flask)
app.view_functions = {}
api = flask_restful.Api(app)
api.output = Mock()
@api.resource('/foo', endpoint='bar')
class Foo(flask_restful.Resource):
pass
app.add_url_rule.assert_called_with('/foo',
view_func=api.output())
def test_add_resource_kwargs(self):
app = Mock(flask.Flask)
app.view_functions = {}
api = flask_restful.Api(app)
api.output = Mock()
api.add_resource(views.MethodView, '/foo', defaults={"bar": "baz"})
app.add_url_rule.assert_called_with('/foo',
view_func=api.output(),
defaults={"bar": "baz"})
def test_add_resource_forward_resource_class_parameters(self):
app = Flask(__name__)
api = flask_restful.Api(app)
class Foo(flask_restful.Resource):
def __init__(self, *args, **kwargs):
self.one = args[0]
self.two = kwargs['secret_state']
def get(self):
return "{0} {1}".format(self.one, self.two)
api.add_resource(Foo, '/foo',
resource_class_args=('wonderful',),
resource_class_kwargs={'secret_state': 'slurm'})
with app.test_client() as client:
foo = client.get('/foo')
self.assertEquals(foo.data, b'"wonderful slurm"\n')
def test_output_unpack(self):
def make_empty_response():
return {'foo': 'bar'}
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context("/foo"):
wrapper = api.output(make_empty_response)
resp = wrapper()
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.data.decode(), '{"foo": "bar"}\n')
def test_output_func(self):
def make_empty_response():
return flask.make_response('')
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context("/foo"):
wrapper = api.output(make_empty_response)
resp = wrapper()
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.data.decode(), '')
def test_resource(self):
app = Flask(__name__)
resource = flask_restful.Resource()
resource.get = Mock()
with app.test_request_context("/foo"):
resource.dispatch_request()
def test_resource_resp(self):
app = Flask(__name__)
resource = flask_restful.Resource()
resource.get = Mock()
with app.test_request_context("/foo"):
resource.get.return_value = flask.make_response('')
resource.dispatch_request()
def test_resource_text_plain(self):
app = Flask(__name__)
def text(data, code, headers=None):
return flask.make_response(six.text_type(data))
class Foo(flask_restful.Resource):
representations = {
'text/plain': text,
}
def get(self):
return 'hello'
with app.test_request_context("/foo", headers={'Accept': 'text/plain'}):
resource = Foo()
resp = resource.dispatch_request()
self.assertEquals(resp.data.decode(), 'hello')
def test_resource_error(self):
app = Flask(__name__)
resource = flask_restful.Resource()
with app.test_request_context("/foo"):
self.assertRaises(AssertionError, lambda: resource.dispatch_request())
def test_resource_head(self):
app = Flask(__name__)
resource = flask_restful.Resource()
with app.test_request_context("/foo", method="HEAD"):
self.assertRaises(AssertionError, lambda: resource.dispatch_request())
def test_abort_data(self):
try:
flask_restful.abort(404, foo='bar')
assert False # We should never get here
except Exception as e:
self.assertEquals(e.data, {'foo': 'bar'})
def test_abort_no_data(self):
try:
flask_restful.abort(404)
assert False # We should never get here
except Exception as e:
self.assertEquals(False, hasattr(e, "data"))
def test_abort_custom_message(self):
try:
flask_restful.abort(404, message="no user")
assert False # We should never get here
except Exception as e:
assert_equals(e.data['message'], "no user")
def test_abort_type(self):
self.assertRaises(HTTPException, lambda: flask_restful.abort(404))
def test_endpoints(self):
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(HelloWorld, '/ids/<int:id>', endpoint="hello")
with app.test_request_context('/foo'):
self.assertFalse(api._has_fr_route())
with app.test_request_context('/ids/3'):
self.assertTrue(api._has_fr_route())
def test_url_for(self):
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(HelloWorld, '/ids/<int:id>')
with app.test_request_context('/foo'):
self.assertEqual(api.url_for(HelloWorld, id=123), '/ids/123')
def test_url_for_with_blueprint(self):
"""Verify that url_for works when an Api object is mounted on a
Blueprint.
"""
api_bp = Blueprint('api', __name__)
app = Flask(__name__)
api = flask_restful.Api(api_bp)
api.add_resource(HelloWorld, '/foo/<string:bar>')
app.register_blueprint(api_bp)
with app.test_request_context('/foo'):
self.assertEqual(api.url_for(HelloWorld, bar='baz'), '/foo/baz')
def test_fr_405(self):
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(HelloWorld, '/ids/<int:id>', endpoint="hello")
app = app.test_client()
resp = app.post('/ids/3')
self.assertEquals(resp.status_code, 405)
self.assertEquals(resp.content_type, api.default_mediatype)
# Allow can be of the form 'GET, PUT, POST'
allow = ', '.join(set(resp.headers.get_all('Allow')))
allow = set(method.strip() for method in allow.split(','))
self.assertEquals(allow,
set(['HEAD', 'OPTIONS'] + HelloWorld.methods))
def test_exception_header_forwarded(self):
"""Test that HTTPException's headers are extended properly"""
app = Flask(__name__)
app.config['DEBUG'] = True
api = flask_restful.Api(app)
class NotModified(HTTPException):
code = 304
def __init__(self, etag, *args, **kwargs):
super(NotModified, self).__init__(*args, **kwargs)
self.etag = quote_etag(etag)
def get_headers(self, *args, **kwargs):
"""Get a list of headers."""
return [('ETag', self.etag)]
class Foo1(flask_restful.Resource):
def get(self):
flask_abort(304, etag='myETag')
api.add_resource(Foo1, '/foo')
_aborter.mapping.update({304: NotModified})
with app.test_client() as client:
foo = client.get('/foo')
self.assertEquals(foo.get_etag(),
unquote_etag(quote_etag('myETag')))
def test_exception_header_forwarding_doesnt_duplicate_headers(self):
"""Test that HTTPException's headers do not add a duplicate
Content-Length header
https://github.com/flask-restful/flask-restful/issues/534
"""
app = Flask(__name__)
api = flask_restful.Api(app)
with app.test_request_context('/'):
r = api.handle_error(BadRequest())
self.assertEqual(len(r.headers.getlist('Content-Length')), 1)
def test_will_prettyprint_json_in_debug_mode(self):
app = Flask(__name__)
app.config['DEBUG'] = True
api = flask_restful.Api(app)
class Foo1(flask_restful.Resource):
def get(self):
return {'foo': 'bar', 'baz': 'asdf'}
api.add_resource(Foo1, '/foo', endpoint='bar')
with app.test_client() as client:
foo = client.get('/foo')
# Python's dictionaries have random order (as of "new" Pythons,
# anyway), so we can't verify the actual output here. We just
# assert that they're properly prettyprinted.
lines = foo.data.splitlines()
lines = [line.decode() for line in lines]
self.assertEquals("{", lines[0])
self.assertTrue(lines[1].startswith(' '))
self.assertTrue(lines[2].startswith(' '))
self.assertEquals("}", lines[3])
# Assert our trailing newline.
self.assertTrue(foo.data.endswith(b'\n'))
def test_read_json_settings_from_config(self):
class TestConfig(object):
RESTFUL_JSON = {'indent': 2,
'sort_keys': True,
'separators': (', ', ': ')}
app = Flask(__name__)
app.config.from_object(TestConfig)
api = flask_restful.Api(app)
class Foo(flask_restful.Resource):
def get(self):
return {'foo': 'bar', 'baz': 'qux'}
api.add_resource(Foo, '/foo')
with app.test_client() as client:
data = client.get('/foo').data
expected = b'{\n "baz": "qux", \n "foo": "bar"\n}\n'
self.assertEquals(data, expected)
def test_use_custom_jsonencoder(self):
class CabageEncoder(JSONEncoder):
def default(self, obj):
return 'cabbage'
class TestConfig(object):
RESTFUL_JSON = {'cls': CabageEncoder}
app = Flask(__name__)
app.config.from_object(TestConfig)
api = flask_restful.Api(app)
class Cabbage(flask_restful.Resource):
def get(self):
return {'frob': object()}
api.add_resource(Cabbage, '/cabbage')
with app.test_client() as client:
data = client.get('/cabbage').data
expected = b'{"frob": "cabbage"}\n'
self.assertEquals(data, expected)
def test_json_with_no_settings(self):
app = Flask(__name__)
api = flask_restful.Api(app)
class Foo(flask_restful.Resource):
def get(self):
return {'foo': 'bar'}
api.add_resource(Foo, '/foo')
with app.test_client() as client:
data = client.get('/foo').data
expected = b'{"foo": "bar"}\n'
self.assertEquals(data, expected)
def test_redirect(self):
app = Flask(__name__)
api = flask_restful.Api(app)
class FooResource(flask_restful.Resource):
def get(self):
return redirect('/')
api.add_resource(FooResource, '/api')
app = app.test_client()
resp = app.get('/api')
self.assertEquals(resp.status_code, 302)
self.assertEquals(resp.headers['Location'], 'http://localhost/')
def test_json_float_marshalled(self):
app = Flask(__name__)
api = flask_restful.Api(app)
class FooResource(flask_restful.Resource):
fields = {'foo': flask_restful.fields.Float}
def get(self):
return flask_restful.marshal({"foo": 3.0}, self.fields)
api.add_resource(FooResource, '/api')
app = app.test_client()
resp = app.get('/api')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.data.decode('utf-8'), '{"foo": 3.0}\n')
def test_custom_error_message(self):
errors = {
'FooError': {
'message': "api is foobar",
'status': 418,
}
}
class FooError(ValueError):
pass
app = Flask(__name__)
api = flask_restful.Api(app, errors=errors)
exception = FooError()
exception.code = 400
exception.data = {'message': 'FooError'}
with app.test_request_context("/foo"):
resp = api.handle_error(exception)
self.assertEquals(resp.status_code, 418)
self.assertEqual(loads(resp.data.decode('utf8')), {"message": "api is foobar", "status": 418})
def test_calling_owns_endpoint_before_api_init(self):
api = flask_restful.Api()
try:
api.owns_endpoint('endpoint')
except AttributeError as ae:
self.fail(ae.message)
def test_selectively_apply_method_decorators(self):
def upper_deco(f):
def upper(*args, **kwargs):
return f(*args, **kwargs).upper()
return upper
class TestResource(flask_restful.Resource):
method_decorators = {'get': [upper_deco]}
def get(self):
return 'get test'
def post(self):
return 'post test'
app = Flask(__name__)
with app.test_request_context('/', method='POST'):
r = TestResource().dispatch_request()
assert r == 'post test'
with app.test_request_context('/', method='GET'):
r = TestResource().dispatch_request()
assert r == 'GET TEST'
def test_apply_all_method_decorators_if_not_mapping(self):
def upper_deco(f):
def upper(*args, **kwargs):
return f(*args, **kwargs).upper()
return upper
class TestResource(flask_restful.Resource):
method_decorators = [upper_deco]
def get(self):
return 'get test'
def post(self):
return 'post test'
app = Flask(__name__)
with app.test_request_context('/', method='POST'):
r = TestResource().dispatch_request()
assert r == 'POST TEST'
with app.test_request_context('/', method='GET'):
r = TestResource().dispatch_request()
assert r == 'GET TEST'
def test_decorators_only_applied_at_dispatch(self):
def upper_deco(f):
def upper(*args, **kwargs):
return f(*args, **kwargs).upper()
return upper
class TestResource(flask_restful.Resource):
method_decorators = [upper_deco]
def get(self):
return 'get test'
def post(self):
return 'post test'
r = TestResource()
assert r.get() == 'get test'
assert r.post() == 'post test'
if __name__ == '__main__':
unittest.main()
| 35.614603
| 113
| 0.588167
|
abde3500ace9b33e3a95096a9d472b2efa05e007
| 30
|
py
|
Python
|
dmc/__init__.py
|
surbhijain1502/dmc-python
|
075b4b5d5ee8422963da153ce96349f1a937f884
|
[
"Apache-2.0"
] | null | null | null |
dmc/__init__.py
|
surbhijain1502/dmc-python
|
075b4b5d5ee8422963da153ce96349f1a937f884
|
[
"Apache-2.0"
] | null | null | null |
dmc/__init__.py
|
surbhijain1502/dmc-python
|
075b4b5d5ee8422963da153ce96349f1a937f884
|
[
"Apache-2.0"
] | 3
|
2020-09-04T12:15:11.000Z
|
2021-09-07T17:55:50.000Z
|
from dmc.lrpc2 import gettoken
| 30
| 30
| 0.866667
|
1a210373b16ffd29e2d26baaf237ba83dfef4817
| 998
|
py
|
Python
|
algorithms_in_python/_4_recursion/examples/ruler.py
|
junteudjio/algorithms_in_python
|
90ceced09828aedf845605e5236f48ea92a4419e
|
[
"MIT"
] | null | null | null |
algorithms_in_python/_4_recursion/examples/ruler.py
|
junteudjio/algorithms_in_python
|
90ceced09828aedf845605e5236f48ea92a4419e
|
[
"MIT"
] | null | null | null |
algorithms_in_python/_4_recursion/examples/ruler.py
|
junteudjio/algorithms_in_python
|
90ceced09828aedf845605e5236f48ea92a4419e
|
[
"MIT"
] | 1
|
2018-10-15T06:28:45.000Z
|
2018-10-15T06:28:45.000Z
|
__author__ = 'Junior Teudjio'
def draw_ruler(number_of_inches, major_tick_length):
def draw_inch_containt(tick_length):
if tick_length > 0:
draw_inch_containt(tick_length - 1)
print '-'*tick_length
draw_inch_containt(tick_length - 1)
def draw_ruler_helper(current_inch):
print '-' * major_tick_length, ' ', current_inch
if number_of_inches == current_inch:
return
else:
draw_inch_containt(major_tick_length - 1)
draw_ruler_helper(current_inch + 1)
draw_ruler_helper(0)
if __name__ == '__main__':
print 'Ruler of 2 inches with major tick length of 4'
draw_ruler(number_of_inches=2, major_tick_length=4)
print
print 'Ruler of 1 inches with major tick length of 5'
draw_ruler(number_of_inches=1, major_tick_length=5)
print
print 'Ruler of 3 inches with major tick length of 3'
draw_ruler(number_of_inches=3, major_tick_length=3)
print
| 24.95
| 57
| 0.673347
|
5c3d4c244955ce4757c5329c6b95c20f8a23d26c
| 3,650
|
py
|
Python
|
tests/preprocessing/test_image.py
|
BioroboticsLab/diktyo
|
1a81dc36005c4b44021fa69285f25fc0229115b8
|
[
"Apache-2.0"
] | 3
|
2016-03-19T15:59:30.000Z
|
2016-05-25T12:11:25.000Z
|
tests/preprocessing/test_image.py
|
BioroboticsLab/diktyo
|
1a81dc36005c4b44021fa69285f25fc0229115b8
|
[
"Apache-2.0"
] | 2
|
2015-12-17T12:58:22.000Z
|
2016-05-25T16:38:53.000Z
|
tests/preprocessing/test_image.py
|
BioroboticsLab/diktyo
|
1a81dc36005c4b44021fa69285f25fc0229115b8
|
[
"Apache-2.0"
] | 1
|
2016-05-25T12:10:33.000Z
|
2016-05-25T12:10:33.000Z
|
import numpy as np
import pytest
from diktya.preprocessing.image import chain_augmentations, WarpAugmentation, \
NoiseAugmentation, random_std, ChannelScaleShiftAugmentation, CropAugmentation, \
LambdaAugmentation
@pytest.fixture
def batchsize():
return 16
@pytest.fixture
def shape2d():
return (32, 32)
@pytest.fixture
def shape3d():
return (3, 32, 32)
@pytest.fixture
def data_gen1d(batchsize):
return lambda: np.random.rand(batchsize, 1)
@pytest.fixture
def data_gen2d(batchsize, shape2d):
return lambda: np.random.rand(batchsize, shape2d[0], shape2d[1])
@pytest.fixture
def data_gen3d(batchsize, shape3d):
return lambda: np.random.rand(batchsize, *shape3d)
def test_crop_augmentation(data_gen3d):
x = data_gen3d()
crop_shape = (16, 16)
crop_aug = CropAugmentation(5, crop_shape)
trans = crop_aug.get_transformation(x.shape[1:])
assert trans.translation == [5, 5]
start = x.shape[-1] // 2 - crop_shape[0] // 2 + 5
end = start + crop_shape[0]
assert (trans(x[0]) == x[0, :, start:end, start:end]).all()
x_crop = crop_aug(x)
assert x_crop.shape[-2:] == crop_shape
def test_channel_scale_shift(data_gen3d):
x = data_gen3d()
aug = ChannelScaleShiftAugmentation((0.8, 1.2), (-0.3, 0.3))
x_aug = aug(x)
assert x_aug.max() <= aug.max
assert aug.min <= x_aug.min()
trans = aug.get_transformation(x.shape[1:])
assert len(trans.scale) == x.shape[1]
assert len(trans.shift) == x.shape[1]
with pytest.raises(Exception):
aug.get_transformation((1, 1, 1, 1))
with pytest.raises(Exception):
aug.get_transformation((1, 1))
def test_warp(data_gen1d, data_gen2d):
X = data_gen2d()
t = int(3)
aug = WarpAugmentation(translation=lambda: t)
Xa = aug(X)
assert(Xa.shape == X.shape)
x = X[0]
id_aug = WarpAugmentation()
identity = id_aug.get_transformation(x.shape)
np.testing.assert_allclose(identity(x), x, rtol=1e-5)
trans = aug.get_transformation(x.shape)
matrix = trans.affine_transformation.params
np.testing.assert_allclose(
matrix,
[[1, 0, t],
[0, 1, t],
[0, 0, 1]], rtol=1e-5)
x_aug = trans(x)
# transformations stays constant
assert (x_aug == trans(x)).all()
np.testing.assert_allclose(x_aug[:-t, :-t], x[t:, t:], rtol=1e-5)
def test_noise_augmentation(data_gen1d, data_gen2d):
X = data_gen2d()
mean, std = 0.03, 0.01
aug = NoiseAugmentation(random_std(mean, std))
Xa = aug(X)
stds = [aug.std() for _ in range(1000)]
assert abs(np.std(stds) - std) <= 0.001
assert abs(np.mean(stds) - mean) <= 0.001
trans = aug.get_transformation((10000,))
assert abs(trans.noise.std() - trans.std) <= 0.001
assert abs(trans.noise.mean()) <= 0.001
assert(Xa.shape == X.shape)
def test_multiple(data_gen1d, data_gen2d):
X = data_gen2d()
y = data_gen1d()
aug = chain_augmentations(NoiseAugmentation(),
WarpAugmentation())
Xa, ya = aug((X, y))
assert(Xa.shape == X.shape)
assert(ya.shape == y.shape)
def test_label_augmentation(data_gen2d):
X = data_gen2d()
y = X.copy()
aug = chain_augmentations(WarpAugmentation(), augment_y=True)
Xa, ya = aug((X, y))
assert(Xa.shape == X.shape)
assert(ya.shape == y.shape)
assert (Xa == ya).all()
def test_lambda_augmentation(data_gen2d):
X = data_gen2d()
aug = LambdaAugmentation(lambda a: np.zeros_like(a))
assert (aug(X) == 0).all()
aug = LambdaAugmentation(lambda a, factor: factor*a, factor=2)
assert (aug(X) == 2*X).all()
| 25
| 85
| 0.643288
|
cc8209e6fd0889805281d6b4cefcd5863aabcb53
| 225
|
py
|
Python
|
tests/tests_CBEObject.py
|
terrorizer1980/coinbase-exchange-python
|
a7307876745d513b36516f3ad2b59ab27495e265
|
[
"MIT",
"Unlicense"
] | 20
|
2015-02-28T19:02:49.000Z
|
2018-02-05T14:00:55.000Z
|
tests/tests_CBEObject.py
|
terrorizer1980/coinbase-exchange-python
|
a7307876745d513b36516f3ad2b59ab27495e265
|
[
"MIT",
"Unlicense"
] | null | null | null |
tests/tests_CBEObject.py
|
terrorizer1980/coinbase-exchange-python
|
a7307876745d513b36516f3ad2b59ab27495e265
|
[
"MIT",
"Unlicense"
] | 9
|
2015-05-08T16:39:57.000Z
|
2020-11-17T22:41:32.000Z
|
import unittest
from cbe import CoinbaseExchange
class TestCBEObject(unittest.TestCase):
def setUp(self):
self.cbe = CoinbaseExchange()
def test_CBEExists(self):
self.assertEqual( type(self.cbe), CoinbaseExchange )
| 22.5
| 54
| 0.777778
|
fdf3cc11a9cde823a6975e89590709e97cda8583
| 804
|
py
|
Python
|
magulbot/rawpages/migrations/0001_initial.py
|
magul/magulbot
|
8a1c78d0ec8dfd8f7730ffd0647b9f139c2c806e
|
[
"MIT"
] | null | null | null |
magulbot/rawpages/migrations/0001_initial.py
|
magul/magulbot
|
8a1c78d0ec8dfd8f7730ffd0647b9f139c2c806e
|
[
"MIT"
] | null | null | null |
magulbot/rawpages/migrations/0001_initial.py
|
magul/magulbot
|
8a1c78d0ec8dfd8f7730ffd0647b9f139c2c806e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-07 22:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='RawPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('wiki_family', models.CharField(max_length=20)),
('wiki_lang', models.CharField(max_length=4)),
('name', models.CharField(max_length=1000)),
('new_id', models.CharField(max_length=15)),
('rev_id', models.CharField(max_length=15)),
],
),
]
| 28.714286
| 114
| 0.580846
|
74ad2f2c652818ab724948aecd5e4b715f3fd043
| 2,534
|
py
|
Python
|
sa/profiles/Orion/NOS/profile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | null | null | null |
sa/profiles/Orion/NOS/profile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | null | null | null |
sa/profiles/Orion/NOS/profile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | null | null | null |
# ---------------------------------------------------------------------
# Vendor: Orion (Orion Networks)
# OS: NOS
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "Orion.NOS"
pattern_unprivileged_prompt = r"^(?P<hostname>\S+)\s*>"
pattern_prompt = r"^(?P<hostname>\S+)\s*#"
pattern_syntax_error = r"^% \" .+ \" Unknown command."
command_super = "enable"
command_disable_pager = "terminal page-break disable"
pattern_more = " --More-- "
command_more = " "
command_exit = "exit"
config_volatile = [
r"radius(-| accounting-server )encrypt-key \S+\n",
r"tacacs(-server | accounting-server )encrypt-key \S+\n",
]
rx_interface_name = re.compile(r"port\s*(?P<re_port>\d+)")
rx_ver = re.compile(
r"^Product name\s*:\s*(?P<platform>.+)\s*\n"
r"^NOS\s+Version:? NOS_(?P<version>\d+\.\d+\.\d+).+\n"
r"(^Support ipv6\s*:\s*(?P<ipv6_support>\S+)\s*\n)?"
r"^Bootstrap\s+Version:? (?P<bootprom>(Bootstrap_\d+\.\d+\.\d+|UNKNOWN)).*\n"
r"(^FPGA Version\s*\n)?"
r"^Hardware( \S+| \S+\s\S+|) Version( Rev.|\: ?|\s*)(?P<hardware>\S+)\s*(\nCPLD Version: 1.0)?\n*"
r"\n"
r"^System MacAddress is\s*:\s*(?P<mac>\S+)\s*\n"
r"^Serial number\s*:\s*(?P<serial>\S+)\s*\n",
re.MULTILINE,
)
rx_ver2 = re.compile(
r"^Product Name\s*:\s*(?P<platform>.+)\s*\n"
r"^Hardware Version: (?P<hardware>\S+)\s*\n"
r"^Software Version: NOS_(?P<version>\d+\.\d+\.\d+).+\n"
r"(^PCB Version.+\n)?"
r"(^CPLD Version.+\n)?"
r"(^NOS Version.+\n)?"
r"^Bootstrap Version: (?P<bootprom>\d+\.\d+\.\d+).*\n"
r"(^Compiled.+\n)?"
r"\s*\n"
r"^System MacAddress:\s*(?P<mac>\S+)\s*\n"
r"^Serial number:\s*(?P<serial>\S+)\s*\n",
re.MULTILINE,
)
def get_version(self, script):
c = script.cli("show version", cached=True)
match = self.rx_ver.search(c)
if not match:
match = self.rx_ver2.search(c)
return match.groupdict()
def convert_interface_name(self, s):
if self.rx_interface_name.match(s):
return self.rx_interface_name.match(s).group("re_port")
return s
| 35.194444
| 106
| 0.506314
|
8d20ac5b29531460659a3ab51c4e5c011df91108
| 953
|
py
|
Python
|
example/05.Distance_sensor/distance_sensor.get_distance_inches.py
|
rundhall/PC-LEGO-SPIKE-Simulator
|
5b2fae19293875b2f60d599940d77237700798d3
|
[
"MIT"
] | null | null | null |
example/05.Distance_sensor/distance_sensor.get_distance_inches.py
|
rundhall/PC-LEGO-SPIKE-Simulator
|
5b2fae19293875b2f60d599940d77237700798d3
|
[
"MIT"
] | null | null | null |
example/05.Distance_sensor/distance_sensor.get_distance_inches.py
|
rundhall/PC-LEGO-SPIKE-Simulator
|
5b2fae19293875b2f60d599940d77237700798d3
|
[
"MIT"
] | null | null | null |
'''get_distance_inches(short_range=False)
Gets the measured distance in inches.
Parameters
short_range
Whether or not to use short range mode. Short range mode increases accuracy, but it can only detect nearby objects.
Type:boolean
Values:True or False
Default:False
Returns
The measured distance or "none" if the distance can't be measured.
Type:float (decimal number)
Values:any value between 0 and 79
Errors
TypeError
short_range is not a boolean.
RuntimeError
The sensor has been disconnected from the Port.
Events'''
from spike import DistanceSensor
import time
# Initialize the Distance Sensor
wall_detector = DistanceSensor('E')
# Measure the distance between the Distance Sensor and an object, in centimeters and inches
while True:
dist_cm = wall_detector.get_distance_cm()
dist_inches = wall_detector.get_distance_inches()
# Print both results to the console
print('cm:', dist_cm, 'Inches:', dist_inches)
time.sleep_ms(500)
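# Added hedged illustration of the short_range flag documented in the docstring above.
# This sketch is not part of the original example and assumes the simulator accepts the
# same keyword argument as the real SPIKE API; since the loop above never terminates,
# the variant is shown as a comment only:
#
#   near_dist = wall_detector.get_distance_inches(short_range=True)
#   if near_dist is not None and near_dist < 4:
#       print('Object closer than 4 inches (short range mode)')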
| 28.029412
| 115
| 0.789087
|
33a4c556fb72c1954a9dc738c8b1e436fa087b70
| 3,783
|
py
|
Python
|
segmentation_models_pytorch/base/modules.py
|
yuqinghuang01/segmentation_models.pytorch
|
7e338ec0ca4ac512ba3fc5fe113f4ad1a915c828
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/base/modules.py
|
yuqinghuang01/segmentation_models.pytorch
|
7e338ec0ca4ac512ba3fc5fe113f4ad1a915c828
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/base/modules.py
|
yuqinghuang01/segmentation_models.pytorch
|
7e338ec0ca4ac512ba3fc5fe113f4ad1a915c828
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
try:
from inplace_abn import InPlaceABN
except ImportError:
InPlaceABN = None
class Conv2dReLU(nn.Sequential):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
padding=0,
stride=1,
use_batchnorm=True,
):
if use_batchnorm == "inplace" and InPlaceABN is None:
raise RuntimeError(
"In order to use `use_batchnorm='inplace'` inplace_abn package must be installed. "
+ "To install see: https://github.com/mapillary/inplace_abn"
)
conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=not (use_batchnorm),
)
relu = nn.ReLU(inplace=True)
if use_batchnorm == "inplace":
bn = InPlaceABN(out_channels, activation="leaky_relu", activation_param=0.0)
relu = nn.Identity()
elif use_batchnorm and use_batchnorm != "inplace":
bn = nn.BatchNorm2d(out_channels)
else:
bn = nn.Identity()
super(Conv2dReLU, self).__init__(conv, bn, relu)
class SCSEModule(nn.Module):
def __init__(self, in_channels, reduction=16):
super().__init__()
self.cSE = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, in_channels // reduction, 1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels // reduction, in_channels, 1),
nn.Sigmoid(),
)
self.sSE = nn.Sequential(nn.Conv2d(in_channels, 1, 1), nn.Sigmoid())
def forward(self, x):
return x * self.cSE(x) + x * self.sSE(x)
class ArgMax(nn.Module):
def __init__(self, dim=None):
super().__init__()
self.dim = dim
def forward(self, x):
return torch.argmax(x, dim=self.dim)
class Clamp(nn.Module):
def __init__(self, min=0, max=1):
super().__init__()
self.min, self.max = min, max
def forward(self, x):
return torch.clamp(x, self.min, self.max)
class Activation(nn.Module):
def __init__(self, name, **params):
super().__init__()
if name is None or name == 'identity':
self.activation = nn.Identity(**params)
elif name == 'sigmoid':
self.activation = nn.Sigmoid()
elif name == 'softmax2d':
self.activation = nn.Softmax(dim=1, **params)
elif name == 'softmax':
self.activation = nn.Softmax(**params)
elif name == 'logsoftmax':
self.activation = nn.LogSoftmax(**params)
elif name == 'tanh':
self.activation = nn.Tanh()
elif name == 'argmax':
self.activation = ArgMax(**params)
elif name == 'argmax2d':
self.activation = ArgMax(dim=1, **params)
elif name == 'clamp':
self.activation = Clamp(**params)
elif name == 'relu':
self.activation = nn.ReLU(inplace=True)
elif callable(name):
self.activation = name(**params)
else:
raise ValueError('Activation should be callable/sigmoid/softmax/logsoftmax/tanh/None; got {}'.format(name))
def forward(self, x):
return self.activation(x)
class Attention(nn.Module):
def __init__(self, name, **params):
super().__init__()
if name is None:
self.attention = nn.Identity(**params)
elif name == 'scse':
self.attention = SCSEModule(**params)
else:
raise ValueError("Attention {} is not implemented".format(name))
def forward(self, x):
return self.attention(x)
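# Added usage sketch (hedged illustration, not part of the original module): exercises
# the Activation name-to-module mapping and the SCSE attention block defined above.
if __name__ == "__main__":
    x = torch.randn(2, 3, 8, 8)
    act = Activation("softmax2d")          # nn.Softmax over the channel dimension
    assert act(x).shape == x.shape
    assert Activation(None)(x) is x        # identity pass-through returns the input tensor
    attn = Attention("scse", in_channels=3, reduction=1)
    assert attn(x).shape == x.shape        # attention preserves the input shape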
| 28.231343
| 119
| 0.562781
|
c315a691f0eb3552f960142ce0912a21510acf54
| 722
|
py
|
Python
|
project_euler003.py
|
dcassells/Project-Euler
|
e9d1d6990fc50d8f6013a40e9666b2d0bf173d78
|
[
"MIT"
] | null | null | null |
project_euler003.py
|
dcassells/Project-Euler
|
e9d1d6990fc50d8f6013a40e9666b2d0bf173d78
|
[
"MIT"
] | null | null | null |
project_euler003.py
|
dcassells/Project-Euler
|
e9d1d6990fc50d8f6013a40e9666b2d0bf173d78
|
[
"MIT"
] | null | null | null |
"""
The prime factors of 13195 are 5, 7, 13, and 29.
What is the largest prime factor of the number 600851475143?
"""
def prime(i,primes):
for prime in primes:
if not (i%prime):
return False
primes.add(i)
return i
def prime_factors(x):
# set of primes
primes = set()
# initiate iterator
i = 2
# prime factors
factors = list()
while True:
if prime(i,primes):
while True:
# if x = 1 then we are done
if x == 1:
return factors
# check if i is a factor
elif x%i == 0:
# append new prime factor
factors.append(i)
# update x
x = x/i
# else move on to the next prime
else:
break
# increment iterator
i += 1
print(prime_factors(600851475143))
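# Added sanity check (hedged, for illustration): uses the example from the docstring above;
# 13195 = 5 * 7 * 13 * 29, so the factors are expected in ascending order.
assert prime_factors(13195) == [5, 7, 13, 29]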
| 16.409091
| 60
| 0.620499
|
893919d81ce29c356948aceb7cb239aeb96e299f
| 7,439
|
py
|
Python
|
aodh/tests/unit/test_evaluator.py
|
yi-cloud/aodh
|
bf2371a1f2175b87b7769ebf76f3cc74b35d3c87
|
[
"Apache-2.0"
] | null | null | null |
aodh/tests/unit/test_evaluator.py
|
yi-cloud/aodh
|
bf2371a1f2175b87b7769ebf76f3cc74b35d3c87
|
[
"Apache-2.0"
] | null | null | null |
aodh/tests/unit/test_evaluator.py
|
yi-cloud/aodh
|
bf2371a1f2175b87b7769ebf76f3cc74b35d3c87
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for aodh.evaluator.AlarmEvaluationService.
"""
import fixtures
import time
from unittest import mock
from oslo_config import fixture as fixture_config
from stevedore import extension
from aodh import evaluator
from aodh import service
from aodh.tests import base as tests_base
class TestAlarmEvaluationService(tests_base.BaseTestCase):
def setUp(self):
super(TestAlarmEvaluationService, self).setUp()
conf = service.prepare_service(argv=[], config_files=[])
self.CONF = self.useFixture(fixture_config.Config(conf)).conf
self.CONF.set_override('workers', 1, 'evaluator')
self.setup_messaging(self.CONF)
self.threshold_eval = mock.MagicMock()
self._fake_conn = mock.Mock()
self._fake_conn.get_alarms.return_value = []
self._fake_pc = mock.Mock()
self._fake_em = extension.ExtensionManager.make_test_instance(
[
extension.Extension(
'gnocchi_aggregation_by_metrics_threshold',
None,
None,
self.threshold_eval),
]
)
self.useFixture(fixtures.MockPatch(
'stevedore.extension.ExtensionManager',
return_value=self._fake_em
))
self.useFixture(fixtures.MockPatch(
'aodh.coordination.PartitionCoordinator',
return_value=self._fake_pc
))
self.useFixture(fixtures.MockPatch(
'aodh.storage.get_connection_from_config',
return_value=self._fake_conn
))
def _do_test_start(self, test_interval=120,
coordination_heartbeat=1.0,
coordination_active=False):
self.CONF.set_override('evaluation_interval',
test_interval)
self.CONF.set_override('heartbeat',
coordination_heartbeat,
group='coordination')
self._fake_pc.is_active.return_value = coordination_active
svc = evaluator.AlarmEvaluationService(0, self.CONF)
self.addCleanup(svc.terminate)
svc.terminate()
svc.partition_coordinator.start.assert_called_once_with()
svc.partition_coordinator.join_group.assert_called_once_with(
svc.PARTITIONING_GROUP_NAME)
def test_start_singleton(self):
self._do_test_start(coordination_active=False)
def test_start_coordinated(self):
self._do_test_start(coordination_active=True)
def test_start_coordinated_high_hb_interval(self):
self._do_test_start(coordination_active=True, test_interval=10,
coordination_heartbeat=5)
def test_evaluation_cycle(self):
alarm = mock.Mock(type='gnocchi_aggregation_by_metrics_threshold',
alarm_id="alarm_id1")
self._fake_pc.extract_my_subset.return_value = ["alarm_id1"]
self._fake_pc.is_active.side_effect = [False, False, True, True]
self._fake_conn.get_alarms.return_value = [alarm]
self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None]
svc = evaluator.AlarmEvaluationService(0, self.CONF)
self.addCleanup(svc.terminate)
time.sleep(1)
target = svc.partition_coordinator.extract_my_subset
target.assert_called_once_with(svc.PARTITIONING_GROUP_NAME,
["alarm_id1"])
self.threshold_eval.evaluate.assert_called_once_with(alarm)
def test_evaluation_cycle_with_bad_alarm(self):
alarms = [
mock.Mock(type='gnocchi_aggregation_by_metrics_threshold',
name='bad', alarm_id='a'),
mock.Mock(type='gnocchi_aggregation_by_metrics_threshold',
name='good', alarm_id='b'),
]
self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None]
self._fake_pc.is_active.side_effect = [False, False, True, True, True]
self._fake_pc.extract_my_subset.return_value = ['a', 'b']
self._fake_conn.get_alarms.return_value = alarms
svc = evaluator.AlarmEvaluationService(0, self.CONF)
self.addCleanup(svc.terminate)
time.sleep(1)
self.assertEqual([mock.call(alarms[0]), mock.call(alarms[1])],
self.threshold_eval.evaluate.call_args_list)
def test_unknown_extension_skipped(self):
alarms = [
mock.Mock(type='not_existing_type', alarm_id='a'),
mock.Mock(type='gnocchi_aggregation_by_metrics_threshold',
alarm_id='b')
]
self._fake_pc.is_active.return_value = False
self._fake_pc.extract_my_subset.return_value = ['a', 'b']
self._fake_conn.get_alarms.return_value = alarms
svc = evaluator.AlarmEvaluationService(0, self.CONF)
self.addCleanup(svc.terminate)
time.sleep(1)
self.threshold_eval.evaluate.assert_called_once_with(alarms[1])
def test_check_alarm_query_constraints(self):
self._fake_conn.get_alarms.return_value = []
self._fake_pc.extract_my_subset.return_value = []
self._fake_pc.is_active.return_value = False
svc = evaluator.AlarmEvaluationService(0, self.CONF)
self.addCleanup(svc.terminate)
time.sleep(1)
child = {'enabled': True, 'type': {'ne': 'event'}}
self.assertDictContains(svc.storage_conn.get_alarms.call_args[1],
child)
def test_evaluation_cycle_no_coordination(self):
alarm = mock.Mock(type='gnocchi_aggregation_by_metrics_threshold',
alarm_id="alarm_id1")
self._fake_pc.is_active.return_value = False
self._fake_conn.get_alarms.return_value = [alarm]
self._fake_conn.conditional_update.return_value = True
svc = evaluator.AlarmEvaluationService(0, self.CONF)
self.addCleanup(svc.terminate)
time.sleep(1)
target = svc.partition_coordinator.extract_my_subset
self.assertEqual(0, target.call_count)
self.threshold_eval.evaluate.assert_called_once_with(alarm)
def test_evaluation_cycle_no_coordination_alarm_modified(self):
alarm = mock.Mock(type='gnocchi_aggregation_by_metrics_threshold',
alarm_id="alarm_id1")
self._fake_pc.is_active.return_value = False
self._fake_conn.get_alarms.return_value = [alarm]
self._fake_conn.conditional_update.return_value = False
svc = evaluator.AlarmEvaluationService(0, self.CONF)
self.addCleanup(svc.terminate)
time.sleep(1)
target = svc.partition_coordinator.extract_my_subset
self.assertEqual(0, target.call_count)
self.assertEqual(0, self.threshold_eval.evaluate.call_count)
| 38.744792
| 78
| 0.663261
|
0c4940830194ff7ea62a34286800ffa84ec996e8
| 1,031
|
py
|
Python
|
examples/Cliner/CliNER/code/feature_extraction/func_cache.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | null | null | null |
examples/Cliner/CliNER/code/feature_extraction/func_cache.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | null | null | null |
examples/Cliner/CliNER/code/feature_extraction/func_cache.py
|
swapnull7/forte
|
737a72afd440d40c3826c3a7c5e4e44235c0f701
|
[
"Apache-2.0"
] | null | null | null |
from repoze.lru import lru_cache
class func_cache(lru_cache):
def __init__(self, verbose=False):
super(func_cache, self).__init__(500)
self.verbose = verbose
def ShowInfo(self):
# This function is only implicitly called if verbose flag is set.
print("Cache results for:", self.FuncName)
print(" hits:", self.cache.hits)
print(" misses:", self.cache.misses)
print(" lookups:{}", self.cache.lookups, "\n")
def __call__(self, f):
lru_cached = super(func_cache, self).__call__(f)
lru_cached.ShowInfo = self.ShowInfo
# pylint: disable=attribute-defined-outside-init
self.FuncName = f.__name__
return lru_cached
def __del__(self):
if self.verbose:
self.ShowInfo()
# Test functionality
if __name__ == '__main__':
@func_cache()
def rec(n):
if not n:
return n
return rec(n - 1)
rec.ShowInfo()
rec(3)
rec.ShowInfo()
rec(3)
rec.ShowInfo()
| 24.547619
| 73
| 0.601358
|
a4831bbd7f7eff0bb36ad8173219eef452106fae
| 1,668
|
py
|
Python
|
ckan/cli/server.py
|
delyajoseph/ckan-dc
|
cebce8693ec6576ee179cd1eca85a6bdf3878692
|
[
"Apache-2.0"
] | 1
|
2020-08-13T11:49:56.000Z
|
2020-08-13T11:49:56.000Z
|
ckan/cli/server.py
|
delyajoseph/ckan-dc
|
cebce8693ec6576ee179cd1eca85a6bdf3878692
|
[
"Apache-2.0"
] | 4
|
2019-11-25T10:52:34.000Z
|
2020-12-07T15:28:43.000Z
|
ckan/cli/server.py
|
delyajoseph/ckan-dc
|
cebce8693ec6576ee179cd1eca85a6bdf3878692
|
[
"Apache-2.0"
] | 1
|
2020-09-10T11:51:09.000Z
|
2020-09-10T11:51:09.000Z
|
# encoding: utf-8
import logging
import click
from werkzeug.serving import run_simple
from ckan.common import config
import ckan.plugins.toolkit as tk
log = logging.getLogger(__name__)
@click.command(u"run", short_help=u"Start development server")
@click.option(u"-H", u"--host", default=u"localhost", help=u"Set host")
@click.option(u"-p", u"--port", default=5000, help=u"Set port")
@click.option(u"-r", u"--reloader", default=True, help=u"Use reloader")
@click.option(
u"-t", u"--threaded", is_flag=True,
help=u"Handle each request in a separate thread"
)
@click.option(u"-e", u"--extra-files", multiple=True)
@click.option(
u"--processes", type=int, default=0,
help=u"Maximum number of concurrent processes"
)
@click.pass_context
def run(ctx, host, port, reloader, threaded, extra_files, processes):
u"""Runs the Werkzeug development server"""
threaded = threaded or tk.asbool(config.get(u"ckan.devserver.threaded"))
processes = processes or tk.asint(
config.get(u"ckan.devserver.multiprocess", 1)
)
if threaded and processes > 1:
tk.error_shout(u"Cannot have a multithreaded and multi process server")
raise click.Abort()
log.info(u"Running server {0} on port {1}".format(host, port))
config_extra_files = tk.aslist(
config.get(u"ckan.devserver.watch_patterns")
)
extra_files = list(extra_files) + [
config[u"__file__"]
] + config_extra_files
run_simple(
host,
port,
ctx.obj.app,
use_reloader=reloader,
use_evalex=True,
threaded=threaded,
processes=processes,
extra_files=extra_files,
)
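# Added usage note (hedged): with this command registered on the CKAN CLI, the development
# server is typically started along the lines of
#   ckan -c /path/to/ckan.ini run -H 0.0.0.0 -p 5000 --threaded
# (the exact invocation depends on how the surrounding CLI wires up `ctx.obj.app`).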
| 29.263158
| 79
| 0.673261
|
9bea9c83a39265dc72f603772ad7c9f1b6591974
| 1,190
|
py
|
Python
|
tests/test_demo.py
|
sveneberth/unittests-workshop-demo
|
d23ca4e74aa59e9c825a7bcc6430d02763517d9b
|
[
"MIT"
] | null | null | null |
tests/test_demo.py
|
sveneberth/unittests-workshop-demo
|
d23ca4e74aa59e9c825a7bcc6430d02763517d9b
|
[
"MIT"
] | null | null | null |
tests/test_demo.py
|
sveneberth/unittests-workshop-demo
|
d23ca4e74aa59e9c825a7bcc6430d02763517d9b
|
[
"MIT"
] | 1
|
2022-03-31T09:36:51.000Z
|
2022-03-31T09:36:51.000Z
|
#!/usr/bin/env python3
import unittest
class TestDemo(unittest.TestCase):
def test_str(self):
from code.demo import duplicate_value
self.assertEqual("abab", duplicate_value("ab"))
self.assertEqual(b"abab", duplicate_value(b"ab"))
self.assertEqual(2, duplicate_value(1))
self.assertLess(1, duplicate_value(1))
self.assertIsNotNone(duplicate_value("anything"))
# simulate a wrong case
# self.assertEqual("abab", duplicate_value(b"ab"))
def test_list(self):
from code.demo import duplicate_value
self.assertEqual([1, 1], duplicate_value([1]))
self.assertIsNotNone(duplicate_value(["anything"]))
class TestDemo2(unittest.TestCase):
def test_str(self):
from code.demo import duplicate_str_value
self.assertEqual("abab", duplicate_str_value("ab"))
def test_list(self):
from code.demo import duplicate_str_value
with self.assertRaises(TypeError):
duplicate_str_value([1, 2])
def test_bytes(self):
from code.demo import duplicate_str_value
with self.assertRaises(TypeError):
duplicate_str_value(b"ab")
| 26.444444
| 59
| 0.668067
|
ae80a8bd7e0ad53de3c4f9425d55a02578f9c2a7
| 1,301
|
py
|
Python
|
python/pyarrow/flight.py
|
findmyway/arrow
|
78105f90d4bd5c1e7b1d85a75e4fcd5e978201ef
|
[
"Apache-2.0"
] | null | null | null |
python/pyarrow/flight.py
|
findmyway/arrow
|
78105f90d4bd5c1e7b1d85a75e4fcd5e978201ef
|
[
"Apache-2.0"
] | 1
|
2019-02-11T17:01:44.000Z
|
2019-02-23T04:11:14.000Z
|
python/pyarrow/flight.py
|
findmyway/arrow
|
78105f90d4bd5c1e7b1d85a75e4fcd5e978201ef
|
[
"Apache-2.0"
] | 1
|
2020-11-26T07:50:58.000Z
|
2020-11-26T07:50:58.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pyarrow._flight import (Action, # noqa
ActionType,
DescriptorType,
FlightClient,
FlightDescriptor,
FlightEndpoint,
FlightInfo,
FlightServerBase,
Location,
Ticket,
RecordBatchStream,
Result)
| 43.366667
| 62
| 0.591852
|
fb4e0105037e225321ae2c59ff2fc1431f5624c6
| 2,306
|
py
|
Python
|
dplace2/adapters.py
|
clld/dplace
|
834e7f64045ca01c0b719d87ae4c57710f6a5740
|
[
"Apache-2.0"
] | 3
|
2018-11-17T15:55:32.000Z
|
2020-11-17T08:49:37.000Z
|
dplace2/adapters.py
|
D-PLACE/dplace2
|
834e7f64045ca01c0b719d87ae4c57710f6a5740
|
[
"Apache-2.0"
] | 12
|
2018-01-31T17:55:31.000Z
|
2020-07-24T05:33:15.000Z
|
dplace2/adapters.py
|
D-PLACE/dplace2
|
834e7f64045ca01c0b719d87ae4c57710f6a5740
|
[
"Apache-2.0"
] | null | null | null |
from itertools import chain
from sqlalchemy.orm import joinedload
from clld.db.meta import DBSession
from clld.db.models import common
from clld.interfaces import IParameter
from clld.web.adapters.geojson import GeoJson, GeoJsonParameter
from clld.web.adapters.csv import CsvAdapter
from clld.interfaces import IIndex
from clld_phylogeny_plugin.tree import Tree
from clld_phylogeny_plugin.interfaces import ITree
from csvw.dsv import UnicodeWriter
from dplace2.models import get_icon
class VariableCsvAdapter(CsvAdapter):
def render(self, ctx, req):
cols = None
with UnicodeWriter() as writer:
for v in chain(*[vs.values for vs in ctx.valuesets]):
if not cols:
cols = v.csv_head()
writer.writerow(cols)
writer.writerow(v.to_csv(ctx=ctx, req=req, cols=cols))
return writer.read()
class VariableTree(Tree):
def get_marker(self, valueset):
icon = get_icon(valueset)
return icon[:1], '#' + icon[1:]
class GeoJsonSocieties(GeoJson):
def feature_properties(self, ctx, req, language):
if hasattr(ctx, 'icon_url'): # pragma: no cover
# special handling for domain elements of feature combinations
return {'icon': ctx.icon_url}
class GeoJsonVariable(GeoJsonParameter):
def feature_iterator(self, ctx, req):
de = req.params.get('domainelement')
if de:
return DBSession.query(common.ValueSet)\
.join(common.Value)\
.join(common.DomainElement)\
.filter(common.DomainElement.id == de)\
.options(
joinedload(common.ValueSet.values).joinedload(common.Value.domainelement),
joinedload(common.ValueSet.language),
)\
.distinct()
return self.get_query(ctx, req)
def feature_properties(self, ctx, req, valueset):
value = valueset.values[0]
return {
'label': value.domainelement.name if value.domainelement else ('{0:,}'.format(value.value_float) if value.value_float else value.name),
}
def includeme(config):
config.register_adapter(GeoJsonVariable, IParameter)
config.registry.registerUtility(VariableTree, ITree)
| 34.41791
| 147
| 0.653513
|
3086a412f722b7dff6a922865a3311bef9f5c522
| 3,657
|
py
|
Python
|
yolo_utils.py
|
milanbhadja7932/Seating-Arrangement
|
5c89dd3e66e1e5539a2fea7e4dd52b398051fcd6
|
[
"MIT"
] | null | null | null |
yolo_utils.py
|
milanbhadja7932/Seating-Arrangement
|
5c89dd3e66e1e5539a2fea7e4dd52b398051fcd6
|
[
"MIT"
] | null | null | null |
yolo_utils.py
|
milanbhadja7932/Seating-Arrangement
|
5c89dd3e66e1e5539a2fea7e4dd52b398051fcd6
|
[
"MIT"
] | null | null | null |
import numpy as np
import argparse
import cv2 as cv
import subprocess
import time
import os
def show_image(img):
cv.imshow("Image", img)
cv.waitKey(0)
def draw_labels_and_boxes(img, boxes, confidences, classids, idxs, colors, labels):
# If there are any detections
if len(idxs) > 0:
for i in idxs.flatten():
# Get the bounding box coordinates
x, y = boxes[i][0], boxes[i][1]
w, h = boxes[i][2], boxes[i][3]
# Get the unique color for this class
color = [int(c) for c in colors[classids[i]]]
# Draw the bounding box rectangle and label on the image
cv.rectangle(img, (x, y), (x+w, y+h), (255,255,255), 0)
text = "{}: {:4f}".format(labels[classids[i]], confidences[i])
cv.putText(img, text, (x, y-5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
return img
def generate_boxes_confidences_classids(outs, height, width, tconf):
boxes = []
confidences = []
classids = []
for out in outs:
for detection in out:
#print (detection)
#a = input('GO!')
# Get the scores, classid, and the confidence of the prediction
scores = detection[5:]
classid = np.argmax(scores)
confidence = scores[classid]
# Consider only the predictions that are above a certain confidence level
if confidence > tconf:
# TODO Check detection
box = detection[0:4] * np.array([width, height, width, height])
centerX, centerY, bwidth, bheight = box.astype('int')
# Using the center x, y coordinates to derive the top
# and the left corner of the bounding box
x = int(centerX - (bwidth / 2))
y = int(centerY - (bheight / 2))
# Append to list
boxes.append([x, y, int(bwidth), int(bheight)])
confidences.append(float(confidence))
classids.append(classid)
#print("classid",classid)
return boxes, confidences, classids
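# Added worked example (hedged, for illustration only): for a 416x416 frame and a detection
# with normalized center (0.5, 0.5) and size (0.25, 0.25), the code above computes
# box = [208, 208, 104, 104], then x = int(208 - 104 / 2) = 156 and y = 156,
# giving the boxes entry [156, 156, 104, 104].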
def infer_image(net, layer_names, height, width, img, colors, labels, FLAGS,
boxes=None, confidences=None, classids=None, idxs=None, infer=True):
if infer:
        # Constructing a blob from the input image
blob = cv.dnn.blobFromImage(img, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
# Perform a forward pass of the YOLO object detector
net.setInput(blob)
# Getting the outputs from the output layers
start = time.time()
outs = net.forward(layer_names)
end = time.time()
if FLAGS.show_time:
print ("[INFO] YOLOv3 took {:6f} seconds".format(end - start))
# Generate the boxes, confidences, and classIDs
boxes, confidences, classids = generate_boxes_confidences_classids(outs, height, width, FLAGS.confidence)
# Apply Non-Maxima Suppression to suppress overlapping bounding boxes
idxs = cv.dnn.NMSBoxes(boxes, confidences, FLAGS.confidence, FLAGS.threshold)
if boxes is None or confidences is None or idxs is None or classids is None:
        raise ValueError('[ERROR] Required variables are set to None before drawing boxes on images.')
# Draw labels and boxes on the image
#for clas in classids:
#print(clas)
#if clas==0:
img = draw_labels_and_boxes(img, boxes, confidences, classids, idxs, colors, labels)
return img, boxes, confidences, classids, idxs
| 34.828571
| 113
| 0.587367
|
5bc0c4bf99802f80eeedc5e344296a5ec2c99ea8
| 11,509
|
py
|
Python
|
streamm/structures/particle.py
|
NREL/streamm-tools
|
663ceff5e9a1145b74ee8c1857988dc94d6535a2
|
[
"Apache-2.0"
] | 4
|
2017-01-04T02:20:52.000Z
|
2022-01-23T21:14:32.000Z
|
streamm/structures/particle.py
|
NREL/streamm-tools
|
663ceff5e9a1145b74ee8c1857988dc94d6535a2
|
[
"Apache-2.0"
] | null | null | null |
streamm/structures/particle.py
|
NREL/streamm-tools
|
663ceff5e9a1145b74ee8c1857988dc94d6535a2
|
[
"Apache-2.0"
] | 4
|
2017-04-25T06:23:08.000Z
|
2021-04-14T07:10:24.000Z
|
# coding: utf-8
# Copyright (c) Alliance for Sustainable Energy, LLC
# Distributed under the terms of the Apache License, Version 2.0
"""
This module defines the classes relating to general particles
"""
__author__ = "Travis W. Kemper, Ph.D."
__copyright__ = "Copyright 2015, Alliance for Sustainable Energy, LLC"
__version__ = "0.3.4"
__email__ = "organicelectronics@nrel.gov"
__status__ = "Beta"
import logging
logger = logging.getLogger(__name__)
import pymatgen_core.core.periodic_table as periodictable
import pymatgen_core.core.units as units
class Particle(units.ObjectUnits):
"""Data structure for describing any localized object in QM/MD simulation, such as an atom.
Kwargs:
* type (str): Particle type
* label (str): Identifier in output files
* symbol (str): Atomic symbol
* unit_conf (dict): Unit types with units used by this object
Particles have fundamental attributes which are used throughout the streamm code:
.. attribute:: mass (float)
Mass of the particle
.. attribute:: charge (float)
Charge of the particle in e
.. attribute:: bonded_radius (float)
Radius in used in determining if two particles are bonded
.. attribute:: nonbonded_radius (float)
Radius in used in determining if two particles are interacting
Additional attributes include:
.. attribute:: mol (int)
Molecule index
.. attribute:: ring (int)
Ring index
.. attribute:: residue (int)
Residue index
.. attribute:: resname (str)
Residue name
.. attribute:: qgroup (int)
Charge group index
.. attribute:: index (int)
Index of particle in particle dictionary
.. attribute:: ffkey (str)
Particletype key
The ``element`` attribute can be set to an element object from the
``periodic_table`` object in ``pymatgen``.
The ``ff`` attribute can be set to a ``Particletype`` object.
"""
@property
def mass(self):
return self._property['mass']
@mass.setter
def mass(self,value):
self._property['mass'] = value
@property
def charge(self):
return self._property['charge']
@charge.setter
def charge(self,value):
self._property['charge'] = value
@property
def bonded_radius(self):
return self._property['bonded_radius']
@bonded_radius.setter
def bonded_radius(self,value):
self._property['bonded_radius'] = value
@property
def nonbonded_radius(self):
return self._property['nonbonded_radius']
@nonbonded_radius.setter
def nonbonded_radius(self,value):
self._property['nonbonded_radius'] = value
def set_element(self,symbol=None,number=None,mass=None,mass_precision=0):
'''
Set the element of property of the particle
Kwargs:
* symbol (str): Atomic symbol.
* number (int): Atomic number
* mass (float): Atomic mass (AMU)
* mass_precision (int): precision to match the mass with value from periodic table
This will set the ``symbol`` property of the particle to the Atomic symbol,
the ``mass`` property of the particle to the ``atomic_mass``,
the ``bonded_radius`` property of the particle to the ``atomic_radius_calculated`` and
the ``nonbonded_radius`` property of the particle to the ``van_der_waals_radius``
'''
if( symbol != None ):
self.symbol = str(symbol)
logger.debug("Finding element by atomic symbol {}".format(self.symbol))
self.element = periodictable.Element(self.symbol)
elif( number != None ):
number = int(number)
logger.debug("Finding element by atomic number {}".format(number))
self.element = periodictable.Element.from_Z(number)
self.symbol = self.element.symbol
elif( mass != None ):
mass = float(mass)
logger.debug("Finding element by atomic mass {} ".format(mass))
for symbol, data in periodictable._pt_data.items():
if round(data["Atomic mass"],mass_precision) == round(mass,mass_precision):
self.symbol = symbol
self.element = periodictable.Element(symbol)
break
if( self.symbol == None ):
logger.warning("Atomic mass of {} was not found in periodic table with precision of {}".format(mass,ATOMIC_MASS_PRECISION))
return
else:
logger.warning("No arguments supplied to function element will not be set")
return
#
# Set properties based on element properties
#
self.mass = float(self.element.atomic_mass) # amu
if( self.element.atomic_radius_calculated != None ):
self.bonded_radius = self.element.atomic_radius_calculated
else:
            self.bonded_radius = 1.5  # fallback bonded radius when atomic_radius_calculated is unavailable
if( self.element.van_der_waals_radius != None ):
self.nonbonded_radius = self.element.van_der_waals_radius
else:
self.nonbonded_radius = 2.5
logger.debug("Particle has been set to {} with mass:{} bonded_radius:{} nonbonded_radius:{}".format(self.symbol,self.mass,self.bonded_radius,self.nonbonded_radius))
# Set values to be the same as mendeleev for easy
# upgrade in next revision
self.element.atomic_weight = self.element.atomic_mass
self.element.covalent_radius = self.element.atomic_radius_calculated
self.element.vdw_radius = self.element.van_der_waals_radius
return
def __init__(self,type='atom',label=None,symbol = None,unit_conf=units.unit_conf):
# init object's units dictionaries
units.ObjectUnits.__init__(self,unit_conf=unit_conf)
#
logger.debug("Particle created type:{} label:{} symbol:{}".format(type,label,symbol))
#
self.type = type
self.label = label
self.symbol = symbol
#
self._property['mass'] = 1.0 # Choose reasonable values for initialization
self._property['charge'] = 0.0 # Choose reasonable values for initialization
#
self._property['bonded_radius'] = 1.0 # Choose reasonable values for initialization
self._property['nonbonded_radius'] = 2.0 # Choose reasonable values for initialization
#
self._property_units['mass'].append('mass')
self._property_units['charge'].append('charge')
self._property_units['length'].append('bonded_radius')
self._property_units['length'].append('nonbonded_radius')
#
self.mol = 0
self.ring = 0
self.residue = 0
self.resname = "RES"
self.qgroup = 0
#
# Reactive site type
self.rsite = ''
#
# Atomic properties
self.symbol = symbol
self.element = None
#
        if( symbol != None ):
            self.set_element(symbol=symbol)
            if( label == None ):
                logger.debug("No label is given using symbol as label")
                self.label = symbol
elif( label != None ):
logger.debug("No symbol is given using label as symbol")
self.symbol = label
# Force field
self.paramkey = None
self.param = None
# Lammps and gromacs index
self.param_index = 0
self.lammps_index = 0
self.gromacs_index = 0
def __del__(self):
del self.type
del self.label
#
del self.mol
del self.ring
del self.residue
del self.resname
del self.qgroup
# Atomic properties
del self.symbol
del self.element
# Force field
del self.paramkey
del self.param
del self.param_index
del self.lammps_index
del self.gromacs_index
#
del self.rsite
def __str__(self):
return "{} {} ({})".format(self.type,self.label,self.symbol)
def show_attributes(self):
'''
Like __str__ but with all the values of the instance's attributes
'''
property_msg = " type:{} ".format(self.type)
property_msg += "\n label:{}".format(self.label)
property_msg += "\n symbol:{}".format(self.symbol)
property_msg += "\n mass:{} ({})".format(self.mass,self._unit_conf['mass'])
property_msg += "\n charge:{} ({})".format(self.charge,self._unit_conf['charge'])
property_msg += "\n bonded_radius:{} ({})".format(self.bonded_radius,self._unit_conf['length'])
property_msg += "\n nonbonded_radius:{} ({})".format(self.nonbonded_radius,self._unit_conf['length'])
return property_msg
def export_json(self):
'''
Export object to json
Returns:
* json_data (dict) json representation of the object
'''
json_data = {}
json_data['type'] = self.type
json_data['label'] = self.label
json_data['symbol'] = self.symbol
json_data['mass'] = self.mass
json_data['charge'] = self.charge
json_data['bonded_radius'] = self.bonded_radius
json_data['nonbonded_radius'] = self.nonbonded_radius
#
json_data['mol'] = self.mol
json_data['ring'] = self.ring
json_data['residue'] = self.residue
json_data['resname'] = self.resname
json_data['qgroup'] = self.qgroup
json_data['paramkey'] = self.paramkey
json_data['param_index'] = self.param_index
json_data['lammps_index'] = self.lammps_index
json_data['gromacs_index'] = self.gromacs_index
json_data['rsite'] = self.rsite
return json_data
def import_json(self,json_data):
'''
Export object to json
Args:
* json_data (dict) json representation of the object
'''
self.type = json_data['type']
self.label = json_data['label']
self.symbol = json_data['symbol']
# Set element
self.set_element(symbol = self.symbol )
self.mass = json_data['mass']
self.charge = json_data['charge']
self.bonded_radius = json_data['bonded_radius']
self.nonbonded_radius = json_data['nonbonded_radius']
self.mol = json_data['mol']
self.ring = json_data['ring']
self.residue = json_data['residue']
self.resname = json_data['resname']
self.qgroup = json_data['qgroup']
self.paramkey = json_data['paramkey']
self.param_index = json_data['param_index']
self.lammps_index = json_data['lammps_index']
self.gromacs_index = json_data['gromacs_index']
self.rsite = json_data['rsite']
| 33.652047
| 172
| 0.579025
|
bdcbd151b59f14556f3cad36b7bc41bb15d70ea7
| 3,509
|
py
|
Python
|
neo-node/p2p_ws/node.py
|
i25959341/Happynodes
|
ef6825d17b181c451476d4ef722f2bcfa52e0161
|
[
"MIT"
] | null | null | null |
neo-node/p2p_ws/node.py
|
i25959341/Happynodes
|
ef6825d17b181c451476d4ef722f2bcfa52e0161
|
[
"MIT"
] | null | null | null |
neo-node/p2p_ws/node.py
|
i25959341/Happynodes
|
ef6825d17b181c451476d4ef722f2bcfa52e0161
|
[
"MIT"
] | null | null | null |
import threading
from time import sleep
from logzero import logger
from twisted.internet import reactor, task
from neo.Network.NodeLeader import NodeLeader
from neo.Core.Blockchain import Blockchain
from neo.Implementations.Blockchains.LevelDB.LevelDBBlockchain import LevelDBBlockchain
from neo.Settings import settings
import dns.resolver
import socket
import os
import psycopg2
import time
import datetime
host = str(os.environ['PGHOST'])
databasename = str(os.environ['PGDATABASE'])
user = str(os.environ['PGUSER'])
password = str(os.environ['PGPASSWORD'])
connection_str = "dbname='{}' user='{}' host='{}' password='{}'".format(databasename, user, host, password)
dsn="postgresql://{}:{}@{}/{}".format(user, password, host, databasename)
def getSqlDateTime(ts):
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def getIpAddressMap(cursor):
ip_dict = {}
cursor.execute("""select ce.id, n.id, n.hostname, n.ip
from connection_endpoints ce
inner join nodes n
on n.id=ce.node_id""")
results = cursor.fetchall()
for result in results:
address_id, ip_id, address, ip = result
if ip not in ip_dict:
ip_dict[ip] = [address_id]
else:
ip_dict[ip].append(address_id)
return ip_dict
def custom_background_code(connection_str, ip_dict):
""" Custom code run in a background thread.
This function is run in a daemonized thread, which means it can be instantly killed at any
moment, whenever the main thread quits. If you need more safety, don't use a daemonized
thread and handle exiting this thread in another way (eg. with signals and events).
"""
while True:
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
logger.info("Block %s / %s", str(Blockchain.Default().Height), str(Blockchain.Default().HeaderHeight))
print(len(NodeLeader.Instance().Peers))
insert_time = time.time()
if len(NodeLeader.Instance().Peers)>0:
for peer in NodeLeader.Instance().Peers:
print(peer.host)
if peer.host in ip_dict:
address_list = ip_dict[peer.host]
for address_id in address_list:
cursor.execute("INSERT INTO p2p_ws_status_history (ts, connection_id, p2p_ws_status) VALUES (%s, %s, %s)", [getSqlDateTime(insert_time), address_id, True])
else:
print("ip not in database")
conn.commit()
cursor.close()
conn.close()
sleep(15)
def main():
conn = psycopg2.connect(connection_str)
cursor = conn.cursor()
ip_dict = getIpAddressMap(cursor)
cursor.close()
conn.close()
# Use Custom config
settings.setup("./config.json")
settings.set_max_peers(500)
# Setup the blockchain
blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
Blockchain.RegisterBlockchain(blockchain)
dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
dbloop.start(.1)
NodeLeader.Instance().Start()
# Start a thread with custom code
d = threading.Thread(target=custom_background_code, args=(connection_str, ip_dict,))
d.setDaemon(True) # daemonizing the thread will kill it when the main thread is quit
d.start()
# Run all the things (blocking call)
reactor.run()
logger.info("Shutting down.")
if __name__ == "__main__":
main()
| 34.067961
| 179
| 0.662867
|
213cd57c9d7cbe284e0ed44c925449b33c08dcad
| 17,487
|
py
|
Python
|
ApiManager/utils/common.py
|
wishchen/HttpRunnerManager
|
1e72baa8a84ca70124af908ac683bf88b869d48a
|
[
"MIT"
] | null | null | null |
ApiManager/utils/common.py
|
wishchen/HttpRunnerManager
|
1e72baa8a84ca70124af908ac683bf88b869d48a
|
[
"MIT"
] | null | null | null |
ApiManager/utils/common.py
|
wishchen/HttpRunnerManager
|
1e72baa8a84ca70124af908ac683bf88b869d48a
|
[
"MIT"
] | null | null | null |
import io
import json
import logging
import os
from json import JSONDecodeError
import yaml
from djcelery.models import PeriodicTask
from ApiManager.models import ModuleInfo, TestCaseInfo
from ApiManager.utils.operation import add_project_data, add_module_data, add_case_data, add_config_data, \
add_register_data
from ApiManager.utils.task_opt import create_task
logger = logging.getLogger('HttpRunnerManager')
def type_change(type, value):
"""
数据类型转换
:param type: str: 类型
:param value: object: 待转换的值
:return: ok or error
"""
try:
if type == 'float':
value = float(value)
elif type == 'int':
value = int(value)
except ValueError:
logger.error('{value}转换{type}失败'.format(value=value, type=type))
return 'exception'
if type == 'boolean':
if value == 'False':
value = False
elif value == 'True':
value = True
else:
return 'exception'
return value
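
# Illustrative usage (editor's note, not part of the original source):
#   type_change('int', '3')        -> 3
#   type_change('boolean', 'True') -> True
#   type_change('float', 'abc')    -> 'exception'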
def key_value_list(keyword, **kwargs):
"""
dict change to list
:param keyword: str: 关键字标识
:param kwargs: dict: 待转换的字典
:return: ok or tips
"""
if not isinstance(kwargs, dict) or not kwargs:
return None
else:
lists = []
test = kwargs.pop('test')
for value in test:
if keyword == 'setup_hooks':
if value.get('key') != '':
lists.append(value.get('key'))
elif keyword == 'teardown_hooks':
if value.get('value') != '':
lists.append(value.get('value'))
else:
key = value.pop('key')
val = value.pop('value')
if 'type' in value.keys():
type = value.pop('type')
else:
type = 'str'
tips = '{keyword}: {val}格式错误,不是{type}类型'.format(keyword=keyword, val=val, type=type)
if key != '' and val != '':
if keyword == 'validate':
value['check'] = key
msg = type_change(type, val)
if msg == 'exception':
return tips
value['expected'] = msg
elif keyword == 'extract':
value[key] = val
elif keyword == 'variables':
msg = type_change(type, val)
if msg == 'exception':
return tips
value[key] = msg
elif keyword == 'parameters':
try:
if not isinstance(eval(val), list):
return '{keyword}: {val}格式错误'.format(keyword=keyword, val=val)
value[key] = eval(val)
except Exception:
logging.error('{val}->eval 异常'.format(val=val))
return '{keyword}: {val}格式错误'.format(keyword=keyword, val=val)
lists.append(value)
return lists
def key_value_dict(keyword, **kwargs):
"""
字典二次处理
:param keyword: str: 关键字标识
:param kwargs: dict: 原字典值
:return: ok or tips
"""
if not isinstance(kwargs, dict) or not kwargs:
return None
else:
dicts = {}
test = kwargs.pop('test')
for value in test:
key = value.pop('key')
val = value.pop('value')
if 'type' in value.keys():
type = value.pop('type')
else:
type = 'str'
if key != '' and val != '':
if keyword == 'headers':
value[key] = val
elif keyword == 'data':
msg = type_change(type, val)
if msg == 'exception':
return '{keyword}: {val}格式错误,不是{type}类型'.format(keyword=keyword, val=val, type=type)
value[key] = msg
dicts.update(value)
return dicts
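
# Illustrative usage (editor's note, not part of the original source): for headers the
# row-style payload is flattened into a plain dict, e.g.
#   key_value_dict('headers', test=[{'key': 'Content-Type', 'value': 'application/json'}])
#   -> {'Content-Type': 'application/json'}
# The header name and value above are made up for the example.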
def load_modules(**kwargs):
"""
加载对应项目的模块信息,用户前端ajax请求返回
:param kwargs: dict:项目相关信息
:return: str: module_info
"""
belong_project = kwargs.get('name').get('project')
module_info = ModuleInfo.objects.filter(belong_project__project_name=belong_project).values_list(
'id',
'module_name').order_by(
'-create_time')
module_info = list(module_info)
string = ''
for value in module_info:
string = string + str(value[0]) + '^=' + value[1] + 'replaceFlag'
return string[:len(string) - 11]
def load_cases(**kwargs):
"""
加载指定项目模块下的用例
:param kwargs: dict: 项目与模块信息
:return: str: 用例信息
"""
belong_project = kwargs.get('name').get('project')
module = kwargs.get('name').get('module')
if module == '请选择':
return ''
case_info = TestCaseInfo.objects.filter(belong_project=belong_project,
belong_module=module, type=1).values_list('id', 'name').order_by(
'-create_time')
case_info = list(case_info)
string = ''
for value in case_info:
string = string + str(value[0]) + '^=' + value[1] + 'replaceFlag'
return string[:len(string) - 11]
def load_configs():
"""
    Load the config info under the given project.
:return:
"""
config_info = TestCaseInfo.objects.filter(type=2).values_list(
'id',
'name').order_by(
'-create_time')
config_info = list(config_info)
string = ''
for value in config_info:
string = string + str(value[0]) + '^=' + value[1] + 'replaceFlag'
return string[:len(string) - 11]
def module_info_logic(type=True, **kwargs):
"""
模块信息逻辑处理
:param type: boolean: True:默认新增模块
:param kwargs: dict: 模块信息
:return:
"""
    if kwargs.get('module_name') == '':
return '模块名称不能为空'
if kwargs.get('belong_project') == '请选择':
return '请选择项目,没有请先添加哦'
    if kwargs.get('test_user') == '':
return '测试人员不能为空'
return add_module_data(type, **kwargs)
def project_info_logic(type=True, **kwargs):
"""
项目信息逻辑处理
:param type: boolean:True 默认新增项目
:param kwargs: dict: 项目信息
:return:
"""
    if kwargs.get('project_name') == '':
        return '项目名称不能为空'
    if kwargs.get('responsible_name') == '':
        return '负责人不能为空'
    if kwargs.get('test_user') == '':
        return '测试人员不能为空'
    if kwargs.get('dev_user') == '':
        return '开发人员不能为空'
    if kwargs.get('publish_app') == '':
return '发布应用不能为空'
return add_project_data(type, **kwargs)
def case_info_logic(type=True, **kwargs):
"""
用例信息逻辑处理以数据处理
:param type: boolean: True 默认新增用例信息, False: 更新用例
:param kwargs: dict: 用例信息
:return: str: ok or tips
"""
test = kwargs.pop('test')
'''
    Dynamically load the module / case lists
'''
if 'request' not in test.keys():
type = test.pop('type')
return load_modules(**test) if type == 'module' else load_cases(**test)
else:
logging.info('用例原始信息: {kwargs}'.format(kwargs=kwargs))
        if test.get('name').get('case_name') == '':
            return '用例名称不可为空'
        if test.get('name').get('author') == '':
            return '创建者不能为空'
        if test.get('request').get('url') == '':
return '接口地址不能为空'
if test.get('name').get('module') == '请选择':
return '请选择模块'
if test.get('name').get('project') == '请选择':
return '请选择项目'
if test.get('name').get('project') == '':
return '请先添加项目'
if test.get('name').get('module') == '':
return '请先添加模块'
name = test.pop('name')
test.setdefault('name', name.pop('case_name'))
test.setdefault('case_info', name)
validate = test.pop('validate')
if validate:
validate_list = key_value_list('validate', **validate)
if not isinstance(validate_list, list):
return validate_list
test.setdefault('validate', validate_list)
extract = test.pop('extract')
if extract:
test.setdefault('extract', key_value_list('extract', **extract))
request_data = test.get('request').pop('request_data')
data_type = test.get('request').pop('type')
if request_data and data_type:
if data_type == 'json':
test.get('request').setdefault(data_type, request_data)
else:
data_dict = key_value_dict('data', **request_data)
if not isinstance(data_dict, dict):
return data_dict
test.get('request').setdefault(data_type, data_dict)
headers = test.get('request').pop('headers')
if headers:
test.get('request').setdefault('headers', key_value_dict('headers', **headers))
variables = test.pop('variables')
if variables:
variables_list = key_value_list('variables', **variables)
if not isinstance(variables_list, list):
return variables_list
test.setdefault('variables', variables_list)
parameters = test.pop('parameters')
if parameters:
params_list = key_value_list('parameters', **parameters)
if not isinstance(params_list, list):
return params_list
test.setdefault('parameters', params_list)
hooks = test.pop('hooks')
if hooks:
setup_hooks_list = key_value_list('setup_hooks', **hooks)
if not isinstance(setup_hooks_list, list):
return setup_hooks_list
test.setdefault('setup_hooks', setup_hooks_list)
teardown_hooks_list = key_value_list('teardown_hooks', **hooks)
if not isinstance(teardown_hooks_list, list):
return teardown_hooks_list
test.setdefault('teardown_hooks', teardown_hooks_list)
kwargs.setdefault('test', test)
return add_case_data(type, **kwargs)
def config_info_logic(type=True, **kwargs):
"""
    Config info logic and data handling.
    :param type: boolean: True: add new data (default), False: update existing data
    :param kwargs: dict: config info
    :return: ok or tips
"""
config = kwargs.pop('config')
'''
    Dynamically load the module list
'''
if 'request' not in config.keys():
return load_modules(**config)
else:
logging.debug('配置原始信息: {kwargs}'.format(kwargs=kwargs))
        if config.get('name').get('config_name') == '':
            return '配置名称不可为空'
        if config.get('name').get('author') == '':
return '创建者不能为空'
if config.get('name').get('project') == '请选择':
return '请选择项目'
if config.get('name').get('module') == '请选择':
return '请选择模块'
if config.get('name').get('project') == '':
return '请先添加项目'
if config.get('name').get('module') == '':
return '请先添加模块'
name = config.pop('name')
config.setdefault('name', name.pop('config_name'))
config.setdefault('config_info', name)
request_data = config.get('request').pop('request_data')
data_type = config.get('request').pop('type')
if request_data and data_type:
if data_type == 'json':
config.get('request').setdefault(data_type, request_data)
else:
data_dict = key_value_dict('data', **request_data)
if not isinstance(data_dict, dict):
return data_dict
config.get('request').setdefault(data_type, data_dict)
headers = config.get('request').pop('headers')
if headers:
config.get('request').setdefault('headers', key_value_dict('headers', **headers))
variables = config.pop('variables')
if variables:
variables_list = key_value_list('variables', **variables)
if not isinstance(variables_list, list):
return variables_list
config.setdefault('variables', variables_list)
parameters = config.pop('parameters')
if parameters:
params_list = key_value_list('parameters', **parameters)
if not isinstance(params_list, list):
return params_list
config.setdefault('parameters', params_list)
hooks = config.pop('hooks')
if hooks:
setup_hooks_list = key_value_list('setup_hooks', **hooks)
if not isinstance(setup_hooks_list, list):
return setup_hooks_list
config.setdefault('setup_hooks', setup_hooks_list)
teardown_hooks_list = key_value_list('teardown_hooks', **hooks)
if not isinstance(teardown_hooks_list, list):
return teardown_hooks_list
config.setdefault('teardown_hooks', teardown_hooks_list)
kwargs.setdefault('config', config)
return add_config_data(type, **kwargs)
def task_logic(**kwargs):
"""
    Scheduled task logic handling.
    :param kwargs: dict: scheduled task data
:return:
"""
if 'task' in kwargs.keys():
return load_modules(**kwargs.pop('task'))
    if kwargs.get('name') == '':
        return '任务名称不可为空'
    elif kwargs.get('project') == '':
        return '请选择一个项目'
    elif kwargs.get('crontab_time') == '':
return '定时配置不可为空'
elif not kwargs.get('module'):
kwargs.pop('module')
try:
crontab_time = kwargs.pop('crontab_time').split(' ')
if len(crontab_time) > 5:
return '定时配置参数格式不正确'
crontab = {
'day_of_week': crontab_time[-1],
            'month_of_year': crontab_time[3],  # month
            'day_of_month': crontab_time[2],  # day of month
            'hour': crontab_time[1],  # hour
            'minute': crontab_time[0],  # minute
}
except Exception:
return '定时配置参数格式不正确'
if PeriodicTask.objects.filter(name__exact=kwargs.get('name')).count() > 0:
return '任务名称重复,请重新命名'
desc = " ".join(str(i) for i in crontab_time)
name = kwargs.pop('name')
if 'module' in kwargs.keys():
return create_task(name, 'ApiManager.tasks.module_hrun', kwargs, crontab, desc)
else:
return create_task(name, 'ApiManager.tasks.project_hrun', kwargs, crontab, desc)
def set_filter_session(request):
"""
    Save query filter values into the session.
:param request:
:return:
"""
request.session['user'] = '' if 'user' not in request.POST.keys() else request.POST.get('user')
request.session['name'] = '' if 'name' not in request.POST.keys() else request.POST.get('name')
request.session['belong_project'] = '' if 'belong_project' not in request.POST.keys() else request.POST.get(
'belong_project')
request.session['belong_module'] = '' if 'belong_module' not in request.POST.keys() else request.POST.get(
'belong_module')
request.session['report_name'] = '' if 'report_name' not in request.POST.keys() else request.POST.get('report_name')
filter_query = {
'user': request.session['user'],
'name': request.session['name'],
'belong_project': request.session['belong_project'],
'belong_module': request.session['belong_module'],
'report_name': request.session['report_name']
}
return filter_query
def get_ajax_msg(msg, success):
"""
    Ajax prompt message.
:param msg: str:msg
:param success: str:
:return:
"""
    return success if msg == 'ok' else msg
def register_info_logic(**kwargs):
"""
:param kwargs:
:return:
"""
return add_register_data(**kwargs)
def upload_file_logic(files, project, module, account):
"""
    Parse yaml or json test case files.
:param files:
:param project:
:param module:
:param account:
:return:
"""
for file in files:
file_suffix = os.path.splitext(file)[1].lower()
if file_suffix == '.json':
with io.open(file, encoding='utf-8') as data_file:
try:
content = json.load(data_file)
except JSONDecodeError:
err_msg = u"JSONDecodeError: JSON file format error: {}".format(file)
                    logging.error(err_msg)
                    continue
elif file_suffix in ['.yaml', '.yml']:
with io.open(file, 'r', encoding='utf-8') as stream:
                content = yaml.safe_load(stream)
for test_case in content:
test_dict = {
'project': project,
'module': module,
'author': account,
'include': []
}
if 'config' in test_case.keys():
test_case.get('config')['config_info'] = test_dict
add_config_data(type=True, **test_case)
            if 'test' in test_case.keys():  # ignore config
test_case.get('test')['case_info'] = test_dict
                if 'validate' in test_case.get('test').keys():  # support both validate formats
validate = test_case.get('test').pop('validate')
new_validate = []
for check in validate:
if 'comparator' not in check.keys():
for key, value in check.items():
tmp_check = {"check": value[0], "comparator": key, "expected": value[1]}
new_validate.append(tmp_check)
test_case.get('test')['validate'] = new_validate
add_case_data(type=True, **test_case)
| 33.245247
| 120
| 0.552468
|
4bae8e9aca0f2b52b6b8fa3d4f89b108dbfe7a75
| 871
|
py
|
Python
|
helper_functions/split_date.py
|
Joseph-Maulin/lambdata-Joseph-Maulin
|
a2c616d61778e81a764429bda39fe3414d76bdea
|
[
"MIT"
] | null | null | null |
helper_functions/split_date.py
|
Joseph-Maulin/lambdata-Joseph-Maulin
|
a2c616d61778e81a764429bda39fe3414d76bdea
|
[
"MIT"
] | null | null | null |
helper_functions/split_date.py
|
Joseph-Maulin/lambdata-Joseph-Maulin
|
a2c616d61778e81a764429bda39fe3414d76bdea
|
[
"MIT"
] | null | null | null |
import pandas as pd
def split_date(df, date_column):
"""
Function
________
Expands pandas dataframe date column into 'Year', 'Month', 'Day'
Params
__________
args:
df(pd.DataFrame) : df to modify
date_column(String) : column name of the date column to expand
Return
______
Returns a copy of passed df with df['Year'], df['Month'], and df['Day'] columns
"""
try:
split_df = df.copy()
except:
return "df needs to be a pandas.DataFrame"
try:
split_df['Year'] = pd.DatetimeIndex(df[date_column]).year
split_df['Month'] = pd.DatetimeIndex(df[date_column]).month
split_df['Day'] = pd.DatetimeIndex(df[date_column]).day
return split_df.drop(columns = [date_column])
except:
return "date_column passed is not datetime"
| 24.885714
| 87
| 0.613088
|
e1a147a70a2c50f7761e51473579a8d30ff4b591
| 2,617
|
py
|
Python
|
.ycm_extra_conf.py
|
aslResearch/aslRover
|
eec6a772b90d18a38ac036773651c5b95eb4f22e
|
[
"BSD-3-Clause"
] | 9
|
2017-09-22T06:31:47.000Z
|
2021-09-09T23:06:56.000Z
|
.ycm_extra_conf.py
|
aslResearch/aslRover
|
eec6a772b90d18a38ac036773651c5b95eb4f22e
|
[
"BSD-3-Clause"
] | null | null | null |
.ycm_extra_conf.py
|
aslResearch/aslRover
|
eec6a772b90d18a38ac036773651c5b95eb4f22e
|
[
"BSD-3-Clause"
] | 7
|
2018-09-12T10:36:05.000Z
|
2021-09-22T08:11:58.000Z
|
#!/usr/bin/env python
import os
import ycm_core
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
'-std=c++11',
'-x',
'c++',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/opt/ros/' + os.getenv('ROS_DISTRO') + '/include',
'-isystem',
'/home/vnv/asl_gremlin1/src/devel/include',
'-isystem',
'/home/vnv/asl_gremlin1/src/src/CMakeLists.txt/include'
]
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| 24.92381
| 72
| 0.682461
|
0c4407c2cfe33d028a6f7da7673a30251d19697d
| 3,447
|
py
|
Python
|
core/jobs_registry.py
|
klenathstar/oppia
|
6cfe84e772f767dffef8f4e451c78cf70ef3c428
|
[
"Apache-2.0"
] | null | null | null |
core/jobs_registry.py
|
klenathstar/oppia
|
6cfe84e772f767dffef8f4e451c78cf70ef3c428
|
[
"Apache-2.0"
] | null | null | null |
core/jobs_registry.py
|
klenathstar/oppia
|
6cfe84e772f767dffef8f4e451c78cf70ef3c428
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Job registries."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import activity_jobs_one_off
from core.domain import collection_jobs_one_off
from core.domain import exp_jobs_one_off
from core.domain import feedback_jobs_continuous
from core.domain import question_jobs_one_off
from core.domain import recommendations_jobs_one_off
from core.domain import skill_jobs_one_off
from core.domain import story_jobs_one_off
from core.domain import suggestion_jobs_one_off
from core.domain import topic_jobs_one_off
from core.domain import user_jobs_continuous
import python_utils
# List of all manager classes for one-off batch jobs for which to show controls
# on the admin dashboard.
ONE_OFF_JOB_MANAGERS = [
activity_jobs_one_off.IndexAllActivitiesJobManager,
collection_jobs_one_off.CollectionMigrationOneOffJob,
exp_jobs_one_off.ExplorationMigrationJobManager,
exp_jobs_one_off.ExpSnapshotsMigrationJob,
question_jobs_one_off.QuestionMigrationOneOffJob,
question_jobs_one_off.QuestionSnapshotsMigrationJob,
recommendations_jobs_one_off.ExplorationRecommendationsOneOffJob,
skill_jobs_one_off.SkillMigrationOneOffJob,
skill_jobs_one_off.SkillCommitCmdMigrationOneOffJob,
story_jobs_one_off.StoryMigrationOneOffJob,
suggestion_jobs_one_off.QuestionSuggestionMigrationJobManager,
topic_jobs_one_off.TopicMigrationOneOffJob,
]
# List of all manager classes for prod validation one-off batch jobs for which
# to show controls on the admin dashboard.
AUDIT_JOB_MANAGERS = [
]
# List of all ContinuousComputation managers to show controls for on the
# admin dashboard.
# NOTE TO DEVELOPERS: When a new ContinuousComputation manager is defined,
# it should be registered here.
ALL_CONTINUOUS_COMPUTATION_MANAGERS = [
feedback_jobs_continuous.FeedbackAnalyticsAggregator,
user_jobs_continuous.DashboardRecentUpdatesAggregator,
user_jobs_continuous.UserStatsAggregator,
]
class ContinuousComputationEventDispatcher(python_utils.OBJECT):
"""Dispatches events to the relevant ContinuousComputation classes."""
@classmethod
def dispatch_event(cls, event_type, *args, **kwargs):
"""Dispatches an incoming event to the ContinuousComputation
classes which listen to events of that type.
Args:
event_type: str. The type of the event.
*args: list(*). Positional arguments to pass to on_incoming_event().
**kwargs: *. Keyword arguments to pass to on_incoming_event().
"""
for klass in ALL_CONTINUOUS_COMPUTATION_MANAGERS:
if event_type in klass.get_event_types_listened_to():
klass.on_incoming_event(event_type, *args, **kwargs)
| 41.035714
| 80
| 0.793153
|
00a524015e886996b2c4716e23ef0c97fd1b053e
| 2,701
|
py
|
Python
|
aldryn_faq/search_indexes.py
|
mcldev/aldryn-faq
|
f0b05c5e7a90ce201e1dfe866092a69bd2960873
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_faq/search_indexes.py
|
mcldev/aldryn-faq
|
f0b05c5e7a90ce201e1dfe866092a69bd2960873
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_faq/search_indexes.py
|
mcldev/aldryn-faq
|
f0b05c5e7a90ce201e1dfe866092a69bd2960873
|
[
"BSD-3-Clause"
] | 2
|
2019-05-14T07:59:02.000Z
|
2020-11-01T14:58:34.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.template import RequestContext
from aldryn_search.utils import get_index_base, strip_tags
from parler.utils.context import switch_language
from .models import Question, Category
class QuestionIndex(get_index_base()):
haystack_use_for_indexing = getattr(
settings, "ALDRYN_FAQ_QUESTION_SEARCH", True)
index_title = True
def get_title(self, obj):
with switch_language(obj):
return obj.safe_translation_getter('title')
def get_index_queryset(self, language):
questions = self.get_model().objects.language(language)
return questions.active_translations(language)
def get_model(self):
return Question
def get_search_data(self, obj, language, request):
with switch_language(obj, language):
context = RequestContext(request)
text_bits = [
strip_tags(obj.safe_translation_getter('title') or ''),
strip_tags(obj.safe_translation_getter('answer_text') or '')
]
plugins = obj.answer.cmsplugin_set.filter(language=language)
for base_plugin in plugins:
instance, plugin_type = base_plugin.get_plugin_instance()
if instance is not None:
plugin_content = strip_tags(
instance.render_plugin(context=context)
)
text_bits.append(plugin_content)
return ' '.join(text_bits)
def get_index_kwargs(self, language):
return {'translations__language_code': language}
def get_language(self, obj):
if hasattr(obj, 'language_code'):
return obj.language_code
return None
class CategoryIndex(get_index_base()):
haystack_use_for_indexing = getattr(
settings, "ALDRYN_FAQ_CATEGORY_SEARCH", True)
index_title = True
def get_title(self, obj):
with switch_language(obj):
return obj.safe_translation_getter('name')
def get_index_queryset(self, language):
categories = self.get_model().objects.language(language).active_translations(language)
return categories
def get_model(self):
return Category
def get_search_data(self, obj, language, request):
with switch_language(obj):
return strip_tags(obj.safe_translation_getter('name'))
def get_index_kwargs(self, language):
return {'translations__language_code': language}
def get_language(self, obj):
if hasattr(obj, 'language_code'):
return obj.language_code
return None
| 31.045977
| 94
| 0.661977
|
f2c963cdc1032aea362d114b6a7b54b2624adc3b
| 305
|
py
|
Python
|
data/multilingual/Hans.NAN/Serif_12/pdf_to_json_test_Hans.NAN_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Hans.NAN/Serif_12/pdf_to_json_test_Hans.NAN_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Hans.NAN/Serif_12/pdf_to_json_test_Hans.NAN_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Hans.NAN/Serif_12/udhr_Hans.NAN_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.5
| 75
| 0.813115
|
3001f2fa97dcd9803b23e4d5b7865c9d14a032e2
| 1,499
|
py
|
Python
|
oldcode/lib_angle/generate_winrt_projects.py
|
xriss/gamecake
|
015e6d324761f46235ee61a61a71dbd9a49f6192
|
[
"MIT"
] | 28
|
2017-04-20T06:21:26.000Z
|
2021-12-10T15:22:51.000Z
|
oldcode/lib_angle/generate_winrt_projects.py
|
sahwar/gamecake
|
9abcb937c0edc22dee2940cb06ec9a84597e989c
|
[
"MIT"
] | 3
|
2017-04-05T00:41:45.000Z
|
2020-04-04T00:44:24.000Z
|
oldcode/lib_angle/generate_winrt_projects.py
|
sahwar/gamecake
|
9abcb937c0edc22dee2940cb06ec9a84597e989c
|
[
"MIT"
] | 5
|
2016-11-26T14:44:55.000Z
|
2021-07-29T04:25:53.000Z
|
# Copyright (c) 2013-2014 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script generates visual studio projects that can be used on WinRT
from __future__ import print_function

import os
import sys
script_dir = os.path.join(os.path.dirname(__file__), 'build')
angle_dir = os.path.normpath(os.path.join(script_dir, os.pardir))
gyp_dir = os.path.join(angle_dir, 'third_party', 'gyp')
gyp_generators = "msvs"
msvs_version = "2013e"
def generateProjects(generation_dir, build_winphone):
gyp_cmd = os.path.join(gyp_dir, 'gyp')
gyp_cmd += ' --ignore-environment'
gyp_cmd += ' --depth=.'
gyp_cmd += ' --include=' + os.path.join(script_dir, 'common.gypi')
gyp_cmd += ' --generator-output=' + generation_dir
gyp_cmd += ' --format=' + gyp_generators
gyp_cmd += ' -G msvs_version=' + msvs_version
gyp_cmd += ' -D angle_use_commit_id=0'
gyp_cmd += ' -D angle_build_winrt=1'
gyp_cmd += ' -D angle_build_winphone=' + ('1' if build_winphone else '0')
gyp_cmd += ' -D angle_enable_d3d9=0'
gyp_cmd += ' -D angle_standalone=1'
gyp_cmd += ' ' + os.path.join(script_dir, 'all.gyp')
    print('Generating projects to ' + generation_dir + ' from gyp files...')
    print(gyp_cmd)
sys.stdout.flush()
os.system(gyp_cmd)
if __name__ == '__main__':
# Generate Windows 8.1 projects
generateProjects("winrt/8.1/windows", False);
generateProjects("winrt/8.1/windowsphone", True);
| 37.475
| 77
| 0.685791
|
a17c4b1f9ae2f369c7a4ed032379151363a705fb
| 3,598
|
py
|
Python
|
examples/webui_server.py
|
glowatsk/txtorcon
|
db8de75a4568561b118be11299bda61f3fb84719
|
[
"MIT"
] | 180
|
2015-01-12T23:57:06.000Z
|
2022-03-17T00:24:35.000Z
|
examples/webui_server.py
|
glowatsk/txtorcon
|
db8de75a4568561b118be11299bda61f3fb84719
|
[
"MIT"
] | 262
|
2015-01-16T21:14:50.000Z
|
2022-02-25T01:33:42.000Z
|
examples/webui_server.py
|
glowatsk/txtorcon
|
db8de75a4568561b118be11299bda61f3fb84719
|
[
"MIT"
] | 61
|
2015-01-05T01:10:57.000Z
|
2022-01-04T08:13:39.000Z
|
#!/usr/bin/env python
from __future__ import print_function
from twisted.internet import reactor
from nevow.appserver import NevowSite
from nevow import loaders, tags, livepage
import txtorcon
def setup_failed(fail):
print("It went sideways!", fail)
return fail
class TorPage(livepage.LivePage):
# override for Nevow/twisted.web
addSlash = True
# defaults for this class
continuous_update = True
ctx = None
torstate = None
# Could be done with XHTML 1.0, or a "real" templating language
docFactory = loaders.stan(
tags.html[
tags.head[
tags.directive('liveglue')],
tags.body[
tags.h1["Tor Launching..."],
# obviously you might want a javascript library or
# something here instead of this hackery...
                tags.div(id='progress', style='position:absolute; left:20em; top:10px; width:300px; height:50px; border:2px solid black;background-color:#ffaaaa;')[
tags.div(id='progress_done', style='position:absolute; top:0px; left:0px; width:0%; height: 100%; background-color:#aaffaa;')],
# this is where the messages will go
tags.div(id='status', style='padding:5px; background-color:#ffaaaa; text-indent:2em; width: 50em; font-weight:bold; border: 2px solid black;')[""]]])
def goingLive(self, ctx, client):
'''
Overrides nevow method; not really safe to just save ctx,
client in self for multiple clients, but nice and simple.
'''
self.ctx = ctx
self.client = client
def set_tor_state(self, state):
self.tor_state = state
def tor_update(self, percent, tag, summary):
if self.ctx is None:
print("I have no Web client yet, but got a Tor update:", percent, tag, summary)
return
point = int(300 * (float(percent) / 100.0))
self.client.send(livepage.js('''document.getElementById('progress_done').style.width = "%dpx";''' % point))
if percent == 100:
# done, turn message box green too
self.client.send(livepage.js('''document.getElementById("status").style.backgroundColor="#aaffaa";'''))
if self.continuous_update:
# add a text node for each update, creating a continuous list
self.client.send(livepage.js('''var newNode = document.createElement('div');
newNode.appendChild(document.createTextNode("%d%% -- %s"));
document.getElementById('status').appendChild(newNode);''' % (percent, summary)))
else:
            self.client.send(livepage.set('status', "%d%% -- %s" % (percent, summary)))
# This only properly works with one client (the last one to load the
# page). To work with multiples, we'd have to track all clients so
# sending async updates to them worked properly.
top_level = TorPage()
# minimal Tor configuration
config = txtorcon.TorConfig()
config.OrPort = 1234
config.SocksPort = 9999
# launch a Tor based on the above config; the callback will trigger
# when the TorControlProtocol and TorState instances are up and
# running (i.e. Tor process is launched, and we connected to it via
# control protocol and bootstrapped our notion of its state).
d = txtorcon.launch_tor(config, reactor, progress_updates=top_level.tor_update)
d.addCallback(top_level.set_tor_state)
d.addErrback(setup_failed)
print("Launching Tor and providing a Web interface on: \nhttp://localhost:8080\n")
# Start up the Web server
site = NevowSite(top_level)
reactor.listenTCP(8080, site)
reactor.run()
| 36.714286
| 165
| 0.66537
|
f7babf1339c53b82df96b389da39fa84808108db
| 464
|
py
|
Python
|
tests/gsfpy3_08/test_version.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 7
|
2020-07-01T07:12:19.000Z
|
2022-01-20T20:39:57.000Z
|
tests/gsfpy3_08/test_version.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 36
|
2020-06-23T09:10:15.000Z
|
2022-03-22T10:27:58.000Z
|
tests/gsfpy3_08/test_version.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 2
|
2021-02-07T13:21:52.000Z
|
2021-06-24T19:16:16.000Z
|
from pathlib import Path
from unittest import TestCase
import toml
from assertpy import assert_that
from gsfpy3_08 import __version__ as module_version
class TestVersion(TestCase):
def test_version(self):
pyproject_file = Path(__file__).parents[2] / "pyproject.toml"
pyproject = toml.load(pyproject_file)
pyproject_version = pyproject["tool"]["poetry"]["version"]
assert_that(module_version).is_equal_to(pyproject_version)
| 27.294118
| 69
| 0.752155
|
5c33209dd5991969cd07496c6ff80bb38bbda01a
| 85
|
py
|
Python
|
backend/tests/pathmagic.py
|
teruto725/kanji-visualization
|
7a980546d32846ad0eb4883215b727936cdcb51f
|
[
"MIT"
] | 4
|
2022-01-07T06:00:36.000Z
|
2022-01-07T07:58:04.000Z
|
backend/tests/pathmagic.py
|
teruto725/kanji-visualization
|
7a980546d32846ad0eb4883215b727936cdcb51f
|
[
"MIT"
] | 66
|
2022-01-07T06:15:44.000Z
|
2022-02-22T06:04:17.000Z
|
backend/tests/pathmagic.py
|
teruto725/kanji-visualization
|
7a980546d32846ad0eb4883215b727936cdcb51f
|
[
"MIT"
] | 1
|
2022-01-24T12:49:09.000Z
|
2022-01-24T12:49:09.000Z
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
| 17
| 62
| 0.717647
|
a3b69cb118936ff56cda93a657d34ea50c036f20
| 2,069
|
py
|
Python
|
Jupyter notebooks/Circuitos Eletricos I - Semana 7.1.py
|
Jefferson-Lopes/ElectricCircuits
|
bf2075dc0731cacece75f7b0b378c180630bdf85
|
[
"MIT"
] | 9
|
2021-05-19T18:36:53.000Z
|
2022-01-18T16:30:17.000Z
|
Jupyter notebooks/Circuitos Eletricos I - Semana 7.1.py
|
Jefferson-Lopes/ElectricCircuits
|
bf2075dc0731cacece75f7b0b378c180630bdf85
|
[
"MIT"
] | null | null | null |
Jupyter notebooks/Circuitos Eletricos I - Semana 7.1.py
|
Jefferson-Lopes/ElectricCircuits
|
bf2075dc0731cacece75f7b0b378c180630bdf85
|
[
"MIT"
] | 10
|
2021-06-25T12:52:40.000Z
|
2022-03-11T14:25:48.000Z
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.display import HTML
from IPython.display import Image
HTML("""
<style>
.output_png {
display: table-cell;
text-align: center;
vertical-align: middle;
}
</style>
""")
# # *Circuitos Elétricos I - Semana 7*
#
# ## Summary of the ideal two-terminal passive elements
#
# $$
# \begin{array}{|l|c|c|c|}
# \hline \text { Property } & R & L & C \\
# \hline \text { Relation } i-v & i=\frac{v}{R} & i=\frac{1}{L} \int_{t_{0}}^{t} v(\tau) d \tau+i\left(t_{0}\right) & i=C \frac{d v}{d t} \\
# \text { Relation } v-i & v=Ri & v=L \frac{d i}{d t} & v=\frac{1}{C} \int_{t_{0}}^{t} i(\tau) d \tau+v\left(t_{0}\right) \\
# p \text { (power) } & p=Ri^{2} & p=L i \frac{d i}{d t} & p=C v \frac{d v}{d t} \\
# w \text { (stored energy) } & 0 & w=\frac{1}{2} L i^{2} & w=\frac{1}{2} C v^{2} \\
# \text { Series combination } & R_{\mathrm{eq}}=R_{1}+R_{2} & L_{\mathrm{eq}}=L_{1}+L_{2} & \frac{1}{C_{\mathrm{eq}}}=\frac{1}{C_{1}}+\frac{1}{C_{2}} \\
# \text { Parallel combination } & \frac{1}{R_{\mathrm{eq}}}=\frac{1}{R_{1}}+\frac{1}{R_{2}} & \frac{1}{L_{\mathrm{eq}}}=\frac{1}{L_{1}}+\frac{1}{L_{2}} & C_{\mathrm{eq}}=C_{1}+C_{2} \\
# \text { Steady-state behavior } & \text { unchanged } & \text { short circuit } & \text { open circuit } \\
# \text { Can } v \text { change instantaneously? } & \text { yes } & \text { yes } & \text { no } \\
# \text { Can } i \text { change instantaneously? } & \text { yes } & \text { no } & \text { yes }\\ \hline
# \end{array}
# $$
# ### Voltage and current dividers with inductors and capacitors
#
# Determine the expressions for the voltage- and current-divider circuits shown in the figure below.
#
Image("./figures/J10C1.png", width=700)
| 37.618182
| 187
| 0.580957
|
97a3fd225e211fb4a22a0c8c600afc2d881cbf8b
| 122,034
|
py
|
Python
|
emsdk.py
|
lewing/emsdk
|
db872c7baedcaf94c6cbcc0270eaf663dee747df
|
[
"MIT"
] | null | null | null |
emsdk.py
|
lewing/emsdk
|
db872c7baedcaf94c6cbcc0270eaf663dee747df
|
[
"MIT"
] | null | null | null |
emsdk.py
|
lewing/emsdk
|
db872c7baedcaf94c6cbcc0270eaf663dee747df
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2019 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import copy
from collections import OrderedDict
import errno
import json
import multiprocessing
import os
import os.path
import platform
import re
import shutil
import stat
import subprocess
import sys
import sysconfig
import zipfile
if os.name == 'nt':
try:
import winreg
except ImportError:
# old python 2 name
import _winreg as winreg
import ctypes.wintypes
if sys.version_info >= (3,):
from urllib.parse import urljoin
from urllib.request import urlopen
import functools
else:
from urlparse import urljoin
from urllib2 import urlopen
emsdk_packages_url = 'https://storage.googleapis.com/webassembly/emscripten-releases-builds/deps/'
emscripten_releases_repo = 'https://chromium.googlesource.com/emscripten-releases'
emscripten_releases_download_url_template = "https://storage.googleapis.com/webassembly/emscripten-releases-builds/%s/%s/wasm-binaries.%s"
emsdk_zip_download_url = 'https://github.com/emscripten-core/emsdk/archive/master.zip'
zips_subdir = 'zips/'
extra_release_tag = None
# Enable this to do very verbose printing about the different steps that are
# being run. Useful for debugging.
VERBOSE = int(os.getenv('EMSDK_VERBOSE', '0'))
TTY_OUTPUT = not os.getenv('EMSDK_NOTTY', not sys.stdout.isatty())
WINDOWS = False
if os.name == 'nt' or (os.getenv('SYSTEMROOT') is not None and 'windows' in os.getenv('SYSTEMROOT').lower()) or (os.getenv('COMSPEC') is not None and 'windows' in os.getenv('COMSPEC').lower()):
WINDOWS = True
def errlog(msg):
print(msg, file=sys.stderr)
MINGW = False
MSYS = False
if os.getenv('MSYSTEM'):
MSYS = True
# Some functions like os.path.normpath() exhibit different behavior between
# different versions of Python, so we need to distinguish between the MinGW
# and MSYS versions of Python
if sysconfig.get_platform() == 'mingw':
MINGW = True
if os.getenv('MSYSTEM') != 'MSYS' and os.getenv('MSYSTEM') != 'MINGW64':
# https://stackoverflow.com/questions/37460073/msys-vs-mingw-internal-environment-variables
errlog('Warning: MSYSTEM environment variable is present, and is set to "' + os.getenv('MSYSTEM') + '". This shell has not been tested with emsdk and may not work.')
MACOS = False
if platform.mac_ver()[0] != '':
MACOS = True
LINUX = False
if not MACOS and (platform.system() == 'Linux'):
LINUX = True
UNIX = (MACOS or LINUX)
# Pick which shell of 4 shells to use
POWERSHELL = bool(os.getenv('EMSDK_POWERSHELL'))
CSH = bool(os.getenv('EMSDK_CSH'))
CMD = bool(os.getenv('EMSDK_CMD'))
BASH = bool(os.getenv('EMSDK_BASH'))
if WINDOWS and BASH:
MSYS = True
if not CSH and not POWERSHELL and not BASH and not CMD:
# Fall back to default of `cmd` on windows and `bash` otherwise
if WINDOWS and not MSYS:
CMD = True
else:
BASH = True
if WINDOWS:
ENVPATH_SEPARATOR = ';'
else:
ENVPATH_SEPARATOR = ':'
ARCH = 'unknown'
# platform.machine() may return AMD64 on windows, so standardize the case.
machine = platform.machine().lower()
if machine.startswith('x64') or machine.startswith('amd64') or machine.startswith('x86_64'):
ARCH = 'x86_64'
elif machine.endswith('86'):
ARCH = 'x86'
elif machine.startswith('aarch64') or machine.lower().startswith('arm64'):
ARCH = 'aarch64'
elif platform.machine().startswith('arm'):
ARCH = 'arm'
else:
errlog("Warning: unknown machine architecture " + machine)
  errlog('')
# Don't saturate all cores to not steal the whole system, but be aggressive.
CPU_CORES = int(os.environ.get('EMSDK_NUM_CORES', max(multiprocessing.cpu_count() - 1, 1)))
CMAKE_BUILD_TYPE_OVERRIDE = None
# If true, perform a --shallow clone of git.
GIT_CLONE_SHALLOW = False
# If true, LLVM backend is built with tests enabled, and Binaryen is built with
# Visual Studio static analyzer enabled.
BUILD_FOR_TESTING = False
# If 'auto', assertions are decided by the build type
# (Release&MinSizeRel=disabled, Debug&RelWithDebInfo=enabled)
# Other valid values are 'ON' and 'OFF'
ENABLE_LLVM_ASSERTIONS = 'auto'
def os_name():
if WINDOWS:
return 'win'
elif LINUX:
return 'linux'
elif MACOS:
return 'macos'
else:
raise Exception('unknown OS')
def os_name_for_emscripten_releases():
if WINDOWS:
return 'win'
elif LINUX:
return 'linux'
elif MACOS:
return 'mac'
else:
raise Exception('unknown OS')
def debug_print(msg):
if VERBOSE:
errlog(msg)
def to_unix_path(p):
return p.replace('\\', '/')
def emsdk_path():
return to_unix_path(os.path.dirname(os.path.realpath(__file__)))
EMSDK_SET_ENV = ""
if POWERSHELL:
EMSDK_SET_ENV = os.path.join(emsdk_path(), 'emsdk_set_env.ps1')
else:
EMSDK_SET_ENV = os.path.join(emsdk_path(), 'emsdk_set_env.bat')
ARCHIVE_SUFFIXES = ('zip', '.tar', '.gz', '.xz', '.tbz2', '.bz2')
# Finds the given executable 'program' in PATH. Operates like the Unix tool 'which'.
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and (WINDOWS or os.access(fpath, os.X_OK))
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if WINDOWS and '.' not in fname:
if is_exe(exe_file + '.exe'):
return exe_file + '.exe'
if is_exe(exe_file + '.cmd'):
return exe_file + '.cmd'
if is_exe(exe_file + '.bat'):
return exe_file + '.bat'
return None
def vswhere(version):
try:
program_files = os.environ['ProgramFiles(x86)'] if 'ProgramFiles(x86)' in os.environ else os.environ['ProgramFiles']
vswhere_path = os.path.join(program_files, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
output = json.loads(subprocess.check_output([vswhere_path, '-latest', '-version', '[%s.0,%s.0)' % (version, version + 1), '-requires', 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64', '-property', 'installationPath', '-format', 'json']))
# Visual Studio 2017 Express is not included in the above search, and it
# does not have the VC.Tools.x86.x64 tool, so do a catch-all attempt as a
# fallback, to detect Express version.
if not output:
output = json.loads(subprocess.check_output([vswhere_path, '-latest', '-version', '[%s.0,%s.0)' % (version, version + 1), '-products', '*', '-property', 'installationPath', '-format', 'json']))
if not output:
return ''
return str(output[0]['installationPath'])
except Exception:
return ''
def vs_filewhere(installation_path, platform, file):
try:
vcvarsall = os.path.join(installation_path, 'VC\\Auxiliary\\Build\\vcvarsall.bat')
env = subprocess.check_output('cmd /c "%s" %s & where %s' % (vcvarsall, platform, file))
paths = [path[:-len(file)] for path in env.split('\r\n') if path.endswith(file)]
return paths[0]
except Exception:
return ''
CMAKE_GENERATOR = 'Unix Makefiles'
if WINDOWS:
# Detect which CMake generator to use when building on Windows
if '--mingw' in sys.argv:
CMAKE_GENERATOR = 'MinGW Makefiles'
elif '--vs2017' in sys.argv:
CMAKE_GENERATOR = 'Visual Studio 15'
elif '--vs2019' in sys.argv:
CMAKE_GENERATOR = 'Visual Studio 16'
else:
program_files = os.environ['ProgramFiles(x86)'] if 'ProgramFiles(x86)' in os.environ else os.environ['ProgramFiles']
vs2019_exists = len(vswhere(16)) > 0
vs2017_exists = len(vswhere(15)) > 0
mingw_exists = which('mingw32-make') is not None and which('g++') is not None
if vs2019_exists:
CMAKE_GENERATOR = 'Visual Studio 16'
elif vs2017_exists:
# VS2017 has an LLVM build issue, see
# https://github.com/kripken/emscripten-fastcomp/issues/185
CMAKE_GENERATOR = 'Visual Studio 15'
elif mingw_exists:
CMAKE_GENERATOR = 'MinGW Makefiles'
else:
# No detected generator
CMAKE_GENERATOR = ''
sys.argv = [a for a in sys.argv if a not in ('--mingw', '--vs2017', '--vs2019')]
# Computes a suitable path prefix to use when building with a given generator.
def cmake_generator_prefix():
if CMAKE_GENERATOR == 'Visual Studio 16':
return '_vs2019'
if CMAKE_GENERATOR == 'Visual Studio 15':
return '_vs2017'
elif CMAKE_GENERATOR == 'MinGW Makefiles':
return '_mingw'
# Unix Makefiles do not specify a path prefix for backwards path compatibility
return ''
# Removes a directory tree even if it was readonly, and doesn't throw exception
# on failure.
def remove_tree(d):
debug_print('remove_tree(' + str(d) + ')')
if not os.path.exists(d):
return
try:
def remove_readonly_and_try_again(func, path, exc_info):
if not (os.stat(path).st_mode & stat.S_IWRITE):
os.chmod(path, stat.S_IWRITE)
func(path)
else:
raise
shutil.rmtree(d, onerror=remove_readonly_and_try_again)
except Exception as e:
debug_print('remove_tree threw an exception, ignoring: ' + str(e))
def win_set_environment_variable_direct(key, value, system=True):
folder = None
try:
if system:
# Read globally from ALL USERS section.
folder = winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment', 0, winreg.KEY_ALL_ACCESS)
else:
# Register locally from CURRENT USER section.
folder = winreg.OpenKeyEx(winreg.HKEY_CURRENT_USER, 'Environment', 0, winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(folder, key, 0, winreg.REG_EXPAND_SZ, value)
debug_print('Set key=' + key + ' with value ' + value + ' in registry.')
return True
except Exception as e:
# 'Access is denied.'
if e.args[3] == 5:
exit_with_error('Error! Failed to set the environment variable \'' + key + '\'! Setting environment variables permanently requires administrator access. Please rerun this command with administrative privileges. This can be done for example by holding down the Ctrl and Shift keys while opening a command prompt in start menu.')
errlog('Failed to write environment variable ' + key + ':')
errlog(str(e))
return False
finally:
if folder is not None:
folder.Close()
def win_get_environment_variable(key, system=True, user=True, fallback=True):
if (not system and not user and fallback):
# if no --system or --permanent flag is provided use shell's value
return os.environ[key]
try:
folder = None
try:
if system:
# Read globally from ALL USERS section.
folder = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment')
else:
# Register locally from CURRENT USER section.
folder = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Environment')
value = str(winreg.QueryValueEx(folder, key)[0])
except Exception:
# If reading registry fails for some reason - read via os.environ. This has the drawback
# that expansion items such as %PROGRAMFILES% will have been expanded, so
# need to be precise not to set these back to system registry, or
# expansion items would be lost.
if fallback:
return os.environ[key]
return None
finally:
if folder is not None:
folder.Close()
except Exception as e:
# this catch is if both the registry key threw an exception and the key is not in os.environ
if e.args[0] != 2:
# 'The system cannot find the file specified.'
errlog('Failed to read environment variable ' + key + ':')
errlog(str(e))
return None
return value
def win_set_environment_variable(key, value, system, user):
debug_print('set ' + str(key) + '=' + str(value) + ', in system=' + str(system))
previous_value = win_get_environment_variable(key, system=system, user=user)
if previous_value == value:
debug_print(' no need to set, since same value already exists.')
# No need to elevate UAC for nothing to set the same value, skip.
return False
if not value:
try:
if system:
cmd = ['REG', 'DELETE', 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment', '/V', key, '/f']
else:
cmd = ['REG', 'DELETE', 'HKCU\\Environment', '/V', key, '/f']
debug_print(str(cmd))
value = subprocess.call(cmd, stdout=subprocess.PIPE)
except Exception:
return False
return True
try:
if win_set_environment_variable_direct(key, value, system):
return True
# Escape % signs so that we don't expand references to environment variables.
value = value.replace('%', '^%')
if len(value) >= 1024:
exit_with_error('ERROR! The new environment variable ' + key + ' is more than 1024 characters long! A value this long cannot be set via command line: please add the environment variable specified above to system environment manually via Control Panel.')
cmd = ['SETX', key, value]
debug_print(str(cmd))
retcode = subprocess.call(cmd, stdout=subprocess.PIPE)
if retcode != 0:
errlog('ERROR! Failed to set environment variable ' + key + '=' + value + '. You may need to set it manually.')
else:
return True
except Exception as e:
errlog('ERROR! Failed to set environment variable ' + key + '=' + value + ':')
errlog(str(e))
errlog('You may need to set it manually.')
return False
def win_set_environment_variables(env_vars_to_add, system, user):
if not env_vars_to_add:
return
changed = False
for key, value in env_vars_to_add:
if win_set_environment_variable(key, value, system, user):
if not changed:
changed = True
print('Setting global environment variables:')
print(key + ' = ' + value)
if not changed:
print('Global environment variables up to date')
return
# if changes were made then we need to notify other processes
try:
HWND_BROADCAST = ctypes.wintypes.HWND(0xFFFF) # win32con.HWND_BROADCAST == 65535
WM_SETTINGCHANGE = 0x001A # win32con.WM_SETTINGCHANGE == 26
SMTO_BLOCK = 0x0001 # win32con.SMTO_BLOCK == 1
ctypes.windll.user32.SendMessageTimeoutA(
HWND_BROADCAST, # hWnd: notify everyone
WM_SETTINGCHANGE, # Msg: registry changed
0, # wParam: Must be 0 when setting changed is sent by users
'Environment', # lParam: Specifically environment variables changed
SMTO_BLOCK, # fuFlags: Wait for message to be sent or timeout
100) # uTimeout: 100ms
except Exception as e:
errlog('SendMessageTimeout failed with error: ' + str(e))
def win_delete_environment_variable(key, system=True, user=True):
debug_print('win_delete_environment_variable(key=' + key + ', system=' + str(system) + ')')
return win_set_environment_variable(key, None, system, user)
# Returns the absolute pathname to the given path inside the Emscripten SDK.
def sdk_path(path):
if os.path.isabs(path):
return path
return to_unix_path(os.path.join(emsdk_path(), path))
# Modifies the given file in-place to contain '\r\n' line endings.
def file_to_crlf(filename):
text = open(filename, 'r').read()
text = text.replace('\r\n', '\n').replace('\n', '\r\n')
open(filename, 'wb').write(text)
# Modifies the given file in-place to contain '\n' line endings.
def file_to_lf(filename):
text = open(filename, 'r').read()
text = text.replace('\r\n', '\n')
open(filename, 'wb').write(text)
# Removes a single file, suppressing exceptions on failure.
def rmfile(filename):
debug_print('rmfile(' + filename + ')')
try:
os.remove(filename)
except:
pass
def fix_lineendings(filename):
if WINDOWS:
file_to_crlf(filename)
else:
file_to_lf(filename)
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
debug_print('mkdir_p(' + path + ')')
if os.path.exists(path):
return
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def num_files_in_directory(path):
if not os.path.isdir(path):
return 0
return len([name for name in os.listdir(path) if os.path.exists(os.path.join(path, name))])
def run(cmd, cwd=None):
debug_print('run(cmd=' + str(cmd) + ', cwd=' + str(cwd) + ')')
process = subprocess.Popen(cmd, cwd=cwd, env=os.environ.copy())
process.communicate()
if process.returncode != 0:
errlog(str(cmd) + ' failed with error code ' + str(process.returncode) + '!')
return process.returncode
# http://pythonicprose.blogspot.fi/2009/10/python-extract-targz-archive.html
def untargz(source_filename, dest_dir, unpack_even_if_exists=False):
debug_print('untargz(source_filename=' + source_filename + ', dest_dir=' + dest_dir + ')')
if not unpack_even_if_exists and num_files_in_directory(dest_dir) > 0:
print("File '" + source_filename + "' has already been unpacked, skipping.")
return True
print("Unpacking '" + source_filename + "' to '" + dest_dir + "'")
mkdir_p(dest_dir)
run(['tar', '-xvf' if VERBOSE else '-xf', sdk_path(source_filename), '--strip', '1'], cwd=dest_dir)
# tfile = tarfile.open(source_filename, 'r:gz')
# tfile.extractall(dest_dir)
return True
# On Windows, it is not possible to reference path names that are longer than
# ~260 characters, unless the path is referenced via a "\\?\" prefix.
# See https://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath and http://stackoverflow.com/questions/3555527/python-win32-filename-length-workaround
# In that mode, forward slashes cannot be used as delimiters.
def fix_potentially_long_windows_pathname(pathname):
if not WINDOWS:
return pathname
# Test if emsdk calls fix_potentially_long_windows_pathname() with long relative paths (which is problematic)
if not os.path.isabs(pathname) and len(pathname) > 200:
errlog('Warning: Seeing a relative path "' + pathname + '" which is dangerously long for being referenced as a short Windows path name. Refactor emsdk to be able to handle this!')
if pathname.startswith('\\\\?\\'):
return pathname
pathname = os.path.normpath(pathname.replace('/', '\\'))
if MINGW:
# MinGW versions of Python return normalized paths with backslashes
# converted to forward slashes, so we must use forward slashes in our
# prefix
return '//?/' + pathname
return '\\\\?\\' + pathname
# On windows, rename/move will fail if the destination exists, and there is no
# race-free way to do it. This method removes the destination if it exists, so
# the move always works
def move_with_overwrite(src, dest):
if os.path.exists(dest):
os.remove(dest)
os.rename(src, dest)
# http://stackoverflow.com/questions/12886768/simple-way-to-unzip-file-in-python-on-all-oses
def unzip(source_filename, dest_dir, unpack_even_if_exists=False):
debug_print('unzip(source_filename=' + source_filename + ', dest_dir=' + dest_dir + ')')
if not unpack_even_if_exists and num_files_in_directory(dest_dir) > 0:
print("File '" + source_filename + "' has already been unpacked, skipping.")
return True
print("Unpacking '" + source_filename + "' to '" + dest_dir + "'")
mkdir_p(dest_dir)
common_subdir = None
try:
with zipfile.ZipFile(source_filename) as zf:
# Implement '--strip 1' behavior to unzipping by testing if all the files
# in the zip reside in a common subdirectory, and if so, we move the
# output tree at the end of uncompression step.
for member in zf.infolist():
words = member.filename.split('/')
if len(words) > 1: # If there is a directory component?
if common_subdir is None:
common_subdir = words[0]
elif common_subdir != words[0]:
common_subdir = None
break
else:
common_subdir = None
break
unzip_to_dir = dest_dir
if common_subdir:
unzip_to_dir = os.path.join(os.path.dirname(dest_dir), 'unzip_temp')
# Now do the actual decompress.
for member in zf.infolist():
zf.extract(member, fix_potentially_long_windows_pathname(unzip_to_dir))
dst_filename = os.path.join(unzip_to_dir, member.filename)
# See: https://stackoverflow.com/questions/42326428/zipfile-in-python-file-permission
unix_attributes = member.external_attr >> 16
if unix_attributes:
os.chmod(dst_filename, unix_attributes)
# Move the extracted file to its final location without the base
# directory name, if we are stripping that away.
if common_subdir:
if not member.filename.startswith(common_subdir):
raise Exception('Unexpected filename "' + member.filename + '"!')
stripped_filename = '.' + member.filename[len(common_subdir):]
final_dst_filename = os.path.join(dest_dir, stripped_filename)
# Check if a directory
if stripped_filename.endswith('/'):
d = fix_potentially_long_windows_pathname(final_dst_filename)
if not os.path.isdir(d):
os.mkdir(d)
else:
parent_dir = os.path.dirname(fix_potentially_long_windows_pathname(final_dst_filename))
if parent_dir and not os.path.exists(parent_dir):
os.makedirs(parent_dir)
move_with_overwrite(fix_potentially_long_windows_pathname(dst_filename), fix_potentially_long_windows_pathname(final_dst_filename))
if common_subdir:
remove_tree(unzip_to_dir)
except zipfile.BadZipfile as e:
errlog("Unzipping file '" + source_filename + "' failed due to reason: " + str(e) + "! Removing the corrupted zip file.")
rmfile(source_filename)
return False
except Exception as e:
errlog("Unzipping file '" + source_filename + "' failed due to reason: " + str(e))
return False
return True
# This function interprets whether the given string looks like a path to a
# directory instead of a file, without looking at the actual filesystem.
# 'a/b/c' points to directory, so does 'a/b/c/', but 'a/b/c.x' is parsed as a
# filename
def path_points_to_directory(path):
if path == '.':
return True
last_slash = max(path.rfind('/'), path.rfind('\\'))
last_dot = path.rfind('.')
no_suffix = last_dot < last_slash or last_dot == -1
if no_suffix:
return True
suffix = path[last_dot:]
# Very simple logic for the only file suffixes used by emsdk downloader. Other
# suffixes, like 'clang-3.2' are treated as dirs.
if suffix in ('.exe', '.zip', '.txt'):
return False
else:
return True
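# Illustrative behaviour of path_points_to_directory() (hypothetical inputs, not part of the original script):
#   path_points_to_directory('a/b/c')     -> True  (no file suffix)
#   path_points_to_directory('a/b/c.zip') -> False ('.zip' is a known file suffix)
#   path_points_to_directory('clang-3.2') -> True  (unknown suffixes are treated as directories)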
def get_content_length(download):
try:
meta = download.info()
if hasattr(meta, "getheaders") and hasattr(meta.getheaders, "Content-Length"):
return int(meta.getheaders("Content-Length")[0])
elif hasattr(download, "getheader") and download.getheader('Content-Length'):
return int(download.getheader('Content-Length'))
elif hasattr(meta, "getheader") and meta.getheader('Content-Length'):
return int(meta.getheader('Content-Length'))
except Exception:
pass
return 0
def get_download_target(url, dstpath, filename_prefix=''):
file_name = filename_prefix + url.split('/')[-1]
if path_points_to_directory(dstpath):
file_name = os.path.join(dstpath, file_name)
else:
file_name = dstpath
# Treat all relative destination paths as relative to the SDK root directory,
# not the current working directory.
file_name = sdk_path(file_name)
return file_name
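# Illustrative example of get_download_target() (hypothetical URL and paths, not part of the original script):
#   get_download_target('https://example.com/dl/tool-1.0.zip', 'zips/')
#     -> '<emsdk root>/zips/tool-1.0.zip' (directory-like dstpath: filename taken from the URL)
#   get_download_target('https://example.com/dl/tool-1.0.zip', 'zips/tool.zip')
#     -> '<emsdk root>/zips/tool.zip' (an explicit filename overrides the URL basename)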
# On success, returns the filename on the disk pointing to the destination file that was produced
# On failure, returns None.
def download_file(url, dstpath, download_even_if_exists=False, filename_prefix=''):
debug_print('download_file(url=' + url + ', dstpath=' + dstpath + ')')
file_name = get_download_target(url, dstpath, filename_prefix)
if os.path.exists(file_name) and not download_even_if_exists:
print("File '" + file_name + "' already downloaded, skipping.")
return file_name
try:
u = urlopen(url)
mkdir_p(os.path.dirname(file_name))
with open(file_name, 'wb') as f:
file_size = get_content_length(u)
if file_size > 0:
print("Downloading: %s from %s, %s Bytes" % (file_name, url, file_size))
else:
print("Downloading: %s from %s" % (file_name, url))
file_size_dl = 0
# Draw a progress bar 80 chars wide (in non-TTY mode)
progress_max = 80 - 4
progress_shown = 0
block_sz = 256 * 1024
if not TTY_OUTPUT:
print(' [', end='')
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if file_size:
percent = file_size_dl * 100.0 / file_size
if TTY_OUTPUT:
status = r" %10d [%3.02f%%]" % (file_size_dl, percent)
print(status, end='\r')
else:
while progress_shown < progress_max * percent / 100:
print('-', end='')
sys.stdout.flush()
progress_shown += 1
if not TTY_OUTPUT:
print(']')
sys.stdout.flush()
except Exception as e:
errlog("Error: Downloading URL '" + url + "': " + str(e))
if "SSL: CERTIFICATE_VERIFY_FAILED" in str(e) or "urlopen error unknown url type: https" in str(e):
errlog("Warning: Possibly SSL/TLS issue. Update or install Python SSL root certificates (2048-bit or greater) supplied in Python folder or https://pypi.org/project/certifi/ and try again.")
rmfile(file_name)
return None
except KeyboardInterrupt:
rmfile(file_name)
exit_with_error("Aborted by User, exiting")
return file_name
def run_get_output(cmd, cwd=None):
debug_print('run_get_output(cmd=' + str(cmd) + ', cwd=' + str(cwd) + ')')
process = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, env=os.environ.copy(), universal_newlines=True)
stdout, stderr = process.communicate()
return (process.returncode, stdout, stderr)
# must_succeed: If false, the search is performed silently without printing out
# errors if not found. Empty string is returned if git is not found.
# If true, the search is required to succeed, and the execution
# will terminate with sys.exit(1) if not found.
def GIT(must_succeed=True):
# The order in the following is important, and specifies the preferred order
# of using the git tools. Primarily use git from emsdk if installed. If not,
# use system git.
gits = ['git/1.9.4/bin/git.exe', which('git')]
for git in gits:
try:
ret, stdout, stderr = run_get_output([git, '--version'])
if ret == 0:
return git
except:
pass
if must_succeed:
if WINDOWS:
msg = "ERROR: git executable was not found. Please install it by typing 'emsdk install git-1.9.4', or alternatively by installing it manually from http://git-scm.com/downloads . If you install git manually, remember to add it to PATH"
elif MACOS:
msg = "ERROR: git executable was not found. Please install git for this operation! This can be done from http://git-scm.com/ , or by installing XCode and then the XCode Command Line Tools (see http://stackoverflow.com/questions/9329243/xcode-4-4-command-line-tools )"
elif LINUX:
msg = "ERROR: git executable was not found. Please install git for this operation! This can be probably be done using your package manager, see http://git-scm.com/book/en/Getting-Started-Installing-Git"
else:
msg = "ERROR: git executable was not found. Please install git for this operation!"
exit_with_error(msg)
# Not found
return ''
def git_repo_version(repo_path):
returncode, stdout, stderr = run_get_output([GIT(), 'log', '-n', '1', '--pretty="%aD %H"'], cwd=repo_path)
if returncode == 0:
return stdout.strip()
else:
return ""
def git_recent_commits(repo_path, n=20):
returncode, stdout, stderr = run_get_output([GIT(), 'log', '-n', str(n), '--pretty="%H"'], cwd=repo_path)
if returncode == 0:
return stdout.strip().replace('\r', '').replace('"', '').split('\n')
else:
return []
def git_clone(url, dstpath):
debug_print('git_clone(url=' + url + ', dstpath=' + dstpath + ')')
if os.path.isdir(os.path.join(dstpath, '.git')):
debug_print("Repository '" + url + "' already cloned to directory '" + dstpath + "', skipping.")
return True
mkdir_p(dstpath)
git_clone_args = []
if GIT_CLONE_SHALLOW:
git_clone_args += ['--depth', '1']
return run([GIT(), 'clone'] + git_clone_args + [url, dstpath]) == 0
def git_checkout_and_pull(repo_path, branch):
debug_print('git_checkout_and_pull(repo_path=' + repo_path + ', branch=' + branch + ')')
ret = run([GIT(), 'fetch', '--quiet', 'origin'], repo_path)
if ret != 0:
return False
try:
print("Fetching latest changes to the branch '" + branch + "' for '" + repo_path + "'...")
ret = run([GIT(), 'fetch', '--quiet', 'origin'], repo_path)
if ret != 0:
return False
# run([GIT, 'checkout', '-b', branch, '--track', 'origin/'+branch], repo_path)
# this line assumes that the user has not gone and manually messed with the
# repo and added new remotes to ambiguate the checkout.
ret = run([GIT(), 'checkout', '--quiet', branch], repo_path)
if ret != 0:
return False
# this line assumes that the user has not gone and made local changes to the repo
ret = run([GIT(), 'merge', '--ff-only', 'origin/' + branch], repo_path)
if ret != 0:
return False
except:
errlog('git operation failed!')
return False
print("Successfully updated and checked out branch '" + branch + "' on repository '" + repo_path + "'")
print("Current repository version: " + git_repo_version(repo_path))
return True
def git_clone_checkout_and_pull(url, dstpath, branch):
debug_print('git_clone_checkout_and_pull(url=' + url + ', dstpath=' + dstpath + ', branch=' + branch + ')')
success = git_clone(url, dstpath)
if not success:
return False
success = git_checkout_and_pull(dstpath, branch)
return success
# Each tool can have its own build type, or it can be overridden on the command
# line.
def decide_cmake_build_type(tool):
if CMAKE_BUILD_TYPE_OVERRIDE:
return CMAKE_BUILD_TYPE_OVERRIDE
else:
return tool.cmake_build_type
# The root directory of the build.
def llvm_build_dir(tool):
generator_suffix = ''
if CMAKE_GENERATOR == 'Visual Studio 15':
generator_suffix = '_vs2017'
elif CMAKE_GENERATOR == 'Visual Studio 16':
generator_suffix = '_vs2019'
elif CMAKE_GENERATOR == 'MinGW Makefiles':
generator_suffix = '_mingw'
bitness_suffix = '_32' if tool.bitness == 32 else '_64'
if hasattr(tool, 'git_branch'):
build_dir = 'build_' + tool.git_branch.replace(os.sep, '-') + generator_suffix + bitness_suffix
else:
build_dir = 'build_' + tool.version + generator_suffix + bitness_suffix
return build_dir
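# Illustrative example of llvm_build_dir() (hypothetical tool attributes, not part of the original script):
#   for a 64-bit tool with git_branch='main' configured with the 'Visual Studio 16'
#   generator, this returns 'build_main_vs2019_64'.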
def exe_suffix(filename):
if WINDOWS and not filename.endswith('.exe'):
filename += '.exe'
return filename
# The directory where the binaries are produced. (relative to the installation
# root directory of the tool)
def fastcomp_build_bin_dir(tool):
build_dir = llvm_build_dir(tool)
if WINDOWS and 'Visual Studio' in CMAKE_GENERATOR:
old_llvm_bin_dir = os.path.join(build_dir, 'bin', decide_cmake_build_type(tool))
new_llvm_bin_dir = None
default_cmake_build_type = decide_cmake_build_type(tool)
cmake_build_types = [default_cmake_build_type, 'Release', 'RelWithDebInfo', 'MinSizeRel', 'Debug']
for build_type in cmake_build_types:
d = os.path.join(build_dir, build_type, 'bin')
if os.path.isfile(os.path.join(tool.installation_path(), d, exe_suffix('clang'))):
new_llvm_bin_dir = d
break
if new_llvm_bin_dir and os.path.exists(os.path.join(tool.installation_path(), new_llvm_bin_dir)):
return new_llvm_bin_dir
elif os.path.exists(os.path.join(tool.installation_path(), old_llvm_bin_dir)):
return old_llvm_bin_dir
return os.path.join(build_dir, default_cmake_build_type, 'bin')
else:
return os.path.join(build_dir, 'bin')
def build_env(generator):
build_env = os.environ.copy()
# To work around a build issue with older Mac OS X builds, add -stdlib=libc++ to all builds.
# See https://groups.google.com/forum/#!topic/emscripten-discuss/5Or6QIzkqf0
if MACOS:
build_env['CXXFLAGS'] = ((build_env['CXXFLAGS'] + ' ') if 'CXXFLAGS' in build_env else '') + '-stdlib=libc++'
elif 'Visual Studio 15' in generator or 'Visual Studio 16' in generator:
if 'Visual Studio 16' in generator:
path = vswhere(16)
else:
path = vswhere(15)
# Configuring CMake for Visual Studio needs an env. var VCTargetsPath to be present.
# How this is supposed to work is unfortunately very undocumented. See
# https://discourse.cmake.org/t/cmake-failed-to-get-the-value-of-vctargetspath-with-vs2019-16-7/1839/16
# for some conversation. Try a couple of common paths if one of them would work.
# In the future as new versions of VS come out, we likely need to add new paths into this list.
if 'VCTargetsPath' not in build_env:
vctargets_paths = [
os.path.join(path, 'MSBuild\\Microsoft\\VC\\v160\\'),
os.path.join(path, 'Common7\\IDE\\VC\\VCTargets')
]
for p in vctargets_paths:
if os.path.isfile(os.path.join(p, 'Microsoft.Cpp.Default.props')):
debug_print('Set env. var VCTargetsPath=' + p + ' for CMake.')
build_env['VCTargetsPath'] = p
break
else:
debug_print('Searched path ' + p + ' as candidate for VCTargetsPath, not working.')
if 'VCTargetsPath' not in build_env:
errlog('Unable to locate Visual Studio compiler installation for generator "' + generator + '"!')
errlog('Either rerun installation in Visual Studio Command Prompt, or locate directory to Microsoft.Cpp.Default.props manually')
sys.exit(1)
# CMake and VS2017 cl.exe needs to have mspdb140.dll et al. in its PATH.
vc_bin_paths = [vs_filewhere(path, 'amd64', 'cl.exe'),
vs_filewhere(path, 'x86', 'cl.exe')]
for path in vc_bin_paths:
if os.path.isdir(path):
build_env['PATH'] = build_env['PATH'] + ';' + path
return build_env
def get_generator_for_sln_file(sln_file):
contents = open(sln_file, 'r').read()
if '# Visual Studio 16' in contents or '# Visual Studio Version 16' in contents: # VS2019
return 'Visual Studio 16'
if '# Visual Studio 15' in contents: # VS2017
return 'Visual Studio 15'
raise Exception('Unknown generator used to build solution file ' + sln_file)
def find_msbuild(sln_file):
# The following logic attempts to find a Visual Studio version specific
# MSBuild.exe from a list of known locations.
generator = get_generator_for_sln_file(sln_file)
debug_print('find_msbuild looking for generator ' + str(generator))
if generator == 'Visual Studio 16': # VS2019
path = vswhere(16)
search_paths = [os.path.join(path, 'MSBuild/Current/Bin'),
os.path.join(path, 'MSBuild/15.0/Bin/amd64'),
os.path.join(path, 'MSBuild/15.0/Bin')]
elif generator == 'Visual Studio 15': # VS2017
path = vswhere(15)
search_paths = [os.path.join(path, 'MSBuild/15.0/Bin/amd64'),
os.path.join(path, 'MSBuild/15.0/Bin')]
else:
raise Exception('Unknown generator!')
for path in search_paths:
p = os.path.join(path, 'MSBuild.exe')
debug_print('Searching for MSBuild.exe: ' + p)
if os.path.isfile(p):
return p
debug_print('MSBuild.exe in PATH? ' + str(which('MSBuild.exe')))
# Last fallback, try any MSBuild from PATH (might not be compatible, but best effort)
return which('MSBuild.exe')
def make_build(build_root, build_type, build_target_platform='x64'):
debug_print('make_build(build_root=' + build_root + ', build_type=' + build_type + ', build_target_platform=' + build_target_platform + ')')
if CPU_CORES > 1:
print('Performing a parallel build with ' + str(CPU_CORES) + ' cores.')
else:
print('Performing a singlethreaded build.')
generator_to_use = CMAKE_GENERATOR
if WINDOWS:
if 'Visual Studio' in CMAKE_GENERATOR:
solution_name = str(subprocess.check_output(['dir', '/b', '*.sln'], shell=True, cwd=build_root).decode('utf-8').strip())
generator_to_use = get_generator_for_sln_file(os.path.join(build_root, solution_name))
# Disabled for now: Don't pass /maxcpucount argument to msbuild, since it
# looks like when building, msbuild already automatically spawns the full
# amount of logical cores the system has, and passing the number of
# logical cores here has been observed to give a quadratic N*N explosion
# on the number of spawned processes (e.g. on a Core i7 5960X with 16
# logical cores, it would spawn 16*16=256 cl.exe processes, which would
# start crashing when running out of system memory)
# make = [find_msbuild(os.path.join(build_root, solution_name)), '/maxcpucount:' + str(CPU_CORES), '/t:Build', '/p:Configuration=' + build_type, '/nologo', '/verbosity:minimal', solution_name]
make = [find_msbuild(os.path.join(build_root, solution_name)), '/t:Build', '/p:Configuration=' + build_type, '/p:Platform=' + build_target_platform, '/nologo', '/verbosity:minimal', solution_name]
else:
make = ['mingw32-make', '-j' + str(CPU_CORES)]
else:
make = ['cmake', '--build', '.', '--', '-j' + str(CPU_CORES)]
# Build
try:
print('Running build: ' + str(make))
ret = subprocess.check_call(make, cwd=build_root, env=build_env(generator_to_use))
if ret != 0:
errlog('Build failed with exit code ' + str(ret) + '!')
errlog('Working directory: ' + build_root)
return False
except Exception as e:
errlog('Build failed due to exception!')
errlog('Working directory: ' + build_root)
errlog(str(e))
return False
return True
def cmake_configure(generator, build_root, src_root, build_type, extra_cmake_args=[]):
debug_print('cmake_configure(generator=' + str(generator) + ', build_root=' + str(build_root) + ', src_root=' + str(src_root) + ', build_type=' + str(build_type) + ', extra_cmake_args=' + str(extra_cmake_args) + ')')
# Configure
if not os.path.isdir(build_root):
# Create build output directory if it doesn't yet exist.
os.mkdir(build_root)
try:
if generator:
generator = ['-G', generator]
else:
generator = []
cmdline = ['cmake'] + generator + ['-DCMAKE_BUILD_TYPE=' + build_type, '-DPYTHON_EXECUTABLE=' + sys.executable] + extra_cmake_args + [src_root]
print('Running CMake: ' + str(cmdline))
def quote_parens(x):
if ' ' in x:
return '"' + x.replace('"', '\\"') + '"'
else:
return x
# Create a file 'recmake.bat/sh' in the build root that user can call to
# manually recmake the build tree with the previous build params
open(os.path.join(build_root, 'recmake.' + ('bat' if WINDOWS else 'sh')), 'w').write(' '.join(map(quote_parens, cmdline)))
ret = subprocess.check_call(cmdline, cwd=build_root, env=build_env(CMAKE_GENERATOR))
if ret != 0:
errlog('CMake invocation failed with exit code ' + str(ret) + '!')
errlog('Working directory: ' + build_root)
return False
except OSError as e:
if e.errno == errno.ENOENT:
errlog(str(e))
errlog('Could not run CMake, perhaps it has not been installed?')
if WINDOWS:
errlog('Installing this package requires CMake. Get it from http://www.cmake.org/')
elif LINUX:
errlog('Installing this package requires CMake. Get it via your system package manager (e.g. sudo apt-get install cmake), or from http://www.cmake.org/')
elif MACOS:
errlog('Installing this package requires CMake. Get it via a macOS package manager (Homebrew: "brew install cmake", or MacPorts: "sudo port install cmake"), or from http://www.cmake.org/')
return False
raise
except Exception as e:
errlog('CMake invocation failed due to exception!')
errlog('Working directory: ' + build_root)
errlog(str(e))
return False
return True
def xcode_sdk_version():
try:
output = subprocess.check_output(['xcrun', '--show-sdk-version'])
if sys.version_info >= (3,):
output = output.decode('utf8')
return output.strip().split('.')
except:
return platform.mac_ver()[0].split('.')
def build_fastcomp(tool):
debug_print('build_fastcomp(' + str(tool) + ')')
fastcomp_root = tool.installation_path()
fastcomp_src_root = os.path.join(fastcomp_root, 'src')
# Does this tool want to be git cloned from github?
if hasattr(tool, 'git_branch'):
success = git_clone_checkout_and_pull(tool.download_url(), fastcomp_src_root, tool.git_branch)
if not success:
return False
if hasattr(tool, 'clang_url'):
clang_root = os.path.join(fastcomp_src_root, 'tools/clang')
success = git_clone_checkout_and_pull(tool.clang_url, clang_root, tool.git_branch)
if not success:
return False
if hasattr(tool, 'lld_url'):
lld_root = os.path.join(fastcomp_src_root, 'tools/lld')
success = git_clone_checkout_and_pull(tool.lld_url, lld_root, tool.git_branch)
if not success:
return False
else:
# Not a git cloned tool, so instead download from git tagged releases
success = download_and_unzip(tool.download_url(), fastcomp_src_root, filename_prefix='llvm-e')
if not success:
return False
success = download_and_unzip(tool.windows_clang_url if WINDOWS else tool.unix_clang_url, os.path.join(fastcomp_src_root, 'tools/clang'), filename_prefix='clang-e')
if not success:
return False
args = []
cmake_generator = CMAKE_GENERATOR
if 'Visual Studio 16' in CMAKE_GENERATOR: # VS2019
# With Visual Studio 16 2019, CMake changed the way they specify target arch.
# Instead of appending it into the CMake generator line, it is specified
# with a -A arch parameter.
args += ['-A', 'x64' if tool.bitness == 64 else 'x86']
elif 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
cmake_generator += ' Win64'
build_dir = llvm_build_dir(tool)
build_root = os.path.join(fastcomp_root, build_dir)
build_type = decide_cmake_build_type(tool)
# Configure
tests_arg = 'ON' if BUILD_FOR_TESTING else 'OFF'
enable_assertions = ENABLE_LLVM_ASSERTIONS.lower() == 'on' or (ENABLE_LLVM_ASSERTIONS == 'auto' and build_type.lower() != 'release' and build_type.lower() != 'minsizerel')
only_supports_wasm = hasattr(tool, 'only_supports_wasm')
if ARCH == 'x86' or ARCH == 'x86_64':
targets_to_build = 'X86'
elif ARCH == 'arm':
targets_to_build = 'ARM'
elif ARCH == 'aarch64':
targets_to_build = 'AArch64'
else:
# May have problems with emconfigure
targets_to_build = ''
if not only_supports_wasm:
if targets_to_build != '':
targets_to_build += ';'
targets_to_build += 'JSBackend'
args += ['-DLLVM_TARGETS_TO_BUILD=' + targets_to_build, '-DLLVM_INCLUDE_EXAMPLES=OFF', '-DCLANG_INCLUDE_EXAMPLES=OFF', '-DLLVM_INCLUDE_TESTS=' + tests_arg, '-DCLANG_INCLUDE_TESTS=' + tests_arg, '-DLLVM_ENABLE_ASSERTIONS=' + ('ON' if enable_assertions else 'OFF')]
if os.environ.get('LLVM_CMAKE_ARGS'):
extra_args = os.environ['LLVM_CMAKE_ARGS'].split(',')
print('Passing the following extra arguments to LLVM CMake configuration: ' + str(extra_args))
args += extra_args
# MacOS < 10.13 workaround for LLVM build bug https://github.com/kripken/emscripten/issues/5418:
# specify HAVE_FUTIMENS=0 in the build if building with target SDK that is older than 10.13.
if MACOS and (not os.environ.get('LLVM_CMAKE_ARGS') or 'HAVE_FUTIMENS' not in os.environ.get('LLVM_CMAKE_ARGS')) and xcode_sdk_version() < ['10', '13']:
print('Passing -DHAVE_FUTIMENS=0 to LLVM CMake configure to workaround https://github.com/kripken/emscripten/issues/5418. Please update to macOS 10.13 or newer')
args += ['-DHAVE_FUTIMENS=0']
success = cmake_configure(cmake_generator, build_root, fastcomp_src_root, build_type, args)
if not success:
return False
# Make
success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
return success
# LLVM git source tree migrated to a single repository instead of multiple
# ones, build_llvm() builds via that repository structure
def build_llvm(tool):
debug_print('build_llvm(' + str(tool) + ')')
llvm_root = tool.installation_path()
llvm_src_root = os.path.join(llvm_root, 'src')
success = git_clone_checkout_and_pull(tool.download_url(), llvm_src_root, tool.git_branch)
if not success:
return False
build_dir = llvm_build_dir(tool)
build_root = os.path.join(llvm_root, build_dir)
build_type = decide_cmake_build_type(tool)
# Configure
tests_arg = 'ON' if BUILD_FOR_TESTING else 'OFF'
enable_assertions = ENABLE_LLVM_ASSERTIONS.lower() == 'on' or (ENABLE_LLVM_ASSERTIONS == 'auto' and build_type.lower() != 'release' and build_type.lower() != 'minsizerel')
if ARCH == 'x86' or ARCH == 'x86_64':
targets_to_build = 'WebAssembly;X86'
elif ARCH == 'arm':
targets_to_build = 'WebAssembly;ARM'
elif ARCH == 'aarch64':
targets_to_build = 'WebAssembly;AArch64'
else:
targets_to_build = 'WebAssembly'
args = ['-DLLVM_TARGETS_TO_BUILD=' + targets_to_build,
'-DLLVM_INCLUDE_EXAMPLES=OFF',
'-DCLANG_INCLUDE_EXAMPLES=OFF',
'-DLLVM_INCLUDE_TESTS=' + tests_arg,
'-DCLANG_INCLUDE_TESTS=' + tests_arg,
'-DLLVM_ENABLE_ASSERTIONS=' + ('ON' if enable_assertions else 'OFF')]
# LLVM build system bug: looks like everything needs to be passed to LLVM_ENABLE_PROJECTS twice. (or every second field is ignored?)
# LLVM build system bug #2: compiler-rt does not build on Windows. It insists on performing a CMake install step that writes to C:\Program Files. Attempting
# to reroute that to build_root directory then fails on an error
# file INSTALL cannot find
# "C:/code/emsdk/llvm/git/build_master_vs2017_64/$(Configuration)/lib/clang/10.0.0/lib/windows/clang_rt.ubsan_standalone-x86_64.lib".
# (there instead of $(Configuration), one would need ${CMAKE_BUILD_TYPE} ?)
# It looks like compiler-rt is not compatible to build on Windows?
args += ['-DLLVM_ENABLE_PROJECTS="clang;clang;lld;lld"']
cmake_generator = CMAKE_GENERATOR
if 'Visual Studio 16' in CMAKE_GENERATOR: # VS2019
# With Visual Studio 16 2019, CMake changed the way they specify target arch.
# Instead of appending it into the CMake generator line, it is specified
# with a -A arch parameter.
args += ['-A', 'x64' if tool.bitness == 64 else 'x86']
args += ['-Thost=x64']
elif 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
cmake_generator += ' Win64'
args += ['-Thost=x64']
if os.environ.get('LLVM_CMAKE_ARGS'):
extra_args = os.environ['LLVM_CMAKE_ARGS'].split(',')
print('Passing the following extra arguments to LLVM CMake configuration: ' + str(extra_args))
args += extra_args
cmakelists_dir = os.path.join(llvm_src_root, 'llvm')
success = cmake_configure(cmake_generator, build_root, cmakelists_dir, build_type, args)
if not success:
return False
# Make
success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
return success
def build_ninja(tool):
debug_print('build_ninja(' + str(tool) + ')')
root = os.path.normpath(tool.installation_path())
src_root = os.path.join(root, 'src')
success = git_clone_checkout_and_pull(tool.download_url(), src_root, tool.git_branch)
if not success:
return False
build_dir = llvm_build_dir(tool)
build_root = os.path.join(root, build_dir)
build_type = decide_cmake_build_type(tool)
# Configure
cmake_generator = CMAKE_GENERATOR
args = []
if 'Visual Studio 16' in CMAKE_GENERATOR: # VS2019
# With Visual Studio 16 2019, CMake changed the way they specify target arch.
# Instead of appending it into the CMake generator line, it is specified
# with a -A arch parameter.
args += ['-A', 'x64' if tool.bitness == 64 else 'x86']
args += ['-Thost=x64']
elif 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
cmake_generator += ' Win64'
args += ['-Thost=x64']
cmakelists_dir = os.path.join(src_root)
success = cmake_configure(cmake_generator, build_root, cmakelists_dir, build_type, args)
if not success:
return False
# Make
success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
if success:
bin_dir = os.path.join(root, 'bin')
mkdir_p(bin_dir)
exe_paths = [os.path.join(build_root, 'Release', 'ninja'), os.path.join(build_root, 'ninja')]
for e in exe_paths:
for s in ['.exe', '']:
ninja = e + s
if os.path.isfile(ninja):
dst = os.path.join(bin_dir, 'ninja' + s)
shutil.copyfile(ninja, dst)
os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)
return success
def build_ccache(tool):
debug_print('build_ccache(' + str(tool) + ')')
root = os.path.normpath(tool.installation_path())
src_root = os.path.join(root, 'src')
success = git_clone_checkout_and_pull(tool.download_url(), src_root, tool.git_branch)
if not success:
return False
build_dir = llvm_build_dir(tool)
build_root = os.path.join(root, build_dir)
build_type = decide_cmake_build_type(tool)
# Configure
cmake_generator = CMAKE_GENERATOR
args = ['-DZSTD_FROM_INTERNET=ON']
if 'Visual Studio 16' in CMAKE_GENERATOR: # VS2019
# With Visual Studio 16 2019, CMake changed the way they specify target arch.
# Instead of appending it into the CMake generator line, it is specified
# with a -A arch parameter.
args += ['-A', 'x64' if tool.bitness == 64 else 'x86']
args += ['-Thost=x64']
elif 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
cmake_generator += ' Win64'
args += ['-Thost=x64']
cmakelists_dir = os.path.join(src_root)
success = cmake_configure(cmake_generator, build_root, cmakelists_dir, build_type, args)
if not success:
return False
# Make
success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
if success:
bin_dir = os.path.join(root, 'bin')
mkdir_p(bin_dir)
exe_paths = [os.path.join(build_root, 'Release', 'ccache'), os.path.join(build_root, 'ccache')]
for e in exe_paths:
for s in ['.exe', '']:
ccache = e + s
if os.path.isfile(ccache):
dst = os.path.join(bin_dir, 'ccache' + s)
shutil.copyfile(ccache, dst)
os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)
cache_dir = os.path.join(root, 'cache')
open(os.path.join(root, 'emcc_ccache.conf'), 'w').write('''# Set maximum cache size to 10 GB:
max_size = 10G
cache_dir = %s
''' % cache_dir)
mkdir_p(cache_dir)
return success
# Emscripten asm.js optimizer build scripts:
def optimizer_build_root(tool):
build_root = tool.installation_path().strip()
if build_root.endswith('/') or build_root.endswith('\\'):
build_root = build_root[:-1]
generator_prefix = cmake_generator_prefix()
build_root = build_root + generator_prefix + '_' + str(tool.bitness) + 'bit_optimizer'
return build_root
def uninstall_optimizer(tool):
debug_print('uninstall_optimizer(' + str(tool) + ')')
build_root = optimizer_build_root(tool)
print("Deleting path '" + build_root + "'")
remove_tree(build_root)
def is_optimizer_installed(tool):
build_root = optimizer_build_root(tool)
return os.path.exists(build_root)
# Finds the newest installed version of a given tool
def find_latest_installed_tool(name):
for t in reversed(tools):
if t.id == name and t.is_installed():
return t
# npm install in Emscripten root directory
def emscripten_npm_install(tool, directory):
node_tool = find_latest_installed_tool('node')
if not node_tool:
npm_fallback = which('npm')
if not npm_fallback:
errlog('Failed to find npm command!')
errlog('Running "npm ci" in installed Emscripten root directory ' + tool.installation_path() + ' is required!')
errlog('Please install node.js first!')
return False
node_path = os.path.dirname(npm_fallback)
else:
node_path = os.path.join(node_tool.installation_path(), 'bin')
npm = os.path.join(node_path, 'npm' + ('.cmd' if WINDOWS else ''))
env = os.environ.copy()
env["PATH"] = node_path + os.pathsep + env["PATH"]
print('Running post-install step: npm ci ...')
try:
subprocess.check_output(
[npm, 'ci', '--production', '--no-optional'],
cwd=directory, stderr=subprocess.STDOUT, env=env,
universal_newlines=True)
except subprocess.CalledProcessError as e:
errlog('Error running %s:\n%s' % (e.cmd, e.output))
return False
# Manually install the appropriate native Closure Compiler package
# This is currently needed because npm ci will install the packages
# for Closure for all platforms, adding 180MB to the download size
# There are two problems here:
# 1. npm ci does not consider the platform of optional dependencies
# https://github.com/npm/cli/issues/558
# 2. A bug with the native compiler has bloated the packages from
# 30MB to almost 300MB
# https://github.com/google/closure-compiler-npm/issues/186
# If either of these bugs are fixed then we can remove this exception
closure_compiler_native = ''
if LINUX and ARCH in ('x86', 'x86_64'):
closure_compiler_native = 'google-closure-compiler-linux'
if MACOS and ARCH in ('x86', 'x86_64'):
closure_compiler_native = 'google-closure-compiler-osx'
if WINDOWS and ARCH == 'x86_64':
closure_compiler_native = 'google-closure-compiler-windows'
if closure_compiler_native:
print('Running post-install step: npm install', closure_compiler_native)
try:
subprocess.check_output(
[npm, 'install', closure_compiler_native],
cwd=directory, stderr=subprocess.STDOUT, env=env,
universal_newlines=True)
except subprocess.CalledProcessError as e:
errlog('Error running %s:\n%s' % (e.cmd, e.output))
return False
print('Done running: npm ci')
return True
def emscripten_post_install(tool):
debug_print('emscripten_post_install(' + str(tool) + ')')
src_root = os.path.join(tool.installation_path(), 'tools', 'optimizer')
build_root = optimizer_build_root(tool)
build_type = decide_cmake_build_type(tool)
args = []
# Configure
cmake_generator = CMAKE_GENERATOR
if 'Visual Studio 16' in CMAKE_GENERATOR: # VS2019
# With Visual Studio 16 2019, CMake changed the way they specify target arch.
# Instead of appending it into the CMake generator line, it is specified
# with a -A arch parameter.
args += ['-A', 'x64' if tool.bitness == 64 else 'x86']
elif 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
cmake_generator += ' Win64'
success = cmake_configure(cmake_generator, build_root, src_root, build_type, args)
if not success:
return False
# Make
success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
if not success:
return False
success = emscripten_npm_install(tool, tool.installation_path())
return success
# Binaryen build scripts:
def binaryen_build_root(tool):
build_root = tool.installation_path().strip()
if build_root.endswith('/') or build_root.endswith('\\'):
build_root = build_root[:-1]
generator_prefix = cmake_generator_prefix()
build_root = build_root + generator_prefix + '_' + str(tool.bitness) + 'bit_binaryen'
return build_root
def uninstall_binaryen(tool):
debug_print('uninstall_binaryen(' + str(tool) + ')')
build_root = binaryen_build_root(tool)
print("Deleting path '" + build_root + "'")
remove_tree(build_root)
def is_binaryen_installed(tool):
build_root = binaryen_build_root(tool)
return os.path.exists(build_root)
def build_binaryen_tool(tool):
debug_print('build_binaryen_tool(' + str(tool) + ')')
src_root = tool.installation_path()
build_root = binaryen_build_root(tool)
build_type = decide_cmake_build_type(tool)
# Configure
args = []
cmake_generator = CMAKE_GENERATOR
if 'Visual Studio 16' in CMAKE_GENERATOR: # VS2019
# With Visual Studio 16 2019, CMake changed the way they specify target arch.
# Instead of appending it into the CMake generator line, it is specified
# with a -A arch parameter.
args += ['-A', 'x64' if tool.bitness == 64 else 'x86']
elif 'Visual Studio' in CMAKE_GENERATOR and tool.bitness == 64:
cmake_generator += ' Win64'
if 'Visual Studio' in CMAKE_GENERATOR:
if BUILD_FOR_TESTING:
args += ['-DRUN_STATIC_ANALYZER=1']
success = cmake_configure(cmake_generator, build_root, src_root, build_type, args)
if not success:
return False
# Make
success = make_build(build_root, build_type, 'x64' if tool.bitness == 64 else 'Win32')
# Deploy scripts needed from source repository to build directory
remove_tree(os.path.join(build_root, 'scripts'))
shutil.copytree(os.path.join(src_root, 'scripts'), os.path.join(build_root, 'scripts'))
remove_tree(os.path.join(build_root, 'src', 'js'))
shutil.copytree(os.path.join(src_root, 'src', 'js'), os.path.join(build_root, 'src', 'js'))
return success
def download_and_unzip(zipfile, dest_dir, download_even_if_exists=False,
filename_prefix='', clobber=True):
debug_print('download_and_unzip(zipfile=' + zipfile + ', dest_dir=' + dest_dir + ')')
url = urljoin(emsdk_packages_url, zipfile)
download_target = get_download_target(url, zips_subdir, filename_prefix)
# If the archive was already downloaded, and the directory it would be
# unpacked to has contents, assume it's the same contents and skip.
if not download_even_if_exists and num_files_in_directory(dest_dir) > 0:
print("The contents of file '" + zipfile + "' already exist in destination '" + dest_dir + "', skipping.")
return True
# Otherwise, if the archive must be downloaded, always write into the
# target directory, since it may be a new version of a tool that gets
# installed to the same place (that is, a different download name
# indicates different contents).
download_even_if_exists = True
received_download_target = download_file(url, zips_subdir, download_even_if_exists, filename_prefix)
if not received_download_target:
return False
assert received_download_target == download_target
# Remove the old directory, since we have some SDKs that install into the
# same directory. If we didn't do this contents of the previous install
# could remain.
if clobber:
remove_tree(dest_dir)
if zipfile.endswith('.zip'):
return unzip(download_target, dest_dir, unpack_even_if_exists=download_even_if_exists)
else:
return untargz(download_target, dest_dir, unpack_even_if_exists=download_even_if_exists)
def to_native_path(p):
if WINDOWS and not MSYS:
return to_unix_path(p).replace('/', '\\')
else:
return to_unix_path(p)
# Finds and returns a list of the directories that need to be added to PATH for
# the given set of tools.
def get_required_path(active_tools):
path_add = [to_native_path(emsdk_path())]
for tool in active_tools:
if hasattr(tool, 'activated_path'):
path = to_native_path(tool.expand_vars(tool.activated_path))
path_add.append(path)
return path_add
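# Illustrative result of get_required_path() (hypothetical directory layout, not part of the original script):
#   with a node tool and emscripten active, this might return something like
#   ['<emsdk root>', '<emsdk root>/node/12.18.1_64bit/bin', '<emsdk root>/upstream/emscripten'],
#   i.e. the emsdk root plus each active tool's expanded activated_path.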
# Returns the absolute path to the file '.emscripten' for the current user on
# this system.
def dot_emscripten_path():
return os.path.join(emsdk_path(), ".emscripten")
dot_emscripten = {}
def parse_key_value(line):
if not line:
return ('', '')
eq = line.find('=')
if eq != -1:
key = line[0:eq].strip()
value = line[eq + 1:].strip()
return (key, value)
else:
return (line, '')  # no '=' present: treat the whole line as a key with an empty value
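# Illustrative example of parse_key_value() (hypothetical line, not part of the original script):
#   parse_key_value("LLVM_ROOT = '/path/to/llvm'") -> ('LLVM_ROOT', "'/path/to/llvm'")
#   parse_key_value('no-equals-sign')              -> ('no-equals-sign', '')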
def load_dot_emscripten():
dot_emscripten.clear()
lines = []
try:
lines = open(dot_emscripten_path(), "r").read().split('\n')
except:
pass
for line in lines:
try:
key, value = parse_key_value(line)
if value != '':
dot_emscripten[key] = value
except:
pass
def generate_dot_emscripten(active_tools):
temp_dir = sdk_path('tmp')
mkdir_p(temp_dir)
cfg = 'import os\n'
cfg += "emsdk_path = os.path.dirname(os.environ.get('EM_CONFIG')).replace('\\\\', '/')\n"
# Different tools may provide the same activated configs; the latest to be
# activated is the relevant one.
activated_config = OrderedDict()
for tool in active_tools:
for name, value in tool.activated_config().items():
activated_config[name] = value
if 'NODE_JS' not in activated_config:
node_fallback = which('nodejs')
if not node_fallback:
node_fallback = 'node'
activated_config['NODE_JS'] = node_fallback
for name, value in activated_config.items():
cfg += name + " = '" + value + "'\n"
cfg += '''\
TEMP_DIR = '%s'
COMPILER_ENGINE = NODE_JS
JS_ENGINES = [NODE_JS]
''' % temp_dir
cfg = cfg.replace("'" + emsdk_path(), "emsdk_path + '")
if os.path.exists(dot_emscripten_path()):
backup_path = dot_emscripten_path() + ".old"
move_with_overwrite(dot_emscripten_path(), backup_path)
with open(dot_emscripten_path(), "w") as text_file:
text_file.write(cfg)
# Clear old emscripten content.
rmfile(os.path.join(emsdk_path(), ".emscripten_sanity"))
path_add = get_required_path(active_tools)
if not WINDOWS:
emsdk_env = sdk_path('emsdk_env.sh')
print('Next steps:')
print('- To conveniently access emsdk tools from the command line,')
print(' consider adding the following directories to your PATH:')
for p in path_add:
print(' ' + p)
print('- This can be done for the current shell by running:')
print(' source "%s"' % emsdk_env)
print('- Configure emsdk in your bash profile by running:')
print(' echo \'source "%s"\' >> $HOME/.bash_profile' % emsdk_env)
def find_msbuild_dir():
if 'ProgramFiles' in os.environ and os.environ['ProgramFiles']:
program_files = os.environ['ProgramFiles']
else:
program_files = 'C:/Program Files'
if 'ProgramFiles(x86)' in os.environ and os.environ['ProgramFiles(x86)']:
program_files_x86 = os.environ['ProgramFiles(x86)']
else:
program_files_x86 = 'C:/Program Files (x86)'
MSBUILDX86_DIR = os.path.join(program_files_x86, "MSBuild/Microsoft.Cpp/v4.0/Platforms")
MSBUILD_DIR = os.path.join(program_files, "MSBuild/Microsoft.Cpp/v4.0/Platforms")
if os.path.exists(MSBUILDX86_DIR):
return MSBUILDX86_DIR
if os.path.exists(MSBUILD_DIR):
return MSBUILD_DIR
# No MSbuild installed.
return ''
class Tool(object):
def __init__(self, data):
# Convert the dictionary representation of the tool in 'data' to members of
# this class for convenience.
for key, value in data.items():
# Python2 compat, convert unicode to str
if sys.version_info < (3,) and isinstance(value, unicode): # noqa
value = value.encode('Latin-1')
setattr(self, key, value)
# Cache the name ID of this Tool (these are read very often)
self.name = self.id + '-' + self.version
if hasattr(self, 'bitness'):
self.name += '-' + str(self.bitness) + 'bit'
def __str__(self):
return self.name
def __repr__(self):
return self.name
def expand_vars(self, str):
if WINDOWS and '%MSBuildPlatformsDir%' in str:
str = str.replace('%MSBuildPlatformsDir%', find_msbuild_dir())
if '%cmake_build_type_on_win%' in str:
str = str.replace('%cmake_build_type_on_win%', (decide_cmake_build_type(self) + '/') if WINDOWS else '')
if '%installation_dir%' in str:
str = str.replace('%installation_dir%', sdk_path(self.installation_dir()))
if '%generator_prefix%' in str:
str = str.replace('%generator_prefix%', cmake_generator_prefix())
str = str.replace('%.exe%', '.exe' if WINDOWS else '')
if '%fastcomp_build_dir%' in str:
str = str.replace('%fastcomp_build_dir%', llvm_build_dir(self))
if '%fastcomp_build_bin_dir%' in str:
str = str.replace('%fastcomp_build_bin_dir%', fastcomp_build_bin_dir(self))
return str
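# Illustrative example of expand_vars() (hypothetical input, not part of the original script):
#   expand_vars('%installation_dir%/bin/clang%.exe%') returns
#   '<this tool's install dir>/bin/clang.exe' on Windows and
#   '<this tool's install dir>/bin/clang' on other platforms.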
# Return true if this tool requires building from source, and false if this is a precompiled tool.
def needs_compilation(self):
if hasattr(self, 'cmake_build_type'):
return True
if hasattr(self, 'uses'):
for tool_name in self.uses:
tool = find_tool(tool_name)
if not tool:
debug_print('Tool ' + str(self) + ' depends on ' + tool_name + ' which does not exist!')
continue
if tool.needs_compilation():
return True
return False
# Specifies the target path where this tool will be installed to. This could
# either be a directory or a filename (e.g. in case of node.js)
def installation_path(self):
if WINDOWS and hasattr(self, 'windows_install_path'):
pth = self.expand_vars(self.windows_install_path)
return sdk_path(pth)
if hasattr(self, 'install_path'):
pth = self.expand_vars(self.install_path)
return sdk_path(pth)
p = self.version
if hasattr(self, 'bitness') and (not hasattr(self, 'append_bitness') or self.append_bitness):
p += '_' + str(self.bitness) + 'bit'
return sdk_path(os.path.join(self.id, p))
# Specifies the target directory this tool will be installed to.
def installation_dir(self):
dir = self.installation_path()
if path_points_to_directory(dir):
return dir
else:
return os.path.dirname(dir)
# Returns the configuration item that needs to be added to .emscripten to make
# this Tool active for the current user.
def activated_config(self):
if not hasattr(self, 'activated_cfg'):
return {}
config = OrderedDict()
expanded = to_unix_path(self.expand_vars(self.activated_cfg))
for specific_cfg in expanded.split(';'):
name, value = specific_cfg.split('=')
config[name] = value.strip("'")
return config
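# Illustrative example of activated_config() (hypothetical manifest entry, not part of the original script):
#   an activated_cfg string such as
#   "LLVM_ROOT='%installation_dir%/bin';BINARYEN_ROOT='%installation_dir%'"
#   is expanded and parsed into an OrderedDict like
#   {'LLVM_ROOT': '<install dir>/bin', 'BINARYEN_ROOT': '<install dir>'}
#   after variable expansion and quote stripping.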
def activated_environment(self):
if hasattr(self, 'activated_env'):
return self.expand_vars(self.activated_env).split(';')
else:
return []
def compatible_with_this_arch(self):
if hasattr(self, 'arch'):
if self.arch != ARCH:
return False
return True
def compatible_with_this_os(self):
if hasattr(self, 'os'):
if self.os == 'all':
return True
if self.compatible_with_this_arch() and ((WINDOWS and 'win' in self.os) or (LINUX and ('linux' in self.os or 'unix' in self.os)) or (MACOS and ('macos' in self.os or 'unix' in self.os))):
return True
else:
return False
else:
if not hasattr(self, 'macos_url') and not hasattr(self, 'windows_url') and not hasattr(self, 'unix_url') and not hasattr(self, 'linux_url'):
return True
if MACOS and hasattr(self, 'macos_url') and self.compatible_with_this_arch():
return True
if LINUX and hasattr(self, 'linux_url') and self.compatible_with_this_arch():
return True
if WINDOWS and (hasattr(self, 'windows_url') or hasattr(self, 'windows_install_path')) and self.compatible_with_this_arch():
return True
if UNIX and hasattr(self, 'unix_url'):
return True
return hasattr(self, 'url')
# the "version file" is a file inside install dirs that indicates the
# version installed there. this helps disambiguate when there is more than
# one version that may be installed to the same directory (which is used
# to avoid accumulating builds over time in some cases, with new builds
# overwriting the old)
def get_version_file_path(self):
return os.path.join(self.installation_path(), '.emsdk_version')
def is_installed_version(self):
version_file_path = self.get_version_file_path()
if os.path.isfile(version_file_path):
with open(version_file_path, 'r') as version_file:
return version_file.read().strip() == self.name
return False
def update_installed_version(self):
with open(self.get_version_file_path(), 'w') as version_file:
version_file.write(self.name + '\n')
return None
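# Illustrative example (hypothetical tool name, not part of the original script):
#   after installing 'node-12.18.1-64bit', the file <install dir>/.emsdk_version
#   would contain the single line 'node-12.18.1-64bit', which is what
#   is_installed_version() compares against self.name.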
def is_installed(self, skip_version_check=False):
# If this tool/sdk depends on other tools, require that all dependencies are
# installed for this tool to count as being installed.
if hasattr(self, 'uses'):
for tool_name in self.uses:
tool = find_tool(tool_name)
if tool is None:
errlog("Manifest error: No tool by name '" + tool_name + "' found! This may indicate an internal SDK error!")
return False
if not tool.is_installed():
return False
if self.download_url() is None:
# This tool does not contain downloadable elements, so it is installed by default.
return True
content_exists = os.path.exists(self.installation_path()) and (os.path.isfile(self.installation_path()) or num_files_in_directory(self.installation_path()) > 0)
# For e.g. fastcomp clang from git repo, the activated PATH is the
# directory where the compiler is built to, and installation_path is
# the directory where the source tree exists. To distinguish between
# multiple packages sharing the same source (e.g. clang-main-32bit and
# clang-main-64bit
# share the same git repo), require that in addition to the installation
# directory, each item in the activated PATH must exist.
if hasattr(self, 'activated_path') and not os.path.exists(self.expand_vars(self.activated_path)):
content_exists = False
if hasattr(self, 'custom_is_installed_script'):
if self.custom_is_installed_script == 'is_optimizer_installed':
return is_optimizer_installed(self)
elif self.custom_is_installed_script == 'is_binaryen_installed':
return is_binaryen_installed(self)
else:
raise Exception('Unknown custom_is_installed_script directive "' + self.custom_is_installed_script + '"!')
return content_exists and (skip_version_check or self.is_installed_version())
def is_active(self):
if not self.is_installed():
return False
if self.id == 'vs-tool':
# vs-tool is a special tool since all versions must be installed to the
# same dir, which means that if this tool is installed, it is also active.
return True
# All dependencies of this tool must be active as well.
deps = self.dependencies()
for tool in deps:
if not tool.is_active():
return False
activated_cfg = self.activated_config()
if not activated_cfg:
return len(deps) > 0
for key, value in activated_cfg.items():
if key not in dot_emscripten:
debug_print(str(self) + ' is not active, because key="' + key + '" does not exist in .emscripten')
return False
# all paths are stored dynamically relative to the emsdk root, so
# normalize those first.
dot_emscripten_key = dot_emscripten[key].replace("emsdk_path + '", "'" + emsdk_path())
dot_emscripten_key = dot_emscripten_key.strip("'")
if dot_emscripten_key != value:
debug_print(str(self) + ' is not active, because key="' + key + '" has value "' + dot_emscripten_key + '" but should have value "' + value + '"')
return False
return True
# Returns true if the system environment variables requires by this tool are currently active.
def is_env_active(self):
envs = self.activated_environment()
for env in envs:
key, value = parse_key_value(env)
if key not in os.environ or to_unix_path(os.environ[key]) != to_unix_path(value):
debug_print(str(self) + ' is not active, because environment variable key="' + key + '" has value "' + str(os.getenv(key)) + '" but should have value "' + value + '"')
return False
if hasattr(self, 'activated_path'):
path = to_unix_path(self.expand_vars(self.activated_path))
for p in path:
path_items = os.environ['PATH'].replace('\\', '/').split(ENVPATH_SEPARATOR)
if not normalized_contains(path_items, p):
debug_print(str(self) + ' is not active, because environment variable PATH item "' + p + '" is not present (PATH=' + os.environ['PATH'] + ')')
return False
return True
# If this tool can be installed on this system, this function returns True.
# Otherwise, this function returns a string that describes the reason why this
# tool is not available.
def can_be_installed(self):
if hasattr(self, 'bitness'):
if self.bitness == 64 and not is_os_64bit():
return "this tool is only provided for 64-bit OSes"
if self.id == 'vs-tool':
msbuild_dir = find_msbuild_dir()
if msbuild_dir:
return True
else:
return "Visual Studio was not found!"
else:
return True
def download_url(self):
if WINDOWS and hasattr(self, 'windows_url'):
return self.windows_url
elif MACOS and hasattr(self, 'macos_url'):
return self.macos_url
elif LINUX and hasattr(self, 'linux_url'):
return self.linux_url
elif UNIX and hasattr(self, 'unix_url'):
return self.unix_url
elif hasattr(self, 'url'):
return self.url
else:
return None
def install(self):
"""Returns True if the Tool was installed of False if was skipped due to
already being installed.
"""
if self.can_be_installed() is not True:
exit_with_error("The tool '" + str(self) + "' is not available due to the reason: " + self.can_be_installed())
if self.id == 'sdk':
return self.install_sdk()
else:
return self.install_tool()
def install_sdk(self):
"""Returns True if any SDK component was installed of False all componented
were already installed.
"""
print("Installing SDK '" + str(self) + "'..")
installed = False
for tool_name in self.uses:
tool = find_tool(tool_name)
if tool is None:
exit_with_error("Manifest error: No tool by name '" + tool_name + "' found! This may indicate an internal SDK error!")
installed |= tool.install()
if not installed:
print("All SDK components already installed: '" + str(self) + "'.")
return False
if getattr(self, 'custom_install_script', None) == 'emscripten_npm_install':
# upstream tools have hardcoded paths that are not stored in emsdk_manifest.json registry
install_path = 'upstream' if 'releases-upstream' in self.version else 'fastcomp'
emscripten_dir = os.path.join(emsdk_path(), install_path, 'emscripten')
# Older versions of the sdk did not include the node_modules directory
# and require `npm ci` to be run post-install
if not os.path.exists(os.path.join(emscripten_dir, 'node_modules')):
if not emscripten_npm_install(self, emscripten_dir):
exit_with_error('post-install step failed: emscripten_npm_install')
print("Done installing SDK '" + str(self) + "'.")
return True
def install_tool(self):
"""Returns True if the SDK was installed of False if was skipped due to
already being installed.
"""
# Avoid doing a redundant reinstall of the tool, if it has already been installed.
# However all tools that are sourced directly from git branches do need to be
# installed every time when requested, since the install step is then used to git
# pull the tool to a newer version.
if self.is_installed() and not hasattr(self, 'git_branch'):
print("Skipped installing " + self.name + ", already installed.")
return False
print("Installing tool '" + str(self) + "'..")
url = self.download_url()
if hasattr(self, 'custom_install_script') and self.custom_install_script == 'build_fastcomp':
success = build_fastcomp(self)
elif hasattr(self, 'custom_install_script') and self.custom_install_script == 'build_llvm':
success = build_llvm(self)
elif hasattr(self, 'custom_install_script') and self.custom_install_script == 'build_ninja':
success = build_ninja(self)
elif hasattr(self, 'custom_install_script') and self.custom_install_script == 'build_ccache':
success = build_ccache(self)
elif hasattr(self, 'git_branch'):
success = git_clone_checkout_and_pull(url, self.installation_path(), self.git_branch)
elif url.endswith(ARCHIVE_SUFFIXES):
# TODO: explain the vs-tool special-casing
download_even_if_exists = (self.id == 'vs-tool')
# The 'releases' sdk doesn't include a version number in the directory
# name, so only one version can be installed at a time and each new
# install clobbers the previous one. This means we always need to extract
# this archive even when the target directory exists.
download_even_if_exists = download_even_if_exists or (self.id == 'releases')
filename_prefix = getattr(self, 'zipfile_prefix', '')
success = download_and_unzip(url, self.installation_path(), download_even_if_exists=download_even_if_exists, filename_prefix=filename_prefix)
else:
dst_file = download_file(urljoin(emsdk_packages_url, self.download_url()), self.installation_path())
if dst_file:
success = True
else:
success = False
if not success:
exit_with_error("Installation failed!")
if hasattr(self, 'custom_install_script'):
if self.custom_install_script == 'emscripten_post_install':
success = emscripten_post_install(self)
elif self.custom_install_script == 'emscripten_npm_install':
success = emscripten_npm_install(self, self.installation_path())
elif self.custom_install_script in ('build_fastcomp', 'build_llvm', 'build_ninja', 'build_ccache'):
# 'build_fastcomp' is a special one that does the download on its
# own, others do the download manually.
pass
elif self.custom_install_script == 'build_binaryen':
success = build_binaryen_tool(self)
else:
raise Exception('Unknown custom_install_script command "' + self.custom_install_script + '"!')
if not success:
exit_with_error("Installation failed!")
# Install an emscripten-version.txt file if told to, and if there is one.
# (If this is not an actual release, but some other build, then we do not
# write anything.)
if hasattr(self, 'emscripten_releases_hash'):
emscripten_version_file_path = os.path.join(to_native_path(self.expand_vars(self.activated_path)), 'emscripten-version.txt')
version = get_emscripten_release_version(self.emscripten_releases_hash)
if version:
open(emscripten_version_file_path, 'w').write('"%s"' % version)
print("Done installing tool '" + str(self) + "'.")
# Sanity check that the installation succeeded, and if so, remove unneeded
# leftover installation files.
if not self.is_installed(skip_version_check=True):
exit_with_error("Installation of '" + str(self) + "' failed, but no error was detected. Either something went wrong with the installation, or this may indicate an internal emsdk error.")
self.cleanup_temp_install_files()
self.update_installed_version()
return True
def cleanup_temp_install_files(self):
url = self.download_url()
if url.endswith(ARCHIVE_SUFFIXES):
download_target = get_download_target(url, zips_subdir, getattr(self, 'zipfile_prefix', ''))
debug_print("Deleting temporary zip file " + download_target)
rmfile(download_target)
def uninstall(self):
if not self.is_installed():
print("Tool '" + str(self) + "' was not installed. No need to uninstall.")
return
print("Uninstalling tool '" + str(self) + "'..")
if hasattr(self, 'custom_uninstall_script'):
if self.custom_uninstall_script == 'uninstall_optimizer':
uninstall_optimizer(self)
elif self.custom_uninstall_script == 'uninstall_binaryen':
uninstall_binaryen(self)
else:
raise Exception('Unknown custom_uninstall_script directive "' + self.custom_uninstall_script + '"!')
print("Deleting path '" + self.installation_path() + "'")
remove_tree(self.installation_path())
print("Done uninstalling '" + str(self) + "'.")
def dependencies(self):
if not hasattr(self, 'uses'):
return []
deps = []
for tool_name in self.uses:
tool = find_tool(tool_name)
if tool:
deps += [tool]
return deps
def recursive_dependencies(self):
if not hasattr(self, 'uses'):
return []
deps = []
for tool_name in self.uses:
tool = find_tool(tool_name)
if tool:
deps += [tool]
deps += tool.recursive_dependencies()
return deps
# A global registry of all known Emscripten SDK tools available in the SDK manifest.
tools = []
tools_map = {}
def add_tool(tool):
tool.is_sdk = False
tools.append(tool)
if find_tool(str(tool)):
raise Exception('Duplicate tool ' + str(tool) + '! Existing:\n{' + ', '.join("%s: %s" % item for item in vars(find_tool(str(tool))).items()) + '}, New:\n{' + ', '.join("%s: %s" % item for item in vars(tool).items()) + '}')
tools_map[str(tool)] = tool
# A global registry of all known SDK toolsets.
sdks = []
sdks_map = {}
def add_sdk(sdk):
sdk.is_sdk = True
sdks.append(sdk)
if find_sdk(str(sdk)):
raise Exception('Duplicate sdk ' + str(sdk) + '! Existing:\n{' + ', '.join("%s: %s" % item for item in vars(find_sdk(str(sdk))).items()) + '}, New:\n{' + ', '.join("%s: %s" % item for item in vars(sdk).items()) + '}')
sdks_map[str(sdk)] = sdk
# N.B. In both tools and sdks list above, we take the convention that the newest
# items are at the back of the list (ascending chronological order)
def find_tool(name):
return tools_map.get(name)
def find_sdk(name):
return sdks_map.get(name)
def is_os_64bit():
# http://stackoverflow.com/questions/2208828/detect-64bit-os-windows-in-python
return platform.machine().endswith('64')
def find_latest_releases_version():
releases_info = load_releases_info()
return releases_info['latest']
def find_latest_releases_hash():
releases_info = load_releases_info()
return releases_info['releases'][find_latest_releases_version()]
def find_latest_releases_sdk(which):
return 'sdk-releases-%s-%s-64bit' % (which, find_latest_releases_hash())
def find_tot_sdk():
debug_print('Fetching emscripten-releases repository...')
global extra_release_tag
extra_release_tag = get_emscripten_releases_tot()
return 'sdk-releases-upstream-%s-64bit' % (extra_release_tag)
# Given a git hash in emscripten-releases, find the emscripten
# version for it. There may not be one if this is not the hash of
# a release, in which case we return None.
def get_emscripten_release_version(emscripten_releases_hash):
releases_info = load_releases_info()
for key, value in dict(releases_info['releases']).items():
if value == emscripten_releases_hash:
return key
return None
# Get the tip-of-tree build identifier.
def get_emscripten_releases_tot():
git_clone_checkout_and_pull(emscripten_releases_repo, sdk_path('releases'), 'master')
recent_releases = git_recent_commits(sdk_path('releases'))
  # The recent releases are the latest hashes in the git repo. There may not
  # be a build for the most recent ones yet, so find the newest one that does
  # have a build.
for release in recent_releases:
url = emscripten_releases_download_url_template % (
os_name_for_emscripten_releases(),
release,
'tbz2' if not WINDOWS else 'zip'
)
try:
urlopen(url)
except:
continue
return release
exit_with_error('failed to find build of any recent emsdk revision')
def get_release_hash(arg, releases_info):
return releases_info.get(arg, None) or releases_info.get('sdk-' + arg + '-64bit')
def version_key(ver):
return tuple(map(int, re.split('[._-]', ver)))
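
# A quick illustration (not part of the emsdk logic itself): version_key turns
# '1.37.10' into (1, 37, 10), so
#   sorted(['1.37.10', '1.37.2'], key=version_key) == ['1.37.2', '1.37.10']
# whereas plain lexicographic string sorting would put '1.37.10' first.
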
# A sort function that is compatible with both Python 2 and Python 3 using a
# custom comparison function.
def python_2_3_sorted(arr, cmp):
if sys.version_info >= (3,):
return sorted(arr, key=functools.cmp_to_key(cmp))
else:
return sorted(arr, cmp=cmp)
def is_emsdk_sourced_from_github():
return os.path.exists(os.path.join(emsdk_path(), '.git'))
def update_emsdk():
if is_emsdk_sourced_from_github():
errlog('You seem to have bootstrapped Emscripten SDK by cloning from GitHub. In this case, use "git pull" instead of "emsdk update" to update emsdk. (Not doing that automatically in case you have local changes)')
sys.exit(1)
if not download_and_unzip(emsdk_zip_download_url, emsdk_path(), download_even_if_exists=True, clobber=False):
sys.exit(1)
# Lists all legacy (pre-emscripten-releases) tagged versions directly in the Git
# repositories. These we can pull and compile from source.
def load_legacy_emscripten_tags():
return open(sdk_path('legacy-emscripten-tags.txt'), 'r').read().split('\n')
def load_legacy_binaryen_tags():
return open(sdk_path('legacy-binaryen-tags.txt'), 'r').read().split('\n')
def remove_prefix(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
else:
return s
def remove_suffix(s, suffix):
if s.endswith(suffix):
return s[:len(s) - len(suffix)]
else:
return s
# filename should be one of: 'llvm-precompiled-tags-32bit.txt', 'llvm-precompiled-tags-64bit.txt'
def load_file_index_list(filename):
items = open(sdk_path(filename)).read().splitlines()
items = [remove_suffix(remove_suffix(remove_prefix(x, 'emscripten-llvm-e'), '.tar.gz'), '.zip').strip() for x in items]
items = [x for x in items if 'latest' not in x and len(x) > 0]
# Sort versions from oldest to newest (the default sort would be
# lexicographic, i.e. '1.37.1 < 1.37.10 < 1.37.2')
return sorted(items, key=version_key)
def exit_with_error(msg):
errlog(str(msg))
sys.exit(1)
# Load the json info for emscripten-releases.
def load_releases_info():
if not hasattr(load_releases_info, 'cached_info'):
try:
text = open(sdk_path('emscripten-releases-tags.txt'), 'r').read()
load_releases_info.cached_info = json.loads(text)
except Exception as e:
print('Error parsing emscripten-releases-tags.txt!')
exit_with_error(str(e))
return load_releases_info.cached_info
def get_installed_sdk_version():
version_file = sdk_path(os.path.join('upstream', '.emsdk_version'))
if not os.path.exists(version_file):
return None
with open(version_file) as f:
version = f.read()
return version.split('-')[2]
# Get a list of tags for emscripten-releases.
def load_releases_tags():
tags = []
tags_fastcomp = []
info = load_releases_info()
for version, sha in sorted(info['releases'].items(), key=lambda x: version_key(x[0])):
tags.append(sha)
# Only include versions older than 1.39.0 in fastcomp releases
if version_key(version) < (2, 0, 0):
tags_fastcomp.append(sha)
if extra_release_tag:
tags.append(extra_release_tag)
# Explicitly add the currently installed SDK version. This could be a custom
# version (installed explicitly) so it might not be part of the main list loaded above.
installed = get_installed_sdk_version()
if installed and installed not in tags:
tags.append(installed)
return tags, tags_fastcomp
def load_releases_versions():
info = load_releases_info()
versions = list(info['releases'].keys())
return versions
def is_string(s):
if sys.version_info[0] >= 3:
return isinstance(s, str)
return isinstance(s, basestring) # noqa
def load_sdk_manifest():
try:
manifest = json.loads(open(sdk_path("emsdk_manifest.json"), "r").read())
except Exception as e:
print('Error parsing emsdk_manifest.json!')
print(str(e))
return
emscripten_tags = load_legacy_emscripten_tags()
llvm_precompiled_tags_32bit = []
llvm_precompiled_tags_64bit = load_file_index_list('llvm-tags-64bit.txt')
llvm_precompiled_tags = llvm_precompiled_tags_32bit + llvm_precompiled_tags_64bit
binaryen_tags = load_legacy_binaryen_tags()
releases_tags, releases_tags_fastcomp = load_releases_tags()
def dependencies_exist(sdk):
for tool_name in sdk.uses:
tool = find_tool(tool_name)
if not tool:
debug_print('missing dependency: ' + tool_name)
return False
return True
def cmp_version(ver, cmp_operand, reference):
if cmp_operand == '<=':
return version_key(ver) <= version_key(reference)
if cmp_operand == '<':
return version_key(ver) < version_key(reference)
if cmp_operand == '>=':
return version_key(ver) >= version_key(reference)
if cmp_operand == '>':
return version_key(ver) > version_key(reference)
if cmp_operand == '==':
return version_key(ver) == version_key(reference)
if cmp_operand == '!=':
return version_key(ver) != version_key(reference)
raise Exception('Invalid cmp_operand "' + cmp_operand + '"!')
def passes_filters(param, ver, filters):
for v in filters:
if v[0] == param and not cmp_version(ver, v[1], v[2]):
return False
return True
# A 'category parameter' is a %foo%-encoded identifier that specifies
# a class of tools instead of just one tool, e.g. %tag%
def expand_category_param(param, category_list, t, is_sdk):
for i, ver in enumerate(category_list):
if not ver.strip():
continue
t2 = copy.copy(t)
found_param = False
for p, v in vars(t2).items():
if is_string(v) and param in v:
t2.__dict__[p] = v.replace(param, ver)
found_param = True
if not found_param:
continue
t2.is_old = i < len(category_list) - 2
if hasattr(t2, 'uses'):
t2.uses = [x.replace(param, ver) for x in t2.uses]
# Filter out expanded tools by version requirements, such as ["tag", "<=", "1.37.22"]
if hasattr(t2, 'version_filter'):
passes = passes_filters(param, ver, t2.version_filter)
if not passes:
continue
if is_sdk:
if dependencies_exist(t2):
if not find_sdk(t2.name):
add_sdk(t2)
else:
debug_print('SDK ' + str(t2) + ' already existed in manifest, not adding twice')
else:
if not find_tool(t2.name):
add_tool(t2)
else:
debug_print('Tool ' + str(t2) + ' already existed in manifest, not adding twice')
for tool in manifest['tools']:
t = Tool(tool)
if t.compatible_with_this_os():
if not hasattr(t, 'is_old'):
t.is_old = False
# Expand the metapackages that refer to tags
if '%tag%' in t.version:
expand_category_param('%tag%', emscripten_tags, t, is_sdk=False)
elif '%precompiled_tag%' in t.version:
expand_category_param('%precompiled_tag%', llvm_precompiled_tags, t, is_sdk=False)
elif '%precompiled_tag32%' in t.version:
expand_category_param('%precompiled_tag32%', llvm_precompiled_tags_32bit, t, is_sdk=False)
elif '%precompiled_tag64%' in t.version:
expand_category_param('%precompiled_tag64%', llvm_precompiled_tags_64bit, t, is_sdk=False)
elif '%binaryen_tag%' in t.version:
expand_category_param('%binaryen_tag%', binaryen_tags, t, is_sdk=False)
elif '%releases-tag%' in t.version and 'fastcomp' in t.version:
expand_category_param('%releases-tag%', releases_tags_fastcomp, t, is_sdk=False)
elif '%releases-tag%' in t.version:
expand_category_param('%releases-tag%', releases_tags, t, is_sdk=False)
else:
add_tool(t)
for sdk_str in manifest['sdks']:
sdk_str['id'] = 'sdk'
sdk = Tool(sdk_str)
if sdk.compatible_with_this_os():
if not hasattr(sdk, 'is_old'):
sdk.is_old = False
if '%tag%' in sdk.version:
expand_category_param('%tag%', emscripten_tags, sdk, is_sdk=True)
elif '%precompiled_tag%' in sdk.version:
expand_category_param('%precompiled_tag%', llvm_precompiled_tags, sdk, is_sdk=True)
elif '%precompiled_tag32%' in sdk.version:
expand_category_param('%precompiled_tag32%', llvm_precompiled_tags_32bit, sdk, is_sdk=True)
elif '%precompiled_tag64%' in sdk.version:
expand_category_param('%precompiled_tag64%', llvm_precompiled_tags_64bit, sdk, is_sdk=True)
elif '%releases-tag%' in sdk.version and 'fastcomp' in sdk.version:
expand_category_param('%releases-tag%', releases_tags_fastcomp, sdk, is_sdk=True)
elif '%releases-tag%' in sdk.version:
expand_category_param('%releases-tag%', releases_tags, sdk, is_sdk=True)
else:
add_sdk(sdk)
# Tests if the two given tools can be active at the same time.
# Currently this is only a simple check that forbids the same tool (by id) with
# different versions; more logic may be added in the future.
def can_simultaneously_activate(tool1, tool2):
return tool1.id != tool2.id
def remove_nonexisting_tools(tool_list, log_errors=True):
i = 0
while i < len(tool_list):
tool = tool_list[i]
if not tool.is_installed():
if log_errors:
errlog("Warning: The SDK/tool '" + str(tool) + "' cannot be activated since it is not installed! Skipping this tool...")
tool_list.pop(i)
continue
i += 1
return tool_list
# Expands dependencies for each tool, and removes ones that don't exist.
def process_tool_list(tools_to_activate, log_errors=True):
i = 0
# Gather dependencies for each tool
while i < len(tools_to_activate):
tool = tools_to_activate[i]
deps = tool.recursive_dependencies()
tools_to_activate = tools_to_activate[:i] + deps + tools_to_activate[i:]
i += len(deps) + 1
tools_to_activate = remove_nonexisting_tools(tools_to_activate, log_errors=log_errors)
# Remove conflicting tools
i = 0
while i < len(tools_to_activate):
j = 0
while j < i:
secondary_tool = tools_to_activate[j]
primary_tool = tools_to_activate[i]
if not can_simultaneously_activate(primary_tool, secondary_tool):
tools_to_activate.pop(j)
j -= 1
i -= 1
j += 1
i += 1
return tools_to_activate
def write_set_env_script(env_string):
assert(WINDOWS)
open(EMSDK_SET_ENV, 'w').write(env_string)
# Reconfigure .emscripten to choose the currently activated toolset, set PATH
# and other environment variables.
# Returns the full list of deduced tools that are now active.
def set_active_tools(tools_to_activate, permanently_activate, system):
tools_to_activate = process_tool_list(tools_to_activate, log_errors=True)
if tools_to_activate:
tools = [x for x in tools_to_activate if not x.is_sdk]
print('Setting the following tools as active:\n ' + '\n '.join(map(lambda x: str(x), tools)))
print('')
generate_dot_emscripten(tools_to_activate)
# Construct a .bat script that will be invoked to set env. vars and PATH
# We only do this on windows since emsdk.bat is able to modify the
# calling shell environment. On other platform `source emsdk_env.sh` is
# required.
if WINDOWS:
# always set local environment variables since permanently activating will only set the registry settings and
# will not affect the current session
env_vars_to_add = get_env_vars_to_add(tools_to_activate, system, user=permanently_activate)
env_string = construct_env_with_vars(env_vars_to_add)
write_set_env_script(env_string)
if permanently_activate:
win_set_environment_variables(env_vars_to_add, system, user=permanently_activate)
return tools_to_activate
def currently_active_sdk():
for sdk in reversed(sdks):
if sdk.is_active():
return sdk
return None
def currently_active_tools():
active_tools = []
for tool in tools:
if tool.is_active():
active_tools += [tool]
return active_tools
# http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
def unique_items(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
# Tests if a path is contained in the given list, but with separators normalized.
def normalized_contains(lst, elem):
elem = to_unix_path(elem)
for e in lst:
if elem == to_unix_path(e):
return True
return False
def to_msys_path(p):
p = to_unix_path(p)
new_path = re.sub(r'([a-zA-Z]):/(.*)', r'/\1/\2', p)
if len(new_path) > 3 and new_path[0] == '/' and new_path[2] == '/':
new_path = new_path[0] + new_path[1].lower() + new_path[2:]
return new_path
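
# For instance (illustrative only): to_msys_path('C:/emsdk/upstream') returns
# '/c/emsdk/upstream', which is the form MSYS shells expect on PATH.
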
# Looks at the current PATH and adds and removes entries so that the PATH reflects
# the set of given active tools.
def adjusted_path(tools_to_activate, system=False, user=False):
# These directories should be added to PATH
path_add = get_required_path(tools_to_activate)
# These already exist.
if WINDOWS and not MSYS:
existing_path = win_get_environment_variable('PATH', system=system, user=user, fallback=True).split(ENVPATH_SEPARATOR)
else:
existing_path = os.environ['PATH'].split(ENVPATH_SEPARATOR)
emsdk_root_path = to_unix_path(emsdk_path())
existing_emsdk_tools = []
existing_nonemsdk_path = []
for entry in existing_path:
if to_unix_path(entry).startswith(emsdk_root_path):
existing_emsdk_tools.append(entry)
else:
existing_nonemsdk_path.append(entry)
new_emsdk_tools = []
kept_emsdk_tools = []
for entry in path_add:
if not normalized_contains(existing_emsdk_tools, entry):
new_emsdk_tools.append(entry)
else:
kept_emsdk_tools.append(entry)
whole_path = unique_items(new_emsdk_tools + kept_emsdk_tools + existing_nonemsdk_path)
if MSYS:
# XXX Hack: If running native Windows Python in MSYS prompt where PATH
# entries look like "/c/Windows/System32", os.environ['PATH']
# in Python will transform to show them as "C:\\Windows\\System32", so need
# to reconvert path delimiter back to forward slashes.
whole_path = [to_msys_path(p) for p in whole_path]
new_emsdk_tools = [to_msys_path(p) for p in new_emsdk_tools]
separator = ':' if MSYS else ENVPATH_SEPARATOR
return (separator.join(whole_path), new_emsdk_tools)
def get_env_vars_to_add(tools_to_activate, system, user):
env_vars_to_add = []
newpath, added_path = adjusted_path(tools_to_activate, system, user)
# Don't bother setting the path if there are no changes.
if os.environ['PATH'] != newpath:
env_vars_to_add += [('PATH', newpath)]
if added_path:
errlog('Adding directories to PATH:')
for item in added_path:
errlog('PATH += ' + item)
errlog('')
# A core variable EMSDK points to the root of Emscripten SDK directory.
env_vars_to_add += [('EMSDK', to_unix_path(emsdk_path()))]
env_vars_to_add += [('EM_CONFIG', os.path.normpath(dot_emscripten_path()))]
for tool in tools_to_activate:
config = tool.activated_config()
if 'EMSCRIPTEN_ROOT' in config:
# For older emscripten versions that don't use this default we export
# EM_CACHE.
em_cache_dir = os.path.join(config['EMSCRIPTEN_ROOT'], 'cache')
env_vars_to_add += [('EM_CACHE', em_cache_dir)]
envs = tool.activated_environment()
for env in envs:
key, value = parse_key_value(env)
value = to_native_path(tool.expand_vars(value))
env_vars_to_add += [(key, value)]
return env_vars_to_add
def construct_env(tools_to_activate, system, user):
return construct_env_with_vars(get_env_vars_to_add(tools_to_activate, system, user))
def construct_env_with_vars(env_vars_to_add):
env_string = ''
if env_vars_to_add:
errlog('Setting environment variables:')
for key, value in env_vars_to_add:
# Don't set env vars which are already set to the correct value.
if key not in os.environ or to_unix_path(os.environ[key]) != to_unix_path(value):
errlog(key + ' = ' + value)
if POWERSHELL:
env_string += '$env:' + key + '="' + value + '"\n'
elif CMD:
env_string += 'SET ' + key + '=' + value + '\n'
elif CSH:
env_string += 'setenv ' + key + ' "' + value + '"\n'
elif BASH:
env_string += 'export ' + key + '="' + value + '"\n'
else:
assert False
if 'EMSDK_PYTHON' in env_vars_to_add:
# When using our bundled python we never want the user's
# PYTHONHOME or PYTHONPATH
# See https://github.com/emscripten-core/emsdk/issues/598
if POWERSHELL:
env_string += 'Remove-Item env:PYTHONHOME\n'
env_string += 'Remove-Item env:PYTHONPATH\n'
elif CMD:
env_string += 'set PYTHONHOME=\n'
env_string += 'set PYTHONPATH=\n'
elif CSH:
env_string += 'unsetenv PYTHONHOME\n'
env_string += 'unsetenv PYTHONPATH\n'
elif BASH:
env_string += 'unset PYTHONHOME\n'
env_string += 'unset PYTHONPATH\n'
else:
assert False
return env_string
def error_on_missing_tool(name):
if name.endswith('-64bit') and not is_os_64bit():
errlog("Error: '%s' is only provided for 64-bit OSes." % name)
else:
errlog("Error: No tool or SDK found by name '%s'." % name)
return 1
def exit_with_fastcomp_error():
exit_with_error('The fastcomp backend is not getting new builds or releases. Please use the upstream llvm backend or use an older version than 2.0.0 (such as 1.40.1).')
def expand_sdk_name(name, activating):
if 'upstream-master' in name:
errlog('upstream-master SDK has been renamed upstream-main')
name = name.replace('upstream-master', 'upstream-main')
if name in ('latest-fastcomp', 'latest-releases-fastcomp', 'tot-fastcomp', 'sdk-nightly-latest'):
exit_with_fastcomp_error()
if name in ('latest', 'sdk-latest', 'latest-64bit', 'sdk-latest-64bit'):
    # This is effectively the default SDK
return str(find_latest_releases_sdk('upstream'))
elif name in ('latest-upstream', 'latest-clang-upstream', 'latest-releases-upstream'):
return str(find_latest_releases_sdk('upstream'))
elif name in ('tot', 'sdk-tot', 'tot-upstream'):
if activating:
# When we are activating a tot release, assume that the currently
# installed SDK, if any, is the tot release we want to activate.
# Without this `install tot && activate tot` will race with the builders
# that are producing new builds.
installed = get_installed_sdk_version()
if installed:
debug_print('activating currently installed SDK; not updating tot version')
return 'sdk-releases-upstream-%s-64bit' % installed
return str(find_tot_sdk())
else:
    # Check if it's a release handled by an emscripten-releases version,
    # and if so use that by using the right hash. We support a few notations:
    #   x.y.z[-(upstream|fastcomp)]
    #   sdk-x.y.z[-(upstream|fastcomp)]-64bit
    # TODO: support short notation for old builds too?
backend = None
fullname = name
if '-upstream' in fullname:
fullname = name.replace('-upstream', '')
backend = 'upstream'
elif '-fastcomp' in fullname:
fullname = fullname.replace('-fastcomp', '')
backend = 'fastcomp'
version = fullname.replace('sdk-', '').replace('releases-', '').replace('-64bit', '').replace('tag-', '')
releases_info = load_releases_info()['releases']
release_hash = get_release_hash(version, releases_info)
if release_hash:
# Known release hash
if backend == 'fastcomp' and version_key(version) >= (2, 0, 0):
exit_with_fastcomp_error()
if backend is None:
if version_key(version) >= (1, 39, 0):
backend = 'upstream'
else:
backend = 'fastcomp'
return 'sdk-releases-%s-%s-64bit' % (backend, release_hash)
elif len(version) == 40:
global extra_release_tag
extra_release_tag = version
return 'sdk-releases-%s-%s-64bit' % (backend, version)
return name
def main(args):
if not args:
errlog("Missing command; Type 'emsdk help' to get a list of commands.")
return 1
cmd = args.pop(0)
if cmd in ('help', '--help', '-h'):
print(' emsdk: Available commands:')
print('''
emsdk list [--old] [--uses] - Lists all available SDKs and tools and their
current installation status. With the --old
parameter, also historical versions are
shown. If --uses is passed, displays the
composition of different SDK packages and
dependencies.
emsdk update - Updates emsdk to the newest version. If you have
bootstrapped emsdk via cloning directly from
GitHub, call "git pull" instead to update emsdk.
emsdk install [options] <tool 1> <tool 2> <tool 3> ...
- Downloads and installs given tools or SDKs.
Options can contain:
-j<num>: Specifies the number of cores to use when
building the tool. Default: use one less
than the # of detected cores.
--build=<type>: Controls what kind of build of LLVM to
perform. Pass either 'Debug', 'Release',
'MinSizeRel' or 'RelWithDebInfo'. Default:
'RelWithDebInfo'.
--generator=<type>: Specifies the CMake Generator to be used
during the build. Possible values are the
same as what your CMake supports and whether
the generator is valid depends on the tools
you have installed. Defaults to 'Unix Makefiles'
on *nix systems. If generator name is multiple
words, enclose with single or double quotes.
--shallow: When installing tools from one of the git
development branches, this parameter can be
passed to perform a shallow git clone instead
of a full one. This reduces the amount of
network transfer that is needed. This option
should only be used when you are interested in
downloading one of the development branches,
but are not looking to develop Emscripten
yourself. Default: disabled, i.e. do a full
clone.
--build-tests: If enabled, LLVM is built with internal tests
included. Pass this to enable running test
other.test_llvm_lit in the Emscripten test
suite. Default: disabled.
--enable-assertions: If specified, LLVM is built with assert()
checks enabled. Useful for development
purposes. Default: Enabled
--disable-assertions: Forces assertions off during the build.
--vs2017/--vs2019: If building from source, overrides to build
using the specified compiler. When installing
precompiled packages, this has no effect.
Note: The same compiler specifier must be
passed to the emsdk activate command to
activate the desired version.
Notes on building from source:
To pass custom CMake directives when configuring
LLVM build, specify the environment variable
LLVM_CMAKE_ARGS="param1=value1,param2=value2"
in the environment where the build is invoked.
See README.md for details.
emsdk uninstall <tool/sdk> - Removes the given tool or SDK from disk.''')
if WINDOWS:
print('''
emsdk activate [--permanent] [--system] [--build=type] [--vs2017/--vs2019] <tool/sdk>
- Activates the given tool or SDK in the
environment of the current shell.
- If the `--permanent` option is passed, then the environment
variables are set permanently for the current user.
- If the `--system` option is passed, the registration
is done for all users of the system.
This needs admin privileges
(uses Machine environment variables).
- If a custom compiler version was used to override
the compiler to use, pass the same --vs2017/--vs2019 parameter
here to choose which version to activate.
emcmdprompt.bat - Spawns a new command prompt window with the
Emscripten environment active.''')
else:
print(''' emsdk activate [--build=type] <tool/sdk>
- Activates the given tool or SDK in the
environment of the current shell.''')
print('''
Both commands 'install' and 'activate' accept an optional parameter
'--build=type', which can be used to override what kind of installation
or activation to perform. Possible values for type are Debug, Release,
MinSizeRel or RelWithDebInfo. Note: When overriding a custom build type,
be sure to match the same --build= option to both 'install' and
'activate' commands and the invocation of 'emsdk_env', or otherwise
these commands will default to operating on the default build type
       which is RelWithDebInfo.''')
return 0
# Extracts a boolean command line argument from args and returns True if it was present
def extract_bool_arg(name):
if name in args:
args.remove(name)
return True
return False
arg_old = extract_bool_arg('--old')
arg_uses = extract_bool_arg('--uses')
arg_permanent = extract_bool_arg('--permanent')
arg_global = extract_bool_arg('--global')
arg_system = extract_bool_arg('--system')
if arg_global:
print('--global is deprecated. Use `--system` to set the environment variables for all users')
arg_system = True
if arg_system:
arg_permanent = True
if extract_bool_arg('--embedded'):
errlog('embedded mode is now the only mode available')
if extract_bool_arg('--no-embedded'):
errlog('embedded mode is now the only mode available')
return 1
arg_notty = extract_bool_arg('--notty')
if arg_notty:
global TTY_OUTPUT
TTY_OUTPUT = False
# Replace meta-packages with the real package names.
if cmd in ('update', 'install', 'activate'):
activating = cmd == 'activate'
args = [expand_sdk_name(a, activating=activating) for a in args]
load_dot_emscripten()
load_sdk_manifest()
# Process global args
for i in range(len(args)):
if args[i].startswith('--generator='):
build_generator = re.match(r'''^--generator=['"]?([^'"]+)['"]?$''', args[i])
if build_generator:
global CMAKE_GENERATOR
CMAKE_GENERATOR = build_generator.group(1)
args[i] = ''
else:
errlog("Cannot parse CMake generator string: " + args[i] + ". Try wrapping generator string with quotes")
return 1
elif args[i].startswith('--build='):
build_type = re.match(r'^--build=(.+)$', args[i])
if build_type:
global CMAKE_BUILD_TYPE_OVERRIDE
build_type = build_type.group(1)
build_types = ['Debug', 'MinSizeRel', 'RelWithDebInfo', 'Release']
try:
build_type_index = [x.lower() for x in build_types].index(build_type.lower())
CMAKE_BUILD_TYPE_OVERRIDE = build_types[build_type_index]
args[i] = ''
except:
errlog('Unknown CMake build type "' + build_type + '" specified! Please specify one of ' + str(build_types))
return 1
else:
errlog("Invalid command line parameter " + args[i] + ' specified!')
return 1
args = [x for x in args if x]
if cmd == 'list':
print('')
def installed_sdk_text(name):
sdk = find_sdk(name)
return 'INSTALLED' if sdk and sdk.is_installed() else ''
if (LINUX or MACOS or WINDOWS) and (ARCH == 'x86' or ARCH == 'x86_64'):
print('The *recommended* precompiled SDK download is %s (%s).' % (find_latest_releases_version(), find_latest_releases_hash()))
print()
print('To install/activate it, use one of:')
print(' latest [default (llvm) backend]')
print(' latest-fastcomp [legacy (fastcomp) backend]')
print('')
print('Those are equivalent to installing/activating the following:')
print(' %s %s' % (find_latest_releases_version(), installed_sdk_text(find_latest_releases_sdk('upstream'))))
print(' %s-fastcomp %s' % (find_latest_releases_version(), installed_sdk_text(find_latest_releases_sdk('fastcomp'))))
print('')
else:
print('Warning: your platform does not have precompiled SDKs available.')
print('You may install components from source.')
print('')
print('All recent (non-legacy) installable versions are:')
releases_versions = sorted(
load_releases_versions(),
key=lambda x: [int(v) if v.isdigit() else -1 for v in x.split('.')],
reverse=True,
)
releases_info = load_releases_info()['releases']
for ver in releases_versions:
print(' %s %s' % (ver, installed_sdk_text('sdk-releases-upstream-%s-64bit' % get_release_hash(ver, releases_info))))
print()
    # Use a one-element list so the nested helper functions below can mutate
    # this flag from within their enclosing scope.
has_partially_active_tools = [False]
if sdks:
def find_sdks(needs_compilation):
s = []
for sdk in sdks:
if sdk.is_old and not arg_old:
continue
if sdk.needs_compilation() == needs_compilation:
s += [sdk]
return s
def print_sdks(s):
for sdk in s:
installed = '\tINSTALLED' if sdk.is_installed() else ''
active = '*' if sdk.is_active() else ' '
print(' ' + active + ' {0: <25}'.format(str(sdk)) + installed)
if arg_uses:
for dep in sdk.uses:
print(' - {0: <25}'.format(dep))
print('')
      print('The following additional precompiled SDKs are also available for download:')
print_sdks(find_sdks(False))
print('The following SDKs can be compiled from source:')
print_sdks(find_sdks(True))
if tools:
def find_tools(needs_compilation):
t = []
for tool in tools:
if tool.is_old and not arg_old:
continue
if tool.needs_compilation() != needs_compilation:
continue
t += [tool]
return t
def print_tools(t):
for tool in t:
if tool.is_old and not arg_old:
continue
if tool.can_be_installed() is True:
installed = '\tINSTALLED' if tool.is_installed() else ''
else:
installed = '\tNot available: ' + tool.can_be_installed()
tool_is_active = tool.is_active()
tool_is_env_active = tool_is_active and tool.is_env_active()
if tool_is_env_active:
active = ' * '
elif tool_is_active:
active = '(*)'
            has_partially_active_tools[0] = True
else:
active = ' '
print(' ' + active + ' {0: <25}'.format(str(tool)) + installed)
print('')
print('The following precompiled tool packages are available for download:')
print_tools(find_tools(needs_compilation=False))
print('The following tools can be compiled from source:')
print_tools(find_tools(needs_compilation=True))
else:
if is_emsdk_sourced_from_github():
print("There are no tools available. Run 'git pull' to fetch the latest set of tools.")
else:
print("There are no tools available. Run 'emsdk update' to fetch the latest set of tools.")
print('')
print('Items marked with * are activated for the current user.')
if has_partially_active_tools[0]:
env_cmd = 'emsdk_env.bat' if WINDOWS else 'source ./emsdk_env.sh'
print('Items marked with (*) are selected for use, but your current shell environment is not configured to use them. Type "' + env_cmd + '" to set up your current shell to use them' + (', or call "emsdk activate --permanent <name_of_sdk>" to permanently activate them.' if WINDOWS else '.'))
if not arg_old:
print('')
print("To access the historical archived versions, type 'emsdk list --old'")
print('')
if is_emsdk_sourced_from_github():
print('Run "git pull" to pull in the latest list.')
else:
print('Run "./emsdk update" to pull in the latest list.')
return 0
elif cmd == 'construct_env':
# Clean up old temp file up front, in case of failure later before we get
# to write out the new one.
tools_to_activate = currently_active_tools()
tools_to_activate = process_tool_list(tools_to_activate, log_errors=True)
env_string = construct_env(tools_to_activate, arg_system, arg_permanent)
if WINDOWS and not BASH:
write_set_env_script(env_string)
else:
sys.stdout.write(env_string)
return 0
elif cmd == 'update':
update_emsdk()
if WINDOWS:
# Clean up litter after old emsdk update which may have left this temp
# file around.
rmfile(sdk_path(EMSDK_SET_ENV))
return 0
elif cmd == 'update-tags':
    errlog('`update-tags` is no longer needed. To install the latest tot release just run `install tot`')
return 0
elif cmd == 'activate':
if arg_permanent:
print('Registering active Emscripten environment permanently')
print('')
tools_to_activate = currently_active_tools()
args = [x for x in args if not x.startswith('--')]
for arg in args:
tool = find_tool(arg)
if tool is None:
tool = find_sdk(arg)
if tool is None:
return error_on_missing_tool(arg)
tools_to_activate += [tool]
if not tools_to_activate:
errlog('No tools/SDKs specified to activate! Usage:\n emsdk activate tool/sdk1 [tool/sdk2] [...]')
return 1
active_tools = set_active_tools(tools_to_activate, permanently_activate=arg_permanent, system=arg_system)
if not active_tools:
errlog('No tools/SDKs found to activate! Usage:\n emsdk activate tool/sdk1 [tool/sdk2] [...]')
return 1
if WINDOWS and not arg_permanent:
      errlog('The changes made to environment variables only apply to the currently running shell instance. Use \'emsdk_env.bat\' to re-enter this environment later, or if you\'d like to register this environment permanently, rerun this command with the option --permanent.')
return 0
elif cmd == 'install':
global BUILD_FOR_TESTING, ENABLE_LLVM_ASSERTIONS, CPU_CORES, GIT_CLONE_SHALLOW
# Process args
for i in range(len(args)):
if args[i].startswith('-j'):
multicore = re.match(r'^-j(\d+)$', args[i])
if multicore:
CPU_CORES = int(multicore.group(1))
args[i] = ''
else:
errlog("Invalid command line parameter " + args[i] + ' specified!')
return 1
elif args[i] == '--shallow':
GIT_CLONE_SHALLOW = True
args[i] = ''
elif args[i] == '--build-tests':
BUILD_FOR_TESTING = True
args[i] = ''
elif args[i] == '--enable-assertions':
ENABLE_LLVM_ASSERTIONS = 'ON'
args[i] = ''
elif args[i] == '--disable-assertions':
ENABLE_LLVM_ASSERTIONS = 'OFF'
args[i] = ''
args = [x for x in args if x]
if not args:
errlog("Missing parameter. Type 'emsdk install <tool name>' to install a tool or an SDK. Type 'emsdk list' to obtain a list of available tools. Type 'emsdk install latest' to automatically install the newest version of the SDK.")
return 1
for t in args:
tool = find_tool(t)
if tool is None:
tool = find_sdk(t)
if tool is None:
return error_on_missing_tool(t)
tool.install()
return 0
elif cmd == 'uninstall':
if not args:
errlog("Syntax error. Call 'emsdk uninstall <tool name>'. Call 'emsdk list' to obtain a list of available tools.")
return 1
tool = find_tool(args[0])
if tool is None:
errlog("Error: Tool by name '" + args[0] + "' was not found.")
return 1
tool.uninstall()
return 0
errlog("Unknown command '" + cmd + "' given! Type 'emsdk help' to get a list of commands.")
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
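
# A minimal sketch (not part of emsdk itself) of driving the command-line
# interface documented in the help text above; the './emsdk' path and the
# 'latest' SDK name are assumptions made for this example.
import subprocess

def bootstrap_latest_sdk(emsdk='./emsdk'):
  # 'install' downloads and unpacks the SDK; 'activate' records it as the
  # default and, on Windows, prepares the environment-setup script.
  subprocess.check_call([emsdk, 'install', 'latest'])
  subprocess.check_call([emsdk, 'activate', 'latest'])
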

# ---- File: ucscsdk/mometa/storage/StorageLunDisk.py (repo: parag-may4/ucscsdk, license: Apache-2.0) ----

"""This module contains the general information for StorageLunDisk ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class StorageLunDiskConsts():
pass
class StorageLunDisk(ManagedObject):
"""This is StorageLunDisk class."""
consts = StorageLunDiskConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageLunDisk", "storageLunDisk", "disk-[id]", VersionMeta.Version131a, "InputOutput", 0x1f, [], ["read-only"], [u'storageVirtualDrive'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.status = None
ManagedObject.__init__(self, "StorageLunDisk", parent_mo_or_dn, **kwargs)
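
# Hedged usage sketch (not taken from the ucscsdk docs): the mo_meta above marks
# this object as read-only ("Get" access), so instances are normally obtained by
# querying a UCS Central session rather than created by hand. Constructing one
# directly only needs the parent DN and the naming property 'id', e.g.
#
#   disk = StorageLunDisk(parent_mo_or_dn='<parent-virtual-drive-dn>', id='1')
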

# ---- File: etc/dbus-serialbattery/ant.py (repo: Carstijn/dbus-serialbattery, license: MIT) ----

# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from battery import Protection, Battery, Cell
from utils import *
from struct import *
class Ant(Battery):
def __init__(self, port, baud):
super(Ant, self).__init__(port, baud)
self.type = self.BATTERYTYPE
command_general = b"\xDB\xDB\x00\x00\x00\x00"
# command_capacity_low = b"\x5A\x5A\x1F\x00\x00\x1F"
# command_capacity_high = b"\x5A\x5A\x20\x00\x00\x20"
balancing = 0
BATTERYTYPE = "ANT"
LENGTH_CHECK = -1
LENGTH_POS = 139
LENGTH_FIXED = 140
def test_connection(self):
# call a function that will connect to the battery, send a command and retrieve the result.
# The result or call should be unique to this BMS. Battery name or version, etc.
# Return True if success, False for failure
return self.read_status_data()
def get_settings(self):
        # After a successful connection, get_settings will be called to set up the battery.
        # Set the current limits, populate cell count, etc.
        # Return True if success, False for failure.
self.max_battery_current = MAX_BATTERY_CURRENT
self.max_battery_discharge_current = MAX_BATTERY_DISCHARGE_CURRENT
self.version = "ANT BMS V2.0"
logger.info(self.hardware_version)
return True
def refresh_data(self):
# call all functions that will refresh the battery data.
# This will be called for every iteration (1 second)
# Return True if success, False for failure
result = self.read_status_data()
return result
def read_status_data(self):
status_data = self.read_serial_data_ant(self.command_general)
# check if connection success
if status_data is False:
return False
voltage = unpack_from('>H', status_data, 4)
self.voltage = voltage[0]*0.1
current, self.soc = unpack_from('>lB', status_data, 70)
self.current = 0.0 if current == 0 else current / -10
self.cell_count = unpack_from('>b', status_data, 123)[0]
self.max_battery_voltage = MAX_CELL_VOLTAGE * self.cell_count
self.min_battery_voltage = MIN_CELL_VOLTAGE * self.cell_count
cell_max_no, cell_max_voltage, cell_min_no, cell_min_voltage = unpack_from('>bhbh', status_data, 115)
self.cell_max_no = cell_max_no - 1
self.cell_min_no = cell_min_no - 1
self.cell_max_voltage = cell_max_voltage / 1000
self.cell_min_voltage = cell_min_voltage / 1000
capacity = unpack_from('>L', status_data, 75)
self.capacity = capacity[0] / 1000000
capacity_remain = unpack_from('>L', status_data, 79)
self.capacity_remain = capacity_remain[0] / 1000000
total_ah_drawn = unpack_from('>L', status_data, 83)
self.total_ah_drawn = total_ah_drawn[0] / 1000
self.cycles = self.total_ah_drawn / self.capacity
self.charge_fet, self.discharge_fet, self.balancing = unpack_from('>bbb',status_data, 103)
self.temp1, self.temp2 = unpack_from('>bxb',status_data, 96)
self.hardware_version = "ANT BMS " + str(self.cell_count) + " cells"
# Alarms
self.protection.voltage_high = 2 if self.charge_fet==2 else 0
self.protection.voltage_low = 2 if self.discharge_fet==2 or self.discharge_fet==5 else 0
self.protection.voltage_cell_low = 2 if self.cell_min_voltage < MIN_CELL_VOLTAGE - 0.1 else 1 if self.cell_min_voltage < MIN_CELL_VOLTAGE else 0
self.protection.temp_high_charge = 1 if self.charge_fet==3 or self.charge_fet==6 else 0
self.protection.temp_high_discharge = 1 if self.discharge_fet==7 or self.discharge_fet==6 else 0
self.protection.current_over = 2 if self.charge_fet==3 else 0
self.protection.current_under = 2 if self.discharge_fet==3 else 0
return True
def get_balancing(self):
return 1 if self.balancing or self.balancing == 2 else 0
def read_serial_data_ant(self, command):
        # use the read_serial_data() function to read the data and then do BMS specific checks (crc, start bytes, etc)
data = read_serial_data(command, self.port, self.baud_rate,
self.LENGTH_POS, self.LENGTH_CHECK, self.LENGTH_FIXED)
if data is False:
logger.info(">>> ERROR: Incorrect Data")
return False
if len(data) == self.LENGTH_FIXED:
return data
else:
logger.info(">>> ERROR: Incorrect Reply")
return False
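
# A small self-contained sketch (not part of the driver) of the decoding used in
# read_status_data() above: the pack voltage sits at byte offset 4 as a
# big-endian uint16 in 0.1 V steps. The frame below is fabricated test data.
from struct import pack, unpack_from

frame = bytearray(140)            # LENGTH_FIXED-sized status frame
frame[4:6] = pack('>H', 532)      # 53.2 V encoded as 532
voltage = unpack_from('>H', frame, 4)[0] * 0.1
assert round(voltage, 1) == 53.2
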

# ---- File: spark/twitter_api.py (repo: epu-ntua/PSYMBIOSYS-Influencial, license: MIT) ----

import twitter
import time
api = twitter.Api(consumer_key='GDWyb1WBb0yJwYBwQ9g3m2nFx',
consumer_secret='BK78W93LakpOwPL84otIl2dVZQ0gNjAYPiFWQbCfdFO6UiF0lF',
access_token_key='95257276-aptBPzbJCDqJWKKZ65RwgtJehNiOTEgoyASzaIPqQ',
access_token_secret='0qHtIAuQSHUXEeTZ5AHwc8SJaHULyJcPyhEdc5QI5bF1I')
def member_metric(member):
tweets = api.GetUserTimeline(user_id =member.id)
mentions = 0
for tweet in tweets:
mentions = mentions + tweet.retweet_count
    if len(tweets) > 0:
return (mentions*1.0)/len(tweets)
else:
return 0
def profile(member):
member = api.GetUser(screen_name=member)
url = member.url
description = member.description
id = member.id
# metric = member_metric(member)
metric = 0
# TODO improve the call to start from the last tweet
last_tweet_id = None
# time.sleep(10)
return {
"metric":metric,
"full_name":member.name,
"profile_image_url":str(member.profile_image_url).replace("_normal","")
}
# print profile("apilama")

# ---- File: app/api.py (repo: romanpeters/url-shortener, license: MIT) ----

from urllib.parse import urlparse
import validators
from flask import request, jsonify, abort
from app import database as db
from app.url import fix_url, fix_url_id, add_url
from app import app
@app.route("/api/", methods=['GET'])
def endpoints():
"""Overview of the API endpoints."""
return jsonify({'endpoints': endpoints})
@app.route("/api/links/", methods=['GET'])
def get_all_links():
"""Overview of all links."""
print(request.url)
session = db.Session()
url_entries = session.query(db.URL).all()
session.close()
links = []
for url_entry in url_entries:
links.append({"url": url_entry.url,
"id": url_entry.url_id,
"shortcut": f"{request.host_url.replace('http://', 'https://')}{url_entry.url_id}",
"visits": url_entry.visits})
return jsonify({"links": links})
@app.route("/api/links/<string:url_id>/", methods=['GET'])
def get_link(url_id):
"""Get info on a link."""
session = db.Session()
url_entry = session.query(db.URL).filter_by(url_id=url_id).first()
session.close()
if not url_entry:
return abort(404)
result = {"url": url_entry.url,
"id": url_entry.url_id,
"shortcut": f"{request.host_url}{url_entry.url_id}",
"visits": url_entry.visits}
return jsonify({"links": [result]})
@app.route("/api/links/", methods=['POST'])
@app.route("/api/links/<string:url_id>/", methods=['POST'])
def set_link(url_id=None):
"""Create a new link."""
if url_id:
if len(fix_url_id(url_id)) <= 1:
return abort(400)
if not request.json \
or 'url' not in request.json \
or (request.url.split('/')[-1] != 'links' and 'url_id' in request.json): # url_id is given twice
return abort(400)
url = request.json['url']
# print(url, '->', end=" ")
url = fix_url(url)
# print(url)
if not validators.url(url):
return abort(400)
elif urlparse(url).netloc == urlparse(request.host_url).netloc:
return abort(403)
else:
url_id = add_url(url=url, url_id=url_id)
result = {"url": url,
"id": url_id,
"shortcut": f"{request.host_url}{url_id}"}
return jsonify({"links": [result]}), 201
endpoints = [{"path": "/api/",
"name": endpoints.__name__,
"method": "GET",
"description": endpoints.__doc__},
{"path": "/api/links/",
"name": get_all_links.__name__,
"method": "GET",
"description": get_all_links.__doc__},
{"path": "/api/links/{{ id }}/",
"name": get_link.__name__,
"method": "GET",
"description": get_link.__doc__},
{"path": "/api/links/",
"name": set_link.__name__,
"method": "POST",
"description": set_link.__doc__,
"data": {"url": "{{ destination_url }}",
"url_id": "{{ id }}"}},
{"path": "/api/links/{{ id }}/",
"name": set_link.__name__,
"method": "POST",
"description": set_link.__doc__,
"data": {"url": "{{ destination_url }}"}}
]
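
# A minimal client sketch for the endpoints above (not part of the service); the
# base URL and the example destination are assumptions made for illustration.
import requests

def create_and_fetch_link(base='http://localhost:5000'):
    # POST creates (and normalizes) the link; the response echoes its short id.
    created = requests.post(base + '/api/links/', json={'url': 'example.com'}).json()
    url_id = created['links'][0]['id']
    # GET returns the stored URL, the generated shortcut and the visit counter.
    return requests.get(base + '/api/links/' + url_id + '/').json()
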

# ---- File: pages/migrations/0001_initial.py (repo: mikehagquist/fitapps5, license: MIT) ----

# Generated by Django 2.1.2 on 2019-03-20 11:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
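
# The models.py definition implied by this initial migration would look roughly
# like the sketch below (reconstructed from the fields above; the app's actual
# model may differ in details such as __str__ or Meta options).
#
#   from django.conf import settings
#   from django.db import models
#   from django.utils import timezone
#
#   class Post(models.Model):
#       author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#       title = models.CharField(max_length=200)
#       text = models.TextField()
#       created_date = models.DateTimeField(default=timezone.now)
#       published_date = models.DateTimeField(blank=True, null=True)
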

# ---- File: nicos_mlz/pgaa/setups/pgai.py (repo: jkrueger1/nicos, licenses: CC-BY-3.0, Apache-2.0, CC-BY-4.0) ----

description = 'PGAA setup with Huber sample table'
group = 'basic'
sysconfig = dict(
datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink']
)
includes = [
'system',
'reactor',
'nl4b',
'pressure',
'sampletable',
'pilz',
'detector',
'collimation',
]
devices = dict(
mcasink = device('nicos_mlz.pgaa.devices.MCASink',
settypes = {'point'},
detectors = ['_60p', 'LEGe'],
),
chnsink = device('nicos_mlz.pgaa.devices.CHNSink',
settypes = {'point'},
detectors = ['_60p', 'LEGe'],
),
csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink',
settypes = {'point'},
),
)
startupcode = """
SetDetectors('_60p', 'LEGe')
SetEnvironment(chamber_pressure)
"""

# ---- File: data/process.py (repo: CompVis/behavior-driven-video-synthesis, license: Apache-2.0) ----

#!/usr/bin/env python3
# based on the file 'process_all.py' from https://github.com/anibali/h36m-fetch
import argparse
from os import path, makedirs, listdir
from shutil import move
import traceback
import cdflib
import numpy as np
from subprocess import call
from tempfile import TemporaryDirectory
from tqdm import tqdm
from data.metadata import load_h36m_metadata
metadata = load_h36m_metadata()
# Subjects to include when preprocessing
included_subjects = {
"S1": 1,
"S5": 5,
"S6": 6,
"S7": 7,
"S8": 8,
"S9": 9,
"S11": 11,
}
# Rather than include every frame from every video, we can instead wait for the pose to change
# significantly before storing a new example.
def select_frame_indices_to_include(subject, poses_3d_univ):
    # Process every single frame:
    return np.arange(0, len(poses_3d_univ))
def infer_camera_intrinsics(points2d, points3d):
"""Infer camera instrinsics from 2D<->3D point correspondences."""
pose2d = points2d.reshape(-1, 2)
pose3d = points3d.reshape(-1, 3)
x3d = np.stack([pose3d[:, 0], pose3d[:, 2]], axis=-1)
x2d = (pose2d[:, 0] * pose3d[:, 2])
alpha_x, x_0 = list(np.linalg.lstsq(x3d, x2d, rcond=-1)[0].flatten())
y3d = np.stack([pose3d[:, 1], pose3d[:, 2]], axis=-1)
y2d = (pose2d[:, 1] * pose3d[:, 2])
alpha_y, y_0 = list(np.linalg.lstsq(y3d, y2d, rcond=-1)[0].flatten())
return np.array([alpha_x, x_0, alpha_y, y_0])
def process_view(ddir, out_dir, subject, action, subaction, camera):
subj_dir = path.join(ddir,'extracted', subject)
base_filename = metadata.get_base_filename(subject, action, subaction, camera)
poses_3d_univ = cdflib.CDF(path.join(subj_dir, 'Poses_D3_Positions_mono_universal', base_filename + '.cdf'))
poses_3d_univ = np.array(poses_3d_univ['Pose'])
poses_3d_univ = poses_3d_univ.reshape(poses_3d_univ.shape[1], 32, 3)
frame_indices = select_frame_indices_to_include(subject, poses_3d_univ)
frames = frame_indices + 1
video_file = path.join(subj_dir, 'Videos', base_filename + '.mp4')
frames_dir = path.join(out_dir, 'imageSequence', camera)
makedirs(frames_dir, exist_ok=True)
# Check to see whether the frame images have already been extracted previously
existing_files = {f for f in listdir(frames_dir)}
frames_are_extracted = True
for i in frames:
filename = 'img_%06d.jpg' % i
if filename not in existing_files:
frames_are_extracted = False
break
if not frames_are_extracted:
with TemporaryDirectory() as tmp_dir:
# Use ffmpeg to extract frames into a temporary directory
call([
'ffmpeg',
'-nostats', '-loglevel', 'error',
'-i', video_file,
'-qscale:v', '3',
path.join(tmp_dir, 'img_%06d.jpg')
])
# Move included frame images into the output directory
for i in frames:
filename = 'img_%06d.jpg' % i
move(
path.join(tmp_dir, filename),
path.join(frames_dir, filename)
)
def process_subaction(ddir,subject, action, subaction):
datasets = {}
out_dir = path.join(ddir,'processed','all', subject, metadata.action_names[action] + '-' + subaction)
makedirs(out_dir, exist_ok=True)
for camera in tqdm(metadata.camera_ids, ascii=True, leave=False):
try:
process_view(ddir, out_dir, subject, action, subaction, camera)
except:
tqdm.write('!!! Error processing sequence, skipping: ' + \
repr((subject, action, subaction, camera)))
tqdm.write(traceback.format_exc())
continue
def process_all(ddir):
sequence_mappings = metadata.sequence_mappings
subactions = []
for subject in included_subjects.keys():
subactions += [
(subject, action, subaction)
for action, subaction in sequence_mappings[subject].keys()
if int(action) > 1 # Exclude '_ALL'
]
for subject, action, subaction in tqdm(subactions, ascii=True, leave=False):
process_subaction(ddir,subject, action, subaction)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--datadir", type=str,
help="path to the data",required=True)
args = parser.parse_args()
ddir = args.datadir
process_all(ddir)
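
# Self-check sketch (not part of the preprocessing pipeline): synthesize points
# with known pinhole intrinsics and confirm that infer_camera_intrinsics()
# recovers them. The focal lengths and principal point are arbitrary test values.
def _check_intrinsics_recovery():
    alpha_x, x_0, alpha_y, y_0 = 1145.0, 512.5, 1143.0, 515.0
    rng = np.random.default_rng(0)
    pts3d = rng.uniform([-1.0, -1.0, 3.0], [1.0, 1.0, 6.0], size=(100, 3))
    pts2d = np.stack([alpha_x * pts3d[:, 0] / pts3d[:, 2] + x_0,
                      alpha_y * pts3d[:, 1] / pts3d[:, 2] + y_0], axis=-1)
    recovered = infer_camera_intrinsics(pts2d, pts3d)
    assert np.allclose(recovered, [alpha_x, x_0, alpha_y, y_0])
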

# ---- File: Lib/test/libregrtest/main.py (repo: alvinlindstam/RustPython, license: MIT) ----

import datetime
import faulthandler
import json
import locale
import os
import platform
import random
import re
import sys
import sysconfig
import tempfile
import time
import unittest
from test.libregrtest.cmdline import _parse_args
from test.libregrtest.runtest import (
findtests, runtest, get_abs_module,
STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN,
PROGRESS_MIN_TIME, format_test_result)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import removepy, count, format_duration, printlist
from test import support
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. This eases the cleanup of leftover
# files using the "make distclean" command.
if sysconfig.is_python_build():
TEMPDIR = sysconfig.get_config_var('abs_builddir')
if TEMPDIR is None:
# bpo-30284: On Windows, only srcdir is available. Using abs_builddir
# mostly matters on UNIX when building Python out of the source tree,
# especially when the source tree is read only.
TEMPDIR = sysconfig.get_config_var('srcdir')
TEMPDIR = os.path.join(TEMPDIR, 'build')
else:
TEMPDIR = tempfile.gettempdir()
TEMPDIR = os.path.abspath(TEMPDIR)
class Regrtest:
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
def __init__(self):
# Namespace of command line options
self.ns = None
# tests
self.tests = []
self.selected = []
# test results
self.good = []
self.bad = []
self.skipped = []
self.resource_denieds = []
self.environment_changed = []
self.run_no_tests = []
self.rerun = []
self.first_result = None
self.interrupted = False
# used by --slow
self.test_times = []
# used by --coverage, trace.Trace instance
self.tracer = None
# used to display the progress bar "[ 3/100]"
self.start_time = time.monotonic()
self.test_count = ''
self.test_count_width = 1
# used by --single
self.next_single_test = None
self.next_single_filename = None
# used by --junit-xml
self.testsuite_xml = None
self.win_load_tracker = None
def get_executed(self):
return (set(self.good) | set(self.bad) | set(self.skipped)
| set(self.resource_denieds) | set(self.environment_changed)
| set(self.run_no_tests))
def accumulate_result(self, result, rerun=False):
test_name = result.test_name
ok = result.result
if ok not in (CHILD_ERROR, INTERRUPTED) and not rerun:
self.test_times.append((result.test_time, test_name))
if ok == PASSED:
self.good.append(test_name)
elif ok in (FAILED, CHILD_ERROR):
if not rerun:
self.bad.append(test_name)
elif ok == ENV_CHANGED:
self.environment_changed.append(test_name)
elif ok == SKIPPED:
self.skipped.append(test_name)
elif ok == RESOURCE_DENIED:
self.skipped.append(test_name)
self.resource_denieds.append(test_name)
elif ok == TEST_DID_NOT_RUN:
self.run_no_tests.append(test_name)
elif ok == INTERRUPTED:
self.interrupted = True
else:
raise ValueError("invalid test result: %r" % ok)
if rerun and ok not in {FAILED, CHILD_ERROR, INTERRUPTED}:
self.bad.remove(test_name)
xml_data = result.xml_data
if xml_data:
import xml.etree.ElementTree as ET
for e in xml_data:
try:
self.testsuite_xml.append(ET.fromstring(e))
except ET.ParseError:
print(xml_data, file=sys.__stderr__)
raise
def display_progress(self, test_index, text):
if self.ns.quiet:
return
# "[ 51/405/1] test_tcl passed"
line = f"{test_index:{self.test_count_width}}{self.test_count}"
fails = len(self.bad) + len(self.environment_changed)
if fails and not self.ns.pgo:
line = f"{line}/{fails}"
line = f"[{line}] {text}"
# add the system load prefix: "load avg: 1.80 "
load_avg = self.getloadavg()
if load_avg is not None:
line = f"load avg: {load_avg:.2f} {line}"
# add the timestamp prefix: "0:01:05 "
test_time = time.monotonic() - self.start_time
test_time = datetime.timedelta(seconds=int(test_time))
line = f"{test_time} {line}"
print(line, flush=True)
def parse_args(self, kwargs):
ns = _parse_args(sys.argv[1:], **kwargs)
if ns.timeout and not hasattr(faulthandler, 'dump_traceback_later'):
print("Warning: The timeout option requires "
"faulthandler.dump_traceback_later", file=sys.stderr)
ns.timeout = None
if ns.xmlpath:
support.junit_xml_list = self.testsuite_xml = []
# Strip .py extensions.
removepy(ns.args)
return ns
def find_tests(self, tests):
self.tests = tests
if self.ns.single:
self.next_single_filename = os.path.join(TEMPDIR, 'pynexttest')
try:
with open(self.next_single_filename, 'r') as fp:
next_test = fp.read().strip()
self.tests = [next_test]
except OSError:
pass
if self.ns.fromfile:
self.tests = []
# regex to match 'test_builtin' in line:
# '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
with open(os.path.join(support.SAVEDCWD, self.ns.fromfile)) as fp:
for line in fp:
line = line.split('#', 1)[0]
line = line.strip()
match = regex.search(line)
if match is not None:
self.tests.append(match.group())
removepy(self.tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if self.ns.exclude:
for arg in self.ns.args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
self.ns.args = []
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if self.ns.testdir:
alltests = findtests(self.ns.testdir, list(), set())
else:
alltests = findtests(self.ns.testdir, stdtests, nottests)
if not self.ns.fromfile:
self.selected = self.tests or self.ns.args or alltests
else:
self.selected = self.tests
if self.ns.single:
self.selected = self.selected[:1]
try:
pos = alltests.index(self.selected[0])
self.next_single_test = alltests[pos + 1]
except IndexError:
pass
# Remove all the selected tests that precede start if it's set.
if self.ns.start:
try:
del self.selected[:self.selected.index(self.ns.start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests"
% self.ns.start, file=sys.stderr)
if self.ns.randomize:
if self.ns.random_seed is None:
self.ns.random_seed = random.randrange(10000000)
random.seed(self.ns.random_seed)
random.shuffle(self.selected)
def list_tests(self):
for name in self.selected:
print(name)
def _list_cases(self, suite):
for test in suite:
if isinstance(test, unittest.loader._FailedTest):
continue
if isinstance(test, unittest.TestSuite):
self._list_cases(test)
elif isinstance(test, unittest.TestCase):
if support.match_test(test):
print(test.id())
def list_cases(self):
support.verbose = False
support.set_match_tests(self.ns.match_tests)
for test_name in self.selected:
abstest = get_abs_module(self.ns, test_name)
try:
suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
self._list_cases(suite)
except unittest.SkipTest:
self.skipped.append(test_name)
if self.skipped:
print(file=sys.stderr)
print(count(len(self.skipped), "test"), "skipped:", file=sys.stderr)
printlist(self.skipped, file=sys.stderr)
def rerun_failed_tests(self):
self.ns.verbose = True
self.ns.failfast = False
self.ns.verbose3 = False
self.first_result = self.get_tests_result()
print()
print("Re-running failed tests in verbose mode")
self.rerun = self.bad[:]
for test_name in self.rerun:
print(f"Re-running {test_name} in verbose mode", flush=True)
self.ns.verbose = True
result = runtest(self.ns, test_name)
self.accumulate_result(result, rerun=True)
if result.result == INTERRUPTED:
break
if self.bad:
print(count(len(self.bad), 'test'), "failed again:")
printlist(self.bad)
self.display_result()
def display_result(self):
# If running the test suite for PGO then no one cares about results.
if self.ns.pgo:
return
print()
print("== Tests result: %s ==" % self.get_tests_result())
if self.interrupted:
print("Test suite interrupted by signal SIGINT.")
omitted = set(self.selected) - self.get_executed()
if omitted:
print()
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if self.good and not self.ns.quiet:
print()
if (not self.bad
and not self.skipped
and not self.interrupted
and len(self.good) > 1):
print("All", end=' ')
print(count(len(self.good), "test"), "OK.")
if self.ns.print_slow:
self.test_times.sort(reverse=True)
print()
print("10 slowest tests:")
for test_time, test in self.test_times[:10]:
print("- %s: %s" % (test, format_duration(test_time)))
if self.bad:
print()
print(count(len(self.bad), "test"), "failed:")
printlist(self.bad)
if self.environment_changed:
print()
print("{} altered the execution environment:".format(
count(len(self.environment_changed), "test")))
printlist(self.environment_changed)
if self.skipped and not self.ns.quiet:
print()
print(count(len(self.skipped), "test"), "skipped:")
printlist(self.skipped)
if self.rerun:
print()
print("%s:" % count(len(self.rerun), "re-run test"))
printlist(self.rerun)
if self.run_no_tests:
print()
print(count(len(self.run_no_tests), "test"), "run no tests:")
printlist(self.run_no_tests)
def run_tests_sequential(self):
if self.ns.trace:
import trace
self.tracer = trace.Trace(trace=False, count=True)
        # Snapshot the module names so that newly imported test modules can be
        # detected below (a live dict view would also contain them).
        save_modules = set(sys.modules)
print("Run tests sequentially")
previous_test = None
for test_index, test_name in enumerate(self.tests, 1):
start_time = time.monotonic()
text = test_name
if previous_test:
text = '%s -- %s' % (text, previous_test)
self.display_progress(test_index, text)
if self.tracer:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
cmd = ('result = runtest(self.ns, test_name); '
'self.accumulate_result(result)')
ns = dict(locals())
self.tracer.runctx(cmd, globals=globals(), locals=ns)
result = ns['result']
else:
result = runtest(self.ns, test_name)
self.accumulate_result(result)
if result.result == INTERRUPTED:
break
previous_test = format_test_result(result)
test_time = time.monotonic() - start_time
if test_time >= PROGRESS_MIN_TIME:
previous_test = "%s in %s" % (previous_test, format_duration(test_time))
            elif result.result == PASSED:
# be quiet: say nothing if the test passed shortly
previous_test = None
# Unload the newly imported modules (best effort finalization)
        for module in list(sys.modules):
if module not in save_modules and module.startswith("test."):
support.unload(module)
if previous_test:
print(previous_test)
def _test_forever(self, tests):
while True:
for test_name in tests:
yield test_name
if self.bad:
return
if self.ns.fail_env_changed and self.environment_changed:
return
def display_header(self):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
# TODO: Add platform.platform
# print("==", platform.platform(aliased=True),
# "%s-endian" % sys.byteorder)
print("== cwd:", os.getcwd())
cpu_count = os.cpu_count()
if cpu_count:
print("== CPU count:", cpu_count)
print("== encodings: locale=%s, FS=%s"
% (locale.getpreferredencoding(False),
sys.getfilesystemencoding()))
def get_tests_result(self):
result = []
if self.bad:
result.append("FAILURE")
elif self.ns.fail_env_changed and self.environment_changed:
result.append("ENV CHANGED")
elif not any((self.good, self.bad, self.skipped, self.interrupted,
self.environment_changed)):
result.append("NO TEST RUN")
if self.interrupted:
result.append("INTERRUPTED")
if not result:
result.append("SUCCESS")
result = ', '.join(result)
if self.first_result:
result = '%s then %s' % (self.first_result, result)
return result
def run_tests(self):
# For a partial run, we do not need to clutter the output.
if (self.ns.header
or not(self.ns.pgo or self.ns.quiet or self.ns.single
or self.tests or self.ns.args)):
self.display_header()
if self.ns.huntrleaks:
warmup, repetitions, _ = self.ns.huntrleaks
if warmup < 3:
msg = ("WARNING: Running tests with --huntrleaks/-R and less than "
"3 warmup repetitions can give false positives!")
print(msg, file=sys.stdout, flush=True)
if self.ns.randomize:
print("Using random seed", self.ns.random_seed)
if self.ns.forever:
self.tests = self._test_forever(list(self.selected))
self.test_count = ''
self.test_count_width = 3
else:
self.tests = iter(self.selected)
self.test_count = '/{}'.format(len(self.selected))
self.test_count_width = len(self.test_count) - 1
if self.ns.use_mp:
from test.libregrtest.runtest_mp import run_tests_multiprocess
run_tests_multiprocess(self)
else:
self.run_tests_sequential()
def finalize(self):
if self.win_load_tracker is not None:
self.win_load_tracker.close()
self.win_load_tracker = None
if self.next_single_filename:
if self.next_single_test:
with open(self.next_single_filename, 'w') as fp:
fp.write(self.next_single_test + '\n')
else:
os.unlink(self.next_single_filename)
if self.tracer:
r = self.tracer.results()
r.write_results(show_missing=True, summary=True,
coverdir=self.ns.coverdir)
print()
duration = time.monotonic() - self.start_time
print("Total duration: %s" % format_duration(duration))
print("Tests result: %s" % self.get_tests_result())
if self.ns.runleaks:
os.system("leaks %d" % os.getpid())
def save_xml_result(self):
if not self.ns.xmlpath and not self.testsuite_xml:
return
import xml.etree.ElementTree as ET
root = ET.Element("testsuites")
# Manually count the totals for the overall summary
totals = {'tests': 0, 'errors': 0, 'failures': 0}
for suite in self.testsuite_xml:
root.append(suite)
for k in totals:
try:
totals[k] += int(suite.get(k, 0))
except ValueError:
pass
for k, v in totals.items():
root.set(k, str(v))
xmlpath = os.path.join(support.SAVEDCWD, self.ns.xmlpath)
with open(xmlpath, 'wb') as f:
for s in ET.tostringlist(root):
f.write(s)
def main(self, tests=None, **kwargs):
global TEMPDIR
self.ns = self.parse_args(kwargs)
if self.ns.tempdir:
TEMPDIR = self.ns.tempdir
elif self.ns.worker_args:
ns_dict, _ = json.loads(self.ns.worker_args)
TEMPDIR = ns_dict.get("tempdir") or TEMPDIR
os.makedirs(TEMPDIR, exist_ok=True)
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
test_cwd = 'test_python_{}'.format(os.getpid())
test_cwd = os.path.join(TEMPDIR, test_cwd)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from support.SAVEDCWD.
with support.temp_cwd(test_cwd, quiet=True):
self._main(tests, kwargs)
def getloadavg(self):
if self.win_load_tracker is not None:
return self.win_load_tracker.getloadavg()
if hasattr(os, 'getloadavg'):
return os.getloadavg()[0]
return None
def _main(self, tests, kwargs):
if self.ns.huntrleaks:
warmup, repetitions, _ = self.ns.huntrleaks
if warmup < 1 or repetitions < 1:
msg = ("Invalid values for the --huntrleaks/-R parameters. The "
"number of warmups and repetitions must be at least 1 "
"each (1:1).")
print(msg, file=sys.stderr, flush=True)
sys.exit(2)
if self.ns.worker_args is not None:
from test.libregrtest.runtest_mp import run_tests_worker
run_tests_worker(self.ns.worker_args)
if self.ns.wait:
input("Press any key to continue...")
support.PGO = self.ns.pgo
setup_tests(self.ns)
self.find_tests(tests)
if self.ns.list_tests:
self.list_tests()
sys.exit(0)
if self.ns.list_cases:
self.list_cases()
sys.exit(0)
# If we're on windows and this is the parent runner (not a worker),
# track the load average.
if sys.platform == 'win32' and (self.ns.worker_args is None):
from test.libregrtest.win_utils import WindowsLoadTracker
try:
self.win_load_tracker = WindowsLoadTracker()
except FileNotFoundError as error:
# Windows IoT Core and Windows Nano Server do not provide
# typeperf.exe for x64, x86 or ARM
print(f'Failed to create WindowsLoadTracker: {error}')
self.run_tests()
self.display_result()
if self.ns.verbose2 and self.bad:
self.rerun_failed_tests()
self.finalize()
self.save_xml_result()
if self.bad:
sys.exit(2)
if self.interrupted:
sys.exit(130)
if self.ns.fail_env_changed and self.environment_changed:
sys.exit(3)
sys.exit(0)
def main(tests=None, **kwargs):
"""Run the Python suite."""
Regrtest().main(tests=tests, **kwargs)
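# Illustrative sketch only, not part of the original module: the keyword
# arguments documented in the Regrtest docstring can be passed straight to
# main(), e.g. to run a couple of named tests verbosely; `python -m test`
# typically drives the same entry point with no arguments.
def _example_main_call():
    # Note: Regrtest._main() calls sys.exit() itself with the aggregated
    # status code, so this call never returns normally.
    main(tests=['test_os', 'test_sys'], verbose=True)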
| 34.256211
| 88
| 0.575133
|
1744489ec942e0328c82951bc48a387130126b05
| 2,043
|
py
|
Python
|
script/python/lib/utils/substitute_utils.py
|
timblechmann/nt2
|
6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce
|
[
"BSL-1.0"
] | 2
|
2016-09-14T00:23:53.000Z
|
2018-01-14T12:51:18.000Z
|
script/python/lib/utils/substitute_utils.py
|
timblechmann/nt2
|
6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce
|
[
"BSL-1.0"
] | null | null | null |
script/python/lib/utils/substitute_utils.py
|
timblechmann/nt2
|
6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce
|
[
"BSL-1.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: iso-8859-15 -*-
##############################################################################
# Copyright 2003 & onward LASMEA UMR 6602 CNRS/Univ. Clermont II
# Copyright 2009 & onward LRI UMR 8623 CNRS/Univ Paris Sud XI
#
# Distributed under the Boost Software License, Version 1.0
# See accompanying file LICENSE.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt
##############################################################################
"""utilities to substitute strings
chains to be substituted are of the form <token>.*<token>
the .* must correspond to an attribute of the class inheriting from
Substitute
"""
__author__ = "Lapreste Jean-thierry (lapreste@univ-bpclermont.fr)"
__version__ = "$Revision: 1.0 $"
__date__ = "$Date: 2010 $"
__copyright__ = """ Copyright 2003 & onward LASMEA UMR 6602 CNRS/Univ. Clermont II
Copyright 2009 & onward LRI UMR 8623 CNRS/Univ Paris Sud XI"""
__license__ = "Boost Software License, Version 1.0"
import re
def listify(x) :
if isinstance(x,str) :
return [x]
elif x is None :
return x
else :
return x
def stringize(x,sep=', ') :
if isinstance(x,str) :
return x
elif x is None :
return x
else :
return sep.join(x)
class Substitute(object) :
def __init__(self) :
pass
    def keys_of(self,txt,tokenl=r'\$',tokenr=r'\$') :
        m = list(set(re.findall('(%s.*?%s)'%(tokenl,tokenr),txt)))
        z = [ mm.replace('$',r'\$') for mm in m]
        return z
    def replace(self,txt,keys=None,tokenl=r'\$',tokenr=r'\$') :
if keys is None : keys = self.keys_of(txt,tokenl,tokenr)
ll=len(tokenl)
lr=-len(tokenr)
for k in keys :
## print("k -> %s "%k)
## print("g -> %s "%getattr(self,k[ll:lr]))
txt = re.sub(k,getattr(self,k[ll:lr]),txt)
return txt
if __name__ == "__main__" :
pass
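# Illustrative usage sketch, not part of the original module: attributes of a
# Substitute subclass supply the replacement values for the $name$ tokens
# described in the module docstring (the attribute name is whatever sits
# between the two token characters).
class ExampleSubstitute(Substitute) :
    def __init__(self, project) :
        Substitute.__init__(self)
        self.project = project
# ExampleSubstitute('nt2').replace('building $project$') -> 'building nt2'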
| 30.492537
| 85
| 0.53304
|
3d3174a32777eaee727692cd6751bc9adbaada9e
| 702
|
py
|
Python
|
ex044.py
|
mateusloped/curso-python
|
1b5b3927141e985911c9b2344b3d4d663a90c29c
|
[
"MIT"
] | null | null | null |
ex044.py
|
mateusloped/curso-python
|
1b5b3927141e985911c9b2344b3d4d663a90c29c
|
[
"MIT"
] | null | null | null |
ex044.py
|
mateusloped/curso-python
|
1b5b3927141e985911c9b2344b3d4d663a90c29c
|
[
"MIT"
] | null | null | null |
# PAYMENT PROGRAM: CASH/CHEQUE GETS 10% DISCOUNT; CARD PAID IN FULL GETS 5%; 2 INSTALLMENTS AT LIST PRICE; 3 OR MORE INSTALLMENTS ADD 20% INTEREST
pag = float(input('Valor a ser pago:'))
print('''[1]DINHEIRO ou CHEQUE
[2]A VISTA
[3]PARCELADO EM 2 VEZES
[4] PARCELADO EM 3X OU MAIS''')
forma = int(input('Qual a forma de pagamento? '))
if forma == 1:
calc = pag - (pag * 10/100)
print('Total a pagar R${} de R${}'.format(calc, pag))
elif forma == 2:
calc = pag - (pag * 5/100)
print('Total a pagar R${:.2f} de R${}'.format(calc, pag))
elif forma == 3:
print('Total a pagar R${}'.format(pag))
elif forma == 4:
calc = pag + (pag * 20 /100)
print('Total a ser pago 3x ou mais R${}'.format(calc))
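# Illustrative sketch, not part of the original exercise: the same pricing
# rules from the comment above expressed as a reusable function.
def preco_final(pag, forma):
    if forma == 1:
        return pag * 0.90    # cash/cheque: 10% discount
    elif forma == 2:
        return pag * 0.95    # card paid in full: 5% discount
    elif forma == 3:
        return pag           # 2 installments: list price
    else:
        return pag * 1.20    # 3 or more installments: 20% interest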
| 36.947368
| 123
| 0.61396
|
169436beb675550fe60da14401e988e3fe015124
| 28
|
py
|
Python
|
scripts/daphnet/__init__.py
|
gait-analyzer/.github
|
f468d3d6a93d069dec07b0580c9c18d6d8dbd773
|
[
"MIT"
] | 1
|
2021-08-05T13:43:45.000Z
|
2021-08-05T13:43:45.000Z
|
scripts/daphnet/__init__.py
|
gait-analyzer/GaitMonitoringForParkinsonsDiseasePatients
|
2064375ddc36bf38f3ff65f09e776328b8b4612a
|
[
"MIT"
] | null | null | null |
scripts/daphnet/__init__.py
|
gait-analyzer/GaitMonitoringForParkinsonsDiseasePatients
|
2064375ddc36bf38f3ff65f09e776328b8b4612a
|
[
"MIT"
] | null | null | null |
from .subject import Subject
| 28
| 28
| 0.857143
|
a6f2b27e4016aac456521c4bfb25a4cfdc89fefc
| 2,199
|
py
|
Python
|
docs/source/conf.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
def __join_paths(*paths):
return os.path.abspath(os.path.join(*paths))
__root_dir = __join_paths(os.path.dirname(os.path.realpath(__file__)), "..", "..")
sys.path.insert(0, __root_dir)
import arjuna
import pallets_sphinx_themes
# -- Project information -----------------------------------------------------
project = 'Arjuna'
copyright = '2015-2021, Rahul Verma'
author = 'Rahul Verma'
# The full version, including alpha/beta/rc tags
release = '1.1.40'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"pallets_sphinx_themes",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Activate the theme.
html_theme = 'flask'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
master_doc = 'index'
| 32.338235
| 82
| 0.674397
|
1fbb1ce1fd2b278aa5ed222a61b3fa340f088329
| 226
|
py
|
Python
|
pagine/urls/locations.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
pagine/urls/locations.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
pagine/urls/locations.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
from django.urls import path
from pagine.views import ListLocation, DetailLocation
urlpatterns = [
path('', ListLocation.as_view(), name='locations'),
path('<slug>/', DetailLocation.as_view(), name='location'),
]
| 28.25
| 63
| 0.70354
|
c432884ec617c6029ec173a2a24146b93caf0fed
| 2,924
|
py
|
Python
|
utility.py
|
leonqli/mssql-cli
|
fd926d35e4cb589224d4c3c166dbed8f67492ebf
|
[
"BSD-3-Clause"
] | 1,237
|
2017-12-12T17:42:22.000Z
|
2022-03-30T10:16:10.000Z
|
utility.py
|
leonqli/mssql-cli
|
fd926d35e4cb589224d4c3c166dbed8f67492ebf
|
[
"BSD-3-Clause"
] | 352
|
2017-12-12T19:55:04.000Z
|
2022-03-30T10:23:10.000Z
|
utility.py
|
leonqli/mssql-cli
|
fd926d35e4cb589224d4c3c166dbed8f67492ebf
|
[
"BSD-3-Clause"
] | 183
|
2017-12-12T21:29:25.000Z
|
2022-03-13T15:41:25.000Z
|
from __future__ import print_function
from subprocess import check_call, CalledProcessError
import os
import platform
import shlex
import shutil
import sys
import string
import random
ROOT_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
MSSQLCLI_DIST_DIRECTORY = os.path.abspath(
os.path.join(os.path.abspath(__file__), '..', 'dist'))
MSSQLCLI_BUILD_DIRECTORY = os.path.abspath(
os.path.join(os.path.abspath(__file__), '..', 'build'))
MSSQLCLI_RPM_DIRECTORY = os.path.abspath(
os.path.join(os.path.abspath(__file__), '..', '..', 'rpm_output')
)
MSSQLCLI_DEB_DIRECTORY = os.path.abspath(
os.path.join(os.path.abspath(__file__), '..', '..', 'debian_output')
)
def exec_command(command, directory, continue_on_error=True):
"""
Execute command.
"""
try:
command_split = [token.strip('"') for token in shlex.split(command, posix=False)]
# The logic above is used to preserve multiple token arguments with pytest. It is
# specifically needed when calling "not unstable" for running all tests not marked
# as unstable.
check_call(command_split, cwd=directory)
except CalledProcessError as err:
        # Continue execution in scenarios where we may be running commands in bulk.
print(err, file=sys.stderr)
if not continue_on_error:
sys.exit(1)
else:
pass
def clean_up_egg_info_sub_directories(directory):
for f in os.listdir(directory):
if f.endswith(".egg-info"):
clean_up(os.path.join(directory, f))
def clean_up(directory):
"""
Delete directory.
"""
try:
shutil.rmtree(directory)
except OSError:
# Ignored, directory may not exist which is fine.
pass
def get_current_platform():
"""
Get current platform name.
"""
system = platform.system()
arch = platform.architecture()[0]
run_time_id = None
if system == 'Windows':
if arch == '32bit':
run_time_id = 'win32'
elif arch == '64bit':
run_time_id = 'win_amd64'
elif system == 'Darwin':
run_time_id = 'macosx_10_11_intel'
elif system == 'Linux':
run_time_id = 'manylinux1_x86_64'
return run_time_id
def copy_current_platform_mssqltoolsservice():
"""
Copy the necessary mssqltoolsservice binaries for the current platform if supported.
"""
# pylint: disable=import-outside-toplevel
import mssqlcli.mssqltoolsservice.externals as mssqltoolsservice
current_platform = get_current_platform()
if current_platform:
mssqltoolsservice.copy_sqltoolsservice(current_platform)
else:
print("This platform: {} does not support mssqltoolsservice.".format(platform.system()))
def random_str(size=12, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
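# Illustrative sketch with an assumed invocation, not part of the original
# script: exec_command's shlex handling keeps the quoted marker expression as
# a single pytest argument, as described in the comment inside exec_command.
def example_run_tests_excluding_unstable():
    exec_command('pytest -m "not unstable"', ROOT_DIR, continue_on_error=False)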
| 28.666667
| 96
| 0.672025
|
0b7dfd4e1729a4723183a8d16908cbe56cd55f8a
| 3,007
|
py
|
Python
|
time_trial_gui/gui/plotter_tab.py
|
dmayer/time_trial
|
c9f896f8788bb4ae3b8bd526a325542c610d0573
|
[
"MIT"
] | 56
|
2015-01-23T13:06:14.000Z
|
2022-01-07T17:06:37.000Z
|
time_trial_gui/gui/plotter_tab.py
|
channgo2203/time_trial
|
c9f896f8788bb4ae3b8bd526a325542c610d0573
|
[
"MIT"
] | 2
|
2015-10-30T18:04:38.000Z
|
2016-05-01T16:11:56.000Z
|
time_trial_gui/gui/plotter_tab.py
|
channgo2203/time_trial
|
c9f896f8788bb4ae3b8bd526a325542c610d0573
|
[
"MIT"
] | 13
|
2015-06-28T17:29:30.000Z
|
2021-11-17T18:48:22.000Z
|
__author__ = 'daniel'
import logging
from PyQt4 import QtCore, QtGui
from gui.data_source_model import DataSourceModel
from gui.plotter_widget import PlotterWidget
from lib.timing_data import TimingData
from lib.plot import Plot
class PlotterTab(QtGui.QWidget):
def __init__(self, parent = None):
super(PlotterTab, self).__init__(parent)
self.layout = QtGui.QGridLayout()
self.setLayout(self.layout)
# data sources
self.data_box = QtGui.QGroupBox(self, title="Data Sources")
self.layout.addWidget(self.data_box,0,0)
data_box_layout = QtGui.QGridLayout(self.data_box)
self.data_box.setLayout(data_box_layout)
self.data_source_model = DataSourceModel()
self.data_source_table = QtGui.QTableView()
self.data_source_table.setModel(self.data_source_model)
self.data_source_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.data_source_table.activated.connect(self.event_open_data_source_edit)
data_box_layout.addWidget(self.data_source_table, 0, 0)
self.plotter = PlotterWidget(self)
self.plotter.set_data_source_model(self.data_source_model)
self.layout.addWidget(self.plotter, 1,0,1,2)
self.data_source_model.rowsInserted.connect(self.plotter.update_plot)
# main buttons
add_file_button = QtGui.QPushButton(self.data_box)
add_file_button.setText("Add File")
add_file_button.released.connect(self.event_show_select_file_dialog)
self.layout.addWidget(add_file_button,0,1)
def event_open_data_source_edit(self, index):
dialog = EditDataSourceDialog(index.data(QtCore.Qt.EditRole), self.main_widget)
dialog.accepted.connect(self.event_data_source_edited)
        dialog.exec_()
def event_data_source_edited(self):
self.data_source_table.resizeColumnsToContents()
self.update_plot()
def event_show_select_file_dialog(self):
file_dialog = QtGui.QFileDialog()
file_dialog.setAcceptMode(QtGui.QFileDialog.AcceptOpen)
filters = [ "PEM Files (*.pem)", "Any files (*)" ]
# file_dialog.fileSelected.connect(self.event_file_selected)
file_dialog.filesSelected.connect(self.event_files_selected)
file_dialog.setFileMode(QtGui.QFileDialog.ExistingFiles)
        file_dialog.exec_()
def event_files_selected(self, file_names):
print(file_names)
for f in file_names:
self.event_file_selected(f)
def event_file_selected(self,file_name):
new_data = TimingData()
new_data.load_from_csv(file_name)
new_plot = Plot(new_data)
self.data_source_model.add_data(new_plot)
self.data_source_table.resizeColumnsToContents()
#data = parse_csv(file_name)
#self.plot_canvas.add_plot(data, 200, [min(data), 26*1000*1000], "100 micros", 'red')
#self.plot_canvas.update_figure()
def add_data_row(self, data):
pass
| 34.170455
| 93
| 0.711673
|
58fcc09842d1de315f3e38bfe8848e5190ce5bdf
| 363
|
py
|
Python
|
scenarios/sleep.py
|
DataDog/system-tests
|
04f94312fddb135830dbe2df6d51d9246561ea6e
|
[
"Apache-2.0"
] | 3
|
2021-11-15T20:28:25.000Z
|
2022-01-27T18:33:15.000Z
|
scenarios/sleep.py
|
DataDog/system-tests
|
04f94312fddb135830dbe2df6d51d9246561ea6e
|
[
"Apache-2.0"
] | 25
|
2021-11-08T15:50:38.000Z
|
2022-03-29T12:16:17.000Z
|
scenarios/sleep.py
|
DataDog/system-tests
|
04f94312fddb135830dbe2df6d51d9246561ea6e
|
[
"Apache-2.0"
] | 1
|
2021-11-15T20:28:28.000Z
|
2021-11-15T20:28:28.000Z
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2021 Datadog, Inc.
import time
def test_sleep():
"""Sleep forever to allow you to perform some manual testing"""
time.sleep(3600 * 24)
| 33
| 120
| 0.752066
|
f521d3f1e733d02e53bd77f403c5e45701d9160b
| 1,491
|
py
|
Python
|
blogproject/blog/views.py
|
yeluoguigen/blog
|
24d16ce417970acf2c828c0d37b54f17b29e96a2
|
[
"MIT"
] | null | null | null |
blogproject/blog/views.py
|
yeluoguigen/blog
|
24d16ce417970acf2c828c0d37b54f17b29e96a2
|
[
"MIT"
] | null | null | null |
blogproject/blog/views.py
|
yeluoguigen/blog
|
24d16ce417970acf2c828c0d37b54f17b29e96a2
|
[
"MIT"
] | null | null | null |
import markdown
from django.db.models import Q
from django.shortcuts import render,get_object_or_404
from django.http import HttpResponse
from comments.forms import CommentForm
from blog.models import Post,Category
# Home page
def index(request):
post_list = Post.objects.all()
return render(request,'blog/index.html',context={'post_list':post_list})
# Post detail page
def detail(request,pk):
try:
post = Post.objects.get(id=pk)
    except Post.DoesNotExist:
return render(request,'blog/detail.html',status=404)
post.body = markdown.markdown(post.body,
extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
])
form = CommentForm()
comment_list = post.comment_set.all()
context = {
'post':post,
'form':form,
'comment_list': comment_list
}
return render(request,'blog/detail.html',context=context)
# Archives
def archives(request,year,month):
post_list = Post.objects.filter(Q(created_time__year=year) & Q(created_time__month=month))
return render(request,'blog/index.html',context={'post_list':post_list})
def category(request,pk):
cate = get_object_or_404(Category,pk=pk)
post_list = Post.objects.filter(category=cate)
return render(request,'blog/index.html',context={'post_list':post_list})
| 33.133333
| 94
| 0.634474
|
77e0b288fcc09c71d7aa1605bb5b0977fed2a619
| 14,838
|
py
|
Python
|
salt/modules/jboss7_cli.py
|
jkur/salt
|
3e62675550f9869d550d7787800270e632955d2f
|
[
"Apache-2.0"
] | 3
|
2015-04-16T18:42:35.000Z
|
2017-10-30T16:57:49.000Z
|
salt/modules/jboss7_cli.py
|
jkur/salt
|
3e62675550f9869d550d7787800270e632955d2f
|
[
"Apache-2.0"
] | 16
|
2015-11-18T00:44:03.000Z
|
2018-10-29T20:48:27.000Z
|
salt/modules/jboss7_cli.py
|
jkur/salt
|
3e62675550f9869d550d7787800270e632955d2f
|
[
"Apache-2.0"
] | 1
|
2018-04-19T16:57:27.000Z
|
2018-04-19T16:57:27.000Z
|
# -*- coding: utf-8 -*-
'''
Module for low-level interaction with JbossAS7 through CLI.
This module exposes two ways of interaction with the CLI, either through commands or operations.
.. note:: Following JBoss documentation (https://developer.jboss.org/wiki/CommandLineInterface):
"Operations are considered a low level but comprehensive way to manage the AS controller, i.e. if it can't be done with operations it can't be done in any other way.
Commands, on the other hand, are more user-friendly in syntax,
although most of them still translate into operation requests and some of them even into a few
composite operation requests, i.e. commands also simplify some management operations from the user's point of view."
The difference between calling a command or operation is in handling the result.
Commands return a zero return code if operation is successful or return non-zero return code and
print an error to standard output in plain text, in case of an error.
Operations return a json-like structure, that contain more information about the result.
In case of a failure, they also return a specific return code. This module parses the output from the operations and
returns it as a dictionary so that an execution of an operation can then be verified against specific errors.
In order to run each function, jboss_config dictionary with the following properties must be passed:
* cli_path: the path to jboss-cli script, for example: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
* controller: the ip addres and port of controller, for example: 10.11.12.13:9999
* cli_user: username to connect to jboss administration console if necessary
* cli_password: password to connect to jboss administration console if necessary
Example:
.. code-block:: yaml
jboss_config:
cli_path: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
controller: 10.11.12.13:9999
cli_user: 'jbossadm'
cli_password: 'jbossadm'
'''
# Import Python libs
from __future__ import absolute_import
import logging
import re
import pprint
import time
# Import Salt libs
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
def run_command(jboss_config, command, fail_on_error=True):
'''
Execute a command against jboss instance through the CLI interface.
jboss_config
Configuration dictionary with properties specified above.
command
Command to execute against jboss instance
fail_on_error (default=True)
        If true, raise a CommandExecutionError exception if execution fails.
If false, 'success' property of the returned dictionary is set to False
CLI Example:
.. code-block:: bash
        salt '*' jboss7_cli.run_command '{"cli_path": "/opt/jboss/jboss-7.0/bin/jboss-cli.sh", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_command
'''
cli_command_result = __call_cli(jboss_config, command)
if cli_command_result['retcode'] == 0:
cli_command_result['success'] = True
else:
if fail_on_error:
raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**cli_command_result))
else:
cli_command_result['success'] = False
return cli_command_result
def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
'''
Execute an operation against jboss instance through the CLI interface.
jboss_config
Configuration dictionary with properties specified above.
operation
An operation to execute against jboss instance
fail_on_error (default=True)
        If true, raise a CommandExecutionError exception if execution fails.
If false, 'success' property of the returned dictionary is set to False
retries:
Number of retries in case of "JBAS012144: Could not connect to remote" error.
CLI Example:
.. code-block:: bash
        salt '*' jboss7_cli.run_operation '{"cli_path": "/opt/jboss/jboss-7.0/bin/jboss-cli.sh", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_operation
'''
cli_command_result = __call_cli(jboss_config, operation, retries)
if cli_command_result['retcode'] == 0:
if _is_cli_output(cli_command_result['stdout']):
cli_result = _parse(cli_command_result['stdout'])
cli_result['success'] = cli_result['outcome'] == 'success'
else:
raise CommandExecutionError('Operation has returned unparseable output: {0}'.format(cli_command_result['stdout']))
else:
if _is_cli_output(cli_command_result['stdout']):
cli_result = _parse(cli_command_result['stdout'])
cli_result['success'] = False
            match = re.search(r'^(JBAS\d+):', cli_result['failure-description'])
            if match:
                cli_result['err_code'] = match.group(1)
else:
if fail_on_error:
raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**cli_command_result))
else:
cli_result = {
'success': False,
'stdout': cli_command_result['stdout'],
'stderr': cli_command_result['stderr'],
'retcode': cli_command_result['retcode']
}
return cli_result
def __call_cli(jboss_config, command, retries=1):
command_segments = [
jboss_config['cli_path'],
'--connect',
'--controller="{0}"'.format(jboss_config['controller'])
]
if 'cli_user' in six.iterkeys(jboss_config):
command_segments.append('--user="{0}"'.format(jboss_config['cli_user']))
if 'cli_password' in six.iterkeys(jboss_config):
command_segments.append('--password="{0}"'.format(jboss_config['cli_password']))
command_segments.append('--command="{0}"'.format(__escape_command(command)))
cli_script = ' '.join(command_segments)
cli_command_result = __salt__['cmd.run_all'](cli_script)
log.debug('cli_command_result=%s', str(cli_command_result))
log.debug('========= STDOUT:\n%s', cli_command_result['stdout'])
log.debug('========= STDERR:\n%s', cli_command_result['stderr'])
log.debug('========= RETCODE: %d', cli_command_result['retcode'])
if cli_command_result['retcode'] == 127:
raise CommandExecutionError('Could not execute jboss-cli.sh script. Have you specified server_dir variable correctly?\nCurrent CLI path: {cli_path}. '.format(cli_path=jboss_config['cli_path']))
if cli_command_result['retcode'] == 1 and 'Unable to authenticate against controller' in cli_command_result['stderr']:
raise CommandExecutionError('Could not authenticate against controller, please check username and password for the management console. Err code: {retcode}, stdout: {stdout}, stderr: {stderr}'.format(**cli_command_result))
    # It may happen that even though the server is up it may not respond to the call
if cli_command_result['retcode'] == 1 and 'JBAS012144' in cli_command_result['stderr'] and retries > 0: # Cannot connect to cli
log.debug('Command failed, retrying... (%d tries left)', retries)
time.sleep(3)
return __call_cli(jboss_config, command, retries - 1)
return cli_command_result
def __escape_command(command):
'''
This function escapes the command so that can be passed in the command line to JBoss CLI.
Escaping commands passed to jboss is extremely confusing.
If you want to save a binding that contains a single backslash character read the following explanation.
A sample value, let's say "a\b" (with single backslash), that is saved in the config.xml file:
<bindings>
<simple name="java:/app/binding1" value="a\b"/>
</bindings>
    Even though it is just a single "\", if you want to read it from the command line you will get:
/opt/jboss/jboss-eap-6.0.1/bin/jboss-cli.sh --connect --controller=ip_addr:9999 --user=user --password=pass --command="/subsystem=naming/binding=\"java:/app/binding1\":read-resource"
{
"outcome" => "success",
"result" => {
"binding-type" => "simple",
"value" => "a\\b"
}
}
So, now you have two backslashes in the output, even though in the configuration file you have one.
Now, if you want to update this property, the easiest thing to do is to create a file with appropriate command:
/tmp/update-binding.cli:
----
/subsystem=naming/binding="java:/app/binding1":write-attribute(name=value, value="a\\\\b")
----
And run cli command:
${JBOSS_HOME}/bin/jboss-cli.sh --connect --controller=ip_addr:9999 --user=user --password=pass --file="/tmp/update-binding.cli"
As you can see, here you need 4 backslashes to save it as one to the configuration file. Run it and go to the configuration file to check.
(You may need to reload jboss afterwards: ${JBOSS_HOME}/bin/jboss-cli.sh --connect --controller=ip_addr:9999 --user=user --password=pass --command=":reload" )
But if you want to run the same update operation directly from command line, prepare yourself for more escaping:
${JBOSS_HOME}/bin/jboss-cli.sh --connect --controller=ip_addr:9999 --user=user --password=pass --command="/subsystem=naming/binding=\"java:/app/binding1\":write-attribute(name=value, value=\"a\\\\\\\\b\")"
So, here you need 8 backslashes to force JBoss to save it as one.
To sum up this behavior:
(1) 1 backslash in configuration file
(2) 2 backslashes when reading
(3) 4 backslashes when writing from file
(4) 8 backslashes when writing from command line
... are all the same thing:)
    Remember that the command that comes in is already in form (3). Now we need to escape it further to be able to pass it on the command line.
'''
result = command.replace('\\', '\\\\') # replace \ -> \\
result = result.replace('"', '\\"') # replace " -> \"
return result
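# Illustrative check, not part of the original module, of the escaping rules
# described above: double quotes gain a backslash and existing backslashes are
# doubled before __call_cli wraps the command into --command="...".
def _escape_command_example():
    # Returns: /subsystem=naming/binding=\"java:/app/binding1\":read-resource
    return __escape_command('/subsystem=naming/binding="java:/app/binding1":read-resource')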
def _is_cli_output(text):
cli_re = re.compile(r"^\s*{.+}\s*$", re.DOTALL)
if cli_re.search(text):
return True
else:
return False
def _parse(cli_output):
tokens = __tokenize(cli_output)
result = __process_tokens(tokens)
log.debug("=== RESULT: "+pprint.pformat(result))
return result
def __process_tokens(tokens):
result, token_no = __process_tokens_internal(tokens)
return result
def __process_tokens_internal(tokens, start_at=0):
if __is_dict_start(tokens[start_at]) and start_at == 0: # the top object
return __process_tokens_internal(tokens, start_at=1)
log.debug("__process_tokens, start_at="+str(start_at))
token_no = start_at
result = {}
current_key = None
while token_no < len(tokens):
token = tokens[token_no]
log.debug("PROCESSING TOKEN %d: %s", token_no, token)
if __is_quoted_string(token):
log.debug(" TYPE: QUOTED STRING ")
if current_key is None:
current_key = __get_quoted_string(token)
log.debug(" KEY: %s", current_key)
else:
result[current_key] = __get_quoted_string(token)
log.debug(" %s -> %s", current_key, result[current_key])
current_key = None
elif __is_datatype(token):
log.debug(" TYPE: DATATYPE: %s ", token)
result[current_key] = __get_datatype(token)
log.debug(" %s -> %s", current_key, str(result[current_key]))
current_key = None
elif __is_boolean(token):
log.debug(" TYPE: BOOLEAN ")
result[current_key] = __get_boolean(token)
log.debug(" %s -> %s", current_key, str(result[current_key]))
current_key = None
elif __is_int(token):
log.debug(" TYPE: INT ")
result[current_key] = __get_int(token)
log.debug(" %s -> %s", current_key, str(result[current_key]))
current_key = None
elif __is_long(token):
log.debug(" TYPE: LONG ")
result[current_key] = __get_long(token)
log.debug(" %s -> %s", current_key, str(result[current_key]))
current_key = None
elif __is_undefined(token):
log.debug(" TYPE: UNDEFINED ")
log.debug(" %s -> undefined (Adding as None to map)", current_key)
result[current_key] = None
current_key = None
elif __is_dict_start(token):
log.debug(" TYPE: DICT START")
dict_value, token_no = __process_tokens_internal(tokens, start_at=token_no+1)
log.debug(" DICT = %s ", dict_value)
result[current_key] = dict_value
log.debug(" %s -> %s", current_key, str(result[current_key]))
current_key = None
elif __is_dict_end(token):
log.debug(" TYPE: DICT END")
return result, token_no
elif __is_assignment(token):
log.debug(" TYPE: ASSIGNMENT")
is_assignment = True
else:
raise CommandExecutionError('Unknown token!', 'Token:'+token)
token_no = token_no + 1
def __tokenize(cli_output):
# add all possible tokens here
# \\ means a single backslash here
tokens_re = re.compile(r'("(?:[^"\\]|\\"|\\\\)*"|=>|{|}|true|false|undefined|[0-9A-Za-z]+)', re.DOTALL)
tokens = tokens_re.findall(cli_output)
log.debug("tokens=%s", str(tokens))
return tokens
def __is_dict_start(token):
return token == '{'
def __is_dict_end(token):
return token == '}'
def __is_boolean(token):
return token == 'true' or token == 'false'
def __get_boolean(token):
return token == 'true'
def __is_int(token):
return token.isdigit()
def __get_int(token):
return int(token)
def __is_long(token):
return token[0:-1].isdigit() and token[-1] == 'L'
def __get_long(token):
if six.PY2:
return long(token[0:-1]) # pylint: disable=incompatible-py3-code
else:
return int(token[0:-1])
def __is_datatype(token):
return token in ("INT", "BOOLEAN", "STRING", "OBJECT")
def __get_datatype(token):
return token
def __is_undefined(token):
return token == 'undefined'
def __is_quoted_string(token):
return token[0] == '"' and token[-1] == '"'
def __get_quoted_string(token):
result = token[1:-1] # remove quotes
result = result.replace('\\\\', '\\') # unescape the output, by default all the string are escaped in the output
return result
def __is_assignment(token):
return token == '=>'
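# Illustrative usage sketch with assumed values, not part of the original
# module (it requires a Salt execution context and a reachable controller):
# run_operation returns the parsed CLI output as a dictionary, so the outcome
# can be checked via the 'success' key instead of scraping stdout.
def _example_read_datasources():
    jboss_config = {
        'cli_path': '/opt/jboss/jboss-7.0/bin/jboss-cli.sh',
        'controller': '10.11.12.13:9999',
        'cli_user': 'jbossadm',
        'cli_password': 'jbossadm',
    }
    result = run_operation(jboss_config, '/subsystem=datasources:read-resource', fail_on_error=False)
    return result['success']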
| 39.673797
| 229
| 0.663162
|
6b25366084aea585243ad82f985190cf6d2af444
| 566
|
py
|
Python
|
registration/migrations/0002_auto_20161110_2316.py
|
tanql/RecommendApi
|
80dc9ea4c531c06e66b7c0b12b7089ed0a445874
|
[
"MIT"
] | 1
|
2017-10-02T18:12:28.000Z
|
2017-10-02T18:12:28.000Z
|
registration/migrations/0002_auto_20161110_2316.py
|
tanql/RecommendApi
|
80dc9ea4c531c06e66b7c0b12b7089ed0a445874
|
[
"MIT"
] | null | null | null |
registration/migrations/0002_auto_20161110_2316.py
|
tanql/RecommendApi
|
80dc9ea4c531c06e66b7c0b12b7089ed0a445874
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('registration', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='myuser',
name='age',
field=models.IntegerField(null=True, blank=True),
),
migrations.AddField(
model_name='myuser',
name='postCode',
field=models.IntegerField(null=True, blank=True),
),
]
| 22.64
| 61
| 0.575972
|
e8d25ca99f75f2b33b7344fcf0efacac248e1b58
| 26,387
|
py
|
Python
|
sdk/python/pulumi_gcp/iap/web_iam_binding.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/iap/web_iam_binding.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/iap/web_iam_binding.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['WebIamBindingArgs', 'WebIamBinding']
@pulumi.input_type
class WebIamBindingArgs:
def __init__(__self__, *,
members: pulumi.Input[Sequence[pulumi.Input[str]]],
role: pulumi.Input[str],
condition: Optional[pulumi.Input['WebIamBindingConditionArgs']] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a WebIamBinding resource.
:param pulumi.Input[str] role: The role that should be applied. Only one
`iap.WebIamBinding` can be used per role. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
        :param pulumi.Input['WebIamBindingConditionArgs'] condition: An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
pulumi.set(__self__, "members", members)
pulumi.set(__self__, "role", role)
if condition is not None:
pulumi.set(__self__, "condition", condition)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter
def members(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "members")
@members.setter
def members(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter
def role(self) -> pulumi.Input[str]:
"""
The role that should be applied. Only one
`iap.WebIamBinding` can be used per role. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: pulumi.Input[str]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['WebIamBindingConditionArgs']]:
"""
        An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding.
Structure is documented below.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['WebIamBindingConditionArgs']]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _WebIamBindingState:
def __init__(__self__, *,
condition: Optional[pulumi.Input['WebIamBindingConditionArgs']] = None,
etag: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering WebIamBinding resources.
        :param pulumi.Input['WebIamBindingConditionArgs'] condition: An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding.
Structure is documented below.
:param pulumi.Input[str] etag: (Computed) The etag of the IAM policy.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
:param pulumi.Input[str] role: The role that should be applied. Only one
`iap.WebIamBinding` can be used per role. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
"""
if condition is not None:
pulumi.set(__self__, "condition", condition)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if members is not None:
pulumi.set(__self__, "members", members)
if project is not None:
pulumi.set(__self__, "project", project)
if role is not None:
pulumi.set(__self__, "role", role)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['WebIamBindingConditionArgs']]:
"""
        An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding.
Structure is documented below.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['WebIamBindingConditionArgs']]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
(Computed) The etag of the IAM policy.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "members")
@members.setter
def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
The role that should be applied. Only one
`iap.WebIamBinding` can be used per role. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
class WebIamBinding(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['WebIamBindingConditionArgs']]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Three different resources help you manage your IAM policy for Identity-Aware Proxy Web. Each of these resources serves a different use case:
* `iap.WebIamPolicy`: Authoritative. Sets the IAM policy for the web and replaces any existing policy already attached.
* `iap.WebIamBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the web are preserved.
* `iap.WebIamMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the web are preserved.
> **Note:** `iap.WebIamPolicy` **cannot** be used in conjunction with `iap.WebIamBinding` and `iap.WebIamMember` or they will fight over what your policy should be.
> **Note:** `iap.WebIamBinding` resources **can be** used in conjunction with `iap.WebIamMember` resources **only if** they do not grant privilege to the same role.
## google\_iap\_web\_iam\_policy
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/iap.httpsResourceAccessor",
members=["user:jane@example.com"],
)])
policy = gcp.iap.WebIamPolicy("policy",
project=google_project_service["project_service"]["project"],
policy_data=admin.policy_data)
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/iap.httpsResourceAccessor",
members=["user:jane@example.com"],
condition=gcp.organizations.GetIAMPolicyBindingConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
),
)])
policy = gcp.iap.WebIamPolicy("policy",
project=google_project_service["project_service"]["project"],
policy_data=admin.policy_data)
```
## google\_iap\_web\_iam\_binding
```python
import pulumi
import pulumi_gcp as gcp
binding = gcp.iap.WebIamBinding("binding",
project=google_project_service["project_service"]["project"],
role="roles/iap.httpsResourceAccessor",
members=["user:jane@example.com"])
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
binding = gcp.iap.WebIamBinding("binding",
project=google_project_service["project_service"]["project"],
role="roles/iap.httpsResourceAccessor",
members=["user:jane@example.com"],
condition=gcp.iap.WebIamBindingConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
))
```
## google\_iap\_web\_iam\_member
```python
import pulumi
import pulumi_gcp as gcp
member = gcp.iap.WebIamMember("member",
project=google_project_service["project_service"]["project"],
role="roles/iap.httpsResourceAccessor",
member="user:jane@example.com")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
member = gcp.iap.WebIamMember("member",
project=google_project_service["project_service"]["project"],
role="roles/iap.httpsResourceAccessor",
member="user:jane@example.com",
condition=gcp.iap.WebIamMemberConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
))
```
## Import
        For all import syntaxes, the "resource in question" can take any of the following forms:
        * projects/{{project}}/iap_web
        * {{project}}
        Any variables not passed in the import command will be taken from the provider configuration.
        Identity-Aware Proxy web IAM resources can be imported using the resource identifiers, role, and member.
        IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g.
```sh
$ pulumi import gcp:iap/webIamBinding:WebIamBinding editor "projects/{{project}}/iap_web roles/iap.httpsResourceAccessor user:jane@example.com"
```
        IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g.
```sh
$ pulumi import gcp:iap/webIamBinding:WebIamBinding editor "projects/{{project}}/iap_web roles/iap.httpsResourceAccessor"
```
IAM policy imports use the identifier of the resource in question, e.g.
```sh
$ pulumi import gcp:iap/webIamBinding:WebIamBinding editor projects/{{project}}/iap_web
```
        -> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the
        full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
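        For example, a hypothetical import of a binding that grants a project-level custom role (the role name below is invented for illustration) follows the same space-delimited pattern:
        ```sh
        $ pulumi import gcp:iap/webIamBinding:WebIamBinding editor "projects/{{project}}/iap_web projects/my-project/roles/my-custom-role"
        ```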
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['WebIamBindingConditionArgs']] condition: An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
:param pulumi.Input[str] role: The role that should be applied. Only one
`iap.WebIamBinding` can be used per role. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WebIamBindingArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Three different resources help you manage your IAM policy for Identity-Aware Proxy Web. Each of these resources serves a different use case:
* `iap.WebIamPolicy`: Authoritative. Sets the IAM policy for the web and replaces any existing policy already attached.
* `iap.WebIamBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the web are preserved.
* `iap.WebIamMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the web are preserved.
> **Note:** `iap.WebIamPolicy` **cannot** be used in conjunction with `iap.WebIamBinding` and `iap.WebIamMember` or they will fight over what your policy should be.
> **Note:** `iap.WebIamBinding` resources **can be** used in conjunction with `iap.WebIamMember` resources **only if** they do not grant privilege to the same role.
## google\_iap\_web\_iam\_policy
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/iap.httpsResourceAccessor",
members=["user:jane@example.com"],
)])
policy = gcp.iap.WebIamPolicy("policy",
project=google_project_service["project_service"]["project"],
policy_data=admin.policy_data)
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/iap.httpsResourceAccessor",
members=["user:jane@example.com"],
condition=gcp.organizations.GetIAMPolicyBindingConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
),
)])
policy = gcp.iap.WebIamPolicy("policy",
project=google_project_service["project_service"]["project"],
policy_data=admin.policy_data)
```
## google\_iap\_web\_iam\_binding
```python
import pulumi
import pulumi_gcp as gcp
binding = gcp.iap.WebIamBinding("binding",
project=google_project_service["project_service"]["project"],
role="roles/iap.httpsResourceAccessor",
members=["user:jane@example.com"])
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
binding = gcp.iap.WebIamBinding("binding",
project=google_project_service["project_service"]["project"],
role="roles/iap.httpsResourceAccessor",
members=["user:jane@example.com"],
condition=gcp.iap.WebIamBindingConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
))
```
## google\_iap\_web\_iam\_member
```python
import pulumi
import pulumi_gcp as gcp
member = gcp.iap.WebIamMember("member",
project=google_project_service["project_service"]["project"],
role="roles/iap.httpsResourceAccessor",
member="user:jane@example.com")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
member = gcp.iap.WebIamMember("member",
project=google_project_service["project_service"]["project"],
role="roles/iap.httpsResourceAccessor",
member="user:jane@example.com",
condition=gcp.iap.WebIamMemberConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
))
```
## Import
        For all import syntaxes, the "resource in question" can take any of the following forms:
        * projects/{{project}}/iap_web
        * {{project}}
        Any variables not passed in the import command will be taken from the provider configuration. Identity-Aware Proxy web IAM resources can be imported using the resource identifiers, role, and member. IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g.
```sh
$ pulumi import gcp:iap/webIamBinding:WebIamBinding editor "projects/{{project}}/iap_web roles/iap.httpsResourceAccessor user:jane@example.com"
```
        IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g.
```sh
$ pulumi import gcp:iap/webIamBinding:WebIamBinding editor "projects/{{project}}/iap_web roles/iap.httpsResourceAccessor"
```
IAM policy imports use the identifier of the resource in question, e.g.
```sh
$ pulumi import gcp:iap/webIamBinding:WebIamBinding editor projects/{{project}}/iap_web
```
        -> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the
        full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
:param str resource_name: The name of the resource.
:param WebIamBindingArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WebIamBindingArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['WebIamBindingConditionArgs']]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WebIamBindingArgs.__new__(WebIamBindingArgs)
__props__.__dict__["condition"] = condition
if members is None and not opts.urn:
raise TypeError("Missing required property 'members'")
__props__.__dict__["members"] = members
__props__.__dict__["project"] = project
if role is None and not opts.urn:
raise TypeError("Missing required property 'role'")
__props__.__dict__["role"] = role
__props__.__dict__["etag"] = None
super(WebIamBinding, __self__).__init__(
'gcp:iap/webIamBinding:WebIamBinding',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['WebIamBindingConditionArgs']]] = None,
etag: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None) -> 'WebIamBinding':
"""
Get an existing WebIamBinding resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['WebIamBindingConditionArgs']] condition: An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding.
Structure is documented below.
:param pulumi.Input[str] etag: (Computed) The etag of the IAM policy.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
:param pulumi.Input[str] role: The role that should be applied. Only one
`iap.WebIamBinding` can be used per role. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _WebIamBindingState.__new__(_WebIamBindingState)
__props__.__dict__["condition"] = condition
__props__.__dict__["etag"] = etag
__props__.__dict__["members"] = members
__props__.__dict__["project"] = project
__props__.__dict__["role"] = role
return WebIamBinding(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def condition(self) -> pulumi.Output[Optional['outputs.WebIamBindingCondition']]:
"""
        An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding.
Structure is documented below.
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
(Computed) The etag of the IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def members(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "members")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def role(self) -> pulumi.Output[str]:
"""
The role that should be applied. Only one
`iap.WebIamBinding` can be used per role. Note that custom roles must be of the format
`[projects|organizations]/{parent-name}/roles/{role-name}`.
"""
return pulumi.get(self, "role")
| 45.338488
| 454
| 0.644711
|
684f652d11c52a94d9cacff41cc95e75637b83df
| 13,165
|
py
|
Python
|
models/mtcnn_models.py
|
darren-phang/MTCNN-TF2
|
879313af0e1b8699482eabe3a04da18525922ec8
|
[
"Apache-2.0"
] | 2
|
2020-09-03T09:13:33.000Z
|
2020-11-16T07:40:13.000Z
|
models/mtcnn_models.py
|
darren-phang/MTCNN-TF2
|
879313af0e1b8699482eabe3a04da18525922ec8
|
[
"Apache-2.0"
] | null | null | null |
models/mtcnn_models.py
|
darren-phang/MTCNN-TF2
|
879313af0e1b8699482eabe3a04da18525922ec8
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf  # used directly throughout (tf.unstack, tf.while_loop, tf.image, ...)
import numpy as np  # needed for np.newaxis when adding a batch dimension
import tensorflow.keras.layers as layers
from utils import *
import time
import cv2
def xy2yx(bbox):
x1, y1, x2, y2 = tf.unstack(bbox, 4, axis=-1)
bbox = tf.stack([y1, x1, y2, x2], axis=-1)
return bbox
class PNet(tf.keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.conv1 = layers.Conv2D(10, 3, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.pool1 = layers.MaxPool2D([2, 2], 2, padding='SAME')
self.conv2 = layers.Conv2D(16, 3, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.conv3 = layers.Conv2D(32, 3, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
# batch*H*W*2
self.cls = layers.Conv2D(2, 1, 1, activation=layers.Softmax())
# batch*H*W*4
self.box = layers.Conv2D(4, 1, 1, activation=None)
self.landmark = layers.Conv2D(10, 1, 1, activation=None)
@tf.function(experimental_relax_shapes=True)
def call(self, inputs, training=None, mask=None):
x = self.conv1(inputs)
x = self.pool1(x)
x = self.conv2(x)
x = self.conv3(x)
cls_prob = self.cls(x)
bbox_pred = self.box(x)
landmark_pred = self.landmark(x)
if training:
cls_prob = tf.squeeze(cls_prob, [1, 2])
bbox_pred = tf.squeeze(bbox_pred, [1, 2])
landmark_pred = tf.squeeze(landmark_pred, [1, 2])
return cls_prob, bbox_pred, landmark_pred
@tf.function
def detect(self, img, net_size=12, min_ob_size=20,
scale_factor=0.79, thresh=0.6):
def cond(img_shape, all_boxes, im_resized, current_scale):
return tf.minimum(img_shape[0], img_shape[1]) > net_size
def body(img_shape, all_boxes, img_resized, current_scale):
cls_cls_map, reg, _ = self.call(img_resized[np.newaxis, ...], training=False)
boxes = generate_bbox(cls_cls_map[0, :, :, 1], reg[0],
2, net_size, current_scale, thresh)
current_scale *= scale_factor
img_resized = processed_image(img, current_scale)
img_shape = tf.shape(img_resized)
if tf.size(boxes) == 0:
return img_shape, all_boxes, img_resized, current_scale
selected_indices = tf.image.non_max_suppression(boxes=xy2yx(boxes[:, :4]),
scores=boxes[:, 4],
max_output_size=1000,
iou_threshold=0.5)
boxes = tf.gather(boxes, selected_indices)
all_boxes = tf.concat([all_boxes, boxes], axis=0)
return img_shape, all_boxes, img_resized, current_scale
img_shape = tf.shape(img)
current_scale = float(net_size) / min_ob_size # find initial scale
img_resized = processed_image(img, current_scale)
all_boxes = tf.zeros([0, 9], tf.float32)
im_shape, all_boxes, img_resized, current_scale = \
tf.while_loop(cond, body, [tf.shape(img_resized), all_boxes, img_resized, current_scale],
shape_invariants=[tf.TensorShape([3]), tf.TensorShape([None, 9]),
tf.TensorShape([None, None, 3]), tf.TensorShape([1])])
if tf.size(all_boxes) == 0:
return tf.cast([], tf.float32), tf.cast([], tf.float32), tf.cast([], tf.float32)
bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
# refine the boxes
x1, x2 = all_boxes[:, 0] + all_boxes[:, 5] * bbw, all_boxes[:, 2] + all_boxes[:, 7] * bbw
y1, y2 = all_boxes[:, 1] + all_boxes[:, 6] * bbh, all_boxes[:, 3] + all_boxes[:, 8] * bbh
boxes = tf.concat([
tf.minimum(x1, x2)[..., tf.newaxis],
tf.minimum(y1, y2)[..., tf.newaxis],
tf.maximum(x1, x2)[..., tf.newaxis],
tf.maximum(y1, y2)[..., tf.newaxis]], axis=-1)
boxes = clip_bbox(boxes, img_shape[:2])
scores = all_boxes[:, 4]
selected_indices = tf.image.non_max_suppression(boxes=xy2yx(boxes),
scores=scores,
max_output_size=1000,
iou_threshold=0.7)
boxes = tf.gather(boxes, selected_indices)
scores = tf.gather(scores, selected_indices)
return boxes, scores, tf.cast([], tf.float32)
class RNet(tf.keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.conv1 = layers.Conv2D(28, 3, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.pool1 = layers.MaxPool2D(3, 2, padding='SAME')
self.conv2 = layers.Conv2D(48, 3, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.pool2 = layers.MaxPool2D(3, 2)
self.conv3 = layers.Conv2D(64, 2, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.fc1 = layers.Dense(128, activation='relu')
self.cls = layers.Dense(2, activation=tf.nn.softmax)
self.box = layers.Dense(4, activation=None)
self.landmark = layers.Dense(10, activation=None)
@tf.function
def call(self, inputs, training=None, mask=None):
x = self.conv1(inputs)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.conv3(x)
x = layers.Flatten()(x)
x = self.fc1(x)
cls_pred = self.cls(x)
box_pred = self.box(x)
landmark_pred = self.landmark(x)
return cls_pred, box_pred, landmark_pred
@tf.function
def detect(self, img, bbox_pnet, net_size=24, batch_size=256,
thresh=0.6, iou_threshold=0.7, max_detect=100):
img_shape = tf.shape(img)
dets = convert_to_square(bbox_pnet)
dets = tf.round(dets)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(dets, img_shape[:2])
num_boxes = tf.shape(dets)[0]
cropped_ims = tf.zeros((0, net_size, net_size, 3), tf.float32)
if tf.size(bbox_pnet) == 0:
return tf.cast([], tf.float32), tf.cast([], tf.float32), tf.cast([], tf.float32)
def cond(i, cropped_ims):
return i < num_boxes
def body(i, cropped_ims):
tmp_img = img[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
tmp_img = tf.pad(tmp_img, [[dy[i], tmph[i] - edy[i] - 1],
[dx[i], tmpw[i] - edx[i] - 1],
[0, 0]])
tmp_img = tf.cast(tmp_img, tf.float32)
tmp_img = (tf.image.resize(tmp_img, (net_size, net_size)) - 127.5) / 128
cropped_ims = tf.concat([cropped_ims, tmp_img[tf.newaxis, ...]], axis=0)
i = i + 1
return i, cropped_ims
i, cropped_ims = tf.while_loop(cond, body, [0, cropped_ims],
[tf.TensorShape(None), tf.TensorShape([None, net_size, net_size, 3])])
scores = tf.zeros((0,), tf.float32)
reg = tf.zeros((0, 4), tf.float32)
data = tf.data.Dataset.from_tensor_slices(cropped_ims).batch(batch_size)
for batch_in in data:
_scores, _reg, _ = self.call(batch_in, training=False)
scores = tf.concat([scores, _scores[..., 1]], axis=0)
reg = tf.concat([reg, _reg], axis=0)
boxes = calibrate_box(dets, reg)
selected_indices = tf.image.non_max_suppression(boxes=xy2yx(boxes),
scores=scores,
max_output_size=max_detect,
iou_threshold=iou_threshold,
score_threshold=thresh)
boxes, scores = tf.gather(boxes, selected_indices), tf.gather(scores, selected_indices)
return boxes, scores, tf.cast([], tf.float32)
class ONet(tf.keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.conv1 = layers.Conv2D(32, 3, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.pool1 = layers.MaxPool2D(3, 2, padding='SAME')
self.conv2 = layers.Conv2D(64, 3, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.pool2 = layers.MaxPool2D(3, 2)
self.conv3 = layers.Conv2D(64, 3, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.pool3 = layers.MaxPool2D(2, 2, padding='SAME')
self.conv4 = layers.Conv2D(128, 2, 1, activation=layers.PReLU(shared_axes=[1, 2]),
kernel_regularizer=tf.keras.regularizers.l2(0.0005))
self.fc1 = layers.Dense(256, activation='relu')
self.cls = layers.Dense(2, activation=tf.nn.softmax)
self.box = layers.Dense(4, activation=None)
self.landmark = layers.Dense(10, activation=None)
@tf.function
def call(self, inputs, training=None, mask=None):
x = self.conv1(inputs)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.pool3(x)
x = self.conv4(x)
x = layers.Flatten()(x)
x = self.fc1(x)
cls_pred = self.cls(x)
box_pred = self.box(x)
landmark_pred = self.landmark(x)
return cls_pred, box_pred, landmark_pred
@tf.function
def detect(self, img, bbox_rnet, net_size=48, batch_size=128,
thresh=0.7, iou_threshold=0.5, max_detect=100):
img_shape = tf.shape(img)
dets = convert_to_square(bbox_rnet)
dets = tf.round(dets)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(dets, img_shape[:2])
num_boxes = tf.shape(dets)[0]
cropped_ims = tf.zeros((0, net_size, net_size, 3), tf.float32)
if tf.size(bbox_rnet) == 0:
return tf.cast([], tf.float32), tf.cast([], tf.float32), tf.cast([], tf.float32)
def cond(i, cropped_ims):
return i < num_boxes
def body(i, cropped_ims):
tmp_img = img[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
tmp_img = tf.pad(tmp_img, [[dy[i], tmph[i] - edy[i] - 1],
[dx[i], tmpw[i] - edx[i] - 1],
[0, 0]])
tmp_img = tf.cast(tmp_img, tf.float32)
tmp_img = (tf.image.resize(tmp_img, (net_size, net_size)) - 127.5) / 128
cropped_ims = tf.concat([cropped_ims, tmp_img[tf.newaxis, ...]], axis=0)
i = i + 1
return i, cropped_ims
i, cropped_ims = tf.while_loop(cond, body, [0, cropped_ims],
[tf.TensorShape(None), tf.TensorShape([None, net_size, net_size, 3])])
scores = tf.zeros((0,), tf.float32)
reg = tf.zeros((0, 4), tf.float32)
data = tf.data.Dataset.from_tensor_slices(cropped_ims).batch(batch_size)
for batch_in in data:
_scores, _reg, _ = self.call(batch_in, training=False)
scores = tf.concat([scores, _scores[..., 1]], axis=0)
reg = tf.concat([reg, _reg], axis=0)
boxes = calibrate_box(dets, reg)
selected_indices, selected_scores = tf.image.non_max_suppression_with_scores(boxes=xy2yx(boxes),
scores=scores,
max_output_size=max_detect,
iou_threshold=iou_threshold,
score_threshold=thresh)
boxes, scores = tf.gather(boxes, selected_indices), tf.gather(scores, selected_indices)
return boxes, scores, tf.cast([], tf.float32)
# w = boxes[:, 2] - boxes[:, 0] + 1
# h = boxes[:, 3] - boxes[:, 1] + 1
# landmark[:, 0::2] = (np.tile(w, (5, 1)) * landmark[:, 0::2].T + np.tile(boxes[:, 0], (5, 1)) - 1).T
# landmark[:, 1::2] = (np.tile(h, (5, 1)) * landmark[:, 1::2].T + np.tile(boxes[:, 1], (5, 1)) - 1).T
# return boxes, scores, np.reshape(landmark, [landmark.shape[0], -1, 2])
| 49.492481
| 113
| 0.533004
|
7c36a664000ccf523c87cde898d88a3d1cb31a9b
| 2,153
|
py
|
Python
|
mInit.py
|
lamecksf/nix
|
5562b6882dff5ba17bb732d8047815b9dc446d1e
|
[
"MIT"
] | null | null | null |
mInit.py
|
lamecksf/nix
|
5562b6882dff5ba17bb732d8047815b9dc446d1e
|
[
"MIT"
] | null | null | null |
mInit.py
|
lamecksf/nix
|
5562b6882dff5ba17bb732d8047815b9dc446d1e
|
[
"MIT"
] | null | null | null |
# NIx GeoIP Tool
# nv 1.4
# Modulo Inicial
import mRouter
version = 'nv1.4'
def header():
hd = '\n'+'%44s'%'.__ __. __ ___ ___\n'
hd += '%44s'%'| \ | | | | \ \ / /\n'
hd += '%44s'%'| \| | | | \ V / \n'
hd += '%44s'%'| . ` | | | > < \n'
hd += '%44s'%'| |\ | | | / . \ \n'
hd += '%45s'%'|__| \__| |__| /__/ \__\ '+version+'\n'
print hd
def helper():
hp = '\n'+'%27s'%'NIx Geo IP Tracking '+version+' (c) 2017 Lameck\n'
hp += '%41s'%'https://github.com/lamecksf/nix/\n\n'
hp += '%37s'%'usage: python nit.py [options]'
hp += '\n\n'+'%51s'%'##########################################\n'
hp += '%39s'%'SET IDENTIFIE PROTOCOL\n'
hp += '%51s'%'##########################################\n\n'
hp += '%40s'%'-i, --ip <iptarget or myip>%15s'%'Target IP\n\n'
hp += '%30s'%'--all %15s'%'All params\n'
hp += '%30s'%'--status %15s'%'Status\n'
hp += '%30s'%'--country %15s'%'Country\n'
hp += '%30s'%'--country_code %15s'%'Country Code\n'
hp += '%30s'%'--region %15s'%'Region\n'
hp += '%30s'%'--city %15s'%'City\n'
hp += '%30s'%'--zipcode %15s'%'Zipcode\n'
hp += '%30s'%'--lat %15s'%'Latitude\n'
hp += '%30s'%'--lon %15s'%'Longitude\n'
hp += '%30s'%'--timezone %15s'%'Timezone\n'
hp += '%30s'%'--isp %15s'%'ISP\n'
hp += '%30s'%'--org %15s'%'Organization\n'
hp += '%30s'%'--as %15s'%'As\n'
hp += '%30s'%'--query %15s'%'Identificate Protocol\n'
hp += '%30s'%'--zpc %15s'%'Postal Address System (Br)\n'
hp += '\n\n'+'%35s'%'nit.py --ip myip --all --zpc\n'
hp += '\n\n'+'%50s'%'##########################################\n'
hp += '%35s'%'SET ZIPCODE\n'
hp += '%50s'%'##########################################\n'
hp += '\n'+'%30s'%'--gzip %15s'%'Get Zipcode Area\n'
hp += '%45s'%'country code >>> %15s'%'Set code of country\n'
hp += '%45s'%'zipcode >>> %15s'%'Set zipecode of area\n'
hp += '\n'+'%20s'%'nit.py --gzip \n'
print hp
def gZip():
print ':: GET ZIP :: --------------------------------- \n\n'
country_code = raw_input('%28s'%" country code >>> ")
zipcode = raw_input('%28s'%" zipcode >>> ")
print '\n'
mRouter.setRote(country_code.strip(),zipcode.strip())
| 35.295082
| 69
| 0.450534
|
da13d0af94deedca6d0af852595621bbce3b6e5d
| 4,084
|
py
|
Python
|
src/dataflow/multiwoz/create_belief_state_tracker_data.py
|
aizwei/task_oriented_dialogue_as_dataflow_synthesis
|
abdfa723e04e393777c0144e60dd967f5842a1ef
|
[
"MIT"
] | 257
|
2020-09-18T23:12:13.000Z
|
2022-03-24T03:24:24.000Z
|
src/dataflow/multiwoz/create_belief_state_tracker_data.py
|
aizwei/task_oriented_dialogue_as_dataflow_synthesis
|
abdfa723e04e393777c0144e60dd967f5842a1ef
|
[
"MIT"
] | 24
|
2020-09-26T15:08:06.000Z
|
2022-03-11T07:46:30.000Z
|
src/dataflow/multiwoz/create_belief_state_tracker_data.py
|
aizwei/task_oriented_dialogue_as_dataflow_synthesis
|
abdfa723e04e393777c0144e60dd967f5842a1ef
|
[
"MIT"
] | 59
|
2020-09-22T05:47:13.000Z
|
2022-03-30T19:03:08.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Semantic Machines\N{TRADE MARK SIGN} software.
Creates BeliefStateTrackerDatum from different sources, e.g., TRADE processed dialogues.
"""
import argparse
import json
from typing import Any, Dict, Iterator, List
from dataflow.core.io_utils import save_jsonl_file
from dataflow.multiwoz.belief_state_tracker_datum import (
BeliefState,
BeliefStateTrackerDatum,
Slot,
sort_slots,
)
from dataflow.multiwoz.ontology import DATAFLOW_SLOT_NAMES_FOR_DOMAIN
from dataflow.multiwoz.trade_dst_utils import (
flatten_belief_state,
get_domain_and_slot_name,
)
def build_belief_state_from_belief_dict(
belief_dict: Dict[str, str], strict: bool
) -> BeliefState:
slots_for_domain: Dict[str, List[Slot]] = dict()
for slot_fullname, slot_value in belief_dict.items():
domain, slot_name = get_domain_and_slot_name(slot_fullname)
if strict:
assert (
slot_name in DATAFLOW_SLOT_NAMES_FOR_DOMAIN[domain]
), 'slot "{}" is not in ontology for domain "{}"'.format(slot_name, domain)
elif slot_name not in DATAFLOW_SLOT_NAMES_FOR_DOMAIN[domain]:
# NOTE: We only print a warning. The slot will be still included in the
# belief state for evaluation.
# If we assume the Belief State Tracker knows the ontology in advance, then
# we can remove the slot from the prediction.
print(
'slot "{}" is not in ontology for domain "{}"'.format(slot_name, domain)
)
if domain not in slots_for_domain:
slots_for_domain[domain] = []
slots_for_domain[domain].append(Slot(name=slot_name, value=slot_value))
sort_slots(slots_for_domain)
return BeliefState(slots_for_domain=slots_for_domain)
def build_belief_state_from_trade_turn(trade_turn: Dict[str, Any]) -> BeliefState:
"""Returns a BeliefState object from a TRADE turn."""
# do not drop any slots or change any slot values
belief_dict = flatten_belief_state(
belief_state=trade_turn["belief_state"],
keep_all_domains=True,
remove_none=False,
)
return build_belief_state_from_belief_dict(belief_dict=belief_dict, strict=True)
def build_belief_state_tracker_data_from_trade_dialogue(
trade_dialogue: Dict[str, Any],
) -> Iterator[BeliefStateTrackerDatum]:
for trade_turn in trade_dialogue["dialogue"]:
yield BeliefStateTrackerDatum(
dialogue_id=trade_dialogue["dialogue_idx"],
turn_index=int(trade_turn["turn_idx"]),
belief_state=build_belief_state_from_trade_turn(trade_turn),
prev_agent_utterance=trade_turn["system_transcript"],
curr_user_utterance=trade_turn["transcript"],
)
def main(trade_data_file: str, belief_state_tracker_data_file: str) -> None:
with open(trade_data_file) as fp:
trade_dialogues = json.loads(fp.read().strip())
belief_state_tracker_data = [
datum
for trade_dialogue in trade_dialogues
for datum in build_belief_state_tracker_data_from_trade_dialogue(trade_dialogue)
]
save_jsonl_file(
data=belief_state_tracker_data,
data_jsonl=belief_state_tracker_data_file,
remove_null=True,
)
def add_arguments(argument_parser: argparse.ArgumentParser) -> None:
argument_parser.add_argument(
"--trade_data_file", help="TRADE processed dialogues file",
)
argument_parser.add_argument(
"--belief_state_tracker_data_file",
help="output jsonl file of BeliefStateTrackerDatum",
)
if __name__ == "__main__":
cmdline_parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter
)
add_arguments(cmdline_parser)
args = cmdline_parser.parse_args()
print("Semantic Machines\N{TRADE MARK SIGN} software.")
main(
trade_data_file=args.trade_data_file,
belief_state_tracker_data_file=args.belief_state_tracker_data_file,
)
| 35.824561
| 88
| 0.71474
|
8e0051cd340edee18a15e2cba44ace462e6ca812
| 1,864
|
py
|
Python
|
problems/EE/auto/problem106_EE.py
|
sunandita/ICAPS_Summer_School_RAE_2020
|
a496b62185bcfdd2c76eb7986ae99cfa85708d28
|
[
"BSD-3-Clause"
] | 5
|
2020-10-15T14:40:03.000Z
|
2021-08-20T17:45:41.000Z
|
problems/EE/auto/problem106_EE.py
|
sunandita/ICAPS_Summer_School_RAE_2020
|
a496b62185bcfdd2c76eb7986ae99cfa85708d28
|
[
"BSD-3-Clause"
] | null | null | null |
problems/EE/auto/problem106_EE.py
|
sunandita/ICAPS_Summer_School_RAE_2020
|
a496b62185bcfdd2c76eb7986ae99cfa85708d28
|
[
"BSD-3-Clause"
] | 2
|
2020-10-15T07:06:14.000Z
|
2020-10-15T17:33:01.000Z
|
__author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
DURATION.COUNTER = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4', 'z5', 'z6']
rv.EDGES = {'base': {'z1': 50, 'z3': 50, 'z4': 40, 'z6': 40}, 'z1': {'base': 50, 'z2': 20}, 'z2': {'z1': 20, 'z3': 20}, 'z3': {'z2': 20, 'base': 50}, 'z4': {'z3': 90, 'z5': 35}, 'z5': {'z4': 35, 'z6': 35}, 'z6': {'base': 40, 'z5': 35}}
def ResetState():
state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
state.charge = { 'UAV': 50, 'r1': 50, 'r2': 80}
state.data = { 'UAV': 1, 'r1': 3, 'r2': 3}
state.pos = {'c1': 'base', 'e1': 'base', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base'}
state.load = {'r1': NIL, 'r2': NIL, 'UAV': NIL}
state.storm = {'active': True}
tasks = {
3: [['doActivities', 'UAV', [['survey', 'z2'], ['survey', 'z1']]]],
5: [['doActivities', 'r1', [['process', 'z4'], ['screen', 'z3'], ['process', 'z6']]]],
6: [['handleEmergency', 'r2', 'z5']],
}
eventsEnv = {
6: [alienSpotted, ['z2']]
}
| 30.064516
| 235
| 0.482296
|
54ffb752f86f07693154b01973a358bc36da12ad
| 1,259
|
py
|
Python
|
test/test_site.py
|
Ayase-252/ayase-blog
|
0123f4f9691e6c79f016544bd7c1479895486646
|
[
"FSFAP"
] | null | null | null |
test/test_site.py
|
Ayase-252/ayase-blog
|
0123f4f9691e6c79f016544bd7c1479895486646
|
[
"FSFAP"
] | null | null | null |
test/test_site.py
|
Ayase-252/ayase-blog
|
0123f4f9691e6c79f016544bd7c1479895486646
|
[
"FSFAP"
] | null | null | null |
"""
Test cases for site model
"""
from django.test import TestCase
from ..models.site import Site
class SiteMethodTests(TestCase):
"""
Test class for site model
Exempted method:
"""
def test_get_site_info_normal_cond(self):
"""
        Test get_site_info method in the normal operating condition,
        namely, when there is only a single instance in the Site table.
"""
Site.objects.create(name='test web',
tagline='may there be no bug')
self.assertEqual(Site.get_site_info(),
{'name': 'test web',
'tagline': 'may there be no bug'})
def test_get_site_info_multiple_instance_cond(self):
"""
        Test get_site_info method in the condition where multiple
        instances exist in the Site table.
Method should return the first instance.
"""
Site.objects.create(name='test web',
tagline='may there be no bug')
Site.objects.create(name='test web 2',
tagline='no no do not return this')
self.assertEqual(Site.get_site_info(),
{'name': 'test web',
'tagline': 'may there be no bug'})
| 30.707317
| 64
| 0.55838
|
5688c853d6d8256b45be8be3dde2113b78d82a36
| 1,343
|
py
|
Python
|
tests/many_to_many/models.py
|
dwightgunning/django
|
9e399e15fbf03507fa54e4bb20ed6f1b0d817b83
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 3
|
2015-09-26T13:33:07.000Z
|
2020-03-08T07:34:38.000Z
|
tests/many_to_many/models.py
|
dwightgunning/django
|
9e399e15fbf03507fa54e4bb20ed6f1b0d817b83
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2021-03-24T12:21:05.000Z
|
2021-03-24T12:31:52.000Z
|
tests/many_to_many/models.py
|
dwightgunning/django
|
9e399e15fbf03507fa54e4bb20ed6f1b0d817b83
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 15
|
2016-01-08T14:28:41.000Z
|
2019-04-19T08:33:31.000Z
|
"""
Many-to-many relationships
To define a many-to-many relationship, use ``ManyToManyField()``.
In this example, an ``Article`` can be published in multiple ``Publication``
objects, and a ``Publication`` has multiple ``Article`` objects.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Publication(models.Model):
title = models.CharField(max_length=30)
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
# Assign a unicode string as name to make sure the intermediary model is
# correctly created. Refs #20207
publications = models.ManyToManyField(Publication, name='publications')
def __str__(self):
return self.headline
class Meta:
ordering = ('headline',)
# Models to test correct related_name inheritance
class AbstractArticle(models.Model):
class Meta:
abstract = True
ordering = ('title',)
publications = models.ManyToManyField(Publication, name='publications', related_name='+')
class InheritedArticleA(AbstractArticle):
pass
class InheritedArticleB(AbstractArticle):
pass
| 24.418182
| 93
| 0.726731
|
79c87069a7de3a0031b84ece57ba9c912f146abd
| 2,351
|
py
|
Python
|
dynamic_json/json_wrappers.py
|
childsish/dynamic-json
|
3a37caba373a4da8c4eb40c5ac11c88e09875451
|
[
"MIT"
] | 1
|
2021-02-08T16:41:55.000Z
|
2021-02-08T16:41:55.000Z
|
dynamic_json/json_wrappers.py
|
childsish/dynamic-json
|
3a37caba373a4da8c4eb40c5ac11c88e09875451
|
[
"MIT"
] | null | null | null |
dynamic_json/json_wrappers.py
|
childsish/dynamic-json
|
3a37caba373a4da8c4eb40c5ac11c88e09875451
|
[
"MIT"
] | null | null | null |
from collections.abc import Mapping, Sequence
class JsonDict(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
super().__setattr__('_root', self)
def __getattr__(self, key):
if key in self:
return self[key]
return super().__getattribute__(key)
def __setattr__(self, key, value):
self[key] = value
def __getitem__(self, key):
v = super().__getitem__(key)
if isinstance(v, str):
v = v.format(**super().__getattribute__('_root'))
return v
def __setitem__(self, key, value):
if isinstance(value, Mapping) and not isinstance(value, JsonDict):
value = JsonDict(value)
value.set_as_root(super().__getattribute__('_root'))
elif isinstance(value, Sequence) and not isinstance(value, (str, JsonList)):
value = JsonList(value)
value.set_as_root(super().__getattribute__('_root'))
super().__setitem__(key, value)
def set_as_root(self, root=None):
if root is not None:
super().__setattr__('_root', root)
for k, v in self.items():
if hasattr(v, 'set_as_root'):
v.set_as_root(super().__getattribute__('_root'))
class JsonList(list):
ROOT_NAME = 'root'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
super().__setattr__('_root', {JsonList.ROOT_NAME: self})
def __getitem__(self, key):
v = super().__getitem__(key)
if isinstance(v, str):
v = v.format(**super().__getattribute__('_root'))
return v
def __setitem__(self, key, value):
if isinstance(value, Mapping) and not isinstance(value, JsonDict):
value = JsonDict(value)
value.set_as_root(super().__getattribute__('_root'))
elif isinstance(value, Sequence) and not isinstance(value, (str, JsonList)):
value = JsonList(value)
value.set_as_root(super().__getattribute__('_root'))
super().__setitem__(key, value)
def set_as_root(self, root=None):
if root is not None:
super().__setattr__('_root', root)
for v in self:
if hasattr(v, 'set_as_root'):
v.set_as_root(super().__getattribute__('_root'))
| 34.072464
| 84
| 0.594641
|
dac5cf53e757b85a1153821cd15558a814db53b5
| 12,103
|
py
|
Python
|
google_compute_engine/accounts/accounts_utils.py
|
jrw972/compute-image-packages
|
f5b2ae581c4bb2d02d4d86918a27baa81dd30861
|
[
"Apache-2.0"
] | null | null | null |
google_compute_engine/accounts/accounts_utils.py
|
jrw972/compute-image-packages
|
f5b2ae581c4bb2d02d4d86918a27baa81dd30861
|
[
"Apache-2.0"
] | null | null | null |
google_compute_engine/accounts/accounts_utils.py
|
jrw972/compute-image-packages
|
f5b2ae581c4bb2d02d4d86918a27baa81dd30861
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for provisioning or deprovisioning a Linux user account."""
import grp
import os
import pwd
import re
import shutil
import subprocess
import tempfile
from google_compute_engine import constants
from google_compute_engine import file_utils
USER_REGEX = re.compile(r'\A[A-Za-z0-9._][A-Za-z0-9._-]{0,31}\Z')
DEFAULT_GROUPADD_CMD = 'groupadd {group}'
DEFAULT_USERADD_CMD = 'useradd -m -s /bin/bash -p * {user}'
DEFAULT_USERDEL_CMD = 'userdel -r {user}'
DEFAULT_USERMOD_CMD = 'usermod -G {groups} {user}'
class AccountsUtils(object):
"""System user account configuration utilities."""
google_comment = '# Added by Google'
def __init__(
self, logger, groups=None, remove=False, groupadd_cmd=None,
useradd_cmd=None, userdel_cmd=None, usermod_cmd=None):
"""Constructor.
Args:
logger: logger object, used to write to SysLog and serial port.
groups: string, a comma separated list of groups.
remove: bool, True if deprovisioning a user should be destructive.
groupadd_cmd: string, command to add a new group.
useradd_cmd: string, command to create a new user.
userdel_cmd: string, command to delete a user.
usermod_cmd: string, command to modify user's groups.
"""
self.groupadd_cmd = groupadd_cmd or DEFAULT_GROUPADD_CMD
self.useradd_cmd = useradd_cmd or DEFAULT_USERADD_CMD
self.userdel_cmd = userdel_cmd or DEFAULT_USERDEL_CMD
self.usermod_cmd = usermod_cmd or DEFAULT_USERMOD_CMD
self.logger = logger
self.google_sudoers_group = 'google-sudoers'
self.google_sudoers_file = (
constants.LOCALBASE + '/etc/sudoers.d/google_sudoers')
self.google_users_dir = constants.LOCALBASE + '/var/lib/google'
self.google_users_file = os.path.join(self.google_users_dir, 'google_users')
self._CreateSudoersGroup()
self.groups = groups.split(',') if groups else []
self.groups.append(self.google_sudoers_group)
self.groups = list(filter(self._GetGroup, self.groups))
self.remove = remove
def _GetGroup(self, group):
"""Retrieve a Linux group.
Args:
group: string, the name of the Linux group to retrieve.
Returns:
grp.struct_group, the Linux group or None if it does not exist.
"""
try:
return grp.getgrnam(group)
except KeyError:
return None
def _CreateSudoersGroup(self):
"""Create a Linux group for Google added sudo user accounts."""
if not self._GetGroup(self.google_sudoers_group):
try:
command = self.groupadd_cmd.format(group=self.google_sudoers_group)
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not create the sudoers group. %s.', str(e))
if not os.path.exists(self.google_sudoers_file):
try:
with open(self.google_sudoers_file, 'w') as group:
message = '%{0} ALL=(ALL:ALL) NOPASSWD:ALL'.format(
self.google_sudoers_group)
group.write(message)
except IOError as e:
self.logger.error(
'Could not write sudoers file. %s. %s',
self.google_sudoers_file, str(e))
return
file_utils.SetPermissions(
self.google_sudoers_file, mode=0o440, uid=0, gid=0)
def _GetUser(self, user):
"""Retrieve a Linux user account.
Args:
user: string, the name of the Linux user account to retrieve.
Returns:
pwd.struct_passwd, the Linux user or None if it does not exist.
"""
try:
return pwd.getpwnam(user)
except KeyError:
return None
def _AddUser(self, user):
"""Configure a Linux user account.
Args:
user: string, the name of the Linux user account to create.
Returns:
bool, True if user creation succeeded.
"""
self.logger.info('Creating a new user account for %s.', user)
command = self.useradd_cmd.format(user=user)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not create user %s. %s.', user, str(e))
return False
else:
self.logger.info('Created user account %s.', user)
return True
def _UpdateUserGroups(self, user, groups):
"""Update group membership for a Linux user.
Args:
user: string, the name of the Linux user account.
groups: list, the group names to add the user as a member.
Returns:
bool, True if user update succeeded.
"""
groups = ','.join(groups)
self.logger.debug('Updating user %s with groups %s.', user, groups)
command = self.usermod_cmd.format(user=user, groups=groups)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not update user %s. %s.', user, str(e))
return False
else:
self.logger.debug('Updated user account %s.', user)
return True
def _UpdateAuthorizedKeys(self, user, ssh_keys):
"""Update the authorized keys file for a Linux user with a list of SSH keys.
Args:
user: string, the name of the Linux user account.
ssh_keys: list, the SSH key strings associated with the user.
Raises:
IOError, raised when there is an exception updating a file.
OSError, raised when setting permissions or writing to a read-only
file system.
"""
pw_entry = self._GetUser(user)
if not pw_entry:
return
uid = pw_entry.pw_uid
gid = pw_entry.pw_gid
home_dir = pw_entry.pw_dir
ssh_dir = os.path.join(home_dir, '.ssh')
# Not all sshd's support multiple authorized_keys files so we have to
# share one with the user. We add each of our entries as follows:
# # Added by Google
# authorized_key_entry
authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys')
# Do not write to the authorized keys file if it is a symlink.
if os.path.islink(ssh_dir) or os.path.islink(authorized_keys_file):
self.logger.warning(
'Not updating authorized keys for user %s. File is a symlink.', user)
return
# Create home directory if it does not exist. This can happen if _GetUser
# (getpwnam) returns non-local user info (e.g., from LDAP).
if not os.path.exists(home_dir):
file_utils.SetPermissions(home_dir, mode=0o755, uid=uid, gid=gid,
mkdir=True)
# Create ssh directory if it does not exist.
file_utils.SetPermissions(ssh_dir, mode=0o700, uid=uid, gid=gid, mkdir=True)
# Create entry in the authorized keys file.
prefix = self.logger.name + '-'
with tempfile.NamedTemporaryFile(
mode='w', prefix=prefix, delete=True) as updated_keys:
updated_keys_file = updated_keys.name
if os.path.exists(authorized_keys_file):
lines = open(authorized_keys_file).readlines()
else:
lines = []
google_lines = set()
for i, line in enumerate(lines):
if line.startswith(self.google_comment):
google_lines.update([i, i+1])
# Write user's authorized key entries.
for i, line in enumerate(lines):
if i not in google_lines and line:
line += '\n' if not line.endswith('\n') else ''
updated_keys.write(line)
# Write the Google authorized key entries at the end of the file.
# Each entry is preceded by '# Added by Google'.
for ssh_key in ssh_keys:
ssh_key += '\n' if not ssh_key.endswith('\n') else ''
updated_keys.write('%s\n' % self.google_comment)
updated_keys.write(ssh_key)
# Write buffered data to the updated keys file without closing it and
# update the Linux user's authorized keys file.
updated_keys.flush()
shutil.copy(updated_keys_file, authorized_keys_file)
file_utils.SetPermissions(
authorized_keys_file, mode=0o600, uid=uid, gid=gid)
def _RemoveAuthorizedKeys(self, user):
"""Remove a Linux user account's authorized keys file to prevent login.
Args:
user: string, the Linux user account to remove access.
"""
pw_entry = self._GetUser(user)
if not pw_entry:
return
home_dir = pw_entry.pw_dir
authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')
if os.path.exists(authorized_keys_file):
try:
os.remove(authorized_keys_file)
except OSError as e:
message = 'Could not remove authorized keys for user %s. %s.'
self.logger.warning(message, user, str(e))
def GetConfiguredUsers(self):
"""Retrieve the list of configured Google user accounts.
Returns:
      list, the username strings of users configured by Google.
"""
if os.path.exists(self.google_users_file):
users = open(self.google_users_file).readlines()
else:
users = []
return [user.strip() for user in users]
def SetConfiguredUsers(self, users):
"""Set the list of configured Google user accounts.
Args:
users: list, the username strings of the Linux accounts.
"""
prefix = self.logger.name + '-'
with tempfile.NamedTemporaryFile(
mode='w', prefix=prefix, delete=True) as updated_users:
updated_users_file = updated_users.name
for user in users:
updated_users.write(user + '\n')
updated_users.flush()
if not os.path.exists(self.google_users_dir):
os.makedirs(self.google_users_dir)
shutil.copy(updated_users_file, self.google_users_file)
file_utils.SetPermissions(self.google_users_file, mode=0o600, uid=0, gid=0)
def UpdateUser(self, user, ssh_keys):
"""Update a Linux user with authorized SSH keys.
Args:
user: string, the name of the Linux user account.
ssh_keys: list, the SSH key strings associated with the user.
Returns:
bool, True if the user account updated successfully.
"""
if not bool(USER_REGEX.match(user)):
self.logger.warning('Invalid user account name %s.', user)
return False
if not self._GetUser(user):
# User does not exist. Attempt to create the user and add them to the
# appropriate user groups.
if not (self._AddUser(user) and
self._UpdateUserGroups(user, self.groups)):
return False
# Don't try to manage account SSH keys with a shell set to disable
# logins. This helps avoid problems caused by operator and root sharing
# a home directory in CentOS and RHEL.
pw_entry = self._GetUser(user)
if pw_entry and os.path.basename(pw_entry.pw_shell) == 'nologin':
message = 'Not updating user %s. User set `nologin` as login shell.'
self.logger.debug(message, user)
return True
try:
self._UpdateAuthorizedKeys(user, ssh_keys)
except (IOError, OSError) as e:
message = 'Could not update the authorized keys file for user %s. %s.'
self.logger.warning(message, user, str(e))
return False
else:
return True
def RemoveUser(self, user):
"""Remove a Linux user account.
Args:
user: string, the Linux user account to remove.
"""
self.logger.info('Removing user %s.', user)
if self.remove:
command = self.userdel_cmd.format(user=user)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not remove user %s. %s.', user, str(e))
else:
self.logger.info('Removed user account %s.', user)
self._RemoveAuthorizedKeys(user)
| 34.679083
| 80
| 0.677022
|
36ce6d5b507185884de6d8ae99eeded2e03968aa
| 2,995
|
py
|
Python
|
Algorithms/resampling.py
|
OIEIEIO/earthengine-py-notebooks
|
5d6c5cdec0c73bf02020ee17d42c9e30d633349f
|
[
"MIT"
] | 1,008
|
2020-01-27T02:03:18.000Z
|
2022-03-24T10:42:14.000Z
|
Algorithms/resampling.py
|
rafatieppo/earthengine-py-notebooks
|
99fbc4abd1fb6ba41e3d8a55f8911217353a3237
|
[
"MIT"
] | 8
|
2020-02-01T20:18:18.000Z
|
2021-11-23T01:48:02.000Z
|
Algorithms/resampling.py
|
rafatieppo/earthengine-py-notebooks
|
99fbc4abd1fb6ba41e3d8a55f8911217353a3237
|
[
"MIT"
] | 325
|
2020-01-27T02:03:36.000Z
|
2022-03-25T20:33:33.000Z
|
# %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Algorithms/resampling.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/resampling.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/resampling.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('Installing geemap ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# %%
import ee
import geemap
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = geemap.Map(center=[40,-100], zoom=4)
Map
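# %%
"""
The next cell is a small illustrative addition (it is not part of the original script): it sketches how an extra basemap can be layered in with `Map.add_basemap()`. The `'HYBRID'` key is assumed to be among the basemaps bundled with geemap.
"""
# %%
Map.add_basemap('HYBRID')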
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
# Load a Landsat image over San Francisco, California, USA.
landsat = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20160323')
# Set display and visualization parameters.
Map.setCenter(-122.37383, 37.6193, 15)
visParams = {'bands': ['B4', 'B3', 'B2'], 'max': 0.3}
# Display the Landsat image using the default nearest neighbor resampling
# when reprojecting to Mercator for the Code Editor map.
Map.addLayer(landsat, visParams, 'original image')
# Force the next reprojection on this image to use bicubic resampling.
resampled = landsat.resample('bicubic')
# Display the Landsat image using bicubic resampling.
Map.addLayer(resampled, visParams, 'resampled')
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| 41.027397
| 457
| 0.736227
|
964c8d0f76dbc1c23dc16ba458fb5eb50b262c39
| 859
|
py
|
Python
|
tests/test_tapioca_amarilis.py
|
imoveisamarilis/tapioca-amarilis
|
86b5a34e2a4a47960f25d011b6f0fd6027129efb
|
[
"MIT"
] | null | null | null |
tests/test_tapioca_amarilis.py
|
imoveisamarilis/tapioca-amarilis
|
86b5a34e2a4a47960f25d011b6f0fd6027129efb
|
[
"MIT"
] | null | null | null |
tests/test_tapioca_amarilis.py
|
imoveisamarilis/tapioca-amarilis
|
86b5a34e2a4a47960f25d011b6f0fd6027129efb
|
[
"MIT"
] | null | null | null |
import os
import unittest
from tapioca_amarilis import AmarilisV1
class TestTapiocaAmarilisV1(unittest.TestCase):
def test_resource_access(self):
api_client = AmarilisV1(
user=os.getenv('AMARILIS_USER', default=''),
password=os.getenv('AMARILIS_PASSWORD', default=''),
)
resource = api_client.bookings()
assert resource.data == 'https://pms.imoveisamarilis.com.br/api/v1/bookings/'
def test_custom_api_root(self):
api_client = AmarilisV1(
user=os.getenv('AMARILIS_USER', default=''),
password=os.getenv('AMARILIS_PASSWORD', default=''),
host='http://localhost:8000/api/v1/'
)
resource = api_client.bookings()
assert resource.data == 'http://localhost:8000/api/v1/bookings/'
if __name__ == '__main__':
unittest.main()
| 30.678571
| 85
| 0.643772
|
884ffdcab6c7edb89ee137baeacc53ffd848181b
| 870
|
py
|
Python
|
users/migrations/0003_auto_20201007_1318.py
|
ominicomdevgt/LaChalupa
|
3e7ea16fa97aa311bd3513dc463c30c37dfb5761
|
[
"MIT"
] | null | null | null |
users/migrations/0003_auto_20201007_1318.py
|
ominicomdevgt/LaChalupa
|
3e7ea16fa97aa311bd3513dc463c30c37dfb5761
|
[
"MIT"
] | null | null | null |
users/migrations/0003_auto_20201007_1318.py
|
ominicomdevgt/LaChalupa
|
3e7ea16fa97aa311bd3513dc463c30c37dfb5761
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-07 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20201007_1301'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='DNI',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='profile',
name='birthday',
field=models.DateField(null=True),
),
migrations.AlterField(
model_name='profile',
name='direction',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='profile',
name='telephone',
field=models.IntegerField(null=True),
),
]
| 25.588235
| 74
| 0.556322
|
0ee0ee8c453387729fcc020de13edc691840e6fc
| 604
|
py
|
Python
|
bandit/plugins/shutil_rmtree.py
|
lyvd/bandit4mal
|
b1ca9eb773ebed84d04cfeb589d028af532d1d11
|
[
"Apache-2.0"
] | null | null | null |
bandit/plugins/shutil_rmtree.py
|
lyvd/bandit4mal
|
b1ca9eb773ebed84d04cfeb589d028af532d1d11
|
[
"Apache-2.0"
] | null | null | null |
bandit/plugins/shutil_rmtree.py
|
lyvd/bandit4mal
|
b1ca9eb773ebed84d04cfeb589d028af532d1d11
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
import bandit
from bandit.core import test_properties as test
@test.test_id("B337")
@test.checks("Call")
def shutil_rmtree(context):
if context.is_module_imported_like("shutil"):
if context.call_function_name_qual.endswith("rmtree"):
return bandit.Issue(
severity=bandit.HIGH,
confidence=bandit.MEDIUM,
text="shutil_rmtree",
lineno=context.get_lineno_for_call_arg("debug"),
)
| 26.26087
| 64
| 0.645695
|
eb4c55e6a66619a363a04fde72fc72a62ba8d5e6
| 528
|
py
|
Python
|
web/core/migrations/0165_alter_lottransaction_delivery_status.py
|
MTES-MCT/biocarburants
|
ff084916e18cdbdc41400f36fa6cc76a5e05900e
|
[
"MIT"
] | 4
|
2020-03-22T18:13:12.000Z
|
2021-01-25T10:33:31.000Z
|
web/core/migrations/0165_alter_lottransaction_delivery_status.py
|
MTES-MCT/carbure
|
2876756b760ab4866fa783bb40e61a046eebb1ab
|
[
"MIT"
] | 20
|
2020-07-06T14:33:14.000Z
|
2022-03-15T16:54:17.000Z
|
web/core/migrations/0165_alter_lottransaction_delivery_status.py
|
MTES-MCT/biocarburants
|
ff084916e18cdbdc41400f36fa6cc76a5e05900e
|
[
"MIT"
] | 4
|
2020-04-03T12:19:12.000Z
|
2021-06-15T12:20:57.000Z
|
# Generated by Django 3.2 on 2021-06-18 09:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0164_lotv2_year'),
]
operations = [
migrations.AlterField(
model_name='lottransaction',
name='delivery_status',
field=models.CharField(choices=[('N', 'En attente'), ('A', 'Accepté'), ('R', 'Refusé'), ('AC', 'À corriger'), ('AA', 'Corrigé'), ('F', 'Déclaré')], default='N', max_length=64),
),
]
| 27.789474
| 188
| 0.575758
|
40a4a91178f64360a7381d22e6eedb9e6c9f827f
| 1,480
|
py
|
Python
|
exercises/code/simplify-conditionals/check_cholesterol_levels.py
|
matthewwei35/ACS-4931-Testing-and-Architecture
|
e1810ccf89fb2f136e3ba7ec138fa4140d698c4d
|
[
"MIT"
] | null | null | null |
exercises/code/simplify-conditionals/check_cholesterol_levels.py
|
matthewwei35/ACS-4931-Testing-and-Architecture
|
e1810ccf89fb2f136e3ba7ec138fa4140d698c4d
|
[
"MIT"
] | null | null | null |
exercises/code/simplify-conditionals/check_cholesterol_levels.py
|
matthewwei35/ACS-4931-Testing-and-Architecture
|
e1810ccf89fb2f136e3ba7ec138fa4140d698c4d
|
[
"MIT"
] | null | null | null |
# By Kami Bigdely
# Decompose conditional
# Reference: https://www.healthline.com/health/high-cholesterol/levels-by-age
# Blood test analysis program
total_cholesterol = 70
ldl = 30
triglyceride = 120
def check_for_good_cholesterol(total_cholesterol):
    return total_cholesterol < 200 and ldl < 100 and triglyceride < 150
def check_for_high_cholesterol(total_cholesterol):
    return total_cholesterol > 240 or ldl > 160 or triglyceride >= 200
def check_for_tlc_cholesterol(total_cholesterol):
    return 200 < total_cholesterol < 240 or 130 < ldl < 160 or 150 <= triglyceride < 200
if (check_for_good_cholesterol(total_cholostrol)):
# good level
print('*** Good level of cholestrol ***')
elif (check_for_high_cholesterol(total_cholostrol)):
# High cholestrol level
print('*** High cholestrol level ***')
print('start taking pills such as statins')
print('start TLC diet')
elif check_for_tlc_cholesterol(total_cholostrol):
#TLC_diet
print('*** Borderline to moderately elevated ***')
print("Start TLC diet")
print("Under this meal plan, only 7 percent of your daily calories \nshould come from saturated fat.")
print('Some foods help your digestive tract absorb less cholesterol. For example,\nyour doctor may encourage you to eat more:')
print('oats, barley, and other whole grains.')
print('fruits such as apples, pears, bananas, and oranges.')
else:
print('Error: unhandled case.')
| 37
| 131
| 0.739865
|
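A quick illustrative check of the predicates extracted in the file above; this is not part of the original exercise and it assumes the module-level ldl and triglyceride values from that script are in scope:
assert check_for_good_cholesterol(180)   # 180 < 200, ldl 30 < 100, triglyceride 120 < 150
assert check_for_high_cholesterol(250)   # 250 > 240
assert check_for_tlc_cholesterol(210)    # 200 < 210 < 240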
47be8a95a596bcb1a81f61dcfd0b41989c30c01a
| 7,728
|
py
|
Python
|
ingestion/src/metadata/ingestion/source/atlas/atlas_lineage.py
|
chaitrarao4/OpenMetadata
|
c28f7ac22263fd325a1124e9758f97cc9ac9d5d3
|
[
"Apache-2.0"
] | null | null | null |
ingestion/src/metadata/ingestion/source/atlas/atlas_lineage.py
|
chaitrarao4/OpenMetadata
|
c28f7ac22263fd325a1124e9758f97cc9ac9d5d3
|
[
"Apache-2.0"
] | null | null | null |
ingestion/src/metadata/ingestion/source/atlas/atlas_lineage.py
|
chaitrarao4/OpenMetadata
|
c28f7ac22263fd325a1124e9758f97cc9ac9d5d3
|
[
"Apache-2.0"
] | null | null | null |
import logging
import uuid
from dataclasses import dataclass, field
from typing import Iterable, List
from metadata.generated.schema.entity.data.table import Column, Table
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.api.common import WorkflowContext
from metadata.ingestion.api.source import Source, SourceStatus
from metadata.ingestion.models.ometa_table_db import OMetaDatabaseAndTable
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
from metadata.utils.atlas_client import AtlasClient, AtlasSourceConfig
from metadata.utils.column_helpers import get_column_type
from metadata.utils.helpers import get_database_service_or_create
from metadata.generated.schema.type.entityLineage import EntitiesEdge
from metadata.generated.schema.entity.data.database import Database
from metadata.generated.schema.entity.data.pipeline import Pipeline
from metadata.generated.schema.api.lineage.addLineage import AddLineage
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.models.table_metadata import Chart, Dashboard
logger: logging.Logger = logging.getLogger(__name__)
@dataclass
class AtlasSourceStatus(SourceStatus):
tables_scanned: List[str] = field(default_factory=list)
filtered: List[str] = field(default_factory=list)
def table_scanned(self, table: str) -> None:
self.tables_scanned.append(table)
def dropped(self, topic: str) -> None:
self.filtered.append(topic)
@dataclass
class AtlasSource(Source):
config: AtlasSourceConfig
atlas_client: AtlasClient
status: AtlasSourceStatus
tables: List[str]
def __init__(
self,
config: AtlasSourceConfig,
metadata_config: MetadataServerConfig,
ctx: WorkflowContext,
):
super().__init__(ctx)
self.config = config
self.metadata_config = metadata_config
self.status = AtlasSourceStatus()
self.service = get_database_service_or_create(config, metadata_config)
self.atlas_client = AtlasClient(config)
@classmethod
def create(cls, config_dict, metadata_config_dict, ctx):
config = AtlasSourceConfig.parse_obj(config_dict)
metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)
return cls(config, metadata_config, ctx)
def prepare(self):
self.tables = self.atlas_client.list_entities('rdbms_table')
def next_record(self):
for table in self.tables:
table_entity = self.atlas_client.get_table(table)
yield from self._parse_table_entity(table_entity)
yield from self.ingest_lineage(table)
def close(self):
pass
def get_status(self) -> SourceStatus:
return self.status
def _parse_table_entity(self, table_entity):
tbl_entities = table_entity["entities"]
for tbl_entity in tbl_entities:
try:
tbl_columns = self._parse_table_columns(table_entity, tbl_entity)
tbl_attrs = tbl_entity["attributes"]
db_entity = tbl_entity["relationshipAttributes"]["rdbms_db"]
db = self._get_database(db_entity["displayText"])
table_name = tbl_attrs["name"]
fqn = f"{self.config.service_name}.{db.name.__root__}.{table_name}"
tbl_description = (
tbl_attrs["description"]
if tbl_attrs["description"] is not None
else " "
)
om_table_entity = Table(
id=uuid.uuid4(),
name=table_name,
description=tbl_description,
fullyQualifiedName=fqn,
columns=tbl_columns,
)
table_and_db = OMetaDatabaseAndTable(table=om_table_entity, database=db)
yield table_and_db
except Exception as e:
logger.error(e)
logger.error(f"Failed to parse {table_entity}")
pass
def _parse_table_columns(self, table_response, tbl_entity) -> List[Column]:
om_cols = []
col_entities = tbl_entity["relationshipAttributes"]["rdbms_columns"]
referred_entities = table_response["referredEntities"]
dataset_name = tbl_entity["attributes"]["name"]
ordinal_pos = 1
for col in col_entities:
try:
col_guid = col["guid"]
col_ref_entity = referred_entities[col_guid]
column = col_ref_entity["attributes"]
data_type_display = tbl_entity["attributes"]["name"]
logger.info(column["data_type"])
logger.info(get_column_type(
self.status, dataset_name, column["data_type"].upper()
))
col_data_length = "1"
om_column = Column(
name=column["name"],
description=column.get("comment", None),
dataType=get_column_type(
self.status, dataset_name, column["data_type"].upper()
),
dataTypeDisplay="{}({})".format(column["data_type"], "1"),
dataLength=col_data_length,
ordinalPosition=ordinal_pos,
)
om_cols.append(om_column)
except Exception as err:
logger.error(f"{err}")
continue
return om_cols
def _get_database(self, database_name: str) -> Database:
return Database(
name=database_name,
service=EntityReference(id=self.service.id, type=self.config.service_type),
)
def ingest_lineage(self, source_guid) -> Iterable[AddLineage]:
lineageResponse = self.atlas_client.get_lineage(source_guid)
lineage_relations = lineageResponse["relations"]
tbl_entity = self.atlas_client.get_table(lineageResponse["baseEntityGuid"])
tbl_attrs = tbl_entity["attributes"]
db_entity = tbl_entity["relationshipAttributes"]["rdbms_db"]
db = self._get_database(db_entity["displayText"])
table_name = tbl_attrs["name"]
fqn = f"{self.config.service_name}.{db.name.__root__}.{table_name}"
from_entity_ref = self.get_lineage_entity_ref(fqn, self.metadata_config, "table")
for edge in lineage_relations:
if lineageResponse["guidEntityMap"][edge["toEntityId"]]["typeName"] == "processor" :
continue
tbl_entity = self.atlas_client.get_table(edge["toEntityId"])
tbl_attrs = tbl_entity["attributes"]
db_entity = tbl_entity["relationshipAttributes"]["rdbms_db"]
db = self._get_database(db_entity["displayText"])
table_name = tbl_attrs["name"]
fqn = f"{self.config.service_name}.{db.name.__root__}.{table_name}"
to_entity_ref = self.get_lineage_entity_ref(fqn, self.metadata_config, "table")
lineage = AddLineage(
edge=EntitiesEdge(fromEntity=from_entity_ref, toEntity=to_entity_ref)
)
yield lineage
def get_lineage_entity_ref(self, fqn, metadata_config, type) -> EntityReference:
metadata = OpenMetadata(metadata_config)
if type == "table":
table = metadata.get_by_name(entity=Table, fqdn=fqn)
return EntityReference(id=table.id, type="table")
elif type == "pipeline":
pipeline = metadata.get_by_name(entity=Pipeline, fqdn=fqn)
return EntityReference(id=pipeline.id, type="pipeline")
| 42.696133
| 96
| 0.644151
|
1e312fb832fa72c8c7ba2ec2badfec4e38fd06a9
| 1,256
|
py
|
Python
|
app/core/tests/test_models.py
|
pr0xii/recipe-app-api
|
875de401dc5734860d888615dfcdfa9aa8182ccf
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
pr0xii/recipe-app-api
|
875de401dc5734860d888615dfcdfa9aa8182ccf
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
pr0xii/recipe-app-api
|
875de401dc5734860d888615dfcdfa9aa8182ccf
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
#Test creating a new user with an email is successful
email = 'test@test.com'
password = 'password'
user = get_user_model().objects.create_user(
email=email,
password=password,
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalize(self):
#Test the email for a new user is normalized
email = 'test@test.com'
user = get_user_model().objects.create_user(email, 'password')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
        #Test creating user with no email raises error
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'password')
def test_create_new_superuser(self):
#Test creating a new superuser
user = get_user_model().objects.create_superuser(
'test@test.com',
'password',
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 33.945946
| 70
| 0.663217
|
a4f669d5e8ede9563cabbafbcc7019735dc31f8b
| 7,030
|
py
|
Python
|
dataprofiler/tests/data_readers/test_avro_data.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | null | null | null |
dataprofiler/tests/data_readers/test_avro_data.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | 1
|
2021-11-20T01:08:12.000Z
|
2021-11-20T01:08:12.000Z
|
dataprofiler/tests/data_readers/test_avro_data.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, print_function
import os
import unittest
from io import BytesIO
from dataprofiler.data_readers.avro_data import AVROData
from dataprofiler.data_readers.data import Data
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestAVRODataClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.input_file_path = None
cls.output_file_path = None
cls.ss = None
test_dir = os.path.join(test_root_path, 'data')
cls.input_file_names = [
dict(path=os.path.join(test_dir, 'avro/users.avro'), count=4),
dict(path=os.path.join(test_dir, 'avro/userdata1.avro'), count=1000),
dict(path=os.path.join(test_dir, 'avro/userdata1_intentionally_mislabled_file.parquet'), count=1000),
dict(path=os.path.join(test_dir, 'avro/userdata1_intentionally_mislabled_file.csv'), count=1000),
dict(path=os.path.join(test_dir, 'avro/userdata1_intentionally_mislabled_file.json'), count=1000),
dict(path=os.path.join(test_dir, 'avro/userdata1_intentionally_mislabled_file.txt'), count=1000),
dict(path=os.path.join(test_dir, 'avro/deflate_compressed_intentionally_mislabeled_file.csv'), count=4),
dict(path=os.path.join(test_dir, 'avro/snappy_compressed_intentionally_mislabeled_file.csv'), count=4),
]
cls.buffer_list = []
for input_file in cls.input_file_names:
# add BytesIO
buffer_info = input_file.copy()
with open(input_file['path'], 'rb') as fp:
buffer_info['path'] = BytesIO(fp.read())
cls.buffer_list.append(buffer_info)
cls.file_or_buf_list = cls.input_file_names + cls.buffer_list
@classmethod
def setUp(cls):
for buffer in cls.buffer_list:
buffer['path'].seek(0)
def test_is_match(self):
"""
Determine if the avro file can be automatically identified from
byte stream or file path
"""
for input_file in self.file_or_buf_list:
self.assertTrue(AVROData.is_match(input_file['path']))
def test_avro_file_identification(self):
"""
Determine if the avro file can be automatically identified
"""
for input_file in self.file_or_buf_list:
input_data_obj = Data(input_file['path'])
self.assertEqual(input_data_obj.data_type, 'avro')
def test_specifying_data_type(self):
"""
Determine if the avro file can be loaded with manual data_type setting
"""
for input_file in self.file_or_buf_list:
input_data_obj = Data(input_file['path'], data_type='avro')
self.assertEqual(input_data_obj.data_type, 'avro')
def test_reload_data(self):
"""
Determine if the avro file can be reloaded
"""
for input_file in self.file_or_buf_list:
input_data_obj = Data(input_file['path'])
input_data_obj.reload(input_file['path'])
self.assertEqual(input_data_obj.data_type, 'avro')
self.assertEqual(input_file['path'], input_data_obj.input_file_path)
def test_data_formats(self):
"""
Determine if the avro file data_formats can be used
"""
for input_file in self.file_or_buf_list:
input_data_obj = Data(input_file['path'])
for data_format in list(input_data_obj._data_formats.keys()):
input_data_obj.data_format = data_format
self.assertEqual(input_data_obj.data_format, data_format)
data = input_data_obj.data
if data_format == "dataframe":
import pandas as pd
self.assertIsInstance(data, pd.DataFrame)
elif data_format in ["records", "json"]:
self.assertIsInstance(data, list)
self.assertIsInstance(data[0], str)
def test_nested_keys(self):
"""
        Determine if nested keys can be extracted from dicts and used to build an Avro schema
        """
        dicts = [
            {'name': 1, 'favorite_number': 1},
            {'favorite_color': 1, 'address': {'streetaddress': 1, 'city': 1}}]
        nested_keys = AVROData._get_nested_keys_from_dicts(dicts)
self.assertIsNotNone(nested_keys)
schema_avro = {
'namespace': 'avro_namespace',
'name': 'avro_filename',
'type': 'record',
'fields': [
{'name': 'name', 'type': ['string', 'null']},
{'name': 'favorite_number', 'type': ['string', 'null']},
{'name': 'favorite_color', 'type': ['string', 'null']},
{
'name': 'address',
'type': [{
'namespace': 'avro_namespace',
'name': 'address',
'type': 'record',
'fields': [
{'name': 'streetaddress', 'type': ['string', 'null']},
{'name': 'city', 'type': ['string', 'null']}
]
},
'null'
]
}
]
}
schema_avro = AVROData._get_schema_avro(nested_keys, schema_avro)
self.assertIsNotNone(schema_avro)
def test_len_data(self):
"""
        Validate that length called on AVROData is appropriately determining the
length value.
"""
for input_file in self.file_or_buf_list:
data = Data(input_file["path"])
self.assertEqual(input_file['count'],
len(data),
msg=input_file['path'])
self.assertEqual(input_file['count'],
data.length,
msg=input_file['path'])
def test_file_encoding(self):
"""Tests to ensure file_encoding set to None"""
for input_file in self.file_or_buf_list:
data = AVROData(input_file["path"])
self.assertIsNone(data.file_encoding)
def test_is_structured(self):
# Default construction
data = AVROData()
self.assertTrue(data.is_structured)
# With option specifying dataframe as data_format
data = AVROData(options={"data_format": "dataframe"})
self.assertTrue(data.is_structured)
# With option specifying flattened_dataframe as data_format
data = AVROData(options={"data_format": "flattened_dataframe"})
self.assertTrue(data.is_structured)
# With option specifying records as data_format
data = AVROData(options={"data_format": "records"})
self.assertFalse(data.is_structured)
# With option specifying json as data_format
data = AVROData(options={"data_format": "json"})
self.assertFalse(data.is_structured)
if __name__ == '__main__':
unittest.main()
| 39.055556
| 116
| 0.590754
|
83893f63cc31238baee1641a8589277eafc232da
| 1,294
|
py
|
Python
|
tests/_async/test_reverse.py
|
christopher-henderson/PyStream
|
8c76a634448d98591aa68087bf78c6cd4da6a6b7
|
[
"MIT"
] | null | null | null |
tests/_async/test_reverse.py
|
christopher-henderson/PyStream
|
8c76a634448d98591aa68087bf78c6cd4da6a6b7
|
[
"MIT"
] | 12
|
2020-10-10T14:28:10.000Z
|
2020-10-28T05:42:34.000Z
|
tests/_async/test_reverse.py
|
christopher-henderson/PyStream
|
8c76a634448d98591aa68087bf78c6cd4da6a6b7
|
[
"MIT"
] | null | null | null |
import unittest
from pstream import AsyncStream
from tests._async.utils import Driver, Method
class Reverse(Method):
def __init__(self, args):
super(Reverse, self).__init__(AsyncStream.reverse, args)
class TestReverse(unittest.TestCase):
@Driver(initial=range(10), method=Reverse(args=[]), want=[9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
def test__a(self, got=None, want=None, exception=None):
if exception is not None:
raise exception
self.assertEqual(got, want)
@Driver(initial=range(10), method=Reverse(args=[]), want=[9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
def test__s(self, got=None, want=None, exception=None):
if exception is not None:
raise exception
self.assertEqual(got, want)
###############################
@Driver(initial=range(0), method=Reverse(args=[]), want=[])
def test1__a(self, got=None, want=None, exception=None):
if exception is not None:
raise exception
self.assertEqual(got, want)
@Driver(initial=range(0), method=Reverse(args=[]), want=[])
def test1__s(self, got=None, want=None, exception=None):
if exception is not None:
raise exception
self.assertEqual(got, want)
if __name__ == '__main__':
unittest.main()
| 29.409091
| 92
| 0.619011
|
2a525de90e08abd39c439ba51b80453c1f7f980e
| 3,187
|
py
|
Python
|
scripts/monitor_passwd.py
|
sqall01/LSMS
|
8f8497f63e3fa896f934b4e4e4491fe0fb339125
|
[
"MIT"
] | 62
|
2021-12-27T16:23:22.000Z
|
2022-03-28T10:26:27.000Z
|
scripts/monitor_passwd.py
|
sqall01/LSMS
|
8f8497f63e3fa896f934b4e4e4491fe0fb339125
|
[
"MIT"
] | null | null | null |
scripts/monitor_passwd.py
|
sqall01/LSMS
|
8f8497f63e3fa896f934b4e4e4491fe0fb339125
|
[
"MIT"
] | 9
|
2021-12-28T02:05:17.000Z
|
2022-01-23T03:14:55.000Z
|
#!/usr/bin/env python3
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the MIT License.
"""
Short summary:
Monitor /etc/passwd for changes to detect malicious attempts to hijack/change users.
NOTE: The first execution of this script will only show you the current state of the environment which should be acknowledged before monitoring for changes will become an effective security measure.
Requirements:
None
"""
import os
from typing import Dict
from lib.state import load_state, store_state
from lib.util import output_error, output_finding
from lib.util_user import get_system_users
# Read configuration.
try:
from config.config import ALERTR_FIFO, FROM_ADDR, TO_ADDR, STATE_DIR
from config.monitor_passwd import ACTIVATED
STATE_DIR = os.path.join(os.path.dirname(__file__), STATE_DIR, os.path.basename(__file__))
except:
ALERTR_FIFO = None
FROM_ADDR = None
TO_ADDR = None
ACTIVATED = True
STATE_DIR = os.path.join("/tmp", os.path.basename(__file__))
def _get_passwd() -> Dict[str, str]:
passwd_data = {}
for user_obj in get_system_users():
user = user_obj.name
passwd_data[user] = str(user_obj)
return passwd_data
def monitor_passwd():
# Decide where to output results.
print_output = False
if ALERTR_FIFO is None and FROM_ADDR is None and TO_ADDR is None:
print_output = True
if not ACTIVATED:
if print_output:
print("Module deactivated.")
return
stored_passwd_data = {}
try:
stored_passwd_data = load_state(STATE_DIR)
except Exception as e:
output_error(__file__, str(e))
return
curr_passwd_data = {}
try:
curr_passwd_data = _get_passwd()
except Exception as e:
output_error(__file__, str(e))
return
# Compare stored data with current one.
for stored_entry_user in stored_passwd_data.keys():
# Extract current entry belonging to the same user.
if stored_entry_user not in curr_passwd_data.keys():
message = "User '%s' was deleted." % stored_entry_user
output_finding(__file__, message)
continue
# Check entry was modified.
if stored_passwd_data[stored_entry_user] != curr_passwd_data[stored_entry_user]:
message = "Passwd entry for user '%s' was modified.\n\n" % stored_entry_user
message += "Old entry: %s\n" % stored_passwd_data[stored_entry_user]
message += "New entry: %s" % curr_passwd_data[stored_entry_user]
output_finding(__file__, message)
# Check new data was added.
for curr_entry_user in curr_passwd_data.keys():
if curr_entry_user not in stored_passwd_data.keys():
message = "User '%s' was added.\n\n" % curr_entry_user
message += "Entry: %s" % curr_passwd_data[curr_entry_user]
output_finding(__file__, message)
try:
store_state(STATE_DIR, curr_passwd_data)
except Exception as e:
output_error(__file__, str(e))
if __name__ == '__main__':
monitor_passwd()
| 28.20354
| 198
| 0.683715
|
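A minimal, self-contained sketch of the stored-state vs. current-state comparison the script above performs; it is illustrative only, does not use the repository's lib.state or lib.util helpers, and the diff_passwd name is hypothetical:
def diff_passwd(stored, current):
    # Mirror the three checks above: deleted, modified, and added users.
    findings = []
    for user, entry in stored.items():
        if user not in current:
            findings.append("User '%s' was deleted." % user)
        elif entry != current[user]:
            findings.append("Passwd entry for user '%s' was modified." % user)
    for user in current:
        if user not in stored:
            findings.append("User '%s' was added." % user)
    return findings

print(diff_passwd({"root": "root:x:0:0"}, {"root": "root:x:0:0", "eve": "eve:x:1001:1001"}))
# ["User 'eve' was added."]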
e5e01c6692030adccbbd78afaac8dee367977d37
| 721
|
py
|
Python
|
faceai/inpaint.py
|
DnotCf/faceai-master
|
2e38ecce629c1d9d2b4ec59fb05f81c5cbb0b91a
|
[
"MIT"
] | null | null | null |
faceai/inpaint.py
|
DnotCf/faceai-master
|
2e38ecce629c1d9d2b4ec59fb05f81c5cbb0b91a
|
[
"MIT"
] | null | null | null |
faceai/inpaint.py
|
DnotCf/faceai-master
|
2e38ecce629c1d9d2b4ec59fb05f81c5cbb0b91a
|
[
"MIT"
] | null | null | null |
#coding=utf-8
# Image inpainting
import cv2
import numpy as np
path = "img/inpaint.png"
img = cv2.imread(path)
height, width, depth = img.shape[0:3]
# Threshold the image: pixels outside the [140, 140, 140]~[255, 255, 255] range become 0
thresh = cv2.inRange(img, np.array([140, 140, 140]), np.array([255, 255, 255]))
# Create a structuring element of the desired shape and size
kernel = np.ones((3, 3), np.uint8)
# Dilate the region to be repaired
hi_mask = cv2.dilate(thresh, kernel, iterations=1)
specular = cv2.inpaint(img, hi_mask, 5, flags=cv2.INPAINT_TELEA)
cv2.namedWindow("Image", 0)
cv2.resizeWindow("Image", int(width / 2), int(height / 2))
cv2.imshow("Image", img)
cv2.namedWindow("newImage", 0)
cv2.resizeWindow("newImage", int(width / 2), int(height / 2))
cv2.imshow("newImage", specular)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 24.033333
| 79
| 0.700416
|
5f633a5b44bfcfa91ec32073e2975013c6a32065
| 4,881
|
py
|
Python
|
simple_web_generator/window.py
|
Slepice1/simple-web-generator
|
b13248933ef03d71aa6368c1f42636d184142820
|
[
"MIT"
] | null | null | null |
simple_web_generator/window.py
|
Slepice1/simple-web-generator
|
b13248933ef03d71aa6368c1f42636d184142820
|
[
"MIT"
] | null | null | null |
simple_web_generator/window.py
|
Slepice1/simple-web-generator
|
b13248933ef03d71aa6368c1f42636d184142820
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Window class"""
from simple_web_generator.content import Content
class Window:
"""Basic window class"""
    HORIZONTAL_BORDER = "-"
    VERTICAL_BORDER = "|"
    CORNER = "+"
def __init__(self, attributes):
self.id = attributes.get("id")
self.name = attributes.get("name", self.id)
self.show_name = attributes.get("show_name", False)
self.h2_name = attributes.get("h2_name", False)
self.content = Content(attributes.get("content", ""))
self._set_border(attributes.get("border", {}))
self.padding = tuple(int(pad) for pad in attributes.get("padding", "0 0 0 0").split(' '))
self._set_sizes(attributes.get("width", 2), attributes.get("height", 2)) #must be at least 3
def _set_border(self, border):
b = border
self.top_border = b.get("top", b.get("horizontal", b.get("all", Window.HORIZONTAL_BORDER)))
self.bottom_border = b.get("bottom", b.get("horizontal", b.get("all", Window.HORIZONTAL_BORDER)))
self.left_border = b.get("left", b.get("vertical", b.get("all", Window.VERTICAL_BORDER)))
self.right_border = b.get("right", b.get("vertical", b.get("all", Window.VERTICAL_BORDER)))
self.corner = b.get("corner", b.get("all", Window.CORNER))
def _set_sizes(self, width, height):
horizontal_padding = self.padding[1] + self.padding[3]
computed_width = self.content.width + horizontal_padding + 2 #border size
if self.show_name:
computed_width = max(computed_width, len(self.name) + horizontal_padding + 4) #border size
self._width = max(int(width), computed_width)
self.inside_width = self._width - horizontal_padding - 2 #border size
vertical_padding = self.padding[0] + self.padding[2]
computed_height = self.content.height + vertical_padding + 2 #border size
self._height = max(int(height), computed_height)
self.inside_height = self._height - vertical_padding - 2 #border size
assert self.inside_height >= 0
assert self.inside_width >= 0
assert self._height >= self.inside_height
assert self._width >= self.inside_width
@property
def width(self):
return self._width
@width.setter
def width(self, width):
self._set_sizes(width, self._height)
def render(self):
lines = []
spaces_width = self.inside_width + self.padding[1] + self.padding[3]
horizontal_template = "{0}" + "{1}"*spaces_width + "{2}"
#Top Border
if self.show_name:
template = "{0}{1}{2}" + "{1}"*(spaces_width-len(self.name)-1) + "{0}"
if self.h2_name:
name = "<h2>" + self.name + "</h2>"
else:
name = self.name
lines.append(template.format(self.corner,
self.top_border,
name))
else:
lines.append(horizontal_template.format(self.corner,
self.top_border,
self.corner,))
#Top padding
for i in range(self.padding[0]):
lines.append(horizontal_template.format(self.left_border,
" ",
self.right_border))
#Content
content_lines = self.content.render().splitlines()
plain_content_lines = self.content.plain_text.splitlines()
for i in range(self.inside_height):
if i < len(content_lines):
line_template = ("{0}" + "{1}"*self.padding[3] + "{2}" +
"{1}"*(self.inside_width + self.padding[1] - len(plain_content_lines[i])) + "{3}")
lines.append(line_template.format(self.left_border,
" ",
content_lines[i],
self.right_border))
else:
lines.append(horizontal_template.format(self.left_border,
" ",
self.right_border))
#Bottom padding
for i in range(self.padding[2]):
lines.append(horizontal_template.format(self.left_border,
" ",
self.right_border))
#Bottom border
lines.append(horizontal_template.format(self.corner,
self.bottom_border,
self.corner))
return '\n'.join(lines)
| 44.372727
| 115
| 0.521614
|
2b226e0206fd004e9e9aa63c9e2c3b8ada84788e
| 9,219
|
py
|
Python
|
vaccine_feed_ingest/runners/az/arcgis/normalize.py
|
toffer/vaccine-feed-ingest
|
7f91c10df6e47acdc03709c4f04e4837b393188a
|
[
"MIT"
] | null | null | null |
vaccine_feed_ingest/runners/az/arcgis/normalize.py
|
toffer/vaccine-feed-ingest
|
7f91c10df6e47acdc03709c4f04e4837b393188a
|
[
"MIT"
] | null | null | null |
vaccine_feed_ingest/runners/az/arcgis/normalize.py
|
toffer/vaccine-feed-ingest
|
7f91c10df6e47acdc03709c4f04e4837b393188a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import json
import logging
import os
import pathlib
import re
import sys
from datetime import datetime
from typing import List, Optional
# import schema
site_dir = pathlib.Path(__file__).parent
state_dir = site_dir.parent
runner_dir = state_dir.parent
root_dir = runner_dir.parent
sys.path.append(str(root_dir))
from schema import schema # noqa: E402
# Configure logger
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s:%(name)s:%(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
)
logger = logging.getLogger("az/arcgis/normalize.py")
output_dir = pathlib.Path(sys.argv[1])
input_dir = pathlib.Path(sys.argv[2])
json_filepaths = input_dir.glob("*.ndjson")
parsed_at_timestamp = datetime.utcnow().isoformat()
def _get_id(site: dict) -> str:
data_id = site["attributes"]["globalid"]
# Could parse these from directory traversal, but do not for now to avoid
# accidental mutation.
site = "arcgis"
runner = "az"
# Could parse these from the input file name, but do not for now to avoid
# accidental mutation.
arcgis = "128ead309d754558ad81bccd99188dc9"
layer = 0
return f"{runner}:{site}:{arcgis}_{layer}:{data_id}"
def _get_contacts(site: dict) -> Optional[List[schema.Contact]]:
contacts = []
if site["attributes"]["prereg_phone"]:
matches = list(
re.finditer(
r"(?P<area_code>\d\d\d)\)?-? ?(?P<rest_of_number>\d\d\d-\d\d\d\d)",
site["attributes"]["prereg_phone"],
)
)
if not matches:
logger.warning(
"unparseable phone number: '%s'", site["attributes"]["prereg_phone"]
)
return None
for match in matches:
phone = f"({match.group('area_code')}) {match.group('rest_of_number')}"
contacts.append(schema.Contact(phone=phone))
if site["attributes"]["prereg_website"]:
contacts.append(schema.Contact(website=site["attributes"]["prereg_website"]))
if len(contacts) > 0:
return contacts
return None
def _get_languages(site: dict) -> Optional[List[str]]:
return {None: None, "Yes": ["en", "es"], "No": ["en"]}[
site["attributes"]["spanish_staff_y_n"]
]
def _get_opening_dates(site: dict) -> Optional[List[schema.OpenDate]]:
opens = None
closes = None
if site["attributes"]["begindate"] is not None:
opens = (
datetime.fromtimestamp(site["attributes"]["begindate"] // 1000)
.date()
.isoformat()
)
if site["attributes"]["enddate"] is not None:
closes = (
datetime.fromtimestamp(site["attributes"]["enddate"] // 1000)
.date()
.isoformat()
)
if opens is None and closes is None:
return None
return [
schema.OpenDate(
opens=opens,
closes=closes,
)
]
def _parse_time(human_readable_time: str) -> (int, int):
match = re.match(r"^(?P<hour>\d+):(?P<minute>\d+) ?AM?$", human_readable_time)
if match:
return int(match.group("hour")), int(match.group("minute"))
match = re.match(r"^(?P<hour>\d+):(?P<minute>\d+) ?P[MN]?$", human_readable_time)
if match:
return int(match.group("hour")) + 12, int(match.group("minute"))
match = re.match(r"^(?P<hour>\d+) ?AM$", human_readable_time)
if match:
return int(match.group("hour")), 0
match = re.match(r"^(?P<hour>\d+) ?PM$", human_readable_time)
if match:
return int(match.group("hour")) + 12, 0
match = re.match(r"^(?P<hour>\d+):(?P<minute>\d+)$", human_readable_time)
if match:
return int(match.group("hour")), int(match.group("minute"))
raise ValueError(human_readable_time)
def _normalize_time(human_readable_time: str) -> str:
hour, minute = _parse_time(human_readable_time)
return str(hour % 24).rjust(2, "0") + ":" + str(minute).rjust(2, "0")
def _normalize_hours(
human_readable_hours: Optional[str], day: str
) -> List[schema.OpenHour]:
processed_hours = human_readable_hours
if processed_hours is None:
return []
if processed_hours == "8-4":
return [schema.OpenHour(day=day, open="08:00", closes="16:00")]
if processed_hours == "8:00AM7:00PM":
        return [schema.OpenHour(day=day, open="08:00", closes="19:00")]
    processed_hours = processed_hours.upper().strip()
    if processed_hours.startswith("BY APPOINTMENT"):
        processed_hours = processed_hours[len("BY APPOINTMENT"):].strip()
if " AND " in processed_hours:
ranges = processed_hours.split(" AND ")
return sum((_normalize_hours(hours_range, day) for hours_range in ranges), [])
if ";" in processed_hours:
ranges = processed_hours.split(";")
return sum((_normalize_hours(hours_range, day) for hours_range in ranges), [])
if " TO " in processed_hours:
processed_hours = processed_hours.replace(" TO ", "-")
if processed_hours.count("-") != 1:
logger.warning("unparseable hours: '%s'", human_readable_hours)
return []
open_time, close_time = processed_hours.split("-")
try:
return [
schema.OpenHour(
day=day,
open=_normalize_time(open_time.strip().upper()),
closes=_normalize_time(close_time.strip().upper()),
)
]
except ValueError:
logger.warning("unparseable hours: '%s'", human_readable_hours)
return []
def _get_opening_hours(site: dict) -> Optional[List[schema.OpenHour]]:
hours = []
if site["attributes"]["mon_open"] == "Yes":
hours += _normalize_hours(site["attributes"]["mon_hrs"], "monday")
if site["attributes"]["tues_open"] == "Yes":
hours += _normalize_hours(site["attributes"]["tues_hrs"], "tuesday")
if site["attributes"]["wed_open"] == "Yes":
hours += _normalize_hours(site["attributes"]["wed_hrs"], "wednesday")
if site["attributes"]["thurs_open"] == "Yes":
hours += _normalize_hours(site["attributes"]["thur_hrs"], "thursday")
if site["attributes"]["fri_open"] == "Yes":
hours += _normalize_hours(site["attributes"]["fri_hrs"], "friday")
if site["attributes"]["sat_open"] == "Yes":
hours += _normalize_hours(site["attributes"]["sat_hrs"], "saturday")
if site["attributes"]["sun_open"] == "Yes":
hours += _normalize_hours(site["attributes"]["sun_hrs"], "sunday")
return hours if hours else None
def _get_inventory(site: dict) -> Optional[List[schema.Vaccine]]:
inventory_str = site["attributes"]["vaccine_manufacturer"]
inventory = (
inventory_str.split(";") if ";" in inventory_str else inventory_str.split(",")
)
return [
{
"Pfizer_BioNTech": schema.Vaccine(vaccine="pfizer"),
"Pfizer-BioNTech": schema.Vaccine(vaccine="pfizer"),
"Pfizer": schema.Vaccine(vaccine="pfizer"),
"Moderna": schema.Vaccine(vaccine="moderna"),
"J_J": schema.Vaccine(vaccine="janssen"),
}[vaccine.lstrip("\u200b").strip()]
for vaccine in inventory
]
def _get_normalized_location(site: dict, timestamp: str) -> schema.NormalizedLocation:
return schema.NormalizedLocation(
id=_get_id(site),
name=site["attributes"]["loc_name"],
address=schema.Address(
street1=site["attributes"]["addr1"],
street2=site["attributes"]["addr2"],
city=site["attributes"]["city"],
state=site["attributes"]["state"] or "AZ",
zip=site["attributes"]["zip"],
),
location=schema.LatLng(
latitude=site["geometry"]["y"], longitude=site["geometry"]["x"]
),
contact=_get_contacts(site),
languages=_get_languages(site),
opening_dates=_get_opening_dates(site),
opening_hours=_get_opening_hours(site),
availability=None,
inventory=_get_inventory(site),
access=None,
parent_organization=None,
links=None,
notes=[site["attributes"]["prereg_comments"]]
if site["attributes"]["prereg_comments"]
else None,
active=None,
source=schema.Source(
source="arcgis",
id=site["attributes"]["globalid"],
fetched_from_uri="https://adhsgis.maps.arcgis.com/apps/opsdashboard/index.html#/5d636af4d5134a819833b1a3b906e1b6", # noqa: E501
fetched_at=timestamp,
data=site,
),
)
for in_filepath in json_filepaths:
filename, _ = os.path.splitext(in_filepath.name)
out_filepath = output_dir / f"{filename}.normalized.ndjson"
logger.info(
"normalizing %s => %s",
in_filepath,
out_filepath,
)
with in_filepath.open() as fin:
with out_filepath.open("w") as fout:
for site_json in fin:
parsed_site = json.loads(site_json)
if parsed_site["attributes"]["addr1"] is None:
continue
normalized_site = _get_normalized_location(
parsed_site, parsed_at_timestamp
)
json.dump(normalized_site.dict(), fout)
fout.write("\n")
| 31.464164
| 140
| 0.606139
|
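A few illustrative calls (not part of the pipeline above) showing what the time-parsing helpers defined in that file return for typical inputs; the expected values follow from the regexes in _parse_time, and the snippet assumes those functions are imported into the current scope:
assert _normalize_time("8 AM") == "08:00"
assert _normalize_time("4:30 PM") == "16:30"
hours = _normalize_hours("8:00 AM - 4:30 PM", "monday")
assert hours[0].open == "08:00" and hours[0].closes == "16:30"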
8a70c0b67a044ab8ea43c4a4e186324064946eb8
| 4,724
|
py
|
Python
|
server/common/config/client_config.py
|
prete/cellxgene
|
11acea86c4b3df334300fac7e9e034c1e61e67bc
|
[
"MIT"
] | 3
|
2019-11-11T15:41:07.000Z
|
2020-12-14T08:47:35.000Z
|
server/common/config/client_config.py
|
prete/cellxgene
|
11acea86c4b3df334300fac7e9e034c1e61e67bc
|
[
"MIT"
] | null | null | null |
server/common/config/client_config.py
|
prete/cellxgene
|
11acea86c4b3df334300fac7e9e034c1e61e67bc
|
[
"MIT"
] | 1
|
2021-05-12T15:15:05.000Z
|
2021-05-12T15:15:05.000Z
|
from server import display_version as cellxgene_display_version
def get_client_config(app_config, data_adaptor):
"""
Return the configuration as required by the /config REST route
"""
server_config = app_config.server_config
dataset_config = data_adaptor.dataset_config
annotation = dataset_config.user_annotations
auth = server_config.auth
# FIXME The current set of config is not consistently presented:
    # we have camelCase, hyphen-text, and underscore_text
# make sure the configuration has been checked.
app_config.check_config()
# display_names
title = app_config.get_title(data_adaptor)
about = app_config.get_about(data_adaptor)
display_names = dict(engine=data_adaptor.get_name(), dataset=title)
# library_versions
library_versions = {}
library_versions.update(data_adaptor.get_library_versions())
library_versions["cellxgene"] = cellxgene_display_version
# links
links = {"about-dataset": about}
# parameters
parameters = {
"layout": dataset_config.embeddings__names,
"max-category-items": dataset_config.presentation__max_categories,
"obs_names": server_config.single_dataset__obs_names,
"var_names": server_config.single_dataset__var_names,
"diffexp_lfc_cutoff": dataset_config.diffexp__lfc_cutoff,
"backed": server_config.adaptor__anndata_adaptor__backed,
"disable-diffexp": not dataset_config.diffexp__enable,
"enable-reembedding": dataset_config.embeddings__enable_reembedding,
"annotations": False,
"annotations_file": None,
"annotations_dir": None,
"annotations_cell_ontology_enabled": False,
"annotations_cell_ontology_obopath": None,
"annotations_cell_ontology_terms": None,
"custom_colors": dataset_config.presentation__custom_colors,
"diffexp-may-be-slow": False,
"about_legal_tos": dataset_config.app__about_legal_tos,
"about_legal_privacy": dataset_config.app__about_legal_privacy,
}
# corpora dataset_props
# TODO/Note: putting info from the dataset into the /config is not ideal.
# However, it is definitely not part of /schema, and we do not have a top-level
# route for data properties. Consider creating one at some point.
corpora_props = data_adaptor.get_corpora_props()
if corpora_props and "default_embedding" in corpora_props:
default_embedding = corpora_props["default_embedding"]
if isinstance(default_embedding, str) and default_embedding.startswith("X_"):
default_embedding = default_embedding[2:] # drop X_ prefix
if default_embedding in data_adaptor.get_embedding_names():
parameters["default_embedding"] = default_embedding
data_adaptor.update_parameters(parameters)
if annotation:
annotation.update_parameters(parameters, data_adaptor)
# gather it all together
client_config = {}
config = client_config["config"] = {}
config["displayNames"] = display_names
config["library_versions"] = library_versions
config["links"] = links
config["parameters"] = parameters
config["corpora_props"] = corpora_props
config["limits"] = {
"column_request_max": server_config.limits__column_request_max,
"diffexp_cellcount_max": server_config.limits__diffexp_cellcount_max,
}
if dataset_config.app__authentication_enable and auth.is_valid_authentication_type():
config["authentication"] = {
"requires_client_login": auth.requires_client_login(),
}
if auth.requires_client_login():
config["authentication"].update(
{
# Todo why are these stored on the data_adaptor?
"login": auth.get_login_url(data_adaptor),
"logout": auth.get_logout_url(data_adaptor),
}
)
return client_config
def get_client_userinfo(app_config, data_adaptor):
"""
Return the userinfo as required by the /userinfo REST route
"""
server_config = app_config.server_config
dataset_config = data_adaptor.dataset_config
auth = server_config.auth
# make sure the configuration has been checked.
app_config.check_config()
if dataset_config.app__authentication_enable and auth.is_valid_authentication_type():
userinfo = {}
userinfo["userinfo"] = {
"is_authenticated": auth.is_user_authenticated(),
"username": auth.get_user_name(),
"user_id": auth.get_user_id(),
"email": auth.get_user_email(),
"picture": auth.get_user_picture(),
}
return userinfo
| 38.406504
| 89
| 0.698984
|
a6f7aa5639b8bbed9039f316ba1d94a0cd159389
| 1,085
|
py
|
Python
|
netbox/dcim/migrations/0035_device_expand_status_choices.py
|
BrnoPCmaniak/netbox
|
7b517abdb68a6324950dfd0375861163c7bfff00
|
[
"Apache-2.0"
] | 6
|
2017-12-01T05:13:39.000Z
|
2020-01-23T13:04:43.000Z
|
netbox/dcim/migrations/0035_device_expand_status_choices.py
|
emersonfelipesp/netbox
|
fecca5ad83fb6b48a2f15982dfd3242653f105f9
|
[
"Apache-2.0"
] | 25
|
2019-09-17T19:40:50.000Z
|
2022-03-11T04:01:55.000Z
|
netbox/dcim/migrations/0035_device_expand_status_choices.py
|
emersonfelipesp/netbox
|
fecca5ad83fb6b48a2f15982dfd3242653f105f9
|
[
"Apache-2.0"
] | 3
|
2017-11-18T01:28:22.000Z
|
2018-05-17T14:04:43.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-08 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dcim', '0034_rename_module_to_inventoryitem'),
]
# We convert the BooleanField to an IntegerField first as PostgreSQL does not provide a direct cast for boolean to
# smallint (attempting to convert directly yields the error "cannot cast type boolean to smallint").
operations = [
migrations.AlterField(
model_name='device',
name='status',
field=models.PositiveIntegerField(choices=[[1, b'Active'], [0, b'Offline'], [2, b'Planned'], [3, b'Staged'], [4, b'Failed'], [5, b'Inventory']], default=1, verbose_name=b'Status'),
),
migrations.AlterField(
model_name='device',
name='status',
field=models.PositiveSmallIntegerField(choices=[[1, b'Active'], [0, b'Offline'], [2, b'Planned'], [3, b'Staged'], [4, b'Failed'], [5, b'Inventory']], default=1, verbose_name=b'Status'),
),
]
| 41.730769
| 197
| 0.626728
|
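For illustration only, and not part of the migration above: the boolean-to-smallint conversion that its comment describes can also be expressed as raw SQL with an explicit USING clause via migrations.RunSQL; the table and column names below are assumptions, not taken from the NetBox schema:
from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [('dcim', '0034_rename_module_to_inventoryitem')]
    operations = [
        migrations.RunSQL(
            # PostgreSQL cannot cast boolean to smallint directly, so go through integer.
            sql="ALTER TABLE dcim_device ALTER COLUMN status TYPE smallint USING status::int::smallint;",
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]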
39191011a4f09fda40e66efffbd44e337d41ae54
| 191
|
py
|
Python
|
imaginaire/utils/__init__.py
|
MichaelDoron/imaginaire
|
5f95b988453d391e972fa528152121d0dd3cb51a
|
[
"RSA-MD"
] | 1
|
2021-11-17T08:45:58.000Z
|
2021-11-17T08:45:58.000Z
|
imaginaire/utils/__init__.py
|
jrfrantz/imaginaire
|
7c650977b29ea2dd12557d1fef447df9809db737
|
[
"RSA-MD"
] | null | null | null |
imaginaire/utils/__init__.py
|
jrfrantz/imaginaire
|
7c650977b29ea2dd12557d1fef447df9809db737
|
[
"RSA-MD"
] | 1
|
2021-06-09T01:28:59.000Z
|
2021-06-09T01:28:59.000Z
|
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
| 38.2
| 70
| 0.759162
|
a4b4d5883ce6d93b0277ff2ee0f168349c1dca63
| 285
|
py
|
Python
|
bluetooth_tutorial.py
|
juandados/gait-wearable
|
4790a912cdbb5a5d16697559af7dd5b8bd909077
|
[
"MIT"
] | null | null | null |
bluetooth_tutorial.py
|
juandados/gait-wearable
|
4790a912cdbb5a5d16697559af7dd5b8bd909077
|
[
"MIT"
] | null | null | null |
bluetooth_tutorial.py
|
juandados/gait-wearable
|
4790a912cdbb5a5d16697559af7dd5b8bd909077
|
[
"MIT"
] | null | null | null |
# based on https://github.com/pybluez/pybluez
# simple inquiry example
import bluetooth
nearby_devices = bluetooth.discover_devices(lookup_names=True)
print("Found {} devices.".format(len(nearby_devices)))
for addr, name in nearby_devices:
print(" {} - {}".format(addr, name))
| 25.909091
| 62
| 0.740351
|
573a2dd9eb34f45b32eca592ff0a86cdabae57a6
| 889
|
py
|
Python
|
model-optimizer/extensions/front/mxnet/maximum_ext.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 2
|
2021-04-19T06:08:35.000Z
|
2021-08-25T02:43:43.000Z
|
model-optimizer/extensions/front/mxnet/maximum_ext.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 6
|
2022-01-11T18:56:22.000Z
|
2022-02-21T13:20:20.000Z
|
model-optimizer/extensions/front/mxnet/maximum_ext.py
|
apexxs/dldt
|
17e66dc5a6631d630da454506902bd7c25d4170b
|
[
"Apache-2.0"
] | 3
|
2021-02-05T17:11:17.000Z
|
2021-04-19T08:33:31.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.ops.eltwise import Eltwise
class MaximumFrontExtractor(FrontExtractorOp):
op = '_maximum'
enabled = True
@staticmethod
def extract(node):
Eltwise.update_node_stat(node, {'operation': 'max'})
return __class__.enabled
| 30.655172
| 73
| 0.750281
|
bcb31dae25e74d3581fbe72f1c53e3f17a30edb8
| 284
|
py
|
Python
|
elliptic_meshql/MeshQL.py
|
padmec-reservoir/elliptic_meshql
|
cf56d6273aa915bfdecd9c031259e12182b181de
|
[
"MIT"
] | null | null | null |
elliptic_meshql/MeshQL.py
|
padmec-reservoir/elliptic_meshql
|
cf56d6273aa915bfdecd9c031259e12182b181de
|
[
"MIT"
] | null | null | null |
elliptic_meshql/MeshQL.py
|
padmec-reservoir/elliptic_meshql
|
cf56d6273aa915bfdecd9c031259e12182b181de
|
[
"MIT"
] | null | null | null |
from .Computer import ComputerContract, ComputerImplementationBase
from .Manager import ManagerContract, ManagerImplementationBase
from .Selector import SelectorContract, SelectorImplementationBase
class MeshQLContract(ComputerContract, ManagerContract, SelectorContract):
pass
| 35.5
| 74
| 0.869718
|
887bbf069b610385b3e327333042a229ef429be6
| 147
|
py
|
Python
|
api_gate/apps.py
|
dmitriyVasilievich1986/sudoku-server
|
767059998ccc2493424a7da39dfb9ac4284ae8a4
|
[
"MIT"
] | null | null | null |
api_gate/apps.py
|
dmitriyVasilievich1986/sudoku-server
|
767059998ccc2493424a7da39dfb9ac4284ae8a4
|
[
"MIT"
] | null | null | null |
api_gate/apps.py
|
dmitriyVasilievich1986/sudoku-server
|
767059998ccc2493424a7da39dfb9ac4284ae8a4
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ApiGateConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'api_gate'
| 21
| 56
| 0.761905
|
94eb25b5480c48edd143909a58eae5c02ffc2c47
| 5,068
|
py
|
Python
|
Tests/test_reaction.py
|
mkratz/BioCRNPyler
|
27c14e6660fd3a14cde62718eee4cba4d16d94a8
|
[
"BSD-3-Clause"
] | null | null | null |
Tests/test_reaction.py
|
mkratz/BioCRNPyler
|
27c14e6660fd3a14cde62718eee4cba4d16d94a8
|
[
"BSD-3-Clause"
] | null | null | null |
Tests/test_reaction.py
|
mkratz/BioCRNPyler
|
27c14e6660fd3a14cde62718eee4cba4d16d94a8
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
from unittest import TestCase
from biocrnpyler import Reaction, Species
class TestReaction(TestCase):
def test_reaction_initialization(self):
# warns if both input and output species are empty
with self.assertWarns(Warning):
Reaction(inputs=[], outputs=[], k=0.1, propensity_type="massaction", propensity_params=None)
# non-massaction propensities require propensity_params dict
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=0.1, propensity_type="not_massaction")
# non-massaction propensities cannot be reversible
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=0.1, propensity_type="not_massaction", propensity_params={}, k_rev=1)
# test under specified propensity parameters
propensity_types = ["hillpositive", "hillnegative", "proportionalhillpositive", "proportionalhillnegative"]
for propensity_type in propensity_types:
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=0.1, propensity_type=propensity_type, propensity_params={'s1': 0})
# test when rate is missing from the propensity parameter dictionary for a general propensity
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=0.1, propensity_type='general', propensity_params={})
# test unknown propensity type
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=0.1, propensity_type="dummy", propensity_params={})
# input must be a valid species object
with self.assertRaises(ValueError):
Reaction(inputs=['a'], outputs=[], k=0.1)
# output must be a valid species object
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=['b'], k=0.1)
# reaction rate coefficient must be larger than zero
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=0)
# reaction rate coefficient must be larger than zero
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=-1)
rxn = Reaction(inputs=[], outputs=[], k=0.1, k_rev=1)
# test whether the reaction is registered as reversible
self.assertTrue(rxn.reversible)
# test whether the reaction is registered as massaction
self.assertTrue(rxn.propensity_type == 'massaction')
# test overspecified mass action
sp1 = Species(name='test_species_a')
sp2 = Species(name='test_species_b')
with self.assertWarns(Warning):
Reaction(inputs=[sp1], outputs=[sp2], propensity_type="massaction", propensity_params={}, k=0.1)
# test whether the number of input and output coefficients match
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=0.1, input_coefs=[1,2])
# test whether the number of input and output coefficients match
with self.assertRaises(ValueError):
Reaction(inputs=[], outputs=[], k=0.1, output_coefs=[1, 2])
# TODO add test for the equality operator
def test_complex_set_equality(self):
sp1 = Species(name='test_species_a')
sp2 = Species(name='test_species_b')
# test whether two reactions with the same species are equal
rxn1 = Reaction(inputs=[sp1], outputs=[], k=0.1, input_coefs=[1])
rtn = Reaction.complex_set_equality(c1=rxn1.inputs, c1_coefs=rxn1.input_coefs,
c2=rxn1.inputs, c2_coefs=rxn1.input_coefs)
self.assertTrue(rtn)
# test that two reactions have the two species with equal coefficients are equal
rxn2 = Reaction(inputs=[sp1], outputs=[sp2], k=0.1, input_coefs=[1], output_coefs=[1])
rtn = Reaction.complex_set_equality(c1=rxn1.inputs, c1_coefs=rxn1.input_coefs,
c2=rxn2.inputs, c2_coefs=rxn2.input_coefs)
self.assertTrue(rtn)
# test that two reactions have the two species with different coefficients are not equal
rxn1 = Reaction(inputs=[sp1], outputs=[], k=0.1, input_coefs=[1])
rxn2 = Reaction(inputs=[sp2], outputs=[], k=0.1, input_coefs=[2])
rtn2 = Reaction.complex_set_equality(c1=rxn1.inputs, c1_coefs=rxn1.input_coefs,
c2=rxn2.inputs, c2_coefs=rxn2.input_coefs)
self.assertFalse(rtn2)
# test that two reactions with different species are not equal
rxn1 = Reaction(inputs=[sp1,sp2], outputs=[], k=0.1, input_coefs=[1,2])
rxn2 = Reaction(inputs=[sp2], outputs=[], k=0.1, input_coefs=[2])
rtn3 = Reaction.complex_set_equality(c1=rxn1.inputs, c1_coefs=rxn1.input_coefs,
c2=rxn2.inputs, c2_coefs=rxn2.input_coefs)
self.assertFalse(rtn3)
| 47.811321
| 116
| 0.647395
|