hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bf4d1f70dca0eb371cea11ced7ad5e9c8cf764c4 | 1,731 | py | Python | app/user/serializers.py | Mwangik/recipe-app-api | 115b694fc358b183bfac6c6cb2f90a5a41cf8976 | ["MIT"] | null | null | null | app/user/serializers.py | Mwangik/recipe-app-api | 115b694fc358b183bfac6c6cb2f90a5a41cf8976 | ["MIT"] | null | null | null | app/user/serializers.py | Mwangik/recipe-app-api | 115b694fc358b183bfac6c6cb2f90a5a41cf8976 | ["MIT"] | null | null | null |
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""serializers for the users objects """
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
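A minimal usage sketch for the serializer above, assuming the Django project from this file is configured and importable; the import path and field values are illustrative:

from app.user.serializers import UserSerializer  # hypothetical import path

serializer = UserSerializer(data={
    'email': 'user@example.com',
    'password': 'secret-pass',
    'name': 'Test User',
})
if serializer.is_valid():
    # create() delegates to create_user(), so the password is stored hashed
    user = serializer.save()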
| 31.472727 | 74 | 0.646447 |
2351eb2890d9588f40a2c5f9b7e9be7f5f958c6e | 3,029 | py | Python | examples/tsm/infer.py | LDOUBLEV/hapi | 1b2e866edca6ff3aa4bd32c96ca5082b1cab426f | ["Apache-2.0"] | null | null | null | examples/tsm/infer.py | LDOUBLEV/hapi | 1b2e866edca6ff3aa4bd32c96ca5082b1cab426f | ["Apache-2.0"] | null | null | null | examples/tsm/infer.py | LDOUBLEV/hapi | 1b2e866edca6ff3aa4bd32c96ca5082b1cab426f | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import os
import argparse
import numpy as np
import paddle.fluid as fluid  # needed for fluid.enable_dygraph() in main()
from paddle.incubate.hapi.model import Input, set_device
from paddle.incubate.hapi.vision.transforms import Compose
from check import check_gpu, check_version
from modeling import tsm_resnet50
from kinetics_dataset import KineticsDataset
from transforms import *
from utils import print_arguments
import logging
logger = logging.getLogger(__name__)
def main():
device = set_device(FLAGS.device)
fluid.enable_dygraph(device) if FLAGS.dynamic else None
transform = Compose([GroupScale(), GroupCenterCrop(), NormalizeImage()])
dataset = KineticsDataset(
pickle_file=FLAGS.infer_file,
label_list=FLAGS.label_list,
mode='test',
transform=transform)
labels = dataset.label_list
model = tsm_resnet50(
num_classes=len(labels), pretrained=FLAGS.weights is None)
inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')]
model.prepare(inputs=inputs, device=FLAGS.device)
if FLAGS.weights is not None:
model.load(FLAGS.weights, reset_optimizer=True)
imgs, label = dataset[0]
pred = model.test_batch([imgs[np.newaxis, :]])
pred = labels[np.argmax(pred)]
logger.info("Sample {} predict label: {}, ground truth label: {}" \
.format(FLAGS.infer_file, pred, labels[int(label)]))
if __name__ == '__main__':
parser = argparse.ArgumentParser("CNN training on TSM")
parser.add_argument(
"--data",
type=str,
default='dataset/kinetics',
help="path to dataset root directory")
parser.add_argument(
"--device", type=str, default='gpu', help="device to use, gpu or cpu")
parser.add_argument(
"-d", "--dynamic", action='store_true', help="enable dygraph mode")
parser.add_argument(
"--label_list",
type=str,
default=None,
help="path to category index label list file")
parser.add_argument(
"--infer_file",
type=str,
default=None,
help="path to pickle file for inference")
parser.add_argument(
"-w",
"--weights",
default=None,
type=str,
help="weights path for evaluation")
FLAGS = parser.parse_args()
print_arguments(FLAGS)
check_gpu(str.lower(FLAGS.device) == 'gpu')
check_version()
main()
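A rough invocation sketch for the script above; the paths are placeholders, not values from the dataset:

# python infer.py --device gpu --dynamic \
#     --label_list dataset/kinetics/label_list \
#     --infer_file dataset/kinetics/sample.pkl \
#     --weights ./tsm_checkpoint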
| 31.226804 | 78 | 0.685375 |
bd4c3f8153da3cf2dec931cdfc48e890723c24b7 | 1,966 | py | Python | app2.py | Ruslion/Predicting-loan-eligibility | abed6de78352dd2206c95b0c9d40e745a6509538 | ["MIT"] | null | null | null | app2.py | Ruslion/Predicting-loan-eligibility | abed6de78352dd2206c95b0c9d40e745a6509538 | ["MIT"] | null | null | null | app2.py | Ruslion/Predicting-loan-eligibility | abed6de78352dd2206c95b0c9d40e745a6509538 | ["MIT"] | null | null | null |
# import Flask and jsonify
from flask import render_template, Flask, jsonify, request, make_response
# import Resource, Api and reqparse
from flask_restful import Resource, Api, reqparse
import pandas as pd
import numpy as np
import pickle
app = Flask(__name__)
api = Api(app)
with open('myfile.pickle', 'rb') as file_handle:
pipeline = pickle.load(file_handle)
@app.route('/')
def home():
return render_template('index.html', prediction_text="")
class predict(Resource):
#@app.route('/predict',methods=['POST'])
def post(self):
# create request parser
#data = request.get_json(force=True)
data = request.form
predict_request = [data['Gender'], data['Married'], data['Dependents'], data['Education'], data['Self_Employed'], data['ApplicantIncome'],
data['CoapplicantIncome'], data['LoanAmount'], data['Loan_Amount_Term'], data['Credit_History'], data['Property_Area']
]
df_predict = pd.DataFrame([predict_request], columns= ['Gender', 'Married', 'Dependents', 'Education',
'Self_Employed', 'ApplicantIncome', 'CoapplicantIncome', 'LoanAmount',
'Loan_Amount_Term', 'Credit_History', 'Property_Area'])
y_pred = pipeline.predict(df_predict)
output = str(y_pred[0])
print(output)
output_text=''
if output =='Y':
output_text = 'Congratulations! The loan has been approved.'
else:
output_text = 'We are sorry. Your application has been rejected.'
headers = {'Content-Type': 'text/html'}
return make_response(
render_template('index.html', prediction_text=output_text), 200, headers)
# assign endpoint
api.add_resource(predict, '/predict')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
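A hedged client sketch for the /predict endpoint defined above, assuming the server is running locally on the default Flask port; the form values are illustrative:

import requests

form = {
    'Gender': 'Male', 'Married': 'Yes', 'Dependents': '0', 'Education': 'Graduate',
    'Self_Employed': 'No', 'ApplicantIncome': '5000', 'CoapplicantIncome': '0',
    'LoanAmount': '150', 'Loan_Amount_Term': '360', 'Credit_History': '1',
    'Property_Area': 'Urban',
}
# The endpoint responds with the rendered index.html containing the prediction text
response = requests.post('http://localhost:5000/predict', data=form)
print(response.status_code)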
| 33.322034 | 146 | 0.608342 |
67e591b60a85134872438e2cbf7259cab05524a2 | 638 | py | Python | layerindex/migrations/0039_comparison_sha256sum.py | ebrent8/clear-linux-dissector-web | 45f1f9b5a5753ab8b14ed3c99f1c9e68bb97a47c | ["MIT"] | 3 | 2019-05-12T21:11:53.000Z | 2019-09-15T18:11:21.000Z | layerindex/migrations/0039_comparison_sha256sum.py | ebrent8/clear-linux-dissector-web | 45f1f9b5a5753ab8b14ed3c99f1c9e68bb97a47c | ["MIT"] | 21 | 2019-06-26T05:01:01.000Z | 2022-03-11T23:47:21.000Z | layerindex/migrations/0039_comparison_sha256sum.py | ebrent8/clear-linux-dissector-web | 45f1f9b5a5753ab8b14ed3c99f1c9e68bb97a47c | ["MIT"] | 8 | 2019-06-13T08:51:12.000Z | 2021-02-17T11:14:46.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-04-03 12:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('layerindex', '0038_patch_striplevel'),
]
operations = [
migrations.AddField(
model_name='classicrecipe',
name='sha256sum',
field=models.CharField(blank=True, max_length=64),
),
migrations.AddField(
model_name='patch',
name='sha256sum',
field=models.CharField(blank=True, max_length=64),
),
]
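The migration above would normally be applied with Django's migrate command, shown here as a comment; the project layout is assumed:

# python manage.py migrate layerindex 0039_comparison_sha256sum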
| 24.538462 | 62 | 0.601881 |
5347ba5bc15420f957d7542bb3e5dc14d513e22b | 27,668 | py | Python | ocs_ci/ocs/cluster.py | mykaul/ocs-ci | fcce03d1062a3565733fcf304c2caa6073181376 | ["MIT"] | null | null | null | ocs_ci/ocs/cluster.py | mykaul/ocs-ci | fcce03d1062a3565733fcf304c2caa6073181376 | ["MIT"] | null | null | null | ocs_ci/ocs/cluster.py | mykaul/ocs-ci | fcce03d1062a3565733fcf304c2caa6073181376 | ["MIT"] | null | null | null |
"""
A module for all rook functionalities and abstractions.
This module has rook related classes, support for functionalities to work with
rook cluster. This works with assumptions that an OCP cluster is already
functional and proper configurations are made for interaction.
"""
import base64
import logging
import random
import re
import threading
import yaml
from time import sleep
import ocs_ci.ocs.resources.pod as pod
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.resources import ocs, storage_cluster
import ocs_ci.ocs.constants as constant
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import TimeoutSampler, run_cmd
from ocs_ci.ocs.utils import get_pod_name_by_pattern
from ocs_ci.framework import config
from ocs_ci.ocs import ocp, constants, exceptions
from ocs_ci.ocs.resources.pvc import get_all_pvc_objs
logger = logging.getLogger(__name__)
class CephCluster(object):
"""
Handles all cluster-related operations from a Ceph perspective.
This class is a depiction of the Ceph cluster. It contains references to
pod objects which represent Ceph cluster entities.
Attributes:
pods (list) : A list of ceph cluster related pods
cluster_name (str): Name of ceph cluster
namespace (str): openshift Namespace where this cluster lives
"""
def __init__(self):
"""
Cluster object initializer; this object needs to be initialized
after cluster deployment. However, it's harmless to do so anywhere.
"""
# cluster_name is name of cluster in rook of type CephCluster
self.POD = ocp.OCP(
kind='Pod', namespace=config.ENV_DATA['cluster_namespace']
)
self.CEPHCLUSTER = ocp.OCP(
kind='CephCluster', namespace=config.ENV_DATA['cluster_namespace']
)
self.CEPHFS = ocp.OCP(
kind='CephFilesystem',
namespace=config.ENV_DATA['cluster_namespace']
)
self.DEP = ocp.OCP(
kind='Deployment',
namespace=config.ENV_DATA['cluster_namespace']
)
self.cluster_resource_config = self.CEPHCLUSTER.get().get('items')[0]
try:
self.cephfs_config = self.CEPHFS.get().get('items')[0]
except IndexError as e:
logging.warning(e)
logging.warning("No CephFS found")
self.cephfs_config = None
self._cluster_name = (
self.cluster_resource_config.get('metadata').get('name')
)
self._namespace = (
self.cluster_resource_config.get('metadata').get('namespace')
)
# We are not invoking ocs.create() here
# assuming cluster creation is done somewhere after deployment
# So just load ocs with existing cluster details
self.cluster = ocs.OCS(**self.cluster_resource_config)
if self.cephfs_config:
self.cephfs = ocs.OCS(**self.cephfs_config)
else:
self.cephfs = None
self.mon_selector = constant.MON_APP_LABEL
self.mds_selector = constant.MDS_APP_LABEL
self.tool_selector = constant.TOOL_APP_LABEL
self.mgr_selector = constant.MGR_APP_LABEL
self.osd_selector = constant.OSD_APP_LABEL
self.noobaa_selector = constant.NOOBAA_APP_LABEL
self.noobaa_core_selector = constant.NOOBAA_CORE_POD_LABEL
self.mons = []
self._ceph_pods = []
self.mdss = []
self.mgrs = []
self.osds = []
self.noobaas = []
self.toolbox = None
self.mds_count = 0
self.mon_count = 0
self.mgr_count = 0
self.osd_count = 0
self.noobaa_count = 0
self.health_error_status = None
self.health_monitor_enabled = False
self.health_monitor = None
self.scan_cluster()
logging.info(f"Number of mons = {self.mon_count}")
logging.info(f"Number of mds = {self.mds_count}")
self.used_space = 0
@property
def cluster_name(self):
return self._cluster_name
@property
def namespace(self):
return self._namespace
@property
def pods(self):
return self._ceph_pods
def scan_cluster(self):
"""
Get accurate info on current state of pods
"""
self._ceph_pods = pod.get_all_pods(self._namespace)
# TODO: Workaround for BZ1748325:
mons = pod.get_mon_pods(self.mon_selector, self.namespace)
for mon in mons:
if mon.ocp.get_resource_status(mon.name) == constant.STATUS_RUNNING:
self.mons.append(mon)
# TODO: End of workaround for BZ1748325
self.mdss = pod.get_mds_pods(self.mds_selector, self.namespace)
self.mgrs = pod.get_mgr_pods(self.mgr_selector, self.namespace)
self.osds = pod.get_osd_pods(self.osd_selector, self.namespace)
self.noobaas = pod.get_noobaa_pods(self.noobaa_selector, self.namespace)
self.toolbox = pod.get_ceph_tools_pod()
# set port attrib on mon pods
self.mons = list(map(self.set_port, self.mons))
self.cluster.reload()
if self.cephfs:
self.cephfs.reload()
else:
try:
self.cephfs_config = self.CEPHFS.get().get('items')[0]
self.cephfs = ocs.OCS(**self.cephfs_config)
self.cephfs.reload()
except IndexError as e:
logging.warning(e)
logging.warning("No CephFS found")
self.mon_count = len(self.mons)
self.mds_count = len(self.mdss)
self.mgr_count = len(self.mgrs)
self.osd_count = len(self.osds)
self.noobaa_count = len(self.noobaas)
@staticmethod
def set_port(pod):
"""
Set the port attribute on a pod.
The port attribute for mon is required for secrets, and this attribute
is not a member of the original Pod class.
Args:
pod(Pod): Pod object without 'port' attribute
Returns:
pod(Pod): A modified pod object with 'port' attribute set
"""
container = pod.pod_data.get('spec').get('containers')
port = container[0]['ports'][0]['containerPort']
# Dynamically added attribute 'port'
pod.port = port
logging.info(f"port={pod.port}")
return pod
def is_health_ok(self):
"""
Returns:
bool: True if "HEALTH_OK" else False
"""
self.cluster.reload()
return self.cluster.data['status']['ceph']['health'] == "HEALTH_OK"
def cluster_health_check(self, timeout=None):
"""
Check overall cluster health.
Relying on health reported by CephCluster.get()
Args:
timeout (int): in seconds. By default timeout value will be scaled
based on number of ceph pods in the cluster. This is just a
crude number. Its been observed that as the number of pods
increases it takes more time for cluster's HEALTH_OK.
Returns:
bool: True if "HEALTH_OK" else False
Raises:
CephHealthException: if cluster is not healthy
"""
# Scale timeout only if user hasn't passed any value
timeout = timeout or (10 * len(self.pods))
sample = TimeoutSampler(
timeout=timeout, sleep=3, func=self.is_health_ok
)
if not sample.wait_for_func_status(result=True):
raise exceptions.CephHealthException("Cluster health is NOT OK")
# This way of checking health of different cluster entities and
# raising only CephHealthException is not elegant.
# TODO: add an attribute in CephHealthException, called "reason",
# which should tell exactly which cluster entity's health
# is not OK
expected_mon_count = self.mon_count
expected_mds_count = self.mds_count
self.scan_cluster()
try:
self.mon_health_check(expected_mon_count)
except exceptions.MonCountException as e:
logger.error(e)
raise exceptions.CephHealthException("Cluster health is NOT OK")
try:
if not expected_mds_count:
pass
else:
self.mds_health_check(expected_mds_count)
except exceptions.MDSCountException as e:
logger.error(e)
raise exceptions.CephHealthException("Cluster health is NOT OK")
self.noobaa_health_check()
# TODO: OSD and MGR health check
logger.info("Cluster HEALTH_OK")
# This scan is for reconciliation of *.count
# because during the first scan in this function some of the
# pods may not be up and would have set the count to a lower number
self.scan_cluster()
return True
def mon_change_count(self, new_count):
"""
Change mon count in the cluster
Args:
new_count(int): Absolute number of mons required
"""
self.cluster.reload()
self.cluster.data['spec']['mon']['count'] = new_count
logger.info(self.cluster.data)
self.cluster.apply(**self.cluster.data)
self.mon_count = new_count
self.cluster_health_check()
logger.info(f"Mon count changed to {new_count}")
self.cluster.reload()
def mon_health_check(self, count):
"""
Mon health check based on pod count
Args:
count (int): Expected number of mon pods
Raises:
MonCountException: if mon pod count doesn't match
"""
timeout = 10 * len(self.pods)
logger.info(f"Expected MONs = {count}")
try:
assert self.POD.wait_for_resource(
condition='Running', selector=self.mon_selector,
resource_count=count, timeout=timeout, sleep=3,
)
# TODO: Workaround for BZ1748325:
actual_mons = pod.get_mon_pods()
actual_running_mons = list()
for mon in actual_mons:
if mon.ocp.get_resource_status(mon.name) == constant.STATUS_RUNNING:
actual_running_mons.append(mon)
actual = len(actual_running_mons)
# TODO: End of workaround for BZ1748325
assert count == actual, f"Expected {count}, Got {actual}"
except exceptions.TimeoutExpiredError as e:
logger.error(e)
raise exceptions.MonCountException(
f"Failed to achieve desired Mon count"
f" {count}"
)
def mds_change_count(self, new_count):
"""
Change mds count in the cluster
Args:
new_count(int): Absolute number of active mdss required
"""
self.cephfs.data['spec']['metadataServer']['activeCount'] = new_count
self.cephfs.apply(**self.cephfs.data)
logger.info(f"MDS active count changed to {new_count}")
if self.cephfs.data['spec']['metadataServer']['activeStandby']:
expected = new_count * 2
else:
expected = new_count
self.mds_count = expected
self.cluster_health_check()
self.cephfs.reload()
def mds_health_check(self, count):
"""
MDS health check based on pod count
Args:
count (int): number of pods expected
Raises:
MDACountException: if pod count doesn't match
"""
timeout = 10 * len(self.pods)
try:
assert self.POD.wait_for_resource(
condition='Running', selector=self.mds_selector,
resource_count=count, timeout=timeout, sleep=3,
)
except AssertionError as e:
logger.error(e)
raise exceptions.MDSCountException(
f"Failed to achieve desired MDS count"
f" {count}"
)
def noobaa_health_check(self):
"""
Noobaa health check based on pods status
"""
timeout = 10 * len(self.pods)
assert self.POD.wait_for_resource(
condition='Running', selector=self.noobaa_selector,
timeout=timeout, sleep=3,
), "Failed to achieve desired Noobaa Operator Status"
assert self.POD.wait_for_resource(
condition='Running', selector=self.noobaa_core_selector,
timeout=timeout, sleep=3,
), "Failed to achieve desired Noobaa Core Status"
def get_admin_key(self):
"""
Returns:
adminkey (str): base64 encoded key
"""
return self.get_user_key('client.admin')
def get_user_key(self, user):
"""
Args:
user (str): ceph username ex: client.user1
Returns:
key (str): base64 encoded user key
"""
out = self.toolbox.exec_cmd_on_pod(
f"ceph auth get-key {user} --format json"
)
if 'ENOENT' in out:
return False
key_base64 = base64.b64encode(out['key'].encode()).decode()
return key_base64
def create_user(self, username, caps):
"""
Create a ceph user in the cluster
Args:
username (str): ex client.user1
caps (str): ceph caps ex: mon 'allow r' osd 'allow rw'
Return:
return value of get_user_key()
"""
cmd = f"ceph auth add {username} {caps}"
# As of now ceph auth command gives output to stderr
# To be handled
out = self.toolbox.exec_cmd_on_pod(cmd)
logging.info(type(out))
return self.get_user_key(username)
def get_mons_from_cluster(self):
"""
Getting the list of mons from the cluster
Returns:
available_mon (list): Returns the mons from the cluster
"""
ret = self.DEP.get(
resource_name='', out_yaml_format=False, selector='app=rook-ceph-mon'
)
available_mon = re.findall(r'[\w-]+mon-+[\w-]', ret)
return available_mon
def remove_mon_from_cluster(self):
"""
Removing the mon pod from deployment
Returns:
remove_mon(bool): True if removal of mon is successful, False otherwise
"""
mons = self.get_mons_from_cluster()
after_delete_mon_count = len(mons) - 1
random_mon = random.choice(mons)
remove_mon = self.DEP.delete(resource_name=random_mon)
assert self.POD.wait_for_resource(
condition=constant.STATUS_RUNNING,
resource_count=after_delete_mon_count,
selector='app=rook-ceph-mon'
)
logging.info(f"Removed the mon {random_mon} from the cluster")
return remove_mon
@retry(UnexpectedBehaviour, tries=20, delay=10, backoff=1)
def check_ceph_pool_used_space(self, cbp_name):
"""
Check for the used space of a pool in cluster
Returns:
used_in_gb (float): Amount of used space in pool (in GBs)
Raises:
UnexpectedBehaviour: If used size keeps varying in Ceph status
"""
ct_pod = pod.get_ceph_tools_pod()
rados_status = ct_pod.exec_ceph_cmd(ceph_cmd=f"rados df -p {cbp_name}")
assert rados_status is not None
used = rados_status['pools'][0]['size_bytes']
used_in_gb = format(used / constants.GB, '.4f')
if self.used_space and self.used_space == used_in_gb:
return float(self.used_space)
self.used_space = used_in_gb
raise UnexpectedBehaviour(
f"In Rados df, Used size is varying"
)
def get_ceph_health(self, detail=False):
"""
Exec `ceph health` cmd on tools pod and return the status of the ceph
cluster.
Args:
detail (bool): If True the 'ceph health detail' is executed
Returns:
str: Output of the ceph health command.
"""
ceph_health_cmd = "ceph health"
if detail:
ceph_health_cmd = f"{ceph_health_cmd} detail"
return self.toolbox.exec_cmd_on_pod(
ceph_health_cmd, out_yaml_format=False,
)
def get_ceph_status(self):
"""
Exec `ceph status` cmd on tools pod and return its output.
Returns:
str: Output of the ceph status command.
"""
return self.toolbox.exec_cmd_on_pod(
"ceph status", out_yaml_format=False,
)
def enable_health_monitor(self, sleep=5):
"""
Enable monitoring for ceph health status.
Args:
sleep (int): Number of seconds to sleep between health checks.
"""
self.monitor = HealthMonitorThread(self, sleep)
self.monitor.start()
def disable_health_monitor(self):
self.health_monitor_enabled = False
def get_ceph_cluster_iops(self):
"""
The function gets the IOPS from the ocs cluster
Returns:
Total IOPS in the cluster
"""
ceph_status = self.get_ceph_status()
for item in ceph_status.split("\n"):
if 'client' in item:
iops = re.findall(r'\d+\.+\d+|\d\d*', item.strip())
iops = iops[2::1]
if len(iops) == 2:
iops_in_cluster = float(iops[0]) + float(iops[1])
else:
iops_in_cluster = float(iops[0])
logging.info(f"IOPS in the cluster is {iops_in_cluster}")
return iops_in_cluster
def get_iops_percentage(self, osd_size=2):
"""
The function calculates the IOPS percentage
of the cluster depending on number of osds in the cluster
Args:
osd_size (int): Size of 1 OSD in Ti
Returns:
IOPS percentage of the OCS cluster
"""
osd_count = count_cluster_osd()
iops_per_osd = osd_size * constants.IOPS_FOR_1TiB_OSD
iops_in_cluster = self.get_ceph_cluster_iops()
osd_iops_limit = iops_per_osd * osd_count
iops_percentage = (iops_in_cluster / osd_iops_limit) * 100
logging.info(f"The IOPS percentage of the cluster is {iops_percentage}%")
return iops_percentage
def get_cluster_throughput(self):
"""
Function to get the throughput of ocs cluster
Returns:
Throughput of the cluster in MiB/s
"""
ceph_status = self.get_ceph_status()
for item in ceph_status.split("\n"):
if 'client' in item:
throughput_data = item.strip('client: ').split(",")
throughput_data = throughput_data[:2:1]
# Converting all B/s and KiB/s to MiB/s
conversion = {'B/s': 0.000000976562, 'KiB/s': 0.000976562, 'MiB/s': 1}
throughput = 0
for val in throughput_data:
throughput += [
float(re.findall(r'\d+', val)[0]) * conversion[key]
for key in conversion.keys() if key in val
][0]
logger.info(f"The throughput is {throughput} MiB/s")
return throughput
def get_throughput_percentage(self):
"""
Function to get throughput percentage of the ocs cluster
Returns:
Throughput percentage of the cluster
"""
throughput_of_cluster = self.get_cluster_throughput()
throughput_percentage = (throughput_of_cluster / constants.THROUGHPUT_LIMIT_OSD) * 100
logging.info(f"The throughput percentage of the cluster is {throughput_percentage}%")
return throughput_percentage
class HealthMonitorThread(threading.Thread):
"""
Class for monitoring the Ceph health status of a CephCluster. If the
CephCluster reaches the HEALTH_ERROR state, it will save the ceph status to the
health_error_status variable in the ceph_cluster object and will stop
monitoring. It's up to the user how to handle the error.
"""
def __init__(self, ceph_cluster, sleep=5):
"""
Constructor for ceph health status thread.
Args:
ceph_cluster (CephCluster): Reference to CephCluster object.
sleep (int): Number of seconds to sleep between health checks.
"""
self.ceph_cluster = ceph_cluster
self.sleep = sleep
super(HealthMonitorThread, self).__init__()
def run(self):
self.ceph_cluster.health_monitor_enabled = True
while self.ceph_cluster.health_monitor_enabled and (
not self.ceph_cluster.health_error_status
):
sleep(self.sleep)
health_status = self.ceph_cluster.get_ceph_health(detail=True)
if "HEALTH_ERROR" in health_status:
self.ceph_cluster.health_error_status = (
self.ceph_cluster.get_ceph_status()
)
def validate_cluster_on_pvc():
"""
Validate creation of PVCs for MON and OSD pods.
Also validate that those PVCs are attached to the OCS pods
Raises:
AssertionError: If PVC is not mounted on one or more OCS pods
"""
# Get the PVCs for selected label (MON/OSD)
ns = config.ENV_DATA['cluster_namespace']
ocs_pvc_obj = get_all_pvc_objs(namespace=ns)
# Check all pvc's are in bound state
pvc_names = []
for pvc_obj in ocs_pvc_obj:
if (pvc_obj.name.startswith(constants.DEFAULT_DEVICESET_PVC_NAME)
or pvc_obj.name.startswith(constants.DEFAULT_MON_PVC_NAME)):
assert pvc_obj.status == constants.STATUS_BOUND, (
f"PVC {pvc_obj.name} is not Bound"
)
logger.info(f"PVC {pvc_obj.name} is in Bound state")
pvc_names.append(pvc_obj.name)
mon_pods = get_pod_name_by_pattern('rook-ceph-mon', ns)
osd_pods = get_pod_name_by_pattern('rook-ceph-osd', ns, filter='prepare')
assert len(mon_pods) + len(osd_pods) == len(pvc_names), (
"Not enough PVC's available for all Ceph Pods"
)
for ceph_pod in mon_pods + osd_pods:
out = run_cmd(f'oc -n {ns} get pods {ceph_pod} -o yaml')
out_yaml = yaml.safe_load(out)
for vol in out_yaml['spec']['volumes']:
if vol.get('persistentVolumeClaim'):
claimName = vol.get('persistentVolumeClaim').get('claimName')
logger.info(f"{ceph_pod} backed by pvc {claimName}")
assert claimName in pvc_names, (
"Ceph Internal Volume not backed by PVC"
)
def count_cluster_osd():
"""
The function returns the number of cluster OSDs
Returns:
osd_count (int): number of OSD pods in current cluster
"""
storage_cluster_obj = storage_cluster.StorageCluster(
resource_name=config.ENV_DATA['storage_cluster_name'],
namespace=config.ENV_DATA['cluster_namespace'],
)
storage_cluster_obj.reload_data()
osd_count = (
int(storage_cluster_obj.data['spec']['storageDeviceSets'][0]['count'])
* int(storage_cluster_obj.data['spec']['storageDeviceSets'][0]['replica'])
)
return osd_count
def validate_pdb_creation():
"""
Validate creation of PDBs for MON, MDS and OSD pods.
Raises:
AssertionError: If required PDBs were not created.
"""
pdb_obj = ocp.OCP(kind='PodDisruptionBudget')
item_list = pdb_obj.get().get('items')
pdb_list = [item['metadata']['name'] for item in item_list]
osd_count = count_cluster_osd()
pdb_required = [constants.MDS_PDB, constants.MON_PDB]
for num in range(osd_count):
pdb_required.append(constants.OSD_PDB + str(num))
pdb_list.sort()
pdb_required.sort()
for required, given in zip(pdb_required, pdb_list):
assert required == given, f"{required} was not created"
logger.info(f"All required PDBs created: {pdb_required}")
def get_osd_utilization():
"""
Get osd utilization value
Returns:
osd_filled (dict): Dict of osd name and its used value
i.e {'osd.1': 15.276289408185841, 'osd.0': 15.276289408185841, 'osd.2': 15.276289408185841}
"""
osd_filled = {}
ceph_cmd = "ceph osd df"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
for osd in output.get('nodes'):
osd_filled[osd['name']] = osd['utilization']
return osd_filled
def validate_osd_utilization(osd_used=80):
"""
Validates osd utilization matches osd_used value
Args:
osd_used (int): osd used value
Returns:
bool: True if all osd values is equal or greater to osd_used.
False Otherwise.
"""
_rc = True
osd_filled = get_osd_utilization()
for osd, value in osd_filled.items():
if int(value) >= osd_used:
logger.info(f"{osd} used value {value}")
else:
_rc = False
logger.warn(f"{osd} used value {value}")
return _rc
def get_pgs_per_osd():
"""
Function to get ceph pg count per OSD
Returns:
osd_dict (dict): Dict of osd name and its used value
i.e {'osd.0': 136, 'osd.2': 136, 'osd.1': 136}
"""
osd_dict = {}
ceph_cmd = "ceph osd df"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
for osd in output.get('nodes'):
osd_dict[osd['name']] = osd['pgs']
return osd_dict
def get_balancer_eval():
"""
Function to get ceph pg balancer eval value
Returns:
eval_out (float): Eval output of pg balancer
"""
ceph_cmd = "ceph balancer eval"
ct_pod = pod.get_ceph_tools_pod()
eval_out = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd).split(' ')
return float(eval_out[3])
def get_pg_balancer_status():
"""
Function to check pg_balancer active and mode is upmap
Returns:
bool: True if active and upmap is set else False
"""
# Check whether the PG balancer is active or not
ceph_cmd = "ceph balancer status"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
# Check 'mode' is 'upmap', based on suggestion from Ceph QE
# TODO: Revisit this if mode needs change.
if output['active'] and output['mode'] == 'upmap':
logging.info("PG balancer is active and mode is upmap")
return True
else:
logging.error("PG balancer is not active")
return False
def validate_pg_balancer():
"""
Validate whether data is equally distributed across OSDs
Returns:
bool: True if osd data consumption difference is <= 2% else False
"""
# Check OSD utilization when the pg balancer is active
if get_pg_balancer_status():
eval = get_balancer_eval()
osd_dict = get_pgs_per_osd()
osd_min_pg_value = min(osd_dict.values())
osd_max_pg_value = max(osd_dict.values())
diff = osd_max_pg_value - osd_min_pg_value
# TODO: Revisit this if pg difference value needs change
# TODO: Revisit eval value if pg balancer mode changes from 'upmap'
if diff <= 5 and eval <= 0.02:
logging.info(
f"Eval value is {eval} and pg distribution "
f"difference is {diff} between high and low pgs per OSD"
)
return True
else:
logging.error(
f"Eval value is {eval} and pg distribution "
f"difference is {diff} between high and low pgs per OSD"
)
return False
else:
logging.info(f"pg_balancer is not active")
| 33.135329 | 99 | 0.612296 |
4ac8b85ae9895e91df48bfb342da53c9745f1964 | 4,483 | py | Python | tests/util/test_ruamel_yaml.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | ["Apache-2.0"] | 3 | 2019-01-24T20:32:14.000Z | 2022-03-22T14:25:48.000Z | tests/util/test_ruamel_yaml.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | ["Apache-2.0"] | 6 | 2021-02-08T21:02:40.000Z | 2022-03-12T00:52:16.000Z | tests/util/test_ruamel_yaml.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | ["Apache-2.0"] | 3 | 2016-08-26T12:32:49.000Z | 2020-02-26T21:01:35.000Z |
"""Test Home Assistant ruamel.yaml loader."""
import os
import unittest
from tempfile import mkdtemp
import pytest
from ruamel.yaml import YAML
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util.ruamel_yaml as util_yaml
TEST_YAML_A = """\
title: My Awesome Home
# Include external resources
resources:
- url: /local/my-custom-card.js
type: js
- url: /local/my-webfont.css
type: css
# Exclude entities from "Unused entities" view
excluded_entities:
- weblink.router
views:
# View tab title.
- title: Example
# Optional unique id for direct access /lovelace/${id}
id: example
# Optional background (overwrites the global background).
background: radial-gradient(crimson, skyblue)
# Each view can have a different theme applied.
theme: dark-mode
# The cards to show on this view.
cards:
# The filter card will filter entities for their state
- type: entity-filter
entities:
- device_tracker.paulus
- device_tracker.anne_there
state_filter:
- 'home'
card:
type: glance
title: People that are home
# The picture entity card will represent an entity with a picture
- type: picture-entity
image: https://www.home-assistant.io/images/default-social.png
entity: light.bed_light
# Specify a tab icon if you want the view tab to be an icon.
- icon: mdi:home-assistant
# Title of the view. Will be used as the tooltip for tab icon
title: Second view
cards:
- id: test
type: entities
title: Test card
# Entities card will take a list of entities and show their state.
- type: entities
# Title of the entities card
title: Example
# The entities here will be shown in the same order as specified.
# Each entry is an entity ID or a map with extra options.
entities:
- light.kitchen
- switch.ac
- entity: light.living_room
# Override the name to use
name: LR Lights
# The markdown card will render markdown text.
- type: markdown
title: Lovelace
content: >
Welcome to your **Lovelace UI**.
"""
TEST_YAML_B = """\
title: Home
views:
- title: Dashboard
id: dashboard
icon: mdi:home
cards:
- id: testid
type: vertical-stack
cards:
- type: picture-entity
entity: group.sample
name: Sample
image: /local/images/sample.jpg
tap_action: toggle
"""
# Test data that can not be loaded as YAML
TEST_BAD_YAML = """\
title: Home
views:
- title: Dashboard
icon: mdi:home
cards:
- id: testid
type: vertical-stack
"""
# Test unsupported YAML
TEST_UNSUP_YAML = """\
title: Home
views:
- title: Dashboard
icon: mdi:home
cards: !include cards.yaml
"""
class TestYAML(unittest.TestCase):
"""Test lovelace.yaml save and load."""
def setUp(self):
"""Set up for tests."""
self.tmp_dir = mkdtemp()
self.yaml = YAML(typ='rt')
def tearDown(self):
"""Clean up after tests."""
for fname in os.listdir(self.tmp_dir):
os.remove(os.path.join(self.tmp_dir, fname))
os.rmdir(self.tmp_dir)
def _path_for(self, leaf_name):
return os.path.join(self.tmp_dir, leaf_name+".yaml")
def test_save_and_load(self):
"""Test saving and loading back."""
fname = self._path_for("test1")
open(fname, "w+").close()
util_yaml.save_yaml(fname, self.yaml.load(TEST_YAML_A))
data = util_yaml.load_yaml(fname, True)
assert data == self.yaml.load(TEST_YAML_A)
def test_overwrite_and_reload(self):
"""Test that we can overwrite an existing file and read back."""
fname = self._path_for("test2")
open(fname, "w+").close()
util_yaml.save_yaml(fname, self.yaml.load(TEST_YAML_A))
util_yaml.save_yaml(fname, self.yaml.load(TEST_YAML_B))
data = util_yaml.load_yaml(fname, True)
assert data == self.yaml.load(TEST_YAML_B)
def test_load_bad_data(self):
"""Test error from trying to load unserialisable data."""
fname = self._path_for("test3")
with open(fname, "w") as fh:
fh.write(TEST_BAD_YAML)
with pytest.raises(HomeAssistantError):
util_yaml.load_yaml(fname, True)
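A condensed sketch of the round trip exercised by the tests above, assuming a Home Assistant checkout is importable and TEST_YAML_A is defined as in this file; the file name is a placeholder:

from ruamel.yaml import YAML
import homeassistant.util.ruamel_yaml as util_yaml

yaml = YAML(typ='rt')
util_yaml.save_yaml("lovelace.yaml", yaml.load(TEST_YAML_A))
data = util_yaml.load_yaml("lovelace.yaml", True)
assert data == yaml.load(TEST_YAML_A)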
| 28.194969 | 74 | 0.627705 |
a6a9d7b90eb535e6e829e828cc2e02093a7a6c47 | 147 | py | Python | conjur/util/ssl_utils/__init__.py | cyberark/cyberark-conjur-cli | 2507e8769808643d89efa7e2496cfc14f505bd7e | ["Apache-2.0"] | null | null | null | conjur/util/ssl_utils/__init__.py | cyberark/cyberark-conjur-cli | 2507e8769808643d89efa7e2496cfc14f505bd7e | ["Apache-2.0"] | 2 | 2022-03-09T11:56:10.000Z | 2022-03-14T14:53:15.000Z | conjur/util/ssl_utils/__init__.py | cyberark/cyberark-conjur-cli | 2507e8769808643d89efa7e2496cfc14f505bd7e | ["Apache-2.0"] | null | null | null |
"""
Module holds the SSL utils of this project
"""
from conjur.util.ssl_utils.ssl_client import SSLClient
from conjur.util.ssl_utils import errors
| 24.5 | 54 | 0.802721 |
6f40082c43300d4b684d558d100407a894e940b0 | 614 | py | Python | scripts/auth_tool.py | shogo82148/JO_RI_bot | 653008faf8356a6c6e2b44f0154f646774aff79b | ["MIT"] | null | null | null | scripts/auth_tool.py | shogo82148/JO_RI_bot | 653008faf8356a6c6e2b44f0154f646774aff79b | ["MIT"] | null | null | null | scripts/auth_tool.py | shogo82148/JO_RI_bot | 653008faf8356a6c6e2b44f0154f646774aff79b | ["MIT"] | null | null | null |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import tweepy
if __name__ == "__main__":
consumer_key = raw_input('Your Consumer Key:')
consumer_secret = raw_input('Your Consumer Secret')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# Display the URL where the user can authorize the application
print 'Please access this URL: ' + auth.get_authorization_url()
# Ask the user to enter the PIN
pin = raw_input('Please input verification PIN from twitter.com: ').strip()
# Obtain and display the Access Token
token = auth.get_access_token(verifier=pin)
print 'Access token:'
print ' Key: %s' % token.key
print ' Secret: %s' % token.secret
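A hedged follow-up sketch showing how the printed credentials might then be used with tweepy; all key values are placeholders:

import tweepy

auth = tweepy.OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
auth.set_access_token('ACCESS_TOKEN_KEY', 'ACCESS_TOKEN_SECRET')
api = tweepy.API(auth)
api.update_status('Hello from JO_RI_bot')  # posts a tweet using the stored credentials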
| 26.695652 | 77 | 0.713355 |
f63913853b67339f55a10be7ee00a42a23275d99 | 11,827 | py | Python | lisa/tests/staging/sched_android.py | Binse-Park/lisa_ARM | aa7767654c95bb2cc0a2dddecd5b82a7fcf5c746 | ["Apache-2.0"] | null | null | null | lisa/tests/staging/sched_android.py | Binse-Park/lisa_ARM | aa7767654c95bb2cc0a2dddecd5b82a7fcf5c746 | ["Apache-2.0"] | null | null | null | lisa/tests/staging/sched_android.py | Binse-Park/lisa_ARM | aa7767654c95bb2cc0a2dddecd5b82a7fcf5c746 | ["Apache-2.0"] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os.path
import abc
from lisa.wlgen.rta import RTA, Periodic
from lisa.tests.base import TestBundle, Result, ResultBundle, RTATestBundle, AggregatedResultBundle
from lisa.trace import Trace, FtraceCollector, FtraceConf, requires_events
from lisa.target import Target
from lisa.utils import ArtifactPath
from lisa.analysis.frequency import FrequencyAnalysis
from lisa.analysis.tasks import TasksAnalysis
class SchedTuneItemBase(RTATestBundle):
"""
Abstract class enabling rtapp execution in a schedtune group
:param boost: The boost level to set for the cgroup
:type boost: int
:param prefer_idle: The prefer_idle flag to set for the cgroup
:type prefer_idle: bool
"""
def __init__(self, res_dir, plat_info, boost, prefer_idle):
super().__init__(res_dir, plat_info)
self.boost = boost
self.prefer_idle = prefer_idle
@property
def cgroup_configuration(self):
return self.get_cgroup_configuration(self.plat_info, self.boost, self.prefer_idle)
@classmethod
def get_cgroup_configuration(cls, plat_info, boost, prefer_idle):
attributes = {
'boost': boost,
'prefer_idle': int(prefer_idle)
}
return { 'name': 'lisa_test',
'controller': 'schedtune',
'attributes': attributes }
@classmethod
# Not annotated, to prevent exekall from picking it up. See
# SchedTuneBase.from_target
def from_target(cls, target, boost, prefer_idle, res_dir=None, ftrace_coll=None):
"""
.. warning:: `res_dir` is at the end of the parameter list, unlike most
other `from_target` where it is the second one.
"""
return super().from_target(target, res_dir, boost=boost,
prefer_idle=prefer_idle, ftrace_coll=ftrace_coll)
@classmethod
def _from_target(cls, target, res_dir, boost, prefer_idle, ftrace_coll=None):
plat_info = target.plat_info
rtapp_profile = cls.get_rtapp_profile(plat_info)
cgroup_config = cls.get_cgroup_configuration(plat_info, boost, prefer_idle)
cls._run_rtapp(target, res_dir, rtapp_profile, ftrace_coll, cgroup_config)
return cls(res_dir, plat_info, boost, prefer_idle)
class SchedTuneBase(TestBundle):
"""
Abstract class enabling the aggregation of ``SchedTuneItemBase``
:param test_bundles: a list of test bundles generated by
multiple ``SchedTuneItemBase`` instances
:type test_bundles: list
"""
def __init__(self, res_dir, plat_info, test_bundles):
super().__init__(res_dir, plat_info)
self.test_bundles = test_bundles
@classmethod
def from_target(cls, target:Target, res_dir:ArtifactPath=None,
ftrace_coll:FtraceCollector=None) -> 'SchedTuneBase':
"""
Creates a SchedTuneBase bundle from the target.
"""
return super().from_target(target, res_dir, ftrace_coll=ftrace_coll)
@classmethod
def _from_target(cls, target, res_dir, ftrace_coll):
return cls(res_dir, target.plat_info,
list(cls._create_test_bundles(target, res_dir, ftrace_coll))
)
@classmethod
@abc.abstractmethod
def _create_test_bundles(cls, target, res_dir, ftrace_coll):
"""
Collects and yields a :class:`lisa.tests.base.ResultBundle` per test
item.
"""
pass
@classmethod
def _create_test_bundle_item(cls, target, res_dir, ftrace_coll, item_cls,
boost, prefer_idle):
"""
Creates and returns a TestBundle for a given item class, and a given
schedtune configuration
"""
item_dir = ArtifactPath.join(res_dir, 'boost_{}_prefer_idle_{}'.format(
boost, int(prefer_idle)))
os.makedirs(item_dir)
logger = cls.get_logger()
logger.info('Running {} with boost={}, prefer_idle={}'.format(
item_cls.__name__, boost, prefer_idle))
return item_cls.from_target(target, boost, prefer_idle, res_dir=item_dir, ftrace_coll=ftrace_coll)
class SchedTuneFreqItem(SchedTuneItemBase):
"""
Runs a tiny RT rtapp task pinned to a big CPU at a given boost level and
checks the frequency selection was performed accordingly.
"""
@classmethod
def get_rtapp_profile(cls, plat_info):
cpu = plat_info['capacity-classes'][-1][0]
rtapp_profile = {}
rtapp_profile['rta_stune'] = Periodic(
duty_cycle_pct = 1, # very small task, no impact on freq w/o boost
duration_s = 10,
period_ms = 16,
cpus = [cpu], # pin to big CPU, to focus on frequency selection
sched_policy = 'FIFO' # RT tasks have the boost holding feature so
# the frequency should be more stable, and we
# shouldn't go to max freq in Android
)
return rtapp_profile
@requires_events(SchedTuneItemBase.trace_window.used_events, "cpu_frequency")
def trace_window(self, trace):
"""
Set the boundaries of the trace window to ``cpu_frequency`` events
before/after the task's start/end time
"""
rta_start, rta_stop = super().trace_window(trace)
cpu = self.plat_info['capacity-classes'][-1][0]
freq_df = trace.df_events('cpu_frequency')
freq_df = freq_df[freq_df.cpu == cpu]
# Find the frequency events before and after the task runs
freq_start = freq_df[freq_df.index < rta_start].index[-1]
freq_stop = freq_df[freq_df.index > rta_stop].index[0]
return (freq_start, freq_stop)
@FrequencyAnalysis.get_average_cpu_frequency.used_events
def test_stune_frequency(self, freq_margin_pct=10) -> ResultBundle:
"""
Test that frequency selection followed the boost
:param: freq_margin_pct: Allowed margin between estimated and measured
average frequencies
:type freq_margin_pct: int
Compute the expected frequency given the boost level and compare to the
real average frequency from the trace.
Check that the difference between expected and measured frequencies is
no larger than ``freq_margin_pct``.
"""
kernel_version = self.plat_info['kernel']['version']
if kernel_version.parts[:2] < (4, 14):
self.get_logger().warning('This test requires the RT boost hold, but it may be disabled in {}'.format(kernel_version))
cpu = self.plat_info['capacity-classes'][-1][0]
freqs = self.plat_info['freqs'][cpu]
max_freq = max(freqs)
# Estimate the target frequency, including sugov's margin, and round
# into a real OPP
boost = self.boost
target_freq = min(max_freq, max_freq * boost / 80)
target_freq = list(filter(lambda f: f >= target_freq, freqs))[0]
# Get the real average frequency
avg_freq = self.trace.analysis.frequency.get_average_cpu_frequency(cpu)
distance = abs(target_freq - avg_freq) * 100 / target_freq
res = ResultBundle.from_bool(distance < freq_margin_pct)
res.add_metric("target freq", target_freq, 'kHz')
res.add_metric("average freq", avg_freq, 'kHz')
res.add_metric("boost", boost, '%')
return res
class SchedTuneFrequencyTest(SchedTuneBase):
"""
Runs multiple ``SchedTuneFreqItem`` tests at various boost levels ranging
from 20% to 100%, then checks all succeeded.
"""
# Make sure exekall will always collect all events required by items
ftrace_conf = SchedTuneFreqItem.ftrace_conf
@classmethod
def _create_test_bundles(cls, target, res_dir, ftrace_coll):
for boost in range(20, 101, 20):
yield cls._create_test_bundle_item(target, res_dir, ftrace_coll,
SchedTuneFreqItem, boost, False)
def test_stune_frequency(self, freq_margin_pct=10) -> AggregatedResultBundle:
"""
.. seealso:: :meth:`SchedTuneFreqItem.test_stune_frequency`
"""
item_res_bundles = [
item.test_stune_frequency(freq_margin_pct)
for item in self.test_bundles
]
return AggregatedResultBundle(item_res_bundles, 'boost')
class SchedTunePlacementItem(SchedTuneItemBase):
"""
Runs a tiny RT-App task marked 'prefer_idle' at a given boost level and
tests if it was placed on big-enough CPUs.
"""
@classmethod
def get_rtapp_profile(cls, plat_info):
rtapp_profile = {}
rtapp_profile['rta_stune'] = Periodic(
duty_cycle_pct = 1,
duration_s = 3,
period_ms = 16,
)
return rtapp_profile
@TasksAnalysis.df_task_total_residency.used_events
def test_stune_task_placement(self, bad_cpu_margin_pct=10) -> ResultBundle:
"""
Test that the task placement satisfied the boost requirement
Check that top-app tasks spend no more than ``bad_cpu_margin_pct`` of
their time on CPUs that don't have enough capacity to serve their
boost.
"""
assert len(self.rtapp_tasks) == 1
task = self.rtapp_tasks[0]
df = self.trace.analysis.tasks.df_task_total_residency(task)
# Find CPUs without enough capacity to meet the boost
boost = self.boost
cpu_caps = self.plat_info['cpu-capacities']
ko_cpus = list(filter(lambda x: (cpu_caps[x] / 10.24) < boost, cpu_caps))
# Count how much time was spend on wrong CPUs
time_ko = 0
total_time = 0
for cpu in cpu_caps:
t = df['runtime'][cpu]
if cpu in ko_cpus:
time_ko += t
total_time += t
pct_ko = time_ko * 100 / total_time
res = ResultBundle.from_bool(pct_ko < bad_cpu_margin_pct)
res.add_metric("time spent on inappropriate CPUs", pct_ko, '%')
res.add_metric("boost", boost, '%')
return res
class SchedTunePlacementTest(SchedTuneBase):
"""
Runs multiple ``SchedTunePlacementItem`` tests with prefer_idle set and
typical top-app boost levels, then checks all succeeded.
"""
# Make sure exekall will always collect all events required by items
ftrace_conf = SchedTunePlacementItem.ftrace_conf
@classmethod
def _create_test_bundles(cls, target, res_dir, ftrace_coll):
# Typically top-app tasks are boosted by 10%, or 50% during touchboost
for boost in [10, 50]:
yield cls._create_test_bundle_item(target, res_dir, ftrace_coll,
SchedTunePlacementItem, boost, True)
def test_stune_task_placement(self, margin_pct=10) -> AggregatedResultBundle:
"""
.. seealso:: :meth:`SchedTunePlacementItem.test_stune_task_placement`
"""
item_res_bundles = [
item.test_stune_task_placement(margin_pct)
for item in self.test_bundles
]
return AggregatedResultBundle(item_res_bundles, 'boost')
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
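A minimal usage sketch for the frequency test above, assuming a connected lisa Target object named target is already available; device setup is not shown:

bundle = SchedTuneFrequencyTest.from_target(target, res_dir='./stune_res')
result = bundle.test_stune_frequency(freq_margin_pct=10)
print(result)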
| 37.665605 | 130 | 0.660523 |
7b3a568bf35fe2a3e0d80d4ce97584d98f9d7b40 | 1,992 | py | Python | pytest_splunk_env/splunk/helmut/util/basefileutils.py | splunk/pytest-splunk-env | 63ce423446f54869e4530627ff7463ea3e26c38a | ["Apache-2.0"] | 1 | 2021-03-18T23:35:08.000Z | 2021-03-18T23:35:08.000Z | pytest_splunk_env/splunk/helmut/util/basefileutils.py | splunk/pytest-splunk-env | 63ce423446f54869e4530627ff7463ea3e26c38a | ["Apache-2.0"] | null | null | null | pytest_splunk_env/splunk/helmut/util/basefileutils.py | splunk/pytest-splunk-env | 63ce423446f54869e4530627ff7463ea3e26c38a | ["Apache-2.0"] | 1 | 2022-03-27T16:55:33.000Z | 2022-03-27T16:55:33.000Z |
# SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
from pytest_splunk_env.splunk.helmut.log import Logging
class BaseFileUtils(Logging):
def isfile(self, path):
raise NotImplementedError("Function not implemented")
def isdir(self, path):
raise NotImplementedError("Function not implemented")
def delete_file(self, file):
raise NotImplementedError("Function not implemented")
def get_file_contents(self, path):
raise NotImplementedError("Function not implemented")
def write_file_contents(self, path, contents, mode="w"):
raise NotImplementedError("Function not implemented")
def copy_file(self, source, target):
raise NotImplementedError("Function not implemented")
def move_file(self, source, target):
raise NotImplementedError("Function not implemented")
def copy_directory(self, source, target, ignore=None):
raise NotImplementedError("Function not implemented")
def compare_files(self, file1, file2):
raise NotImplementedError("Function not implemented")
def move_directory(self, source, target, ignore=None):
raise NotImplementedError("Function not implemented")
def force_remove_file(self, path):
raise NotImplementedError("Function not implemented")
def force_remove_directory(self, path):
raise NotImplementedError("Function not implemented")
def force_copy_file(self, source, target):
raise NotImplementedError("Function not implemented")
def force_move_file(self, source, target):
raise NotImplementedError("Function not implemented")
def force_move_directory(self, source, target):
raise NotImplementedError("Function not implemented")
def force_copy_directory(self, source, target):
raise NotImplementedError("Function not implemented")
def create_directory(self, path):
raise NotImplementedError("Function not implemented")
| 33.762712 | 61 | 0.728414 |
cc02386383b1887637d54999fd2ccb2df646cb4d | 644 | py | Python | setup.py | fanzeyi/typestruct | 947e458e865ecf58c4c0848a05f53e9da3d9e686 | ["MIT"] | 3 | 2018-09-13T06:54:49.000Z | 2018-09-13T09:47:08.000Z | setup.py | fanzeyi/typestruct | 947e458e865ecf58c4c0848a05f53e9da3d9e686 | ["MIT"] | null | null | null | setup.py | fanzeyi/typestruct | 947e458e865ecf58c4c0848a05f53e9da3d9e686 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="typestruct",
version="0.1.1",
author="Zeyi Fan",
author_email="i@zr.is",
description="struct serialization/deserialization with typing",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/fanzeyi/typestruct",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
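The package above would typically be built and uploaded along these lines, shown as comments; the tooling choice is an assumption:

# python setup.py sdist bdist_wheel
# twine upload dist/*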
| 26.833333 | 67 | 0.658385 |
374e71e4d04a6cd145d1dd571539f663a719189d | 3,902 | py | Python | code/python/third_party/Deadline/DeadlineConnect.py | mikeroberts3000/ml-hypersim | 75b363ee52fbdbd0cc9b554c34c1aadea404183e | ["AML"] | 10 | 2020-11-17T00:33:42.000Z | 2022-02-16T23:31:58.000Z | code/python/third_party/Deadline/DeadlineConnect.py | mikeroberts3000/ml-hypersim | 75b363ee52fbdbd0cc9b554c34c1aadea404183e | ["AML"] | null | null | null | code/python/third_party/Deadline/DeadlineConnect.py | mikeroberts3000/ml-hypersim | 75b363ee52fbdbd0cc9b554c34c1aadea404183e | ["AML"] | null | null | null |
import sys
import subprocess
import os
import json
import traceback
from . import Jobs
from . import SlavesRenderingJob
from . import JobReports
from . import TaskReports
from . import Limits
from . import Tasks
from . import Pulse
from . import Repository
from . import MappedPaths
from . import MaximumPriority
from . import Pools
from . import Groups
from . import Plugins
from . import Slaves
from . import Users
from . import Balancer
from .ConnectionProperty import ConnectionProperty
#http://docs.python.org/2/library/httplib.html
class DeadlineCon:
"""
Object used by user to communicate with the web service.
Host name of the Web Service, as well as the port number the
Web Service is listening on are required for construction.
Call other API functions through this object.
"""
def __init__(self, host, port):
""" Constructs an instance of DeadlineCon.
Params: host name of the Web Service (string).
port number the Web Service is listening on (integer).
"""
#Builds the ConnectionProperty object used for sending requests.
address = host+":"+str(port)
self.connectionProperties = ConnectionProperty(address)
#The different request groups use the ConnectionProperty object to send their requests.
self.Jobs = Jobs.Jobs(self.connectionProperties)
self.SlavesRenderingJob = SlavesRenderingJob.SlavesRenderingJob(self.connectionProperties)
self.Tasks = Tasks.Tasks(self.connectionProperties)
self.TaskReports = TaskReports.TaskReports(self.connectionProperties)
self.JobReports = JobReports.JobReports(self.connectionProperties)
self.LimitGroups = Limits.LimitGroups(self.connectionProperties)
self.Pulse = Pulse.Pulse(self.connectionProperties)
self.Repository = Repository.Repository(self.connectionProperties)
self.MappedPaths = MappedPaths.MappedPaths(self.connectionProperties)
self.MaximumPriority = MaximumPriority.MaximumPriority(self.connectionProperties)
self.Pools = Pools.Pools(self.connectionProperties)
self.Groups = Groups.Groups(self.connectionProperties)
self.Plugins = Plugins.Plugins(self.connectionProperties)
self.Slaves = Slaves.Slaves(self.connectionProperties)
self.Users = Users.Users(self.connectionProperties)
self.Balancer = Balancer.Balancer(self.connectionProperties)
def EnableAuthentication(self, enable=True):
"""
Toggles authentication mode. If enabled, requests sent through this DeadlineCon object will attempt authentication with the current user name and password credentials.
If the authentication credentials have not been set, authentication will fail. Required to be enabled if the Web Service requires authentication.
Params: whether to disable or enable authentication mode (enabled by default, bool).
"""
self.connectionProperties.EnableAuthentication(enable)
def SetAuthenticationCredentials(self, username, password, enable=True):
"""
Sets the authentication credentials to be used when attempting authentication.
Params: the username credential (string).
the password credential (string).
whether to enable authentication mode or not (enabled by default, bool).
"""
self.connectionProperties.SetAuthentication(username, password)
self.connectionProperties.EnableAuthentication(enable)
def AuthenticationModeEnabled(self):
"""
Returns whether authentication mode is enabled for this DeadlineCon or not. If not, then authentication will fail if the Web Service requires authentication.
"""
return self.connectionProperties.AuthenticationEnabled()
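A minimal usage sketch for DeadlineCon, assuming a Deadline Web Service is reachable at the host and port below; the values are placeholders and GetJobs is assumed from the standalone Deadline API rather than defined in this file:

con = DeadlineCon('localhost', 8082)
con.SetAuthenticationCredentials('username', 'password')  # also enables authentication mode
print(con.AuthenticationModeEnabled())
jobs = con.Jobs.GetJobs()  # assumed request-group method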
| 47.012048 | 179 | 0.717837 |
140e9a7a95f6f4ecd0d02ea38bdcc88d9bc9c9a0 | 54 | py | Python | fastai/version.py | zhanlliu/fastai_study | 0b136a2fd40cbcea4d51a33efc7e7fee24b5bc08 | ["Apache-2.0"] | null | null | null | fastai/version.py | zhanlliu/fastai_study | 0b136a2fd40cbcea4d51a33efc7e7fee24b5bc08 | ["Apache-2.0"] | null | null | null | fastai/version.py | zhanlliu/fastai_study | 0b136a2fd40cbcea4d51a33efc7e7fee24b5bc08 | ["Apache-2.0"] | null | null | null |
__all__ = ['__version__']
__version__ = '1.0.30.dev0'
| 18 | 27 | 0.685185 |
f702829a908f8235e9093bc97a9d0edce1997f1f | 379 | py | Python | pokemon/permissions.py | pessman/pokemon_utils | cbe06ebe323cb38a35846274d812bdbe8d0ae8ca | ["MIT"] | 1 | 2019-03-11T04:12:50.000Z | 2019-03-11T04:12:50.000Z | pokemon/permissions.py | pessman/pokemon_utils | cbe06ebe323cb38a35846274d812bdbe8d0ae8ca | ["MIT"] | null | null | null | pokemon/permissions.py | pessman/pokemon_utils | cbe06ebe323cb38a35846274d812bdbe8d0ae8ca | ["MIT"] | 2 | 2019-03-13T03:17:29.000Z | 2019-04-04T20:06:50.000Z |
from rest_framework.permissions import SAFE_METHODS, BasePermission
class IsAdminOrReadOnly(BasePermission):
"""
The request is authenticated as an Admin user or is Read Only
"""
def has_permission(self, request, view):
return bool(
request.method in SAFE_METHODS or
request.user and
request.user.is_staff
)
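# --- Hedged usage sketch (illustrative; this view is not part of the project) ---
# The standard DRF wiring is to list the permission in `permission_classes`:
# GET/HEAD/OPTIONS stay open to everyone, while writes require
# request.user.is_staff. The view name and response bodies are assumptions.
from rest_framework.response import Response
from rest_framework.views import APIView


class ExampleAdminOrReadOnlyView(APIView):
    permission_classes = [IsAdminOrReadOnly]

    def get(self, request):
        # Readable by anonymous and authenticated users alike.
        return Response({"detail": "read-only access"})

    def post(self, request):
        # Only reached when the requesting user is a staff member.
        return Response({"detail": "admin write access"})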
| 25.266667
| 67
| 0.662269
|
2ee193365add316a39099213870c2518c80ab4af
| 3,272
|
py
|
Python
|
examples/rdf_star_recursive.py
|
pragya16067/Sweb_Project_RDF-
|
92506cc78e6f5d78ae5f784f818310174635a6af
|
[
"BSD-3-Clause"
] | null | null | null |
examples/rdf_star_recursive.py
|
pragya16067/Sweb_Project_RDF-
|
92506cc78e6f5d78ae5f784f818310174635a6af
|
[
"BSD-3-Clause"
] | null | null | null |
examples/rdf_star_recursive.py
|
pragya16067/Sweb_Project_RDF-
|
92506cc78e6f5d78ae5f784f818310174635a6af
|
[
"BSD-3-Clause"
] | null | null | null |
from rdflib import Graph
from rdflib.namespace import RDF, Namespace
from pyparsing import ParseException
# For graphs with multiple rdf reification/* statements
rdf_graph = """
PREFIX ex:<http://example.org/>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf:<http://xmlns.com/foaf/0.1/>
PREFIX dct:<http://purl.org/dc/terms/>
ex:bob foaf:name "Bob" ;
foaf:knows _:s2 .
_:s rdf:type rdf:Statement ;
rdf:subject ex:bob ;
rdf:predicate foaf:knows ;
rdf:object _:s2 .
_:s2 rdf:type rdf:Statement ;
rdf:subject ex:alice ;
rdf:predicate foaf:name ;
rdf:object "Alice" .
_:s dct:creator <http://example.com/crawlers#c1> ;
dct:source <http://example.net/bob.html> .
_:s2 dct:source <http://example.net/alice.html> .
ex:welles foaf:name "John Welles" ;
ex:mentioned ex:kubrick .
ex:kubrick foaf:name "Stanley Kubrick" ;
ex:influencedBy ex:welles .
_:s1 rdf:type rdf:Statement ;
rdf:subject ex:kubrick ;
rdf:predicate ex:influencedBy ;
rdf:object ex:welles .
_:s1 dct:creator <http://example.com/names#examples> ;
dct:source <http://example.net/people.html> .
"""
sparql_query = """
PREFIX ex:<http://example.org/>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf:<http://xmlns.com/foaf/0.1/>
PREFIX dct:<http://purl.org/dc/terms/>
SELECT ?x ?y ?srcX ?srcY WHERE {
?x foaf:knows ?r2 .
?r rdf:type rdf:Statement ;
rdf:subject ?x ;
rdf:predicate foaf:knows ;
rdf:object ?r2 ;
dct:source ?srcX .
?r2 rdf:type rdf:Statement ;
rdf:subject ?y ;
rdf:predicate foaf:name ;
rdf:object ?name ;
dct:source ?srcY
}
"""
rdf_Star_graph = """
PREFIX ex:<http://example.org/>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf:<http://xmlns.com/foaf/0.1/>
PREFIX dct:<http://purl.org/dc/terms/>
ex:bob foaf:name "Bob" .
<<ex:bob foaf:knows <<ex:alice foaf:name "Alice">>>> dct:creator <http://example.com/crawlers#c1> ;
dct:source <http://example.net/bob.html> .
<<ex:alice foaf:name "Alice">> dct:source <http://example.net/alice.html> .
ex:welles foaf:name "John Welles" ;
ex:mentioned ex:kubrick .
ex:kubrick foaf:name "Stanley Kubrick" .
<<ex:kubrick ex:influencedBy ex:welles>> dct:creator <http://example.com/names#examples> ;
dct:source <http://example.net/people.html> .
"""
sparql_star_query = """
PREFIX ex:<http://example.org/>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf:<http://xmlns.com/foaf/0.1/>
PREFIX dct:<http://purl.org/dc/terms/>
SELECT ?x ?y ?srcX ?srcY WHERE
{ <<ex:bob foaf:knows <<?y foaf:name ?name>>>> dct:source ?srcX .
<<?y foaf:name ?name>> dct:source ?srcY .}
"""
def test_rdf_basic():
g = Graph()
g.parse(data=rdf_graph, format="turtle")
for row in g.query(sparql_query):
print(row)
def test_rdf_star():
g = Graph()
g.parse(data=rdf_Star_graph, format="turtle")
    for row in g.query(sparql_star_query):
print(row)
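# Hedged helper (not in the original script): prints each variable name next to
# its bound value so the plain-reification and RDF-star result sets are easier
# to compare. It relies only on rdflib's SPARQLResult.vars and the tuple-like
# result rows already used above.
def print_labeled_results(graph_data, query):
    g = Graph()
    g.parse(data=graph_data, format="turtle")
    results = g.query(query)
    for row in results:
        print(", ".join("%s=%s" % (var, value) for var, value in zip(results.vars, row)))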
if __name__ == '__main__':
#test_rdf_basic()
test_rdf_star()
| 27.965812
| 103
| 0.619193
|
d708dd0d19d0b303d001ce5350f035020caa6db8
| 25,603
|
py
|
Python
|
telegram-bot.py
|
0x5eba/RecommendationBot
|
e65916a8273f1d4c25021a1e1f13eaae431d4cb4
|
[
"MIT"
] | null | null | null |
telegram-bot.py
|
0x5eba/RecommendationBot
|
e65916a8273f1d4c25021a1e1f13eaae431d4cb4
|
[
"MIT"
] | null | null | null |
telegram-bot.py
|
0x5eba/RecommendationBot
|
e65916a8273f1d4c25021a1e1f13eaae431d4cb4
|
[
"MIT"
] | null | null | null |
import logging
import telegram
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultArticle, InputTextMessageContent
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, Filters, MessageHandler, InlineQueryHandler
from telegram.ext.dispatcher import run_async
import numpy as np
import copy, time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from read_csv import get_titles, get_most_poular, add_rating, add_user, user_gender, get_row_title
from hybrid import final_res, movies_from_last_one, list_movies_seen_user, predict_one_movie
from similar_user import get_similar_user
from movies_cinema_imdb import final_cinema_movies, get_all_cinema_movies
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
telegram.constants.MAX_MESSAGE_LENGTH = 10000
telegram.constants.MAX_CAPTION_LENGTH = 1000
flag_input_start = False
# title must be lowercase
# partial: if True, return all possible results for that title; otherwise return a single name (if found)
def search(title, partial=True):
possible_original_title = []
titles_movies = get_titles()
for t in titles_movies:
t_copy = copy.deepcopy(t).lower()
if title in t_copy:
possible_original_title.append(t)
if partial:
return possible_original_title
if possible_original_title:
        # pick the one whose length is closest to "title"
original_title = possible_original_title[0]
for t in possible_original_title:
if abs(len(title)-len(t)) < abs(len(title)-len(original_title)):
original_title = t
return original_title
return None
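# Illustrative example (assumed titles, not taken from the real dataset): if
# get_titles() returned ["Interstellar", "Interstate 60"], then search("inter")
# would return both titles, while search("interstellar", partial=False) would
# return just "Interstellar", the candidate whose length is closest to the query.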
# build the keyboard interface for all the possible titles
def input_movie_user(bot, update, possible_original_title, rate=0, info=False):
keyboard = [[i] for i in range(len(possible_original_title))]
count = 0
if info:
for title in possible_original_title:
keyboard[count][0] = InlineKeyboardButton(title, callback_data="5 " + title)
count += 1
elif rate == 0:
for title in possible_original_title:
keyboard[count][0] = InlineKeyboardButton(str(title), callback_data="3 " + str(title))
count += 1
else:
for title in possible_original_title:
keyboard[count][0] = InlineKeyboardButton(title, callback_data="4 " + title + " || " + str(rate))
count += 1
try:
reply_markup = InlineKeyboardMarkup(keyboard)
if len(possible_original_title) == 1:
update.message.reply_text('Is This?', reply_markup=reply_markup)
else:
update.message.reply_text('Which one?', reply_markup=reply_markup)
except:
chat_id = update.message.chat_id
bot.send_message(chat_id=chat_id, text="Invalid name \n[Too short/long, or does't exist]")
bot.send_message(chat_id, 'Insert the name of the movie', reply_markup=telegram.ForceReply(force_reply=True))
flag_input_start = True
@run_async
def start(bot, update):
movies = get_most_poular()
chat_id = update.message.chat_id
name = str(update['message']['chat']['username'])
if len(name) < 5:
bot.send_message(chat_id=chat_id, text="\t-- !ERROR! --\nYour data won't be save\nYou must have a username set on Telegram\n"\
"[How to do it](https://telegram.org/blog/usernames-and-secret-chats-v2)", parse_mode=telegram.ParseMode.MARKDOWN)
return
kb = [
[telegram.KeyboardButton('/rating'), telegram.KeyboardButton('/movies')],
[telegram.KeyboardButton('/rate'), telegram.KeyboardButton('/search'), telegram.KeyboardButton('/list')],
[telegram.KeyboardButton('/friend'), telegram.KeyboardButton('/cinema')]
]
kb_markup = telegram.ReplyKeyboardMarkup(kb, resize_keyboard=True)
# keyboard = [[i] for i in range(len(movies)+1)]
# keyboard[0][0] = InlineKeyboardButton("Write the name manually", callback_data="2 ")
keyboard = [[i] for i in range(len(movies))]
count = 0
for title, img in movies:
keyboard[count][0] = InlineKeyboardButton(title, callback_data="3 " + title)
count += 1
reply_markup = InlineKeyboardMarkup(keyboard)
if list_movies_seen_user(name, True) > 0:
update.message.reply_text('Hi @' + name + "\nYou have already started the adventure ;)\n/rating to add feedback, or /movies to get your recommended movies",
reply_markup=kb_markup)
else:
bot.send_message(chat_id=chat_id, text="Hi @" + name, reply_markup=kb_markup)
msg = "Here there is a quick review of the available commands:\n\n" +\
"rate - Rate a movie { /rate Interstellar 5 }\n" +\
"search - Get info about a movie { /search Interstellar }\n" +\
"list - Get the list of movies rated by you { /list }\n" +\
"rating - Add rates to movies { /rating }\n" +\
"movies - Recommended movies { /movies }\n" +\
"friend - Find a person similar to you { /friend }\n" +\
"cinema - Recommended movies at the cinema now { /cinema }"
bot.send_message(chat_id=chat_id, text=msg, reply_markup=kb_markup)
update.message.reply_text("Let begin to know each other ;)\nChoose or search (see /rate) your favorite movie", reply_markup=reply_markup)
# def contact(bot, update):
# contact_keyboard = telegram.KeyboardButton(text="send_contact", request_contact=True)
# custom_keyboard = [[contact_keyboard]]
# reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard)
# chat_id = update.message.chat_id
# bot.send_message(chat_id=chat_id,
# text="Would you mind sharing your contact with me?",
# reply_markup=reply_markup)
@run_async
def button(bot, update):
query = update.callback_query
chat_id = query.message.chat_id
option = str(query.data.split(' ')[0])
# # INPUT USER
# if option == "2":
# global flag_input_start
# bot.send_message(chat_id, 'Insert the name of the movie', reply_markup=telegram.ForceReply(force_reply=True))
    # flag_input_start = True
# SHOW MOVIES AND RATING
if option == "3":
title = ' '.join(query.data.split(' ')[1:])
user_name = str(update['callback_query']['from_user']['username'])
bot.edit_message_text(text="Selected option: {0}".format(title),
chat_id=chat_id, message_id=query.message.message_id)
keyboard = [
[
InlineKeyboardButton("1", callback_data="4 " + title + " || 1"),
InlineKeyboardButton("2", callback_data="4 " + title + " || 2"),
InlineKeyboardButton("3", callback_data="4 " + title + " || 3")
],
[
InlineKeyboardButton("4", callback_data="4 " + title + " || 4"),
InlineKeyboardButton("5", callback_data="4 " + title + " || 5")
]
]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=chat_id, text='Insert your personal rating for the movie', reply_markup=reply_markup)
# SAVE AND ASK MOVIE AGAIN
elif option == "4":
option2 = ' '.join(query.data.split(' ')[1:])
user_name = str(update['callback_query']['from_user']['username'])
title, rating = option2.split(' || ')
bot.edit_message_text(text="{0}/5 for {1}".format(int(rating), title),
chat_id=chat_id, message_id=query.message.message_id)
add_rating(user_name, title, int(rating))
recommended_movies = movies_from_last_one(str(user_name))
keyboard = [[i] for i in range(len(recommended_movies))]
count = 0
for title, value in recommended_movies:
keyboard[count][0] = InlineKeyboardButton(str(title), callback_data="3 " + str(title))
count += 1
reply_markup = InlineKeyboardMarkup(keyboard)
n_movies = list_movies_seen_user(user_name, True)
if n_movies == 1:
bot.send_message(chat_id=chat_id, text='Nice choice!\nNow what', reply_markup=reply_markup)
elif n_movies == 2:
bot.send_message(chat_id=chat_id, text='Ok then one more?', reply_markup=reply_markup)
elif n_movies == 3:
msg = "Good job! Now you are ready to continue your adventure without worries\n" +\
"When you think to be ready do /movies to get your recommended movies\n"+ \
'Choose a movie to rate'
bot.send_message(chat_id=chat_id, text=msg, reply_markup=reply_markup)
elif n_movies == 5:
msg = "Feel free to send me some feedback about the bot [link](https://goo.gl/forms/02lDSAURiQq2Cnzg1)"
bot.send_message(chat_id=chat_id, text=msg, reply_markup=reply_markup, parse_mode=telegram.ParseMode.MARKDOWN)
bot.send_message(chat_id=chat_id, text='Choose a movie to rate', reply_markup=reply_markup)
else:
bot.send_message(chat_id=chat_id, text='Choose a movie to rate', reply_markup=reply_markup)
# INFO MOVIE
elif option == "5":
user_name = str(update['callback_query']['from_user']['username'])
title = ' '.join(query.data.split(' ')[1:])
row_title = get_row_title(title)
predict = None
overview = None
try:
overview = row_title['overview'].values[0]
except:
overview = ""
try:
predict = predict_one_movie(user_name, int(row_title['id'].values[0]))
except:
predict = "No info"
message = "*" + str(row_title['title'].values[0]).upper() + "* \n" + \
"*Year Release*: " + str(int(row_title['year'].values[0])) + "\n" + \
"*Genres*: " + ','.join([str(i) for i in row_title['genres']]) + "\n" + \
"*Runtime*: " + str(int(row_title['runtime'].values[0])) + " minutes\n" + \
"*Possible Rate*: " + str(predict) + "\n" +\
"*Overview*:\n" + str(overview)
# "[How to do it](https://telegram.org/blog/usernames-and-secret-chats-v2)"
keyboard = [[InlineKeyboardButton("Trailer", url="https://www.youtube.com/results?search_query=" + str(title) + " trailer")]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.edit_message_text(chat_id=chat_id, text=message, message_id=query.message.message_id, parse_mode=telegram.ParseMode.MARKDOWN, reply_markup=reply_markup)
bot.send_photo(chat_id=chat_id, photo="https://image.tmdb.org/t/p/original/" + str(row_title['poster_path'].values[0]))
# YES or NO for /friend + ask GENDER
elif option == "6":
answer = query.data.split(' ')[1]
user_name = str(update['callback_query']['from_user']['username'])
if answer == "yes":
bot.edit_message_text(chat_id=chat_id, text="Welcome to this amazing feature :D\n", message_id=query.message.message_id)
keyboard = [[InlineKeyboardButton("Male", callback_data="7 male"), InlineKeyboardButton("Female", callback_data="7 female")]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=chat_id, text='Now I need to know your gender and your age\nAre you a boy or a girl?', reply_markup=reply_markup)
else:
bot.edit_message_text(chat_id=chat_id, text="I can't continue without your agreement :(", message_id=query.message.message_id)
# GENDER + ask AGE
elif option == "7":
answer = query.data.split(' ')[1]
user_name = str(update['callback_query']['from_user']['username'])
keyboard = [
[InlineKeyboardButton("1-10", callback_data="8 " + answer + " 1"), InlineKeyboardButton("11-20", callback_data="8 " + answer + " 2"),
InlineKeyboardButton("21-30", callback_data="8 " + answer + " 3")],
[InlineKeyboardButton("31-40", callback_data="8 " + answer + " 4"), InlineKeyboardButton("41-50", callback_data="8 " + answer + " 5"),
InlineKeyboardButton("50+", callback_data="8 " + answer + " 6")]
]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=chat_id, text='And what about your age range?', reply_markup=reply_markup)
# SAVE USER
elif option == "8":
user_name = str(update['callback_query']['from_user']['username'])
gender = query.data.split(' ')[1]
age = query.data.split(' ')[2]
add_user(user_name, gender, age)
bot.send_message(chat_id=chat_id, text='Nice to meet you :)')
similar_user, percent_similarity = get_similar_user(user_name)
if similar_user == "":
bot.send_message(chat_id=chat_id, text='No user found :(')
else:
gender_user = user_gender(user_name)
if gender_user != user_gender(similar_user):
bot.send_message(chat_id=chat_id, text='WooW! Soo lucky ;)\n@' + similar_user + " with a similarity of " + percent_similarity + "%")
else:
bot.send_message(chat_id=chat_id, text="Here it is a person with tastes similar to yours\n@" + similar_user +\
" with a similarity of " + percent_similarity + "%")
# LIST MOVIES AT CINEMA + RECOMMENDED CINEMA MOVIES
elif option == "9":
user_name = str(update['callback_query']['from_user']['username'])
answer = query.data.split(' ')[1]
if answer == "1":
all_cinema_movies = get_all_cinema_movies()
keyboard = [[i] for i in range(len(all_cinema_movies))]
count = 0
for title, available in all_cinema_movies:
if available == "":
keyboard[count][0] = InlineKeyboardButton(str(title), callback_data="5 " + str(title))
else:
keyboard[count][0] = InlineKeyboardButton(str(title) + " [" + str(available) + "]", callback_data="5 " + str(title))
count += 1
reply_markup = InlineKeyboardMarkup(keyboard)
bot.edit_message_text(chat_id=chat_id, text='Best movies at cinema for you ;)\nClick one to have more info',
reply_markup=reply_markup, message_id=query.message.message_id)
else:
bot.edit_message_text(chat_id=chat_id, text="Have you already searched for the nearest cinema? ;)\nPlease wait some seconds", message_id=query.message.message_id)
recommended_movies = final_cinema_movies(user_name)
keyboard = [[i] for i in range(len(recommended_movies))]
count = 0
for title, rate in recommended_movies:
keyboard[count][0] = InlineKeyboardButton(str(title), callback_data="5 " + str(title))
count += 1
reply_markup = InlineKeyboardMarkup(keyboard)
            bot.send_message(chat_id=chat_id, text='Here are your recommended movies :)\nHave fun at the cinema ;)\nClick one for more info', reply_markup=reply_markup)
# @run_async
# def input_user(bot, update):
# global flag_input_start
# chat_id = update.message.chat_id
# if flag_input_start == True:
# flag_input_start = False
# title = str(update.message.text).lower()
# user_name = str(update['message']['chat']['username'])
# possible_original_title = search(title)
# if possible_original_title:
# input_movie_user(bot, update, possible_original_title)
# else:
# bot.send_message(chat_id=chat_id, text="Invalid name \n[Too short/long, or does't exist]")
@run_async
def rate_movie(bot, update, args):
chat_id = update.message.chat_id
if len(args) < 1:
bot.send_message(chat_id=chat_id, text="Invalid argument: /rate name [rate]\nExample:\n\t/rate Interstellar 5")
else:
title, rate = '', ''
if args[-1].isdigit():
if not int(args[-1]) in [1, 2, 3, 4, 5]:
bot.send_message(chat_id=chat_id, text="Invalid rate \n[Must be 1,2,3,4 or 5]")
return
title, rate = ' '.join(args[:-1]), args[-1]
else:
title = ' '.join(args)
possible_original_title = search(title.lower())
if len(possible_original_title):
if rate:
input_movie_user(bot, update, possible_original_title, rate)
else:
input_movie_user(bot, update, possible_original_title)
else:
bot.send_message(chat_id=chat_id, text="Invalid name \n[Too short/long, or does't exist]")
@run_async
def search_movie(bot, update, args):
chat_id = update.message.chat_id
if len(args) < 1:
bot.send_message(chat_id=chat_id, text="Invalid argument: /search name\nExample:\n\t/search Interstellar")
else:
title = ' '.join(args)
possible_original_title = search(title.lower())
if len(possible_original_title):
input_movie_user(bot, update, possible_original_title, info=True)
else:
bot.send_message(chat_id=chat_id, text="Invalid name \n[Too short/long, or does't exist]")
@run_async
def rating_movies(bot, update):
user_name = str(update['message']['chat']['username'])
chat_id = update.message.chat_id
recommended_movies = movies_from_last_one(user_name)
if len(recommended_movies) < 1:
        bot.send_message(chat_id=chat_id, text='You should rate some movies before starting.\n/start to begin')
return
keyboard = [[i] for i in range(len(recommended_movies))]
count = 0
for title, value in recommended_movies:
keyboard[count][0] = InlineKeyboardButton(str(title), callback_data="3 " + str(title))
count += 1
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=chat_id, text='Choose a movie to rate', reply_markup=reply_markup)
@run_async
def get_best_movie(bot, update):
user_name = str(update['message']['chat']['username'])
chat_id = update.message.chat_id
bot.send_message(chat_id=chat_id, text="Remember to do /rating to continue the adventure!\nPlease wait some seconds for the recommendetion")
recommended_movies = final_res(user_name)
if len(recommended_movies) < 1:
        bot.send_message(chat_id=chat_id, text='You should rate some movies before starting.\n/start to begin')
return
keyboard = [[i] for i in range(len(recommended_movies))]
count = 0
for title, rate in recommended_movies:
keyboard[count][0] = InlineKeyboardButton(str(title) + " -> " + str(rate)[:5], callback_data="5 " + str(title))
count += 1
reply_markup = InlineKeyboardMarkup(keyboard)
    bot.send_message(chat_id=chat_id, text='Here are your recommended movies :)\nClick one for more info', reply_markup=reply_markup)
@run_async
def get_list_movies(bot, update):
user_name = str(update['message']['chat']['username'])
chat_id = update.message.chat_id
lm = list_movies_seen_user(user_name)
if len(lm) == 0:
        bot.send_message(chat_id=chat_id, text='You should rate some movies before starting.\n/start to begin')
else:
bot.send_message(chat_id=chat_id, text='\n'.join(lm))
@run_async
def find_friend(bot, update):
user_name = str(update['message']['chat']['username'])
chat_id = update.message.chat_id
if list_movies_seen_user(user_name, True) < 10:
        bot.send_message(chat_id=chat_id, text='You have to rate at least 10 movies before getting into this section\n/rating to continue')
return
gender_user = user_gender(user_name)
if not gender_user:
bot.send_message(chat_id=chat_id, text='In this section you can find a friend ( or maybe something else ;) ) to watch a movie with')
keyboard = [[InlineKeyboardButton("Yes", callback_data="6 yes"), InlineKeyboardButton("No", callback_data="6 no")]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=chat_id, text='But first you have to agree to share your personal information with other people\nDo you agree?',
reply_markup=reply_markup)
else:
similar_user, percent_similarity = get_similar_user(user_name)
if similar_user == "":
bot.send_message(chat_id=chat_id, text='No user found :(')
else:
if gender_user != user_gender(similar_user):
bot.send_message(chat_id=chat_id, text='WooW! Soo lucky ;)\n@' + similar_user + " with a similarity of " + percent_similarity + "%")
else:
bot.send_message(chat_id=chat_id, text="Here it is a person with tastes similar to yours\n@" + similar_user +\
" with a similarity of " + percent_similarity + "%")
@run_async
def movies_cinema(bot, update):
user_name = str(update['message']['chat']['username'])
chat_id = update.message.chat_id
lm = list_movies_seen_user(user_name)
if len(lm) == 0:
        bot.send_message(chat_id=chat_id, text='You should rate some movies before starting.\n/start to begin')
else:
msg = "Choose an option:\n1) Show the most popular movies at cinema now\n" +\
"2) A recommendetion of the best movies, based on your preference, to watch at cinema now!"
keyboard = [[InlineKeyboardButton("1", callback_data="9 1"), InlineKeyboardButton("2", callback_data="9 2")]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=chat_id, text=msg, reply_markup=reply_markup)
# @run_async
# def inline_movies(bot, update):
# user_name = str(update['inline_query']['from_user']['username'])
# chat_id = update.inline_query.from_user.id
# query = update.inline_query.query
# results = list()
# if query == 'movies' or query == "Movies":
# pass
# if not str(user_name) in list_name_users:
# bot.send_message(chat_id=chat_id, text='You should rate same movie befor start.\n/start to start')
# return
# recommended_movies = final_res(str(user_name))
# # return_msg = ""
# # for key, value in recommended_movies:
# # return_msg += str(key) + " -> " + str(value)[5:] + "\n"
# keyboard = [[i] for i in range(len(recommended_movies))]
# count = 0
# for key, value in recommended_movies:
# keyboard[count][0] = InlineKeyboardButton(str(key) + " -> " + str(value)[:5], callback_data="3 " + key)
# count += 1
# reply_markup = InlineKeyboardMarkup(keyboard)
# bot.send_message(chat_id=chat_id, text='Here it is your recommanded movies :)', reply_markup=reply_markup)
# results.append(
# InlineQueryResultArticle(
# id=chat_id,
# title='Movies',
# input_message_content=InputTextMessageContent('Choose a movie to rate'),
# reply_markup=reply_markup
# )
# )
# elif query == 'search':
# return
# elif query == 'rate':
# return
# else:
# return
# bot.answer_inline_query(update.inline_query.id, results)
from telegram.error import (TelegramError, Unauthorized, BadRequest,
TimedOut, ChatMigrated, NetworkError)
def error_callback(bot, update, error):
logger.warning('Update "%s" caused error "%s"', update, error)
try:
raise error
except Unauthorized:
print("Unauthorized")
# remove update.message.chat_id from conversation list
except BadRequest as e:
print("BadRequest")
print(e)
# handle malformed requests - read more below!
except TimedOut:
print("TimedOut")
# handle slow connection problems
except NetworkError:
print("NetworkError")
# handle other connection problems
except ChatMigrated as e:
print("ChatMigrated")
print(e)
# the chat_id of a group has changed, use e.new_chat_id instead
except TelegramError:
print("TelegramError")
# handle all other telegram related errors
# NOTE: the Updater construction below is reconstructed; the original bot token
# is not included in this file, so the string is only a placeholder.
updater = Updater(token='YOUR_BOT_TOKEN_HERE')
ud = updater.dispatcher
ud.add_handler(CommandHandler('start', start))
ud.add_handler(CommandHandler('rating', rating_movies))
ud.add_handler(CommandHandler('movies', get_best_movie))
ud.add_handler(CommandHandler('list', get_list_movies))
ud.add_handler(CommandHandler('friend', find_friend))
ud.add_handler(CommandHandler('cinema', movies_cinema))
ud.add_handler(CommandHandler('rate', rate_movie, pass_args=True))
ud.add_handler(CommandHandler('search', search_movie, pass_args=True))
ud.add_handler(CallbackQueryHandler(button)) # , pattern='main'
# ud.add_handler(CallbackQueryHandler(info_film))
# ud.add_handler(InlineQueryHandler(inline_movies))
# ud.add_handler(MessageHandler(Filters.text, input_user))
ud.add_error_handler(error_callback)
updater.start_polling(timeout=100.0)
updater.idle()
| 45.075704
| 174
| 0.638949
|
975cbc2f996fec5197090cc81bc5a29f36bbda5b
| 3,745
|
py
|
Python
|
cogwheels/helpers/tests/test_deprecation_warning_stacklevel.py
|
ababic/django-app-utils
|
7a5f423498854dbec936b06bbaf88f7106259144
|
[
"MIT"
] | 4
|
2018-06-27T22:44:34.000Z
|
2021-02-03T16:55:54.000Z
|
cogwheels/helpers/tests/test_deprecation_warning_stacklevel.py
|
ababic/django-app-utils
|
7a5f423498854dbec936b06bbaf88f7106259144
|
[
"MIT"
] | 15
|
2018-07-15T10:01:40.000Z
|
2019-10-30T08:15:14.000Z
|
cogwheels/helpers/tests/test_deprecation_warning_stacklevel.py
|
ababic/django-cogwheels
|
7a5f423498854dbec936b06bbaf88f7106259144
|
[
"MIT"
] | null | null | null |
import warnings
from django.test import override_settings
from cogwheels.tests.base import AppSettingTestCase
class TestDeprecationWarningStackLevelSetting(AppSettingTestCase):
def assert_this_file_appears_as_cause_of_warning(self, warning):
self.assertIn(
'/cogwheels/helpers/tests/test_deprecation_warning_stacklevel.py',
str(warning)
)
def test_raise_all_deprecated_setting_reference_warnings(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.appsettingshelper.get('DEPRECATED_SETTING')
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.get_model('REPLACED_MODEL_SETTING')
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.models.REPLACED_MODEL_SETTING
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.get_module('REPLACED_MODULE_SETTING')
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.modules.REPLACED_MODULE_SETTING
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.get_object('REPLACED_OBJECT_SETTING')
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.objects.REPLACED_OBJECT_SETTING
self.assert_this_file_appears_as_cause_of_warning(w.pop())
@override_settings(
COGWHEELS_TESTS_RENAMED_SETTING_OLD='ooolaalaa',
COGWHEELS_TESTS_REPLACED_MODEL_SETTING='tests.ReplacementModel',
COGWHEELS_TESTS_REPLACED_MODULE_SETTING='cogwheels.tests.modules.replacement_module',
COGWHEELS_TESTS_REPLACED_OBJECT_SETTING='cogwheels.tests.classes.ReplacementClass'
)
def test_raise_all_deprecated_setting_value_used_by_replacement_warnings(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.appsettingshelper.get('RENAMED_SETTING_NEW')
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.get_model('REPLACEMENT_MODEL_SETTING')
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.get_module('REPLACEMENT_MODULE_SETTING')
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.get_object('REPLACEMENT_OBJECT_SETTING')
self.assert_this_file_appears_as_cause_of_warning(w.pop())
@override_settings(
COGWHEELS_TESTS_RENAMED_SETTING_OLD='ooolaalaa',
COGWHEELS_TESTS_REPLACED_MODEL_SETTING='tests.ReplacementModel',
COGWHEELS_TESTS_REPLACED_MODULE_SETTING='cogwheels.tests.modules.replacement_module',
COGWHEELS_TESTS_REPLACED_OBJECT_SETTING='cogwheels.tests.classes.ReplacementClass'
)
def test_raise_all_deprecated_setting_value_used_by_replacement_warnings_via_attribute_shortcuts(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.appsettingshelper.RENAMED_SETTING_NEW
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.models.REPLACEMENT_MODEL_SETTING
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.modules.REPLACEMENT_MODULE_SETTING
self.assert_this_file_appears_as_cause_of_warning(w.pop())
self.appsettingshelper.objects.REPLACEMENT_OBJECT_SETTING
self.assert_this_file_appears_as_cause_of_warning(w.pop())
| 46.8125
| 107
| 0.75247
|
5b788877bc1f86a70651f1de28a2ccd7b959652e
| 2,269
|
py
|
Python
|
venv/Lib/site-packages/pyrogram/methods/users/get_common_chats.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/methods/users/get_common_chats.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/methods/users/get_common_chats.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from typing import Union
from pyrogram import raw
from pyrogram import types
from pyrogram.scaffold import Scaffold
class GetCommonChats(Scaffold):
async def get_common_chats(self, user_id: Union[int, str]) -> list:
"""Get the common chats you have with a user.
Parameters:
user_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target user.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
Returns:
List of :obj:`~pyrogram.types.Chat`: On success, a list of the common chats is returned.
Raises:
ValueError: If the user_id doesn't belong to a user.
Example:
.. code-block:: python
common = app.get_common_chats("haskell")
print(common)
"""
peer = await self.resolve_peer(user_id)
if isinstance(peer, raw.types.InputPeerUser):
r = await self.send(
raw.functions.messages.GetCommonChats(
user_id=peer,
max_id=0,
limit=100,
)
)
return types.List([types.Chat._parse_chat(self, x) for x in r.chats])
raise ValueError(f'The user_id "{user_id}" doesn\'t belong to a user')
| 36.015873
| 107
| 0.639489
|
4b66e00a3ff62ecd6675a263ada22aa5fb1aca25
| 1,696
|
py
|
Python
|
figthesis/fignoise.py
|
Gattocrucco/sipmfilter
|
74215d6c53b998808fc6c677b46030234d996bdf
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
figthesis/fignoise.py
|
Gattocrucco/sipmfilter
|
74215d6c53b998808fc6c677b46030234d996bdf
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
figthesis/fignoise.py
|
Gattocrucco/sipmfilter
|
74215d6c53b998808fc6c677b46030234d996bdf
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
import figlatex
import toy
import num2si
import textbox
config = [
# label, noisefile, time [ns], timebase [ns]
('LNGS noise' , 'noises/nuvhd_lf_3x_tile57_77K_64V_6VoV_1-noise.npz', 150, [1, 8, 16, 32]),
('Proto0 noise', 'noises/merged_000886-adc_W201_Ch00.npz' , 150, [ 8, 16, 32]),
]
plotkw = {
1: dict(color='#aaa'),
8: dict(color='#000'),
16: dict(color='#000', linestyle='--', marker='o', markerfacecolor='#f55'),
32: dict(color='#f55', marker='o', markerfacecolor='#fff'),
}
###########################
fig, axs = plt.subplots(1, 2, num='fignoise', clear=True, figsize=[9, 3.3], sharex=True, sharey=True)
for ax, (label, noisefile, N, timebase) in zip(axs, config):
basetb = np.gcd.reduce(timebase)
noise = toy.DataCycleNoise(timebase=basetb)
noise.load(noisefile)
ax.set_xlabel(f'Time [ns]')
n = noise.generate(1, 3 * N)[0]
for tb in timebase:
nr = toy.downsample(n, tb // basetb)
x = (tb - 1) / 2 + tb * np.arange(len(nr)) - N
kwargs = dict(
label=num2si.num2si(1e9 / tb, format='%.3g') + 'Sa/s',
marker='.',
)
kwargs.update(plotkw[tb])
ax.plot(x, nr, **kwargs)
textbox.textbox(ax, label, fontsize='medium', loc='upper left')
ax.legend(loc='lower left', ncol=2, title='Sampling frequency', framealpha=0.95, fontsize='small', title_fontsize='small')
ax.set_xlim(0, N)
ax.set_ylim(-3, 3)
ax.minorticks_on()
ax.grid(True, which='major', linestyle='--')
ax.grid(True, which='minor', linestyle=':')
fig.tight_layout()
fig.show()
figlatex.save(fig)
| 29.241379
| 126
| 0.596698
|
a545a27f6826a1a4610bd47c7a45d7c5e8b4e388
| 31,025
|
py
|
Python
|
darr/tests/test_array.py
|
gjlbeckers-uu/dArray
|
e156495fd48b10085f5ddadf4890b626a78f5100
|
[
"BSD-3-Clause"
] | null | null | null |
darr/tests/test_array.py
|
gjlbeckers-uu/dArray
|
e156495fd48b10085f5ddadf4890b626a78f5100
|
[
"BSD-3-Clause"
] | null | null | null |
darr/tests/test_array.py
|
gjlbeckers-uu/dArray
|
e156495fd48b10085f5ddadf4890b626a78f5100
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import unittest
import tempfile
import shutil
import numpy as np
from darr.array import asarray, create_array, create_datadir, Array, \
numtypesdescr, truncate_array, delete_array, AppendDataError, \
numtypedescriptiontxt
from darr.utils import tempdir, tempdirfile
# TODO clean up overwrite parameters, not necessary anymore
class DarrTestCase(unittest.TestCase):
def assertArrayIdentical(self, x, y):
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.shape, y.shape)
self.assertEqual(np.sum((x-y)**2), 0)
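    # Editor's note (illustrative): unlike numpy.array_equal, this helper also
    # requires matching dtypes, so int64 and float64 arrays with identical
    # shapes and values still fail the first assertion.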
class AsArray(DarrTestCase):
def setUp(self):
self.tempdirname1 = tempfile.mkdtemp()
self.tempdirname2 = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdirname1)
shutil.rmtree(self.tempdirname2)
def check_arrayequaltoasarray(self, ndarray):
"""Tests if asarray creates an array of same shape and dtype and same
contents as input."""
dar = asarray(path=self.tempdirname1, array=ndarray, overwrite=True)
ndarray = np.asarray(ndarray) # could be list or tuple
self.assertArrayIdentical(dar[:], ndarray)
self.assertEqual(dar.dtype, ndarray.dtype)
self.assertEqual(dar.shape, ndarray.shape)
def test_asarraynumberint(self):
dar = asarray(path=self.tempdirname1, array=1, overwrite=True)
self.assertEqual(dar[0], 1)
def test_asarraynumberfloat(self):
dar = asarray(path=self.tempdirname1, array=1.0, overwrite=True)
self.assertEqual(dar[0], 1.0)
def test_asarrayonedimensionalndarray(self):
ndarray = np.arange(24)
self.check_arrayequaltoasarray(ndarray)
def test_asarraytwodimensionalndarray(self):
ndarray = np.arange(24).reshape(12, 2)
self.check_arrayequaltoasarray(ndarray)
def test_asarraythreedimensionalndarray(self):
ndarray = np.arange(24).reshape(4, 2, 3)
self.check_arrayequaltoasarray(ndarray)
def test_asarrayonedimensionallist(self):
ndarray = [1, 2, 3, 4]
self.check_arrayequaltoasarray(ndarray)
def test_asarraytwodimensionallist(self):
ndarray = [[1, 2, 3, 4],
[1, 2, 3, 4]]
self.check_arrayequaltoasarray(ndarray)
def test_asarraythreedimensionallist(self):
ndarray = [[[1, 2, 3, 4],
[1, 2, 3, 4]],
[[1, 2, 3, 4],
[1, 2, 3, 4]]]
self.check_arrayequaltoasarray(ndarray)
def test_asarraynumericdtypes(self):
dtypes = numtypesdescr.keys()
for dtype in dtypes:
with self.subTest(dtype=dtype):
ndarray = np.arange(24, dtype=dtype)
self.check_arrayequaltoasarray(ndarray)
def test_asarrayfortranorder(self):
ndarray = np.asarray(np.arange(24, dtype='float64'), order='F')
self.check_arrayequaltoasarray(ndarray)
def test_asarraycorder(self):
ndarray = np.asarray(np.arange(24, dtype='float64'), order='C')
self.check_arrayequaltoasarray(ndarray)
def test_asarraylittleendian(self):
ndarray = np.arange(24, dtype='<f4')
self.check_arrayequaltoasarray(ndarray)
def test_asarraybigendian(self):
ndarray = np.arange(24, dtype='>f4')
self.check_arrayequaltoasarray(ndarray)
def test_asarrayoverwrite(self):
a = np.zeros((5,), dtype='float64')
_ = asarray(path=self.tempdirname1, array=a, overwrite=True)
b = np.ones((4,2), dtype='uint8')
dar = asarray(path=self.tempdirname1, array=b, overwrite=True)
self.assertArrayIdentical(dar[:], b)
def test_asarraysequencesmallchunklen(self):
a = [1, 2, 3, 4, 5]
dar = asarray(path=self.tempdirname1, array=a, chunklen=3,
overwrite=True)
self.assertArrayIdentical(np.array(a), dar[:])
def test_asarraywritingsmallerchunks(self):
a = np.arange(1024, dtype='int64').reshape(2,-1)
dar = asarray(path=self.tempdirname1, array=a, chunklen=4, overwrite=True)
self.assertArrayIdentical(a, dar[:])
dar = asarray(path=self.tempdirname1, array=a, chunklen=5, overwrite=True)
self.assertArrayIdentical(a, dar[:])
def test_asarraywritinglargerthanlenchunks(self):
a = np.arange(1024, dtype='int64').reshape(2, -1)
dar = asarray(path=self.tempdirname1, array=a, chunklen=4096, overwrite=True)
self.assertArrayIdentical(a, dar[:])
def test_asarrayarray(self):
a = np.arange(1024, dtype='int64').reshape(2, -1)
dar1 = asarray(path=self.tempdirname1, array=a, overwrite=True)
dar2 = asarray(path=self.tempdirname2, array=a, chunklen=5, overwrite=True)
self.assertArrayIdentical(dar1[:], dar2[:])
def test_asarraywronginput(self):
a = 'text'
self.assertRaises(TypeError, asarray, path=self.tempdirname1, array=a,
chunklen=32, overwrite=True)
def test_asarraykeepattrs(self):
class AttrList(list):
attrs = {'a': 1, 'b': 2}
a = AttrList([0, 0, 0, 0])
dar = asarray(path=self.tempdirname1, array=a, overwrite=True)
self.assertEqual(dict(dar.metadata), AttrList.attrs)
def test_asarraywarnsnondictattrs(self):
class AttrList(list):
attrs = [0]
a = AttrList([0, 0, 0, 0])
self.assertWarns(UserWarning, asarray, path=self.tempdirname1, array=a,
overwrite=True)
def test_asarrayfromincompatipletype(self):
a = {'a': 1}
self.assertRaises(TypeError, asarray,path=self.tempdirname1, array=a,
overwrite=True)
def test_asarrayarratosamepath(self):
dar = asarray(path=self.tempdirname1, array=[0,1], overwrite=True)
self.assertRaises(ValueError, asarray, path=self.tempdirname1,
array=dar, overwrite=True)
def test_asarraysequenceofzerodimnumpyscalars(self):
def a():
yield np.float32(0)
yield np.float32(1)
yield np.float32(2)
dar = asarray(path=self.tempdirname1, array=a(), overwrite=True)
self.assertArrayIdentical(dar[:], np.array([0,1,2], dtype=np.float32))
def test_asarrayremoveoldmetadata(self):
dar = asarray(path=self.tempdirname1, array=[1,2],
metadata={'a':1}, overwrite=True)
dar = asarray(path=self.tempdirname1, array=[1, 2],
overwrite=True)
self.assertDictEqual(dict(dar.metadata), {})
class CreateDiskArray(DarrTestCase):
def check_arrayequaltocreatearray(self, ndarray, shape, dtype=None,
chunklen=None):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=shape,
dtype=dtype, chunklen=chunklen,
overwrite=True)
if dtype is not None:
ndarray = ndarray.astype(dtype)
self.assertArrayIdentical(ndarray, dar[:])
self.assertEqual(shape, dar.shape)
def test_zerosfloat64default(self):
shape = (12,)
ndarray = np.zeros(shape, dtype='float64')
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=shape)
def test_twodimensional(self):
shape = (12, 2)
ndarray = np.zeros(shape, dtype='float64')
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=shape)
def test_threedimensional(self):
shape = (4, 2, 3)
ndarray = np.zeros(shape, dtype='float64')
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=shape)
# split out manually?
def test_numericdtypes(self):
dtypes = numtypesdescr.keys()
for dtype in dtypes:
ndarray = np.zeros(24, dtype=dtype)
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(24,),
dtype=dtype)
def test_emptyarray(self):
ndarray = np.zeros(0, dtype='float64')
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(0,),
dtype='float64')
def test_emptyarraymd(self):
ndarray = np.zeros((0,3,7), dtype='float64')
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(0, 3, 7),
chunklen=1)
def test_emptyarraydifferentdtype(self):
ndarray = np.zeros(0, dtype='float64')
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(0,),
dtype='int64')
def test_chunked(self):
ndarray = np.zeros(12, dtype='float64')
for chunklen in (1, 5, 6, 11, 12, 13):
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(12,),
chunklen=chunklen)
ndarray = np.zeros(13, dtype='float64')
for chunklen in (1, 6, 7, 12, 13, 14):
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(13,),
chunklen=chunklen)
def test_chunkedthreedimensional(self):
ndarray = np.zeros((12,3,7), dtype='float64')
for chunklen in (1, 5, 6, 11, 12, 13):
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(12, 3, 7),
chunklen=chunklen*21)
ndarray = np.zeros((13,3,7), dtype='float64')
for chunklen in (1, 6, 7, 12, 13, 14):
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(13, 3, 7),
chunklen=chunklen*21)
def test_toosmallchunklen(self):
ndarray = np.zeros((12, 3, 7), dtype='float64')
self.check_arrayequaltocreatearray(ndarray=ndarray, shape=(12, 3, 7),
chunklen=1)
def test_shapeisint(self):
# we allow shapes to be integers
with tempdirfile() as filename:
dar = create_array(path=filename, shape=1,
dtype='int32', overwrite=True)
self.assertTupleEqual((1,), dar.shape)
def test_fillandfillfuncisnotnone(self):
fillfunc= lambda i: i * 2
with tempdirfile() as filename:
self.assertRaises(ValueError, create_array, path=filename,
shape=(1,), fill=1, fillfunc=fillfunc,
dtype='int32', overwrite=True)
class TestArray(DarrTestCase):
def setUp(self):
self.temparpath = tempfile.mkdtemp(dir=None)
self.tempnonarpath = tempfile.mkdtemp(dir=None)
self.tempar = create_array(path=self.temparpath, shape=(12,),
dtype='int64', metadata={'a': 1},
overwrite=True)
def tearDown(self):
shutil.rmtree(str(self.temparpath))
shutil.rmtree(str(self.tempnonarpath))
def test_instantiatefromexistingpath(self):
Array(path=self.temparpath)
def test_instantiatefromnonexistingpath(self):
with self.assertRaises(OSError):
Array(path=self.tempnonarpath)
def test_setvalues(self):
self.assertArrayIdentical(self.tempar[2:4],
np.array([0,0], dtype=self.tempar.dtype))
self.tempar[2:4] = 1
self.assertArrayIdentical(self.tempar[2:4],
np.array([1, 1], dtype=self.tempar.dtype))
def test_datadirexistence(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
self.assertEqual(filename, dar.datadir.path)
def test_str(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
self.assertEqual(str(dar),'[0 0]')
def test_repr(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
# linux and windows have different numpy memmap reprs...
self.assertEqual(repr(dar)[:18], 'darr array ([0, 0]')
def test_setaccessmode(self):
self.assertEqual(self.tempar.accessmode, 'r+')
self.tempar.accessmode = 'r'
self.assertEqual(self.tempar.accessmode, 'r')
self.assertRaises(ValueError, setattr, self.tempar, 'accessmode', 'w')
self.assertRaises(ValueError, setattr, self.tempar, 'accessmode', 'a')
def test_itemsize(self):
self.assertEqual(self.tempar.itemsize, 8)
def test_nbytes(self):
self.assertEqual(self.tempar.nbytes, 12*8)
def test_mb(self):
self.assertEqual(self.tempar.mb, 12*8/1e6)
def test_size(self):
self.assertEqual(self.tempar.size, 12)
def test_copy(self):
dar2 = self.tempar.copy(path=self.tempnonarpath, overwrite=True)
self.assertArrayIdentical(self.tempar[:], dar2[:])
self.assertEqual(dict(self.tempar.metadata), dict(dar2.metadata))
# FIXME more tests open accessmode
def test_open(self):
with self.tempar.open_array() as r:
self.assertIsNone(r)
def test_readcodelanguages(self):
self.assertIsInstance(self.tempar.readcodelanguages, tuple)
self.assertIn('numpymemmap', self.tempar.readcodelanguages)
class TestReadArrayDescr(DarrTestCase):
def test_arrayinfomissingfile(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
dar._arraydescrpath.unlink()
self.assertRaises(FileNotFoundError, Array, dar.path)
def test_arrayinfonewerversionfile(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
arrayinfo = dar._arrayinfo.copy()
vs = f"1{arrayinfo['darrversion']}"
arrayinfo['darrversion'] = vs
dar._datadir._write_jsondict(dar._arraydescrfilename, arrayinfo,
overwrite=True)
self.assertWarns(UserWarning, Array, dar.path)
def test_arrayinfowrongshapetype(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
arrayinfo = dar._arrayinfo.copy()
arrayinfo['shape'] = ['a', 3]
dar._datadir._write_jsondict(dar._arraydescrfilename, arrayinfo,
overwrite=True)
self.assertRaises(TypeError, Array, dar.path)
def test_arrayinfowrongorder(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
arrayinfo = dar._arrayinfo.copy()
arrayinfo['arrayorder'] = 'D'
dar._datadir._write_jsondict(dar._arraydescrfilename, arrayinfo,
overwrite=True)
self.assertRaises(ValueError, Array, dar.path)
arrayinfo['arrayorder'] = '[D]'
dar._datadir._write_jsondict(dar._arraydescrfilename, arrayinfo,
overwrite=True)
self.assertRaises(Exception, Array, dar.path)
def test_allowfortranorder(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,4), fill=0,
dtype='int64', overwrite=True)
dar._datadir._update_jsondict(dar._arraydescrpath.absolute(),
{'arrayorder': 'F'})
dar = Array(filename)
self.assertIn("Column-major", numtypedescriptiontxt(dar))
def test_warnwritefortranarray(self):
with tempdirfile() as filename1, tempdirfile() as filename2:
dar = create_array(path=filename1, shape=(2, 4), fill=0,
dtype='int64', overwrite=True)
dar._datadir._update_jsondict(dar._arraydescrpath.absolute(),
{'arrayorder': 'F'})
dar = Array(filename1)
self.assertWarns(UserWarning, asarray, path=filename2, array=dar,
overwrite=True)
def test_unknownarrayordertype(self):
with tempdir() as dirname:
dar = create_array(path=dirname, shape=(2,4), fill=0,
dtype='int64', overwrite=True)
dar._update_arrayinfo({'arrayorder': 'X'})
self.assertRaises(ValueError, numtypedescriptiontxt, dar)
self.assertRaises(ValueError, Array, dirname)
class TestConsistency(DarrTestCase):
def test_consistencycorrect(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
self.assertIsNone(dar._check_arrayinfoconsistency())
dar.append([0,0])
self.assertIsNone(dar._check_arrayinfoconsistency())
def test_consistencyincorrectinfoshape(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
dar._update_arrayinfo({'shape': (3,)})
self.assertRaises(ValueError, dar._check_arrayinfoconsistency)
def test_consistencywronginfoitemsize(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
dar._update_arrayinfo({'numtype': 'int32'})
self.assertRaises(ValueError, dar._check_arrayinfoconsistency)
def test_consistencyincorrectinfofileshape(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
dar._update_arrayinfo({'shape': (3,)})
self.assertRaises(ValueError, dar._check_arrayinfoconsistency)
self.assertRaises(ValueError, Array, dar.path)
def test_consistencywronginfofileitemsize(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', overwrite=True)
arrayinfo = dar._arrayinfo.copy()
arrayinfo['numtype'] = 'int32'
dar._datadir._write_jsondict(dar._arraydescrfilename, arrayinfo,
overwrite=True)
self.assertRaises(ValueError, dar._check_arrayinfoconsistency)
self.assertRaises(ValueError, Array, dar.path)
class TestCheckArraywriteable(DarrTestCase):
def test_check_arraywriteable(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', accessmode='r+', overwrite=True)
self.assertIsNone(dar.check_arraywriteable())
def test_check_arraynotwriteable(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(2,), fill=0,
dtype='int64', accessmode='r', overwrite=True)
self.assertRaises(OSError, dar.check_arraywriteable)
class IterFrames(DarrTestCase):
def setUp(self):
self.tempearpath = tempfile.mkdtemp() # even array
self.tempoarpath = tempfile.mkdtemp() # odd array
self.tempnonarpath = tempfile.mkdtemp()
self.tempear = create_array(path=self.tempearpath, shape=(12,),
dtype='int64', metadata={'a': 1},
overwrite=True)
self.tempoar = create_array(path=self.tempoarpath, shape=(13,),
dtype='int64', metadata={'a': 1},
overwrite=True)
def tearDown(self):
shutil.rmtree(str(self.tempearpath))
shutil.rmtree(str(self.tempoarpath))
shutil.rmtree(str(self.tempnonarpath))
def test_defaultparams_fit(self):
l = [c for c in self.tempear.iterchunks(chunklen=2)]
self.assertEqual(len(l), 6)
self.assertArrayIdentical(np.concatenate(l), self.tempear[:])
def test_remainderfalse_fit(self):
l = [c for c in self.tempear.iterchunks(chunklen=2,
include_remainder=False)]
self.assertEqual(len(l), 6)
self.assertArrayIdentical(np.concatenate(l), self.tempear[:])
def test_defaultparams_nofit(self):
l = [c for c in self.tempoar.iterchunks(chunklen=2)]
self.assertEqual(len(l), 7)
self.assertArrayIdentical(np.concatenate(l), self.tempoar[:])
def test_remainderfalse_nofit(self):
l = [c for c in
self.tempoar.iterchunks(chunklen=2, include_remainder=False)]
self.assertEqual(len(l), 6)
self.assertArrayIdentical(np.concatenate(l), self.tempoar[:12])
class AppendData(DarrTestCase):
def setUp(self):
self.temparpath = tempfile.mkdtemp() # even array
def tearDown(self):
shutil.rmtree(str(self.temparpath))
def test_appendnumber(self):
dar = create_array(path=self.temparpath, shape=(2,),
dtype='int64', overwrite=True)
dar.append(1)
self.assertArrayIdentical(np.array([0, 0, 1], dtype='int64'), dar[:])
def test_appendlist1d(self):
dar = create_array(path=self.temparpath, shape=(2,),
dtype='int64', overwrite=True)
dar.append([1,2])
dar.append([3])
self.assertArrayIdentical(np.array([0, 0, 1, 2, 3], dtype='int64'), dar[:])
def test_appendlist2d(self):
dar = create_array(path=self.temparpath, shape=(2, 3),
dtype='int64', overwrite=True)
dar.append([[1,2,3]])
dar.append([[1,2,3],[4,5,6]])
self.assertArrayIdentical(np.array([[0, 0, 0], [0, 0, 0], [1, 2, 3], [1, 2, 3],
[4, 5, 6]], dtype='int64'), dar[:])
def test_appendtoempty1d(self):
dar = create_array(path=self.temparpath, shape=(0,),
dtype='int64', overwrite=True)
dar.append([1, 2, 3])
self.assertArrayIdentical(np.array([1, 2, 3], dtype='int64'), dar[:])
def test_appendtoempty2d(self):
dar = create_array(path=self.temparpath, shape=(0, 2),
dtype='int64', overwrite=True)
dar.append([[1,2]])
dar.append([[1,2],[3,4]])
self.assertArrayIdentical(np.array([[1, 2], [1, 2], [3, 4]], dtype='int64'),
dar[:])
def test_appendempty1d(self):
dar = create_array(path=self.temparpath, shape=(1,),
dtype='int64', overwrite=True)
dar.append([])
self.assertArrayIdentical(np.array([0], dtype='int64'), dar[:])
def test_appendempty2d(self):
dar = create_array(path=self.temparpath, shape=(1, 2),
dtype='int64', overwrite=True)
dar.append(np.zeros((0,2), dtype='int64'))
self.assertArrayIdentical(np.array([[0, 0]], dtype='int64'), dar[:])
def test_appendemptytoempty1d(self):
dar = create_array(path=self.temparpath, shape=(0,),
dtype='int64', overwrite=True)
dar.append([])
self.assertArrayIdentical(np.array([], dtype='int64'), dar[:])
def test_appendemptytoempty2d(self):
dar = create_array(path=self.temparpath, shape=(0, 2),
dtype='int64', overwrite=True)
dar.append(np.zeros((0, 2), dtype='int64'))
self.assertArrayIdentical(np.zeros((0, 2), dtype='int64'), dar[:])
def test_appenddataerror(self):
def testiter():
yield [1, 2, 3]
yield [4, 5, 6]
raise ValueError
g = (f for f in testiter())
dar = create_array(path=self.temparpath, shape=(2,),
dtype='int64', overwrite=True)
self.assertRaises(AppendDataError, dar.iterappend, g)
self.assertArrayIdentical(dar[:], np.array([0, 0, 1, 2, 3, 4, 5, 6],
dtype='int64'))
def test_appendwrongshape(self):
dar = create_array(path=self.temparpath, shape=(2,3),
dtype='int64', overwrite=True)
ar = [[3,4]]
self.assertRaises(AppendDataError, dar.append, ar)
def test_appendreadonlyarray(self):
dar = create_array(path=self.temparpath, shape=(2,),
dtype='int64', overwrite=True, accessmode='r')
ar = [3, 4]
self.assertRaises(OSError, dar.append, ar)
def test_iterappendnoniterable(self):
dar = create_array(path=self.temparpath, shape=(2,),
dtype='int64', overwrite=True)
ar = 3
self.assertRaises(TypeError, dar.iterappend, ar)
# def test_fdclosedduringiterappend(self):
#
# def seq(dar):
# yield [0]
# dar._valuesfd.close()
# yield [0]
#
# dar = create_array(path=self.temparpath, shape=(2,),
# dtype='int64', overwrite=True)
# self.assertRaises(AppendDataError, dar.iterappend, seq(dar))
# self.assertArrayIdentical(dar, np.array([0, 0, 0], dtype='int64'))
# dar._check_consistency()
class TestIterFrames(DarrTestCase):
def setUp(self):
self.temparpath = tempfile.mkdtemp()
self.tempar = create_array(path=self.temparpath, shape=(10,),
dtype='int64', overwrite=True)
def tearDown(self):
shutil.rmtree(str(self.temparpath))
def test_iterframesstartindextoohigh(self):
with self.assertRaises(ValueError):
_ = [f for f in self.tempar.iterchunks(chunklen=2, startindex=12,
endindex=2)]
def test_iterframesendindextoohigh(self):
with self.assertRaises(ValueError):
_ = [f for f in self.tempar.iterchunks(chunklen=2, startindex=1,
endindex=12)]
class TestOpenFile(DarrTestCase):
def test_openfile(self):
with tempdir() as dirname:
dar = create_array(path=dirname, shape=(0, 2), dtype='int64',
overwrite=True, accessmode='r+')
with dar._datadir.open_file('notes.txt', 'a') as f:
f.write('test\n')
path = dar.path / 'notes.txt'
self.assertEqual(path.exists(), True)
def test_openfileprotectedfiles(self):
with tempdir() as dirname:
dar = create_array(path=dirname, shape=(0, 2), dtype='int64',
overwrite=True, accessmode='r+')
for fn in dar._protectedfiles:
with self.assertRaises(OSError):
with dar._datadir.open_file(fn, 'a') as f:
f.write('test\n')
class TruncateData(DarrTestCase):
def test_truncate1d(self):
with tempdirfile() as filename:
a = np.array([0, 1, 2, 3, 4], dtype='int64')
dar = asarray(path=filename, array=a, accessmode='r+')
truncate_array(dar, 2)
self.assertArrayIdentical(dar[:],
np.array([0,1], dtype=dar.dtype))
a = Array(filename)
self.assertArrayIdentical(a[:],
np.array([0, 1], dtype=a.dtype))
def test_truncatebydirname(self):
with tempdirfile() as filename:
a = np.array([0, 1, 2, 3, 4], dtype='int64')
dar = asarray(path=filename, array=a, accessmode='r+')
truncate_array(filename, 2)
a = Array(filename)
self.assertArrayIdentical(a[:], np.array([0, 1],
dtype=a.dtype))
def test_donottruncatenondarrdir(self):
with tempdirfile() as filename:
bd = create_datadir(filename)
bd._write_jsondict('test.json', {'a': 1})
self.assertRaises(TypeError, truncate_array, filename, 3)
def test_truncateinvalidindextype(self):
with tempdirfile() as filename:
a = np.array([0, 1, 2, 3, 4], dtype='int64')
dar = asarray(path=filename, array=a, accessmode='r+')
self.assertRaises(TypeError, truncate_array, dar, 'a')
def test_truncateindextoohigh(self):
with tempdirfile() as filename:
a = np.array([0, 1, 2, 3, 4], dtype='int64')
dar = asarray(path=filename, array=a, overwrite=True,
accessmode='r+')
self.assertRaises(IndexError, truncate_array, dar, 10)
def test_truncatetolen0(self):
with tempdirfile() as filename:
a = asarray(path=filename, array=[0, 1, 2, 3, 4],
dtype='int64', accessmode='r+')
truncate_array(a, 0)
self.assertEqual(len(a), 0)
ra = Array(filename)
            self.assertEqual(len(ra), 0)
class DeleteArray(DarrTestCase):
def test_simpledeletearray(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(0, 2), dtype='int64')
delete_array(dar)
self.assertEqual(len(os.listdir(filename.parent)), 0)
def test_simpledeletearraypath(self):
with tempdirfile() as filename:
_ = create_array(path=filename, shape=(0, 2), dtype='int64')
delete_array(filename)
self.assertEqual(len(os.listdir(filename.parent)), 0)
def test_donotdeletenondarrfile(self):
with tempdirfile() as filename:
dar = create_array(path=filename, shape=(0, 2), dtype='int64')
dar._datadir._write_jsondict('test.json', {'a': 1})
testpath = dar._path.joinpath('test.json')
self.assertRaises(OSError, delete_array, dar)
self.assertEqual(testpath.exists(), True)
def test_donotdeletenondarrdir(self):
with tempdirfile() as filename:
bd = create_datadir(filename, overwrite=True)
self.assertRaises(TypeError, delete_array, filename)
bd._write_jsondict('test.json', {'a': 1})
self.assertRaises(TypeError, delete_array, filename)
if __name__ == '__main__':
unittest.main()
| 40.930079
| 87
| 0.586527
|
c788baa25c69287c0e6b27d2fcdbd48e1f127df8
| 284
|
py
|
Python
|
fern/items/urls.py
|
Eslamhathout/Fern
|
ddcdb7318689f41ccfc6914175e5ccac9b1fb2fa
|
[
"MIT"
] | null | null | null |
fern/items/urls.py
|
Eslamhathout/Fern
|
ddcdb7318689f41ccfc6914175e5ccac9b1fb2fa
|
[
"MIT"
] | null | null | null |
fern/items/urls.py
|
Eslamhathout/Fern
|
ddcdb7318689f41ccfc6914175e5ccac9b1fb2fa
|
[
"MIT"
] | 1
|
2020-06-20T23:50:59.000Z
|
2020-06-20T23:50:59.000Z
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
urlpatterns = [
path('items/', views.ItemList.as_view()),
path('items/<int:pk>', views.ItemDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
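# Editorial note (not part of the original file): format_suffix_patterns adds optional
# format suffixes to these routes, so a client can request e.g. /items/1.json to pick
# a renderer explicitly instead of relying on content negotiation.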
| 23.666667
| 61
| 0.760563
|
31331baa0be9758fc5a43e34ec130d4c98791411
| 7,147
|
py
|
Python
|
translation/fairseq/data/iterators.py
|
houshengyuan/MAN
|
03912e80b21e0fc40c36515b6893d4244b82e6b6
|
[
"MIT"
] | 140
|
2019-06-10T04:02:07.000Z
|
2022-03-22T11:08:27.000Z
|
translation/fairseq/data/iterators.py
|
houshengyuan/MAN
|
03912e80b21e0fc40c36515b6893d4244b82e6b6
|
[
"MIT"
] | 6
|
2019-06-10T07:36:44.000Z
|
2020-05-15T03:33:39.000Z
|
translation/fairseq/data/iterators.py
|
houshengyuan/MAN
|
03912e80b21e0fc40c36515b6893d4244b82e6b6
|
[
"MIT"
] | 11
|
2019-06-21T05:31:17.000Z
|
2022-01-04T02:20:46.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import math
import numpy as np
import torch
from . import data_utils
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
Attributes:
count (int): number of elements consumed from this iterator
"""
def __init__(self, iterable):
self.iterable = iterable
self.count = 0
self.itr = iter(self)
def __len__(self):
return len(self.iterable)
def __iter__(self):
for x in self.iterable:
self.count += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.count < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
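# Illustrative usage sketch (editorial addition, not part of the original fairseq
# source); wrapping a list shows how the consumed-element count is tracked:
#
#     itr = CountingIterator([10, 20, 30])
#     first = next(itr)   # first == 10, itr.count == 1
#     itr.skip(1)         # fast-forwards past 20, itr.count == 2
#     itr.has_next()      # True: one element (30) is still left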
class EpochBatchIterator(object):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler): an iterator over batches of
indices
seed (int, optional): seed for random number generator for
reproducibility. Default: ``1``
num_shards (int, optional): shard the data iterator into N
shards. Default: ``1``
shard_id (int, optional): which shard of the data iterator to
return. Default: ``0``
"""
def __init__(self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.frozen_batches = tuple(batch_sampler)
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.epoch = 0
self._cur_epoch_itr = None
self._next_epoch_itr = None
def __len__(self):
return len(self.frozen_batches)
def next_epoch_itr(self, shuffle=True):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator. Default: ``True``
"""
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
self.epoch += 1
self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle)
return self._cur_epoch_itr
def end_of_epoch(self):
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.count
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.count
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
return {
'epoch': self.epoch,
'iterations_in_epoch': self.iterations_in_epoch,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
if itr_pos > 0:
# fast-forward epoch iterator
itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))
if itr_pos < len(itr):
self._next_epoch_itr = itr.skip(itr_pos)
def _get_iterator_for_epoch(self, epoch, shuffle):
if shuffle:
# set seed based on the seed and epoch number so that we get
# reproducible results when resuming from checkpoints
with data_utils.numpy_seed(self.seed + epoch):
batches = list(self.frozen_batches) # copy
np.random.shuffle(batches)
else:
batches = self.frozen_batches
return CountingIterator(torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]),
))
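# Illustrative usage sketch (editorial addition, not part of the original source);
# a typical training loop requests a fresh iterator per epoch and can checkpoint
# and restore its position:
#
#     epoch_itr = EpochBatchIterator(dataset, collate_fn, batch_sampler, seed=1)
#     itr = epoch_itr.next_epoch_itr(shuffle=True)
#     for batch in itr:
#         pass  # train on batch
#     state = epoch_itr.state_dict()     # {'epoch': ..., 'iterations_in_epoch': ...}
#     epoch_itr.load_state_dict(state)   # resumes mid-epoch via CountingIterator.skip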
class GroupedIterator(object):
"""Wrapper around an iterable that returns groups (chunks) of items.
Args:
iterable (iterable): iterable to wrap
chunk_size (int): size of each chunk
"""
def __init__(self, iterable, chunk_size):
self._len = int(math.ceil(len(iterable) / float(chunk_size)))
self.itr = iter(iterable)
self.chunk_size = chunk_size
def __len__(self):
return self._len
def __iter__(self):
return self
def __next__(self):
chunk = []
try:
for _ in range(self.chunk_size):
chunk.append(next(self.itr))
except StopIteration as e:
if len(chunk) == 0:
raise e
return chunk
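# Editorial note (not part of the original source): grouping an iterable of five
# items with chunk_size=2 yields [x0, x1], [x2, x3] and finally the partial chunk
# [x4]; __len__ reports math.ceil(5 / 2) == 3 groups.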
class ShardedIterator(object):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterate over
fill_value (Any, optional): padding value when the iterable doesn't
evenly divide *num_shards*. Default: ``None``
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError('shard_id must be between 0 and num_shards')
self._sharded_len = len(iterable) // num_shards
if len(iterable) % num_shards > 0:
self._sharded_len += 1
self.itr = itertools.zip_longest(
range(self._sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
)
def __len__(self):
return self._sharded_len
def __iter__(self):
return self
def __next__(self):
return next(self.itr)[1]
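# Illustrative sketch (editorial addition, not part of the original source): sharding
# five items across two shards pads the shorter shard so both report the same length.
#
#     list(ShardedIterator(range(5), num_shards=2, shard_id=0))  # -> [0, 2, 4]
#     list(ShardedIterator(range(5), num_shards=2, shard_id=1))  # -> [1, 3, None]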
| 32.935484
| 98
| 0.634392
|
7907c353fe7228172d12dfe2ebf9d989531d716e
| 583
|
py
|
Python
|
ecs/meetings/migrations/0010_meeting_documents_zip.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 9
|
2017-02-13T18:17:13.000Z
|
2020-11-21T20:15:54.000Z
|
ecs/meetings/migrations/0010_meeting_documents_zip.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 2
|
2021-05-20T14:26:47.000Z
|
2021-05-20T14:26:48.000Z
|
ecs/meetings/migrations/0010_meeting_documents_zip.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 4
|
2017-04-02T18:48:59.000Z
|
2021-11-23T15:40:35.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('documents', '0004_uuidfield'),
('meetings', '0009_auto_20170106_1414'),
]
operations = [
migrations.AddField(
model_name='meeting',
name='documents_zip',
field=models.ForeignKey(to='documents.Document', related_name='zip_for_meeting', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
| 26.5
| 150
| 0.656947
|
98b2ba7b0fabe7d26db09f5936abb0bbcbafcc8b
| 3,139
|
py
|
Python
|
src/plotmanx/_tests/manager_test.py
|
tokenchain/plotman
|
666e4e34eecda90aa94004ff042fefebfbf3fead
|
[
"Apache-2.0"
] | null | null | null |
src/plotmanx/_tests/manager_test.py
|
tokenchain/plotman
|
666e4e34eecda90aa94004ff042fefebfbf3fead
|
[
"Apache-2.0"
] | null | null | null |
src/plotmanx/_tests/manager_test.py
|
tokenchain/plotman
|
666e4e34eecda90aa94004ff042fefebfbf3fead
|
[
"Apache-2.0"
] | null | null | null |
# TODO: migrate away from unittest patch
from unittest.mock import patch
import pytest
from plotmanx import configuration, manager
@pytest.fixture
def sched_cfg():
return configuration.Scheduling(
global_max_jobs=1,
global_stagger_m=2,
polling_time_s=2,
tmpdir_stagger_phase_major=3,
tmpdir_stagger_phase_minor=0,
tmpdir_max_jobs=3
)
@pytest.fixture
def dir_cfg():
return configuration.Directories(
log="/plots/log",
tmp=["/var/tmp", "/tmp"],
dst=["/mnt/dst/00", "/mnt/dst/01", "/mnt/dst/03"],
tmp_overrides={"/mnt/tmp/04": configuration.TmpOverrides(tmpdir_max_jobs=4)}
)
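# Editorial note (inferred from the tests below, not part of the original file): each
# running job is summarised by a (major, minor) phase tuple. As exercised here,
# phases_permit_new_job allows a new plot on a tmpdir only when no existing job there
# is still before the stagger milestone (tmpdir_stagger_phase_major/minor, i.e. (3, 0))
# and the number of counted jobs stays below tmpdir_max_jobs (3 by default, 4 for the
# /mnt/tmp/04 override); jobs reporting a (None, None) phase are not counted.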
def test_permit_new_job_post_milestone(sched_cfg, dir_cfg):
assert manager.phases_permit_new_job(
[(3, 8), (4, 1)], '/mnt/tmp/00', sched_cfg, dir_cfg)
def test_permit_new_job_pre_milestone(sched_cfg, dir_cfg):
assert not manager.phases_permit_new_job(
[(2, 3), (4, 1)], '/mnt/tmp/00', sched_cfg, dir_cfg)
def test_permit_new_job_too_many_jobs(sched_cfg, dir_cfg):
assert not manager.phases_permit_new_job(
[(3, 1), (3, 2), (3, 3)], '/mnt/tmp/00', sched_cfg, dir_cfg)
def test_permit_new_job_too_many_jobs_zerophase(sched_cfg, dir_cfg):
assert not manager.phases_permit_new_job(
[(3, 0), (3, 1), (3, 3)], '/mnt/tmp/00', sched_cfg, dir_cfg)
def test_permit_new_job_too_many_jobs_nonephase(sched_cfg, dir_cfg):
assert manager.phases_permit_new_job(
[(None, None), (3, 1), (3, 3)], '/mnt/tmp/00', sched_cfg, dir_cfg)
def test_permit_new_job_override_tmp_dir(sched_cfg, dir_cfg):
assert manager.phases_permit_new_job(
[(3, 1), (3, 2), (3, 3)], '/mnt/tmp/04', sched_cfg, dir_cfg)
assert not manager.phases_permit_new_job(
[(3, 1), (3, 2), (3, 3), (3, 6)], '/mnt/tmp/04', sched_cfg,
dir_cfg)
@patch('plotmanx.job.Job')
def job_w_tmpdir_phase(tmpdir, phase, MockJob):
j = MockJob()
j.progress.return_value = phase
j.tmpdir = tmpdir
return j
@patch('plotmanx.job.Job')
def job_w_dstdir_phase(dstdir, phase, MockJob):
j = MockJob()
j.progress.return_value = phase
j.dstdir = dstdir
return j
def test_dstdirs_to_furthest_phase():
all_jobs = [job_w_dstdir_phase('/plots1', (1, 5)),
job_w_dstdir_phase('/plots2', (1, 1)),
job_w_dstdir_phase('/plots2', (3, 1)),
job_w_dstdir_phase('/plots2', (2, 1)),
job_w_dstdir_phase('/plots3', (4, 1))]
assert (manager.dstdirs_to_furthest_phase(all_jobs) ==
{'/plots1': (1, 5),
'/plots2': (3, 1),
'/plots3': (4, 1)})
def test_dstdirs_to_youngest_phase():
all_jobs = [job_w_dstdir_phase('/plots1', (1, 5)),
job_w_dstdir_phase('/plots2', (1, 1)),
job_w_dstdir_phase('/plots2', (3, 1)),
job_w_dstdir_phase('/plots2', (2, 1)),
job_w_dstdir_phase('/plots3', (4, 1))]
assert (manager.dstdirs_to_youngest_phase(all_jobs) ==
{'/plots1': (1, 5),
'/plots2': (1, 1),
'/plots3': (4, 1)})
| 30.475728
| 84
| 0.621854
|
8aa610b0cdb50c578ef100939ebdbcb07450ba72
| 12,282
|
py
|
Python
|
tests/test_util.py
|
EpicWink/sfini
|
6bb09d6a59f940947bc2739d24ed05611ebba933
|
[
"MIT"
] | 3
|
2019-09-09T23:24:18.000Z
|
2020-03-30T02:07:37.000Z
|
tests/test_util.py
|
EpicWink/sfini
|
6bb09d6a59f940947bc2739d24ed05611ebba933
|
[
"MIT"
] | 26
|
2019-05-26T07:51:17.000Z
|
2020-09-08T12:40:26.000Z
|
tests/test_util.py
|
EpicWink/sfini
|
6bb09d6a59f940947bc2739d24ed05611ebba933
|
[
"MIT"
] | 1
|
2020-04-10T20:26:18.000Z
|
2020-04-10T20:26:18.000Z
|
"""Test ``sfini._util``."""
from sfini import _util as tscr
import pytest
from unittest import mock
import logging as lg
import boto3
class TestDefaultParameter:
"""Test `sfini._util.DefaultParameter``."""
@pytest.fixture
def default(self):
"""A DefaultParameter instance."""
return tscr.DefaultParameter()
def test_bool(self, default):
"""Conversion to boolean."""
assert not default
assert bool(default) is False
def test_eq(self, default):
"""Default paramater equality."""
other = tscr.DefaultParameter()
assert default is not other
assert default == other
def test_str(self, default):
"""Default paramater stringification."""
assert str(default) == "<unspecified>"
def test_repr(self, default):
"""Default paramater string representation."""
assert repr(default) == "DefaultParameter()"
@pytest.mark.parametrize(
("level", "exp_logger_level", "exp_handler_level"),
[(None, lg.WARNING, lg.NOTSET), (lg.INFO, lg.INFO, lg.INFO)])
def test_setup_logging(level, exp_logger_level, exp_handler_level):
"""Standard-library logging set-up configuration."""
# Setup environment
root_logger = lg.getLogger()
root_logger.setLevel(lg.WARNING)
# Run function
with mock.patch.object(root_logger, "handlers", []):
tscr.setup_logging(level=level)
handlers = root_logger.handlers
# Check result
assert root_logger.level == exp_logger_level
handler, = handlers
assert isinstance(handler, lg.StreamHandler)
fmt = handler.formatter._fmt
assert "message" in fmt
assert "asctime" in fmt[:fmt.index("message")]
assert "levelname" in fmt[:fmt.index("message")]
assert "name" in fmt[:fmt.index("message")]
assert handler.level == exp_handler_level
class TestCachedProperty:
"""Test `sfini._util.cached_property``."""
def test(self):
"""Standard use."""
with mock.patch.object(tscr, "DEBUG", False):
class C:
def __init__(self):
self.a = 42
@tscr.cached_property
def b(self):
return self.a * 2
# Getting
c = C()
assert c.b == 84
c.a = 3
assert c.b == 84
# Setting
with pytest.raises(AttributeError):
c.b = 4
# Deleting
with pytest.raises(AttributeError):
del c.b
def test_debug(self):
"""Debug-model allowing property setting/deleting."""
with mock.patch.object(tscr, "DEBUG", True):
class C:
def __init__(self):
self.a = 42
@tscr.cached_property
def b(self):
return self.a * 2
# Getting
c = C()
assert c.b == 84
c.a = 3
assert c.b == 84
# Setting
c.b = 4
assert c.b == 4
# Deleting
del c.b
assert c.b == 6
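# Editorial note (inferred from the tests above): cached_property evaluates the wrapped
# method once on first access and then returns the stored value, so later attribute
# changes are not reflected. Outside debug mode the cached value cannot be set or
# deleted; with _util.DEBUG enabled it can be overwritten or deleted, and deleting it
# forces a recomputation on the next access (hence c.b == 6 once c.a has become 3).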
class TestAssertValidName:
"""AWS-given name validation."""
@pytest.mark.parametrize("name", ["spam", "a.!@-_+='"])
def test_valid(self, name):
"""Passes for valid names."""
tscr.assert_valid_name(name)
@pytest.mark.parametrize(
"name",
[
"Lorem ipsum dolor sit amet, consectetur adipiscing "
"elit, sed do eiusmod tempor incididunt",
"foo bar",
"spam\nbla",
"<xml />",
"spam [type]",
"{name}",
"\"name\"",
"name:spam",
"#names",
"eggs?",
".*",
"50%",
"\\spam",
"foo^bar",
"spam|bla",
"~name",
"/path/to/name",
"`name`",
"$name"
"foo&bar",
"foo,bar",
"spam;bar",
tscr.INVALID_NAME_CHARACTERS])
def test_invalid(self, name):
"""Raises for invalid names."""
with pytest.raises(ValueError) as e:
tscr.assert_valid_name(name)
assert name in str(e.value)
def test_collect_paginated():
"""Paginated AWS API endpoint request collection."""
# Build input
fn_rvs = [
{"items": [1, 5, 4], "nextToken": 42},
{"items": [9, 3, 0], "nextToken": 17},
{"items": [8]}]
fn = mock.Mock(side_effect=fn_rvs)
kwargs = {"a": 128, "b": [{"c": None, "d": "spam"}]}
# Build expectation
exp = {"items": [1, 5, 4, 9, 3, 0, 8]}
exp_calls = [
mock.call(**kwargs),
mock.call(nextToken=42, **kwargs),
mock.call(nextToken=17, **kwargs)]
# Run function
res = tscr.collect_paginated(fn, **kwargs)
# Check result
assert res == exp
assert fn.call_args_list == exp_calls
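# Editorial note (inferred from the test above): collect_paginated keeps calling the
# endpoint, feeding each response's 'nextToken' back in as a keyword argument until a
# response arrives without one, and concatenates the list-valued fields (here 'items')
# from all pages into a single result dictionary.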
class TestEasyRepr:
"""Test ``sfini._util.easy_repr``"""
def test_no_params(self):
"""Function has no paramaters."""
class Class:
pass
instance = Class()
exp = "Class()"
res = tscr.easy_repr(instance)
assert res == exp
def test_positional(self):
"""Called with only positional arguments."""
class Class:
def __init__(self, a, b):
self.a = a
self.b = b
instance = Class(42, "spam")
exp = "Class(42, 'spam')"
res = tscr.easy_repr(instance)
assert res == exp
def test_positional_with_optional(self):
"""Called with only some positional arguments."""
class Class:
def __init__(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
instance = Class(42, "spam")
exp = "Class(42, 'spam')"
res = tscr.easy_repr(instance)
assert res == exp
def test_positional_with_optional_provided(self):
"""Called with some arguments keyword."""
class Class:
def __init__(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
instance = Class(42, "spam", c=[1, 2])
exp = "Class(42, 'spam', c=[1, 2])"
res = tscr.easy_repr(instance)
assert res == exp
def test_keyword_only_required(self):
"""Function has required keyword-only."""
class Class:
def __init__(self, a, b, c=None, *, d):
self.a = a
self.b = b
self.c = c
self.d = d
instance = Class(42, "spam", d="bla")
exp = "Class(42, 'spam', d='bla')"
res = tscr.easy_repr(instance)
assert res == exp
def test_keyword_only_optional(self):
"""Function has optional keyword-only."""
class Class:
def __init__(self, a, b, c=None, *, d=None):
self.a = a
self.b = b
self.c = c
self.d = d
instance = Class(42, "spam")
exp = "Class(42, 'spam')"
res = tscr.easy_repr(instance)
assert res == exp
def test_long_positional(self):
"""Passed long positional."""
class Class:
def __init__(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
instance = Class(42, "spam" * 42)
exp = "Class(42, len 168)"
res = tscr.easy_repr(instance)
assert res == exp
def test_long_keyword(self):
"""Passed long keyword."""
class Class:
def __init__(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
instance = Class(42, "spam", c=[1, 2] * 42)
exp = "Class(42, 'spam', len(c)=84)"
res = tscr.easy_repr(instance)
assert res == exp
def test_combined(self):
"""A combined test."""
class Class:
def __init__(self, a, b, c=None, d="foo", *, e, f=None, g=""):
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
self.f = f
self.g = g
instance = Class(42, "spam", d="bar", e=3, g="1")
exp = "Class(42, 'spam', d='bar', e=3, g='1')"
res = tscr.easy_repr(instance)
assert res == exp
def test_var_positional(self):
"""Error when initialiser has a positional var-arg."""
class Class:
def __init__(self, a, b, c=None, *args):
self.a = a
self.b = b
self.c = c
self.args = args
instance = Class(42, "spam")
with pytest.raises(RuntimeError):
_ = tscr.easy_repr(instance)
def test_var_keyword(self):
"""Error when initialiser has a keyword var-arg."""
class Class:
def __init__(self, a, b, c=None, **kwargs):
self.a = a
self.b = b
self.c = c
self.kwargs = kwargs
instance = Class(42, "spam")
with pytest.raises(RuntimeError):
_ = tscr.easy_repr(instance)
def test_repr(self):
"""Usage as ``__repr__``."""
class Class:
def __init__(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
__repr__ = tscr.easy_repr
instance = Class(42, "spam")
exp = "Class(42, 'spam')"
res = repr(instance)
assert res == exp
def test_repr_combined(self):
"""A combined test."""
class Class:
def __init__(self, a, b, c=None, d="foo", *, e, f=None, g=""):
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
self.f = f
self.g = g
__repr__ = tscr.easy_repr
instance = Class(42, "spam", d="bar", e=3, g="1")
exp = "Class(42, 'spam', d='bar', e=3, g='1')"
res = repr(instance)
assert res == exp
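# Editorial note (inferred from the tests above): easy_repr reconstructs a repr from
# the class's __init__ signature, printing positional arguments by value, omitting
# keyword arguments that are still at their defaults, abbreviating long values as
# 'len N' / 'len(name)=N', and raising RuntimeError when the initialiser declares
# *args or **kwargs.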
class TestAWSSession:
"""Test ``sfini._util.AWSSession``."""
@pytest.fixture
def session(self):
"""AWS ``boto3`` session mock."""
return mock.Mock(spec=boto3.Session)
@pytest.fixture
def sfini_session(self, session):
"""An example AWSSession instance."""
return tscr.AWSSession(session=session)
def test_init(self, sfini_session, session):
"""AWSSession instantiation."""
assert sfini_session.session is session
def test_str(self, sfini_session):
"""AWSSession stringification."""
sfini_session.credentials = mock.Mock()
sfini_session.credentials.access_key = "spamkey"
sfini_session.region = "spamregion"
res = str(sfini_session)
assert "spamkey" in res
assert "spamregion" in res
def test_repr(self, sfini_session, session):
"""AWSSession string representation."""
exp = "AWSSession(session=%r)" % session
res = repr(sfini_session)
assert res == exp
def test_credentials(self, sfini_session, session):
"""AWS IAM credentials."""
res = sfini_session.credentials
assert res is session.get_credentials.return_value
session.get_credentials.assert_called_once_with()
def test_sfn(self, sfini_session, session):
"""AWS Step Functions client."""
res = sfini_session.sfn
assert res is session.client.return_value
session.client.assert_called_once_with("stepfunctions")
def test_region(self, sfini_session, session):
"""AWS session API region."""
session.region_name = "spamregion"
assert sfini_session.region == "spamregion"
def test_account_id(self, sfini_session, session):
"""AWS account ID."""
client_mock = mock.Mock()
session.client.return_value = client_mock
client_mock.get_caller_identity.return_value = {"Account": "spamacc"}
assert sfini_session.account_id == "spamacc"
session.client.assert_called_once_with("sts")
client_mock.get_caller_identity.assert_called_once_with()
| 29.242857
| 77
| 0.52638
|
9f8af2bc36ab026a3c45c85493fd75b1c82e65c0
| 2,799
|
py
|
Python
|
test/test_appscale_get_property.py
|
NeolithEra/appscale-tools
|
bf02462645ba08975e0e4cfcc25f3ff4e0ec1545
|
[
"Apache-2.0"
] | 21
|
2015-04-22T02:46:22.000Z
|
2021-11-14T20:05:18.000Z
|
test/test_appscale_get_property.py
|
NeolithEra/appscale-tools
|
bf02462645ba08975e0e4cfcc25f3ff4e0ec1545
|
[
"Apache-2.0"
] | 244
|
2015-01-02T22:43:58.000Z
|
2020-01-29T17:52:43.000Z
|
test/test_appscale_get_property.py
|
NeolithEra/appscale-tools
|
bf02462645ba08975e0e4cfcc25f3ff4e0ec1545
|
[
"Apache-2.0"
] | 32
|
2015-03-12T14:49:49.000Z
|
2021-06-08T09:43:36.000Z
|
#!/usr/bin/env python
# General-purpose Python library imports
import json
import os
import sys
import time
import unittest
# Third party libraries
from flexmock import flexmock
import SOAPpy
# AppScale import, the library that we're testing here
from appscale.tools.appscale_logger import AppScaleLogger
from appscale.tools.appscale_tools import AppScaleTools
from appscale.tools.local_state import LocalState
from appscale.tools.parse_args import ParseArgs
class TestAppScaleGetProperty(unittest.TestCase):
def setUp(self):
self.keyname = "boobazblargfoo"
self.function = "appscale-get-property"
# mock out any writing to stdout
flexmock(AppScaleLogger)
AppScaleLogger.should_receive('log').and_return()
AppScaleLogger.should_receive('success').and_return()
AppScaleLogger.should_receive('warn').and_return()
# mock out all sleeping
flexmock(time)
time.should_receive('sleep').and_return()
def test_get_property(self):
# put in a mock for reading the secret file
builtins = flexmock(sys.modules['__builtin__'])
builtins.should_call('open') # set the fall-through
secret_key_location = LocalState.get_secret_key_location(self.keyname)
fake_secret = flexmock(name="fake_secret")
fake_secret.should_receive('read').and_return('the secret')
builtins.should_receive('open').with_args(secret_key_location, 'r') \
.and_return(fake_secret)
# mock out finding the shadow node's IP address from the json file
flexmock(os.path)
os.path.should_call('exists') # set the fall-through
os.path.should_receive('exists').with_args(
LocalState.get_locations_json_location(self.keyname)).and_return(True)
fake_nodes_json = flexmock(name="fake_secret")
fake_nodes_json.should_receive('read').and_return(json.dumps(
{"node_info": [{
'public_ip': 'public1',
'private_ip': 'private1',
'roles': ['shadow']
}]}))
builtins.should_receive('open').with_args(
LocalState.get_locations_json_location(self.keyname), 'r') \
.and_return(fake_nodes_json)
# mock out grabbing the userappserver ip from an appcontroller
property_regex = "property-name-.*"
expected = {'a':'b'}
fake_appcontroller = flexmock(name='fake_appcontroller')
fake_appcontroller.should_receive('get_property').with_args(property_regex,
'the secret').and_return(json.dumps(expected))
flexmock(SOAPpy)
SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
.and_return(fake_appcontroller)
argv = [
"--keyname", self.keyname,
"--property", property_regex
]
options = ParseArgs(argv, self.function).args
properties = AppScaleTools.get_property(options)
self.assertEqual(expected, properties)
| 31.449438
| 79
| 0.728832
|
09357f8d9e666a631d42ded9fa15c4a84c45d1dc
| 108,636
|
py
|
Python
|
tests/unit/forklift/test_legacy.py
|
lala7573/warehouse
|
56938af14d0337724873f4e9024be7aa0adcda1e
|
[
"Apache-2.0"
] | 1
|
2019-06-23T14:37:33.000Z
|
2019-06-23T14:37:33.000Z
|
tests/unit/forklift/test_legacy.py
|
Matthelonianxl/warehouse
|
a08fdb85c523be2c6d5fe29e0c333e1664566cc2
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/forklift/test_legacy.py
|
Matthelonianxl/warehouse
|
a08fdb85c523be2c6d5fe29e0c333e1664566cc2
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import io
import re
import tempfile
import zipfile
from cgi import FieldStorage
from unittest import mock
import pkg_resources
import pretend
import pytest
import requests
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden
from sqlalchemy.orm import joinedload
from webob.multidict import MultiDict
from wtforms.form import Form
from wtforms.validators import ValidationError
from warehouse.admin.flags import AdminFlag
from warehouse.admin.squats import Squat
from warehouse.classifiers.models import Classifier
from warehouse.forklift import legacy
from warehouse.packaging.interfaces import IFileStorage
from warehouse.packaging.models import (
Dependency,
DependencyKind,
File,
Filename,
JournalEntry,
Project,
Release,
Role,
)
from ...common.db.accounts import EmailFactory, UserFactory
from ...common.db.classifiers import ClassifierFactory
from ...common.db.packaging import (
FileFactory,
ProjectFactory,
ReleaseFactory,
RoleFactory,
)
def test_exc_with_message():
exc = legacy._exc_with_message(HTTPBadRequest, "My Test Message.")
assert isinstance(exc, HTTPBadRequest)
assert exc.status_code == 400
assert exc.status == "400 My Test Message."
class TestValidation:
@pytest.mark.parametrize("version", ["1.0", "30a1", "1!1", "1.0-1"])
def test_validates_valid_pep440_version(self, version):
form, field = pretend.stub(), pretend.stub(data=version)
legacy._validate_pep440_version(form, field)
@pytest.mark.parametrize("version", ["dog", "1.0.dev.a1", "1.0+local"])
def test_validates_invalid_pep440_version(self, version):
form, field = pretend.stub(), pretend.stub(data=version)
with pytest.raises(ValidationError):
legacy._validate_pep440_version(form, field)
@pytest.mark.parametrize(
("requirement", "expected"),
[("foo", ("foo", None)), ("foo (>1.0)", ("foo", ">1.0"))],
)
def test_parses_legacy_requirement_valid(self, requirement, expected):
parsed = legacy._parse_legacy_requirement(requirement)
assert parsed == expected
@pytest.mark.parametrize("requirement", ["foo bar"])
def test_parses_legacy_requirement_invalid(self, requirement):
with pytest.raises(ValueError):
legacy._parse_legacy_requirement(requirement)
@pytest.mark.parametrize("specifier", [">=1.0", "<=1.0-1"])
def test_validates_valid_pep440_specifier(self, specifier):
legacy._validate_pep440_specifier(specifier)
@pytest.mark.parametrize("specifier", ["wat?"])
def test_validates_invalid_pep440_specifier(self, specifier):
with pytest.raises(ValidationError):
legacy._validate_pep440_specifier(specifier)
@pytest.mark.parametrize(
"requirement", ["foo (>=1.0)", "foo", "_foo", "foo2", "foo.bar"]
)
def test_validates_legacy_non_dist_req_valid(self, requirement):
legacy._validate_legacy_non_dist_req(requirement)
@pytest.mark.parametrize(
"requirement",
[
"foo-bar (>=1.0)",
"foo-bar",
"2foo (>=1.0)",
"2foo",
"☃ (>=1.0)",
"☃",
"name @ https://github.com/pypa",
"foo.2bar",
],
)
def test_validates_legacy_non_dist_req_invalid(self, requirement):
with pytest.raises(ValidationError):
legacy._validate_legacy_non_dist_req(requirement)
def test_validate_legacy_non_dist_req_list(self, monkeypatch):
validator = pretend.call_recorder(lambda datum: None)
monkeypatch.setattr(legacy, "_validate_legacy_non_dist_req", validator)
data = [pretend.stub(), pretend.stub(), pretend.stub()]
form, field = pretend.stub(), pretend.stub(data=data)
legacy._validate_legacy_non_dist_req_list(form, field)
assert validator.calls == [pretend.call(datum) for datum in data]
@pytest.mark.parametrize(
"requirement", ["foo (>=1.0)", "foo", "foo2", "foo-bar", "foo_bar"]
)
def test_validate_legacy_dist_req_valid(self, requirement):
legacy._validate_legacy_dist_req(requirement)
@pytest.mark.parametrize(
"requirement",
[
"☃ (>=1.0)",
"☃",
"foo-",
"foo- (>=1.0)",
"_foo",
"_foo (>=1.0)",
"name @ https://github.com/pypa",
],
)
def test_validate_legacy_dist_req_invalid(self, requirement):
with pytest.raises(ValidationError):
legacy._validate_legacy_dist_req(requirement)
def test_validate_legacy_dist_req_list(self, monkeypatch):
validator = pretend.call_recorder(lambda datum: None)
monkeypatch.setattr(legacy, "_validate_legacy_dist_req", validator)
data = [pretend.stub(), pretend.stub(), pretend.stub()]
form, field = pretend.stub(), pretend.stub(data=data)
legacy._validate_legacy_dist_req_list(form, field)
assert validator.calls == [pretend.call(datum) for datum in data]
@pytest.mark.parametrize(
("requirement", "specifier"), [("C", None), ("openssl (>=1.0.0)", ">=1.0.0")]
)
def test_validate_requires_external(self, monkeypatch, requirement, specifier):
spec_validator = pretend.call_recorder(lambda spec: None)
monkeypatch.setattr(legacy, "_validate_pep440_specifier", spec_validator)
legacy._validate_requires_external(requirement)
if specifier is not None:
assert spec_validator.calls == [pretend.call(specifier)]
else:
assert spec_validator.calls == []
def test_validate_requires_external_list(self, monkeypatch):
validator = pretend.call_recorder(lambda datum: None)
monkeypatch.setattr(legacy, "_validate_requires_external", validator)
data = [pretend.stub(), pretend.stub(), pretend.stub()]
form, field = pretend.stub(), pretend.stub(data=data)
legacy._validate_requires_external_list(form, field)
assert validator.calls == [pretend.call(datum) for datum in data]
@pytest.mark.parametrize(
"project_url",
["Home, https://pypi.python.org/", ("A" * 32) + ", https://example.com/"],
)
def test_validate_project_url_valid(self, project_url):
legacy._validate_project_url(project_url)
@pytest.mark.parametrize(
"project_url",
[
"Home,https://pypi.python.org/",
"https://pypi.python.org/",
", https://pypi.python.org/",
"Home, ",
("A" * 33) + ", https://example.com/",
"Home, I am a banana",
"Home, ssh://foobar",
"",
],
)
def test_validate_project_url_invalid(self, project_url):
with pytest.raises(ValidationError):
legacy._validate_project_url(project_url)
@pytest.mark.parametrize(
"project_urls",
[["Home, https://pypi.python.org/", ("A" * 32) + ", https://example.com/"]],
)
def test_all_valid_project_url_list(self, project_urls):
form, field = pretend.stub(), pretend.stub(data=project_urls)
legacy._validate_project_url_list(form, field)
@pytest.mark.parametrize(
"project_urls",
[
["Home, https://pypi.python.org/", ""], # Valid # Invalid
[
("A" * 32) + ", https://example.com/", # Valid
("A" * 33) + ", https://example.com/", # Invalid
],
],
)
def test_invalid_member_project_url_list(self, project_urls):
form, field = pretend.stub(), pretend.stub(data=project_urls)
with pytest.raises(ValidationError):
legacy._validate_project_url_list(form, field)
def test_validate_project_url_list(self, monkeypatch):
validator = pretend.call_recorder(lambda datum: None)
monkeypatch.setattr(legacy, "_validate_project_url", validator)
data = [pretend.stub(), pretend.stub(), pretend.stub()]
form, field = pretend.stub(), pretend.stub(data=data)
legacy._validate_project_url_list(form, field)
assert validator.calls == [pretend.call(datum) for datum in data]
@pytest.mark.parametrize(
"data",
[
(""),
("foo@bar.com"),
("foo@bar.com,"),
("foo@bar.com, biz@baz.com"),
('"C. Schultz" <cschultz@example.com>'),
('"C. Schultz" <cschultz@example.com>, snoopy@peanuts.com'),
],
)
def test_validate_rfc822_email_field(self, data):
form, field = pretend.stub(), pretend.stub(data=data)
legacy._validate_rfc822_email_field(form, field)
@pytest.mark.parametrize(
"data",
[
("foo"),
("foo@"),
("@bar.com"),
("foo@bar"),
("foo AT bar DOT com"),
("foo@bar.com, foo"),
],
)
def test_validate_rfc822_email_field_raises(self, data):
form, field = pretend.stub(), pretend.stub(data=data)
with pytest.raises(ValidationError):
legacy._validate_rfc822_email_field(form, field)
@pytest.mark.parametrize(
"data",
[
"text/plain; charset=UTF-8",
"text/x-rst; charset=UTF-8",
"text/markdown; charset=UTF-8; variant=CommonMark",
"text/markdown; charset=UTF-8; variant=GFM",
"text/markdown",
],
)
def test_validate_description_content_type_valid(self, data):
form, field = pretend.stub(), pretend.stub(data=data)
legacy._validate_description_content_type(form, field)
@pytest.mark.parametrize(
"data",
[
"invalid_type/plain",
"text/invalid_subtype",
"text/plain; charset=invalid_charset",
"text/markdown; charset=UTF-8; variant=invalid_variant",
],
)
def test_validate_description_content_type_invalid(self, data):
form, field = pretend.stub(), pretend.stub(data=data)
with pytest.raises(ValidationError):
legacy._validate_description_content_type(form, field)
def test_validate_no_deprecated_classifiers_valid(self, db_request):
valid_classifier = ClassifierFactory(deprecated=False)
validator = legacy._no_deprecated_classifiers(db_request)
form = pretend.stub()
field = pretend.stub(data=[valid_classifier.classifier])
validator(form, field)
def test_validate_no_deprecated_classifiers_invalid(self, db_request):
deprecated_classifier = ClassifierFactory(classifier="AA: BB", deprecated=True)
validator = legacy._no_deprecated_classifiers(db_request)
db_request.registry = pretend.stub(settings={"warehouse.domain": "host"})
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/url")
form = pretend.stub()
field = pretend.stub(data=[deprecated_classifier.classifier])
with pytest.raises(ValidationError):
validator(form, field)
def test_construct_dependencies():
types = {"requires": DependencyKind.requires, "provides": DependencyKind.provides}
form = pretend.stub(
requires=pretend.stub(data=["foo (>1)"]),
provides=pretend.stub(data=["bar (>2)"]),
)
for dep in legacy._construct_dependencies(form, types):
assert isinstance(dep, Dependency)
if dep.kind == DependencyKind.requires:
assert dep.specifier == "foo (>1)"
elif dep.kind == DependencyKind.provides:
assert dep.specifier == "bar (>2)"
else:
pytest.fail("Unknown type of specifier")
class TestListField:
@pytest.mark.parametrize(
("data", "expected"),
[
(["foo", "bar"], ["foo", "bar"]),
([" foo"], ["foo"]),
(["f oo "], ["f oo"]),
("", []),
(" ", []),
],
)
def test_processes_form_data(self, data, expected):
field = legacy.ListField()
field = field.bind(pretend.stub(meta=pretend.stub()), "formname")
field.process_formdata(data)
assert field.data == expected
@pytest.mark.parametrize(("value", "expected"), [("", []), ("wutang", ["wutang"])])
def test_coerce_string_into_list(self, value, expected):
class MyForm(Form):
test = legacy.ListField()
form = MyForm(MultiDict({"test": value}))
assert form.test.data == expected
class TestMetadataForm:
@pytest.mark.parametrize(
"data",
[
{"filetype": "sdist", "md5_digest": "bad"},
{"filetpye": "bdist_wheel", "pyversion": "3.4", "md5_digest": "bad"},
{"filetype": "sdist", "sha256_digest": "bad"},
{"filetpye": "bdist_wheel", "pyversion": "3.4", "sha256_digest": "bad"},
{"filetype": "sdist", "md5_digest": "bad", "sha256_digest": "bad"},
{
"filetpye": "bdist_wheel",
"pyversion": "3.4",
"md5_digest": "bad",
"sha256_digest": "bad",
},
],
)
def test_full_validate_valid(self, data):
form = legacy.MetadataForm(MultiDict(data))
form.full_validate()
@pytest.mark.parametrize(
"data", [{"filetype": "sdist", "pyversion": "3.4"}, {"filetype": "bdist_wheel"}]
)
def test_full_validate_invalid(self, data):
form = legacy.MetadataForm(MultiDict(data))
with pytest.raises(ValidationError):
form.full_validate()
def test_requires_python(self):
form = legacy.MetadataForm(MultiDict({"requires_python": ">= 3.5"}))
form.requires_python.validate(form)
class TestFileValidation:
def test_defaults_to_true(self):
assert legacy._is_valid_dist_file("", "")
@pytest.mark.parametrize(
("filename", "filetype"),
[("test.exe", "bdist_msi"), ("test.msi", "bdist_wininst")],
)
def test_bails_with_invalid_package_type(self, filename, filetype):
assert not legacy._is_valid_dist_file(filename, filetype)
@pytest.mark.parametrize(
("filename", "filetype"),
[
("test.exe", "bdist_wininst"),
("test.zip", "sdist"),
("test.egg", "bdist_egg"),
("test.whl", "bdist_wheel"),
],
)
def test_bails_with_invalid_zipfile(self, tmpdir, filename, filetype):
f = str(tmpdir.join(filename))
with open(f, "wb") as fp:
fp.write(b"this isn't a valid zip file")
assert not legacy._is_valid_dist_file(f, filetype)
def test_wininst_unsafe_filename(self, tmpdir):
f = str(tmpdir.join("test.exe"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something/bar.py", b"the test file")
assert not legacy._is_valid_dist_file(f, "bdist_wininst")
def test_wininst_safe_filename(self, tmpdir):
f = str(tmpdir.join("test.exe"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("purelib/bar.py", b"the test file")
assert legacy._is_valid_dist_file(f, "bdist_wininst")
def test_msi_invalid_header(self, tmpdir):
f = str(tmpdir.join("test.msi"))
with open(f, "wb") as fp:
fp.write(b"this isn't the correct header for an msi")
assert not legacy._is_valid_dist_file(f, "bdist_msi")
def test_msi_valid_header(self, tmpdir):
f = str(tmpdir.join("test.msi"))
with open(f, "wb") as fp:
fp.write(b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1")
assert legacy._is_valid_dist_file(f, "bdist_msi")
def test_zip_no_pkg_info(self, tmpdir):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
assert not legacy._is_valid_dist_file(f, "sdist")
def test_zip_has_pkg_info(self, tmpdir):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
zfp.writestr("PKG-INFO", b"this is the package info")
assert legacy._is_valid_dist_file(f, "sdist")
def test_zipfile_supported_compression(self, tmpdir):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("PKG-INFO", b"this is the package info")
zfp.writestr("1.txt", b"1", zipfile.ZIP_STORED)
zfp.writestr("2.txt", b"2", zipfile.ZIP_DEFLATED)
assert legacy._is_valid_dist_file(f, "")
@pytest.mark.parametrize("method", [zipfile.ZIP_BZIP2, zipfile.ZIP_LZMA])
def test_zipfile_unsupported_compression(self, tmpdir, method):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("1.txt", b"1", zipfile.ZIP_STORED)
zfp.writestr("2.txt", b"2", zipfile.ZIP_DEFLATED)
zfp.writestr("3.txt", b"3", method)
assert not legacy._is_valid_dist_file(f, "")
def test_egg_no_pkg_info(self, tmpdir):
f = str(tmpdir.join("test.egg"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
assert not legacy._is_valid_dist_file(f, "bdist_egg")
def test_egg_has_pkg_info(self, tmpdir):
f = str(tmpdir.join("test.egg"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
zfp.writestr("PKG-INFO", b"this is the package info")
assert legacy._is_valid_dist_file(f, "bdist_egg")
def test_wheel_no_wheel_file(self, tmpdir):
f = str(tmpdir.join("test.whl"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
assert not legacy._is_valid_dist_file(f, "bdist_wheel")
def test_wheel_has_wheel_file(self, tmpdir):
f = str(tmpdir.join("test.whl"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
zfp.writestr("WHEEL", b"this is the package info")
assert legacy._is_valid_dist_file(f, "bdist_wheel")
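# Editorial summary (inferred from the tests above, not part of the original module):
# _is_valid_dist_file defaults to accepting unknown cases, but rejects files whose
# extension does not match the declared filetype, sdists/eggs that are not readable
# zips containing PKG-INFO, wheels missing a WHEEL metadata file, bdist_wininst
# archives with members outside the recognised prefixes (e.g. purelib/), bdist_msi
# files lacking the OLE compound-document magic header, and zip archives using any
# compression method other than stored or deflated.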
class TestIsDuplicateFile:
def test_is_duplicate_true(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
file_content = io.BytesIO(b"A fake file.")
file_value = file_content.getvalue()
hashes = {
"sha256": hashlib.sha256(file_value).hexdigest(),
"md5": hashlib.md5(file_value).hexdigest(),
"blake2_256": hashlib.blake2b(file_value, digest_size=256 // 8).hexdigest(),
}
db_request.db.add(
File(
release=release,
filename=filename,
md5_digest=hashes["md5"],
sha256_digest=hashes["sha256"],
blake2_256_digest=hashes["blake2_256"],
path="source/{name[0]}/{name}/{filename}".format(
name=project.name, filename=filename
),
)
)
assert legacy._is_duplicate_file(db_request.db, filename, hashes)
def test_is_duplicate_none(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
requested_file_name = "{}-{}-1.tar.gz".format(project.name, release.version)
file_content = io.BytesIO(b"A fake file.")
file_value = file_content.getvalue()
hashes = {
"sha256": hashlib.sha256(file_value).hexdigest(),
"md5": hashlib.md5(file_value).hexdigest(),
"blake2_256": hashlib.blake2b(file_value, digest_size=256 // 8).hexdigest(),
}
db_request.db.add(
File(
release=release,
filename=filename,
md5_digest=hashes["md5"],
sha256_digest=hashes["sha256"],
blake2_256_digest=hashes["blake2_256"],
path="source/{name[0]}/{name}/{filename}".format(
name=project.name, filename=filename
),
)
)
hashes["blake2_256"] = "another blake2 digest"
assert (
legacy._is_duplicate_file(db_request.db, requested_file_name, hashes)
is None
)
def test_is_duplicate_false_same_blake2(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
requested_file_name = "{}-{}-1.tar.gz".format(project.name, release.version)
file_content = io.BytesIO(b"A fake file.")
file_value = file_content.getvalue()
hashes = {
"sha256": hashlib.sha256(file_value).hexdigest(),
"md5": hashlib.md5(file_value).hexdigest(),
"blake2_256": hashlib.blake2b(file_value, digest_size=256 // 8).hexdigest(),
}
db_request.db.add(
File(
release=release,
filename=filename,
md5_digest=hashes["md5"],
sha256_digest=hashes["sha256"],
blake2_256_digest=hashes["blake2_256"],
path="source/{name[0]}/{name}/{filename}".format(
name=project.name, filename=filename
),
)
)
assert (
legacy._is_duplicate_file(db_request.db, requested_file_name, hashes)
is False
)
def test_is_duplicate_false(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
file_content = io.BytesIO(b"A fake file.")
file_value = file_content.getvalue()
hashes = {
"sha256": hashlib.sha256(file_value).hexdigest(),
"md5": hashlib.md5(file_value).hexdigest(),
"blake2_256": hashlib.blake2b(file_value, digest_size=256 // 8).hexdigest(),
}
wrong_hashes = {"sha256": "nah", "md5": "nope", "blake2_256": "nuh uh"}
db_request.db.add(
File(
release=release,
filename=filename,
md5_digest=hashes["md5"],
sha256_digest=hashes["sha256"],
blake2_256_digest=hashes["blake2_256"],
path="source/{name[0]}/{name}/{filename}".format(
name=project.name, filename=filename
),
)
)
assert legacy._is_duplicate_file(db_request.db, filename, wrong_hashes) is False
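# Editorial summary (inferred from the tests above, not part of the original module):
# _is_duplicate_file appears to return True when a stored file matches both the
# requested filename and all of the md5/sha256/blake2_256 digests (a harmless
# re-upload), False when there is a conflict (same filename with different hashes, or
# the same hashes under a different filename), and None when no related record exists.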
class TestFileUpload:
@pytest.mark.parametrize("version", ["2", "3", "-1", "0", "dog", "cat"])
def test_fails_invalid_version(self, pyramid_config, pyramid_request, version):
pyramid_config.testing_securitypolicy(userid=1)
pyramid_request.POST["protocol_version"] = version
pyramid_request.flags = pretend.stub(enabled=lambda *a: False)
pyramid_request.user = pretend.stub(primary_email=pretend.stub(verified=True))
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(pyramid_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 Unknown protocol version."
@pytest.mark.parametrize(
("post_data", "message"),
[
# metadata_version errors.
(
{},
"'' is an invalid value for Metadata-Version. "
"Error: This field is required. "
"See "
"https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "-1"},
"'-1' is an invalid value for Metadata-Version. "
"Error: Use a known metadata version. "
"See "
"https://packaging.python.org/specifications/core-metadata",
),
# name errors.
(
{"metadata_version": "1.2"},
"'' is an invalid value for Name. "
"Error: This field is required. "
"See "
"https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "1.2", "name": "foo-"},
"'foo-' is an invalid value for Name. "
"Error: Start and end with a letter or numeral containing "
"only ASCII numeric and '.', '_' and '-'. "
"See "
"https://packaging.python.org/specifications/core-metadata",
),
# version errors.
(
{"metadata_version": "1.2", "name": "example"},
"'' is an invalid value for Version. "
"Error: This field is required. "
"See "
"https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "1.2", "name": "example", "version": "dog"},
"'dog' is an invalid value for Version. "
"Error: Start and end with a letter or numeral "
"containing only ASCII numeric and '.', '_' and '-'. "
"See "
"https://packaging.python.org/specifications/core-metadata",
),
# filetype/pyversion errors.
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"md5_digest": "bad",
},
"Invalid value for filetype. Error: This field is required.",
),
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "bdist_wat",
},
"Error: Python version is required for binary distribution " "uploads.",
),
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "bdist_wat",
"pyversion": "1.0",
"md5_digest": "bad",
},
"Invalid value for filetype. Error: Use a known file type.",
),
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"pyversion": "1.0",
},
"Error: Use 'source' as Python version for an sdist.",
),
# digest errors.
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
},
"Error: Include at least one message digest.",
),
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"sha256_digest": "an invalid sha256 digest",
},
"Invalid value for sha256_digest. "
"Error: Use a valid, hex-encoded, SHA256 message digest.",
),
# summary errors
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
"summary": "A" * 513,
},
"'" + "A" * 513 + "' is an invalid value for Summary. "
"Error: Field cannot be longer than 512 characters. "
"See "
"https://packaging.python.org/specifications/core-metadata",
),
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
"summary": "A\nB",
},
(
"{!r} is an invalid value for Summary. ".format("A\nB")
+ "Error: Use a single line only. "
"See "
"https://packaging.python.org/specifications/core-metadata"
),
),
# classifiers are a FieldStorage
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"classifiers": FieldStorage(),
},
"classifiers: Should not be a tuple.",
),
# keywords are a FieldStorage
(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"keywords": FieldStorage(),
},
"keywords: Should not be a tuple.",
),
],
)
def test_fails_invalid_post_data(
self, pyramid_config, db_request, post_data, message
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
db_request.POST = MultiDict(post_data)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 {}".format(message)
@pytest.mark.parametrize("name", ["requirements.txt", "rrequirements.txt"])
def test_fails_with_invalid_names(self, pyramid_config, db_request, name):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": name,
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
"content": pretend.stub(
filename=f"{name}-1.0.tar.gz",
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [pretend.call(_anchor="project-name")]
assert resp.status_code == 400
assert resp.status == (
"400 The name {!r} isn't allowed. "
"See /the/help/url/ "
"for more information."
).format(name)
@pytest.mark.parametrize(
("description_content_type", "description", "message"),
[
(
"text/x-rst",
".. invalid-directive::",
"400 The description failed to render for 'text/x-rst'. "
"See /the/help/url/ for more information.",
),
(
"",
".. invalid-directive::",
"400 The description failed to render in the default format "
"of reStructuredText. "
"See /the/help/url/ for more information.",
),
],
)
def test_fails_invalid_render(
self, pyramid_config, db_request, description_content_type, description, message
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
db_request.remote_addr = "10.10.10.30"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
"content": pretend.stub(
filename="example-1.0.tar.gz",
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
"description_content_type": description_content_type,
"description": description,
}
)
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [
pretend.call(_anchor="description-content-type")
]
assert resp.status_code == 400
assert resp.status == message
@pytest.mark.parametrize(
"name",
[
"xml",
"XML",
"pickle",
"PiCKle",
"main",
"future",
"al",
"uU",
"test",
"encodings.utf_8_sig",
"distutils.command.build_clib",
"xmlrpc",
"xmlrpc.server",
"xml.etree",
"xml.etree.ElementTree",
"xml.parsers",
"xml.parsers.expat",
"xml.parsers.expat.errors",
"encodings.idna",
"encodings",
"CGIHTTPServer",
"cgihttpserver",
],
)
def test_fails_with_stdlib_names(self, pyramid_config, db_request, name):
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
pyramid_config.testing_securitypolicy(userid=1)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": name,
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
"content": pretend.stub(
filename=f"{name}-1.0.tar.gz",
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [pretend.call(_anchor="project-name")]
assert resp.status_code == 400
assert resp.status == (
(
"400 The name {!r} isn't allowed (conflict "
"with Python Standard Library module name). "
"See /the/help/url/ "
"for more information."
)
).format(name)
def test_fails_with_admin_flag_set(self, pyramid_config, db_request):
admin_flag = (
db_request.db.query(AdminFlag)
.filter(AdminFlag.id == "disallow-new-project-registration")
.first()
)
admin_flag.enabled = True
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
name = "fails-with-admin-flag"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": name,
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
"content": pretend.stub(
filename=f"{name}-1.0.tar.gz",
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPForbidden) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 403
assert resp.status == (
"403 New project registration temporarily "
"disabled. See "
"/the/help/url/ for "
"details"
)
def test_upload_fails_without_file(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 Upload payload does not have a file."
@pytest.mark.parametrize("value", [("UNKNOWN"), ("UNKNOWN\n\n")])
def test_upload_cleans_unknown_values(self, pyramid_config, db_request, value):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": value,
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
}
)
with pytest.raises(HTTPBadRequest):
legacy.file_upload(db_request)
assert "name" not in db_request.POST
def test_upload_escapes_nul_characters(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": "testing",
"summary": "I want to go to the \x00",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "a fake md5 digest",
}
)
with pytest.raises(HTTPBadRequest):
legacy.file_upload(db_request)
assert "\x00" not in db_request.POST["summary"]
@pytest.mark.parametrize(
("has_signature", "digests"),
[
(True, {"md5_digest": "335c476dc930b959dda9ec82bd65ef19"}),
(
True,
{
"sha256_digest": (
"4a8422abcc484a4086bdaa618c65289f749433b07eb433c51c4e3"
"77143ff5fdb"
)
},
),
(False, {"md5_digest": "335c476dc930b959dda9ec82bd65ef19"}),
(
False,
{
"sha256_digest": (
"4a8422abcc484a4086bdaa618c65289f749433b07eb433c51c4e3"
"77143ff5fdb"
)
},
),
(
True,
{
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"sha256_digest": (
"4a8422abcc484a4086bdaa618c65289f749433b07eb433c51c4e3"
"77143ff5fdb"
),
},
),
(
False,
{
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"sha256_digest": (
"4a8422abcc484a4086bdaa618c65289f749433b07eb433c51c4e3"
"77143ff5fdb"
),
},
),
],
)
def test_successful_upload(
self, tmpdir, monkeypatch, pyramid_config, db_request, has_signature, digests
):
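        # Point tempfile at pytest's tmpdir so upload staging files stay inside the test sandbox.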
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
db_request.db.add(Classifier(classifier="Environment :: Other Environment"))
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.user = user
db_request.remote_addr = "10.10.10.40"
db_request.user_agent = "warehouse-tests/6.6.6"
content = FieldStorage()
content.filename = filename
content.file = io.BytesIO(b"A fake file.")
content.type = "application/tar"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"pyversion": "source",
"content": content,
"description": "an example description",
}
)
db_request.POST.extend([("classifiers", "Environment :: Other Environment")])
db_request.POST.update(digests)
if has_signature:
gpg_signature = FieldStorage()
gpg_signature.filename = filename + ".asc"
gpg_signature.file = io.BytesIO(
b"-----BEGIN PGP SIGNATURE-----\n" b" This is a Fake Signature"
)
db_request.POST["gpg_signature"] = gpg_signature
assert isinstance(db_request.POST["gpg_signature"], FieldStorage)
@pretend.call_recorder
def storage_service_store(path, file_path, *, meta):
if file_path.endswith(".asc"):
expected = (
b"-----BEGIN PGP SIGNATURE-----\n" b" This is a Fake Signature"
)
else:
expected = b"A fake file."
with open(file_path, "rb") as fp:
assert fp.read() == expected
storage_service = pretend.stub(store=storage_service_store)
db_request.find_service = pretend.call_recorder(
lambda svc, name=None: storage_service
)
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
assert db_request.find_service.calls == [pretend.call(IFileStorage)]
        assert len(storage_service.store.calls) == (2 if has_signature else 1)
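        # The storage path below is derived from the blake2_256 digest of the uploaded content.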
assert storage_service.store.calls[0] == pretend.call(
"/".join(
[
"4e",
"6e",
"fa4c0ee2bbad071b4f5b5ea68f1aea89fa716e7754eb13e2314d45a5916e",
filename,
]
),
mock.ANY,
meta={
"project": project.normalized_name,
"version": release.version,
"package-type": "sdist",
"python-version": "source",
},
)
if has_signature:
assert storage_service.store.calls[1] == pretend.call(
"/".join(
[
"4e",
"6e",
(
"fa4c0ee2bbad071b4f5b5ea68f1aea89fa716e7754eb13e2314d"
"45a5916e"
),
filename + ".asc",
]
),
mock.ANY,
meta={
"project": project.normalized_name,
"version": release.version,
"package-type": "sdist",
"python-version": "source",
},
)
# Ensure that a File object has been created.
uploaded_file = (
db_request.db.query(File)
.filter((File.release == release) & (File.filename == filename))
.one()
)
assert uploaded_file.uploaded_via == "warehouse-tests/6.6.6"
# Ensure that a Filename object has been created.
db_request.db.query(Filename).filter(Filename.filename == filename).one()
# Ensure that all of our journal entries have been created
journals = (
db_request.db.query(JournalEntry)
.options(joinedload("submitted_by"))
.order_by("submitted_date", "id")
.all()
)
assert [
(j.name, j.version, j.action, j.submitted_by, j.submitted_from)
for j in journals
] == [
(
release.project.name,
release.version,
"add source file {}".format(filename),
user,
"10.10.10.40",
)
]
@pytest.mark.parametrize("content_type", [None, "image/foobar"])
    def test_upload_fails_invalid_content_type(
self, tmpdir, monkeypatch, pyramid_config, db_request, content_type
):
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
db_request.db.add(Classifier(classifier="Environment :: Other Environment"))
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"pyversion": "source",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type=content_type,
),
}
)
db_request.POST.extend([("classifiers", "Environment :: Other Environment")])
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 Invalid distribution file."
def test_upload_fails_with_legacy_type(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "bdist_dumb",
"pyversion": "2.7",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 Unknown type of file."
def test_upload_fails_with_legacy_ext(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
db_request.user = user
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.bz2".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == (
"400 Invalid file extension: Use .egg, .tar.gz, .whl or .zip "
"extension. (https://www.python.org/dev/peps/pep-0527)"
)
def test_upload_fails_for_second_sdist(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
FileFactory.create(
release=release,
packagetype="sdist",
filename="{}-{}.tar.gz".format(project.name, release.version),
)
RoleFactory.create(user=user, project=project)
filename = "{}-{}.zip".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/zip",
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 Only one sdist may be uploaded per release."
@pytest.mark.parametrize("sig", [b"lol nope"])
def test_upload_fails_with_invalid_signature(self, pyramid_config, db_request, sig):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
"gpg_signature": pretend.stub(
filename=filename + ".asc", file=io.BytesIO(sig)
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 PGP signature isn't ASCII armored."
def test_upload_fails_with_invalid_classifier(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
db_request.POST.extend([("classifiers", "Environment :: Other Environment")])
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == (
"400 Invalid value for classifiers. "
"Error: 'Environment :: Other Environment' is not a valid choice "
"for this field"
)
def test_upload_fails_with_deprecated_classifier(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
classifier = ClassifierFactory(classifier="AA :: BB", deprecated=True)
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
db_request.POST.extend([("classifiers", classifier.classifier)])
db_request.route_url = pretend.call_recorder(lambda *a, **kw: "/url")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == (
"400 Invalid value for classifiers. "
"Error: Classifier 'AA :: BB' has been deprecated, see /url "
"for a list of valid classifiers."
)
@pytest.mark.parametrize(
"digests",
[
{"md5_digest": "bad"},
{
"sha256_digest": (
"badbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbad"
"badbadb"
)
},
{
"md5_digest": "bad",
"sha256_digest": (
"badbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbad"
"badbadb"
),
},
{
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"sha256_digest": (
"badbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbad"
"badbadb"
),
},
{
"md5_digest": "bad",
"sha256_digest": (
"4a8422abcc484a4086bdaa618c65289f749433b07eb433c51c4e37714"
"3ff5fdb"
),
},
],
)
def test_upload_fails_with_invalid_digest(
self, pyramid_config, db_request, digests
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
db_request.POST.update(digests)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == (
"400 The digest supplied does not match a digest calculated "
"from the uploaded file."
)
def test_upload_fails_with_invalid_file(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.zip".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "0cc175b9c0f1b6a831c399e269772661",
"content": pretend.stub(
filename=filename, file=io.BytesIO(b"a"), type="application/zip"
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 Invalid distribution file."
def test_upload_fails_with_too_large_file(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create(
name="foobar", upload_limit=(60 * 1024 * 1024) # 60 MB
)
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "nope!",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"a" * (project.upload_limit + 1)),
type="application/tar",
),
}
)
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [pretend.call(_anchor="file-size-limit")]
assert resp.status_code == 400
assert resp.status == (
"400 File too large. Limit for project 'foobar' is 60 MB. "
"See /the/help/url/"
)
def test_upload_fails_with_too_large_signature(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "0cc175b9c0f1b6a831c399e269772661",
"content": pretend.stub(
filename=filename, file=io.BytesIO(b"a"), type="application/tar"
),
"gpg_signature": pretend.stub(
filename=filename + ".asc",
file=io.BytesIO(b"a" * (legacy.MAX_FILESIZE + 1)),
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 Signature too large."
def test_upload_fails_with_previously_used_filename(
self, pyramid_config, db_request
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
file_content = io.BytesIO(b"A fake file.")
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": hashlib.md5(file_content.getvalue()).hexdigest(),
"content": pretend.stub(
filename=filename, file=file_content, type="application/tar"
),
}
)
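        # Pre-register the filename so the upload trips the filename-reuse check.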
db_request.db.add(Filename(filename=filename))
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [pretend.call(_anchor="file-name-reuse")]
assert resp.status_code == 400
assert resp.status == (
"400 This filename has already been used, use a "
"different version. "
"See /the/help/url/"
)
def test_upload_noop_with_existing_filename_same_content(
self, pyramid_config, db_request
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
file_content = io.BytesIO(b"A fake file.")
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": hashlib.md5(file_content.getvalue()).hexdigest(),
"content": pretend.stub(
filename=filename, file=file_content, type="application/tar"
),
}
)
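        # Seed an existing File row with matching digests so the re-upload is treated as a no-op.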
db_request.db.add(
File(
release=release,
filename=filename,
md5_digest=hashlib.md5(file_content.getvalue()).hexdigest(),
sha256_digest=hashlib.sha256(file_content.getvalue()).hexdigest(),
blake2_256_digest=hashlib.blake2b(
file_content.getvalue(), digest_size=256 // 8
).hexdigest(),
path="source/{name[0]}/{name}/{filename}".format(
name=project.name, filename=filename
),
)
)
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
def test_upload_fails_with_existing_filename_diff_content(
self, pyramid_config, db_request
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
file_content = io.BytesIO(b"A fake file.")
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": hashlib.md5(file_content.getvalue()).hexdigest(),
"content": pretend.stub(
filename=filename, file=file_content, type="application/tar"
),
}
)
db_request.db.add(
File(
release=release,
filename=filename,
md5_digest=hashlib.md5(filename.encode("utf8")).hexdigest(),
sha256_digest=hashlib.sha256(filename.encode("utf8")).hexdigest(),
blake2_256_digest=hashlib.blake2b(
filename.encode("utf8"), digest_size=256 // 8
).hexdigest(),
path="source/{name[0]}/{name}/{filename}".format(
name=project.name, filename=filename
),
)
)
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [pretend.call(_anchor="file-name-reuse")]
assert resp.status_code == 400
assert resp.status == "400 File already exists. See /the/help/url/"
def test_upload_fails_with_diff_filename_same_blake2(
self, pyramid_config, db_request
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
file_content = io.BytesIO(b"A fake file.")
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": hashlib.md5(file_content.getvalue()).hexdigest(),
"content": pretend.stub(
filename="{}-fake.tar.gz".format(project.name),
file=file_content,
type="application/tar",
),
}
)
db_request.db.add(
File(
release=release,
filename=filename,
md5_digest=hashlib.md5(file_content.getvalue()).hexdigest(),
sha256_digest=hashlib.sha256(file_content.getvalue()).hexdigest(),
blake2_256_digest=hashlib.blake2b(
file_content.getvalue(), digest_size=256 // 8
).hexdigest(),
path="source/{name[0]}/{name}/{filename}".format(
name=project.name, filename=filename
),
)
)
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [pretend.call(_anchor="file-name-reuse")]
assert resp.status_code == 400
assert resp.status == "400 File already exists. See /the/help/url/"
def test_upload_fails_with_wrong_filename(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "nope-{}.tar.gz".format(release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "nope!",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"a" * (legacy.MAX_FILESIZE + 1)),
type="application/tar",
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == (
"400 Start filename for {!r} with {!r}.".format(
project.name, pkg_resources.safe_name(project.name).lower()
)
)
def test_upload_fails_with_invalid_extension(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.wat".format(project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "nope!",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"a" * (legacy.MAX_FILESIZE + 1)),
type="application/tar",
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == (
"400 Invalid file extension: Use .egg, .tar.gz, .whl or .zip "
"extension. (https://www.python.org/dev/peps/pep-0527)"
)
@pytest.mark.parametrize("character", ["/", "\\"])
def test_upload_fails_with_unsafe_filename(
self, pyramid_config, db_request, character
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.wat".format(character + project.name, release.version)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "nope!",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"a" * (legacy.MAX_FILESIZE + 1)),
type="application/tar",
),
}
)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert resp.status == "400 Cannot upload a file with '/' or '\\' in the name."
def test_upload_fails_without_permission(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1, permissive=False)
user1 = UserFactory.create()
EmailFactory.create(user=user1)
user2 = UserFactory.create()
EmailFactory.create(user=user2)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user1, project=project)
filename = "{}-{}.tar.wat".format(project.name, release.version)
db_request.user = user2
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"md5_digest": "nope!",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"a" * (legacy.MAX_FILESIZE + 1)),
type="application/tar",
),
}
)
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPForbidden) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [pretend.call(_anchor="project-name")]
assert resp.status_code == 403
assert resp.status == (
"403 The user '{0}' isn't allowed to upload to project '{1}'. "
"See /the/help/url/ for more information."
).format(user2.username, project.name)
@pytest.mark.parametrize(
"plat",
[
"any",
"win32",
"win_amd64",
"win_ia64",
"manylinux1_i686",
"manylinux1_x86_64",
"manylinux2010_i686",
"manylinux2010_x86_64",
"macosx_10_6_intel",
"macosx_10_13_x86_64",
# A real tag used by e.g. some numpy wheels
(
"macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64."
"macosx_10_10_intel.macosx_10_10_x86_64"
),
],
)
def test_upload_succeeds_with_wheel(
self, tmpdir, monkeypatch, pyramid_config, db_request, plat
):
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}-cp34-none-{}.whl".format(project.name, release.version, plat)
db_request.user = user
db_request.remote_addr = "10.10.10.30"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "bdist_wheel",
"pyversion": "cp34",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
@pretend.call_recorder
def storage_service_store(path, file_path, *, meta):
with open(file_path, "rb") as fp:
assert fp.read() == b"A fake file."
storage_service = pretend.stub(store=storage_service_store)
db_request.find_service = pretend.call_recorder(
lambda svc, name=None: storage_service
)
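        # Stub out the real distribution-file validation so the fake wheel bytes are accepted.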
monkeypatch.setattr(legacy, "_is_valid_dist_file", lambda *a, **kw: True)
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
assert db_request.find_service.calls == [pretend.call(IFileStorage)]
assert storage_service.store.calls == [
pretend.call(
"/".join(
[
"4e",
"6e",
(
"fa4c0ee2bbad071b4f5b5ea68f1aea89fa716e7754eb13e2314d4"
"5a5916e"
),
filename,
]
),
mock.ANY,
meta={
"project": project.normalized_name,
"version": release.version,
"package-type": "bdist_wheel",
"python-version": "cp34",
},
)
]
# Ensure that a File object has been created.
db_request.db.query(File).filter(
(File.release == release) & (File.filename == filename)
).one()
# Ensure that a Filename object has been created.
db_request.db.query(Filename).filter(Filename.filename == filename).one()
# Ensure that all of our journal entries have been created
journals = (
db_request.db.query(JournalEntry)
.options(joinedload("submitted_by"))
.order_by("submitted_date", "id")
.all()
)
assert [
(j.name, j.version, j.action, j.submitted_by, j.submitted_from)
for j in journals
] == [
(
release.project.name,
release.version,
"add cp34 file {}".format(filename),
user,
"10.10.10.30",
)
]
def test_upload_succeeds_with_wheel_after_sdist(
self, tmpdir, monkeypatch, pyramid_config, db_request
):
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
FileFactory.create(
release=release,
packagetype="sdist",
filename="{}-{}.tar.gz".format(project.name, release.version),
)
RoleFactory.create(user=user, project=project)
filename = "{}-{}-cp34-none-any.whl".format(project.name, release.version)
db_request.user = user
db_request.remote_addr = "10.10.10.30"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "bdist_wheel",
"pyversion": "cp34",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
@pretend.call_recorder
def storage_service_store(path, file_path, *, meta):
with open(file_path, "rb") as fp:
assert fp.read() == b"A fake file."
storage_service = pretend.stub(store=storage_service_store)
db_request.find_service = pretend.call_recorder(
lambda svc, name=None: storage_service
)
monkeypatch.setattr(legacy, "_is_valid_dist_file", lambda *a, **kw: True)
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
assert db_request.find_service.calls == [pretend.call(IFileStorage)]
assert storage_service.store.calls == [
pretend.call(
"/".join(
[
"4e",
"6e",
(
"fa4c0ee2bbad071b4f5b5ea68f1aea89fa716e7754eb13e2314d4"
"5a5916e"
),
filename,
]
),
mock.ANY,
meta={
"project": project.normalized_name,
"version": release.version,
"package-type": "bdist_wheel",
"python-version": "cp34",
},
)
]
# Ensure that a File object has been created.
db_request.db.query(File).filter(
(File.release == release) & (File.filename == filename)
).one()
# Ensure that a Filename object has been created.
db_request.db.query(Filename).filter(Filename.filename == filename).one()
# Ensure that all of our journal entries have been created
journals = (
db_request.db.query(JournalEntry)
.options(joinedload("submitted_by"))
.order_by("submitted_date", "id")
.all()
)
assert [
(j.name, j.version, j.action, j.submitted_by, j.submitted_from)
for j in journals
] == [
(
release.project.name,
release.version,
"add cp34 file {}".format(filename),
user,
"10.10.10.30",
)
]
def test_upload_succeeds_with_legacy_ext(
self, tmpdir, monkeypatch, pyramid_config, db_request
):
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create(allow_legacy_files=True)
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.bz2".format(project.name, release.version)
db_request.user = user
db_request.remote_addr = "10.10.10.30"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "sdist",
"pyversion": "source",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
def storage_service_store(path, file_path, *, meta):
with open(file_path, "rb") as fp:
assert fp.read() == b"A fake file."
storage_service = pretend.stub(store=storage_service_store)
db_request.find_service = lambda svc, name=None: storage_service
monkeypatch.setattr(legacy, "_is_valid_dist_file", lambda *a, **kw: True)
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
def test_upload_succeeds_with_legacy_type(
self, tmpdir, monkeypatch, pyramid_config, db_request
):
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create(allow_legacy_files=True)
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}.tar.gz".format(project.name, release.version)
db_request.user = user
db_request.remote_addr = "10.10.10.30"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "bdist_dumb",
"pyversion": "3.5",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
def storage_service_store(path, file_path, *, meta):
with open(file_path, "rb") as fp:
assert fp.read() == b"A fake file."
storage_service = pretend.stub(store=storage_service_store)
db_request.find_service = lambda svc, name=None: storage_service
monkeypatch.setattr(legacy, "_is_valid_dist_file", lambda *a, **kw: True)
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
@pytest.mark.parametrize("plat", ["linux_x86_64", "linux_x86_64.win32"])
def test_upload_fails_with_unsupported_wheel_plat(
self, monkeypatch, pyramid_config, db_request, plat
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
db_request.user = user
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
filename = "{}-{}-cp34-none-{}.whl".format(project.name, release.version, plat)
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": release.version,
"filetype": "bdist_wheel",
"pyversion": "cp34",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
monkeypatch.setattr(legacy, "_is_valid_dist_file", lambda *a, **kw: True)
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert resp.status_code == 400
assert re.match(
"400 Binary wheel .* has an unsupported " "platform tag .*", resp.status
)
def test_upload_updates_existing_project_name(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create(name="Package-Name")
RoleFactory.create(user=user, project=project)
new_project_name = "package-name"
filename = "{}-{}.tar.gz".format(new_project_name, "1.1")
db_request.user = user
db_request.remote_addr = "10.10.10.20"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.1",
"name": new_project_name,
"version": "1.1",
"summary": "This is my summary!",
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
storage_service = pretend.stub(store=lambda path, filepath, meta: None)
db_request.find_service = lambda svc, name=None: storage_service
db_request.remote_addr = "10.10.10.10"
db_request.user_agent = "warehouse-tests/6.6.6"
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
# Ensure that a Project object name has been updated.
project = (
db_request.db.query(Project).filter(Project.name == new_project_name).one()
)
# Ensure that a Release object has been created.
release = (
db_request.db.query(Release)
.filter((Release.project == project) & (Release.version == "1.1"))
.one()
)
assert release.uploaded_via == "warehouse-tests/6.6.6"
def test_upload_succeeds_creates_release(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
RoleFactory.create(user=user, project=project)
db_request.db.add(Classifier(classifier="Environment :: Other Environment"))
db_request.db.add(Classifier(classifier="Programming Language :: Python"))
filename = "{}-{}.tar.gz".format(project.name, "1.0")
db_request.user = user
db_request.remote_addr = "10.10.10.20"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": "1.0",
"summary": "This is my summary!",
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
db_request.POST.extend(
[
("classifiers", "Environment :: Other Environment"),
("classifiers", "Programming Language :: Python"),
("requires_dist", "foo"),
("requires_dist", "bar (>1.0)"),
("project_urls", "Test, https://example.com/"),
("requires_external", "Cheese (>1.0)"),
("provides", "testing"),
]
)
storage_service = pretend.stub(store=lambda path, filepath, meta: None)
db_request.find_service = lambda svc, name=None: storage_service
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
# Ensure that a Release object has been created.
release = (
db_request.db.query(Release)
.filter((Release.project == project) & (Release.version == "1.0"))
.one()
)
assert release.summary == "This is my summary!"
assert release.classifiers == [
"Environment :: Other Environment",
"Programming Language :: Python",
]
assert set(release.requires_dist) == {"foo", "bar (>1.0)"}
assert set(release.project_urls) == {"Test, https://example.com/"}
assert set(release.requires_external) == {"Cheese (>1.0)"}
assert set(release.provides) == {"testing"}
assert release.canonical_version == "1"
assert release.uploaded_via == "warehouse-tests/6.6.6"
# Ensure that a File object has been created.
db_request.db.query(File).filter(
(File.release == release) & (File.filename == filename)
).one()
# Ensure that a Filename object has been created.
db_request.db.query(Filename).filter(Filename.filename == filename).one()
# Ensure that all of our journal entries have been created
journals = (
db_request.db.query(JournalEntry)
.options(joinedload("submitted_by"))
.order_by("submitted_date", "id")
.all()
)
assert [
(j.name, j.version, j.action, j.submitted_by, j.submitted_from)
for j in journals
] == [
(release.project.name, release.version, "new release", user, "10.10.10.20"),
(
release.project.name,
release.version,
"add source file {}".format(filename),
user,
"10.10.10.20",
),
]
def test_equivalent_version_one_release(self, pyramid_config, db_request):
"""
        Test that if a release with a version like '1.0' exists, a future
        upload with an equivalent version like '1.0.0' will not create a
        second release.
"""
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
RoleFactory.create(user=user, project=project)
db_request.user = user
db_request.remote_addr = "10.10.10.20"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": "1.0.0",
"summary": "This is my summary!",
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename="{}-{}.tar.gz".format(project.name, "1.0.0"),
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
storage_service = pretend.stub(store=lambda path, filepath, meta: None)
db_request.find_service = lambda svc, name=None: storage_service
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
# Ensure that a Release object has been created.
releases = db_request.db.query(Release).filter(Release.project == project).all()
        # Assert that only one release has been created
assert releases == [release]
def test_equivalent_canonical_versions(self, pyramid_config, db_request):
"""
Test that if more than one release with equivalent canonical versions
        exists, we use the one that is an exact match.
"""
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
project = ProjectFactory.create()
release_a = ReleaseFactory.create(project=project, version="1.0")
release_b = ReleaseFactory.create(project=project, version="1.0.0")
RoleFactory.create(user=user, project=project)
db_request.user = user
db_request.remote_addr = "10.10.10.20"
db_request.user_agent = "warehouse-tests/6.6.6"
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": project.name,
"version": "1.0.0",
"summary": "This is my summary!",
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename="{}-{}.tar.gz".format(project.name, "1.0.0"),
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
storage_service = pretend.stub(store=lambda path, filepath, meta: None)
db_request.find_service = lambda svc, name=None: storage_service
legacy.file_upload(db_request)
assert len(release_a.files.all()) == 0
assert len(release_b.files.all()) == 1
def test_upload_succeeds_creates_project(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
filename = "{}-{}.tar.gz".format("example", "1.0")
db_request.user = user
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
storage_service = pretend.stub(store=lambda path, filepath, meta: None)
db_request.find_service = lambda svc, name=None: storage_service
db_request.remote_addr = "10.10.10.10"
db_request.user_agent = "warehouse-tests/6.6.6"
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
# Ensure that a Project object has been created.
project = db_request.db.query(Project).filter(Project.name == "example").one()
# Ensure that a Role with the user as owner has been created.
role = (
db_request.db.query(Role)
.filter((Role.user == user) & (Role.project == project))
.one()
)
assert role.role_name == "Owner"
# Ensure that a Release object has been created.
release = (
db_request.db.query(Release)
.filter((Release.project == project) & (Release.version == "1.0"))
.one()
)
assert release.uploaded_via == "warehouse-tests/6.6.6"
# Ensure that a File object has been created.
db_request.db.query(File).filter(
(File.release == release) & (File.filename == filename)
).one()
# Ensure that a Filename object has been created.
db_request.db.query(Filename).filter(Filename.filename == filename).one()
# Ensure that all of our journal entries have been created
journals = (
db_request.db.query(JournalEntry)
.options(joinedload("submitted_by"))
.order_by("submitted_date", "id")
.all()
)
assert [
(j.name, j.version, j.action, j.submitted_by, j.submitted_from)
for j in journals
] == [
("example", None, "create", user, "10.10.10.10"),
(
"example",
None,
"add Owner {}".format(user.username),
user,
"10.10.10.10",
),
("example", "1.0", "new release", user, "10.10.10.10"),
(
"example",
"1.0",
"add source file example-1.0.tar.gz",
user,
"10.10.10.10",
),
]
def test_upload_succeeds_creates_squats(self, pyramid_config, db_request):
pyramid_config.testing_securitypolicy(userid=1)
squattee = ProjectFactory(name="example")
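        # The deliberately misspelled "exmaple" project created below simulates a typosquat of "example".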
user = UserFactory.create()
EmailFactory.create(user=user)
filename = "{}-{}.tar.gz".format("exmaple", "1.0")
db_request.user = user
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": "exmaple",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
storage_service = pretend.stub(store=lambda path, filepath, meta: None)
db_request.find_service = lambda svc, name=None: storage_service
db_request.remote_addr = "10.10.10.10"
db_request.user_agent = "warehouse-tests/6.6.6"
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
# Ensure that a Project object has been created.
squatter = db_request.db.query(Project).filter(Project.name == "exmaple").one()
# Ensure that a Squat object has been created.
squat = db_request.db.query(Squat).one()
assert squat.squattee == squattee
assert squat.squatter == squatter
assert squat.reviewed is False
@pytest.mark.parametrize(
("emails_verified", "expected_success"),
[
([], False),
([True], True),
([False], False),
([True, True], True),
([True, False], True),
([False, False], False),
([False, True], False),
],
)
def test_upload_requires_verified_email(
self, pyramid_config, db_request, emails_verified, expected_success
):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
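        # Only the first email is marked primary; the upload should succeed only when that primary address is verified.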
for i, verified in enumerate(emails_verified):
EmailFactory.create(user=user, verified=verified, primary=i == 0)
filename = "{}-{}.tar.gz".format("example", "1.0")
db_request.user = user
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
storage_service = pretend.stub(store=lambda path, filepath, meta: None)
db_request.find_service = lambda svc, name=None: storage_service
db_request.remote_addr = "10.10.10.10"
db_request.user_agent = "warehouse-tests/6.6.6"
if expected_success:
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
else:
db_request.help_url = pretend.call_recorder(lambda **kw: "/the/help/url/")
with pytest.raises(HTTPBadRequest) as excinfo:
legacy.file_upload(db_request)
resp = excinfo.value
assert db_request.help_url.calls == [pretend.call(_anchor="verified-email")]
assert resp.status_code == 400
assert resp.status == (
(
"400 User {!r} does not have a verified primary email "
"address. Please add a verified primary email before "
"attempting to upload to PyPI. See /the/help/url/ for "
"more information.for more information."
).format(user.username)
)
def test_upload_purges_legacy(self, pyramid_config, db_request, monkeypatch):
pyramid_config.testing_securitypolicy(userid=1)
user = UserFactory.create()
EmailFactory.create(user=user)
filename = "{}-{}.tar.gz".format("example", "1.0")
db_request.user = user
db_request.POST = MultiDict(
{
"metadata_version": "1.2",
"name": "example",
"version": "1.0",
"filetype": "sdist",
"md5_digest": "335c476dc930b959dda9ec82bd65ef19",
"content": pretend.stub(
filename=filename,
file=io.BytesIO(b"A fake file."),
type="application/tar",
),
}
)
storage_service = pretend.stub(store=lambda path, filepath, meta: None)
db_request.find_service = lambda svc, name=None: storage_service
db_request.remote_addr = "10.10.10.10"
db_request.user_agent = "warehouse-tests/6.6.6"
resp = legacy.file_upload(db_request)
assert resp.status_code == 200
def test_fails_in_read_only_mode(self, pyramid_request):
pyramid_request.flags = pretend.stub(enabled=lambda *a: True)
with pytest.raises(HTTPForbidden) as excinfo:
legacy.file_upload(pyramid_request)
resp = excinfo.value
assert resp.status_code == 403
assert resp.status == ("403 Read-only mode: Uploads are temporarily disabled")
def test_fails_without_user(self, pyramid_config, pyramid_request):
pyramid_request.flags = pretend.stub(enabled=lambda *a: False)
pyramid_config.testing_securitypolicy(userid=None)
with pytest.raises(HTTPForbidden) as excinfo:
legacy.file_upload(pyramid_request)
resp = excinfo.value
assert resp.status_code == 403
assert resp.status == (
"403 Invalid or non-existent authentication information."
)
@pytest.mark.parametrize("status", [True, False])
def test_legacy_purge(monkeypatch, status):
post = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(requests, "post", post)
legacy._legacy_purge(status, 1, 2, three=4)
if status:
assert post.calls == [pretend.call(1, 2, three=4)]
else:
assert post.calls == []
def test_submit(pyramid_request):
resp = legacy.submit(pyramid_request)
assert resp.status_code == 410
assert resp.status == (
"410 Project pre-registration is no longer required or supported, "
"upload your files instead."
)
def test_doc_upload(pyramid_request):
resp = legacy.doc_upload(pyramid_request)
assert resp.status_code == 410
assert resp.status == (
"410 Uploading documentation is no longer supported, we recommend "
"using https://readthedocs.org/."
)
| 35.571709
| 88
| 0.551245
|
de6cb412b2141ba59528962dab382c194281dd29
| 1,492
|
py
|
Python
|
mars/dataframe/datastore/tests/test_datastore.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | 2
|
2019-03-29T04:11:10.000Z
|
2020-07-08T10:19:54.000Z
|
mars/dataframe/datastore/tests/test_datastore.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/datastore/tests/test_datastore.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
from mars.dataframe import DataFrame
class Test(unittest.TestCase):
def testToCSV(self):
raw = pd.DataFrame(np.random.rand(10, 5))
df = DataFrame(raw, chunk_size=4)
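        # A wildcard path is expected to write one CSV per row chunk, collapsing the tiled result to a single chunk column.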
r = df.to_csv('*.csv')
r = r.tiles()
self.assertEqual(r.chunk_shape[1], 1)
for i, c in enumerate(r.chunks):
self.assertEqual(type(c.op).__name__, 'DataFrameToCSV')
self.assertIs(c.inputs[0], r.inputs[0].chunks[i].data)
# test one file
r = df.to_csv('out.csv')
r = r.tiles()
self.assertEqual(r.chunk_shape[1], 1)
for i, c in enumerate(r.chunks):
self.assertEqual(len(c.inputs), 2)
self.assertIs(c.inputs[0].inputs[0], r.inputs[0].chunks[i].data)
self.assertEqual(type(c.inputs[1].op).__name__, 'DataFrameToCSVStat')
| 33.155556
| 81
| 0.66555
|
6741de9b684be3a2e568402bafa66b7dcb630b41
| 1,160
|
py
|
Python
|
tools/cellml/CellML_Edit.py
|
globusgenomics/galaxy
|
7caf74d9700057587b3e3434c64e82c5b16540f1
|
[
"CC-BY-3.0"
] | 1
|
2021-02-05T13:19:58.000Z
|
2021-02-05T13:19:58.000Z
|
tools/cellml/CellML_Edit.py
|
globusgenomics/galaxy
|
7caf74d9700057587b3e3434c64e82c5b16540f1
|
[
"CC-BY-3.0"
] | null | null | null |
tools/cellml/CellML_Edit.py
|
globusgenomics/galaxy
|
7caf74d9700057587b3e3434c64e82c5b16540f1
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python
"""
Edits parameter values in a CellML model and writes the modified model out.
"""
import sys
import cgrspy.bootstrap
output_file = sys.argv[1]
input_file = sys.argv[2]
cgrspy.bootstrap.loadGenericModule('cgrs_cellml')
cellmlBootstrap = cgrspy.bootstrap.fetch('CreateCellMLBootstrap')
model = cellmlBootstrap.modelLoader.loadFromURL(input_file)
# Modify parameters:
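# The remaining command-line arguments are consumed in (component, variable, new value) triples.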
for i in range(3,len(sys.argv),3):
c = model.allComponents.getComponent(sys.argv[i])
if c:
v = c.variables.getVariable(sys.argv[i+1])
if v:
if v.initialValue:
if v.publicInterface != "in":
v.initialValue = sys.argv[i+2]
else:
sys.stderr.write("Error: Variable %s in component %s has public interface of type \"in\".\n" % (sys.argv[i+1],sys.argv[i]))
else:
sys.stderr.write("Error: Variable %s in component %s does not have an initial value attribute.\n" % (sys.argv[i+1],sys.argv[i]))
else:
sys.stderr.write("Error: Could not find model variable %s in component %s\n" % (sys.argv[i+1],sys.argv[i]))
else:
sys.stderr.write("Error: Could not find model component %s\n" % sys.argv[i])
# Write the modified model back out as XML
with open(output_file, 'w') as f:
    f.write(model.serialisedText)
| 30.526316
| 132
| 0.705172
|
6a73b289e1bd60f35737bd93340d5cdcf1608a80
| 2,995
|
py
|
Python
|
Products/CMFCore/exportimport/cookieauth.py
|
MatthewWilkes/Products.CMFCore
|
22cba1e66040053762e57ac3597409038898a65f
|
[
"ZPL-2.1"
] | null | null | null |
Products/CMFCore/exportimport/cookieauth.py
|
MatthewWilkes/Products.CMFCore
|
22cba1e66040053762e57ac3597409038898a65f
|
[
"ZPL-2.1"
] | null | null | null |
Products/CMFCore/exportimport/cookieauth.py
|
MatthewWilkes/Products.CMFCore
|
22cba1e66040053762e57ac3597409038898a65f
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Cookie crumbler xml adapters and setup handlers.
"""
from zope.component import adapts
from zope.component import getSiteManager
from Products.GenericSetup.interfaces import ISetupEnviron
from Products.GenericSetup.utils import exportObjects
from Products.GenericSetup.utils import importObjects
from Products.GenericSetup.utils import PropertyManagerHelpers
from Products.GenericSetup.utils import XMLAdapterBase
from Products.CMFCore.interfaces import ICookieCrumbler
class CookieCrumblerXMLAdapter(XMLAdapterBase, PropertyManagerHelpers):
"""XML im- and exporter for CookieCrumbler.
"""
adapts(ICookieCrumbler, ISetupEnviron)
_LOGGER_ID = 'cookies'
name = 'cookieauth'
def _exportNode(self):
"""Export the object as a DOM node.
"""
node = self._getObjectNode('object')
node.appendChild(self._extractProperties())
self._logger.info('Cookie crumbler exported.')
return node
def _importNode(self, node):
"""Import the object from the DOM node.
"""
if self.environ.shouldPurge():
self._purgeProperties()
self._migrateProperties(node)
self._initProperties(node)
self._logger.info('Cookie crumbler imported.')
def _migrateProperties(self, node):
# BBB: for CMF 2.2 settings
for child in node.childNodes:
if child.nodeName != 'property':
continue
if child.getAttribute('name') not in ('auto_login_page',
'unauth_page', 'logout_page'):
continue
node.removeChild(child)
child.unlink()
def importCookieCrumbler(context):
"""Import cookie crumbler settings from an XML file.
"""
sm = getSiteManager(context.getSite())
tool = sm.queryUtility(ICookieCrumbler)
if tool is None:
logger = context.getLogger('cookies')
logger.debug('Nothing to import.')
return
importObjects(tool, '', context)
def exportCookieCrumbler(context):
"""Export cookie crumbler settings as an XML file.
"""
sm = getSiteManager(context.getSite())
tool = sm.queryUtility(ICookieCrumbler)
if tool is None:
logger = context.getLogger('cookies')
logger.debug('Nothing to export.')
return
exportObjects(tool, '', context)
| 31.861702
| 78
| 0.64374
|
9cb4eb938e0953aabe16b845d327e1c8f85b7ffa
| 16,244
|
py
|
Python
|
test/functional/mempool_accept.py
|
wbubblerteam/bubcoin
|
0bb0d66397efcdacd782f64b3906d1acaed468cf
|
[
"MIT"
] | 1
|
2021-08-29T13:37:38.000Z
|
2021-08-29T13:37:38.000Z
|
test/functional/mempool_accept.py
|
wbubblerteam/bubcoin
|
0bb0d66397efcdacd782f64b3906d1acaed468cf
|
[
"MIT"
] | null | null | null |
test/functional/mempool_accept.py
|
wbubblerteam/bubcoin
|
0bb0d66397efcdacd782f64b3906d1acaed468cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from decimal import Decimal
from io import BytesIO
import math
from test_framework.test_framework import BubcoinTestFramework
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxOut,
MAX_BLOCK_BASE_SIZE,
MAX_MONEY,
)
from test_framework.script import (
hash160,
CScript,
OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
hex_str_to_bytes,
)
class MempoolAcceptanceTest(BubcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex','-permitbaremultisig=0',
]] * self.num_nodes
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
assert_equal(node.getblockcount(), 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
coins = node.listunspent()
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain exactly one raw transaction for now', lambda: node.testmempoolaccept(rawtxs=['ff00baar', 'ff22']))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = coins.pop() # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': 'txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = Decimal('0.000007')
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): Decimal('0.3') - fee}],
))['hex']
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee}}],
rawtxs=[raw_tx_0],
)
self.log.info('A final transaction not in the mempool')
coin = coins.pop() # Pick a random coin(base) to spend
output_amount = Decimal('0.025')
raw_tx_final = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout'], "sequence": 0xffffffff}], # SEQUENCE_FINAL
outputs=[{node.getnewaddress(): output_amount}],
locktime=node.getblockcount() + 2000, # Can be anything
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_final)))
fee_expected = coin['amount'] - output_amount
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee_expected}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
node.sendrawtransaction(hexstring=raw_tx_final, maxfeerate=0)
self.mempool_size += 1
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size += 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': (2 * fee)}}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
# take original raw_tx_0
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'txn-mempool-conflict'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with missing inputs, that never existed')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, maxfeerate=0)
# Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
self.log.info('Create a signed "reference" tx for later use')
raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': txid_spend_both, 'vout': 0}],
outputs=[{node.getnewaddress(): 0.05}],
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
# Reference tx should be valid on itself
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': { 'base': Decimal('0.1') - Decimal('0.05')}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with no outputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = []
# Skip re-signing the transaction for context independent checks from now on
# tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(tx.serialize().hex())['hex'])))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-empty'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A really large transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with negative output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue *= -1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-negative'}],
rawtxs=[tx.serialize().hex()],
)
# The following two validations prevent overflow of the output amounts (see CVE-2010-5139).
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue = MAX_MONEY + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-toolarge'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with too large sum of output values')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = [tx.vout[0]] * 2
tx.vout[0].nValue = MAX_MONEY
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-txouttotal-toolarge'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with duplicate inputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-inputs-duplicate'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent)))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'coinbase'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('Some nonstandard transactions')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.nVersion = 3 # A version currently non-standard
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'version'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptpubkey'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
tx.vout[0].scriptPubKey = CScript([OP_2, pubkey, pubkey, pubkey, OP_3, OP_CHECKMULTISIG]) # Some bare multisig script (2-of-3)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bare-multisig'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-not-pushonly'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([b'a' * 1648]) # Some too large scriptSig (>1650 bytes)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-size'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'tx-size'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0] = output_p2sh_burn
tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'dust'}],
rawtxs=[tx.serialize().hex()],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
tx.vout = [tx.vout[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'multi-op-return'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A timelocked transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored
tx.nLockTime = node.getblockcount() + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'non-final'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'non-BIP68-final'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
if __name__ == '__main__':
MempoolAcceptanceTest().main()
| 48.059172
| 154
| 0.63445
|
b2567887807922397d50aa73e133271d00be77e8
| 5,049
|
py
|
Python
|
avatar_field/widget.py
|
ZFCon/django-avatarfield
|
a618de63f04228db9b2ee20275789ca5fee78e1a
|
[
"MIT"
] | 3
|
2021-02-20T04:51:00.000Z
|
2021-05-06T14:56:23.000Z
|
avatar_field/widget.py
|
bharathjinka09/django-avatarfield
|
c6e6e68ab4dd33fa688a9bfcb4b6312e5cfae2aa
|
[
"MIT"
] | null | null | null |
avatar_field/widget.py
|
bharathjinka09/django-avatarfield
|
c6e6e68ab4dd33fa688a9bfcb4b6312e5cfae2aa
|
[
"MIT"
] | 1
|
2021-02-20T04:51:52.000Z
|
2021-02-20T04:51:52.000Z
|
from django.forms import ModelChoiceField, Select
from django.forms.models import ModelChoiceIterator, ModelChoiceIteratorValue
from django.forms.fields import Field
from django.forms.widgets import RadioSelect
class AvatarModelChoiceIterator(ModelChoiceIterator):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
# set image field to use it later to access image
self.image_field = field.image_field
def __iter__(self):
if self.field.empty_label is not None:
yield (None, "", self.field.empty_label)
queryset = self.queryset
# Can't use iterator() when queryset uses prefetch_related()
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
yield self.choice(obj)
def choice(self, obj):
choice = (
getattr(obj, self.image_field),
ModelChoiceIteratorValue(self.field.prepare_value(obj), obj),
self.field.label_from_instance(obj),
)
return choice
class AvatarSelect(Select):
template_name = 'avatar_field/widgets/select.html'
option_template_name = 'avatar_field/widgets/select_option.html'
class Media:
css = {
'all': [
'avatar_field/css/bootstrap.css',
'https://cdn.jsdelivr.net/npm/bootstrap-select@1.13.14/dist/css/bootstrap-select.min.css',
'avatar_field/css/style.css',
]
}
js = [
'https://code.jquery.com/jquery-3.3.1.slim.min.js',
'avatar_field/js/bootstrap.min.js',
'https://cdn.jsdelivr.net/npm/bootstrap-select@1.13.14/dist/js/bootstrap-select.min.js',
]
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
groups = []
has_selected = False
for index, (option_image, option_value, option_label) in enumerate(self.choices):
if option_value is None:
option_value = ''
subgroup = []
if isinstance(option_label, (list, tuple)):
group_name = option_value
subindex = 0
choices = option_label
else:
group_name = None
subindex = None
choices = [(option_image, option_value, option_label)]
groups.append((group_name, subgroup, index))
for image, subvalue, sublabel in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected)
)
has_selected |= selected
subgroup.append(self.create_option(
name, image, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, image, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(
self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'image': image,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
'wrap_label': True,
}
class AvatarModelChoiceField(ModelChoiceField):
widget = AvatarSelect
iterator = AvatarModelChoiceIterator
def __init__(self, queryset, *, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
blank=False, image_field, **kwargs):
self.image_field = image_field # set image field to access it later
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(
self, required=required, widget=widget, label=label,
initial=initial, help_text=help_text, **kwargs
)
if (
(required and initial is not None) or
(isinstance(self.widget, RadioSelect) and not blank)
):
self.empty_label = None
else:
self.empty_label = empty_label
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
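# Minimal usage sketch (the "Profile" model and its "avatar" image field below are
# assumptions for illustration only, not part of this package):
#
#   class ProfileForm(forms.Form):
#       profile = AvatarModelChoiceField(
#           queryset=Profile.objects.all(),
#           image_field="avatar",  # attribute read by AvatarModelChoiceIterator.choice()
#       )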
| 36.854015
| 106
| 0.585859
|
4db3ed1aa0929ae05bb636d2d672e5546fb32224
| 2,997
|
py
|
Python
|
train/intermimic/Packet.py
|
eniac/MimicNet
|
c0790679f8c220c75c33ace67e2735816aac6815
|
[
"MIT"
] | 15
|
2021-08-20T08:10:01.000Z
|
2022-03-24T21:24:50.000Z
|
train/intermimic/Packet.py
|
eniac/MimicNet
|
c0790679f8c220c75c33ace67e2735816aac6815
|
[
"MIT"
] | 1
|
2022-03-30T09:03:39.000Z
|
2022-03-30T09:03:39.000Z
|
train/intermimic/Packet.py
|
eniac/MimicNet
|
c0790679f8c220c75c33ace67e2735816aac6815
|
[
"MIT"
] | 3
|
2021-08-20T08:10:34.000Z
|
2021-12-02T06:15:02.000Z
|
#!/usr/bin/env python3
import socket
import struct
"""
Convert an IP string to long
Taken from https://stackoverflow.com/questions/9590965/convert-an-ip-string-to-
a-number-and-vice-versa/22272197
"""
def ip2long(ip):
packed_ip = socket.inet_aton(ip)
return struct.unpack("!L", packed_ip)[0]
class Packet():
def __init__(self, toks):
self.data = dict()
self.data["time"] = float(toks[0][:-1])
self.data["src"] = toks[1]
self.data["dst"] = toks[3][:-1]
self.data["flags"] = toks[4]
seq_range = toks[5]
if ':' in seq_range:
self.data["seq_begin"], tmp = seq_range.split(':')
self.data["seq_end"] = tmp.split('(')[0]
else:
self.data["seq_begin"] = None
self.data["seq_end"] = None
if 'ack' in toks:
ack_index = toks.index('ack')
self.data["ack_num"] = toks[ack_index + 1]
else:
self.data["ack_num"] = None
if 'win' in toks:
win_index = toks.index('win')
self.data["win_num"] = toks[win_index + 1]
else:
self.data["win_num"] = None
if 'ecn' in toks:
ecn_index = toks.index('ecn')
if toks[ecn_index + 1] == "3":
self.data["ecn"] = "1"
else:
self.data["ecn"] = "0"
if 'agg' in toks:
agg_index = toks.index('agg')
self.data["agg"] = toks[agg_index + 1]
if 'tor' in toks:
tor_index = toks.index('tor')
self.data["tor"] = toks[tor_index + 1]
if 'svr' in toks:
svr_index = toks.index('svr')
self.data["svr"] = toks[svr_index + 1]
if 'interface' in toks:
interface_index = toks.index('interface')
self.data["interface"] = toks[interface_index + 1]
def get(self, key):
return self.data[key]
def set(self, key, value):
self.data[key] = value
def matches(self, other_packet):
if self.data["src"] != other_packet.data["src"]:
return False
if self.data["dst"] != other_packet.data["dst"]:
return False
if self.data["seq_begin"] != other_packet.data["seq_begin"]:
return False
if self.data["flags"] != other_packet.data["flags"]:
return False
if self.data["seq_end"] != other_packet.data["seq_end"]:
return False
if self.data["ack_num"] != other_packet.data["ack_num"]:
return False
if self.data["win_num"] != other_packet.data["win_num"]:
return False
return True
def __str__(self):
return str(self.data["time"]) + ": " + \
self.data["src"] + "|" + self.data["dst"] + "|" + \
str(self.data["seq_begin"]) + "|" + \
str(self.data["seq_end"]) + "|" + \
str(self.data["ack_num"])
| 30.581633
| 79
| 0.508509
|
1da33e3d29da8aa12b13db870d58812cd47a361f
| 11,901
|
py
|
Python
|
src/Fig04_repo.py
|
takumihonda/lightning_da_ideal
|
8c79004487a95674de9ad2fe40bcad760e867877
|
[
"MIT"
] | null | null | null |
src/Fig04_repo.py
|
takumihonda/lightning_da_ideal
|
8c79004487a95674de9ad2fe40bcad760e867877
|
[
"MIT"
] | null | null | null |
src/Fig04_repo.py
|
takumihonda/lightning_da_ideal
|
8c79004487a95674de9ad2fe40bcad760e867877
|
[
"MIT"
] | null | null | null |
import numpy as np
from netCDF4 import Dataset
from datetime import datetime
from datetime import timedelta
import os
import sys
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.colors as mcolors
import matplotlib.patches as patches
from tools_LT import read_evar_only
quick = True
#quick = False
def read_vars( INFO, tlev=0, HIM8=True ):
# Read variables
if HIM8:
fn_Him8 = os.path.join( INFO["GTOP"], INFO["EXP"], INFO["time0"].strftime('%Y%m%d%H%M%S'), INFO["TYPE"], INFO["MEM"],
"Him8_" + INFO["time0"].strftime('%Y%m%d%H%M%S_') + INFO["MEM"] + ".nc")
print( fn_Him8 )
nc = Dataset(fn_Him8, 'r', format='NETCDF4')
tbb = nc.variables["tbb"][tlev,:,:,:]
nc.close()
else:
tbb = np.zeros(1)
fn_radar = os.path.join( INFO["GTOP"], INFO["EXP"], INFO["time0"].strftime('%Y%m%d%H%M%S'), INFO["TYPE"], INFO["MEM"],
"radar_" + INFO["time0"].strftime('%Y%m%d%H%M%S_') + INFO["MEM"] + ".nc")
print( fn_radar, tlev )
nc = Dataset(fn_radar, 'r', format='NETCDF4')
if INFO["TYPE"] is "fcst":
z = nc.variables["z"][tlev,:,:,:]
vr = nc.variables["vr"][tlev,:,:,:]
else:
z = nc.variables["z"][:,:,:]
vr = nc.variables["vr"][:,:,:]
nc.close()
return( tbb, z, vr )
def main( INFO, EXP1="2000m_DA_0306", EXP2="2000m_DA_0306", tlev=0, typ="anal", vname="QG" ):
data_path = "../../dat4figs/Fig04"
os.makedirs( data_path, exist_ok=True )
print( tlev, INFO["DT"]*tlev )
#ctime = datetime(2001, 1, 1, 1, 0) + timedelta(seconds=INFO["DT"]*tlev )
ctime = INFO["time0"] + timedelta(seconds=INFO["DT"]*tlev )
if typ is not "fcst":
ctime = datetime(2001, 1, 1, 1, 0) + timedelta(seconds=INFO["DT"]*tlev )
INFO["EXP"] = EXP1
INFO["MEM"] = "mean"
INFO["TYPE"] = typ
if typ is not "fcst":
INFO["time0"] = ctime
print("CHECK", INFO["time0"] )
# tbb_exp1, z_exp1, vr_exp1 = read_vars( INFO, tlev=tlev, HIM8=False )
# evar_exp1 = read_evar_only( INFO, tlev=tlev, vname=vname )
# efp_exp1 = read_evar_only( INFO, tlev=tlev, vname="FP" )
INFO["EXP"] = EXP2
# tbb_exp2, z_exp2, vr_exp2 = read_vars( INFO, tlev=tlev, HIM8=False )
# evar_exp2 = read_evar_only( INFO, tlev=tlev, vname=vname )
# efp_exp2 = read_evar_only( INFO, tlev=tlev, vname="FP" )
ft_sec = int( INFO["DT"]*tlev )
# nature run
# read variables
INFO["EXP"] = EXP1
INFO["MEM"] = "mean"
INFO["TYPE"] = "fcst"
INFO["time0"] = datetime(2001, 1, 1, 1, 0)
tlev_nat = int( ( ctime - datetime(2001, 1, 1, 1, 0) ).total_seconds() / INFO["DT"] )
print( "DEBUG", tlev_nat, ctime)
# tbb_nat, z_nat, vr_nat = read_vars( INFO, tlev=tlev_nat, HIM8=False )
# evar_nat = read_evar_only( INFO, tlev=tlev_nat, vname=vname )
# efp_nat = read_evar_only( INFO, tlev=tlev_nat, vname="FP" )
#
# print("evars: ", evar_nat.shape, evar_exp1.shape, evar_exp2.shape )
tit_l = [ "NODA (analysis)",
"RDA (analysis)",
"Nature run" ]
if typ is "fcst":
foot = "\n(fcst from mean)"
if ft_sec == 0:
foot = "\n(analysis)"
tit_l = [
"NODA" + foot,
"RDA" + foot,
"Nature run",
"NODA" + foot,
"RDA" + foot,
"Nature run",
]
# print( z_nat.shape, z_exp1.shape, z_exp2.shape )
fig, ((ax1,ax2,ax3), (ax4,ax5,ax6) ) = plt.subplots(2, 3, figsize=(11,8.2))
fig.subplots_adjust(left=0.06, bottom=0.05, right=0.93, top=0.94,
wspace=0.2, hspace=0.3)
ax_l = [ax1, ax2, ax3, ax4, ax5, ax6]
levs_dbz= np.array([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65])
cmap_dbz = mcolors.ListedColormap(['cyan','dodgerblue',
'lime', 'limegreen','yellow',
'orange', 'red', 'firebrick', 'magenta',
'purple'])
cmap_dbz.set_under('w', alpha=1.0)
cmap_dbz.set_over('gray', alpha=1.0)
cmap_rb = plt.cm.get_cmap("RdBu_r")
cmap_rb.set_under('gray', alpha=1.0)
cmap_rb.set_over('gray', alpha=1.0)
unit_dbz = "(dBZ)"
unit_crg = r'(nC m$^{-3}$)'
#levs_rb_qcrg = np.array([-1, -0.8, -0.6, -0.4, -0.2, -0.1,
# 0.1, 0.2, 0.4, 0.6, 0.8, 1])
levs_rb_qcrg = np.array([-0.4, -0.3, -0.2, -0.1, -0.05, -0.01,
0.01, 0.05, 0.1, 0.2, 0.3, 0.4, ])
levs_rb_qcrg = np.array([-0.6, -0.4, -0.2, -0.1, -0.05, -0.01,
0.01, 0.05, 0.1, 0.2, 0.4, 0.6])
levs_l = [ levs_dbz, levs_dbz, levs_dbz,
levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg]
cmap_l = [ cmap_dbz, cmap_dbz, cmap_dbz,
cmap_rb, cmap_rb, cmap_rb ]
unit_l = [ unit_dbz, unit_dbz, unit_dbz,
unit_crg, unit_crg, unit_crg ]
pnum_l = [
"(a)", "(b)", "(c)",
"(d)", "(e)", "(f)",
]
tvar = vname
if vname is "QCRG":
levs = levs_rb_qcrg
cmap = cmap_rb
unit = unit_crg
tvar = "Total charge density"
bbox = { 'facecolor':'w', 'alpha':0.95, 'pad':1.5, 'edgecolor':'w' }
xmin = 120
xmax = 280
ymin = 120
ymax = 320
ft_sec_a = int( ( ctime - INFO["time00"] ).total_seconds() )
print( "ctime",ctime, tlev, INFO["DT"])
xlabel = "X (km)"
ylabel = "Y (km)"
xaxis = INFO["X"][:] * 0.001
yaxis = INFO["Y"][:] * 0.001
x2d, y2d = np.meshgrid( yaxis, xaxis )
xdgrid = 20
ydgrid = 20
zlev_show = 8
zlev_show = 10
zlev_show = 16
zlev_show = 14 # comment out
if typ is not "fcst":
info = 't={0:.0f} min\nZ={1:} km'.format( ft_sec_a/60.0, INFO["Z"][zlev_show]/1000)
else:
info = 't={0:.0f} min (FT={1:.0f} min)\nZ={2:} km'.format( ft_sec_a/60.0, ft_sec/60.0, INFO["Z"][zlev_show]/1000)
# if typ is not "fcst":
# VAR_l = [
# z_exp1[zlev_show,:,:],
# z_exp2[zlev_show,:,:],
# z_nat[zlev_show,:,:],
# evar_exp1[0,zlev_show,:,:],
# evar_exp2[0,zlev_show,:,:],
# evar_nat[0,zlev_show,:,:]]
# else:
# VAR_l = [
# z_exp1[zlev_show,:,:],
# z_exp2[zlev_show,:,:],
# z_nat[zlev_show,:,:],
# evar_exp1[0,zlev_show,:,:],
# evar_exp2[0,zlev_show,:,:],
# evar_nat[0,zlev_show,:,:]
# ]
# FP_l = [ np.sum( efp_exp1[0,:,:,:], axis=0),
# np.sum(efp_exp2[0,:,:,:], axis=0),
# np.sum(efp_nat[0,:,:,:], axis=0) ]
for idx, ax in enumerate(ax_l):
fn = '{0:}/data{1:0=2}.npz'.format( data_path, idx )
print( fn )
# np.savez( fn, data=VAR_l[idx][:,:] )
data = np.load( fn )['data']
# print(idx,tit_l[idx])
# print( VAR_l[idx].shape, np.max(VAR_l[idx]), np.min(VAR_l[idx]) )
#SHADE = ax.pcolormesh(x2d, y2d,
SHADE = ax.contourf(x2d, y2d,
data,
#VAR_l[idx][:,:],
levels=levs_l[idx],
#vmin=np.min(levs),
#vmax=np.max(levs),
cmap=cmap_l[idx],
extend='both',
)
if typ is "fcst" and ft_sec > 0:
ssize = 10.0
idx_ = idx
if idx > 2:
idx_ = idx - 3
fp2d = FP_l[idx_]
#fp2d[ fp2d < 1.0 ] = np.nan
#fp2d = fp2d / ssize
fp2d = np.where( fp2d >= 1.0, ssize, np.nan )
ax.scatter( x2d, y2d, s=fp2d,
c='k', marker='s',
edgecolors="w", linewidths=0.5 )
ax.set_xlim( xmin, xmax )
ax.set_ylim( ymin, ymax )
ax.xaxis.set_ticks( np.arange(xmin, xmax, xdgrid) )
ax.yaxis.set_ticks( np.arange(ymin, ymax, ydgrid) )
ax.tick_params(axis='both', which='minor', labelsize=7 )
ax.tick_params(axis='both', which='major', labelsize=7 )
ax.text(0.5, 0.95, tit_l[idx],
fontsize=12, transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='top',
bbox=bbox )
ax.text(0.1, 0.95, pnum_l[idx],
fontsize=10, transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='top',
bbox=bbox )
ax.set_xlabel( xlabel, fontsize=8 )
ax.set_ylabel( ylabel, fontsize=8 )
if idx == 2 or idx == 5:
pos = ax.get_position()
cb_h = pos.height
cb_w = 0.01
ax_cb = fig.add_axes( [pos.x1+0.01, pos.y0, cb_w, cb_h] )
cb = plt.colorbar( SHADE, cax=ax_cb, orientation = 'vertical',
ticks=levs_l[idx], extend='both' )
cb.ax.tick_params( labelsize=8 )
ax.text( 1.0, -0.03, unit_l[idx],
fontsize=9, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='top', )
ax.text( 1.0, 1.1, info,
fontsize=10, transform=ax.transAxes,
horizontalalignment='right',
verticalalignment='center', )
if idx == 1 or idx == 4:
tvar_ = tvar
if idx == 1:
tvar_ = "Radar reflectivity"
ax.text( 0.5, 1.1, tvar_,
fontsize=15, transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center', )
# fig_tit = tvar
# fig.suptitle( fig_tit, fontsize=16 )
#odir = "png/6p_DA_var"
odir = "pdf/fig20210624"
#ofig = '6p_{:1}_{:2}_{:3}_fta{:05}_ft{:05}_z{:0=2}_{:}.png'.format(typ, EXP1, EXP2, ft_sec_a, ft_sec, zlev_show, vname)
ofig = '6p_{:1}_{:2}_{:3}_fta{:05}_ft{:05}_z{:0=2}_{:}.pdf'.format(typ, EXP1, EXP2, ft_sec_a, ft_sec, zlev_show, vname)
print( ofig, odir )
if not quick:
os.makedirs(odir, exist_ok=True)
plt.savefig(os.path.join(odir,ofig),
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
else:
plt.show()
###################
DX = 2000.0
DY = 2000.0
XDIM = 192
YDIM = 192
TDIM = 13
ZDIM = 40
XDIM = 176
YDIM = 176
ZDIM = 45
DZ = 500.0
DT = 300
X = np.arange( DX*0.5, DX*XDIM, DX )
Y = np.arange( DY*0.5, DY*YDIM, DY )
T = np.arange( 0, DT*TDIM, DT )
BAND = np.arange( 7, 17, 1 )
Z = np.arange(DZ*0.5, DZ*ZDIM, DZ)
#EXP = "2000m_NODA_1022_FIR2km_N"
#time0 = datetime( 2001, 1, 1, 1, 0, 0 )
EXP = "2000m_DA_1022_FIR2km_N"
EXP = "2000m_DA_0302"
EXP1 = "2000m_DA_0306"
EXP1 = "2000m_NODA_0306"
EXP2 = "2000m_DA_0306"
EXP1 = "2000m_NODA_0601"
EXP2 = "2000m_DA_0601"
EXP1 = "2000m_NODA_0723"
EXP2 = "2000m_DA_0723"
#EXP1 = "2000m_DA_0306_R_FP_180km"
time0 = datetime( 2001, 1, 1, 1, 20, 0 )
time0 = datetime( 2001, 1, 1, 1, 30, 0 )
GTOP = "/data_honda01/honda/SCALE-LETKF/scale-LT/OUTPUT"
TYPE = "fcst"
MEM = "mean"
MEM = "0025"
time00 = datetime( 2001, 1, 1, 0, 0, 0 )
INFO = {"XDIM":XDIM, "YDIM":YDIM, "NBAND":10, "TDIM":TDIM,
"X":X, "Y":Y , "BAND":BAND, "T":T, "GTOP":GTOP,
"ZDIM":ZDIM, "Z":Z, "DT":DT,
"TYPE":TYPE, "MEM":MEM, "EXP":EXP,
"time0": time0, "time00": time00 }
tmax = 13
tmax = 7
tmin = 0
tmin = 0
tmax = tmin + 1
#tmin = 6
#tmax = 7
#tmax = 1
typ = "anal"
typ = "fcst"
vname = "QCRG"
if typ is not "fcst":
tmin = 1
for tlev in range( tmin, tmax ):
INFO["time0"] = time0
main( INFO, EXP1=EXP1, EXP2=EXP2, tlev=tlev, typ=typ, vname=vname )
| 29.604478
| 125
| 0.510881
|
abde790973a1a14fe2d2615e10dbf677f2bfb49d
| 3,623
|
py
|
Python
|
onlinexam/urls.py
|
DeepakDarkiee/exam
|
59c832d2107b8cf9fc08fb1d78d1046ee29204f6
|
[
"MIT"
] | null | null | null |
onlinexam/urls.py
|
DeepakDarkiee/exam
|
59c832d2107b8cf9fc08fb1d78d1046ee29204f6
|
[
"MIT"
] | null | null | null |
onlinexam/urls.py
|
DeepakDarkiee/exam
|
59c832d2107b8cf9fc08fb1d78d1046ee29204f6
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth.views import LoginView, LogoutView
from django.urls import include, path
from exam import views
def trigger_error(request):
division_by_zero = 1 / 0
urlpatterns = [
path("sentry-debug/", trigger_error),
path("admin/", admin.site.urls),
path("teacher/", include("teacher.urls")),
path("student/", include("student.urls")),
path("tinymce/", include("tinymce.urls")),
path("", views.home_view, name=""),
path(
"logout/", LogoutView.as_view(template_name="exam/logout.html"), name="logout"
),
path("contactus", views.contactus_view),
path("afterlogin", views.afterlogin_view, name="afterlogin"),
path("adminclick", views.adminclick_view),
path(
"adminlogin",
LoginView.as_view(template_name="exam/adminlogin.html"),
name="adminlogin",
),
path("admin-dashboard", views.admin_dashboard_view, name="admin-dashboard"),
path("admin-teacher", views.admin_teacher_view, name="admin-teacher"),
path(
"admin-view-teacher", views.admin_view_teacher_view, name="admin-view-teacher"
),
path("update-teacher/<int:pk>", views.update_teacher_view, name="update-teacher"),
path("delete-teacher/<int:pk>", views.delete_teacher_view, name="delete-teacher"),
path(
"admin-view-pending-teacher",
views.admin_view_pending_teacher_view,
name="admin-view-pending-teacher",
),
path(
"admin-view-teacher-salary",
views.admin_view_teacher_salary_view,
name="admin-view-teacher-salary",
),
path(
"approve-teacher/<int:pk>", views.approve_teacher_view, name="approve-teacher"
),
path("reject-teacher/<int:pk>", views.reject_teacher_view, name="reject-teacher"),
path("admin-student", views.admin_student_view, name="admin-student"),
path(
"admin-view-student", views.admin_view_student_view, name="admin-view-student"
),
path(
"admin-view-student-marks",
views.admin_view_student_marks_view,
name="admin-view-student-marks",
),
path(
"admin-view-marks/<int:pk>",
views.admin_view_marks_view,
name="admin-view-marks",
),
path(
"admin-check-marks/<int:pk>",
views.admin_check_marks_view,
name="admin-check-marks",
),
path("update-student/<int:pk>", views.update_student_view, name="update-student"),
path("delete-student/<int:pk>", views.delete_student_view, name="delete-student"),
path("admin-course", views.admin_course_view, name="admin-course"),
path("admin-add-course", views.admin_add_course_view, name="admin-add-course"),
path("admin-view-course", views.admin_view_course_view, name="admin-view-course"),
path("delete-course/<int:pk>", views.delete_course_view, name="delete-course"),
path("admin-question", views.admin_question_view, name="admin-question"),
path(
"admin-add-question", views.admin_add_question_view, name="admin-add-question"
),
path(
"admin-view-question",
views.admin_view_question_view,
name="admin-view-question",
),
path("view-question/<int:pk>", views.view_question_view, name="view-question"),
path(
"delete-question/<int:pk>", views.delete_question_view, name="delete-question"
),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38.542553
| 86
| 0.676787
|
96ffbecf5d5ff03a411e05619197c470e0479f66
| 6,531
|
py
|
Python
|
_deprecated/driver.py
|
m3d/osgar_archive_2020
|
556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e
|
[
"MIT"
] | 12
|
2017-02-16T10:22:59.000Z
|
2022-03-20T05:48:06.000Z
|
_deprecated/driver.py
|
m3d/osgar_archive_2020
|
556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e
|
[
"MIT"
] | 618
|
2016-08-30T04:46:12.000Z
|
2022-03-25T16:03:10.000Z
|
_deprecated/driver.py
|
robotika/osgar
|
6f4f584d5553ab62c08a1c7bb493fefdc9033173
|
[
"MIT"
] | 11
|
2016-08-27T20:02:55.000Z
|
2022-03-07T08:53:53.000Z
|
#!/usr/bin/python
"""
Driver for John Deere
usage:
./driver.py <notes> | [<metalog> [<F>]]
"""
import sys
import math
from apyros.metalog import MetaLog, disableAsserts
from apyros.sourcelogger import SourceLogger
from can import CAN, DummyMemoryLog, ReplayLogInputsOnly, ReplayLog
from johndeere import JohnDeere, setup_faster_update, ENC_SCALE
FRONT_REAR_DIST = 1.3
LEFT_WHEEL_DIST_OFFSET = 0.4 # from central axis
class Driver:
pass
def go_straight(robot, distance, gas=None, speed=None, timeout=10.0, with_stop=True):
""" Drive 'distance' meters with given speed or given gas value """
start_time = robot.time
if speed is not None:
assert gas is None # only one of them has to be set
robot.set_desired_speed(speed)
elif gas is not None:
assert speed is None
robot.canproxy.cmd = gas
else:
assert 0 # one of [gas, speed] has to be set
start_dist = robot.canproxy.dist_left_raw + robot.canproxy.dist_right_raw
arr = []
robot.set_desired_steering(0.0) # i.e. go straight (!)
while robot.time - start_time < timeout:
robot.update()
arr.append(robot.canproxy.gas)
dist = ENC_SCALE*(robot.canproxy.dist_left_raw + robot.canproxy.dist_right_raw
- start_dist)/2.0
if abs(dist) > distance:
break
print("Dist OK at {}s".format(robot.time - start_time), sorted(arr)[len(arr)/2])
print(dist)
if with_stop:
robot.stop()
robot.wait(3.0)
dist = ENC_SCALE*(robot.canproxy.dist_left_raw + robot.canproxy.dist_right_raw
- start_dist)/2.0
print(dist)
print()
# inspiration from Eduro
# def turnG(self, angle, angularSpeed = None, radius = 0.0, angleThreshold = math.radians(10),
# withStop=True, verbose=False ):
def turn(robot, angle, radius, speed, timeout=10.0, with_stop=True):
assert radius > 0, radius # accept only positive radius
# angle corresponds to final heading change
if speed < 0:
# backup - invert steering logic
angle = -angle
start_time = robot.time
if angle > 0:
# turn left
base = radius - LEFT_WHEEL_DIST_OFFSET
else:
base = radius + LEFT_WHEEL_DIST_OFFSET
left_radius = math.sqrt(base*base + FRONT_REAR_DIST*FRONT_REAR_DIST)
steering_angle = math.atan2(FRONT_REAR_DIST, base)
if angle < 0:
steering_angle = -steering_angle
print("Steering", math.degrees(steering_angle))
robot.set_desired_steering(steering_angle)
while (robot.canproxy.desired_wheel_angle_raw is not None
and abs(robot.canproxy.desired_wheel_angle_raw - robot.canproxy.wheel_angle_raw) > 20):
print("wait", robot.canproxy.desired_wheel_angle_raw, robot.canproxy.wheel_angle_raw, abs(robot.canproxy.desired_wheel_angle_raw - robot.canproxy.wheel_angle_raw))
robot.update()
robot.set_desired_speed(speed)
start_left = robot.canproxy.dist_left_raw
while robot.time - start_time < timeout:
robot.update()
dist_left = (robot.canproxy.dist_left_raw - start_left) * ENC_SCALE
if abs(dist_left) > abs(angle * left_radius):
print('turned distance', dist_left)
break
if robot.time - start_time >= timeout:
print("TURN TIMEOUT!", robot.time - start_time)
if with_stop:
robot.stop()
robot.wait(1.0)
def normalizeAnglePIPI( angle ):
while angle < -math.pi:
angle += 2*math.pi
while angle > math.pi:
angle -= 2*math.pi
return angle
def follow_line_gen(robot, line, stopDistance=0.0, turnScale=4.0, offsetSpeed=math.radians(20), offsetDistance=0.03):
"""
line ... A line to follow.
stopDistance ... The robot stops when closer than this to the endpoint. [m]
turnScale ... Magic parameter for the rotational speed. [scaling factor]
offsetSpeed ... This extra rotational speed is added when the robot is too far from the line. [rad/s]
offsetDistance ... When the robot is further than this from the line, some extra correction may be needed. [m]
"""
while line.distanceToFinishLine(robot.localization.pose()) > stopDistance:
diff = normalizeAnglePIPI(line.angle - robot.localization.pose()[2])
x, y, a = robot.localization.pose()
d = 1.3 # FRONT_REAR_DIST
# print "deg %.1f" %( math.degrees(diff),),
signedDistance = line.signedDistance((x+d*math.cos(a), y+d*math.sin(a))) # + self.centerOffset
if math.fabs( signedDistance ) > offsetDistance:
step = max(0.0, min(offsetSpeed, offsetSpeed * (abs(signedDistance)-offsetDistance)/offsetDistance ))
step = step * 0.5 # hack
if signedDistance < 0:
diff += step
else:
diff -= step
# turn = restrictedTurn(turnScale * diff)
# speed = self.restrictedSpeed(turn)
# yield speed, turn
# print "dist=%0.3f, diff=%.2f" % (signedDistance, math.degrees(diff)), robot.localization.pose()
yield diff
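# Minimal usage sketch (hypothetical): `line` is assumed to expose .angle,
# .signedDistance() and .distanceToFinishLine() as used above, and `robot` to provide
# .localization, .set_desired_steering() and .update(); neither object is defined in
# this file, so the snippet below is illustrative only.
#
#   for heading_correction in follow_line_gen(robot, line, stopDistance=0.5):
#       robot.set_desired_steering(heading_correction)
#       robot.update()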
def driver_self_test(driver, metalog):
assert metalog is not None
can_log_name = metalog.getLog('can')
if metalog.replay:
if metalog.areAssertsEnabled():
can = CAN(ReplayLog(can_log_name), skipInit=True)
else:
can = CAN(ReplayLogInputsOnly(can_log_name), skipInit=True)
else:
can = CAN()
can.relog(can_log_name)
can.resetModules(configFn=setup_faster_update)
robot = JohnDeere(can=can)
robot.UPDATE_TIME_FREQUENCY = 20.0 # TODO change internal and integrate setup
robot.localization = None # TODO
robot.canproxy.stop()
robot.canproxy.set_turn_raw(0)
go_straight(robot, distance=1.0, speed=0.3, with_stop=True)
for i in range(3):
turn(robot, math.radians(-45), radius=2.6, speed=0.5)
turn(robot, math.radians(-45), radius=2.6, speed=-0.5)
robot.canproxy.stop_turn()
robot.wait(3.0)
if __name__ == "__main__":
if len(sys.argv) < 2:
print(__doc__)
sys.exit(2)
metalog=None
if 'meta_' in sys.argv[1]:
metalog = MetaLog(filename=sys.argv[1])
elif len(sys.argv) > 2:
metalog = MetaLog(filename=sys.argv[2])
if len(sys.argv) > 2 and sys.argv[-1] == 'F':
disableAsserts()
if metalog is None:
metalog = MetaLog()
driver_self_test(Driver(), metalog)
# vim: expandtab sw=4 ts=4
| 34.373684
| 171
| 0.645384
|
836dbd812961e03caa7eade8d261dc152d96f23e
| 1,243
|
py
|
Python
|
toolbox/course-1/week-6-dynamic-programming-2/task-3-maximum-value-of-an-arithmetic-expression.py
|
ichko/DataStructures
|
550c696ef57ab42b0ff4079fb45d417a8ccb58af
|
[
"MIT"
] | 3
|
2018-02-21T20:22:23.000Z
|
2022-02-08T04:23:07.000Z
|
toolbox/course-1/week-6-dynamic-programming-2/task-3-maximum-value-of-an-arithmetic-expression.py
|
ichko/algo-playground
|
550c696ef57ab42b0ff4079fb45d417a8ccb58af
|
[
"MIT"
] | null | null | null |
toolbox/course-1/week-6-dynamic-programming-2/task-3-maximum-value-of-an-arithmetic-expression.py
|
ichko/algo-playground
|
550c696ef57ab42b0ff4079fb45d417a8ccb58af
|
[
"MIT"
] | null | null | null |
# python3
import math
def max_val_expr(numbers, operations, ops_map):
min_vals = {(i, i): n for i, n in enumerate(numbers)}
max_vals = {(i, i): n for i, n in enumerate(numbers)}
def minMax(i, j):
min_val, max_val = math.inf, -math.inf
for t in range(i, j):
op = ops_map[operations[t]]
vals = [
op(max_vals[(i, t)], max_vals[(t + 1, j)]),
op(max_vals[(i, t)], min_vals[(t + 1, j)]),
op(min_vals[(i, t)], max_vals[(t + 1, j)]),
op(min_vals[(i, t)], min_vals[(t + 1, j)])
]
min_val = min([min_val] + vals)
max_val = max([max_val] + vals)
return min_val, max_val
for s in range(1, len(numbers) + 1):
for i in range(0, len(numbers) - s):
min_vals[(i, i + s)], max_vals[(i, i + s)] = minMax(i, i + s)
return max_vals[(0, len(numbers) - 1)]
if __name__ == '__main__':
expression = input()
numbers = [int(d) for d in expression[::2]]
operations = expression[1::2]
max_val = max_val_expr(numbers, operations, {
'+': lambda a, b: a + b,
'-': lambda a, b: a - b,
'*': lambda a, b: a * b
})
print(max_val)
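# Hand-checked example (not part of the original assignment I/O): for the expression
# "3+2*4" the two parenthesizations give (3+2)*4 = 20 and 3+(2*4) = 11, so
#   ops = {'+': lambda a, b: a + b, '-': lambda a, b: a - b, '*': lambda a, b: a * b}
#   max_val_expr([3, 2, 4], "+*", ops)   # -> 20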
| 27.021739
| 73
| 0.497184
|
98e69c900a9d4fe9bb718a5cf256962e4efdc62c
| 9,013
|
py
|
Python
|
ropgenerator/Load.py
|
sailay1996/ropgenerator
|
7ae5b952ad0b6cb2e37e911230168c647de5a889
|
[
"MIT"
] | null | null | null |
ropgenerator/Load.py
|
sailay1996/ropgenerator
|
7ae5b952ad0b6cb2e37e911230168c647de5a889
|
[
"MIT"
] | null | null | null |
ropgenerator/Load.py
|
sailay1996/ropgenerator
|
7ae5b952ad0b6cb2e37e911230168c647de5a889
|
[
"MIT"
] | 2
|
2019-04-07T04:15:45.000Z
|
2019-08-15T14:17:53.000Z
|
# -*- coding:utf-8 -*-
# Load module: load a binary and extract gadgets from it
import sys
import os
import subprocess
from ropgenerator.Database import build, initDB
from ropgenerator.semantic.Engine import initEngine
from ropgenerator.exploit.Scanner import initScanner
import ropgenerator.Architecture as Arch
from ropgenerator.IO import string_bold, info, string_special, banner, notify, error
from magic import from_file
from base64 import b16decode
from random import shuffle, random, randrange, Random
# Command options
OPTION_ARCH = '--arch'
OPTION_ARCH_SHORT = '-a'
OPTION_ROPGADGET_OPTIONS = '--ropgadget-opts'
OPTION_HELP = '--help'
OPTION_HELP_SHORT = '-h'
OPTION_ROPGADGET_OPTIONS_SHORT = '-r'
# Help for the load command
helpStr = banner([string_bold("'load' command"),
string_special("(Load gadgets from a binary file)")])
helpStr += "\n\n\t"+string_bold("Usage")+":\tload [OPTIONS] <filename>"
helpStr += "\n\n\t"+string_bold("Options")+":"
helpStr += "\n\t\t"+string_special(OPTION_ARCH_SHORT)+","+string_special(OPTION_ARCH)+\
" <arch>"+"\t\tmanualy specify architecture.\n\t\t\t\t\t\tAvailable: 'X86', 'X64'"
helpStr += '\n\n\t\t'+string_special(OPTION_ROPGADGET_OPTIONS_SHORT)+","+string_special(OPTION_ROPGADGET_OPTIONS)+\
" <opts>"+"\textra options for ROPgadget.\n\t\t\t\t\t\t<opts> must be a list of\n\t\t\t\t\t\toptions between ''"+\
"\n\t\t\t\t\t\te.g: \"-depth 4\""
helpStr += "\n\n\t"+string_bold("Examples")+":\n\t\tload /bin/ls\t\t(load gadgets from /bin/ls program)\n\t\tload ../test/vuln_prog\t(load gadgets from own binary)"
def print_help():
print(helpStr)
def getPlatformInfo(filename):
"""
Checks the binary type of the file
Precondition: the file exists !
Effects: set the Arch.currentBinType variable
Return : the architecture
"""
INTEL_strings = ["x86", "x86-64", "X86", "X86-64", "Intel", "80386"]
ELF32_strings = ["ELF 32-bit"]
ELF64_strings = ["ELF 64-bit"]
PE32_strings = ["PE32 "]
PE64_strings = ["PE32+"]
output = from_file(os.path.realpath(filename))
if( [sub for sub in INTEL_strings if sub in output]):
if( [sub for sub in ELF32_strings if sub in output]):
notify("ELF 32-bits detected")
Arch.currentBinType = Arch.BinaryType.X86_ELF
return Arch.ArchX86
elif( [sub for sub in ELF64_strings if sub in output]):
notify("ELF 64-bits detected")
Arch.currentBinType = Arch.BinaryType.X64_ELF
return Arch.ArchX64
elif( [sub for sub in PE32_strings if sub in output]):
notify("PE 32-bits detected")
Arch.currentBinType = Arch.BinaryType.X86_PE
return Arch.ArchX86
elif( [sub for sub in PE64_strings if sub in output]):
notify("PE 64-bits detected")
Arch.currentBinType = Arch.BinaryType.X64_PE
return Arch.ArchX64
else:
notify("Unknown binary type")
Arch.currentBinType = Arch.BinaryType.UNKNOWN
return None
else:
return None
def getGadgets(filename, extra_args=''):
"""
Returns a list of gadgets extracted from a file
Precondition: the file exists
Returns
-------
    list of pairs (addr, asm) if successful
None if failure
"""
ropgadget = "ROPgadget"
notify("Executing ROPgadget as: " + ropgadget)
try:
cmd = [ropgadget,"--binary",filename, "--dump", "--all"]
if( extra_args ):
cmd += extra_args.split(" ")
p = subprocess.Popen(cmd,stdout=subprocess.PIPE)
except Exception as e:
error("Could not execute '" +ropgadget+ " --binary " + filename + " --dump --all'" + extra_args)
print("\tError message is: " + str(e))
print("\n\t(Maybe check/update your config with the 'config' command,\n\t or make sure you have the last ROPgadget version installed)")
return None
# Get the gadget list
# Pairs (address, raw_asm)
first = True
count = 0
res = []
for l in p.stdout.readlines():
if("0x" in l):
arr = l.split(' ')
addr = arr[0]
raw = b16decode(arr[-1].upper().strip())
res.append((int(addr,16), raw))
count += 1
notify("Finished : %d gadgets generated" % (count))
return res
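# The returned list pairs each gadget address with its raw opcode bytes, e.g.
# (illustrative values only): [(0x080484b6, '\x5d\xc3'), (0x080484c2, '\xc9\xc3')]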
def load(args):
global helpStr
global loaded
# Parse arguments and filename
filename = None
user_arch = None
i = 0
seenArch = False
seenRopgadget = False
ropgadget_options = ''
if( not args ):
print(helpStr)
return
while i < len(args):
if( args[i] in [OPTION_ARCH, OPTION_ARCH_SHORT] ):
if( seenArch ):
error("Option {} can be used only one time"\
.format(args[i]))
return
seenArch = True
if( i+1 == len(args)):
error("Missing argument after {}.\n\tType 'load -h' for help"\
.format(args[i]))
return
elif( args[i+1] == Arch.ArchX86.name ):
user_arch = Arch.ArchX86
elif( args[i+1] == Arch.ArchX64.name ):
user_arch = Arch.ArchX64
else:
error("Unknown architecture: {}".format(args[i+1]))
return
i += 2
elif( args[i] in [OPTION_ROPGADGET_OPTIONS, OPTION_ROPGADGET_OPTIONS_SHORT]):
if( seenRopgadget ):
error("Option {} can be used only one time"\
.format(args[i]))
return
seenRopgadget = True
ropgadget_options = ''
if( i+1 == len(args)):
error("Missing argument after {}.\n\tType 'load -h' for help"\
.format(args[i]))
return
j = i+1
            # Read the arguments
if( args[j][0] != "'" ):
error("ROPgadget options must be given between '' ")
return
if( args[j][-1] == "'" and len(args[j]) != 1):
ropgadget_options += args[j][1:-1]
else:
ropgadget_options += args[j][1:]
j += 1
closed_ok = False
while( j < len(args)):
if( args[j][0] != "'" ):
if( args[j][-1] == "'"):
ropgadget_options += " " + args[j][0:-1]
closed_ok = True
break
elif( "'" in args[j] ):
error("ROPgadget options: You must leave a space after the closing '")
return
else:
ropgadget_options += " " + args[j]
else:
if( len(args[j]) > 1):
error("ROPgadget options: You must leave a space after the closing \'")
return
else:
closed_ok = True
break
j += 1
if( not closed_ok ):
error("ROPgadget options: missing closing \'")
return
i = j+1
elif( args[i] in [OPTION_HELP, OPTION_HELP_SHORT] ):
print(helpStr)
return
else:
filename = args[i]
break
if( not filename ):
error("Missing filename.\n\tType 'load help' for help")
return
# Test if the file exists
if( not os.path.isfile(filename)):
error("Error. Could not find file '{}'".format(filename))
return
print('')
info(string_bold("Extracting gadgets from file")+ " '" + filename + "'\n")
# Cleaning the data structures
initDB()
Arch.reinit()
# Get architecture and OS info
arch = getPlatformInfo(filename)
if(arch == user_arch == None):
error("Error. Could not determine architecture")
return
elif( arch and user_arch and (arch != user_arch) ):
error("Error. Conflicting architectures")
print("\tUser supplied: " + user_arch.name)
print("\tFound: " + arch.name)
return
elif( arch ):
Arch.setArch(arch)
else:
Arch.setArch(user_arch)
# Init the binary scanner
initScanner(filename)
# Extract the gadget list
gadgetList = getGadgets(filename, ropgadget_options)
if( not gadgetList ):
return
# Build the gadget database
    # (we shuffle the list so that the loading bar
    # appears to grow steadily)
r = random()
shuffle(gadgetList, lambda: r)
build(gadgetList)
# Init engine
initEngine()
loaded = True
###################################
# Module wide
loaded = False
def loadedBinary():
global loaded
return loaded
| 34.665385
| 164
| 0.547875
|
9e5a4181932a365fd56a4b38119548c9cc975369
| 793
|
py
|
Python
|
chapter_6/ex_6-1.py
|
akshaymoharir/PythonCrashCourse
|
742b9841cff61d36567e8706efc69c5f5d5435ff
|
[
"MIT"
] | null | null | null |
chapter_6/ex_6-1.py
|
akshaymoharir/PythonCrashCourse
|
742b9841cff61d36567e8706efc69c5f5d5435ff
|
[
"MIT"
] | null | null | null |
chapter_6/ex_6-1.py
|
akshaymoharir/PythonCrashCourse
|
742b9841cff61d36567e8706efc69c5f5d5435ff
|
[
"MIT"
] | null | null | null |
## Python Crash Course
# Exercise 6.1: Person:
# Use a dictionary to store information about a person you know.
# Store their first name, last name, age, and the city in which they live.
# You should have keys such as first_name, last_name, age, and city.
# Print each piece of information stored in your dictionary.
#
def exercise_6_1():
print("\n")
myInfo = {
'first_name':'Akshay',
'last_name':'Moharir',
'age':29,
'city':'Novi'
}
print("Name:", myInfo['first_name'] + " " + myInfo['last_name'])
print("Age:", myInfo['age'])
print("City:", myInfo['city'])
print("\n")
if __name__ == '__main__':
exercise_6_1()
| 24.030303
| 89
| 0.534678
|
154477a463cfa3ea8c424f4964d215b13f67f309
| 5,313
|
py
|
Python
|
allennlp/training/metrics/auc.py
|
alle-pawols/allennlp
|
7d4a67263d7a210aca22d4f2b03e8568d3c34a48
|
[
"Apache-2.0"
] | 11,433
|
2017-06-27T03:08:46.000Z
|
2022-03-31T18:14:33.000Z
|
allennlp/training/metrics/auc.py
|
alle-pawols/allennlp
|
7d4a67263d7a210aca22d4f2b03e8568d3c34a48
|
[
"Apache-2.0"
] | 4,006
|
2017-06-26T21:45:43.000Z
|
2022-03-31T02:11:10.000Z
|
allennlp/training/metrics/auc.py
|
alle-pawols/allennlp
|
7d4a67263d7a210aca22d4f2b03e8568d3c34a48
|
[
"Apache-2.0"
] | 2,560
|
2017-06-26T21:16:53.000Z
|
2022-03-30T07:55:46.000Z
|
from typing import Optional
from overrides import overrides
import torch
import torch.distributed as dist
from sklearn import metrics
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
@Metric.register("auc")
class Auc(Metric):
"""
The AUC Metric measures the area under the receiver-operating characteristic
(ROC) curve for binary classification problems.
"""
def __init__(self, positive_label=1):
super().__init__()
self._positive_label = positive_label
self._all_predictions = torch.FloatTensor()
self._all_gold_labels = torch.LongTensor()
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A one-dimensional tensor of prediction scores of shape (batch_size).
gold_labels : `torch.Tensor`, required.
A one-dimensional label tensor of shape (batch_size), with {1, 0}
entries for positive and negative class. If it's not binary,
`positive_label` should be passed in the initialization.
mask : `torch.BoolTensor`, optional (default = `None`).
A one-dimensional label tensor of shape (batch_size).
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
# Sanity checks.
if gold_labels.dim() != 1:
raise ConfigurationError(
"gold_labels must be one-dimensional, "
"but found tensor of shape: {}".format(gold_labels.size())
)
if predictions.dim() != 1:
raise ConfigurationError(
"predictions must be one-dimensional, "
"but found tensor of shape: {}".format(predictions.size())
)
unique_gold_labels = torch.unique(gold_labels)
if unique_gold_labels.numel() > 2:
raise ConfigurationError(
"AUC can be used for binary tasks only. gold_labels has {} unique labels, "
"expected at maximum 2.".format(unique_gold_labels.numel())
)
gold_labels_is_binary = set(unique_gold_labels.tolist()) <= {0, 1}
if not gold_labels_is_binary and self._positive_label not in unique_gold_labels:
raise ConfigurationError(
"gold_labels should be binary with 0 and 1 or initialized positive_label "
"{} should be present in gold_labels".format(self._positive_label)
)
if mask is None:
batch_size = gold_labels.shape[0]
mask = torch.ones(batch_size, device=gold_labels.device).bool()
self._all_predictions = self._all_predictions.to(predictions.device)
self._all_gold_labels = self._all_gold_labels.to(gold_labels.device)
self._all_predictions = torch.cat(
[self._all_predictions, torch.masked_select(predictions, mask).float()], dim=0
)
self._all_gold_labels = torch.cat(
[self._all_gold_labels, torch.masked_select(gold_labels, mask).long()], dim=0
)
if is_distributed():
world_size = dist.get_world_size()
device = gold_labels.device
# Check if batch lengths are equal.
_all_batch_lengths = [torch.tensor(0) for i in range(world_size)]
dist.all_gather(
_all_batch_lengths, torch.tensor(len(self._all_predictions), device=device)
)
_all_batch_lengths = [batch_length.item() for batch_length in _all_batch_lengths]
if len(set(_all_batch_lengths)) > 1:
# Subsequent dist.all_gather() calls currently do not handle tensors of different length.
raise RuntimeError(
"Distributed aggregation for AUC is currently not supported for batches of unequal length."
)
_all_predictions = [
torch.zeros(self._all_predictions.shape, device=device) for i in range(world_size)
]
_all_gold_labels = [
torch.zeros(self._all_gold_labels.shape, device=device, dtype=torch.long)
for i in range(world_size)
]
dist.all_gather(_all_predictions, self._all_predictions)
dist.all_gather(_all_gold_labels, self._all_gold_labels)
self._all_predictions = torch.cat(_all_predictions, dim=0)
self._all_gold_labels = torch.cat(_all_gold_labels, dim=0)
def get_metric(self, reset: bool = False):
if self._all_gold_labels.shape[0] == 0:
return 0.5
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
self._all_gold_labels.cpu().numpy(),
self._all_predictions.cpu().numpy(),
pos_label=self._positive_label,
)
auc = metrics.auc(false_positive_rates, true_positive_rates)
if reset:
self.reset()
return auc
@overrides
def reset(self):
self._all_predictions = torch.FloatTensor()
self._all_gold_labels = torch.LongTensor()
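# Minimal usage sketch (tensor values are illustrative only):
#
#   auc = Auc()
#   auc(predictions=torch.tensor([0.1, 0.9, 0.8, 0.3]),
#       gold_labels=torch.tensor([0, 1, 1, 0]))
#   auc.get_metric(reset=True)   # -> 1.0 for this perfectly separated toy batch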
| 39.066176
| 111
| 0.632035
|
cd9a47fe1143a0f7c88a9c102c6b6576161c1f18
| 257
|
py
|
Python
|
5-23.py
|
infintyol2/William
|
432676e0fd25b02af2d2fa49f105b98f9cc98c08
|
[
"MIT"
] | null | null | null |
5-23.py
|
infintyol2/William
|
432676e0fd25b02af2d2fa49f105b98f9cc98c08
|
[
"MIT"
] | null | null | null |
5-23.py
|
infintyol2/William
|
432676e0fd25b02af2d2fa49f105b98f9cc98c08
|
[
"MIT"
] | null | null | null |
num = int(input("enter a number "))
num1 = int(input("enter a numer "))
small = 0
g = 0
if num > num1:
small = num1
else:
small = num
for x in range(1, num+1):
if (num % x == 0) and (num1 % x ==0):
g = x
print((g*(num/g))*(num1/g))
| 18.357143
| 41
| 0.51751
|
6b241c46c79f35e0ebc52d433d23ca7306722000
| 1,069
|
py
|
Python
|
var/spack/repos/builtin/packages/xcb-util-cursor/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/xcb-util-cursor/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/xcb-util-cursor/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class XcbUtilCursor(AutotoolsPackage):
"""The XCB util modules provides a number of libraries which sit on top
of libxcb, the core X protocol library, and some of the extension
libraries. These experimental libraries provide convenience functions
and interfaces which make the raw X protocol more usable. Some of the
libraries also provide client-side code which is not strictly part of
the X protocol but which have traditionally been provided by Xlib."""
homepage = "https://xcb.freedesktop.org/"
url = "https://xcb.freedesktop.org/dist/xcb-util-cursor-0.1.3.tar.gz"
version('0.1.3', sha256='a322332716a384c94d3cbf98f2d8fe2ce63c2fe7e2b26664b6cea1d411723df8')
depends_on('libxcb@1.4:')
depends_on('xcb-util-renderutil')
depends_on('xcb-util-image')
depends_on('pkgconfig', type='build')
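
# Typical usage once this recipe is available to a Spack instance (illustrative,
# not part of the original package file):
#   spack install xcb-util-cursor
#   spack load xcb-util-cursor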
| 39.592593
| 95
| 0.745557
|
b12013c44a913bdefba7c7fd2ec22726a76fac2b
| 24,741
|
py
|
Python
|
grr/server/grr_response_server/artifact_registry.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/artifact_registry.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/artifact_registry.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Central registry for artifacts."""
import io
import logging
import os
import threading
from grr_response_core import config
from grr_response_core.lib import artifact_utils
from grr_response_core.lib import objectfilter
from grr_response_core.lib import parsers
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.util.compat import yaml
from grr_response_server import data_store
# Names of fields that should no longer be used but might occur in old artifact
# files.
DEPRECATED_ARTIFACT_FIELDS = frozenset([
"labels",
])
class ArtifactRegistrySources(object):
"""Represents sources of the artifact registry used for getting artifacts."""
def __init__(self):
self._dirs = set()
self._files = set()
def AddDir(self, dirpath):
"""Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source.
"""
if dirpath not in self._dirs:
self._dirs.add(dirpath)
return True
return False
def AddFile(self, filepath):
"""Adds a file path as a source.
Args:
filepath: a string representing a path to the file.
Returns:
True if the file is not an already existing source.
"""
if filepath not in self._files:
self._files.add(filepath)
return True
return False
def Clear(self):
self._dirs.clear()
self._files.clear()
def GetDirs(self):
"""Returns an iterator over defined source directory paths."""
return iter(self._dirs)
def GetFiles(self):
"""Returns an iterator over defined source file paths."""
return iter(self._files)
def GetAllFiles(self):
"""Yields all defined source file paths.
This includes file paths defined directly and those defined implicitly by
defining a directory.
"""
for filepath in self._files:
yield filepath
for dirpath in self._dirs:
for filepath in ArtifactRegistrySources._GetDirYamlFiles(dirpath):
if filepath in self._files:
continue
yield filepath
@staticmethod
def _GetDirYamlFiles(dirpath):
try:
for filename in os.listdir(dirpath):
if filename.endswith(".json") or filename.endswith(".yaml"):
yield os.path.join(dirpath, filename)
except (IOError, OSError) as error:
logging.warning("problem with accessing artifact directory '%s': %s",
dirpath, error)
class ArtifactRegistry(object):
"""A global registry of artifacts."""
def __init__(self):
self._artifacts = {}
self._sources = ArtifactRegistrySources()
self._dirty = False
# Field required by the utils.Synchronized annotation.
self.lock = threading.RLock()
def _LoadArtifactsFromDatastore(self):
"""Load artifacts from the data store."""
loaded_artifacts = []
# TODO(hanuszczak): Why do we have to remove anything? If some artifact
# tries to shadow system artifact shouldn't we just ignore them and perhaps
# issue some warning instead? The datastore being loaded should be read-only
# during upload.
# A collection of artifacts that shadow system artifacts and need
# to be deleted from the data store.
to_delete = []
artifact_list = data_store.REL_DB.ReadAllArtifacts()
for artifact_value in artifact_list:
try:
self.RegisterArtifact(
artifact_value, source="datastore:", overwrite_if_exists=True)
loaded_artifacts.append(artifact_value)
except rdf_artifacts.ArtifactDefinitionError as e:
# TODO(hanuszczak): String matching on exception message is rarely
        # a good idea. Instead this should be refactored to some exception
# class and then handled separately.
if "system artifact" in str(e):
to_delete.append(artifact_value.name)
else:
raise
if to_delete:
DeleteArtifactsFromDatastore(to_delete, reload_artifacts=False)
self._dirty = True
# TODO(hanuszczak): This is connected to the previous TODO comment. Why
# do we throw exception at this point? Why do we delete something and then
# abort the whole upload procedure by throwing an exception?
detail = "system artifacts were shadowed and had to be deleted"
raise rdf_artifacts.ArtifactDefinitionError(to_delete, detail)
# Once all artifacts are loaded we can validate.
revalidate = True
while revalidate:
revalidate = False
for artifact_obj in loaded_artifacts[:]:
try:
Validate(artifact_obj)
except rdf_artifacts.ArtifactDefinitionError as e:
logging.error("Artifact %s did not validate: %s", artifact_obj.name,
e)
artifact_obj.error_message = str(e)
loaded_artifacts.remove(artifact_obj)
revalidate = True
# TODO(hanuszczak): This method should be a stand-alone function as it doesn't
# use the `self` parameter at all.
@utils.Synchronized
def ArtifactsFromYaml(self, yaml_content):
"""Get a list of Artifacts from yaml."""
raw_list = yaml.ParseMany(yaml_content)
# TODO(hanuszczak): I am very sceptical about that "doing the right thing"
# below. What are the real use cases?
# Try to do the right thing with json/yaml formatted as a list.
if (isinstance(raw_list, list) and len(raw_list) == 1 and
isinstance(raw_list[0], list)):
raw_list = raw_list[0]
# Convert json into artifact and validate.
valid_artifacts = []
for artifact_dict in raw_list:
# Old artifacts might still use deprecated fields, so we have to ignore
# such. Here, we simply delete keys from the dictionary as otherwise the
# RDF value constructor would raise on unknown fields.
for field in DEPRECATED_ARTIFACT_FIELDS:
artifact_dict.pop(field, None)
# In this case we are feeding parameters directly from potentially
# untrusted yaml/json to our RDFValue class. However, safe_load ensures
# these are all primitive types as long as there is no other
# deserialization involved, and we are passing these into protobuf
# primitive types.
try:
artifact_value = rdf_artifacts.Artifact(**artifact_dict)
valid_artifacts.append(artifact_value)
except (TypeError, AttributeError, type_info.TypeValueError) as e:
name = artifact_dict.get("name")
raise rdf_artifacts.ArtifactDefinitionError(
name, "invalid definition", cause=e)
return valid_artifacts
def _LoadArtifactsFromFiles(self, file_paths, overwrite_if_exists=True):
"""Load artifacts from file paths as json or yaml."""
loaded_files = []
loaded_artifacts = []
for file_path in file_paths:
try:
with io.open(file_path, mode="r", encoding="utf-8") as fh:
logging.debug("Loading artifacts from %s", file_path)
for artifact_val in self.ArtifactsFromYaml(fh.read()):
self.RegisterArtifact(
artifact_val,
source="file:%s" % file_path,
overwrite_if_exists=overwrite_if_exists)
loaded_artifacts.append(artifact_val)
logging.debug("Loaded artifact %s from %s", artifact_val.name,
file_path)
loaded_files.append(file_path)
except (IOError, OSError) as e:
logging.error("Failed to open artifact file %s. %s", file_path, e)
except rdf_artifacts.ArtifactDefinitionError as e:
logging.error("Invalid artifact found in file %s with error: %s",
file_path, e)
raise
# Once all artifacts are loaded we can validate.
for artifact_value in loaded_artifacts:
Validate(artifact_value)
@utils.Synchronized
def ClearSources(self):
self._sources.Clear()
self._dirty = True
@utils.Synchronized
def AddFileSource(self, filename):
self._dirty |= self._sources.AddFile(filename)
@utils.Synchronized
def AddDirSource(self, dirname):
self._dirty |= self._sources.AddDir(dirname)
@utils.Synchronized
def AddDirSources(self, dirnames):
for dirname in dirnames:
self.AddDirSource(dirname)
@utils.Synchronized
def AddDefaultSources(self):
for path in config.CONFIG["Artifacts.artifact_dirs"]:
self.AddDirSource(path)
@utils.Synchronized
def RegisterArtifact(self,
artifact_rdfvalue,
source="datastore",
overwrite_if_exists=False,
overwrite_system_artifacts=False):
"""Registers a new artifact."""
artifact_name = artifact_rdfvalue.name
if artifact_name in self._artifacts:
if not overwrite_if_exists:
details = "artifact already exists and `overwrite_if_exists` is unset"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
elif not overwrite_system_artifacts:
artifact_obj = self._artifacts[artifact_name]
if not artifact_obj.loaded_from.startswith("datastore:"):
# This artifact was not uploaded to the datastore but came from a
# file, refuse to overwrite.
details = "system artifact cannot be overwritten"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
# Preserve where the artifact was loaded from to help debugging.
artifact_rdfvalue.loaded_from = source
# Clear any stale errors.
artifact_rdfvalue.error_message = None
self._artifacts[artifact_rdfvalue.name] = artifact_rdfvalue
@utils.Synchronized
def UnregisterArtifact(self, artifact_name):
try:
del self._artifacts[artifact_name]
except KeyError:
raise ValueError("Artifact %s unknown." % artifact_name)
@utils.Synchronized
def ClearRegistry(self):
self._artifacts = {}
self._dirty = True
def _ReloadArtifacts(self):
"""Load artifacts from all sources."""
self._artifacts = {}
self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
self.ReloadDatastoreArtifacts()
def _UnregisterDatastoreArtifacts(self):
"""Remove artifacts that came from the datastore."""
to_remove = []
for name, artifact in self._artifacts.items():
if artifact.loaded_from.startswith("datastore"):
to_remove.append(name)
for key in to_remove:
self._artifacts.pop(key)
@utils.Synchronized
def ReloadDatastoreArtifacts(self):
# Make sure artifacts deleted by the UI don't reappear.
self._UnregisterDatastoreArtifacts()
self._LoadArtifactsFromDatastore()
def _CheckDirty(self, reload_datastore_artifacts=False):
if self._dirty:
self._dirty = False
self._ReloadArtifacts()
else:
if reload_datastore_artifacts:
self.ReloadDatastoreArtifacts()
@utils.Synchronized
def GetArtifacts(self,
os_name=None,
name_list=None,
source_type=None,
exclude_dependents=False,
provides=None,
reload_datastore_artifacts=False):
"""Retrieve artifact classes with optional filtering.
All filters must match for the artifact to be returned.
Args:
os_name: string to match against supported_os
name_list: list of strings to match against artifact names
source_type: rdf_artifacts.ArtifactSource.SourceType to match against
source_type
exclude_dependents: if true only artifacts with no dependencies will be
returned
provides: return the artifacts that provide these dependencies
reload_datastore_artifacts: If true, the data store sources are queried
for new artifacts.
Returns:
list of artifacts matching filter criteria
"""
self._CheckDirty(reload_datastore_artifacts=reload_datastore_artifacts)
results = {}
for artifact in self._artifacts.values():
# artifact.supported_os = [] matches all OSes
if os_name and artifact.supported_os and (
os_name not in artifact.supported_os):
continue
if name_list and artifact.name not in name_list:
continue
if source_type:
source_types = [c.type for c in artifact.sources]
if source_type not in source_types:
continue
if exclude_dependents and GetArtifactPathDependencies(artifact):
continue
if not provides:
results[artifact.name] = artifact
else:
        # This needs to remain the last test; if it matches, the result is added
for provide_string in artifact.provides:
if provide_string in provides:
results[artifact.name] = artifact
break
return list(results.values())
@utils.Synchronized
def GetRegisteredArtifactNames(self):
return [str(x) for x in self._artifacts]
@utils.Synchronized
def GetArtifact(self, name):
"""Get artifact by name.
Args:
name: artifact name string.
Returns:
artifact object.
Raises:
ArtifactNotRegisteredError: if artifact doesn't exist in the registry.
"""
self._CheckDirty()
result = self._artifacts.get(name)
if not result:
raise rdf_artifacts.ArtifactNotRegisteredError(
"Artifact %s missing from registry. You may need to sync the "
"artifact repo by running make in the artifact directory." % name)
return result
@utils.Synchronized
def GetArtifactNames(self, *args, **kwargs):
return set([a.name for a in self.GetArtifacts(*args, **kwargs)])
@utils.Synchronized
def SearchDependencies(self,
os_name,
artifact_name_list,
existing_artifact_deps=None,
existing_expansion_deps=None):
"""Return a set of artifact names needed to fulfill dependencies.
Search the path dependency tree for all artifacts that can fulfill
dependencies of artifact_name_list. If multiple artifacts provide a
dependency, they are all included.
Args:
os_name: operating system string
artifact_name_list: list of artifact names to find dependencies for.
existing_artifact_deps: existing dependencies to add to, for recursion,
e.g. set(["WindowsRegistryProfiles", "WindowsEnvironmentVariablePath"])
existing_expansion_deps: existing expansion dependencies to add to, for
recursion, e.g. set(["users.userprofile", "users.homedir"])
Returns:
(artifact_names, expansion_names): a tuple of sets, one with artifact
names, the other expansion names
"""
artifact_deps = existing_artifact_deps or set()
expansion_deps = existing_expansion_deps or set()
artifact_objs = self.GetArtifacts(
os_name=os_name, name_list=artifact_name_list)
artifact_deps = artifact_deps.union([a.name for a in artifact_objs])
for artifact in artifact_objs:
expansions = GetArtifactPathDependencies(artifact)
if expansions:
expansion_deps = expansion_deps.union(set(expansions))
# Get the names of the artifacts that provide those expansions
new_artifact_names = self.GetArtifactNames(
os_name=os_name, provides=expansions)
missing_artifacts = new_artifact_names - artifact_deps
if missing_artifacts:
# Add those artifacts and any child dependencies
new_artifacts, new_expansions = self.SearchDependencies(
os_name,
new_artifact_names,
existing_artifact_deps=artifact_deps,
existing_expansion_deps=expansion_deps)
artifact_deps = artifact_deps.union(new_artifacts)
expansion_deps = expansion_deps.union(new_expansions)
return artifact_deps, expansion_deps
@utils.Synchronized
def DumpArtifactsToYaml(self, sort_by_os=True):
"""Dump a list of artifacts into a yaml string."""
artifact_list = self.GetArtifacts()
if sort_by_os:
      # Sort so it's easier to split these if necessary.
yaml_list = []
for os_name in rdf_artifacts.Artifact.SUPPORTED_OS_LIST:
done = {a.name: a for a in artifact_list if a.supported_os == [os_name]}
# Separate into knowledge_base and non-kb for easier sorting.
done_sorted = list(sorted(done.values(), key=lambda x: x.name))
yaml_list.extend(x.ToYaml() for x in done_sorted if x.provides)
yaml_list.extend(x.ToYaml() for x in done_sorted if not x.provides)
artifact_list = [a for a in artifact_list if a.name not in done]
yaml_list.extend(x.ToYaml() for x in artifact_list) # The rest.
else:
yaml_list = [x.ToYaml() for x in artifact_list]
return "---\n\n".join(yaml_list)
REGISTRY = ArtifactRegistry()
def DeleteArtifactsFromDatastore(artifact_names, reload_artifacts=True):
"""Deletes a list of artifacts from the data store."""
artifacts_list = REGISTRY.GetArtifacts(
reload_datastore_artifacts=reload_artifacts)
to_delete = set(artifact_names)
deps = set()
for artifact_obj in artifacts_list:
if artifact_obj.name in to_delete:
continue
if GetArtifactDependencies(artifact_obj) & to_delete:
deps.add(str(artifact_obj.name))
if deps:
raise ValueError(
"Artifact(s) %s depend(s) on one of the artifacts to delete." %
(",".join(deps)))
found_artifact_names = set()
for artifact_value in artifacts_list:
if artifact_value.name in to_delete:
found_artifact_names.add(artifact_value.name)
if len(found_artifact_names) != len(to_delete):
not_found = to_delete - found_artifact_names
raise ValueError("Artifact(s) to delete (%s) not found." %
",".join(not_found))
for artifact_name in to_delete:
data_store.REL_DB.DeleteArtifact(str(artifact_name))
REGISTRY.UnregisterArtifact(artifact_name)
def ValidateSyntax(rdf_artifact):
"""Validates artifact syntax.
This method can be used to validate individual artifacts as they are loaded,
without needing all artifacts to be loaded first, as for Validate().
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactSyntaxError: If artifact syntax is invalid.
"""
if not rdf_artifact.doc:
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "missing doc")
for supp_os in rdf_artifact.supported_os:
valid_os = rdf_artifact.SUPPORTED_OS_LIST
if supp_os not in valid_os:
detail = "invalid `supported_os` ('%s' not in %s)" % (supp_os, valid_os)
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
for condition in rdf_artifact.conditions:
# FIXME(hanuszczak): It does not look like the code below can throw
# `ConditionException`. Do we really need it then?
try:
of = objectfilter.Parser(condition).Parse()
of.Compile(objectfilter.BaseFilterImplementation)
except rdf_artifacts.ConditionError as e:
detail = "invalid condition '%s'" % condition
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail, e)
# Anything listed in provides must be defined in the KnowledgeBase
valid_provides = rdf_client.KnowledgeBase().GetKbFieldNames()
for kb_var in rdf_artifact.provides:
if kb_var not in valid_provides:
detail = "broken `provides` ('%s' not in %s)" % (kb_var, valid_provides)
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
# Any %%blah%% path dependencies must be defined in the KnowledgeBase
for dep in GetArtifactPathDependencies(rdf_artifact):
if dep not in valid_provides:
detail = "broken path dependencies ('%s' not in %s)" % (dep,
valid_provides)
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
for source in rdf_artifact.sources:
try:
source.Validate()
except rdf_artifacts.ArtifactSourceSyntaxError as e:
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "bad source", e)
def ValidateDependencies(rdf_artifact):
"""Validates artifact dependencies.
This method checks whether all dependencies of the artifact are present
and contain no errors.
This method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDependencyError: If a dependency is missing or contains errors.
"""
for dependency in GetArtifactDependencies(rdf_artifact):
try:
dependency_obj = REGISTRY.GetArtifact(dependency)
except rdf_artifacts.ArtifactNotRegisteredError as e:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "missing dependency", cause=e)
message = dependency_obj.error_message
if message:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "dependency error", cause=message)
def Validate(rdf_artifact):
"""Attempts to validate the artifact has been well defined.
This checks both syntax and dependencies of the artifact. Because of that,
this method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDefinitionError: If artifact is invalid.
"""
ValidateSyntax(rdf_artifact)
ValidateDependencies(rdf_artifact)
def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):
"""Return a set of artifact dependencies.
Args:
rdf_artifact: RDF object artifact.
recursive: If True recurse into dependencies to find their dependencies.
depth: Used for limiting recursion depth.
Returns:
A set of strings containing the dependent artifact names.
Raises:
RuntimeError: If maximum recursion depth reached.
"""
deps = set()
for source in rdf_artifact.sources:
# ARTIFACT is the legacy name for ARTIFACT_GROUP
# per: https://github.com/ForensicArtifacts/artifacts/pull/143
# TODO(user): remove legacy support after migration.
if source.type in (rdf_artifacts.ArtifactSource.SourceType.ARTIFACT,
rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP):
if source.attributes.GetItem("names"):
deps.update(source.attributes.GetItem("names"))
if depth > 10:
raise RuntimeError("Max artifact recursion depth reached.")
deps_set = set(deps)
if recursive:
for dep in deps:
artifact_obj = REGISTRY.GetArtifact(dep)
new_dep = GetArtifactDependencies(artifact_obj, True, depth=depth + 1)
if new_dep:
deps_set.update(new_dep)
return deps_set
# TODO(user): Add tests for this and for all other Get* functions in this
# package.
def GetArtifactsDependenciesClosure(name_list, os_name=None):
"""For all the artifacts in the list returns them and their dependencies."""
artifacts = {
a.name: a
for a in REGISTRY.GetArtifacts(os_name=os_name, name_list=name_list)
}
dep_names = set()
for art in artifacts.values():
dep_names.update(GetArtifactDependencies(art, recursive=True))
if dep_names:
for dep in REGISTRY.GetArtifacts(os_name=os_name, name_list=dep_names):
artifacts[dep.name] = dep
return list(artifacts.values())
def GetArtifactPathDependencies(rdf_artifact):
"""Return a set of knowledgebase path dependencies.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
deps = set()
for source in rdf_artifact.sources:
for arg, value in source.attributes.items():
paths = []
if arg in ["path", "query"]:
paths.append(value)
if arg == "key_value_pairs":
# This is a REGISTRY_VALUE {key:blah, value:blah} dict.
paths.extend([x["key"] for x in value])
if arg in ["keys", "paths", "path_list", "content_regex_list"]:
paths.extend(value)
for path in paths:
for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
deps.add(match.group()[2:-2]) # Strip off %%.
deps.update(GetArtifactParserDependencies(rdf_artifact))
return deps
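
# Illustrative sketch (not part of the original module): the %%...%% markers
# mentioned above can be pulled out with a simple regular expression. The exact
# pattern lives in artifact_utils.INTERPOLATED_REGEX and may differ from this
# assumption:
#   import re
#   re.findall(r"%%([^%]+?)%%", r"%%users.appdata%%\Microsoft\Windows")
#   # -> ['users.appdata']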
def GetArtifactParserDependencies(rdf_artifact):
"""Return the set of knowledgebase path dependencies required by the parser.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
factory = parsers.ArtifactParserFactory(str(rdf_artifact.name))
deps = set()
for p in factory.AllParserTypes():
deps.update(p.knowledgebase_dependencies)
return deps
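
# Illustrative usage sketch (not part of the original module): point the global
# registry at a directory of artifact definitions and query it, using only
# names defined above; the directory path is a placeholder.
#   REGISTRY.AddDirSource("/path/to/artifact/definitions")
#   for artifact in REGISTRY.GetArtifacts(os_name="Windows"):
#     print(artifact.name, GetArtifactPathDependencies(artifact))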
| 34.846479
| 80
| 0.697506
|
c7d3528c0d1bf91af1c30efa7ad6ddaeccc24703
| 203
|
py
|
Python
|
Python/Kivy/Kivy Testing 2/main.py
|
vaibhavkrkm/Mini-Projects
|
13cc3c6ee551aff5af48a8e9000816fc8767580f
|
[
"MIT"
] | null | null | null |
Python/Kivy/Kivy Testing 2/main.py
|
vaibhavkrkm/Mini-Projects
|
13cc3c6ee551aff5af48a8e9000816fc8767580f
|
[
"MIT"
] | null | null | null |
Python/Kivy/Kivy Testing 2/main.py
|
vaibhavkrkm/Mini-Projects
|
13cc3c6ee551aff5af48a8e9000816fc8767580f
|
[
"MIT"
] | null | null | null |
# float layout
import kivy
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
class MyApp(App):
def build(self):
return FloatLayout()
if __name__ == '__main__':
MyApp().run()
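
# A slightly fuller sketch (illustrative, not part of the original file):
# widgets added to a FloatLayout are placed with size_hint/pos_hint, e.g.
#   from kivy.uix.button import Button
#   layout = FloatLayout()
#   layout.add_widget(Button(text="hello", size_hint=(0.3, 0.2),
#                            pos_hint={"center_x": 0.5, "center_y": 0.5}))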
| 13.533333
| 44
| 0.729064
|
57fcdf88320482f81c108e0bd02b2d133a550005
| 4,814
|
py
|
Python
|
test/scripts/test_strands.py
|
fujiehuang/ecto
|
fea744337aa1fad1397c9a3ba5baa143993cb5eb
|
[
"BSD-3-Clause"
] | null | null | null |
test/scripts/test_strands.py
|
fujiehuang/ecto
|
fea744337aa1fad1397c9a3ba5baa143993cb5eb
|
[
"BSD-3-Clause"
] | null | null | null |
test/scripts/test_strands.py
|
fujiehuang/ecto
|
fea744337aa1fad1397c9a3ba5baa143993cb5eb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto, sys
import ecto.ecto_test as ecto_test
from ecto.test import test
#@test
#def test_strand_basic_semantics():
# s = ecto.Strand()
# print "s.id =", s.id
# orig_id = s.id
#
# c = ecto_test.DontCallMeFromTwoThreads("CRASHY", strand=s)
# c2 = ecto_test.DontCallMeFromTwoThreads("CRASHY2", strand=s)
# c3 = ecto_test.DontCallMeFromTwoThreads("CRASHY3", strand=s)
# p = ecto.Plasm()
# gen = ecto_test.Generate("GENERATE", step=1.0, start=1.0)
# p.connect(gen[:] >> c[:])
# p.connect(c[:] >> c2[:])
# p.connect(c2[:] >> c3[:])
# sched = ecto.schedulers.Multithreaded(p)
# sched.execute(10)
@test
def test_user_defined_strands(nlevels, SchedType, execfn, expect):
s1 = ecto.Strand()
s2 = s1
s3 = ecto.Strand()
print("s1.id ==", s1.id)
print("s2.id ==", s2.id)
print("s3.id ==", s3.id)
assert s1.id == s2.id
assert s3.id != s2.id
assert s3.id != s1.id
plasm = ecto.Plasm()
# plasm.movie_out("strands_%03d.dot")
gen = ecto_test.Generate("GENERATE", step=1.0, start=1.0)
noncurr = ecto_test.DontCallMeFromTwoThreads("ALPHA", strand=s1)
plasm.connect(gen[:] >> noncurr[:])
for k in range(nlevels):
n = ecto_test.DontCallMeFromTwoThreads("BETA_%d" % k, strand=s2)
plasm.connect(noncurr[:] >> n[:])
noncurr = n
printer = ecto_test.Printer("PRINTER")
plasm.connect(noncurr[:] >> printer[:])
sched = SchedType(plasm)
print("sched=", sched)
execfn(sched)
result = noncurr.outputs.out
print("result=", result, "expect=", expect)
assert(result == expect)
# execfn(sched)
# result = noncurr.outputs.out
# print "result=", result
@test
def test_implicit_strands(nlevels, SchedType, execfn, expect):
plasm = ecto.Plasm()
gen = ecto_test.Generate(step=1.0, start=1.0)
noncurr = ecto_test.CantCallMeFromTwoThreads()
plasm.connect(gen, "out", noncurr, "in")
for k in range(nlevels):
next = ecto_test.CantCallMeFromTwoThreads()
plasm.connect(noncurr, "out", next, "in")
noncurr = next
printer = ecto_test.Printer()
plasm.connect(noncurr, "out", printer, "in")
sched = SchedType(plasm)
print("sched=", sched)
execfn(sched)
result = noncurr.outputs.out
print("result=", result)
assert(result == expect)
@test
def shouldfail():
plasm = ecto.Plasm()
gen = ecto_test.Generate(step=1.0, start=1.0)
nc1 = ecto_test.DontCallMeFromTwoThreads()
plasm.connect(gen, "out", nc1, "in")
nc2 = ecto_test.DontCallMeFromTwoThreads()
plasm.connect(nc1, "out", nc2, "in")
printer = ecto_test.Printer()
plasm.connect(nc2, "out", printer, "in")
sched = ecto.Scheduler(plasm)
try:
print("about to execute... this should throw")
sched.execute(niter=4)
        assert False, "execute() should have raised"  # original called util.fail(), but `util` is not imported here
except RuntimeError as e:
print("good, python caught error", e)
sched.stop()
sched.wait()
#test_strand_basic_semantics()
#shouldfail()
#print "shouldfail passed"
test_implicit_strands(4, ecto.Scheduler, lambda s: s.execute(niter=4), expect=4.0)
test_user_defined_strands(4, ecto.Scheduler, lambda s: s.execute(niter=16), expect=16.0)
| 32.308725
| 88
| 0.680723
|
32e97e6384e6419f473ef60d96011dea1a2b8d8f
| 2,563
|
py
|
Python
|
evennia/server/portal/mccp.py
|
meistermuka/evennia
|
45e0785c8e3a10e99ea97c8b26648ea3b01c656a
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/server/portal/mccp.py
|
meistermuka/evennia
|
45e0785c8e3a10e99ea97c8b26648ea3b01c656a
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/server/portal/mccp.py
|
meistermuka/evennia
|
45e0785c8e3a10e99ea97c8b26648ea3b01c656a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
MCCP - Mud Client Compression Protocol
This implements the MCCP v2 telnet protocol as per
http://tintin.sourceforge.net/mccp/. MCCP allows for the server to
compress data when sending to supporting clients, reducing bandwidth
by 70-90%. The compression is done using Python's builtin zlib
library. If the client doesn't support MCCP, the server sends uncompressed
data as normal. Note: On modern hardware you are not likely to notice the
effect of MCCP unless you have extremely heavy traffic or sit on a
terribly slow connection.
This protocol is implemented by the telnet protocol importing
mccp_compress and calling it from its write methods.
"""
import zlib
# negotiations for v1 and v2 of the protocol
MCCP = chr(86)
FLUSH = zlib.Z_SYNC_FLUSH
def mccp_compress(protocol, data):
"""
Handles zlib compression, if applicable.
Args:
data (str): Incoming data to compress.
Returns:
stream (binary): Zlib-compressed data.
"""
if hasattr(protocol, 'zlib'):
return protocol.zlib.compress(data) + protocol.zlib.flush(FLUSH)
return data
class Mccp(object):
"""
Implements the MCCP protocol. Add this to a
variable on the telnet protocol to set it up.
"""
def __init__(self, protocol):
"""
initialize MCCP by storing protocol on
ourselves and calling the client to see if
it supports MCCP. Sets callbacks to
start zlib compression in that case.
Args:
protocol (Protocol): The active protocol instance.
"""
self.protocol = protocol
self.protocol.protocol_flags['MCCP'] = False
# ask if client will mccp, connect callbacks to handle answer
self.protocol.will(MCCP).addCallbacks(self.do_mccp, self.no_mccp)
def no_mccp(self, option):
"""
Called if client doesn't support mccp or chooses to turn it off.
Args:
option (Option): Option dict (not used).
"""
if hasattr(self.protocol, 'zlib'):
del self.protocol.zlib
self.protocol.protocol_flags['MCCP'] = False
self.protocol.handshake_done()
def do_mccp(self, option):
"""
The client supports MCCP. Set things up by
creating a zlib compression stream.
Args:
option (Option): Option dict (not used).
"""
self.protocol.protocol_flags['MCCP'] = True
self.protocol.requestNegotiation(MCCP, '')
self.protocol.zlib = zlib.compressobj(9)
self.protocol.handshake_done()
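
# Illustrative usage sketch (not part of the original module): as the module
# docstring notes, the telnet protocol routes its writes through
# mccp_compress(). A hypothetical write method on such a protocol might look
# like this (everything except `mccp_compress` is an assumption):
#   def write(self, data):
#       self.transport.write(mccp_compress(self, data))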
| 28.477778
| 73
| 0.663675
|
495987829a3fad4072dd34a453dff579d375810d
| 837
|
py
|
Python
|
build/scripts/gen_py_reg.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 33
|
2016-12-15T21:47:13.000Z
|
2020-10-27T23:53:59.000Z
|
build/scripts/gen_py_reg.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | null | null | null |
build/scripts/gen_py_reg.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 14
|
2016-12-28T17:00:33.000Z
|
2022-01-16T20:15:27.000Z
|
import sys
template = '''
extern "C" void RegisterPythonModule(const char* name, void (*fn)(void));
extern "C" void {1}();
namespace {
struct TRegistrar {
inline TRegistrar() {
RegisterPythonModule("{0}", {1});
}
} REG;
}
'''
def mangle(name):
if '.' not in name:
return name
return ''.join('{}{}'.format(len(s), s) for s in name.split('.'))
if __name__ == '__main__':
if len(sys.argv) != 3:
print >>sys.stderr, 'Usage: <path/to/gen_py_reg.py> <python_module_name> <output_file>'
print >>sys.stderr, 'Passed: ' + ' '.join(sys.argv)
sys.exit(1)
with open(sys.argv[2], 'w') as f:
modname = sys.argv[1]
initname = 'init' + mangle(modname)
code = template.replace('{0}', modname).replace('{1}', initname)
f.write(code)
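
# Example (illustrative, not part of the original script): for a module named
# "foo.bar", mangle() yields "3foo3bar", so running
#   python gen_py_reg.py foo.bar foo_bar_reg.cpp
# writes a TRegistrar that calls RegisterPythonModule("foo.bar", init3foo3bar).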
| 25.363636
| 95
| 0.55914
|
76bf69a181ad6b3393ba197a4aa15d17084c4be1
| 9,863
|
py
|
Python
|
torch/fx/experimental/fx2trt/example/fx2trt_example.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 3
|
2020-11-06T22:02:36.000Z
|
2021-06-25T13:50:47.000Z
|
torch/fx/experimental/fx2trt/example/fx2trt_example.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2021-04-22T18:37:42.000Z
|
2021-04-28T00:53:25.000Z
|
torch/fx/experimental/fx2trt/example/fx2trt_example.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2021-10-05T07:05:26.000Z
|
2021-10-05T07:05:26.000Z
|
from typing import Tuple, Dict, Callable, Any
import torch
import torch.fx
import torchvision.models as models
import torch.fx.passes.splitter_base as splitter_base
import torch.fx.passes.operator_support as op_support
import torch.fx.passes.net_min_base as net_min_base
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
# The purpose of this example is to demonstrate the overall flow of lowering a PyTorch
# model to TensorRT via FX with existing FX based tooling. The general lowering flow
# would be like:
#
# 1. Use splitter to split the model if there're ops in the model that we don't want to
# lower to TensorRT for some reasons like the ops are not supported in TensorRT or
# running them on other backends provides better performance.
# 2. Lower the model (or part of the model if splitter is used) to TensorRT via fx2trt.
#
# For this example, we use ResNet18 as example model and split out the linear layer to
# not run on TensorRT just to demonstrate how the splitter works. At the end of this
# example we did a benchmark for a model (named `split_mod`) with all the ops running
# on TensorRT execpt linear layer running on PyTorch Cuda versus a model (named `rn18`)
# fully on PyTorch Cuda.
def lower_mod_to_trt(mod: torch.fx.GraphModule, inputs: Tuple[torch.Tensor]):
"""
Helper function that given a GraphModule `mod` and its `inputs`, build a
TRTModule that runs the original `mod` on TensorRT.
"""
interp = TRTInterpreter(mod, InputTensorSpec.from_tensors(inputs))
engine, input_names, output_names = interp.run(*inputs)
return TRTModule(engine, input_names, output_names)
class OpSupport(op_support.OperatorSupport):
"""
This class is used by splitter to determine which nodes are supported, i.e.
should be split to the accelerator part (TensorRT).
"""
def is_node_supported(
self, submodules: Dict[str, torch.nn.Module], node: torch.fx.Node
):
"""
Here we want linear layer to not run on TensorRT. Thus, we return
False for linear layer and True for all other ops.
"""
target = op_support.get_node_target(submodules, node)
if target == "torch.nn.modules.linear.Linear":
return False
return True
class TensorRTMinimizer(net_min_base._MinimizerBase):
"""
Need to define a Minimizer class for TensorRT because it's used in Splitter.
"""
def __init__(
self,
module: torch.fx.GraphModule,
sample_input: Tuple[torch.Tensor],
compare_fn: Callable[[Any, Any, Any], Tuple[float, bool]],
settings: net_min_base._MinimizerSettingBase = None,
):
if settings is None:
settings = net_min_base._MinimizerSettingBase()
super().__init__(module, sample_input, compare_fn, settings)
def run_a(self, mod, inputs):
"""
        The output of this function serves as a reference.
"""
mod.eval()
with torch.no_grad():
return mod(*inputs)
def run_b(self, mod, inputs):
"""
Here we actually run mod on TensorRT return TensorRT result.
"""
mod.eval()
try:
mod = lower_mod_to_trt(mod, inputs)
output = mod(*inputs)
except RuntimeError as e:
raise net_min_base.FxNetMinimizerRunFuncError(
f"Encounter an error when processing \n{mod.graph}\n {e}"
)
else:
return output
# This in the future will be a global TensorRTSplitter and we don't need to create
# it per example.
class TensorRTSplitter(splitter_base._SplitterBase):
"""
Splitter for TensorRT.
"""
def __init__(
self,
module: torch.fx.GraphModule,
sample_input: Tuple[torch.Tensor],
operator_support: op_support.OperatorSupport = None,
settings: splitter_base._SplitterSettingBase = None
):
if not operator_support:
operator_support = op_support.OperatorSupport()
if not settings:
settings = splitter_base._SplitterSettingBase()
settings.allow_non_tensor = True
settings.skip_fusion = True
super().__init__(module, sample_input, operator_support, settings)
def _lower_model_to_backend(self, mod, inputs):
"""
Lower a GraphModule `mod` to TensorRT with `inputs`.
"""
mod = lower_mod_to_trt(mod, inputs)
return mod
def _find_culprit(self, mod, inputs):
"""
This function serves the preview functionality in Splitter. When previewing
        the splitting result, if something goes wrong while lowering the model to TensorRT
or running a TensorRT model, this function will be called to find any culprit
that is responsible for the error.
"""
# Since we don't care about accuracy here, we pass in a dummy compare function.
minimizer = TensorRTMinimizer(mod, inputs, lambda a, b, c: (1, True))
minimizer.settings.traverse_method = "sequential"
minimizer.settings.find_all = True
culprits = minimizer.minimize()
if len(culprits) == 0:
reports = "Unable to find a culprit!\n"
else:
reports = "Found some problematic nodes:\n"
for node in culprits:
reports += f"{node.format_node()}\n"
return reports
if __name__ == "__main__":
# Create ResNet18 `rn18` and inputs `x`
rn18 = models.resnet18().eval().cuda()
x = torch.randn(5, 3, 224, 224, device="cuda")
# Trace the model with FX.
traced_rn18 = torch.fx.symbolic_trace(rn18)
# Create a splitter which takes in traced ResNet18.
splitter = TensorRTSplitter(traced_rn18, (x,), OpSupport())
# node_support_preview() shows the details of node supporting information based
    # on the OpSupport class we created.
#
# In the output, we have supported node types
# and unsupported node types. Nodes in the model with supported types will be
# split into accelerator submodules while nodes with unsupported types will be
# split into cpu submodules.
splitter.node_support_preview()
"""
output:
Supported node types in the model:
torch.nn.modules.conv.Conv2d: ((torch.float32,), {})
torch.nn.modules.batchnorm.BatchNorm2d: ((torch.float32,), {})
torch.nn.modules.activation.ReLU: ((torch.float32,), {})
torch.nn.modules.pooling.MaxPool2d: ((torch.float32,), {})
_operator.add: ((torch.float32, torch.float32), {})
torch.nn.modules.pooling.AdaptiveAvgPool2d: ((torch.float32,), {})
torch.flatten: ((torch.float32,), {})
Unsupported node types in the model:
torch.nn.modules.linear.Linear: ((torch.float32,), {})
"""
# split_preview() shows the details of how the model looks like after split.
# And for every accelerator module in the split model, it would run a check
    # by lowering and running the module. If any error is caught during the
# checking process, it will try to find which nodes are causing the trouble
# here with minimizer.
#
# Notice that after split, the model will have some submodules called either
# `_run_on_acc_{}` or `_run_on_cpu_{}`. We have all the supported nodes in
# `_run_on_acc_{}` modules and all other nodes in `_run_on_cpu_{}` modules.
#
# In the output, we can see it estimates the max qps based on PCIe bandwidth,
    # this is something we need to consider when lowering to accelerator chips,
# because the data will be flowing between cpu and accelerator which might not
# matter in GPU case.
splitter.split_preview()
"""
output:
Before removing small acc subgraphs, total 2 subgraphs are created: 1 acc subgraphs and 1 cpu subgraphs.
After removing small acc subgraphs, total 2 subgraphs are created: 1 acc subgraphs and 1 cpu subgraphs.
_run_on_acc_0: 68 node(s)
_run_on_cpu_1: 1 node(s)
Processing acc submodule _run_on_acc_0
Checking inputs...
Checking outputs...
Total input size in bytes is 3010560, total output size in bytes is 10240, theoretical max qps (bounds by PCIe bandwidth)
for this submodule is 35665.85034013606.
Lowering and running succeed!
Theoretical max qps (bounds by PCIe bandwidth) for this model is 35665.85034013606, bottleneck is submodule _run_on_acc_0.
"""
# After split we have two submodules, one is `_run_on_acc_0` and one is `_run_on_cpu_1`.
# We have only one op in `_run_on_cpu_1` which is a linear layer while all other ops are
# in `_run_on_acc_0`.
split_mod = splitter()
print(split_mod.graph)
"""
output:
graph():
%x : torch.Tensor [#users=1] = placeholder[target=x]
%_run_on_acc_0 : [#users=1] = call_module[target=_run_on_acc_0](args = (%x,), kwargs = {})
%_run_on_cpu_1 : [#users=1] = call_module[target=_run_on_cpu_1](args = (%_run_on_acc_0,), kwargs = {})
return _run_on_cpu_1
"""
# We want to lower _run_on_acc_0 to TensorRT.
split_mod._run_on_acc_0 = lower_mod_to_trt(split_mod._run_on_acc_0, (x,)) # type: ignore[arg-type]
# Assert results are equal with the original model.
rn18 = rn18.cuda()
torch.testing.assert_close(split_mod(x), rn18(x))
import time
NITER = 100
s = time.time()
for _ in range(NITER):
split_mod(x)
torch.cuda.synchronize()
print('trt time (ms/iter)', (time.time() - s) / NITER * 1000)
"""
output:
trt time (ms/iter) 1.978142261505127
"""
s = time.time()
for _ in range(NITER):
rn18(x)
torch.cuda.synchronize()
print('stock PyTorch time (ms/iter)', (time.time() - s) / NITER * 1000)
"""
output:
stock PyTorch time (ms/iter) 3.8208484649658203
"""
| 37.218868
| 126
| 0.671297
|
21167b6c2b3e140f98510021c7bbdc16854e9fe7
| 4,064
|
py
|
Python
|
recognize.py
|
alper111/affordance-learning
|
21b70f689a8299c6af7cd4ed763fc3133cf1681f
|
[
"MIT"
] | 3
|
2020-12-15T07:10:15.000Z
|
2021-06-10T20:15:33.000Z
|
recognize.py
|
alper111/DeepSym
|
21b70f689a8299c6af7cd4ed763fc3133cf1681f
|
[
"MIT"
] | 1
|
2020-06-10T09:51:44.000Z
|
2020-06-10T09:51:44.000Z
|
recognize.py
|
alper111/affordance-learning
|
21b70f689a8299c6af7cd4ed763fc3133cf1681f
|
[
"MIT"
] | 1
|
2020-04-02T08:20:21.000Z
|
2020-04-02T08:20:21.000Z
|
import os
import argparse
import rospy
import yaml
import numpy as np
import torch
from models import EffectRegressorMLP
import data
import utils
from simtools.rosutils import RosNode
parser = argparse.ArgumentParser("Make plan.")
parser.add_argument("-opts", help="option file", type=str, required=True)
parser.add_argument("-goal", help="goal state", type=str, default="(H3) (S0)")
parser.add_argument("-uri", help="master uri", type=str, default="http://localhost:11311")
args = parser.parse_args()
opts = yaml.safe_load(open(args.opts, "r"))
device = torch.device(opts["device"])
node = RosNode("recognize_scene", args.uri)
node.stopSimulation()
rospy.sleep(1.0)
node.startSimulation()
rospy.sleep(1.0)
model = EffectRegressorMLP(opts)
model.load(opts["save"], "_best", 1)
model.load(opts["save"], "_best", 2)
model.encoder1.eval()
model.encoder2.eval()
# Homogeneous transformation matrix
H = torch.load("H.pt")
# GENERATE A RANDOM SCENE
NUM_OBJECTS = 5
objTypes = np.random.randint(1, 6, (NUM_OBJECTS, ))
objSizes = np.random.uniform(1.0, 2, (5, )).tolist()
locations = np.array([
[-0.69, -0.09],
[-0.9, -0.35],
[-0.45, 0.175],
[-0.45, -0.35],
[-0.9, 0.175]
])
locations = locations[np.random.permutation(5)]
locations = locations[:NUM_OBJECTS].tolist()
for i in range(NUM_OBJECTS):
node.generateObject(objTypes[i], objSizes[i], locations[i]+[objSizes[i]*0.05+0.7])
rospy.sleep(1.0)
locations = torch.tensor(locations, dtype=torch.float)
x = torch.tensor(node.getDepthImage(8), dtype=torch.float)
objs, locs, _ = utils.find_objects(x, opts["size"])
transform = data.default_transform(size=opts["size"], affine=False, mean=0.279, std=0.0094)
for i, o in enumerate(objs):
objs[i] = transform(o)[0]
objs = objs.to(device)
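# Map detected pixel locations to plane coordinates via the homography H:
# append 1 for homogeneous coordinates, apply H, normalize by the last
# coordinate, then match each detection to the nearest known spawn location.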
locs = torch.cat([locs.float(), torch.ones(locs.shape[0], 1, device=locs.device)], dim=1)
locs = torch.matmul(locs, H.T)
locs = locs / locs[:, 2].reshape(-1, 1)
_, indices = torch.cdist(locs[:, :2], locations).min(dim=1)
obj_infos = []
comparisons = []
with torch.no_grad():
for i, obj in enumerate(objs):
cat = model.encoder1(obj.unsqueeze(0).unsqueeze(0))
# TODO: this uses true location and size.
print("Category: (%d %d), Location: (%.5f %.5f)" % (cat[0, 0], cat[0, 1], locations[indices[i], 0], locations[indices[i], 1]))
info = {}
info["name"] = "O{}".format(i+1)
info["loc"] = (locations[indices[i], 0].item(), locations[indices[i], 1].item())
info["size"] = objSizes[indices[i]]*0.1
info["type"] = "objtype{}".format(utils.binary_to_decimal([int(cat[0, 0]), int(cat[0, 1])]))
obj_infos.append(info)
for j in range(len(objs)):
if i != j:
rel = model.encoder2(torch.stack([obj, objs[j]]).unsqueeze(0))[0, 0]
if rel == -1:
comparisons.append("(relation0 O%d O%d)" % (i+1, j+1))
else:
comparisons.append("(relation1 O%d O%d)" % (i+1, j+1))
print(obj_infos)
print(comparisons)
file_loc = os.path.join(opts["save"], "problem.pddl")
file_obj = os.path.join(opts["save"], "objects.txt")
if os.path.exists(file_loc):
os.remove(file_loc)
if os.path.exists(file_obj):
os.remove(file_obj)
print("(define (problem dom1) (:domain stack)", file=open(file_loc, "a"))
print(str(len(obj_infos)), file=open(file_obj, "a"))
object_str = "\t(:objects"
init_str = "\t(:init\n"
for obj_i in obj_infos:
print("%s %.5f %.5f %.5f" % (obj_i["name"], obj_i["loc"][0], obj_i["loc"][1], obj_i["size"]), file=open(file_obj, "a"))
object_str += " " + obj_i["name"]
init_str += "\t\t(pickloc " + obj_i["name"] + ") (" + obj_i["type"] + " " + obj_i["name"] + ")\n"
object_str += ")"
for c_i in comparisons:
init_str += "\t\t" + c_i + "\n"
init_str += "\t\t(H0)\n"
init_str += "\t\t(S0)\n"
init_str += "\t)"
goal_str = "\t(:goal (and %s (not (stacked)) (not (inserted))))\n)" % args.goal
print(object_str, file=open(file_loc, "a"))
print(init_str, file=open(file_loc, "a"))
print(goal_str, file=open(file_loc, "a"))
| 34.735043
| 134
| 0.634104
|
ee53779c161f78215c22846fdcff7510460a9125
| 910
|
py
|
Python
|
venv/lib/python3.7/site-packages/zope/contentprovider/tests.py
|
leanhvu86/matrix-server
|
6e16fc53dfebaeaf222ff5a371ccffcc65de3818
|
[
"Apache-2.0"
] | 1
|
2018-11-08T15:04:13.000Z
|
2018-11-08T15:04:13.000Z
|
venv/lib/python3.7/site-packages/zope/contentprovider/tests.py
|
leanhvu86/matrix-server
|
6e16fc53dfebaeaf222ff5a371ccffcc65de3818
|
[
"Apache-2.0"
] | 6
|
2016-03-24T07:50:43.000Z
|
2018-11-08T15:24:09.000Z
|
venv/lib/python3.7/site-packages/zope/contentprovider/tests.py
|
leanhvu86/matrix-server
|
6e16fc53dfebaeaf222ff5a371ccffcc65de3818
|
[
"Apache-2.0"
] | 1
|
2015-04-03T08:29:58.000Z
|
2015-04-03T08:29:58.000Z
|
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Content provider tests
"""
import unittest
from zope.contentprovider.interfaces import UpdateNotCalled
class TestExceptionHandling(unittest.TestCase):
def test(self):
try:
raise UpdateNotCalled
except UpdateNotCalled:
pass
| 32.5
| 78
| 0.610989
|
74c2f2ed976dcb38aa087b982c6205ac77a628c1
| 27,174
|
py
|
Python
|
data_neural_fuzz_pdf_obj.py
|
GordonShinozaki/iust_deep_fuzz
|
9a244839f4028ba70cb4e18e8db8e3e9ce81e6af
|
[
"MIT"
] | 33
|
2018-07-13T13:09:08.000Z
|
2022-03-17T14:18:12.000Z
|
data_neural_fuzz_pdf_obj.py
|
yasinbakhtiar/iust_deep_fuzz
|
191e7bdf31840bd7d5052885029189bb6ffb262d
|
[
"MIT"
] | null | null | null |
data_neural_fuzz_pdf_obj.py
|
yasinbakhtiar/iust_deep_fuzz
|
191e7bdf31840bd7d5052885029189bb6ffb262d
|
[
"MIT"
] | 10
|
2019-04-17T10:30:03.000Z
|
2022-03-18T12:10:12.000Z
|
"""
PDF OBJ 9
- New in version 9 data fuzz
-- Create test data for data fuzzing (i.e p_t = 0.50) and prefix update.
-- Date: 1397-04-17
- New in version 8
-- Fuzzing back to generate_and_fuzz method.
-- Perplexity and cross entropy add to metrics list.
-- Use some Keras backend to reset model graph and state.
-- Lets pdf_file_incremental_update_4.py call the generate_and_fuzz method.
- New in version 7
-- Use for bidirectional LSTM model, model=model9
- New in version 6
-- Train with 256 LSTM search, model=model_8
-- Train on large dataset for first time!
- New in version 5:
-- Data generator fixed.
-- Train on large dataset for first time!
- New in version 4:
-- Changing the data generator method for use with model.fit_generator()
- New in version 3:
-- Add support for training in large dataset with the help of python generators.
-- Add callbacks to log most of training time events.
-- File and directory now mange by code in appropriate manner for each train run.
-- Add class FileFormatFuzz to do learn and fuzz process in one script.
-- Note: The ability of training small dataset in memory with model.fit() method was include in version 3.
"""
from __future__ import print_function
__version__ = '0.9.1'
__author__ = 'Morteza'
import sys
import os
import datetime
import random
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.optimizers import RMSprop, Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, CSVLogger, LambdaCallback
from keras.utils import plot_model
import pdf_object_preprocess as preprocess
from config import learning_config
import deep_models
def cross_entropy(y_true, y_pred):
"""
Compute cross_entropy loss metric
:param y_true:
:param y_pred:
:return:
"""
return K.categorical_crossentropy(y_true, y_pred)
def spars_cross_entropy(y_true, y_pred):
return K.sparse_categorical_crossentropy(y_true, y_pred)
def perplexity(y_true, y_pred):
"""
Compute perplexity metric
:param y_true:
:param y_pred:
:return:
"""
ce = K.categorical_crossentropy(y_true, y_pred)
# pp = K.pow(np.e, ce) # Or 2?
# pp = K.pow(2., ce) # Or np.e
pp = K.exp(ce)
# print('Perplexity value in perplexity function: ', K.eval(pp))
return pp
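
# Quick sanity check (illustrative, not part of the original module): a uniform
# prediction over V classes has cross-entropy ln(V), so the perplexity defined
# above equals V. With plain numpy:
#   import numpy as np
#   V = 4
#   print(np.exp(-np.log(1.0 / V)))  # -> 4.0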
class FileFormatFuzzer(object):
"""
Main class for learn and fuzz process
"""
def __init__(self, maxlen=85, step=1, batch_size=128):
"""
:param maxlen:
:param step:
:param batch_size:
"""
# os.chdir('./')
# learning hyper-parameters
self.maxlen = maxlen
self.step = step
self.batch_size = batch_size
self.text_all = ''
self.text_training = ''
self.text_validation = ''
self.text_test = ''
self.chars = None
self.char_indices = None
self.indices_char = None
# self.model = None
K.reset_uids()
K.clear_session()
self.load_dataset()
def define_model(self, input_dim, output_dim):
"""
        Build the model (originally a single LSTM layer; now a deeper network).
:param input_dim:
:param output_dim:
:return:
"""
model, model_name = deep_models.model_7(input_dim, output_dim)
return model, model_name
def load_dataset(self):
""" Load all 3 part of each dataset and building dictionary index """
if learning_config['dataset_size'] == 'small':
self.text_training = preprocess.load_from_file(learning_config['small_training_set_path'])
self.text_validation = preprocess.load_from_file(learning_config['small_validation_set_path'])
self.text_test = preprocess.load_from_file(learning_config['small_testing_set_path'])
elif learning_config['dataset_size'] == 'medium':
self.text_training = preprocess.load_from_file(learning_config['medium_training_set_path'])
self.text_validation = preprocess.load_from_file(learning_config['medium_validation_set_path'])
self.text_test = preprocess.load_from_file(learning_config['medium_testing_set_path'])
elif learning_config['dataset_size'] == 'large':
self.text_training = preprocess.load_from_file(learning_config['large_training_set_path'])
self.text_validation = preprocess.load_from_file(learning_config['large_validation_set_path'])
self.text_test = preprocess.load_from_file(learning_config['large_testing_set_path'])
self.text_all = self.text_training + self.text_validation + self.text_test
print('Total corpus length:', len(self.text_all))
self.chars = sorted(list(set(self.text_all)))
print('Total corpus chars:', len(self.chars))
# print(chars)
# Building dictionary index
print('Building dictionary index ...')
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
# print(char_indices)
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
# print(indices_char)
def generate_samples(self, text):
"""Cut the text in semi-redundant sequences of maxlen characters"""
sentences = [] # List of all sentence as input
next_chars = [] # List of all next chars as labels
for i in range(0, len(text) - self.maxlen, self.step): # arg2 why this?
sentences.append(text[i: i + self.maxlen])
# print(sentences)
next_chars.append(text[i + self.maxlen])
# print(next_chars)
print('Number of semi sequences or samples:', len(sentences))
return sentences, next_chars
def data_generator(self, sentences, next_chars):
"""
        Batch data generator for a large dataset that does not fit completely in memory.
        # Index j starts at a random offset (shuffled batches).
:param sentences:
:param next_chars:
:return:
"""
j = random.randint(0, len(sentences) - (self.batch_size+1))
# print('Vectorization...')
while True:
# Fix generator :))
x = np.zeros((self.batch_size, self.maxlen, len(self.chars)), dtype=np.bool)
y = np.zeros((self.batch_size, len(self.chars)), dtype=np.bool)
# j = random.randint(0, len(sentences) - (self.batch_size + 1))
next_chars2 = next_chars[j: j + self.batch_size] ## F...:)
for i, one_sample in enumerate(sentences[j: j + self.batch_size]):
for t, char in enumerate(one_sample):
x[i, t, self.char_indices[char]] = 1
y[i, self.char_indices[next_chars2[i]]] = 1
yield (x, y)
# yield self.generate_single_batch(sentences, next_chars)
j += self.batch_size
if j > (len(sentences) - (self.batch_size+1)):
j = random.randint(0, len(sentences) - (self.batch_size+1))
def data_generator_validation(self, sentences, next_chars):
"""
        Batch data generator for a large dataset that does not fit completely in memory.
        # Index j now increases sequentially (validation doesn't need to shuffle).
:param sentences:
:param next_chars:
:return:
"""
j = 0
# print('Vectorization...')
while True:
# Fix generator :))
x = np.zeros((self.batch_size, self.maxlen, len(self.chars)), dtype=np.bool)
y = np.zeros((self.batch_size, len(self.chars)), dtype=np.bool)
# j = random.randint(0, len(sentences) - (self.batch_size + 1))
next_chars2 = next_chars[j: j + self.batch_size] ## F...:)
for i, one_sample in enumerate(sentences[j: j + self.batch_size]):
for t, char in enumerate(one_sample):
x[i, t, self.char_indices[char]] = 1
y[i, self.char_indices[next_chars2[i]]] = 1
yield (x, y)
# yield self.generate_single_batch(sentences, next_chars)
j += self.batch_size
if j > (len(sentences) - (self.batch_size + 1)):
j = 0
def data_generator_in_memory(self, sentences, next_chars):
"""All data generate for small dataset fit completely in memory"""
x = np.zeros((len(sentences), self.maxlen, len(self.chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(self.chars)), dtype=np.bool)
for i, one_sample in enumerate(sentences):
for t, char in enumerate(one_sample):
x[i, t, self.char_indices[char]] = 1
y[i, self.char_indices[next_chars[i]]] = 1
return x, y
def train(self,
epochs=1,
trained_model=None,
trained_model_name='trained_model_wn'):
"""
Create and train deep model
        :param epochs: Specify number of epochs for training.
        :param trained_model: Optionally continue training an already-built Keras model.
        :param trained_model_name: Name used for the continued model in checkpoints and logs.
        :return: Nothing.
"""
# Start time of training
dt = datetime.datetime.now().strftime('_date_%Y-%m-%d_%H-%M-%S_')
print('Generate training samples ...')
sentences_training, next_chars_training = self.generate_samples(self.text_training)
print('Generate validations samples ...')
sentences_validation, next_chars_validation = self.generate_samples(self.text_validation)
# print(sentences_training[0] + '\t' + next_chars_training[0])
# print(sentences_training[1] + '\t' + next_chars_training[1])
# print(sentences_training[2] + '\t' + next_chars_training[2])
# print(sentences_training[3] + '\t' + next_chars_training[3])
# print(sentences_training[4] + '\t' + next_chars_training[4])
#
# input()
print('Build and compile model ...')
model = None
model_name = None
if trained_model is None:
model, model_name = self.define_model((self.maxlen, len(self.chars)), len(self.chars))
else:
model = trained_model
model_name = trained_model_name
        # optimizer = RMSprop(lr=0.01)  # other values tried: [0.001, 0.01, 0.02, 0.05, 0.1]
        optimizer = Adam(lr=0.001)  # Reduce from 0.001 to 0.0001 for model_10
model.compile(optimizer=optimizer,
loss='categorical_crossentropy',
# metrics=['accuracy']
metrics=['accuracy', cross_entropy, perplexity])
print(model_name, ' summary ...')
model.summary()
print(model_name, ' count_params ...')
print(model.count_params())
# input()
        print('Set up 5 callbacks ...')
# callback #1 EarlyStopping
# monitor= 'val_loss' or monitor='loss'?
model_early_stopping = EarlyStopping(monitor='loss', min_delta=0.01, patience=5, verbose=1, mode='auto')
# callback #2 ModelCheckpoint
# Create a directory for each training process to keep model checkpoint in .h5 format
dir_name = './model_checkpoint/pdfs/' + model_name + dt + 'epochs_' + str(epochs) + '/'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
file_name = dir_name + model_name + dt + 'epoch_{epoch:02d}_val_loss_{val_loss:.4f}.h5'
model_checkpoint = ModelCheckpoint(file_name, verbose=1)
# callback #3 TensorBoard
dir_name = './logs_tensorboard/pdfs/' + model_name + dt + 'epochs_' + str(epochs) + '/'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
model_tensorboard = TensorBoard(log_dir=dir_name, histogram_freq=0, batch_size=self.batch_size,
write_graph=True, write_grads=False, write_images=True, embeddings_freq=0,
embeddings_layer_names=None, embeddings_metadata=None)
# callback #4 CSVLogger
# Create a directory and an empty csv file within to save mode csv log.
dir_name = './logs_csv/pdfs/' + model_name + dt + 'epochs_' + str(epochs) + '/'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
file_name = dir_name + model_name + dt + '_epochs_' + str(epochs) + '_step_' + str(self.step) + '.csv'
open(file_name, mode='a', newline='').close()
model_csv_logger = CSVLogger(file_name, separator=',', append=False)
# callback #5 LambdaCallback
dir_name = './generated_results/pdfs/' + model_name + dt + 'epochs_' + str(epochs) + '/'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def on_epoch_end(epoch, logs):
nonlocal model
nonlocal epochs
nonlocal model_name
nonlocal dir_name
print('Sampling model and save results ... ')
self.generate_and_fuzz_new_samples(model=model,
model_name=model_name,
epochs=epochs,
current_epoch=epoch,
dir_name=dir_name
)
generate_and_fuzz_new_samples_callback = LambdaCallback(on_epoch_begin=None,
on_epoch_end=on_epoch_end,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None
)
if learning_config['dataset_size'] == 'very_small': # very_small
print('Start training on small dataset ...')
x, y = self.data_generator_in_memory(sentences_training, next_chars_training)
model.fit(x, y,
batch_size=self.batch_size,
epochs=epochs,
validation_split=0.2,
shuffle=True,
callbacks=[model_checkpoint,
model_tensorboard,
model_csv_logger,
generate_and_fuzz_new_samples_callback]
)
else:
print('Build training and validation data generators ...')
training_data_generator = self.data_generator(sentences_training, next_chars_training)
validation_data_generator = self.data_generator_validation(sentences_validation, next_chars_validation)
# x, y = next(training_data_generator)
# print(x)
# print('+'*75)
# print(y)
# print('#'*50)
# x, y = next(training_data_generator)
# print(x)
# print('+' * 75)
# print(y)
# print('#' * 50)
# input()
print('Start training on large dataset ...')
model.fit_generator(generator=training_data_generator,
# steps_per_epoch=200,
steps_per_epoch=len(sentences_training) // self.batch_size, # 1000,
validation_data=validation_data_generator,
validation_steps=len(sentences_validation) // (self.batch_size*2), # 100,
# validation_steps=10,
use_multiprocessing=False,
workers=1,
epochs=epochs,
shuffle=True,
callbacks=[model_checkpoint,
model_tensorboard,
model_csv_logger,
generate_and_fuzz_new_samples_callback]
)
# end of train method
# --------------------------------------------------------------------
def generate_and_fuzz_new_samples(self,
model=None,
model_name='model_1',
epochs=1,
current_epoch=1,
dir_name=None):
"""
        Sample the model and generate new objects.
:param model: The model which is training.
        :param model_name: Name of the model (based on the hyperparameter config in deep_model.py), e.g. model_1, model_2, ...
:param epochs: Number of total epochs of training, e.g. 10,20,30,40,50 or 60
:param current_epoch: Number of current epoch
        :param dir_name: Root directory for this run.
:return: Nothing
"""
# End time of current epoch
dt = datetime.datetime.now().strftime('_date_%Y-%m-%d_%H-%M-%S')
dir_name = dir_name + 'epoch_' + str(current_epoch) + dt + '/'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
# Fuzzing hyper-parameters
        # Candidate diversity settings tried during development:
        # diversities = [i * 0.10 for i in range(1, 20, 2)]
        # diversities = [0.2, 0.5, 1.0, 1.2, 1.5, 1.8]
        diversities = [1.0]  # for sou and for mou
        # diversities = [1.5]
generated_obj_total = 30200 # [5, 10, 100, 1000, 3000] {1000-1100 for sou and 3000-3100 for muo}
generated_obj_with_same_prefix = 20 # [1, 5, 10, 20, 40] {10 for sou and 20 for mou}
generated_obj_max_allowed_len = random.randint(450, 550) # Choose max allowed len for object randomly
exclude_from_fuzzing_set = {'s', 't', 'r', 'e', 'a', 'm', 'e', 'n', 'd', 'o', 'b', 'j'} # set(['s', 't', 'r', 'e', 'a', 'm'])
# Learn and fuzz paper hyper-parameters
        t_fuzz = 0.90  # For comparison with p_fuzz, where p_fuzz is a random number (fuzz only if p_fuzz > t_fuzz)
p_t = 0.40 # 0.9 and more for format fuzzing; 0.4 and less than 0.4 for data fuzzing. Now data fuzzing.
# End of fuzzing hyper-parameters
testset_objects_list = preprocess.get_list_of_object(self.text_test)
testset_object_gt_maxlen_list = []
for obj in testset_objects_list:
if len(obj) > self.maxlen+len(' endobj'):
testset_object_gt_maxlen_list.append(obj)
print('len filtered test-set: ', len(testset_object_gt_maxlen_list))
generated_total = ''
for diversity in diversities:
generated_total = ''
for q in range(round(generated_obj_total/generated_obj_with_same_prefix)):
obj_index = random.randint(0, len(testset_object_gt_maxlen_list) - 1)
generated_obj_counter = 0
generated_obj_len = 0
generated = ''
stop_condition = False
endobj_attach_manually = False
# print()
print('-- Diversity:', diversity)
obj_prefix = str(testset_object_gt_maxlen_list[obj_index])[0: self.maxlen]
generated += obj_prefix
# prob_vals = '1 ' * self.maxlen
# learnt_grammar = obj_prefix
# print('--- Generating ts_text with seed:\n "' + obj_prefix + '"')
# sys.stdout.write(generated)
if generated.endswith('endobj'):
generated_obj_counter += 1
if generated_obj_counter > generated_obj_with_same_prefix:
stop_condition = True
while not stop_condition:
x_pred = np.zeros((1, self.maxlen, len(self.chars)))
for t, char in enumerate(obj_prefix):
x_pred[0, t, self.char_indices[char]] = 1.
preds = model.predict(x_pred, verbose=0)[0]
next_index, prob, preds2 = self.sample(preds, diversity)
next_char = self.indices_char[next_index]
next_char_for_prefix = next_char
                    ###### Fuzzing section: occasionally replace the predicted char with the least likely one
if next_char not in exclude_from_fuzzing_set:
p_fuzz = random.random()
if p_fuzz > t_fuzz and preds2[next_index] < p_t:
next_index = np.argmin(preds2)
print('((Fuzz!))')
next_char = self.indices_char[next_index] # next_char updated.
###### End of fuzzing section
obj_prefix = obj_prefix[1:] + next_char # next_char_for_prefix
generated += next_char # next_char_for_prefix #
generated_obj_len += 1
if generated.endswith('endobj'):
generated_obj_counter += 1
generated_obj_len = 0
elif (generated.endswith('endobj') is False) and \
(generated_obj_len > generated_obj_max_allowed_len):
# Attach '\nendobj\n' manually, and reset obj_prefix
generated += '\nendobj\n'
generated_obj_counter += 1
generated_obj_len = 0
endobj_attach_manually = True
if generated_obj_counter >= generated_obj_with_same_prefix: # Fix: Change > to >= (13970315)
stop_condition = True
elif endobj_attach_manually:
# Reset prefix:
# Here we need to modify obj_prefix because we manually change the generated_obj!
# Below we add this new repair:
# obj_prefix = obj_prefix[len('\nendobj\n'):] + '\nendobj\n'
                        # Instead of modifying obj_prefix we can reset the prefix if we find that 'endobj' does not
                        # get generated automatically. That seems to be the better option, so we do this:
obj_index = random.randint(0, len(testset_object_gt_maxlen_list) - 1)
obj_prefix = str(testset_object_gt_maxlen_list[obj_index])[0: self.maxlen]
generated += obj_prefix
endobj_attach_manually = False
# sys.stdout.write(next_char)
# sys.stdout.flush()
# print()
generated_total += generated + '\n'
# save generated_result to file inside program
file_name = model_name \
+ '_diversity_' + repr(diversity) \
+ '_epochs_' + repr(epochs) \
+ '_step_' + repr(self.step) \
+ '.txt'
preprocess.save_to_file(dir_name + file_name, generated_total)
# preprocess.save_to_file(dir_name + file_name + 'probabilities.txt', prob_vals)
# preprocess.save_to_file(dir_name + file_name + 'learntgrammar.txt',learnt_grammar)
            print('Diversity %s saved to file successfully.' % diversity)
print('End of generation method.')
# print('Starting new epoch ...')
return generated_total
    # A lower temperature causes the model to make more likely,
    # but also more boring and conservative, predictions.
def sample(self, preds, temperature=1.0):
"""
Helper function to sample an index from a probability array
:param preds:
:param temperature:
:return:
"""
# print('raw predictions = ', preds)
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
# Sampling with numpy functions:
probas = np.random.multinomial(1, preds, 1)
# print()
# print('sanitize predictions = ', preds)
return np.argmax(probas), probas, preds
def no_sample(self):
pass
def sample_space(self):
pass
def save_model_plot(self, model, epochs):
"""
Save the model architecture plot.
:param model:
:param epochs:
:return:
"""
dt = datetime.datetime.now().strftime('_%Y%m%d_%H%M%S_')
# plot the model
plot_model(model, to_file='./modelpic/date_' + dt + 'epochs_' + str(epochs) + '.png',
show_shapes=True, show_layer_names=True)
def load_model_and_generate(self, model_name='model_7', epochs=38):
dt = datetime.datetime.now().strftime('_date_%Y-%m-%d_%H-%M-%S')
dir_name = './generated_results/pdfs/' + model_name + dt + 'epochs_' + str(epochs) + '/'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
model = load_model('./model_checkpoint/best_models/'
'model_7_date_2018-05-14_21-44-21_epoch_38_val_loss_0.3300.h5',
compile=False)
optimizer = Adam(lr=0.001) # Reduce from 0.001 to 0.0001 just for model_10
model.compile(optimizer=optimizer,
loss='categorical_crossentropy',
# metrics=['accuracy']
metrics=['accuracy'])
seq = self.generate_and_fuzz_new_samples(model=model,
model_name=model_name,
epochs=epochs,
current_epoch=38,
dir_name=dir_name)
list_of_obj = preprocess.get_list_of_object(seq=seq, is_sort=False)
return list_of_obj
def get_model_summary(self):
print('Get model summary ...')
model, model_name = self.define_model((self.maxlen, len(self.chars)), len(self.chars))
print(model_name, ' summary ...')
model.summary()
print(model_name, ' count_params ...')
print(model.count_params())
def main(argv):
""" The main function to call train() method"""
epochs = 100
fff = FileFormatFuzzer(maxlen=50, step=3, batch_size=256)
# trained_model_dir = './model_checkpoint/best_models/'
# trained_model_file_name = 'model_7_date_2018-05-14_21-44-21_epoch_65_val_loss_0.3335.h5'
# trained_model_path = trained_model_dir + trained_model_file_name
# trained_model = load_model(trained_model_path, compile=False)
# Train deep model from first or continue training for previous trained model.
# Trained model pass as argument.
# fff.train(epochs=epochs,
# trained_model=trained_model,
# trained_model_name='model_7-1'
# )
# fff.get_model_summary()
list_of_obj = fff.load_model_and_generate()
print('Len list_of_obj', len(list_of_obj))
dt = datetime.datetime.now().strftime('_%Y_%m_%d__%H_%M_%S_')
    print('Generation completed successfully', dt)
if __name__ == "__main__":
main(sys.argv)
| 42.996835
| 134
| 0.569478
|
fc182fb99873e2c7e6bc8c75d8fbc02ee836a6e3
| 771
|
py
|
Python
|
tests/data.py
|
HumanCellAtlas/fusilllade
|
5634b77440bb71aa075b6ca442baccbdd436dfd7
|
[
"MIT"
] | 7
|
2018-01-15T15:35:50.000Z
|
2021-03-11T23:08:16.000Z
|
tests/data.py
|
HumanCellAtlas/fusilllade
|
5634b77440bb71aa075b6ca442baccbdd436dfd7
|
[
"MIT"
] | 339
|
2018-02-08T23:53:58.000Z
|
2021-04-30T20:46:14.000Z
|
tests/data.py
|
HumanCellAtlas/fusilllade
|
5634b77440bb71aa075b6ca442baccbdd436dfd7
|
[
"MIT"
] | 1
|
2018-02-08T21:47:47.000Z
|
2018-02-08T21:47:47.000Z
|
TEST_NAMES_POS = [('helloworl12345', "alpha numerica characters"),
('hello@world.com', "email format"),
('hello-world=_@.,ZDc', "special characters"),
('HellOWoRLd', "different cases"),
('ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789'
'ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789', "== 128 characters"),
("1", "one character")]
TEST_NAMES_NEG = [
("&^#$Hello", "illegal characters 1"),
("! <>?world", "illegal characters 2"),
('ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789'
'ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF01234567890', "> 128 characters"),
('', "empty")
]
| 48.1875
| 108
| 0.636835
|
5d869b8f7499437ef522dbff677755928ada15c0
| 6,698
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/xenorhabdusnematophila.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/xenorhabdusnematophila.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/xenorhabdusnematophila.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Xenorhabdus nematophila.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:44:45.815993
The undirected graph Xenorhabdus nematophila has 4423 nodes and 270215
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.02763 and has 31 connected components, where the component
with most nodes has 4350 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 96, the mean node degree is 122.19,
and the node degree mode is 3. The top 5 most central nodes are 406817.XNC1_1757
(degree 1403), 406817.XNC1_4331 (degree 1245), 406817.XNC1_4165 (degree
927), 406817.XNC1_1955 (degree 924) and 406817.XNC1_1954 (degree 915).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import XenorhabdusNematophila
# Then load the graph
graph = XenorhabdusNematophila()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def XenorhabdusNematophila(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Xenorhabdus nematophila graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of the Xenorhabdus nematophila graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:44:45.815993
The undirected graph Xenorhabdus nematophila has 4423 nodes and 270215
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.02763 and has 31 connected components, where the component
with most nodes has 4350 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 96, the mean node degree is 122.19,
and the node degree mode is 3. The top 5 most central nodes are 406817.XNC1_1757
(degree 1403), 406817.XNC1_4331 (degree 1245), 406817.XNC1_4165 (degree
927), 406817.XNC1_1955 (degree 924) and 406817.XNC1_1954 (degree 915).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import XenorhabdusNematophila
# Then load the graph
graph = XenorhabdusNematophila()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="XenorhabdusNematophila",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.439153
| 223
| 0.70424
|
22f166b10151506e6ad9231666d8b0dab98bf591
| 2,963
|
py
|
Python
|
server/py/runurlopen.py
|
sreyas/Attendance-management-system
|
eeb57bcc942f407151b71bfab528e817c6806c74
|
[
"MIT"
] | null | null | null |
server/py/runurlopen.py
|
sreyas/Attendance-management-system
|
eeb57bcc942f407151b71bfab528e817c6806c74
|
[
"MIT"
] | null | null | null |
server/py/runurlopen.py
|
sreyas/Attendance-management-system
|
eeb57bcc942f407151b71bfab528e817c6806c74
|
[
"MIT"
] | null | null | null |
import face_recognition
import cv2
import urllib
import sys,json,numpy as np
import glob,os
from pathlib import Path
import numpy as np
home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
file_names = glob.glob(home + "/known_people/*.jp*g")
def read_in():
lines = sys.stdin.readline()
return lines
def authorised(name):
return not "Unknown" in name
def main():
home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
known_encodings_file_path = home + "/data/known_encodings_file.csv"
people_file_path = home + "/data/people_file.csv"
known_encodings_file = Path(known_encodings_file_path)
if known_encodings_file.is_file():
known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
else:
known_encodings = []
people_file = Path(people_file_path)
if people_file.is_file():
people = np.genfromtxt(people_file, dtype='U',delimiter=',')
else:
people = []
stream=urllib.urlopen('rtsp://sreyas:123asd@192.168.0.210:554/videoMain')
bytes=''
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
bytes+=stream.read(1024)
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if a!=-1 and b!=-1:
jpg = bytes[a:b+2]
bytes= bytes[b+2:]
frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
small_frame = cv2.resize(frame, (0,0), fx=.25, fy=.25)
if process_this_frame:
face_locations = face_recognition.face_locations(small_frame)
face_encodings = face_recognition.face_encodings(small_frame, face_locations)
face_names=[]
other = 0
name ='';
for face_encoding in face_encodings:
match = face_recognition.compare_faces(known_encodings, face_encoding)
name = "Unknown"
for i in range(len(match)):
if match[i]:
name = people[i]
break
if "Unknown" in name:
other += 1
name += str(other)
face_names.append(name)
print(face_names)
process_this_frame = not process_this_frame
for (top, right, bottom, left),name in zip(face_locations, face_names):
top *= 4
right *= 4
bottom *= 4
left *= 4
color = (0,255,0)
if not authorised(name):
color = (0,0,255)
cv2.rectangle(frame, (left,top), (right,bottom), color, 2)
cv2.rectangle(frame, (left,bottom-35), (right, bottom), color, cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name,(left+6, bottom-6), font, 1.0, (255,255,255), 1)
cv2.imshow('i',frame)
if cv2.waitKey(1) ==27:
exit(0)
main()
| 37.506329
| 92
| 0.580493
|
1ff40cf0b7bf16f736946d5ffe94fd6cb374d85a
| 1,439
|
py
|
Python
|
Achive/rpyc_NI-SWITCH_NI-DCPower_NI-DMM.py
|
rscd27p/DockerTest
|
aee56356f7cdaded1c6ef787e6cdf8415308c8c3
|
[
"MIT"
] | 1
|
2021-08-30T14:22:15.000Z
|
2021-08-30T14:22:15.000Z
|
Achive/rpyc_NI-SWITCH_NI-DCPower_NI-DMM.py
|
rscd27p/DockerTest
|
aee56356f7cdaded1c6ef787e6cdf8415308c8c3
|
[
"MIT"
] | null | null | null |
Achive/rpyc_NI-SWITCH_NI-DCPower_NI-DMM.py
|
rscd27p/DockerTest
|
aee56356f7cdaded1c6ef787e6cdf8415308c8c3
|
[
"MIT"
] | null | null | null |
# Import libraries
import threading
import rpyc  # python3 -m pip install rpyc
import nidcpower  # python3 -m pip install nidcpower
import niswitch  # python3 -m pip install niswitch
import nidmm  # python3 -m pip install nidmm
# Import rpyc thread server
from rpyc.utils.server import ThreadedServer
# Declare NI-SWITCH rpyc service
class NI_SWTICH_service(rpyc.Service):
exposed_niswitch = niswitch
print("NI-SWITCH thread started...")
# Declare NI-DCPower rpyc service
class NI_DCPower_service(rpyc.Service):
exposed_nidcpower = nidcpower
print("NI-DCPower thread started...")
# Declare NI-DCPower rpyc service
class NI_DMM_service(rpyc.Service):
exposed_nidmm = nidmm
print("NI-DMM thread started...")
# Declare Ports
NI_SWTICH_port = 18861
NI_DCPower_port = 18862
NI_DMM_port = 18863
# Main code
if __name__ == "__main__":
print("Starting NI-SWITCH and NI-DCPower rpyc service")
print("Press Ctrl+C to stop this service")
t1 = ThreadedServer(NI_SWTICH_service, port = 18861, protocol_config = {"allow_public_attrs" : True, "allow_all_attrs" : True})
t2 = ThreadedServer(NI_DCPower_service, port = 18861, protocol_config = {"allow_public_attrs" : True, "allow_all_attrs" : True})
t3 = ThreadedServer(NI_DMM_service, port = 18861, protocol_config = {"allow_public_attrs" : True, "allow_all_attrs" : True})
t1.start()
t2.start()
t3.start()
print("Press Ctrl+C to stop this service")
| 35.097561
| 130
| 0.734538
|
9fb0003a238037f220821726e92caf5ce3446b72
| 1,799
|
py
|
Python
|
notes/algo-ds-practice/problems/array/match_pattern.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | 6
|
2020-07-05T05:15:19.000Z
|
2021-01-24T20:17:14.000Z
|
notes/algo-ds-practice/problems/array/match_pattern.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | null | null | null |
notes/algo-ds-practice/problems/array/match_pattern.py
|
Anmol-Singh-Jaggi/interview-notes
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
[
"MIT"
] | 2
|
2020-09-14T06:46:37.000Z
|
2021-06-15T09:17:21.000Z
|
'''
Find all strings that match specific pattern in a dictionary
Given a dictionary of words, find all strings that matches the given pattern where every character in the pattern is uniquely mapped to a character in the dictionary.
Examples:
Input:
dict = ["abb", "abc", "xyz", "xyy"];
pattern = "foo"
Output: [xyy abb]
Explanation:
xyy and abb have same character at index 1 and 2 like the pattern
Input:
dict = ["abb", "abc", "xyz", "xyy"];
pat = "mno"
Output: [abc xyz]
Explanation:
abc and xyz have all distinct characters, similar to the pattern
Input:
dict = ["abb", "abc", "xyz", "xyy"];
pattern = "aba"
Output: []
Explanation:
Pattern has same character at index 0 and 2.
No word in dictionary follows the pattern.
Input:
dict = ["abab", "aba", "xyz", "xyx"];
pattern = "aba"
Output: [aba xyx]
Explanation:
aba and xyx have same character at index 0 and 2 like the pattern
SOLUTION 1:
Encode all strings in a certain way such that finding the pattern becomes trivial.
For example:
abb -> 122
bcc -> 122
abca -> 1231
Now encode all the strings in the dictionary, and put them into a hashset.
Then just check if the input string's encoding is present in the hashmap.
SOLUTION 2:
Keep 2 pointers i and j on the 2 strings being matched.
Also, keep 2 dictionaries which map one string's char to another and vice-versa.
Think about why we need bidirectional map and not unidirectional.
Then just iterate over both strings simultaneously, and keep updating the hashmaps.
While iterating, check for any inconsistency.
Let char1 = str1[i] and char2 = str2[j].
If char1 not in dict1 and char2 not in dict2, then insert the new mappings.
Otherwise, if char1 in dict1, require dict1[char1] == char2, else return False.
Similarly, if char2 in dict2, require dict2[char2] == char1, else return False.
A sketch of both solutions follows this docstring.
'''
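# Illustrative sketch of both solutions described above. This code is not part of the
# original note; the function names and the __main__ checks are my own additions.
def _encode(word):
    # SOLUTION 1 helper: map each word to its pattern signature, e.g. "abb" -> (0, 1, 1).
    mapping = {}
    return tuple(mapping.setdefault(ch, len(mapping)) for ch in word)
def match_pattern_by_encoding(words, pattern):
    # SOLUTION 1: a word matches iff it has the same length and the same signature as the pattern.
    target = _encode(pattern)
    return [w for w in words if len(w) == len(pattern) and _encode(w) == target]
def matches_two_maps(word, pattern):
    # SOLUTION 2: walk both strings together, keeping bidirectional character maps.
    if len(word) != len(pattern):
        return False
    w_to_p, p_to_w = {}, {}
    for cw, cp in zip(word, pattern):
        if cw not in w_to_p and cp not in p_to_w:
            w_to_p[cw] = cp
            p_to_w[cp] = cw
        elif w_to_p.get(cw) != cp or p_to_w.get(cp) != cw:
            return False
    return True
if __name__ == '__main__':
    dictionary = ["abb", "abc", "xyz", "xyy"]
    assert sorted(match_pattern_by_encoding(dictionary, "foo")) == ["abb", "xyy"]
    assert sorted(w for w in dictionary if matches_two_maps(w, "mno")) == ["abc", "xyz"]
    assert [w for w in dictionary if matches_two_maps(w, "aba")] == []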
| 29.491803
| 166
| 0.738744
|
bbbda27e168aae1bbb00d9e2a657de841e9fd86b
| 5,175
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
bhok/PurePosCoin2.7
|
f7c4a066c860ed1ef4ceae7e992f8edeeb4b199f
|
[
"MIT"
] | 18
|
2017-04-01T01:45:43.000Z
|
2021-11-24T03:28:36.000Z
|
contrib/seeds/makeseeds.py
|
bhok/PurePosCoin2.7
|
f7c4a066c860ed1ef4ceae7e992f8edeeb4b199f
|
[
"MIT"
] | 7
|
2017-06-26T17:52:34.000Z
|
2019-03-07T16:58:24.000Z
|
contrib/seeds/makeseeds.py
|
bhok/PurePosCoin2.7
|
f7c4a066c860ed1ef4ceae7e992f8edeeb4b199f
|
[
"MIT"
] | 15
|
2017-04-20T23:56:18.000Z
|
2022-02-28T07:11:29.000Z
|
#!/usr/bin/env python
# Copyright (c) 2013-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 500000
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/bitcoinplus:2.6.(0|99)/|/bitcoinplus:2.7.(0|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in hist.items() if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Keep only entries with a valid (parseable) address.
ips = [ip for ip in ips if ip is not None]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print '[%s]:%i' % (ip['ip'], ip['port'])
else:
print '%s:%i' % (ip['ip'], ip['port'])
if __name__ == '__main__':
main()
| 32.142857
| 186
| 0.564251
|
a0d3031eb59b9a3112cad5ba5a75233f9a11c20f
| 5,569
|
py
|
Python
|
neo3/network/payloads/verification.py
|
CityOfZion/neo3-python
|
db4cd53041ce7a2c1d32bee18214fb31bb9f52be
|
[
"MIT"
] | null | null | null |
neo3/network/payloads/verification.py
|
CityOfZion/neo3-python
|
db4cd53041ce7a2c1d32bee18214fb31bb9f52be
|
[
"MIT"
] | 23
|
2020-05-12T15:01:12.000Z
|
2020-07-15T09:26:05.000Z
|
neo3/network/payloads/verification.py
|
CityOfZion/neo3-python
|
db4cd53041ce7a2c1d32bee18214fb31bb9f52be
|
[
"MIT"
] | 1
|
2020-07-01T09:32:46.000Z
|
2020-07-01T09:32:46.000Z
|
from __future__ import annotations
import hashlib
from enum import IntFlag
from neo3.core import serialization, utils, types, cryptography, Size as s
from neo3.network import payloads
from typing import List
class Cosigner(serialization.ISerializable):
"""
A class that specifies who can pass CheckWitness() verifications in a smart contract.
"""
def __init__(self, account: types.UInt160 = None,
scope: payloads.WitnessScope = None,
allowed_contracts: List[types.UInt160] = None,
allowed_groups: List[cryptography.EllipticCurve.ECPoint] = None):
#: The TX sender.
self.account = account if account else types.UInt160.zero()
#: payloads.WitnessScope: The configured validation scope.
self.scope = scope if scope else payloads.WitnessScope.GLOBAL
#: List[types.UInt160]: Whitelist of contract hashes if used with
#: :const:`~neo3.network.payloads.verification.WitnessScope.CUSTOM_CONTRACTS`.
self.allowed_contracts = allowed_contracts if allowed_contracts else []
#: List[cryptography.EllipticCurve.ECPoint]: Whitelist of public keys if used with
#: :const:`~neo3.network.payloads.verification.WitnessScope.CUSTOM_GROUPS`.
self.allowed_groups = allowed_groups if allowed_groups else []
def __len__(self):
contracts_size = 0
if payloads.WitnessScope.CUSTOM_CONTRACTS in self.scope:
contracts_size = utils.get_var_size(self.allowed_contracts)
groups_size = 0
if payloads.WitnessScope.CUSTOM_GROUPS in self.scope:
groups_size = utils.get_var_size(self.allowed_groups)
return s.uint160 + s.uint8 + contracts_size + groups_size
def serialize(self, writer: serialization.BinaryWriter) -> None:
"""
Serialize the object into a binary stream.
Args:
writer: instance.
"""
writer.write_serializable(self.account)
writer.write_uint8(self.scope)
if payloads.WitnessScope.CUSTOM_CONTRACTS in self.scope:
writer.write_serializable_list(self.allowed_contracts)
if payloads.WitnessScope.CUSTOM_GROUPS in self.scope:
writer.write_serializable_list(self.allowed_groups)
def deserialize(self, reader: serialization.BinaryReader) -> None:
"""
Deserialize the object from a binary stream.
Args:
reader: instance.
"""
self.account = reader.read_serializable(types.UInt160)
self.scope = payloads.WitnessScope(reader.read_uint8())
if payloads.WitnessScope.CUSTOM_CONTRACTS in self.scope:
self.allowed_contracts = reader.read_serializable_list(types.UInt160)
if payloads.WitnessScope.CUSTOM_GROUPS in self.scope:
self.allowed_groups = reader.read_serializable_list(cryptography.EllipticCurve.ECPoint)
class Witness(serialization.ISerializable):
"""
An executable verification script that validates a verifiable object like a transaction.
"""
def __init__(self, invocation_script: bytes = None, verification_script: bytes = None):
#: A set of VM instructions to setup the stack for verification.
self.invocation_script = invocation_script if invocation_script else b''
#: A set of VM instructions that does the actual verification.
#: It is expected to set the result stack to a boolean True if validation passed.
self.verification_script = verification_script if verification_script else b''
self._script_hash = None
def __len__(self):
return utils.get_var_size(self.invocation_script) + utils.get_var_size(self.verification_script)
def serialize(self, writer: serialization.BinaryWriter) -> None:
"""
Serialize the object into a binary stream.
Args:
writer: instance.
"""
writer.write_var_bytes(self.invocation_script)
writer.write_var_bytes(self.verification_script)
def deserialize(self, reader: serialization.BinaryReader) -> None:
"""
Deserialize the object from a binary stream.
Args:
reader: instance.
"""
self.invocation_script = reader.read_var_bytes(max=664)
self.verification_script = reader.read_var_bytes(max=360)
def script_hash(self) -> types.UInt160:
""" Get the script hash based on the verification script."""
intermediate_data = hashlib.sha256(self.verification_script).digest()
data = hashlib.new('ripemd160', intermediate_data).digest()
return types.UInt160(data=data)
class WitnessScope(IntFlag):
"""
Determine the rules for a smart contract :func:`CheckWitness()` sys call.
"""
#: Allow the witness in all context. Equal to NEO 2.x's default behaviour.
GLOBAL = 0x00
#: Allow the witness if the current calling script hash equals the entry script hash into the virtual machine.
#: Using this prevents passing :func:`CheckWitness()` in a smart contract called via another smart contract.
CALLED_BY_ENTRY = 0x01
#: Allow the witness if called from a smart contract that is whitelisted in the cosigner
#: :attr:`~neo3.network.payloads.verification.Cosigner.allowed_contracts` attribute.
CUSTOM_CONTRACTS = 0x10
    #: Allow the witness if any public key in the cosigner
    #: :attr:`~neo3.network.payloads.verification.Cosigner.allowed_groups` attribute is whitelisted in the contract's
    #: manifest.groups array.
CUSTOM_GROUPS = 0x20
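# --- Usage sketch (illustrative only; the account/contract hashes below are placeholder values) ---
# WitnessScope members are IntFlag values: combine them with "|" and test membership with "in",
# exactly as Cosigner.serialize()/deserialize() do above.
if __name__ == '__main__':
    _scope = WitnessScope.CALLED_BY_ENTRY | WitnessScope.CUSTOM_CONTRACTS
    _cosigner = Cosigner(account=types.UInt160.zero(),
                         scope=_scope,
                         allowed_contracts=[types.UInt160.zero()])
    assert WitnessScope.CUSTOM_CONTRACTS in _cosigner.scope
    assert WitnessScope.CUSTOM_GROUPS not in _cosigner.scope
    print("serialized cosigner size in bytes:", len(_cosigner))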
| 42.838462
| 116
| 0.69815
|
3157265ec18f19d2828baa0bec2147a8a546df7e
| 11,520
|
py
|
Python
|
experiment.py
|
radidd/CausalMediationAnalysis
|
6fe0d75ddd301b82e9e0cb0228e43ec74c0670de
|
[
"MIT"
] | null | null | null |
experiment.py
|
radidd/CausalMediationAnalysis
|
6fe0d75ddd301b82e9e0cb0228e43ec74c0670de
|
[
"MIT"
] | null | null | null |
experiment.py
|
radidd/CausalMediationAnalysis
|
6fe0d75ddd301b82e9e0cb0228e43ec74c0670de
|
[
"MIT"
] | null | null | null |
import math
import statistics
from functools import partial
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from utils_cma import batch, convert_results_to_pd
from UNITER.model.nlvr2 import UniterForNlvr2Triplet
from UNITER.utils.const import IMG_DIM
from apex import amp
np.random.seed(1)
torch.manual_seed(1)
class Model():
'''
Wrapper for all model logic
'''
def __init__(self,
ckpt_file,
model_config,
opts,
device='cuda',
output_attentions=False,
random_weights=False):
super()
self.device = device
# Load UNITER finetuned for NLVR2 (triplet version)
checkpoint = torch.load(ckpt_file)
self.model = UniterForNlvr2Triplet(model_config, img_dim=IMG_DIM)
self.model.init_type_embedding()
self.model.load_state_dict(checkpoint, strict=False)
self.model.to(self.device)
self.model, _ = amp.initialize(self.model, enabled=opts.fp16, opt_level='O2')
self.model.eval()
# TODO: this does not work currently
# if random_weights:
# print('Randomizing weights')
# self.model.init_weights()
# Grab model configuration parameters
self.num_layers = model_config.num_hidden_layers
self.num_neurons = model_config.hidden_size
def get_representations(self, context, position=0):
# Hook for saving the representation
def extract_representation_hook(module,
input,
output,
position,
representations,
layer):
representations[layer] = output[0][position]
handles = []
representation = {}
with torch.no_grad():
# construct all the hooks
# word embeddings will be layer -1
handles.append(self.model.uniter.embeddings.word_embeddings.register_forward_hook(
partial(extract_representation_hook,
position=position,
representations=representation,
layer=-1)))
# hidden layers; intervening on the output of the feedforward network
for layer in range(self.num_layers):
handles.append(self.model.uniter.encoder.layer[layer]\
.output.dropout.register_forward_hook(
partial(extract_representation_hook,
position=position,
representations=representation,
layer=layer)))
logits = self.model(context, compute_loss=False)
for h in handles:
h.remove()
return representation
def get_probabilities_for_examples(self, context):
"""Return probabilities of single-token candidates given context"""
logits = self.model(context, compute_loss = False)
probs = F.softmax(logits, dim=-1)
return probs.tolist()
def neuron_intervention(self,
context,
outputs,
rep,
layers,
neurons,
position,
intervention_type='replace',
alpha=1.):
# Hook for changing representation during forward pass
def intervention_hook(module,
input,
output,
position,
neurons,
intervention,
intervention_type):
# Get the neurons to intervene on
neurons = torch.LongTensor(neurons).to(self.device)
# First grab the position across batch
# Then, for each element, get correct index w/ gather
base = output[:, position, :].gather(
1, neurons)
intervention_view = intervention.view_as(base)
if intervention_type == 'replace':
base = intervention_view
elif intervention_type == 'diff':
base += intervention_view
else:
raise ValueError(f"Invalid intervention_type: {intervention_type}")
# Overwrite values in the output
# First define mask where to overwrite
scatter_mask = torch.zeros_like(output).byte()
for i, v in enumerate(neurons):
scatter_mask[i, position, v] = 1
# Then take values from base and scatter
output.masked_scatter_(scatter_mask, base.flatten())
# Set up the context as batch
batch_size = len(neurons)
batch_context = {}
for key, value in context.items():
try:
if len(value.shape) == 2:
batch_context[key] = value.repeat(batch_size,1)
elif len(value.shape) == 3:
batch_context[key] = value.repeat(batch_size, 1, 1)
except AttributeError:
continue
handle_list = []
for layer in set(layers):
neuron_loc = np.where(np.array(layers) == layer)[0]
n_list = []
for n in neurons:
unsorted_n_list = [n[i] for i in neuron_loc]
n_list.append(list(np.sort(unsorted_n_list)))
intervention_rep = alpha * rep[layer][n_list]
if layer == -1:
wte_intervention_handle = self.model.uniter.embeddings.word_embeddings.register_forward_hook(
partial(intervention_hook,
position=position,
neurons=n_list,
intervention=intervention_rep,
intervention_type=intervention_type))
handle_list.append(wte_intervention_handle)
else:
mlp_intervention_handle = self.model.uniter.encoder.layer[layer]\
.output.dropout.register_forward_hook(
partial(intervention_hook,
position=position,
neurons=n_list,
intervention=intervention_rep,
intervention_type=intervention_type))
handle_list.append(mlp_intervention_handle)
new_probabilities = self.get_probabilities_for_examples(
batch_context)
for hndle in handle_list:
hndle.remove()
return new_probabilities
def neuron_intervention_single_experiment(self,
intervention,
intervention_type, layers_to_adj=[],
neurons_to_adj=[],
alpha=100,
bsize=500, intervention_loc='all'):
"""
run one full neuron intervention experiment
"""
with torch.no_grad():
# Compute representations of the non-negated and negated versions; position 0 corresponds to the [CLS] token
orig_representations = self.get_representations(
intervention[0],
position=0)
negated_representations = self.get_representations(
intervention[1],
position=0)
# e.g. There aren't two dogs.
if intervention_type == 'negate_direct':
context = intervention[1]
rep = orig_representations
replace_or_diff = 'replace'
# e.g. There are two dogs.
elif intervention_type == 'negate_indirect':
context = intervention[0]
rep = negated_representations
replace_or_diff = 'replace'
else:
raise ValueError(f"Invalid intervention_type: {intervention_type}")
# Probabilities without intervention (Base case)
# Candidate 1 is False; Candidate 2 is True
# TODO: this can be simplified since there are only two candidates
candidate1_orig_prob, candidate2_orig_prob = self.get_probabilities_for_examples(
intervention[0])[0]
candidate1_neg_prob, candidate2_neg_prob = self.get_probabilities_for_examples(
intervention[1])[0]
# Running interventions
if intervention_loc == 'all':
candidate1_probs = torch.zeros((self.num_layers + 1, self.num_neurons))
candidate2_probs = torch.zeros((self.num_layers + 1, self.num_neurons))
for layer in range(-1, self.num_layers):
for neurons in batch(range(self.num_neurons), bsize):
neurons_to_search = [[i] + neurons_to_adj for i in neurons]
layers_to_search = [layer] + layers_to_adj
probs = self.neuron_intervention(
context=context,
outputs=[0,1],
rep=rep,
layers=layers_to_search,
neurons=neurons_to_search,
position=0,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons, probs):
candidate1_probs[layer + 1][neuron] = p1
candidate2_probs[layer + 1][neuron] = p2
elif intervention_loc == 'layer':
layers_to_search = (len(neurons_to_adj) + 1)*[layers_to_adj]
candidate1_probs = torch.zeros((1, self.num_neurons))
candidate2_probs = torch.zeros((1, self.num_neurons))
for neurons in batch(range(self.num_neurons), bsize):
neurons_to_search = [[i] + neurons_to_adj for i in neurons]
probs = self.neuron_intervention(
context=context,
outputs=[0,1],
rep=rep,
layers=layers_to_search,
neurons=neurons_to_search,
position=0,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons, probs):
candidate1_probs[0][neuron] = p1
candidate2_probs[0][neuron] = p2
else:
probs = self.neuron_intervention(
context=context,
outputs=[0,1],
rep=rep,
layers=layers_to_adj,
neurons=neurons_to_adj,
position=0,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons_to_adj, probs):
candidate1_probs = p1
candidate2_probs = p2
return (candidate1_orig_prob, candidate2_orig_prob,
candidate1_neg_prob, candidate2_neg_prob,
candidate1_probs, candidate2_probs)
| 40.56338
| 120
| 0.526476
|
f96eb2a13d97bc3c3ba06ae1689120e98344ef90
| 4,224
|
py
|
Python
|
LeetCode-All-Solution/Python3/LC-0206-Reverse-Linked-List.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0206-Reverse-Linked-List.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0206-Reverse-Linked-List.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0206-Reverse-Linked-List.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-10
=================================================================="""
import sys
import time
from typing import List, Optional
"""
LeetCode - 0206 - (Easy) - Reverse Linked List
https://leetcode.com/problems/reverse-linked-list/
Description & Requirement:
Given the head of a singly linked list, reverse the list, and return the reversed list.
Example 1
Input: head = [1,2,3,4,5]
Output: [5,4,3,2,1]
Example 2:
Input: head = [1,2]
Output: [2,1]
Example 3:
Input: head = []
Output: []
Constraints:
The number of nodes in the list is the range [0, 5000].
-5000 <= Node.val <= 5000
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next # this means (by default): end_node.next == None
@staticmethod
def build_singly_linked_list(val_list: List[int]):
if not isinstance(val_list, list) or len(val_list) <= 0:
return None
head_node = ListNode(val=val_list[0])
ptr = head_node
len_val = len(val_list)
val_index = 1
while val_index < len_val:
new_node = ListNode(val=val_list[val_index]) # create new node
ptr.next = new_node # singly link
ptr = new_node # move
val_index += 1
return head_node
@staticmethod
def show_val_singly_linked_list(head_node) -> None:
# exception case
if (not isinstance(head_node, ListNode)) and (head_node is not None):
return None # Error head_node type
if not isinstance(head_node, ListNode):
            return None  # empty list (or wrong type), nothing to show
ptr = head_node
while ptr:
print(ptr.val)
ptr = ptr.next
class Solution:
def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
# exception case
if not isinstance(head, ListNode):
            return None  # empty list or invalid input
# main method: (two pointer, in-place reverse)
return self._reverseList(head)
def _reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
return self._reverse_singly_linked_list(head) # reverse
@staticmethod
def _reverse_singly_linked_list(head_node: Optional[ListNode]) -> Optional[ListNode]:
if (not isinstance(head_node, ListNode)) and (head_node is not None):
return head_node # Error head_node type
if not isinstance(head_node, ListNode) or not head_node.next:
return head_node # only 0 or 1 node, needn't reverse
# now the singly linked list must have >= 2 nodes
ptr_pre = head_node
ptr = head_node.next
head_node.next = None # the former head_node is the new end_node, so its next is None
while ptr.next:
next_one = ptr.next # record the next position of ptr
ptr.next = ptr_pre # link ptr -> ptr_pre (reverse link)
ptr_pre = ptr # move ptr_pre to current ptr position
ptr = next_one # move ptr to the recorded next position
# now ptr.next is None, so ptr is the new head_node now
ptr.next = ptr_pre # the last reverse link
head_node = ptr # set new head_node
return head_node
def main():
# Example 1 Output: [5,4,3,2,1]
head = [1, 2, 3, 4, 5]
# Example 2: Output: [2,1]
# head = [1, 2]
# Example 3: Output: []
# head = []
head_node = ListNode.build_singly_linked_list(head)
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.reverseList(head_node)
end = time.process_time()
# show answer
print('\nAnswer:')
# print(ans.val)
ListNode.show_val_singly_linked_list(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| 31.522388
| 94
| 0.605587
|
661b79206b3ba4eef31cdffc5690efe9625228be
| 2,770
|
py
|
Python
|
tests/h/presenters/annotation_searchindex_test.py
|
hmstepanek/h
|
92e567005c474f3ce3095f9a8bf37548ca2c9621
|
[
"MIT"
] | null | null | null |
tests/h/presenters/annotation_searchindex_test.py
|
hmstepanek/h
|
92e567005c474f3ce3095f9a8bf37548ca2c9621
|
[
"MIT"
] | 5
|
2017-12-26T14:22:20.000Z
|
2018-04-02T02:56:38.000Z
|
tests/h/presenters/annotation_searchindex_test.py
|
SenseTW/h
|
dae2dfa8ab064ddb696e5657d48459114b2642d2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import mock
import pytest
from h.presenters.annotation_searchindex import AnnotationSearchIndexPresenter
@pytest.mark.usefixtures('DocumentSearchIndexPresenter')
class TestAnnotationSearchIndexPresenter(object):
def test_asdict(self, DocumentSearchIndexPresenter):
annotation = mock.Mock(
id='xyz123',
created=datetime.datetime(2016, 2, 24, 18, 3, 25, 768),
updated=datetime.datetime(2016, 2, 29, 10, 24, 5, 564),
userid='acct:luke@hypothes.is',
target_uri='http://example.com',
target_uri_normalized='http://example.com/normalized',
text='It is magical!',
tags=['magic'],
groupid='__world__',
shared=True,
target_selectors=[{'TestSelector': 'foobar'}],
references=['referenced-id-1', 'referenced-id-2'],
thread_ids=['thread-id-1', 'thread-id-2'],
extra={'extra-1': 'foo', 'extra-2': 'bar'})
DocumentSearchIndexPresenter.return_value.asdict.return_value = {'foo': 'bar'}
annotation_dict = AnnotationSearchIndexPresenter(annotation).asdict()
assert annotation_dict == {
'authority': 'hypothes.is',
'id': 'xyz123',
'created': '2016-02-24T18:03:25.000768+00:00',
'updated': '2016-02-29T10:24:05.000564+00:00',
'user': 'acct:luke@hypothes.is',
'user_raw': 'acct:luke@hypothes.is',
'uri': 'http://example.com',
'text': 'It is magical!',
'tags': ['magic'],
'tags_raw': ['magic'],
'group': '__world__',
'shared': True,
'target': [{'scope': ['http://example.com/normalized'],
'source': 'http://example.com',
'selector': [{'TestSelector': 'foobar'}]}],
'document': {'foo': 'bar'},
'references': ['referenced-id-1', 'referenced-id-2'],
'thread_ids': ['thread-id-1', 'thread-id-2'],
}
def test_it_copies_target_uri_normalized_to_target_scope(self):
annotation = mock.Mock(
userid='acct:luke@hypothes.is',
target_uri_normalized='http://example.com/normalized',
extra={})
annotation_dict = AnnotationSearchIndexPresenter(annotation).asdict()
assert annotation_dict['target'][0]['scope'] == [
'http://example.com/normalized']
@pytest.fixture
def DocumentSearchIndexPresenter(self, patch):
class_ = patch('h.presenters.annotation_searchindex.DocumentSearchIndexPresenter')
class_.return_value.asdict.return_value = {}
return class_
| 37.945205
| 90
| 0.586643
|
d2facb6ddb1111eeae5b72834db16459f78a44ee
| 4,548
|
py
|
Python
|
python/sparktk/frame/ops/timeseries_from_observations.py
|
lewisc/spark-tk
|
5548fc925b5c278263cbdebbd9e8c7593320c2f4
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2016-05-20T22:26:05.000Z
|
2022-01-21T12:55:13.000Z
|
python/sparktk/frame/ops/timeseries_from_observations.py
|
aayushidwivedi01/spark-tk-old
|
fcf25f86498ac416cce77de0db4cf0aa503d20ac
|
[
"ECL-2.0",
"Apache-2.0"
] | 70
|
2016-06-28T01:11:21.000Z
|
2021-03-15T21:40:01.000Z
|
python/sparktk/frame/ops/timeseries_from_observations.py
|
aayushidwivedi01/spark-tk-old
|
fcf25f86498ac416cce77de0db4cf0aa503d20ac
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2016-04-21T22:25:22.000Z
|
2020-10-06T09:23:43.000Z
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def timeseries_from_observations(self, date_time_index, timestamp_column, key_column, value_column):
"""
Returns a frame that has the observations formatted as a time series.
:param date_time_index: List of date/time strings. DateTimeIndex to conform all series to.
:param timestamp_column: The name of the column telling when the observation occurred.
:param key_column: The name of the column that contains which string key the observation belongs to.
:param value_column: The name of the column that contains the observed value.
:return: Frame formatted as a time series (with a column for key and a column for the vector of values).
Uses the specified timestamp, key, and value columns and the date/time index provided to format the observations
as a time series. The time series frame will have columns for the key and a vector of the observed values that
correspond to the date/time index.
Examples
--------
In this example, we will use a frame of observations of resting heart rate for three individuals over three days.
The data is accessed from Frame object called *my_frame*:
<hide>
>>> from datetime import datetime
>>> data = [["Edward", "2016-01-01T12:00:00Z", 62],["Stanley", "2016-01-01T12:00:00Z", 57],["Edward", "2016-01-02T12:00:00Z", 63],["Sarah", "2016-01-02T12:00:00Z", 64],["Stanley", "2016-01-02T12:00:00Z", 57],["Edward", "2016-01-03T12:00:00Z", 62],["Sarah", "2016-01-03T12:00:00Z", 64],["Stanley", "2016-01-03T12:00:00Z", 56]]
>>> schema = [("name", str), ("date", datetime), ("resting_heart_rate", float)]
>>> my_frame = tc.frame.create(data, schema)
-etc-
</hide>
>>> my_frame.inspect(my_frame.count())
[#] name date resting_heart_rate
======================================================
[0] Edward 2016-01-01T12:00:00Z 62
[1] Stanley 2016-01-01T12:00:00Z 57
[2] Edward 2016-01-02T12:00:00Z 63
[3] Sarah 2016-01-02T12:00:00Z 64
[4] Stanley 2016-01-02T12:00:00Z 57
[5] Edward 2016-01-03T12:00:00Z 62
[6] Sarah 2016-01-03T12:00:00Z 64
[7] Stanley 2016-01-03T12:00:00Z 56
We then need to create an array that contains the date/time index,
which will be used when creating the time series. Since our data
is for three days, our date/time index will just contain those
three dates:
>>> datetimeindex = ["2016-01-01T12:00:00.000Z","2016-01-02T12:00:00.000Z","2016-01-03T12:00:00.000Z"]
Then we can create our time series frame by specifying our date/time
index along with the name of our timestamp column (in this example, it's
"date"), key column (in this example, it's "name"), and value column (in
this example, it's "resting_heart_rate").
>>> ts = my_frame.timeseries_from_observations(datetimeindex, "date", "name", "resting_heart_rate")
<progress>
Take a look at the resulting time series frame schema and contents:
>>> ts.schema
[(u'name', <type 'unicode'>), (u'resting_heart_rate', vector(3))]
>>> ts.inspect()
[#] name resting_heart_rate
================================
[0] Stanley [57.0, 57.0, 56.0]
[1] Edward [62.0, 63.0, 62.0]
[2] Sarah [None, 64.0, 64.0]
"""
if not isinstance(date_time_index, list):
raise TypeError("date_time_index should be a list of date/times")
scala_date_list = self._tc.jutils.convert.to_scala_date_time_list(date_time_index)
from sparktk.frame.frame import Frame
return Frame(self._tc,
self._scala.timeSeriesFromObseravations(scala_date_list, timestamp_column, key_column, value_column))
| 46.886598
| 329
| 0.638083
|
936ccb2bbdc303b44f32c0f49b569cae86bdea8a
| 6,069
|
py
|
Python
|
js-axe-selenium/ansible/a11y/lib/python3.8/site-packages/ansible/modules/cloud/openstack/os_user_role.py
|
dalsontws/accessibility-axe-selenium
|
ca0b54d550c9763902bd22bbc48562b9b211dd42
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
js-axe-selenium/ansible/a11y/lib/python3.8/site-packages/ansible/modules/cloud/openstack/os_user_role.py
|
dalsontws/accessibility-axe-selenium
|
ca0b54d550c9763902bd22bbc48562b9b211dd42
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
env/lib/python3.9/site-packages/ansible/modules/cloud/openstack/os_user_role.py
|
unbounce/aws-name-asg-instances
|
e0379442e3ce71bf66ba9b8975b2cc57a2c7648d
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# Copyright (c) 2016 IBM
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_user_role
short_description: Associate OpenStack Identity users and roles
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Grant and revoke roles in either project or domain context for
OpenStack Identity Users.
options:
role:
description:
- Name or ID for the role.
required: true
user:
description:
- Name or ID for the user. If I(user) is not specified, then
I(group) is required. Both may not be specified.
group:
description:
- Name or ID for the group. Valid only with keystone version 3.
If I(group) is not specified, then I(user) is required. Both
may not be specified.
project:
description:
- Name or ID of the project to scope the role association to.
If you are using keystone version 2, then this value is required.
domain:
description:
- Name or ID of the domain to scope the role association to. Valid only
with keystone version 3, and required if I(project) is not specified.
state:
description:
- Should the roles be present or absent on the user.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Grant an admin role on the user admin in the project project1
- os_user_role:
cloud: mycloud
user: admin
role: admin
project: project1
# Revoke the admin role from the user barney in the newyork domain
- os_user_role:
cloud: mycloud
state: absent
user: barney
role: admin
domain: newyork
'''
RETURN = '''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(state, assignment):
if state == 'present' and not assignment:
return True
elif state == 'absent' and assignment:
return True
return False
def _build_kwargs(user, group, project, domain):
kwargs = {}
if user:
kwargs['user'] = user
if group:
kwargs['group'] = group
if project:
kwargs['project'] = project
if domain:
kwargs['domain'] = domain
return kwargs
def main():
argument_spec = openstack_full_argument_spec(
role=dict(required=True),
user=dict(required=False),
group=dict(required=False),
project=dict(required=False),
domain=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
required_one_of=[
['user', 'group']
])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
role = module.params.get('role')
user = module.params.get('user')
group = module.params.get('group')
project = module.params.get('project')
domain = module.params.get('domain')
state = module.params.get('state')
sdk, cloud = openstack_cloud_from_module(module)
try:
filters = {}
r = cloud.get_role(role)
if r is None:
module.fail_json(msg="Role %s is not valid" % role)
filters['role'] = r['id']
if domain:
d = cloud.get_domain(name_or_id=domain)
if d is None:
module.fail_json(msg="Domain %s is not valid" % domain)
filters['domain'] = d['id']
if user:
if domain:
u = cloud.get_user(user, domain_id=filters['domain'])
else:
u = cloud.get_user(user)
if u is None:
module.fail_json(msg="User %s is not valid" % user)
filters['user'] = u['id']
if group:
g = cloud.get_group(group)
if g is None:
module.fail_json(msg="Group %s is not valid" % group)
filters['group'] = g['id']
domain_id = None
if project:
if domain:
p = cloud.get_project(project, domain_id=filters['domain'])
# OpenStack won't allow us to use both a domain and project as
# filter. Once we identified the project (using the domain as
# a filter criteria), we need to remove the domain itself from
# the filters list.
domain_id = filters.pop('domain')
else:
p = cloud.get_project(project)
if p is None:
module.fail_json(msg="Project %s is not valid" % project)
filters['project'] = p['id']
assignment = cloud.list_role_assignments(filters=filters)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, assignment))
changed = False
if state == 'present':
if not assignment:
kwargs = _build_kwargs(user, group, project, domain_id)
cloud.grant_role(role, **kwargs)
changed = True
elif state == 'absent':
if assignment:
kwargs = _build_kwargs(user, group, project, domain_id)
cloud.revoke_role(role, **kwargs)
changed = True
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
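# --- Editor's note: a hedged sketch, not part of the upstream module. The two
# pure helpers above can be reasoned about without an OpenStack cloud:
#   _build_kwargs('u1', None, 'p1', None)  -> {'user': 'u1', 'project': 'p1'}   (unset scopes are dropped)
#   _system_state_change('present', [])    -> True   (assignment missing, a grant is needed)
#   _system_state_change('absent', [])     -> False  (nothing to revoke)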
| 30.19403
| 125
| 0.604712
|
bb0d899ac66b950eb544b80f8b8523fbcc6adab9
| 391
|
py
|
Python
|
Action/action_enum.py
|
zonghan0904/Online-Realtime-Action-Recognition-based-on-OpenPose
|
7d81a84a46ab3a0f6aafc4734cd24e2bbc164a97
|
[
"Apache-2.0"
] | null | null | null |
Action/action_enum.py
|
zonghan0904/Online-Realtime-Action-Recognition-based-on-OpenPose
|
7d81a84a46ab3a0f6aafc4734cd24e2bbc164a97
|
[
"Apache-2.0"
] | null | null | null |
Action/action_enum.py
|
zonghan0904/Online-Realtime-Action-Recognition-based-on-OpenPose
|
7d81a84a46ab3a0f6aafc4734cd24e2bbc164a97
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
class Actions(Enum):
"""
Actions enum
"""
# framewise_recognition.h5
# squat = 0
# stand = 1
# walk = 2
# wave = 3
# framewise_recognition_under_scene.h5
# stand = 0
# walk = 1
# operate = 2
# fall_down = 3
# run = 4
# ncrl_framewise_recognition.h5
sit = 0
wave = 1
fall_down = 2
none = 3
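# --- Editor's note: a hedged usage sketch, not part of the upstream file. The enum
# is typically used to decode a classifier's output index:
#   Actions(2).name     -> 'fall_down'
#   Actions.wave.value  -> 1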
| 15.038462
| 42
| 0.537084
|
1a0a292f9e4fd56a18019f6bb6182cea627b037f
| 1,890
|
py
|
Python
|
test/functional_requirements/fault_tolerance/REBUILD_TWICE_IN_RAID10_ARRAY.py
|
so931/poseidonos
|
2aa82f26bfbd0d0aee21cd0574779a655634f08c
|
[
"BSD-3-Clause"
] | 1
|
2022-02-07T23:30:50.000Z
|
2022-02-07T23:30:50.000Z
|
test/functional_requirements/fault_tolerance/REBUILD_TWICE_IN_RAID10_ARRAY.py
|
so931/poseidonos
|
2aa82f26bfbd0d0aee21cd0574779a655634f08c
|
[
"BSD-3-Clause"
] | null | null | null |
test/functional_requirements/fault_tolerance/REBUILD_TWICE_IN_RAID10_ARRAY.py
|
so931/poseidonos
|
2aa82f26bfbd0d0aee21cd0574779a655634f08c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import os
import sys
sys.path.append("../")
sys.path.append("../../system/lib/")
sys.path.append("../volume/")
sys.path.append("../array/")
import json_parser
import pos
import pos_util
import cli
import api
import json
import MOUNT_VOL_ON_RAID10_ARRAY
import fio
import time
DETACH_TARGET_DEV1 = "unvme-ns-0"
DETACH_TARGET_DEV2 = "unvme-ns-1"
NEW_SPARE_DEV1 = "unvme-ns-4"
NEW_SPARE_DEV2 = "unvme-ns-5"
ARRAYNAME = MOUNT_VOL_ON_RAID10_ARRAY.ARRAYNAME
def execute():
MOUNT_VOL_ON_RAID10_ARRAY.execute()
fio_proc = fio.start_fio(0, 30)
fio.wait_fio(fio_proc)
api.detach_ssd_and_attach(DETACH_TARGET_DEV1)
time.sleep(1)
if api.check_situation(ARRAYNAME, "DEGRADED") is False:
return "fail"
api.detach_ssd_and_attach(DETACH_TARGET_DEV2)
time.sleep(1)
if api.check_situation(ARRAYNAME, "DEGRADED") is False:
return "fail"
cli.add_device(NEW_SPARE_DEV1, ARRAYNAME)
if api.wait_situation(ARRAYNAME, "REBUILDING") == True:
print ("now 1st rebuilding...")
newSitu = api.wait_situation_changed(ARRAYNAME, "REBUILDING")
print ("1st rebuilding done and new situ: " + newSitu)
if newSitu == "DEGRADED":
print ("may be 1st rebuild complete")
cli.add_device(NEW_SPARE_DEV2, ARRAYNAME)
if api.wait_situation(ARRAYNAME, "REBUILDING") == True:
print ("now 2nd rebuilding...")
if api.wait_situation(ARRAYNAME, "NORMAL") == True:
print ("may be 2nd rebuild complete")
return "pass"
return "fail"
if __name__ == "__main__":
if len(sys.argv) >= 2:
pos.set_addr(sys.argv[1])
api.clear_result(__file__)
result = execute()
ret = api.set_result_manually(cli.array_info(ARRAYNAME), result, __file__)
pos.flush_and_kill_pos()
exit(ret)
| 30.983607
| 78
| 0.670899
|
72227b2adb4d9549090c42c63365bf12654b8416
| 3,913
|
py
|
Python
|
ludwig/run.py
|
phueb/LudwigCluster
|
1c1f8cdb26031a9ed78232482cfa1e4fe9e36256
|
[
"MIT"
] | null | null | null |
ludwig/run.py
|
phueb/LudwigCluster
|
1c1f8cdb26031a9ed78232482cfa1e4fe9e36256
|
[
"MIT"
] | 1
|
2022-03-30T14:07:13.000Z
|
2022-03-30T14:07:13.000Z
|
ludwig/run.py
|
phueb/LudwigCluster
|
1c1f8cdb26031a9ed78232482cfa1e4fe9e36256
|
[
"MIT"
] | 2
|
2020-06-15T13:06:53.000Z
|
2021-02-12T00:33:29.000Z
|
import pickle
import socket
import yaml
import pandas as pd
import importlib
from pathlib import Path
import sys
from typing import Dict, Any
import shutil
# do not import ludwig here - this file is run on Ludwig workers
def save_job_files(param2val: Dict[str, Any],
series_list: list,
runs_path: Path,
) -> None:
if not series_list:
print('WARNING: Job did not return any results')
# job_path refers to local directory if --isolated but not --local
job_path = runs_path / param2val['param_name'] / param2val['job_name']
if not job_path.exists():
job_path.mkdir(parents=True)
# save series_list
for series in series_list:
if not isinstance(series, pd.Series):
raise TypeError('Object returned by job must be a pandas.Series.')
if series.name is None:
raise AttributeError('Each pandas.Series returned by job must have attribute name refer to unique string.')
with (job_path / '{}.csv'.format(series.name)).open('w') as f:
series.to_csv(f, index=True, header=[series.name]) # cannot name the index with "header" arg
print(f'Saved results to {job_path}')
# save param2val
param2val_path = runs_path / param2val['param_name'] / 'param2val.yaml'
if not param2val_path.exists():
param2val_path.parent.mkdir(exist_ok=True)
param2val['job_name'] = None
with param2val_path.open('w', encoding='utf8') as f:
yaml.dump(param2val, f, default_flow_style=False, allow_unicode=True)
print(f'Saved param2val to {param2val_path}')
# move contents of save_path to job_path (can be local or remote)
save_path = Path(param2val['save_path'])
src = str(save_path)
dst = str(job_path)
if save_path.exists(): # user may not create a directory at save path
shutil.move(src, dst) # src is no longer available afterwards
print(f'Moved contents of save_path to {job_path}')
def run_job_on_ludwig_worker(param2val):
"""
run a single job on on a single worker.
this function is called on a Ludwig worker.
this means that the package ludwig cannot be imported here.
the package ludwig works client-side, and cannot be used on the workers or the file server.
"""
# prepare save_path - this must be done on worker
save_path = Path(param2val['save_path'])
if not save_path.exists():
save_path.mkdir(parents=True)
# execute job
series_list = job.main(param2val) # name each returned series using 'name' attribute
# save results
runs_path = remote_root_path / 'runs'
save_job_files(param2val, series_list, runs_path)
if __name__ == '__main__':
# get src_name + project_name
project_name = Path(__file__).stem.replace('run_', '')
src_name = project_name.lower()
# define paths - do not use any paths defined in user project (they may be invalid)
ludwig_data = Path('/') / 'media' / 'ludwig_data'
remote_root_path = ludwig_data / project_name
# allow import of modules located in remote root path
sys.path.append(str(remote_root_path))
# import user's job to execute
job = importlib.import_module('{}.job'.format(src_name))
# find jobs
hostname = socket.gethostname()
pattern = f'{hostname.lower()}_*.pkl'
pickled_param2val_paths = list(remote_root_path.glob(pattern))
if not pickled_param2val_paths:
print('No jobs found.') # that's okay. run.py was triggered which triggered killing of active jobs on worker
else:
print(f'Found {len(pickled_param2val_paths)} jobs:')
for p in pickled_param2val_paths:
print(p)
# run all jobs
for param2val_path in pickled_param2val_paths:
with param2val_path.open('rb') as f:
param2val = pickle.load(f)
run_job_on_ludwig_worker(param2val)
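# --- Editor's note: a hedged sketch, not part of the upstream file. save_job_files()
# above requires every result to be a pandas.Series with a unique string name, so a
# user's job.main(param2val) would return something like this (names and values are
# illustrative only):
#
#   import pandas as pd
#   def main(param2val):
#       eval_steps = [0, 1, 2]
#       return [pd.Series([0.9, 0.5, 0.3], index=eval_steps, name='loss'),
#               pd.Series([0.4, 0.7, 0.8], index=eval_steps, name='accuracy')]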
| 36.570093
| 119
| 0.677741
|
511121e4661f8690ee3845332c4254173acb15a6
| 3,170
|
py
|
Python
|
math_helpers/time_systems.py
|
sonstarr/astralib
|
af99e5437add14d2e5a8528cf001c3f12a39690b
|
[
"MIT"
] | null | null | null |
math_helpers/time_systems.py
|
sonstarr/astralib
|
af99e5437add14d2e5a8528cf001c3f12a39690b
|
[
"MIT"
] | null | null | null |
math_helpers/time_systems.py
|
sonstarr/astralib
|
af99e5437add14d2e5a8528cf001c3f12a39690b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def get_JD(year=None, month=None, day=None, hour=None, min=None, sec=None,
string=None, format='yyyy-mm-dd hh:mm:ss', rtn='jd'):
"""compute the current Julian Date based on the given time input
:param year: given year between 1901 and 2099
:param month: month 1-12
:param day: days
:param hour: hours
:param min: minutes
:param sec: seconds
:param string: date string with format referencing "format" input
:param format: format of string input; currently accepts:
'yyyy-mm-dd hh:mm:ss'
'dd mmm yyyy hh:mm:ss'
:param rtn: optional return parameter; jd or mjd (modified julian)
default=jd
:return jd: Julian date
:return mjd: modified julian date
"""
if string:
if format == 'yyyy-mm-dd hh:mm:ss':
year = float(string[:4])
month = float(string[5:7])
day = float(string[8:10])
hour = float(string[11:13])
min = float(string[14:16])
sec = float(string[17:19])
elif format == 'dd mmm yyyy hh:mm:ss':
months = {'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4, 'May':5,
'Jun':6, 'Jul':7, 'Aug':8, 'Sep':9, 'Oct':10,
'Nov':11, 'Dec':12}
year = float(string[7:11])
month = float(months[f'{string[3:6]}'])
day = float(string[:2])
hour = float(string[12:14])
min = float(string[15:17])
sec = float(string[18:20])
# compute julian date
jd = 1721013.5 + 367*year - int(7/4*(year+int((month+9)/12))) \
+ int(275*month/9) + day + (60*hour + min + sec/60)/1440
if rtn == 'mjd':
# compute mod julian
mjd = jd - 2400000.5
return mjd
else:
return jd
def cal_from_jd(jd, rtn=None):
"""convert from calendar date to julian
:param jd: julian date
:param rtn: optional return arg. "string" will return a string;
default is a tuple of values
:return: tuple of calendar date in format:
(year, month, day, hour, min, sec)
"""
# days in a month
lmonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# years
T1990 = (jd-2415019.5)/365.25
year = 1900 + int(T1990)
leapyrs = int((year-1900-1)*0.25)
days = (jd-2415019.5)-((year-1900)*365 + leapyrs)
if days < 1.:
year = year - 1
leapyrs = int((year-1900-1)*0.25)
days = (jd-2415019.5) - ((year-1900)*365 + leapyrs)
# determine if leap year
if year%4 == 0:
lmonth[1] = 29
# months, days
dayofyr = int(days)
mo_sum = 0
count = 1
for mo in lmonth:
if mo_sum + mo < dayofyr:
mo_sum += mo
count += 1
month = count
day = dayofyr - mo_sum
# hours, minutes, seconds
tau = (days-dayofyr)*24
h = int(tau)
minute = int((tau-h)*60)
s = (tau-h-minute/60)*3600
if rtn == 'string':
return f'{year}-{month}-{day} {h}:{minute}:{s}'
return (year, month, day, h, minute, s)
if __name__ == '__main__':
pass
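# --- Editor's note: a hedged sanity check of the helpers above, using J2000.0
# (2000-01-01 12:00:00), which this algorithm maps to JD 2451545.0:
#   get_JD(2000, 1, 1, 12, 0, 0)              -> 2451545.0
#   get_JD(string='2000-01-01 12:00:00')      -> 2451545.0
#   get_JD(2000, 1, 1, 12, 0, 0, rtn='mjd')   -> 51544.5
#   cal_from_jd(2451545.0)                    -> (2000, 1, 1, 12, 0, 0.0)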
| 29.90566
| 75
| 0.53123
|
805369623fd166376f4eecba8d4aa178969c9d21
| 15,663
|
py
|
Python
|
learning/tests.py
|
skyfather/Masomo-Quiz-App
|
585b50514134094303d8821b2c594b5c3f4def8a
|
[
"MIT"
] | 1
|
2020-07-20T17:13:40.000Z
|
2020-07-20T17:13:40.000Z
|
learning/tests.py
|
skyfather/Masomo-Quiz-App
|
585b50514134094303d8821b2c594b5c3f4def8a
|
[
"MIT"
] | 7
|
2021-04-08T21:16:26.000Z
|
2022-03-12T00:39:13.000Z
|
learning/tests.py
|
skyfather/Masomo-Quiz-App
|
585b50514134094303d8821b2c594b5c3f4def8a
|
[
"MIT"
] | null | null | null |
from django.test import TestCase,TransactionTestCase,RequestFactory
from django.test import Client
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.core.paginator import Paginator,Page, PageNotAnInteger, EmptyPage
from .views.quiz import QuizList,CreateQuizView,QuizDetailView,QuizUpdateView,QuizDeleteView,quiz_taking,quiz_results
from .models import Quiz,Subject, Question, Answer, QuizTaker, QuizTakerResponse
from users.models import CustomUser, Student
# Create your tests here.
def create_custom_user2(username,is_student=False,is_teacher=False,password="secure"):
custom_user = CustomUser.objects.create(username=username,is_student=is_student,is_teacher=is_teacher)
# custom_user.set_password(password)
return custom_user
# CustomUser.objects.create(username="stud1",email="std1@students.com", is_student=True)
def create_custom_user(username,email="user@mail.com",is_student=False,is_teacher=False,password="secure"):
return get_user_model().objects.create_user(username=username,email=email,password=password,is_student=is_student,is_teacher=is_teacher)
def create_student(user):
return Student.objects.create(user=user)
def create_subject(name):
return Subject.objects.create(name=name)
def create_quiz(name,subject,owner):
return Quiz.objects.create(name=name,subject=subject,owner=owner)
def create_question(question,quiz):
return Question.objects.create(question=question,quiz=quiz)
def create_answer(answer,question,is_correct=False):
return Answer.objects.create(answer=answer,question=question,is_correct=is_correct)
def create_quiz_taker(quiz,student):
return QuizTaker.objects.create(quiz=quiz,student=student)
def create_quiz_taker_response(quiztaker,question,answer):
return QuizTakerResponse.objects.create(quiztaker=quiztaker,question=question,answer=answer)
class QuizTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.subject1 = create_subject(name="Science")
cls.teacher = create_custom_user2(username="teacher_1",password="secure",is_teacher=True)
cls.quiz1 = create_quiz(name="First Quiz", subject=cls.subject1, owner=cls.teacher)
def test_quiz(self):
self.assertIsInstance(self.quiz1,Quiz)
self.assertEqual(self.quiz1.subject,self.subject1)
class QuestionTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.subject1 = create_subject(name="Science")
cls.teacher = create_custom_user2(username="teacher_1",password="secure",is_teacher=True)
cls.quiz1 = create_quiz(name="First Quiz", subject=cls.subject1, owner=cls.teacher)
cls.question1 = create_question(question="What is science",quiz=cls.quiz1)
def test_question(self):
self.assertIsInstance(self.question1,Question)
self.assertNotEqual(self.quiz1.get_questions(),Question.objects.all())
class AnswerTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.subject1 = create_subject(name="Science")
cls.teacher = create_custom_user2(username="teacher_1",password="secure",is_teacher=True)
cls.quiz1 = create_quiz(name="First Quiz", subject=cls.subject1, owner=cls.teacher)
cls.quiz2 = create_quiz(name="Second Quiz", subject=cls.subject1, owner=cls.teacher)
cls.question1 = create_question(question="What is science",quiz=cls.quiz1)
cls.question2 = create_question(question="What is matter",quiz=cls.quiz2)
cls.answer1 = create_answer(answer="An attempt to explain a phenomenon",question=cls.question1,is_correct=True)
cls.answer2 = create_answer(answer="Just humanity ego in reasoning things out",question=cls.question1)
cls.answer3 = create_answer(answer="It's an art",question=cls.question2,is_correct=False)
def test_answer(self):
self.assertIsInstance(self.answer1,Answer)
self.assertIsInstance(self.answer3,Answer)
def test_answer_is_correct(self):
self.assertEqual(self.answer3.is_correct,False)
class QuizTakerTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.subject1 = create_subject(name="Science")
cls.subject2 = create_subject(name="Mathematics")
cls.teacher = create_custom_user2(username="teacher_1",password="secure",is_teacher=True)
cls.quiz1 = create_quiz(name="First Quiz", subject=cls.subject1, owner=cls.teacher)
cls.quiz2 = create_quiz(name="Second Quiz", subject=cls.subject1, owner=cls.teacher)
cls.question1 = create_question(question="What is science",quiz=cls.quiz1)
cls.question2 = create_question(question="What is matter",quiz=cls.quiz2)
cls.answer1 = create_answer(answer="An attempt to explain a phenomenon",question=cls.question1,is_correct=True)
cls.answer2 = create_answer(answer="Just humanity ego in reasoning things out",question=cls.question1)
cls.answer3 = create_answer(answer="It's an art",question=cls.question2,is_correct=False)
cls.stud_user1 = create_custom_user(username="stud1",email="std1@students.com", is_student=True)
cls.stud_user2 = create_custom_user(username="stud2",email="std2@students.com", is_student=True)
cls.student1 = create_student(user=cls.stud_user1)
cls.student2 = create_student(user=cls.stud_user2)
#Set interests for students
cls.student1.interests.add(cls.subject1)
cls.student2.interests.set([cls.subject1,cls.subject2])
cls.quiz_taker1 = create_quiz_taker(quiz=cls.quiz1, student=cls.student1)
cls.quiz_taker2 = create_quiz_taker(quiz=cls.quiz1, student=cls.student2)
def test_quiz_taker(self):
self.assertEqual(self.quiz_taker1.student, self.student1)
self.assertEqual(self.quiz_taker1.quiz, self.quiz1)
class QuizTakerResponseTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.subject1 = create_subject(name="Science")
cls.subject2 = create_subject(name="Mathematics")
cls.teacher = create_custom_user2(username="teacher_1",password="secure",is_teacher=True)
cls.quiz1 = create_quiz(name="First Quiz", subject=cls.subject1, owner=cls.teacher)
cls.quiz2 = create_quiz(name="Second Quiz", subject=cls.subject1, owner=cls.teacher)
cls.question1 = create_question(question="What is science",quiz=cls.quiz1)
cls.question2 = create_question(question="What is matter",quiz=cls.quiz2)
cls.question3 = create_question(question="How does lunar eclipse occur",quiz=cls.quiz2)
cls.answer1 = create_answer(answer="An attempt to explain a phenomenon",question=cls.question1,is_correct=True)
cls.answer2 = create_answer(answer="Just humanity ego in reasoning things out",question=cls.question1)
cls.answer3 = create_answer(answer="It's an art",question=cls.question2,is_correct=False)
cls.answer4 = create_answer(answer="When the sun is overshadowed by moon",question=cls.question3,is_correct=False)
cls.stud_user1 = create_custom_user(username="stud1",email="std1@students.com", is_student=True)
cls.stud_user2 = create_custom_user(username="stud2",email="std2@students.com", is_student=True)
cls.student1 = create_student(user=cls.stud_user1)
cls.student2 = create_student(user=cls.stud_user2)
#Set interests for students
cls.student1.interests.add(cls.subject1)
cls.student2.interests.set([cls.subject1,cls.subject2])
cls.quiz_taker1 = create_quiz_taker(quiz=cls.quiz1, student=cls.student1)
cls.quiz_taker2 = create_quiz_taker(quiz=cls.quiz1, student=cls.student2)
cls.stud_response1 = create_quiz_taker_response(quiztaker=cls.quiz_taker1, question=cls.question1, answer=cls.answer1)
cls.stud_response2 = create_quiz_taker_response(quiztaker=cls.quiz_taker1, question=cls.question1, answer=cls.answer2)
cls.stud_response3 = create_quiz_taker_response(quiztaker=cls.quiz_taker2, question=cls.question1, answer=cls.answer1)
def setUp(self):
self.stud_response1 = QuizTakerResponse.objects.get(quiztaker=self.quiz_taker1, question=self.question1, answer=self.answer1)
self.stud_response2 = QuizTakerResponse.objects.get(quiztaker=self.quiz_taker1, question=self.question1, answer=self.answer2)
self.stud_response3 = QuizTakerResponse.objects.get(quiztaker=self.quiz_taker2, question=self.question1, answer=self.answer1)
def test_response_creation(self):
self.assertIsInstance(self.student1, Student)
self.assertIsInstance(self.quiz_taker1, QuizTaker)
self.assertIsInstance(self.stud_response1,QuizTakerResponse)
def test_response_answer(self):
self.assertEqual(self.stud_response1.answer, self.answer1)
self.assertEqual(self.stud_response1.answer.is_correct,True)
self.assertEqual(self.stud_response2.answer.is_correct,False)
def test_quiz_responses(self):
self.assertEqual(self.quiz_taker1.get_quiz_response().count(),2)
self.assertEqual(self.quiz_taker2.get_quiz_response().count(),1)
def test_response_to_quiz_subject_not_in_interests(self):
with self.assertRaises(Quiz.DoesNotExist):
self.stud_response4 = QuizTakerResponse.objects.create(quiztaker=self.quiz_taker1, question=self.question2, answer=self.answer3)
class QuizViewTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.subject1 =Subject.objects.create(name="Science")
# cls.subject2 =Subject.objects.create(name="Mathematics")
cls.teacher = create_custom_user2(username="teacher_1",password="secure",is_teacher=True)
cls.quiz1 = Quiz.objects.create(name="First Quiz", subject=cls.subject1, owner=cls.teacher)
cls.quiz2 = Quiz.objects.create(name="Second Quiz", subject=cls.subject1, owner=cls.teacher)
def test_quiz_list(self):
# response = self.client.get('/quiz/')
response = self.client.get(reverse('quiz_list'))
#check if the response is 200 OK
self.assertEqual(response.status_code,200)
self.assertContains(response,text=self.quiz1.name,count=1,status_code=200)
self.assertIn(self.quiz2, response.context['object_list'])
self.assertEqual(len(response.context['object_list']),2)
def test_quiz_create(self):
self.factory = RequestFactory()
request = self.factory.get(reverse('create_quiz'))
request.user = create_custom_user(username="stud1",is_teacher=True)
response = CreateQuizView.as_view()(request)
self.assertEqual(response.status_code,200)
#No reason to test this
def test_quiz_details(self):
response = self.client.get(reverse('quiz_detail', args=[self.quiz1.pk]))
self.assertEqual(response.status_code,200)
self.assertContains(response, self.quiz1.name)
#test question list in the context
self.assertEqual(len(response.context['question_list']),len(self.quiz1.get_questions()))
class QuizTakeViewTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = create_custom_user(username="studentias",is_student=True, password="kriumkram")
cls.user2 = create_custom_user(username="stud2",is_student=True)
cls.stud1 = create_student(user=cls.user)
cls.stud2 = create_student(user=cls.user2)
cls.subj1 = create_subject("Science")
#Add the subject to student interests
cls.stud1.interests.add(cls.subj1)
cls.teacher = create_custom_user2(username="teacher_1",password="secure",is_teacher=True)
cls.quiz = create_quiz(name="First Science Quiz",subject=cls.subj1,owner=cls.teacher)
# cls.quiz2 = create_quiz(name="Second Science Quiz",subject=cls.subj1)
cls.question1 = create_question(question="Does Mars support life?",quiz=cls.quiz)
cls.answer1 = create_answer(answer="Yes",question=cls.question1)
cls.answer2 = create_answer(answer="Who knows",question=cls.question1, is_correct=True)
cls.answer3 = create_answer(answer="No",question=cls.question1)
def setUp(self):
self.factory = RequestFactory()
self.quiz1 = Quiz.objects.get(id=self.quiz.id)
# self.quiz2 = Quiz.objects.get(id=self.quiz2.id)
self.question_list = self.quiz1.get_questions()
def test_view_response(self):
self.response = self.client.get(reverse('take_quiz', args=[self.quiz1.pk]))
# self.assertIsInstance(self.quiz1,Quiz)
# self.assertEqual(self.response.status_code,200) # Status codeshould be 302->>redirects to login
redirect_url = reverse('login')+f'?next=/quiz/{self.quiz1.pk}/take/'
self.assertRedirects(self.response, redirect_url)
def test_post_start_quiz(self):
request = self.factory.post('take_quiz',{'start_quiz':'quiz_name'})
request.user = self.user
request.session = {}
response = quiz_taking(request,self.quiz1.pk)
#test response
self.assertEqual(response.status_code,200)
#test quiztaker in session
self.assertEqual(request.session['quiz_taker'],self.user.username)
#test quiz taken
self.assertEqual(request.session['quiz_taken'],self.quiz1.name)
def test_successful_post(self):
create_quiz_taker(quiz=self.quiz1,student=self.stud1)
request = self.factory.post('take_quiz',{'question':self.question1,'question_choice':self.answer3})
request.user = self.user
request.session = {}
response = quiz_taking(request,self.quiz1.pk)
self.assertEqual(response.status_code,200)
#test the one answering the question
self.assertEqual(request.session['taker'],self.user.username)
# #test the redirect of final question i.e self.question1 to the results page
# self.assertEqual(response.redirect_chain, f'http://127.0.0.1:8000/quiz/{self.quiz1.pk}/result/')
def test_unsuccessful_post(self):
create_quiz_taker(quiz=self.quiz1,student=self.stud1)
request = self.factory.post('take_quiz',{'question':self.question1,'question_choice':'Just kidding'})
request.user = self.user
request.session = {}
self.assertRaises(Answer.DoesNotExist, quiz_taking, request, self.quiz1.pk)
class QuizResultsViewTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = create_custom_user(username="studentias",is_student=True, password="kriumkram")
cls.stud1 = create_student(user=cls.user)
cls.subj1 = create_subject("Science")
cls.teacher = create_custom_user2(username="teacher_1",password="secure",is_teacher=True)
cls.quiz1 = create_quiz(name="First Science Quiz",subject=cls.subj1,owner=cls.teacher)
cls.question1 = create_question(question="Does Mars support life?",quiz=cls.quiz1)
cls.answer1 = create_answer(answer="Yes",question=cls.question1)
def setUp(self):
self.stud1.interests.add(self.subj1)
self.quiz_taker = create_quiz_taker(quiz=self.quiz1,student=self.stud1)
self.quiz_taker_response = create_quiz_taker_response(quiztaker=self.quiz_taker,question=self.question1,answer=self.answer1)
def test_final_post_results_redirect(self):
# self.client.login(username=self.user.username, password='kriumkram')
# self.client.force_login(self.user)
self.response = self.client.get('/quiz/1/result/',follow=True)
print("|||||||||||||||||||||||||||||||||||||||||||||")
print(self.response)
print(self.response.redirect_chain)
print(self.response.request)
print("|||||||||||||||||||||||||||||||||||||||||||||")
| 53.457338
| 140
| 0.726681
|
7c67de3590e90789ffdfe35ca3ff78ea03a2c4f3
| 73
|
py
|
Python
|
python/testData/formatter/comment_after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/formatter/comment_after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/formatter/comment_after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
def foo(a):
if a == 5:
# a is 5
print('no')
foo(5)
| 9.125
| 19
| 0.356164
|
3dc1a64c6a1e771c82e850542d15e10bf91050f9
| 1,271
|
py
|
Python
|
non_abundant_terms.py
|
wjma98/project-euler
|
301ca8ede66906f0d1723c64c37feb97322c2596
|
[
"MIT"
] | null | null | null |
non_abundant_terms.py
|
wjma98/project-euler
|
301ca8ede66906f0d1723c64c37feb97322c2596
|
[
"MIT"
] | null | null | null |
non_abundant_terms.py
|
wjma98/project-euler
|
301ca8ede66906f0d1723c64c37feb97322c2596
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 12:05:17 2021
@author: William
"""
def factors(n):
results = set()
for i in range(1, int(n**0.5) + 1):
if n % i == 0:
results.update([i,int(n/i)])
    # delete last element since we don't want n in the factor list
divisors = sorted(list(results))
del divisors[-1]
return divisors
def abundant_number(n):
if sum(factors(n)) > n:
return True
else:
return False
# Credit to StackOverflow post, https://stackoverflow.com/questions/11792708/generate-all-possible-combinations-from-a-int-list-under-a-limit
# User Lauritz V. Thaulow
# Finds all possible sums of a list below the limit
def sums(lst, limit):
from itertools import combinations_with_replacement
p = set(x + y for x, y in combinations_with_replacement(lst, 2))
return sorted([x for x in p if x <= limit])
# 12 is the smallest abundant number
# 28123 is the upper limit: every integer greater than 28123 can be written as the sum of two abundant numbers
end = 28123
all_abund_numbers = []
for i in range(1, end+1):
if abundant_number(i) == True:
all_abund_numbers.append(i)
all_pairs = sums(all_abund_numbers, end)
unique =[x for x in range(1, end+1) if x not in all_pairs]
print(sum(unique))
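# --- Editor's note: a hedged sketch of what the helpers above produce:
#   factors(12)         -> [1, 2, 3, 4, 6]   (proper divisors, n itself removed)
#   abundant_number(12) -> True              (1+2+3+4+6 = 16 > 12; 12 is the smallest abundant number)
#   sums([12, 18], 40)  -> [24, 30, 36]      (all pairwise sums not exceeding the limit)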
| 27.630435
| 141
| 0.66247
|
2a0a34a3953cc9c10995f6d7fe9b95fc3c7d732a
| 1,560
|
py
|
Python
|
docs/examples/desktop_app_authorization_code.py
|
janLuke/spotipie
|
a902c6cda9a3e61bbcfd4915e5102479a68ef3cb
|
[
"MIT"
] | 1
|
2021-05-02T10:32:38.000Z
|
2021-05-02T10:32:38.000Z
|
docs/examples/desktop_app_authorization_code.py
|
janLuke/spotipie
|
a902c6cda9a3e61bbcfd4915e5102479a68ef3cb
|
[
"MIT"
] | null | null | null |
docs/examples/desktop_app_authorization_code.py
|
janLuke/spotipie
|
a902c6cda9a3e61bbcfd4915e5102479a68ef3cb
|
[
"MIT"
] | null | null | null |
from spotipie import Credentials, AuthorizationCodeSession, Spotify
from spotipie.auth import get_user_authorization
from spotipie.utils import (
get_default_http_adapter,
pretty
)
# Load OAuth2 credentials from environment variables:
# {prefix}_CLIENT_ID, {prefix}_CLIENT_SECRET, {prefix}_REDIRECT_URI,
# The default prefix is SPOTIPIE.
creds = Credentials.from_environment(prefix='SPOTIFY')
# Create a session following the Authorization Code flow.
scope = ['user-library-read'] # adapt this to your requirements
session = AuthorizationCodeSession(
creds.client_id, creds.client_secret, creds.redirect_uri, scope=scope
)
# With ImplicitGrantSession, you don't need to pass "client_secret"
# session = ImplicitGrantSession(creds.client_id, creds.redirect_uri, scope=scope)
# [Optional] Mount an HTTPAdapter to implement retrying behavior and/or caching.
# You can use get_default_http_adapter() for a reasonable default
session.mount('https://', get_default_http_adapter())
# Authorize through the local flask app
# NOTE:
# - you need to install the optional dependency: ``pip install spotipie[auth-app]``
# - you need to whitelist ``http://localhost:{port}/callback`` in your Spotify App
get_user_authorization(session, app_name='YourAppName', port=1234)
# [ALTERNATIVE] If you installed spotipie without the optional authorization app
# from spotipie.auth import prompt_for_user_authorization
# prompt_for_user_authorization(session)
# Wrap the session
spotify = Spotify(session)
# Use the client
print(pretty(spotify.current_user()))
| 38.04878
| 83
| 0.79359
|
75e51f0d0fc0ddab452ce083326bab0046acae4e
| 271
|
py
|
Python
|
examples/arkham.py
|
lucasmsa/arkham-theme-
|
5f8892728bf776482412a5d9da308dc9a586a56e
|
[
"MIT"
] | 32
|
2020-06-15T15:40:08.000Z
|
2022-01-24T01:53:17.000Z
|
examples/arkham.py
|
lucasmsa/arkham-theme-
|
5f8892728bf776482412a5d9da308dc9a586a56e
|
[
"MIT"
] | 3
|
2020-06-22T03:42:05.000Z
|
2021-11-29T03:48:15.000Z
|
examples/arkham.py
|
lucasmsa/arkham-theme-
|
5f8892728bf776482412a5d9da308dc9a586a56e
|
[
"MIT"
] | 2
|
2021-04-27T20:02:51.000Z
|
2021-12-11T09:33:49.000Z
|
class Arkham_Asylum():
def __init__(self, enemy, weapon):
self.enemy = enemy
self.weapon = weapon
def attack(self):
return f"{self.enemy} attacked using a {self.weapon}"
joker = Arkham_Asylum('Joker', 'Knife')
joker.attack()
| 24.636364
| 61
| 0.612546
|
94cfb69ff60e2554b9e5b2ba289c36b0b67fda39
| 9,778
|
py
|
Python
|
notebooks/pre_production_runs/run_solver_kern_opt.py
|
Joshuaalbert/bayes_tec
|
655c4ec29427c7bb0616d5752c34207714a0151c
|
[
"Apache-2.0"
] | null | null | null |
notebooks/pre_production_runs/run_solver_kern_opt.py
|
Joshuaalbert/bayes_tec
|
655c4ec29427c7bb0616d5752c34207714a0151c
|
[
"Apache-2.0"
] | null | null | null |
notebooks/pre_production_runs/run_solver_kern_opt.py
|
Joshuaalbert/bayes_tec
|
655c4ec29427c7bb0616d5752c34207714a0151c
|
[
"Apache-2.0"
] | null | null | null |
from bayes_tec.solvers.phase_only_solver import PhaseOnlySolver
from bayes_tec.utils.data_utils import define_equal_subsets
from bayes_tec.logging import logging
import numpy as np
from timeit import default_timer
import gpflow as gp
from bayes_tec.utils.stat_utils import log_normal_solve, log_normal_solve_fwhm
from gpflow.priors import LogNormal
def create_kern(name):
kerns = {'rbf':gp.kernels.RBF,'m12':gp.kernels.Matern12, 'm32':gp.kernels.Matern32, 'm52':gp.kernels.Matern52}
s = name.split("_")
k_time = kerns[s[1].lower()]
k_dir = kerns[s[2].lower()]
if s[0].lower() == 'sum':
return _sum(k_time,k_dir)
elif s[0].lower() == 'product':
return _product(k_time,k_dir)
def _product(kern_time_, kern_dir_):
def _kern(kern_ls_lower=0.75, kern_ls_upper=1.25, kern_dir_ls=0.5, kern_time_ls=50., kern_var=1., include_time=True, include_dir=True, **priors):
kern_dir = kern_dir_(2,active_dims=slice(0,2,1))
kern_time = kern_time_(1,active_dims=slice(2,3,1))
kern = kern_dir*kern_time
kern_var = 1. if kern_var == 0. else kern_var
kern_dir.variance.trainable = False
kern_dir.lengthscales = kern_dir_ls
kern_dir_ls = log_normal_solve_fwhm(kern_dir_ls*kern_ls_lower, kern_dir_ls*kern_ls_upper, D=0.1)
kern_dir.lengthscales.prior = LogNormal(kern_dir_ls[0], kern_dir_ls[1]**2)
kern_dir.lengthscales.trainable = True
kern_time.variance = kern_var
kern_var = log_normal_solve_fwhm(kern_var*kern_ls_lower, kern_var*kern_ls_upper, D=0.1)
kern_time.variance.prior = LogNormal(kern_var[0], kern_var[1]**2)
kern_time.variance.trainable = True
kern_time.lengthscales = kern_time_ls
kern_time_ls = log_normal_solve_fwhm(kern_time_ls*kern_ls_lower, kern_time_ls*kern_ls_upper, D=0.1)
kern_time.lengthscales.prior = LogNormal(kern_time_ls[0], kern_time_ls[1]**2)
kern_time.lengthscales.trainable = True
return kern
return _kern
def _sum(kern_time_, kern_dir_):
def _kern(kern_ls_lower=0.75, kern_ls_upper=1.25, kern_dir_ls=0.5, kern_time_ls=50., kern_var=1., include_time=True, include_dir=True, **priors):
kern_dir = kern_dir_(2,active_dims=slice(0,2,1))
kern_time = kern_time_(1,active_dims=slice(2,3,1))
kern = kern_dir + kern_time
kern_var = 1. if kern_var == 0. else kern_var
kern_var = log_normal_solve_fwhm(kern_var*kern_ls_lower, kern_var*kern_ls_upper, D=0.1)
kern_dir.variance.prior = LogNormal(kern_var[0], kern_var[1]**2)
kern_dir.variance.trainable = True
kern_dir.variance = np.exp(kern_var[0])
kern_dir.lengthscales = kern_dir_ls
kern_dir_ls = log_normal_solve_fwhm(kern_dir_ls*kern_ls_lower, kern_dir_ls*kern_ls_upper, D=0.1)
kern_dir.lengthscales.prior = LogNormal(kern_dir_ls[0], kern_dir_ls[1]**2)
kern_dir.lengthscales.trainable = True
kern_time.variance.prior = LogNormal(kern_var[0], kern_var[1]**2)
kern_time.variance = np.exp(kern_var[0])
kern_time.variance.trainable = True
kern_time.lengthscales = kern_time_ls
kern_time_ls = log_normal_solve_fwhm(kern_time_ls*kern_ls_lower, kern_time_ls*kern_ls_upper, D=0.1)
kern_time.lengthscales.prior = LogNormal(kern_time_ls[0], kern_time_ls[1]**2)
kern_time.lengthscales.trainable = True
return kern
return _kern
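# --- Editor's note: a hedged sketch, not part of the upstream file. create_kern()
# expects names of the form "<combiner>_<time kernel>_<direction kernel>", e.g.:
#   create_kern('product_rbf_m32')  -> builder for RBF(time) * Matern32(direction)
#   create_kern('sum_m52_rbf')      -> builder for Matern52(time) + RBF(direction)
# The returned _kern callable still needs gpflow and the prior hyperparameters above.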
def test_new_solver():
# opt = {'initial_learning_rate': 0.0469346965745387, 'learning_rate_steps': 2.3379450095649053, 'learning_rate_decay': 2.3096977604598385, 'minibatch_size': 257, 'dof_ratio': 15.32485312998133, 'gamma_start': 1.749795137201838e-05, 'gamma_add': 0.00014740343452076625, 'gamma_mul': 1.0555893705407017, 'gamma_max': 0.1063958902418518, 'gamma_fallback': 0.15444066000616663}
opt = {'initial_learning_rate': 0.030035792298837113, 'learning_rate_steps': 2.3915384159241064, 'learning_rate_decay': 2.6685242978751798, 'minibatch_size': 128, 'dof_ratio': 10., 'gamma_start': 6.876944103773131e-05, 'gamma_add': 1e-4, 'gamma_mul': 1.04, 'gamma_max': 0.14, 'gamma_fallback': 0.1, 'priors' : {'kern_time_ls': 50., 'kern_dir_ls': 0.80}}
datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack_2.hdf5'
run_dir='run_dir_killms_kern_opt'
output_solset = "posterior_sol_kern_opt"
time_sel = slice(50,150,1)
ant_sel = "RS210HBA"
import itertools
res = []
for s in itertools.product(['product','sum'],['rbf','m32','m52'],['rbf','m32','m52']):
name = "_".join(s)
logging.info("Running {}".format(name))
solver = PhaseOnlySolver(run_dir, datapack)
solver._build_kernel = create_kern(name)
lik = solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
iterations=500,intra_op_threads=0, inter_op_threads=0, ant_sel=ant_sel, time_sel=time_sel,pol_sel=slice(0,1,1),debug=False,
W_diag=True, freq_sel=slice(0,48,1), plot_level=-1, return_likelihood=True, num_likelihood_samples=100, **opt)
res.append([name,-lik[0]/1e6,lik[1]/1e6])
logging.info("{} results {}".format(name,res))
with open("kern_opt_res.csv", 'a') as f:
f.write("{}\n".format(str(res[-1]).replace('[','').replace(']','') ))
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# iterations=500, intra_op_threads=0, inter_op_threads=0, ant_sel="CS*", time_sel=time_sel,pol_sel=slice(0,1,1),debug=False,
# W_diag=False, freq_sel=slice(0,48,1), **opt)
# W_diag = False
# dof_ratio = 20.
#
# run_dir = "run_dir_killms_notime_{}_{}".format(int(dof_ratio),'diag' if W_diag else 'chol')
# output_solset = "posterior_sol_notime_{}_{}".format(int(dof_ratio),'diag' if W_diag else 'chol')
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# initial_learning_rate=1e-2, final_learning_rate=1e-3, iterations=2000, minibatch_size=128, dof_ratio=dof_ratio,
# intra_op_threads=0, inter_op_threads=0, ant_sel=ant_sel, time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=W_diag, freq_sel=slice(0,48,1))
# ###
# # RS
# for i in range(18):
# time_sel = slice(i*200,min(3600,(i+1)*200),1)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=20.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=True, freq_sel=slice(0,48,1))
#
# ###
# # CS
# for i in range(18):
# time_sel = slice(i*200,min(3600,(i+1)*200),1)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=20.,intra_op_threads=0, inter_op_threads=0, ant_sel="CS*",
# time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=True, freq_sel=slice(0,48,1))
# run_dir = "run_dir_killms_10_Wdiag"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack_3.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=True)
# run_dir = "run_dir_killms_10_chol"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack_3.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=False)
#
# run_dir = "run_dir_ndppp_10_Wdiag"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/ndppp_datapack.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=True)
#
# run_dir = "run_dir_ndppp_10_chol"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=False)
if __name__ == '__main__':
test_new_solver()
| 56.195402
| 377
| 0.703723
|
0c1ed814d7601cb720f27ab7ce9ec216496e14a5
| 628
|
py
|
Python
|
olwi/hw/system.py
|
molkoback/olwi
|
449874a1de1a9addaaefe74010d49ca0fa2f9ce3
|
[
"MIT"
] | null | null | null |
olwi/hw/system.py
|
molkoback/olwi
|
449874a1de1a9addaaefe74010d49ca0fa2f9ce3
|
[
"MIT"
] | null | null | null |
olwi/hw/system.py
|
molkoback/olwi
|
449874a1de1a9addaaefe74010d49ca0fa2f9ce3
|
[
"MIT"
] | 1
|
2021-11-23T07:19:00.000Z
|
2021-11-23T07:19:00.000Z
|
import psutil
import time
def _cpu():
return psutil.getloadavg()
def _uptime():
tmp = int(time.time() - psutil.boot_time())
s = tmp % 60
tmp //= 60
m = tmp % 60
tmp //= 60
h = tmp % 24
tmp //= 24
return tmp, h, m, s
def _memory():
mem = psutil.virtual_memory()
return {
"total": round(mem.total/(1024**3), 1),
"used": round(mem.used/(1024**3), 1)
}
def _disk():
disk = psutil.disk_usage("/")
return {
"total": round(disk.total/(1024**3), 1),
"used": round(disk.used/(1024**3), 1)
}
def readSystemParam():
return {
"Uptime": _uptime(),
"CPU": _cpu(),
"Memory": _memory(),
"Disk": _disk()
}
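# --- Editor's note: a hedged usage sketch; readSystemParam() returns plain Python
# data of roughly this shape (values are illustrative only):
#   {'Uptime': (3, 7, 42, 10),                # days, hours, minutes, seconds
#    'CPU': (0.15, 0.1, 0.05),                # 1/5/15-minute load averages
#    'Memory': {'total': 7.7, 'used': 2.1},   # GiB
#    'Disk': {'total': 59.0, 'used': 12.3}}   # GiB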
| 16.102564
| 44
| 0.585987
|
13bb01e466c735f27fe0a613acc049813ea2ed45
| 180
|
py
|
Python
|
02-19-Cuma/tryExcept.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 9
|
2021-03-16T20:21:54.000Z
|
2022-01-08T09:15:10.000Z
|
02-19-Cuma/tryExcept.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 1
|
2021-02-28T21:27:17.000Z
|
2021-02-28T21:27:17.000Z
|
02-19-Cuma/tryExcept.py
|
cihatdev/misha-staj
|
d0ee95d5e77a7d7a1f16611d49c87be429a25b31
|
[
"MIT"
] | 1
|
2021-05-24T11:34:48.000Z
|
2021-05-24T11:34:48.000Z
|
try:
answer = 10 / 0
number = int(input("Enter a number: "))
print(number)
except ZeroDivisionError as err:
print(err)
except ValueError:
print("invalid input")
| 22.5
| 43
| 0.655556
|
bb272776e6b7d9e5ad4294a1474079895d8768fc
| 2,861
|
py
|
Python
|
scripts/cloud_results.py
|
mcopik/serverless-benchmarks
|
3b57688873853a1ea74e10b02a9d89f3a4d679ac
|
[
"BSD-3-Clause"
] | 35
|
2020-12-30T19:31:30.000Z
|
2022-03-28T11:10:00.000Z
|
scripts/cloud_results.py
|
mcopik/serverless-benchmarks
|
3b57688873853a1ea74e10b02a9d89f3a4d679ac
|
[
"BSD-3-Clause"
] | 24
|
2021-01-04T15:37:05.000Z
|
2022-03-14T00:45:20.000Z
|
scripts/cloud_results.py
|
mcopik/serverless-benchmarks
|
3b57688873853a1ea74e10b02a9d89f3a4d679ac
|
[
"BSD-3-Clause"
] | 10
|
2021-06-13T13:13:39.000Z
|
2021-12-20T22:05:50.000Z
|
#!python3
import argparse
import datetime
import docker  # needed for docker.from_env() below; imported explicitly rather than relying on the wildcard import
import json
import os
import sys  # needed for sys.path.append() below
from experiments_utils import *
from cache import cache
sys.path.append(PROJECT_DIR)
parser = argparse.ArgumentParser(description='Run cloud experiments.')
parser.add_argument('experiment_json', type=str, help='Path to JSON summarizing experiment.')
parser.add_argument('output_dir', type=str, help='Output dir')
parser.add_argument('--cache', action='store', default='cache', type=str,
help='cache directory')
parser.add_argument('--end-time', action='store', default='0', type=int,
help='Seconds to add to begin time for logs query. When 0 use current time.')
args = parser.parse_args()
experiment = json.load(open(args.experiment_json, 'r'))
deployment = experiment['config']['experiments']['deployment']
language = experiment['config']['experiments']['language']
cache_client = cache(args.cache)
docker_client = docker.from_env()
cached_config = cache_client.get_config(deployment)
experiment['config'][deployment].update(cached_config)
# Create deployment client
if deployment == 'aws':
from cloud_frontend.aws import aws
deployment_client = aws.aws(cache_client, experiment['config'],
language, docker_client)
elif deployment == 'azure':
from cloud_frontend.azure import azure
deployment_client = azure.azure(cache_client, experiment['config'],
language, docker_client)
else:
from cloud_frontend.gcp import gcp
deployment_client = gcp.gcp(cache_client, experiment['config'], language, docker_client)
storage_client = deployment_client.get_storage()
def recursive_visit(json_data: dict):
    # walk nested dictionaries only; leaf values have no .items() to iterate
    for key, val in json_data.items():
        if isinstance(val, dict):
            recursive_visit(val)
results = experiment['results']
function_name = experiment['experiment']['function_name']
deployment_config = experiment['config'][deployment]
experiment_begin = experiment['experiment']['begin']
if args.end_time > 0:
experiment_end = experiment_begin + args.end_time
else:
experiment_end = int(datetime.datetime.now().timestamp())
result_dir = os.path.join(args.output_dir, 'results')
os.makedirs(result_dir, exist_ok=True)
# get results
download_bucket(storage_client, experiment['experiment']['results_bucket'], result_dir)
requests = {}
for result_file in os.listdir(result_dir):
# file name is ${request_id}.json
request_id = os.path.splitext(os.path.basename(result_file))[0]
with open(os.path.join(result_dir, result_file), 'rb') as binary_json:
json_data = json.loads(binary_json.read().decode('utf-8'))
requests[request_id] = json_data
# get cloud logs
deployment_client.download_metrics(function_name, deployment_config,
experiment_begin, experiment_end, requests)
with open(os.path.join(args.output_dir, 'results.json'), 'w') as out_f:
json.dump(requests, out_f, indent=2)
| 35.7625
| 97
| 0.74065
|
9206634b3029debb11e97fbb67d50890d57fd67a
| 430
|
py
|
Python
|
management/pycom_functions/data_formatting/format_data_raw.py
|
AaltoIIC/OSEMA
|
38740e3dcfeb72a6d87e25190e6a73f6c60b199b
|
[
"MIT"
] | 2
|
2021-04-03T13:19:32.000Z
|
2022-01-03T00:38:55.000Z
|
management/pycom_functions/data_formatting/format_data_raw.py
|
AaltoIIC/OSEMA
|
38740e3dcfeb72a6d87e25190e6a73f6c60b199b
|
[
"MIT"
] | null | null | null |
management/pycom_functions/data_formatting/format_data_raw.py
|
AaltoIIC/OSEMA
|
38740e3dcfeb72a6d87e25190e6a73f6c60b199b
|
[
"MIT"
] | null | null | null |
def format_data(header_ts, data):
data_string = "Begin:{}\n".format(convert_to_epoch(header_ts))
data_string += "Variables:{"
for variable in VARIABLE_NAMES:
data_string += str(variable) + ", "
data_string += "time }\n"
data_string += "Data:"
for value_pair in data:
ts_packed = ustruct.pack("<L", value_pair[1])
data_string += str(value_pair[0] + ts_packed)
return data_string
| 35.833333
| 66
| 0.644186
|
6a9eee86e04553bbb971a8153bb2615a43144c4b
| 4,427
|
py
|
Python
|
model.py
|
ace19-dev/image-retrieval-tf
|
ddcf79fcb448189b74fa0cbe6d000091f7a53e8c
|
[
"MIT"
] | 6
|
2019-08-27T16:12:51.000Z
|
2020-04-18T04:38:45.000Z
|
model.py
|
ace19-dev/image-retrieval-tf
|
ddcf79fcb448189b74fa0cbe6d000091f7a53e8c
|
[
"MIT"
] | 4
|
2019-09-15T07:44:58.000Z
|
2020-04-21T06:33:55.000Z
|
model.py
|
ace19-dev/image-retrieval-tf
|
ddcf79fcb448189b74fa0cbe6d000091f7a53e8c
|
[
"MIT"
] | 1
|
2019-12-10T14:37:38.000Z
|
2019-12-10T14:37:38.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_v2
slim = tf.contrib.slim
batch_norm_params = {
'decay': 0.997, # batch_norm_decay
'epsilon': 1e-5, # batch_norm_epsilon
'scale': True, # batch_norm_scale
'updates_collections': tf.compat.v1.GraphKeys.UPDATE_OPS, # batch_norm_updates_collections
'is_training': True, # is_training
'fused': None, # Use fused batch norm if possible.
}
def basic_model(inputs,
num_classes,
is_training=True,
is_reuse=tf.compat.v1.AUTO_REUSE,
keep_prob=0.8,
attention_module=None,
scope='basic_model'):
'''
:param inputs: N x H x W x C tensor
:return:
'''
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = \
resnet_v2.resnet_v2_50(inputs,
num_classes=num_classes,
is_training=is_training,
reuse=is_reuse,
attention_module=attention_module,
scope='resnet_v2_50')
# # Global average pooling.
# net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
# end_points['global_pool'] = net
#
# batch_norm_params['is_training'] = is_training
# # net = slim.batch_norm(net, scope='batch_norm')
# # end_points['batch_norm'] = net
# net = slim.flatten(net, scope='flatten')
# end_points['flatten'] = net
# net = slim.fully_connected(net, 256, normalizer_fn=slim.batch_norm,
# normalizer_params=batch_norm_params, scope='fc1')
# end_points['fc1'] = net
#
# net = slim.fully_connected(net, num_classes, normalizer_fn=slim.batch_norm,
# normalizer_params=batch_norm_params, activation_fn=None, scope='fc2')
# end_points['fc2'] = net
logits = net
return logits, end_points
def deep_cosine_softmax(inputs,
num_classes,
is_training=True,
is_reuse=tf.compat.v1.AUTO_REUSE,
keep_prob=0.6,
attention_module=None,
scope=''):
def batch_norm_fn(x):
return slim.batch_norm(x, scope=tf.compat.v1.get_variable_scope().name + "/bn")
'''
:param inputs: N x V x H x W x C tensor
:return:
'''
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, _ = \
resnet_v2.resnet_v2_50(inputs,
num_classes=num_classes,
is_training=is_training,
reuse=is_reuse,
attention_module=attention_module,
scope='resnet_v2_50')
# ##############################
# cosine-softmax
# ##############################
# (?,1,1,2048)
feature_dim = net.get_shape().as_list()[-1]
# print("feature dimensionality: ", feature_dim)
net = slim.flatten(net)
net = slim.dropout(net, keep_prob=keep_prob)
net = slim.fully_connected(
net, feature_dim, normalizer_fn=batch_norm_fn,
weights_regularizer=slim.l2_regularizer(1e-8),
scope="fc1", weights_initializer=tf.truncated_normal_initializer(stddev=1e-3),
biases_initializer=tf.zeros_initializer())
features = net
# Features in rows, normalize axis 1.
features = tf.nn.l2_normalize(features, dim=1)
with slim.variable_scope.variable_scope("ball", reuse=is_reuse):
weights = slim.model_variable(
"mean_vectors", (feature_dim, int(num_classes)),
initializer=tf.truncated_normal_initializer(stddev=1e-3),
regularizer=None)
scale = slim.model_variable(
"scale", (), tf.float32,
initializer=tf.constant_initializer(0., tf.float32),
regularizer=slim.l2_regularizer(1e-1))
# tf.summary.scalar("scale", scale)
scale = tf.nn.softplus(scale)
# Mean vectors in colums, normalize axis 0.
weights_normed = tf.nn.l2_normalize(weights, dim=0)
logits = scale * tf.matmul(features, weights_normed)
return features, logits
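# --- Editor's note: a hedged sketch, not part of the upstream file. The "ball" block
# above produces scaled cosine similarities: feature rows and class mean vectors are
# both L2-normalised, so each logit is scale * cos(feature, class_mean). A minimal
# NumPy illustration of the same math (shapes and values are made up):
#
#   import numpy as np
#   features = np.random.randn(4, 8)                    # N x feature_dim
#   weights = np.random.randn(8, 3)                     # feature_dim x num_classes
#   f = features / np.linalg.norm(features, axis=1, keepdims=True)
#   w = weights / np.linalg.norm(weights, axis=0, keepdims=True)
#   scale = np.log1p(np.exp(0.0))                       # softplus(0), as in the model above
#   logits = scale * (f @ w)                            # entries lie in [-scale, scale]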
| 35.416
| 102
| 0.574881
|
bd4beedb8c92897ed42a35c601c600a6d6c925d5
| 65,611
|
py
|
Python
|
andriller/windows.py
|
pshem/andriller
|
c1e2d9e05f3630a7499907190552089be18563c5
|
[
"MIT"
] | 2
|
2021-11-22T03:38:06.000Z
|
2022-01-12T23:07:15.000Z
|
andriller/windows.py
|
pshem/andriller
|
c1e2d9e05f3630a7499907190552089be18563c5
|
[
"MIT"
] | null | null | null |
andriller/windows.py
|
pshem/andriller
|
c1e2d9e05f3630a7499907190552089be18563c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import json
import glob
import struct
import string
import shutil
import os.path
import pathlib
import logging
import binascii
import datetime
import functools
import itertools
import contextlib
import webbrowser
import tkinter as tk
from tkinter import ttk, font, filedialog, messagebox
from . import __version__, __app_name__
from . import config
from . import driller
from . import statics
from . import adb_conn
from . import decrypts
from . import decoders
from . import messages
from . import cracking
from . import screencap
from .utils import threaded, human_bytes, DrillerTools
from .tooltips import createToolTip
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def disable_control(event, *args, **kwargs):
try:
event.widget.config(state=tk.DISABLED)
yield
finally:
event.widget.config(state=tk.NORMAL)
class TextFieldHandler(logging.Handler):
def __init__(self, tk_obj, level=logging.NOTSET):
super().__init__(level=level)
self.tk_obj = tk_obj
def emit(self, record):
try:
log = self.format(record)
self.tk_obj.insert('end', f'{log}\n')
self.tk_obj.see('end')
except Exception:
self.handleError(record)
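# --- Editor's illustrative note (not part of the original file) ---------------
# TextFieldHandler routes log records into a Tk Text widget. Hedged wiring
# example (a real Tk root/Text widget is needed, as done in MainWindow.set_logger
# further down):
#
#     text_widget = tk.Text(root)
#     handler = TextFieldHandler(text_widget, level=logging.INFO)
#     handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
#     logger.addHandler(handler)
#     logger.info('hello')   # appended to the end of the Text widget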
def log_errors(method):
@functools.wraps(method)
def func(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except Exception as e:
self.logger.exception(f'{e}')
return func
class BaseWindow:
def __init__(self, root=None, title=__app_name__, **kwargs):
self.log_level = kwargs.get('log_level', logging.INFO)
logger.setLevel(self.log_level)
if root:
self.root = tk.Toplevel(root, takefocus=True)
self.root.protocol("WM_TAKE_FOCUS")
self.root.transient(root)
self.root.bind('<Escape>', lambda e: self.root.destroy())
else:
self.root = tk.Tk()
self.root.bind('<Double-Escape>', self.quit_app)
self.root.protocol("WM_DELETE_WINDOW", self.quit_app)
self.root.title(title)
self.root.resizable(False, False)
self.set_icon()
self.root.grid_columnconfigure(0, weight=1)
self.root.grid_rowconfigure(0, weight=1)
self.NWES = (tk.N, tk.W, tk.E, tk.S)
self.WE = (tk.W, tk.E)
logo_ = os.path.join(config.CODEPATH, 'res', 'logo.gif')
self.img_logo = tk.PhotoImage(master=root, file=logo_)
self.style_ttk = ttk.Style()
self.conf = config.Config()
if self.conf('theme'):
self.style_ttk.theme_use(self.conf('theme'))
self.FontMono = self.get_monospace_font()
self.FontStatus = font.Font(size='10', weight='bold')
self.FontTitle = font.Font(size='12', weight='bold')
self.FontInfo = font.Font(size='9', slant='italic')
self.OUTPUT = tk.StringVar()
self.mainframe = ttk.Frame(self.root, padding=5, relief='groove')
self.mainframe.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# self.mainframe.grid(row=0, column=0, sticky=self.NWES)
# self.mainframe.columnconfigure(1, weight=1)
# self.mainframe.rowconfigure(0, weight=1)
upframe = ttk.Frame(self.mainframe, padding="5 5 5 5")
upframe.grid(row=0, column=0, columnspan=3, sticky=self.NWES)
ttk.Label(upframe, image=self.img_logo).pack(expand=False, side=tk.TOP)
def set_icon(self):
if 'win32' in sys.platform:
icon_ = os.path.join(config.CODEPATH, 'res', 'icon3.ico')
self.root.iconbitmap(default=icon_)
elif 'linux' in sys.platform:
img_ = tk.Image('photo', file=os.path.join(config.CODEPATH, 'res', 'icon3.png'))
self.root.tk.call('wm', 'iconphoto', self.root._w, img_)
def mainloop(self):
self.root.mainloop()
def quit_app(self, event=None):
self.root.withdraw()
self.root.destroy()
@threaded
def set_output(self):
choose_dir = self.get_dir(path='default_path')
if choose_dir and os.path.isdir(choose_dir):
self.OUTPUT.set(os.path.realpath(choose_dir))
def about_msg(self):
messages.about_msg()
@staticmethod
def get_monospace_font():
return {
'linux': font.Font(size=9, family='Monospace'),
'win32': font.Font(size=9, family='Consolas'),
'darwin': font.Font(size=11, family='Menlo')
}.get(sys.platform, font.Font(size=9, family='Monospace'))
def get_file(self, fname, ftype=[], fsize=0, fsizes=[], lpath='last_path'):
filetypes = [("All files", "*")]
options = {'initialfile': fname, 'initialdir': self.conf(lpath)}
if not self.conf.is_mac:
options['filetypes'] = ftype + filetypes
dialog = filedialog.askopenfilename(**options)
if dialog and os.path.isfile(dialog):
size_ = os.path.getsize(dialog)
if fsize and (size_ != fsize):
raise FileHandlerError(f'The file selected is {size_} bytes, but {fsize} is expected.')
if fsizes and (size_ not in fsizes):
raise FileHandlerError('The file selected is of unexpected size.')
path_ = os.path.split(dialog)[0]
self.conf.update_conf(**{'DEFAULT': {lpath: path_}})
dialog = os.path.realpath(dialog)
return dialog
def get_dir(self, path='last_path'):
dialog = filedialog.askdirectory(initialdir=self.conf(path))
if dialog:
dialog = os.path.realpath(dialog)
self.conf.update_conf(**{'DEFAULT': {'last_path': dialog}})
return dialog
# Main Window -----------------------------------------------------------------
class MainWindow(BaseWindow):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_title()
# ADB moved to the bottom once the logger handler is configured
# self.adb = adb_conn.ADBConn(logger=logger, log_level=self.log_level)
self.registry = decoders.Registry()
self.menubar = tk.Menu(self.root, tearoff=0)
self.root['menu'] = self.menubar
self.build_file_menus()
self.build_decoders_menus()
self.build_utils_menus()
self.build_locks_menus()
self.build_tools_menus()
self.build_adb_menus()
self.build_help_menus()
self.DeviceStatus = tk.StringVar()
self.StatusMsg = tk.StringVar()
self.StatusMsg.set('Ready')
# MIDFRAME -----------------------------------------------------------
midframe = ttk.Frame(self.mainframe, padding=(5, 5, 5, 5))
midframe.grid(row=1, column=0, columnspan=2, sticky=self.NWES)
# Output folder
opframe = ttk.Labelframe(midframe,
text='Global Output Location (Decoders / Extraction / Parsing)',
padding=(5, 0, 0, 5))
opframe.pack(fill=tk.X, expand=0, side=tk.TOP)
self.output_button = ttk.Button(opframe, text="Output..", command=self.set_output)
self.output_button.pack(side=tk.LEFT)
createToolTip(self.output_button, 'Select the output location where data will be saved to')
ttk.Label(opframe, textvariable=self.OUTPUT, padding=(5, 0))\
.pack(expand=True, fill=tk.X, side=tk.LEFT)
noteframe = ttk.Notebook(midframe, padding=(1, 0))
noteframe.pack(fill=tk.X, expand=0, side=tk.TOP)
# ADB extract controls
extract_adb_frame = ttk.Frame(noteframe, padding=(5, 0))
noteframe.add(extract_adb_frame, text='Extraction (USB)')
self.check_button = ttk.Button(extract_adb_frame, text='Check')
self.check_button.bind('<Button-1>', self.check_usb)
createToolTip(self.check_button, 'Check if any Android devices are connected')
self.check_button.grid(row=1, column=0, sticky=tk.W)
ttk.Label(extract_adb_frame, font=self.FontStatus, textvariable=self.DeviceStatus, padding=(5, 0))\
.grid(row=1, column=1, sticky=tk.W)
self.extract_button = ttk.Button(extract_adb_frame, text='Extract')
self.extract_button.bind('<Button-1>', self.RunUsbExtraction)
self.extract_button.grid(row=2, column=0, sticky=tk.W)
createToolTip(self.extract_button, 'Extract and decode data from a connected Android device')
# self.open_report = tk.IntVar()
# self.open_report.set(1)
# self.open_report_button = ttk.Checkbutton(extract_adb_frame, text='Open REPORT.html in browser', var=self.open_report)
# self.open_report_button.grid(row=3, column=0, columnspan=2, sticky=tk.W)
# createToolTip(self.open_report_button, 'On successful extraction open the result in the browser')
self.force_backup = tk.IntVar()
self.force_backup_button = ttk.Checkbutton(extract_adb_frame, text='Use AB method (ignore root)', var=self.force_backup)
self.force_backup_button.grid(row=4, column=0, columnspan=2, sticky=tk.W)
createToolTip(self.force_backup_button, 'If rooted - force Android Backup extraction instead')
self.extract_shared = tk.IntVar()
self.extract_shared_button = ttk.Checkbutton(extract_adb_frame, text='Extract Shared Storage', var=self.extract_shared)
self.extract_shared_button.grid(row=5, column=0, columnspan=2, sticky=tk.W)
        createToolTip(self.extract_shared_button, 'File system extraction of shared storage\n(Pictures, Videos, Audio, other files)')
        # Folder extract controls
extract_folder_frame = ttk.Frame(noteframe, padding=(5, 0))
noteframe.add(extract_folder_frame, text='Parse (Folder)')
self.extract_folder = ttk.Button(extract_folder_frame, text='Directory..', )
self.extract_folder.grid(row=1, column=0, sticky=tk.W)
self.extract_folder.bind('<Button-1>', self.RunDirExtraction)
createToolTip(self.extract_folder, "Choose the '/data/data' directory to be parsed and data decoded")
# TAR extract controls
extract_tar_frame = ttk.Frame(noteframe, padding=(5, 0))
noteframe.add(extract_tar_frame, text='Parse (.TAR)')
self.extract_tar = ttk.Button(extract_tar_frame, text='TAR File..', )
self.extract_tar.bind('<Button-1>', self.RunTarExtraction)
self.extract_tar.grid(row=1, column=0, sticky=tk.W)
createToolTip(self.extract_tar, "Choose the 'data.tar' backup file to be parsed and data decoded")
# AB extract controls
extract_backup_frame = ttk.Frame(noteframe, padding=(5, 0))
noteframe.add(extract_backup_frame, text='Parse (.AB)')
self.extract_backup = ttk.Button(extract_backup_frame, text='AB File..', )
self.extract_backup.bind('<Button-1>', self.RunAbExtraction)
self.extract_backup.grid(row=1, column=0, sticky=tk.W)
createToolTip(self.extract_backup, "Choose the 'backup.ab' file to be parsed and data decoded")
# LOG FRAME --------------------------------------------------------
textframe = ttk.Frame(self.mainframe)
textframe.grid(row=2, column=0, columnspan=2, sticky=self.NWES)
# Text Field + logger
self.TF = tk.Text(
textframe, font=self.FontMono, wrap=tk.WORD, width=65,
bg='white', height=self.conf('window_size'))
self.TF.bind('<Button-3>', rClicker, add='')
self.TF.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.set_logger()
# Scrolling
vs = ttk.Scrollbar(textframe, orient=tk.VERTICAL)
vs.pack(side=tk.RIGHT, fill=tk.Y, expand=False)
vs['command'] = self.TF.yview
self.TF['yscrollcommand'] = vs.set
# Bottom buttons
clear_field = ttk.Button(self.mainframe, text='Clear Log', command=self.clear_log)
clear_field.grid(row=3, column=0, sticky=tk.W)
save_log = ttk.Button(self.mainframe, text='Save Log', command=self.save_log)
save_log.grid(row=3, columnspan=2, sticky=tk.E)
# STATUS UPDATE --------------------------------------------------------
downframe = ttk.Frame(self.mainframe, relief='groove')
downframe.grid(row=4, column=0, columnspan=2, sticky=self.NWES)
self.StatusMsgLabel = ttk.Label(downframe, relief='flat', padding=(5, 1),
font=self.FontStatus, textvariable=self.StatusMsg)
self.StatusMsgLabel.grid(row=4, column=0, sticky=tk.W, padx=5, pady=3)
self.StatusMsgLabel.configure(background='light green')
# STARTUP & TIME SETTINGS
logger.info(f'Started: {__app_name__} {__version__}')
logger.info(f"Time settings/format: {self.conf('date_format')}")
logger.info(f"Detected/PC time: {self.time_now_local}")
logger.info(f"Universal time: {self.time_now_utc}")
logger.info(f"Time in reports: {self.time_now_configured} <--") # \u2190
self.conf.check_latest_version(logger=self.logger)
# Setup ADB
# def setup_adb(self):
self.adb = adb_conn.ADBConn(logger=logger, log_level=self.log_level)
@property
def time_now_local(self):
now = datetime.datetime.now()
return now.strftime(self.conf.date_format)
@property
def time_now_configured(self):
now = datetime.datetime.now(self.conf.tzone)
return now.strftime(self.conf.date_format)
@property
def time_now_utc(self):
now = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=0)))
return now.strftime(self.conf.date_format)
def set_logger(self):
logging.basicConfig(level=self.log_level)
log_handler = TextFieldHandler(self.TF, level=self.log_level)
logger.addHandler(log_handler)
self.logger = logger
def set_title(self):
title = f'{__app_name__} - {__version__}'
self.root.title(title)
def clear_log(self):
if messagebox.askyesno(
message='Are you sure you want to clear the log?',
icon='question',
title='Clear log'):
self.TF.delete('1.0', tk.END)
def save_log(self):
dialog = filedialog.asksaveasfilename(
initialdir=self.conf('default_path'),
initialfile='andriller.log',
filetypes=[('Log files', '*.log')])
if dialog:
with open(dialog, 'w', encoding='UTF-8') as W:
W.write(self.TF.get('1.0', tk.END))
# Menu generators ---------------------------------------------------------
def build_file_menus(self):
menu_file = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(menu=menu_file, label='File', underline=0)
menu_file.add_command(label='Save log', underline=0, command=self.save_log)
menu_file.add_command(label='Clear log', underline=0, command=self.clear_log)
menu_file.add_command(label='Preferences', command=self.preferences)
menu_file.add_separator()
menu_file.add_command(label='Exit', underline=1, command=self.root.destroy, accelerator='Esc * 2')
def set_decoder(self, decoder):
name_ = f'menu_{decoder.__name__}'
setattr(self, name_, decoder)
return getattr(self, name_)
def build_decoders_menus(self):
menu_dec = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(menu=menu_dec, label='Decoders', underline=0)
for decoder in sorted(self.registry.decoders, key=lambda d: d.__name__):
dec = decoder.staged()
if dec.exclude_from_menus:
continue
action = lambda x = dec: self.decode_file(x)
label = f'{dec.title} ({dec.TARGET or dec.RETARGET})..'
menu_dec.add_command(label=label, command=action)
def build_help_menus(self):
menu_help = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(menu=menu_help, label='Help', underline=0)
menu_help.add_command(label='Visit website')
menu_help.add_separator()
menu_help.add_command(label='Run Update', command=lambda: self.conf.upgrade_package(logger=self.logger))
menu_help.add_separator()
menu_help.add_command(label='About', command=self.about_msg)
def build_adb_menus(self):
menu_adb = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(menu=menu_adb, label='ADB', underline=0)
for mode in ['', *adb_conn.ADBConn.MODES.keys()]:
label_ = f"Reboot: {mode.title() or 'Normal'}"
action = lambda x = mode: self.adb.reboot(mode=x)
menu_adb.add_command(label=label_, command=action)
def build_utils_menus(self):
menu_utils = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(menu=menu_utils, label='Apps Utils', underline=5)
# menu_utils.add_separator()
menu_utils.add_command(label="WhatsApp Crypt", command=self.whatsapp_crypt)
def build_locks_menus(self):
menu_locks = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(menu=menu_locks, label='Lockscreens', underline=0)
menu_locks.add_command(label='Gesture Pattern (Legacy)', command=self.brute_pattern)
menu_locks.add_separator()
menu_locks.add_command(label='PIN Cracking (Generic)', command=self.brute_gen_pin)
menu_locks.add_command(label='Password by Dictionary (Generic)', command=self.brute_gen_dict)
menu_locks.add_command(label='Password by Brute-Force (Generic)', command=self.brute_force_gen)
menu_locks.add_separator()
menu_locks.add_command(label='PIN Cracking (Samsung)', command=self.brute_sam_pin)
menu_locks.add_command(label='Password by Dictionary (Samsung)', command=self.brute_sam_dict)
# menu_locks.add_separator()
def build_tools_menus(self):
menu_tools = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(menu=menu_tools, label='Tools', underline=0)
menu_tools.add_command(label='Convert AB to TAR file..', command=self.ab_to_tar)
menu_tools.add_command(label='Extract AB to folder..', command=self.ab_to_folder)
menu_tools.add_separator()
menu_tools.add_command(label='Screen Capture', command=self.screencap)
@threaded
def ab_to_tar(self):
ab_file = self.get_file('', ftype=[('AB File', '*.ab')])
if ab_file:
logger.info(f'Converting {ab_file}')
self.StatusMsg.set('Converting to tar...')
tar_ = DrillerTools.ab_to_tar(ab_file, to_tmp=False)
logger.info(f'Converted to: {tar_}')
self.StatusMsg.set('Finished')
@threaded
def ab_to_folder(self):
ab_file = self.get_file('', ftype=[('AB File', '*.ab')])
if ab_file:
logger.info(f'Converting {ab_file}')
self.StatusMsg.set('Converting to tar...')
tar_ = DrillerTools.ab_to_tar(ab_file, to_tmp=False)
self.StatusMsg.set('Extracting tar members...')
dst_ = pathlib.Path(f'{ab_file}_extracted/')
dst_.mkdir()
for _ in DrillerTools.extract_form_tar(tar_, dst_, full=True):
pass
logger.info(f'Extracted to: {dst_}')
self.StatusMsg.set('Finished')
# Other Windows -----------------------------------------------------------
def preferences(self):
root = Preferences(root=self.root)
root.mainloop()
def whatsapp_crypt(self):
root = WhatsAppCrypt(root=self.root)
root.mainloop()
def brute_pattern(self):
root = BrutePattern(root=self.root)
root.mainloop()
def brute_gen_pin(self):
root = BruteGenPin(root=self.root)
root.mainloop()
def brute_gen_dict(self):
root = BruteGenDict(root=self.root)
root.mainloop()
def brute_sam_dict(self):
root = BruteSamDict(root=self.root)
root.mainloop()
def brute_sam_pin(self):
root = BruteSamPin(root=self.root)
root.mainloop()
def brute_force_gen(self):
root = BruteForceGen(root=self.root)
root.mainloop()
def screencap(self):
root = ScreenCap(root=self.root)
root.mainloop()
# Class functions ---------------------------------------------------------
@threaded
def check_usb(self, event):
with disable_control(event):
self.DeviceStatus.set('Please wait...')
if not self.adb.adb_bin:
self.DeviceStatus.set('ADB is not configured!')
return
self.adb('start-server')
serial, status = self.adb.device()
if status == 'offline':
self.DeviceStatus.set('Device is OFFLINE!')
elif status == 'unauthorized':
self.DeviceStatus.set('Device is UNAUTHORIZED!')
else:
self.DeviceStatus.set(f'Serial ID: {serial}' if serial else 'Device not detected!')
@threaded
@log_errors
def decode_file(self, decoder):
choose_file = self.get_file(decoder.TARGET)
if choose_file and os.path.isfile(choose_file):
file_path = os.path.realpath(choose_file)
logger.info(f'Decoding: {os.path.basename(file_path)}')
work_dir = self.OUTPUT.get() or os.path.split(file_path)[0]
dec = decoder.__class__(work_dir, file_path)
html_rep = dec.report_html()
report = work_dir / pathlib.Path(html_rep)
webbrowser.open_new_tab(report.as_uri())
dec.report_xlsx()
@threaded
def RunUsbExtraction(self, event):
with disable_control(event):
output_dir = self.OUTPUT.get()
if not output_dir:
messages.select_output()
elif self.DeviceStatus.get().endswith('!'):
messages.device_not_detected()
return
elif os.path.exists(output_dir):
self.StatusMsg.set('Running...')
drill = driller.ChainExecution(
output_dir,
status_msg=self.StatusMsg,
do_shared=self.extract_shared.get(),
use_adb=True,
logger=logger)
drill.InitialAdbRead()
drill.CreateWorkDir()
drill.DataAcquisition(
run_backup=self.force_backup.get(),
shared=self.extract_shared.get(),)
drill.DataExtraction()
drill.DecodeShared()
drill.DataDecoding()
drill.GenerateHtmlReport()
drill.GenerateXlsxReport()
drill.CleanUp()
@threaded
def RunAbExtraction(self, event):
with disable_control(event):
output_dir = self.OUTPUT.get()
if not output_dir:
messages.select_output()
elif os.path.exists(output_dir):
ab_file = self.get_file('', ftype=[('AB File', '*.ab')])
if ab_file and os.path.isfile(ab_file):
self.StatusMsg.set('Running...')
drill = driller.ChainExecution(
output_dir,
backup=ab_file,
status_msg=self.StatusMsg,
logger=logger)
drill.CreateWorkDir()
drill.DataExtraction()
drill.DataDecoding()
drill.DecodeShared()
drill.GenerateHtmlReport()
drill.GenerateXlsxReport()
drill.CleanUp()
@threaded
def RunTarExtraction(self, event=None):
with disable_control(event):
output_dir = self.OUTPUT.get()
if not output_dir:
messages.select_output()
elif os.path.exists(output_dir):
tar_file = self.get_file('', ftype=[('TAR File', '*.tar')])
if tar_file and os.path.isfile(tar_file):
self.StatusMsg.set('Running...')
drill = driller.ChainExecution(
output_dir,
tarfile=tar_file,
status_msg=self.StatusMsg,
logger=logger)
drill.CreateWorkDir()
drill.DataExtraction()
drill.DataDecoding()
drill.GenerateHtmlReport()
drill.GenerateXlsxReport()
drill.CleanUp()
@threaded
def RunDirExtraction(self, event=None):
with disable_control(event):
output_dir = self.OUTPUT.get()
if not output_dir:
messages.select_output()
elif os.path.exists(output_dir):
src_dir = self.get_dir()
if src_dir:
self.StatusMsg.set('Running...')
drill = driller.ChainExecution(
output_dir,
src_dir=src_dir,
status_msg=self.StatusMsg,
logger=logger)
drill.CreateWorkDir()
drill.ExtractFromDir()
drill.DataDecoding()
drill.GenerateHtmlReport()
drill.GenerateXlsxReport()
drill.CleanUp()
# WhatsApp Crypt --------------------------------------------------------------
class WhatsAppCrypt(BaseWindow):
KEY_SIZE = decrypts.WhatsAppCrypt.KEY_SIZE
SUFFIX = decrypts.WhatsAppCrypt.SUFFIX
def __init__(self, root=None, title='WhatsApp Crypt Decryptor'):
super().__init__(root=root, title=title)
self.guide = statics.WHATSAPP_CRYPT
self.work_dir = None
self.crypts = {}
self.key_file = None
self.supported = self.get_supported()
self._info = tk.StringVar()
self._info_but = tk.StringVar()
self._info_but.set('Show Info')
ttk.Label(self.mainframe, text=title, font=self.FontTitle).grid(row=1, column=0, columnspan=2)
tk.Button(self.mainframe, textvariable=self._info_but, relief='flat', command=self.info_toggle)\
.grid(row=1, column=2, columnspan=1, sticky=tk.E)
ttk.Label(self.mainframe, textvar=self._info).grid(row=5, column=0, columnspan=3, sticky=self.WE)
self.dir_label = tk.StringVar()
self.dir_but = ttk.Button(self.mainframe, text='Select directory', command=self.set_dir)
self.dir_but.grid(row=10, column=0, columnspan=1, sticky=tk.W)
ttk.Label(self.mainframe, textvar=self.dir_label).grid(row=10, column=1, columnspan=2, sticky=tk.W)
self.key_label = tk.StringVar()
self.key_but = ttk.Button(self.mainframe, text="Select 'key' file", command=self.set_key)
self.key_but.grid(row=11, column=0, columnspan=1, sticky=tk.W)
ttk.Label(self.mainframe, textvar=self.key_label).grid(row=11, column=1, columnspan=2, sticky=tk.W)
self.file_box = ttk.Treeview(self.mainframe, columns=['size', 'done'], selectmode=tk.EXTENDED)
self.file_box.heading('#0', text='File Name')
self.file_box.heading('size', text='Size')
self.file_box.heading('done', text='Decrypted')
self.file_box.column('size', width=30)
self.file_box.column('done', width=20)
self.file_box.tag_configure('success', background='light green')
self.file_box.tag_configure('failure', background='#ff8080')
self.file_box.grid(row=20, column=0, columnspan=3, sticky=self.WE)
self.dec_all = ttk.Button(self.mainframe, text='Decrypt All', command=self.decrypt_all)
self.dec_all.grid(row=30, column=0, sticky=tk.W)
self.dec_sel = ttk.Button(self.mainframe, text='Decrypt Selected', command=self.decrypt_sel)
self.dec_sel.grid(row=30, column=2, sticky=tk.E)
def info_toggle(self):
        if self._info.get():
            self._info.set('')
            self._info_but.set('Show Info')
        else:
            self._info.set(statics.WHATSAPP_CRYPT)
            self._info_but.set('Hide Info')
def controls_state(self, state):
for c in [self.dir_but, self.key_but, self.dec_all, self.dec_sel]:
c.configure(state=state)
def set_dir(self):
dialog = self.get_dir()
if dialog:
self.work_dir = dialog
self.dir_label.set(self.work_dir)
self.check_dir()
self.try_key_file()
def set_key(self, key=None):
dialog = key or self.get_file('key', fsize=self.KEY_SIZE)
if dialog:
self.key_file = None
self.key_label.set('')
self.key_file = dialog
self.key_label.set(self.key_file)
def try_key_file(self):
key = os.path.join(self.work_dir, 'key')
if os.path.isfile(key) and os.path.getsize(key) == self.KEY_SIZE:
logger.info('WhatsAppCrypt: key file was detected & automatically selected')
self.set_key(key=key)
def check_dir(self):
self.crypts.clear()
self.file_box.delete(*self.file_box.get_children())
path_ = os.path.join(self.work_dir, '*.crypt*')
for f in glob.iglob(path_):
done = os.path.exists(f'{os.path.splitext(f)[0]}{self.SUFFIX}')
size = human_bytes(os.path.getsize(f))
item = self.file_box.insert('', tk.END, text=os.path.basename(f), values=[size, done])
self.crypts[item] = f
def tree_update(self, iid, values):
self.file_box.item(iid, values=values)
def decrypt_all(self):
self.file_box.selection_add(self.file_box.get_children())
self.decrypt_sel()
def decrypt_sel(self):
sel = self.file_box.selection()
if not sel:
messagebox.showwarning('No selection made', 'Select at least one database to decrypt.')
self.run_decrypt(sel)
def run_decrypt(self, sel):
try:
self.controls_state(tk.DISABLED)
for i in sel:
file_ = self.crypts[i]
fname = os.path.basename(file_)
file_ext = file_.split('.')[-1].lower()
decrypter = self.supported.get(file_ext)
if decrypter:
try:
wadec = decrypter(file_, self.key_file)
if wadec.decrypt():
vals = self.file_box.item(i)['values']
vals[1] = True
self.file_box.item(i, values=vals, tags='success')
logger.info(f'WhatsAppCrypt: {fname} successfully decrypted.')
except decrypts.WhatsAppCryptError as err:
logger.error(f'WhatsAppCrypt: {err}')
self.file_box.item(i, tags='failure')
messagebox.showerror('WhatsApp decryption error', str(err))
except Exception as err:
logger.error(f'WhatsAppCrypt: {fname}: {err}')
self.file_box.item(i, tags='failure')
finally:
self.file_box.selection_set()
self.controls_state(tk.NORMAL)
def get_supported(self):
return {kls.CRYPT: kls for kls in decrypts.WhatsAppCrypt.__subclasses__()}
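# --- Editor's illustrative sketch (not part of the original file) -------------
# get_supported() builds its crypt-suffix -> decrypter map purely from subclass
# enumeration, so adding support for a new crypt version only requires defining
# another subclass with a CRYPT attribute. Generic version of the same pattern
# (class names invented, unrelated to the decrypts module):
class _BaseHandler:
    CRYPT = None

class _Crypt12Handler(_BaseHandler):
    CRYPT = 'crypt12'

class _Crypt14Handler(_BaseHandler):
    CRYPT = 'crypt14'

# {'crypt12': _Crypt12Handler, 'crypt14': _Crypt14Handler}
_supported_example = {kls.CRYPT: kls for kls in _BaseHandler.__subclasses__()}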
# Pattern Decoding Window -----------------------------------------------------
class BrutePattern(BaseWindow):
CANVAS_SIZE = 210
FACTOR = 3
def __init__(self, root=None, title='Lockscreen Gesture Pattern'):
super().__init__(root=root, title=title)
ttk.Label(self.mainframe, font=self.FontTitle, text=f'\n{title}\n').grid(row=1, column=0, columnspan=3)
self.FILE = tk.StringVar()
self.HASH = tk.StringVar()
self.PATTERN = tk.StringVar()
browse = ttk.Button(self.mainframe, text='Browse', command=self.select_file)
browse.grid(row=2, column=0, sticky=tk.E)
createToolTip(browse, "Select 'gesture.key' and it will be decoded automatically.")
ttk.Label(self.mainframe, textvariable=self.FILE).grid(row=2, column=1, columnspan=2, sticky=tk.W)
ttk.Label(self.mainframe, text='or').grid(row=3, column=0, sticky=tk.E)
hash_label = ttk.Label(self.mainframe, text='Gesture Hash: ')
hash_label.grid(row=4, column=0, sticky=tk.E)
createToolTip(hash_label, "Enter gesture.key hash value (40 hexadecimal characters) from:\n--> '/data/system/gesture.key'")
hash_field = ttk.Entry(self.mainframe, font=self.FontMono, textvariable=self.HASH, width=45)
hash_field.grid(row=4, column=1, columnspan=2, sticky=tk.W)
hash_field.bind('<Button-3>', rClicker, add='')
pattern_label = ttk.Label(self.mainframe, text='Pattern: ')
pattern_label.grid(row=6, column=0, sticky=tk.E)
        createToolTip(pattern_label, 'Double-click the entry field to edit, then press Draw.\nEnter as a list of integers, e.g. [8,4,0,1,2]')
result_field = ttk.Entry(self.mainframe, textvariable=self.PATTERN, font=self.FontTitle, width=25, state='disabled')
result_field.bind('<Button-3>', rClicker, add='')
result_field.bind('<Button-1>', lambda e: result_field.configure(state='normal'))
result_field.bind('<B3-Motion>', lambda e: result_field.configure(state='normal'))
result_field.bind('<FocusOut>', lambda e: result_field.configure(state='disabled'))
result_field.grid(row=6, column=1, columnspan=2, sticky=tk.W)
self.VISUAL = tk.Canvas(self.mainframe, width=self.CANVAS_SIZE, height=self.CANVAS_SIZE, borderwidth=0)
self.VISUAL.grid(row=7, column=1, sticky=tk.W)
self.draw_pattern(self.VISUAL, self.PATTERN.get())
decode_button = ttk.Button(self.mainframe, text='Decode', command=self.crack_pattern)
decode_button.grid(row=10, column=0, sticky=tk.E)
draw_button = ttk.Button(self.mainframe, text='Draw', command=lambda: self.draw_pattern(self.VISUAL, self.PATTERN.get()))
draw_button.grid(row=10, column=1, columnspan=2, sticky=tk.W)
ttk.Button(self.mainframe, text='Close', command=self.root.destroy).grid(row=10, column=2, sticky=tk.E)
# Pattern drawing code
def draw_pattern(self, widget, pattern=None):
POS = []
def checkered(canvas, line_distance):
for x in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(x, 0, x, self.CANVAS_SIZE, fill="#999999")
for y in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(0, y, self.CANVAS_SIZE, y, fill="#999999")
def numbered(canvas):
n = 0
step = self.CANVAS_SIZE // self.FACTOR
start = step // 2
stepx = start
for _ in range(self.FACTOR):
stepy = start
while stepy < self.CANVAS_SIZE:
canvas.create_oval(
stepy + (self.CANVAS_SIZE // 15),
stepx + (self.CANVAS_SIZE // 15),
stepy - (self.CANVAS_SIZE // 15),
stepx - (self.CANVAS_SIZE // 15),
fill='#444444', outline='#444444')
canvas.create_text(
stepy,
stepx,
font=(self.CANVAS_SIZE // 10),
text=str(n),
fill='#FFFFFF')
POS.append((stepy, stepx))
n += 1
stepy += step
stepx += step
def clean_pat(pattern):
try:
return json.loads(pattern)
except Exception as e:
logger.warning(f'{e}')
return []
def draw(canvas, pattern=[]):
canvas.delete(tk.ALL)
self.draw_pattern(self.VISUAL, None)
if pattern:
combo = list(itertools.chain(*[POS[_] for _ in clean_pat(pattern)]))
if combo:
canvas.create_line(
combo,
arrow='last',
arrowshape=[
self.CANVAS_SIZE // 25,
self.CANVAS_SIZE // 20,
self.CANVAS_SIZE // 40
],
width=(self.CANVAS_SIZE // 70),
fill='#00CC00')
checkered(widget, self.CANVAS_SIZE // self.FACTOR)
numbered(widget)
if pattern:
draw(widget, pattern)
def select_file(self):
selection = filedialog.askopenfilename(
initialdir=self.conf('last_path'),
initialfile='gesture.key',
filetypes=[('Key file', '*.key'), ('Any file', '*')])
if selection and os.path.isfile(selection):
file_path = os.path.realpath(selection)
if os.path.getsize(file_path) != 20:
return # TODO: error message
self.conf.update_conf(**{'DEFAULT': {'last_path': os.path.dirname(file_path)}})
with open(file_path, 'rb') as R:
self.HASH.set(binascii.hexlify(R.read()).decode())
self.crack_pattern()
@threaded
def crack_pattern(self):
# '6a062b9b3452e366407181a1bf92ea73e9ed4c48'
sha = self.HASH.get().strip()
if len(sha) != 40:
return # TODO: error message
self.VISUAL.delete(tk.ALL)
self.draw_pattern(self.VISUAL, None)
self.PATTERN.set('Decoding...')
pat = cracking.crack_pattern(sha)
if pat:
pat = str(pat)
self.PATTERN.set(pat)
self.draw_pattern(self.VISUAL, pat)
else:
self.PATTERN.set(':(')
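# --- Editor's illustrative sketch (not part of the original file) -------------
# The legacy Android gesture.key is commonly documented as the SHA-1 digest of
# the pattern's cell indices (0-8) taken as raw bytes, which is what makes the
# exhaustive search above feasible. Hedged, self-contained version of that
# check; it does not use andriller's cracking module:
import hashlib
from itertools import permutations

def _find_pattern(target_hex, min_len=4, max_len=9):
    target = target_hex.lower()
    for length in range(min_len, max_len + 1):
        for pat in permutations(range(9), length):
            if hashlib.sha1(bytes(pat)).hexdigest() == target:
                return list(pat)
    return None

# Example round-trip: pattern [0, 1, 2] is recovered from its own hash.
assert _find_pattern(hashlib.sha1(bytes([0, 1, 2])).hexdigest(), min_len=3) == [0, 1, 2]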
# Generic PIN Cracking Window -------------------------------------------------
class LockscreenBase(BaseWindow):
def __init__(self, root=None, title=None, logger=logger):
self.logger = logger
super().__init__(root=root, title=title)
ttk.Label(self.mainframe, font=self.FontTitle, text=f'\n{title}\n').grid(row=1, column=0, columnspan=3)
self.START = tk.StringVar()
self.END = tk.StringVar()
self.START.set('0000')
self.END.set('9999')
self.HASH = tk.StringVar()
self.SALT = tk.IntVar()
self.SALT.set('')
self.RESULT = tk.StringVar()
self.DICTFILE = tk.StringVar()
self.DICTLAB = tk.StringVar()
self.TRIED = tk.StringVar()
self.RATE = tk.StringVar()
self.PROG = tk.StringVar()
self.STOP = tk.BooleanVar()
self.stats_enabled = False
self.prog_enabled = False
self.menubar = tk.Menu(self.root, tearoff=0)
self.root['menu'] = self.menubar
menu_read = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(menu=menu_read, label='Read..', underline=0)
menu_read.add_command(label="Salt from 'settings.db'...", command=self.salt_settings)
menu_read.add_command(label="Salt from 'locksettings.db'...", command=self.salt_locksettings)
menu_read.add_command(label="Salt from 'locksettings.db-wal'...", command=self.salt_locksettings_wal)
menu_read.add_command(label="Hash from 'password.key'...", command=self.password_read)
# Hash - 4
hash_label = ttk.Label(self.mainframe, text='Password Hash: ')
hash_label.grid(row=40, column=0, sticky=tk.E)
hash_field = ttk.Entry(self.mainframe, font=self.FontMono, textvariable=self.HASH, width=40)
hash_field.grid(row=40, column=1, columnspan=2, sticky=tk.W)
# Salt - 5
salt_label = ttk.Label(self.mainframe, text='Salt (integer): ')
salt_label.grid(row=50, column=0, sticky=tk.E)
salt_field = ttk.Entry(self.mainframe, font=self.FontMono, textvariable=self.SALT, width=20)
salt_field.grid(row=50, column=1, columnspan=2, sticky=tk.W)
# Results - 6
ttk.Label(self.mainframe, text='Result: ').grid(row=60, column=0, sticky=tk.E)
self.result_field = ttk.Label(self.mainframe, textvariable=self.RESULT, font=self.FontTitle, foreground='grey')
self.result_field.grid(row=60, column=1, columnspan=2, sticky=tk.W)
# Controls - 8
self.start_button = ttk.Button(self.mainframe, text='Start', command=self.start)
# self.start_button.bind('<Button-1>', self.start)
self.start_button.grid(row=80, column=0, sticky=tk.E)
self.stop_button = ttk.Button(self.mainframe, text='Stop', command=lambda: self.STOP.set(1))
self.stop_button.config(state=tk.DISABLED)
self.stop_button.grid(row=80, column=1, columnspan=2, sticky=tk.W)
self.close_button = ttk.Button(self.mainframe, text='Close', command=self.root.destroy)
self.close_button.grid(row=80, column=2, sticky=tk.E)
def salt_settings(self, key='lockscreen.password_salt'):
dialog = self.get_file(
'settings.db',
ftype=[('SQLite Databases', '*.db')])
if dialog:
dec = decoders.SettingsDecoder(None, dialog)
salt_value = dec.DICT.get(key)
if salt_value:
logger.info(f'Lockscreen salt: {salt_value}')
self.SALT.set(salt_value)
else:
messagebox.showwarning(
'Value not found',
f'`{key}` not found in the file!')
def salt_locksettings(self, key='lockscreen.password_salt'):
dialog = self.get_file(
'locksettings.db',
ftype=[('SQLite Databases', '*.db')])
if dialog:
try:
dec = decoders.LocksettingsDecoder(None, dialog)
salt_value = dec.DICT.get(key)
if salt_value:
logger.info(f'Lockscreen salt: {salt_value}')
self.SALT.set(salt_value)
except Exception:
messagebox.showwarning(
'Value not found',
f'`{key}` not found in the database!\nTry parsing the `locksettings.db-wal` instead.')
def salt_locksettings_wal(self):
dialog = self.get_file(
'locksettings.db-wal',
ftype=[('SQLite Write Ahead Logs', '*.db-wal')])
if dialog and os.path.getsize(dialog):
salt_values = decoders.parse_lockscreen_wal(dialog)
if len(salt_values) == 1:
logger.info(f'Lockscreen salt: {salt_values[0]}')
self.SALT.set(salt_values[0])
elif len(salt_values) > 1:
for n, s in enumerate(salt_values, start=1):
logger.info(f'Lockscreen salt_{n}: {s}')
messagebox.showwarning(
'Multiple results found',
'More than one value for salt was found! Check the log window to pick a value manually.')
else:
messagebox.showwarning(
'Value not found',
'Salt was not found in the file!')
def password_read(self):
try:
file_ = self.get_file('password.key', ftype=[('Password File', '*.key')], fsizes=[40, 72])
if file_:
with open(file_, 'r') as R:
hash_val = R.read()
logger.info(f'Password hash: {hash_val}')
self.HASH.set(hash_val)
except FileHandlerError as err:
messagebox.showwarning('Wrong file size', str(err))
except UnicodeDecodeError:
messagebox.showwarning(
'Wrong file type', 'The file is binary, not suitable.')
def enable_pin_range(self):
self.start_label = ttk.Label(self.mainframe, text='Start from: ')
        createToolTip(self.start_label, "PIN value to start from (recommended: 0000)")
self.start_field = ttk.Entry(self.mainframe, textvariable=self.START, width=16)
self.start_field.bind('<Button-3>', rClicker, add='')
self.start_label.grid(row=20, column=0, sticky=tk.E)
self.start_field.grid(row=20, column=1, columnspan=2, sticky=tk.W)
self.end_label = ttk.Label(self.mainframe, text='Max value: ')
createToolTip(self.end_label, "Maximum PIN value")
self.end_field = ttk.Entry(self.mainframe, textvariable=self.END, width=16)
self.end_field.bind('<Button-3>', rClicker, add='')
self.end_label.grid(row=30, column=0, sticky=tk.E)
self.end_field.grid(row=30, column=1, columnspan=2, sticky=tk.W)
def enable_wordlist(self):
self.word_label = ttk.Label(self.mainframe, text='Word List File: ')
self.word_label.grid(row=20, column=0, sticky=tk.E)
createToolTip(self.word_label, "Select a Word List file (text file containing passwords)")
dict_button = ttk.Button(self.mainframe, text='Browse', command=self.select_wordlist)
dict_button.grid(row=20, column=1, sticky=tk.W)
dict_label = ttk.Label(self.mainframe, textvariable=self.DICTLAB, font=self.FontInfo)
dict_label.grid(row=20, column=2, columnspan=1, sticky=tk.W)
def select_wordlist(self):
dialog = self.get_file('', lpath='dict_path')
if dialog and os.path.isfile(dialog):
dialog = os.path.realpath(dialog)
self.DICTFILE.set(dialog)
self.DICTLAB.set(os.path.split(dialog)[1])
def enable_alpha_range(self):
self.MIN = tk.IntVar()
self.MIN.set(4)
self.MAX = tk.IntVar()
self.MAX.set(6)
self.LOWER = tk.IntVar()
self.UPPER = tk.IntVar()
self.DIGITS = tk.IntVar()
self.CUSTOM = tk.IntVar()
self.CUSTVALS = tk.StringVar()
min_label = ttk.Label(self.mainframe, text='Length min/max: ')
min_label.grid(row=20, column=0, sticky=tk.E)
createToolTip(min_label, 'Select minimum and maximum password length')
lframe = ttk.Frame(self.mainframe)
lframe.grid(row=20, column=1, sticky=tk.W)
self.min_value = tk.Spinbox(lframe, from_=1, to=16, textvariable=self.MIN, width=3, command=self.updatemin)
createToolTip(self.min_value, "Minimum password length")
self.min_value.pack(side=tk.LEFT)
self.max_value = tk.Spinbox(lframe, from_=1, to=16, textvariable=self.MAX, width=3, command=self.updatemax)
createToolTip(self.max_value, "Maximum password length")
self.max_value.pack()
char_label = ttk.Label(self.mainframe, text='Characters: ')
char_label.grid(row=30, column=0, sticky=tk.E)
        createToolTip(char_label, "Choose the character combination for the password")
iframe = ttk.Frame(self.mainframe)
iframe.grid(row=30, column=1, sticky=tk.W)
ttk.Checkbutton(iframe, text='Lowercase [a-z]', var=self.LOWER).pack(side=tk.TOP, fill=tk.BOTH, expand=True)
ttk.Checkbutton(iframe, text='Uppercase [A-Z]', var=self.UPPER).pack(side=tk.TOP, fill=tk.BOTH, expand=True)
ttk.Checkbutton(iframe, text='Digits [0-9]', var=self.DIGITS).pack(side=tk.TOP, fill=tk.BOTH, expand=True)
ttk.Checkbutton(iframe, text='Custom:', var=self.CUSTOM).pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
ttk.Entry(iframe, textvariable=self.CUSTVALS, width=15).pack(fill=tk.BOTH, expand=True)
def updatemin(self):
if self.MIN.get() > self.MAX.get():
self.MAX.set(self.MAX.get() + 1)
_max = self.MAX.get()
_max = _max + 1 if _max < 16 else 16
self.min_value.config(to_=_max)
def updatemax(self):
if self.MIN.get() > self.MAX.get():
self.MIN.set(self.MIN.get() - 1)
self.max_value.config(from_=self.MIN.get() - 1)
def enable_stats(self):
self.stats_enabled = True
ttk.Label(self.mainframe, text='Words tried: ').grid(row=70, column=0, sticky=tk.E)
ttk.Label(self.mainframe, textvariable=self.TRIED).grid(row=70, column=1, columnspan=2, sticky=tk.W)
ttk.Label(self.mainframe, text='Rate (pw/sec): ').grid(row=71, column=0, sticky=tk.E)
ttk.Label(self.mainframe, textvariable=self.RATE).grid(row=71, column=1, columnspan=2, sticky=tk.W)
def enable_progress(self):
self.prog_enabled = True
ttk.Label(self.mainframe, text='Progress: ').grid(row=75, column=0, sticky=tk.E)
ttk.Label(self.mainframe, textvariable=self.PROG).grid(row=75, column=1, columnspan=2, sticky=tk.W)
@threaded
def start(self, **kwargs):
self.result_field.configure(foreground='grey')
try:
self.menubar.entryconfig(0, state=tk.DISABLED)
self.start_button.configure(state=tk.DISABLED)
self.stop_button.configure(state=tk.NORMAL)
self.close_button.configure(state=tk.DISABLED)
crack = cracking.PasswordCrack(
self.HASH.get(), self.SALT.get(),
start=self.START.get(), end=self.END.get(),
update_rate=int(self.conf('update_rate')), **kwargs)
result = crack.crack_password(
self.RESULT,
self.STOP,
self.TRIED if self.stats_enabled else None,
self.RATE if self.stats_enabled else None,
self.PROG if self.prog_enabled else None)
if result:
self.result_field.configure(foreground='red')
self.RESULT.set(result)
logger.info(f'Lockscreen credential found: {result}')
else:
self.result_field.configure(foreground='black')
self.RESULT.set('Stopped!' if self.STOP.get() else 'Not found!')
except Exception as err:
self.logger.exception('Error in password cracking.')
messagebox.showwarning('Error', str(err))
finally:
self.STOP.set(0)
self.menubar.entryconfig(0, state=tk.NORMAL)
self.start_button.configure(state=tk.NORMAL)
self.stop_button.configure(state=tk.DISABLED)
self.close_button.configure(state=tk.NORMAL)
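# --- Editor's illustrative sketch (not part of the original file) -------------
# For context: the generic (pre-Marshmallow) password.key is commonly described
# as uppercase hex of SHA-1(password + salt_hex), optionally followed by
# MD5(password + salt_hex), where salt_hex is the salt rendered as lowercase hex
# (treated as unsigned 64-bit, no leading zeros). That matches the 40/72 byte
# size check in password_read above. Hedged sketch of the check only; Samsung
# devices use a different scheme and this is not andriller's cracking module:
import hashlib

def _check_candidate(candidate, salt, stored_key):
    salt_hex = format(salt & 0xFFFFFFFFFFFFFFFF, 'x')
    data = (candidate + salt_hex).encode()
    sha1 = hashlib.sha1(data).hexdigest().upper()
    md5 = hashlib.md5(data).hexdigest().upper()
    return stored_key.upper() in (sha1, sha1 + md5)

# Usage idea: iterate candidate PINs/words and stop at the first True result.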
# --------------------------------------------------------------------------- #
class BruteGenPin(LockscreenBase):
def __init__(self, root=None, title='Lockscreen PIN Cracking (Generic)'):
super().__init__(root=root, title=title)
self.enable_pin_range()
class BruteSamPin(BruteGenPin):
def __init__(self, root=None, title='Lockscreen PIN Cracking (Samsung)'):
super().__init__(root=root, title=title)
def start(self, samsung=True):
super().start(samsung=samsung)
class BruteGenDict(LockscreenBase):
def __init__(self, root=None, title='Lockscreen Password by Dictionary (Generic)'):
super().__init__(root=root, title=title)
self.enable_wordlist()
self.enable_stats()
def start(self):
dict_file = self.DICTFILE.get()
super().start(alpha=True, dict_file=dict_file)
class BruteSamDict(LockscreenBase):
def __init__(self, root=None, title='Lockscreen Password by Dictionary (Samsung)'):
super().__init__(root=root, title=title)
self.enable_wordlist()
self.enable_stats()
def start(self):
dict_file = self.DICTFILE.get()
super().start(alpha=True, samsung=True, dict_file=dict_file)
class BruteForceGen(LockscreenBase):
def __init__(self, root=None, title='Lockscreen Password by Brute-Force (Generic)'):
super().__init__(root=root, title=title)
self.enable_alpha_range()
self.enable_stats()
self.enable_progress()
def make_range(self):
selection = ''.join([k for k, v in {
string.ascii_lowercase: self.LOWER.get(),
string.ascii_uppercase: self.UPPER.get(),
string.digits: self.DIGITS.get(),
self.CUSTVALS.get(): self.CUSTOM.get(),
}.items() if v])
return selection
def start(self):
super().start(alpha=True, alpha_range=self.make_range(),
min_len=self.MIN.get(), max_len=self.MAX.get())
# --------------------------------------------------------------------------- #
class ScreenCap(BaseWindow):
def __init__(self, root=None, title=f'{__app_name__}: Screen Capture'):
super().__init__(root=root, title=title)
self.store = screencap.ScreenStore()
self.REPCOUNT = tk.StringVar()
self.REPCOUNT.set('Report')
self.OUTPUTLAB = tk.StringVar()
self.OUTPUTLAB.set('(Not saving screen shots)')
self.REMEMBER = tk.IntVar()
self.REMEMBER.set(0)
ttk.Label(self.mainframe, font=self.FontTitle, text=f'\n{title}\n').grid(row=1, column=0, columnspan=3)
# Make an empty Canvas
self.snap_frame = ttk.Labelframe(self.mainframe, text='Screen View', padding=(1, 0, 1, 0))
self.snap_frame.grid(row=10, column=0, rowspan=2, sticky=(tk.N, tk.W))
self.screen_view = tk.Canvas(self.snap_frame, width=210, height=350, borderwidth=0)
self.screen_view.create_rectangle(0, 0, 210, 350, fill="#FFFFFF")
self.screen_view.create_line(0, 0, 210, 350, fill="#000000")
self.screen_view.create_line(210, 0, 0, 350, fill="#000000")
self.screen_view.grid(row=0, column=0, sticky=(tk.N, tk.W))
# Controls Frame
control_frame = ttk.Frame(self.mainframe, padding=(1, 0, 1, 0))
control_frame.grid(row=10, column=1, rowspan=2, sticky=(tk.N, tk.W))
# Output Frame
output_frame = ttk.Frame(control_frame)
output_frame.pack(expand=True, fill=tk.X, side=tk.TOP)
# OUTDIR
OUTDIR = ttk.Button(output_frame, text='Output', command=self.set_directory)
        createToolTip(OUTDIR, 'Set the destination directory where screen captures will be saved.')
OUTDIR.pack(side=tk.LEFT)
ttk.Label(output_frame, textvar=self.OUTPUTLAB, font=self.FontInfo).pack(expand=True, fill=tk.X, side=tk.TOP)
# Assistance Frame
assist_frame = ttk.Frame(control_frame)
assist_frame.pack(side=tk.LEFT)
# Save
self.save_this = ttk.Button(assist_frame, text='Save this..', command=self.save)
# self.save_this = ttk.Button(assist_frame, text='Save this..')
self.save_this.configure(state=tk.DISABLED)
createToolTip(self.save_this, 'Save current screen capture to..')
self.save_this.pack(side=tk.TOP, expand=0, fill=tk.X)
# Report
self.report_button = ttk.Button(assist_frame, textvar=self.REPCOUNT)
self.report_button.bind('<Button-1>', self.report)
self.report_button.configure(state=tk.DISABLED)
createToolTip(self.report_button, 'Generate a report with created screen captures.\nNote: only works when Output is provided.')
self.report_button.pack(side=tk.TOP, expand=0, fill=tk.X)
# Guide
ttk.Button(assist_frame, text='Help', command=messages.screen_guide).pack(side=tk.TOP, expand=0, fill=tk.X)
# Note
self.note_text = ttk.Entry(self.mainframe, width=27)
self.note_text.configure(state=tk.DISABLED)
self.note_text.bind("<Return>", self.capture)
        createToolTip(self.note_text, 'Type a comment and press ENTER to capture and save.')
# Snap
self.snap_button = ttk.Button(self.mainframe, text='Capture', command=self.capture, takefocus=True)
self.snap_button.grid(row=15, column=0, columnspan=1, sticky=(tk.W,))
# Close
ttk.Button(self.mainframe, text='Close', command=self.root.destroy)\
.grid(row=15, column=1, columnspan=2, sticky=(tk.N, tk.E))
self.remember_button = ttk.Checkbutton(self.mainframe, text='Remember', var=self.REMEMBER)
createToolTip(self.remember_button, 'Keep last entered comment in field.')
# Status
status_frame = ttk.Frame(self.mainframe, padding=(5, 1), relief='groove')
status_frame.grid(row=20, column=0, columnspan=3, sticky=(tk.W, tk.E))
self.status_label = ttk.Label(status_frame, text='Ready', font=self.FontStatus)
self.status_label.grid(row=4, column=0, sticky=tk.W, padx=5, pady=3)
def set_directory(self):
_path = self.get_dir()
if _path and self.store.set_output(_path):
self.OUTPUTLAB.set(self.store.output if len(self.store.output) < 22 else f'..{self.store.output[-20:]}')
self.REPCOUNT.set(f'Report ({self.store.count})')
self.report_button.configure(state=tk.NORMAL)
self.note_text.configure(state=tk.NORMAL)
self.note_text.grid(row=14, column=0, columnspan=1, sticky=tk.W)
self.remember_button.grid(row=15, column=0, columnspan=1, sticky=tk.E)
def display(self, img_obj):
if not img_obj:
messagebox.showwarning('Nothing to display', 'Nothing was captured. Is a device connected?')
return None
self.save_this.configure(state=tk.NORMAL)
self.screen_view.grid_forget()
img_obj.seek(0)
head = img_obj.read(24)
width, height = struct.unpack('>ii', head[16:24])
factor = width // 200
fname = os.path.realpath(img_obj.name)
self.currentImage = tk.PhotoImage(file=fname).subsample(factor, factor)
self.PIC = ttk.Label(self.snap_frame, image=self.currentImage)
self.PIC.grid(row=0, column=0, sticky=(tk.N, tk.W))
_note = self.note_text.get().rstrip()
if _note:
tk.Label(self.snap_frame, text=_note, font=self.FontInfo, bg='#FFFFFF').grid(row=0, column=0, sticky=(tk.S, tk.W))
if self.REMEMBER.get() == 0:
self.note_text.delete(0, 'end')
@threaded
def capture(self):
self.status_label.configure(text='Capturing...', foreground="black")
self.snap_button.configure(state=tk.DISABLED)
img_obj = self.store.capture(self.note_text.get().rstrip())
if img_obj is False:
messagebox.showinfo('Content Protection Enabled', "It is not possible to capture this type of content.")
self.snap_button.configure(text="Capture")
self.snap_button.configure(state=tk.NORMAL)
self.status_label.configure(text=messages.content_protect, foreground="blue")
else:
if self.store.output:
self.REPCOUNT.set(f'Report ({self.store.count})')
self.snap_button.configure(state=tk.NORMAL)
self.status_label.configure(text='Ready')
return self.display(img_obj)
def save(self):
file_location = self.store.items[-1][0]
savefilename = filedialog.asksaveasfilename(
initialdir=os.getenv('HOME') or os.getcwd(),
initialfile=os.path.split(file_location)[1],
filetypes=[('Portable Network Graphics', '*.png')])
if savefilename:
shutil.copy2(file_location, savefilename)
@threaded
def report(self, event=None):
with disable_control(event):
if not self.store.count:
messagebox.showinfo('No Captures', "Nothing to report yet")
return
report = pathlib.Path(self.store.report())
webbrowser.open_new_tab(report.as_uri())
# Preferences -----------------------------------------------------------------
class Preferences(BaseWindow):
def __init__(self, root=None, title='User Preferences'):
super().__init__(root=root, title=title)
self.fields = {
'default_path': {
'label': 'Default OUTPUT path',
'tooltip': 'This will be the default location path where report outputs will be saved.',
'var': tk.StringVar,
'control': ttk.Entry,
'browse': True
},
'update_rate': {
'label': 'Cracking update rate',
'tooltip': 'Rate at which the UI is updated with a current value during password cracking.',
'var': tk.IntVar,
'control': tk.Spinbox,
'kwargs': {'from_': 1e4, 'to': 1e6, 'increment': 1e4}
},
'offline_mode': {
'label': 'Offline mode',
'tooltip': 'Offline mode skips latest version checking on startup.',
'var': tk.IntVar,
'control': ttk.Checkbutton,
},
'save_log': {
'label': 'Save logs',
'tooltip': 'When OUTPUT is defined, save logs automatically',
'var': tk.IntVar,
'control': ttk.Checkbutton,
},
'window_size': {
'label': 'Log window size',
'tooltip': 'Log window height in line numbers',
'var': tk.IntVar,
'control': ttk.OptionMenu,
'values': [12, 20],
},
'theme': {
'label': 'Theme',
'tooltip': 'Style appearance of the user interface',
'var': tk.StringVar,
'control': ttk.OptionMenu,
'values': self.style_ttk.theme_names(),
},
'time_zone': {
'label': 'Time zone offset',
'tooltip': 'UTC offset for reporting time and date stamps',
'var': tk.StringVar,
'control': ttk.OptionMenu,
'values': config.TIME_ZONES,
},
'date_format': {
'label': 'Date format',
'tooltip': 'Format in which the time and date are reported',
'var': tk.StringVar,
'control': ttk.Entry,
},
'custom_header': {
'label': 'Custom header',
'tooltip': 'Custom header information for HTML reports. Use HTML tags for customization.',
'var': tk.StringVar,
'control': ttk.Entry,
},
'custom_footer': {
'label': 'Custom footer',
'tooltip': 'Custom footer information for HTML reports. Use HTML tags for customization.',
'var': tk.StringVar,
'control': ttk.Entry,
},
}
self.objects = {}
self.render_view()
def set_obj(self, key, var):
obj_name = f'OBJ_{key}'
setattr(self, obj_name, var())
obj = getattr(self, obj_name)
self.objects[key] = obj
return obj
def browse(self, event):
with disable_control(event):
key = event.widget.key
value = self.get_dir(path=key)
if value:
self.update_obj(key, value)
def render_view(self):
_var = {
ttk.Entry: 'textvar',
tk.Spinbox: 'textvariable',
ttk.Checkbutton: 'var',
}
for n, (key, values) in enumerate(self.fields.items(), start=1):
obj = self.set_obj(key, values['var'])
obj.set(self.conf(key))
Control = values['control']
args = values.get('args', [])
kwargs = values.get('kwargs', {})
if _var.get(Control):
kwargs.update({_var.get(Control): obj})
elif hasattr(Control, '_options'):
args.extend([
obj,
self.conf(key),
*values.get('values', []),
])
L = ttk.Label(self.mainframe, text=f"{values['label']} : ")
createToolTip(L, values['tooltip'])
L.grid(row=n, column=0, sticky=tk.E)
C = Control(self.mainframe, *args, **kwargs)
if values.get('browse'):
C.key = key
C.bind('<Button-1>', self.browse)
C.grid(row=n, column=1, sticky=tk.W)
ttk.Button(self.mainframe, text='Save', command=self.save).grid(row=n + 1, column=0, sticky=tk.E)
ttk.Button(self.mainframe, text='Cancel', command=self.quit_app).grid(row=n + 1, column=1, sticky=tk.W)
# ttk.Label(self.mainframe, text='Restart Andriller for changes to take effect')
def update_obj(self, key, value):
obj = self.objects[key]
obj.set(value)
def save(self):
to_update = {}
for key, obj in self.objects.items():
if str(obj.get()) != self.conf(key):
to_update[key] = obj.get()
self.conf.update_conf(**{self.conf.NS: to_update})
self.quit_app()
# Extra helpers ---------------------------------------------------------------
def rClicker(e):
try:
def rClick_Copy(e, apnd=0):
e.widget.event_generate('<Control-c>')
def rClick_Cut(e):
e.widget.event_generate('<Control-x>')
def rClick_Paste(e):
e.widget.event_generate('<Control-v>')
e.widget.focus()
nclst = [
(' Cut', lambda e=e: rClick_Cut(e)),
(' Copy', lambda e=e: rClick_Copy(e)),
(' Paste', lambda e=e: rClick_Paste(e)),
]
rmenu = tk.Menu(None, tearoff=0, takefocus=0)
for (txt, cmd) in nclst:
rmenu.add_command(label=txt, command=cmd)
rmenu.tk_popup(e.x_root + 40, e.y_root + 10, entry="0")
except tk.TclError as e:
logger.error(f'rClicker error: {e}')
return "break"
# --------------------------------------------------------------------------- #
class FileHandlerError(Exception):
pass
| 43.740667
| 142
| 0.594199
|
1b3dd669ba387413c8881d7450738bd57338f7cd
| 2,022
|
py
|
Python
|
src/hanko_sdk/models/authentication_finalization.py
|
teamhanko/hanko-python
|
2455861b6edc3c393dde7ed62635f96c88b1350f
|
[
"Apache-2.0"
] | 1
|
2022-03-08T06:38:22.000Z
|
2022-03-08T06:38:22.000Z
|
src/hanko_sdk/models/authentication_finalization.py
|
teamhanko/hanko-python
|
2455861b6edc3c393dde7ed62635f96c88b1350f
|
[
"Apache-2.0"
] | null | null | null |
src/hanko_sdk/models/authentication_finalization.py
|
teamhanko/hanko-python
|
2455861b6edc3c393dde7ed62635f96c88b1350f
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from typing import ClassVar
from enforce_typing import enforce_types
from .assertion import CredentialAssertionResponse, AuthenticatorAssertionResponse
from .base64_entities import UrlEncodedBase64
from .base_model import BaseModel
from .core import Credential
from .options import CredentialType
from .credential import PublicKeyCredential
@dataclass
class AuthenticationFinalizationRequest(CredentialAssertionResponse):
""" Contains the representation of a :py:class:`PublicKeyCredential` obtained through assertion generation via the browsers' ``navigator.credentials.get()``.
See also: https://www.w3.org/TR/webauthn-2/#publickeycredential """
@classmethod
def from_json_serializable(cls, d: dict):
if d is None:
return None
return AuthenticationFinalizationRequest(
d.get(AuthenticationFinalizationRequest.ID_KEY, None),
CredentialType.from_json_serializable(d.get(AuthenticationFinalizationRequest.TYPE_KEY, None)),
UrlEncodedBase64.from_json_serializable(d.get(AuthenticationFinalizationRequest.RAW_ID_KEY, None)),
d.get(AuthenticationFinalizationRequest.EXTENSIONS_KEY, None),
AuthenticatorAssertionResponse.from_json_serializable(d.get(AuthenticationFinalizationRequest.ASSERTION_RESPONSE_KEY, None))
)
@dataclass
class AuthenticationFinalizationResponse(BaseModel):
""" Represents the response of a successful authentication. """
credential: Credential
CREDENTIAL_KEY: ClassVar[str] = "credential"
def to_json_serializable_internal(self) -> dict:
return {
AuthenticationFinalizationResponse.CREDENTIAL_KEY: self.credential
}
@classmethod
def from_json_serializable(cls, d: dict):
if d is None:
return None
return AuthenticationFinalizationResponse(
Credential.from_json_serializable(d.get(AuthenticationFinalizationResponse.CREDENTIAL_KEY, None))
)
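# --- Editor's illustrative note (not part of the original file) ---------------
# Both finalization models follow the SDK's tolerant deserialisation pattern:
# from_json_serializable() returns None for missing input and hands nested keys
# to the corresponding model class. Hedged usage sketch (the input dict below is
# invented and not a real WebAuthn credential):
#
#     assert AuthenticationFinalizationResponse.from_json_serializable(None) is None
#     resp = AuthenticationFinalizationResponse.from_json_serializable({"credential": None})
#     # "credential" is delegated to Credential.from_json_serializable, which is
#     # assumed to follow the same None-tolerant convention as the classes above.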
| 37.444444
| 161
| 0.757666
|
cfa9c02ead1642db9cf3d74e5c76ae8034f12ef1
| 35,563
|
py
|
Python
|
pkgs/tools/yasm/src/tools/python-yasm/pyxelator/ir.py
|
manggoguy/parsec-modified
|
d14edfb62795805c84a4280d67b50cca175b95af
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
pkgs/tools/yasm/src/tools/python-yasm/pyxelator/ir.py
|
manggoguy/parsec-modified
|
d14edfb62795805c84a4280d67b50cca175b95af
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
pkgs/tools/yasm/src/tools/python-yasm/pyxelator/ir.py
|
manggoguy/parsec-modified
|
d14edfb62795805c84a4280d67b50cca175b95af
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#!/usr/bin/env python
""" ir.py - parse c declarations
(c) 2002, 2003, 2004, 2005 Simon Burton <simon@arrowtheory.com>
Released under GNU LGPL license.
version 0.xx
"""
import sys
#import cPickle as pickle
import pickle
#from lexer import Lexer
from parse_core import Symbols #, Parser
import node as node_module
import cparse
import genpyx
class Node(genpyx.Node, node_module.Node):
"""
tree structure
"""
def __init__( self, *args, **kw ):
node_module.Node.__init__( self, *args, **kw )
self._marked = False
def get_marked( self ):
return self._marked
def set_marked( self, marked ):
# if marked:
# print "MARK", self
self._marked = marked
marked = property( get_marked, set_marked )
# def __getstate__( self ):
# return self.__class__, tuple( [ item.__getstate__() for item in self ] )
# def __setstate__( self, state ):
# cls, states = state
# states = list(states)
# for idx, state in enumerate(states):
# items[idx] = items[idx].__setstate__(
def __getstate__(self):
return str(self)
def __setstate__(self, state):
Node.__init__(self)
self[:] = eval(state)
# _unique_id = 0
# def get_unique_id(cls):
# Node._unique_id += 1
# return Node._unique_id
# get_unique_id = classmethod(get_unique_id)
def __hash__( self ):
return hash( tuple([hash(type(self))]+[hash(item) for item in self]) )
def clone(self):
l = []
for item in self:
if isinstance(item,Node):
item = item.clone()
l.append(item)
return self.__class__(*l, **self.__dict__)
def init_from( self, other ): # class method ?
# Warning: shallow init
self[:] = other
self.__dict__.update( other.__dict__ )
return self
# def is_struct(self):
# for x in self:
# if isinstance(x,Node):
# if x.is_struct():
# return 1
# return 0
#def explain(self):
#l = []
#for x in self:
#if isinstance(x,Node):
#l.append(x.explain())
#else:
#l.append(str(x))
#return string.join(l," ")
##(self.__class__.__name__,string.join(l) )
def psource(self):
if hasattr(self,'lines'):
# print "# "+string.join(self.lines,"\n# ")+"\n"
print "# "+"\n# ".join(self.lines)+"\n"
def cstr(self,l=None):
"""
Build a list of tokens; return the joined tokens string
"""
if l is None:
l = []
for x in self:
if isinstance(x,Node):
x.cstr(l)
else:
l.insert(0,str(x)+' ')
s = ''.join(l)
return s
def ctype(self): # anon_clone
" return clone of self without identifiers "
#print "%s.ctype()"%self
l=[]
for x in self:
if isinstance(x,Node):
l.append(x.ctype())
else:
l.append(x)
#print "%s.__class__(*%s)"%(self,l)
return self.__class__(*l, **self.__dict__) # XX **self.__dict__ ?
def cbasetype(self):
" return ctype with all TypeAlias's replaced "
# WARNING: we cache results (so do not mutate self!!)
l=[]
for x in self:
if isinstance(x,Node):
l.append(x.cbasetype())
else:
l.append(x)
#print "%s.__class__(*%s)"%(self,l)
return self.__class__(*l, **self.__dict__) # XX **self.__dict__ ?
def signature( self, tank=None ):
if tank is None:
tank = {}
for node in self.nodes():
if not tank.has_key( type(node) ):
tank[ type(node) ] = {}
type(node).tank = tank[type(node)]
shape = tuple( [ type(_node).__name__ for _node in node ] )
if not tank[type(node)].has_key(shape):
tank[type(node)][shape] = []
tank[type(node)][shape].append( node )
return tank
def psig( self, tank=None ):
if tank is None:
tank = {}
tank = self.signature(tank)
for key in tank.keys():
print key.__name__
for shape in tank[key].keys():
print " ", shape
#
#################################################
class Named(genpyx.Named, Node):
" has a .name property "
def get_name(self):
if self:
assert type(self[0])==str
return self[0]
return None
def set_name(self, name):
if self:
self[0] = name
else:
self.append(name)
name = property(get_name,set_name)
class BasicType(genpyx.BasicType, Named):
"float double void char int"
pass
class Qualifier(genpyx.Qualifier, Named):
"register signed unsigned short long const volatile inline"
pass
class StorageClass(genpyx.StorageClass, Named):
"extern static auto"
pass
class Ellipses(genpyx.Ellipses, Named):
"..."
pass
class GCCBuiltin(genpyx.GCCBuiltin, BasicType):
"things with __builtin prefix"
pass
class Identifier(genpyx.Identifier, Named):
"""
shape = +( str, +ConstExpr )
"""
#def explain(self):
#if len(self)==1:
#return "%s"%self.name
#else:
#return "%s initialized to %s"%(self.name,
#Node(self[1]).explain()) # will handle Initializer
# def ctype(self):
# return self.__class__(*self[1:]) #.clone() ?
# def get_name(self):
# if self:
# return self[0]
# def set_name(self, name):
# if self:
# self[0] = name
# else:
# self.append(name)
# name = property(get_name,set_name)
def cstr(self,l=None):
if l is None:
l=[]
if len(self)>1:
assert len(self)==2
l.append( '%s = %s'%(self[0],self[1]) )
elif len(self)==1:
l.append( str(self[0]) )
return " ".join(l)
class TypeAlias(genpyx.TypeAlias, Named):
"""
typedefed things, eg. size_t
"""
def cbasetype( self ):
node = self.typedef.cbasetype().get_rest()
return node
class Function(genpyx.Function, Node):
"""
"""
#def explain(self):
#if len(self):
#return "function (%s), returning"%\
#", ".join( map(lambda x:x.explain(),self) )
#else:
#return "function returning"
def cstr(self,l):
#print '%s.cstr(%s)'%(self,l)
_l=[]
assert len(self)
i=0
while isinstance(self[i],Declarator):
_l.append( self[i].cstr() )
i=i+1
l.append( '(%s)'% ', '.join(_l) )
while i<len(self):
self[i].cstr(l)
i=i+1
return " ".join(l)
def return_type(self):
node = self[-1]
#assert isinstance(node,DeclarationSpecifiers)
return Declarator( Identifier(), node )
ret = property(return_type)
def get_args(self):
args = [ arg for arg in self[:-1] if not arg.is_void() ]
return args
args = property(get_args)
def arg_types(self):
return [ AbstractDeclarator().init_from( arg.ctype() ) for arg in self[:-1]]
def is_varargs(self):
for node in self.nodes():
if isinstance(node,Ellipses) or 'va_list' in node:
# print self, 'is_varargs'
return True
# print self, 'is_varargs'
return False
# return fn.deepfind(Ellipses) or fn.deepfind('va_list')
def ctype(self):
return Function(*self.arg_types()+[self[-1]]) # XX self[-1].ctype
class Pointer(genpyx.Pointer, Node):
"""
"""
def get_spec(self):
if type(self[0])==TypeSpecifiers: # isinstance ??
return self[0]
spec = property(get_spec)
#def explain(self):
#return "pointer to"
def cstr(self,l):
assert len(self)
node=self[0]
l.insert(0,'*')
if isinstance(node,Function):
l.insert(0,'(')
l.append(')')
elif isinstance(node,Array):
l.insert(0,'(')
l.append(')')
return Node.cstr(self,l)
class Array(genpyx.Array, Node):
"""
"""
#def explain(self):
#s=''
#if len(self):
#if type(self[0])==int:
#s='0 to %s '%(self[0]-1)
#return "array %sof"%s
def has_size(self):
try:
int(self.size)
return True
except:
return False
def get_size(self):
if type(self[-1])==str:
try: return int(self[-1])
except: return self[-1]
return self[-1] # None
size = property(get_size)
def get_spec(self):
if type(self[0])==TypeSpecifiers: # isinstance ??
return self[0]
spec = property(get_spec)
def to_pointer(self):
node = Pointer()
node.init_from( self.clone() )
node.pop() # pop the size element
return node
def cstr(self,l):
if self.size is None:
l.append('[]')
else:
l.append('[%s]'%self.size)
return Node( *self[:-1] ).cstr( l )
class Tag(genpyx.Tag, Named):
" the tag of a Struct, Union or Enum "
pass
class Taged(genpyx.Taged, Node):
"Struct, Union or Enum "
def get_tag(self):
if len(self):
tag = self[0]
assert type(tag)==Tag # isinstance ??
else:
tag = None
return tag
def set_tag(self,tag):
if len(self):
self[0] = tag
else:
self.append(tag)
tag = property( get_tag, set_tag )
def has_members(self):
return len(self)>1 # more than just a tag
def get_members(self):
return self[1:]
members = property(get_members) # fields ?
def ctype(self):
if not self.tag.name:
#print "# WARNING : anonymous struct " # OK i think
return self.clone()
# self = self.clone()
# return self[:1] # just the tag
return self.__class__( self.tag, **self.__dict__ ) # just the Tag
# return self.__class__( *self, **self.__dict__ )
def cbasetype(self):
return self.ctype() # is this enough ???
# return Node.cbasetype(self) # XX lookup my tag if i am empty ..?
class Compound(genpyx.Compound, Taged):
"Struct or Union"
def cstr(self,_l=None):
assert isinstance( self[0], Tag )
tag=''
if len(self[0]):
tag=' '+self[0][0]
if isinstance(self,Struct):
l=[ 'struct%s '%tag ]
elif isinstance(self,Union):
l=[ 'union%s '%tag ]
if len(self)>1:
l.append(' { ')
for decl in self[1:]:
l.append( decl.cstr()+"; " )
l.append('} ')
if _l is None:
_l=[]
while l:
_l.insert( 0, l.pop() )
# XX empty struct with no tag -> "struct" XX
return "".join( _l )
def ctype(self):
tp = Taged.ctype(self)
for i in range(1,len(tp)):
tp[i] = StructDeclarator().init_from( tp[i] )
return tp
class Struct(genpyx.Struct, Compound):
"""
"""
pass
class Union(genpyx.Union, Compound):
"""
"""
pass
class Enum(genpyx.Enum, Taged):
"""
"""
def cstr(self,_l=None):
assert isinstance( self[0], Tag )
tag=''
if len(self[0]):
tag=' '+self[0][0]
l=[ 'enum%s '%tag ]
if len(self)>1:
l.append(' { ')
for node in self[1:]:
l.append( node.cstr()+', ' )
l.append('} ')
if _l is None:
_l=[]
while l:
_l.insert( 0, l.pop() )
return ''.join( _l )
class Declarator(genpyx.Declarator, Node):
"""
"""
def __eq__(self,other):
" unordered equality "
# ordering sometimes gets lost when we do a cbasetype
if not isinstance(other,Node):
return False
a, b = self[:], other[:]
a.sort()
b.sort()
return a == b
def __hash__( self ):
hs = [hash(item) for item in self]
hs.sort()
return hash( tuple([hash(type(self))]+hs) )
def transform(self):
return
def get_identifier(self):
if len(self)>1:
return self[0]
def set_identifier(self, identifier):
if len(self)>1:
self[0] = identifier
else:
self.insert(0,identifier)
identifier = property(get_identifier,set_identifier)
def get_spec(self):
spec = self[-1]
if type(spec)==TypeSpecifiers: # isinstance ??
return spec
spec = property(get_spec)
def get_type_alias(self):
if self.spec:
if isinstance(self.spec[0], TypeAlias):
return self.spec[0]
type_alias = property(get_type_alias)
def get_tagged(self):
if self.spec:
return self.spec.tagged # i am a tagged
tagged = property(get_tagged)
def get_compound(self):
if self.spec:
return self.spec.compound # i am a compound
compound = property(get_compound)
def get_struct(self):
if self.spec:
return self.spec.struct # i am a struct
struct = property(get_struct)
def get_union(self):
if self.spec:
return self.spec.union # i am a union
union = property(get_union)
def get_enum(self):
if self.spec:
return self.spec.enum # i am an enum
enum = property(get_enum)
def get_function(self):
if len(self)>1 and type(self[1])==Function: # isinstance ??
return self[1]
function = property(get_function)
def get_pointer(self):
if len(self)>1 and type(self[1])==Pointer: # isinstance ??
return self[1]
pointer = property(get_pointer)
def get_array(self):
if len(self)>1 and type(self[1])==Array: # isinstance ??
return self[1]
array = property(get_array)
def get_name(self):
if self.identifier:
return self.identifier.name
def set_name(self, name):
assert self.identifier is not None
self.identifier.name = name
name = property(get_name, set_name)
def get_rest(self): # XX needs a better name
if len(self)>1:
return self[1]
return self[0]
def pointer_to( self ):
" return Declarator pointing to self's type "
decl = Declarator(Identifier(), Pointer(self.get_rest().clone()))
return decl
def deref( self ):
" return (clone of) Declarator that self is pointing to "
node = self.ctype() # clone
pointer = node.pointer or node.array
assert pointer, "cannot dereference non-pointer"
node[1:2] = pointer
return node
def is_void(self):
return self.spec and BasicType('void') in self.spec
def is_pointer_to_fn(self):
return self.pointer and self.deref().function
def is_pointer_to_char(self):
# return self.ctype() == TransUnit("char *a;").transform()[0].ctype()
node = self.pointer or self.array
if node:
spec = node.spec
if spec and BasicType('char') in spec and not BasicType('unsigned') in spec:
return True
return False
def is_callback(self):
" i am a pointer to a function whose last arg is void* "
if self.is_pointer_to_fn():
fn = self.deref().function
if fn.args:
arg = fn.args[-1]
if arg.pointer and arg.deref().is_void():
return True
def is_complete( self, tag_lookup ):
if self.tagged and self.tagged.tag.name in tag_lookup and not tag_lookup[self.tagged.tag.name].has_members():
return False
return True
def is_primative( self ):
"i am a char,short,int,float,double... "
spec = self.cbasetype().spec
return spec and spec.find(BasicType)
def is_pyxnative( self ):
# pyrex handles char* too
# but i don't know if we should make this the default
# sometimes we want to send a NULL, so ... XXX
self = self.cbasetype()
if self.is_void():
return False
if self.is_primative():
return True
if self.enum:
return True
# pointer = None
# if self.pointer:
# pointer = self.pointer
# elif self.array:
# pointer = self.array
# if pointer and pointer.spec:
# spec = pointer.spec
# if BasicType("char") in spec and not Qualifier("unsigned") in spec:
# # char*, const char*
## print self.deepstr()
# return True
return False
def cstr(self,l=None):
return Node.cstr(self,l).strip()
def ctype(self):
decl=Declarator()
decl.init_from( self.clone() )
decl.identifier = Identifier()
for i in range(1,len(decl)):
decl[i]=decl[i].ctype()
return decl
def cbasetype(self):
# WARNING: we cache results (so do not mutate self!!)
try:
# this cache improves performance by 50%
return self.__cbasetype.clone()
except AttributeError:
pass
decl = self.ctype() # gets rid of Identifier names
for i, node in enumerate(decl):
decl[i] = decl[i].cbasetype()
# return decl.get_rest()
done = False
while not done:
done = True
nodes = decl.deepfilter( TypeSpecifiers )
for node in nodes:
if node.deepfind( TypeSpecifiers ) != node:
# this node has another TypeSpecifier;
decl.expose_node( node )
done = False
break # start again...
# each TypeSpecifier needs to absorb primitive siblings (StorageClass, BasicType etc.)
nodes = decl.deepfilter( TypeSpecifiers )
for node in nodes:
parent = decl.get_parent(node)
i = 0
while i < len(parent):
assert not type(parent[i]) in (TypeAlias, Enum, Struct, Union)
if type(parent[i]) in (StorageClass, BasicType, Qualifier):
node.append( parent.pop(i) )
else:
i = i + 1
self.__cbasetype = decl.clone()
return decl
def invalidate(self):
# flush cache, etc.
try:
del self.__cbasetype
except AttributeError:
pass
def declare_str(self,name):
" return c string declaring name with same type as self "
tp = self.ctype()
tp.name = name
return tp.cstr()+";"
class Typedef(genpyx.Typedef, Declarator):
def cstr(self,l=None):
return 'typedef ' + Declarator.cstr(self,l) #.strip()
class AbstractDeclarator(genpyx.AbstractDeclarator, Declarator):
""" used in Function; may lack an identifier """
#def cstr(self,l=None):
#return Node.cstr(self,l)
# def ctype(self):
# # _type_ ignores the name of our identifier
# return Node.ctype(self)
class FieldLength(genpyx.FieldLength, Node):
"""
"""
#def explain(self):
#return ""
def cstr(self,l):
l.append(':%s'%self[0])
class StructDeclarator(genpyx.StructDeclarator, Declarator): # also used in Union
"""
"""
#def explain(self):
#flen = self.find(FieldLength)
#if flen is not None:
#i = self.index(flen)
#self.pop(i)
#s = Declarator.explain(self)
#self.insert(i,flen)
#width = flen[0]
#if width > 0:
#return s+" bitfield %s wide"%width
#else:
#return s+" alignment bitfield"
#else:
#return Declarator.explain(self)
# def ctype(self):
# return self
def get_field_length(self):
if len(self)>1 and isinstance( self[1], FieldLength ):
return self[1]
field_length = property(get_field_length)
class DeclarationSpecifiers(genpyx.DeclarationSpecifiers, Node):
#class TypeSpecifiers(Node):
"""
"""
def __eq__(self,other):
" unordered equality "
if not isinstance(other,Node):
return False
a, b = self[:], other[:]
a.sort()
b.sort()
return a == b
def __hash__( self ):
hs = [hash(item) for item in self]
hs.sort()
return hash( tuple([hash(type(self))]+hs) )
# def is_struct(self):
# return self.find(Struct) is not None
class TypeSpecifiers(genpyx.TypeSpecifiers, DeclarationSpecifiers):
"""
"""
def get_tagged(self):
if self and isinstance(self[0],Taged):
return self[0]
tagged = property(get_tagged)
def get_compound(self):
if self and isinstance(self[0],Compound):
return self[0]
compound = property(get_compound)
def get_struct(self):
if self and isinstance(self[0],Struct):
return self[0]
struct = property(get_struct)
def get_union(self):
if self and isinstance(self[0],Union):
return self[0]
union = property(get_union)
def get_enum(self):
if self and isinstance(self[0],Enum):
return self[0]
enum = property(get_enum)
def cbasetype(self):
node = Node.cbasetype(self)
# node.expose( TypeSpecifiers )
# if node.deepfind(TypeSpecifiers) != node:
return node
class Initializer(genpyx.Initializer, Node):
"""
"""
pass
class Declaration(genpyx.Declaration, Node):
"""
"""
def do_spec(self):
" distribute DeclarationSpecifiers over each Declarator "
spec=self[0]
assert isinstance(spec,DeclarationSpecifiers), spec.deepstr()
self.pop(0)
for declarator in self:
assert isinstance(declarator,Declarator)
#if isinstance(declarator,DeclarationSpecifiers #huh?
##for node in spec:
##declarator.append(node.clone())
declarator.append(spec)
def transform(self):
# children go first
for node in self.nodes():
if isinstance(node,Declaration):
node.do_spec()
node.file = self.file # overkill ?
self.expose(Declaration)
#def explain(self):
#return string.join([x.explain() for x in self],", ")
#return string.join(map(lambda x:x.explain(),self),", ")
class ParameterDeclaration(genpyx.ParameterDeclaration, Declaration):
"""
"""
pass
class StructDeclaration(genpyx.StructDeclaration, Declaration):
"""
"""
pass
class TransUnit(genpyx.TransUnit, Node):
"""
Top level node.
"""
def __init__( self, item ): # XX __init__ uses different signature ! XX
if type(item)==str:
node = cparse.TransUnit()
node.parse(item)
else:
node = item
assert isinstance( node, cparse.TransUnit ), str(node)
Node.__init__(self)
self[:] = [ self.convert(child) for child in node ]
self.__dict__.update( node.__dict__ )
assert "name" not in node.__dict__
self.syms = {} # map identifier names to their Declarator's
self.typedefs = {} # map names to Typedef's
self.tag_lookup = {} # map struct, union, enum tags to Taged's
# XX should call transform here XX
# print self.deepstr()
def __getstate__( self ):
nodes = tuple( [ repr(node) for node in self ] )
typedefs = tuple( [ (key,repr(val)) for key,val in self.typedefs.items() ] )
return nodes, typedefs
def __setstate__( self, state ):
Node.__init__(self)
nodes, typedefs = state
nodes = [ eval(node) for node in nodes ]
self[:] = nodes
typedefs = [ (key,eval(val)) for key,val in typedefs ]
self.typedefs = dict(typedefs)
def convert( self, node ):
# name = node.__class__.__name__
# cls = globals()[ name ]
cls = cls_lookup[ type(node) ]
_node = cls()
for child in node:
if isinstance(child, node_module.Node):
child = self.convert( child )
else:
assert child is None or type(child) in (str, int), type(child)
_node.append( child )
_node.__dict__.update( node.__dict__ )
return _node
def strip(self,files):
" leave only the declarations from <files> "
i=0
while i<len(self):
if self[i].file in files:
i=i+1
else:
self.pop(i)
def mark(self,cb,verbose=False):
" mark our child nodes such that cb(node).. mark dependants too. prune unmarked objects. "
# mark the nodes:
for node in self:
node.marked = cb(self, node)
if verbose and node.marked:
print '1:', node.cstr()
# propagate dependancy:
i=len(self)
while i:
i-=1 # we go backwards
for node in self[i].nodes(): # bottom-up search
if verbose and self[i].marked and not node.marked:
print '2:', str(node), '<--', self[i].cstr()
node.marked = self[i].marked or node.marked
if type(node)==TypeAlias:
if verbose and node.marked and not node.typedef.marked:
print '3:', node.typedef.cstr(), '<--', node.cstr()
node.typedef.marked = node.typedef.marked or node.marked
if isinstance(node, Taged):
if node.tag.name in self.tag_lookup:
_node = self.tag_lookup[ node.tag.name ] # look-up the def'n
if verbose and node.marked and not _node.marked:
print '4:', _node.cstr(), '<--', self[i].cstr()
# _node.marked = _node.marked or self[i].marked
_node.marked = _node.marked or node.marked
# else:
# # this guy has no tag
# print "lost tag:", self[i].cstr()
# XX struct defs acquire marks from members, but XX
# XX ordinary definitions do not XX
# if node.marked and not self[i].marked:
# # one of my descendants is marked
# if verbose:
# print '5:', self[i].cstr(), '<--', node.cstr()
# self[i].marked = True
# if verbose:
# for node in self:
# print '-'*79
# if node.enum:
# print str(node.marked) + ': ' + node.cstr()
# prune:
f = open(".tmp/pruned.txt","w")
f.write("// This file autogenerated by '%s' .\n"%__file__)
f.write("// List of functions pruned from parse tree, for various reasons.\n\n")
i=0
while i<len(self):
if not self[i].marked:
if verbose: print 'pop:', self[i].cstr()
f.write( self[i].cstr() + "\n" )
self.pop(i)
# elif self[i].compound:
# # XXXX for now, rip out all struct members XXXX
# self[i].compound[1:] = [] # XX encapsulation
# i = i + 1
else:
i = i + 1
for key, value in self.syms.items():
if not value.marked:
del self.syms[key]
for key, value in self.typedefs.items():
if not value.marked:
del self.typedefs[key]
for key, value in self.tag_lookup.items():
if not value.marked:
del self.tag_lookup[key]
# sys.exit(1)
def assert_no_dups(self):
check={}
for node in self.nodes():
assert not check.has_key(id(node))
check[id(node)]=1
def transform(self, verbose=False, test_parse=False, test_types=False ):
i=0
while i < len(self):
if verbose: print "##"*25
declaration=self[i]
if verbose: declaration.psource()
if verbose: print declaration.deepstr(),'\n'
assert isinstance(declaration,Declaration)
if verbose: print "# expose declarators from declaration"
# STAGE 1
declaration.transform()
if verbose: print declaration.deepstr(),'\n'
self[i:i+1] = declaration # expose declarators from declaration
for j in range(len(declaration)):
declarator=self[i]
assert isinstance(declarator,Declarator)
if verbose: print "# declarator.transform()"
# STAGE 2
declarator.transform()
if verbose: print declarator.deepstr(),'\n'
if verbose: print "# self.visit_declarator(declarator)"
# STAGE 3
self[i] = declarator = self.visit_declarator(declarator)
# STAGE 4
if declarator.name:
if isinstance(declarator, Typedef):
if verbose: print "# typedef %s" % declarator.name
self.typedefs[ declarator.name ] = declarator
else:
if verbose: print "# sym %s" % declarator.name
self.syms[ declarator.name ] = declarator
for node in declarator.nodes():
if isinstance(node,Taged) and node.tag.name:
assert type(node.tag.name)==str, node.deepstr()
taged = self.tag_lookup.get( node.tag.name, None )
if taged is None:
if verbose: print "# tag lookup %s = %s" % (declarator.name, node.tag.name)
self.tag_lookup[ node.tag.name ] = node
elif not taged.has_members():
# this is (maybe) the definition of this tag
if verbose: print "# definition %s = %s" % (declarator.name, node.tag.name)
self.tag_lookup[ node.tag.name ] = node
# Annotate the TypeAlias's
for node in declarator.deepfilter( TypeAlias ):
name = node[0]
assert type( name ) == str
node.typedef = self.typedefs[ name ]
if verbose: print declarator.deepstr(),'\n'
#print declarator.ctype().deepstr(),'\n'
#assert declarator.clone() == declarator
###################################################
# TESTS:
if test_parse:
# test that parse of cstr gives same answer
cstr = declarator.cstr()+';\n'
if verbose: print '# '+cstr.replace('\n','\n# ')
#print
if isinstance(declarator,Typedef):
name = declarator[0][0]
assert type(name)==str
self.lexer.rmtypedef( name )
declaration = cparse.Declaration()
self.lexer.lex( cstr )
#print self.lexer.err_string()
declaration.parse( self.lexer, Symbols() ) # use new name-space
#declaration.parse( Lexer( cstr ), Symbols() )
declaration = self.convert(declaration)
declaration.transform()
assert len(declaration)==1
decl=declaration[0]
decl.transform()
decl = self.visit_declarator(decl)
if decl!=declarator:
if verbose: print "#???????????"
if verbose: print decl.deepstr(),'\n\n'
#if verbose: print declaration.deepstr(),'\n\n'
#assert 0
elif verbose: print '# OK\n'
if test_types:
node = declarator.ctype()
declare_str= node.declare_str("my_name")
if verbose: print "# declarator.ctype() "
if verbose: print node.deepstr(),"\n"
if verbose: print "#",declare_str.replace('\n','\n# '), '\n'
i=i+1
return self
def visit(self,node):
#print 'visit(%s)'%node
for _node in node:
if isinstance(_node,Declarator):
_node = self.visit_declarator(_node) # XX replace _node
elif isinstance(_node,Node):
_node = self.visit(_node) # XX replace _node
return node
def visit_declarator(self,decl):
assert isinstance(decl,Declarator)
# STAGE 3.a
tp = decl.deepfind(Typedef)
if tp is not None:
decl.deeprm(tp)
tp.init_from( decl ) # warning: shallow init
decl = tp
# STAGE 3.b
i=len(decl)
# accumulate nodes (they become the children of decl)
children=[]
while i:
i=i-1
node=decl.pop(i)
if isinstance(node,Declarator):
node = self.visit_declarator(node) # replace node
else:
node = self.visit(node) # replace node
if isinstance(node,Pointer):
node+=children
children=[node]
elif isinstance(node,Function):
node+=children
children=[node]
elif isinstance(node,Array):
while children:
node.insert(0,children.pop())
children=[node]
# array size (if any) at end
#elif isinstance(node,Identifier):
#node+=children
#children=[node]
else:
# accumulate
children.insert(0,node)
decl[:]=children
return decl
cstr = None
ctype = None
cbasetype = None
# remap the global class definitions in genpyx to
# point to the definitions in this module
gbl = globals()
for key, val in gbl.items():
if type(val)==type:
if issubclass(val,Node):
setattr( genpyx, key, val )
assert genpyx.Node == Node
cls_lookup = {
# Node : Node ,
cparse.BasicType : BasicType ,
cparse.Qualifier : Qualifier ,
cparse.StorageClass : StorageClass ,
cparse.Ellipses : Ellipses ,
cparse.GCCBuiltin : GCCBuiltin ,
cparse.Identifier : Identifier ,
cparse.TypeAlias : TypeAlias ,
cparse.Function : Function ,
cparse.Pointer : Pointer ,
cparse.Array : Array ,
cparse.Tag : Tag ,
cparse.Compound : Compound ,
cparse.Struct : Struct ,
cparse.Union : Union ,
cparse.Enum : Enum ,
cparse.Declarator : Declarator ,
cparse.Typedef : Typedef ,
cparse.AbstractDeclarator : AbstractDeclarator ,
cparse.FieldLength : FieldLength ,
cparse.StructDeclarator : StructDeclarator ,
cparse.DeclarationSpecifiers : TypeSpecifiers ,
cparse.TypeSpecifiers : TypeSpecifiers ,
cparse.Initializer : Initializer ,
cparse.Declaration : Declaration ,
cparse.ParameterDeclaration : ParameterDeclaration ,
cparse.StructDeclaration : StructDeclaration ,
cparse.TransUnit : TransUnit ,
}
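An illustrative sketch of how this module appears intended to be used, inferred from the commented-out line in Declarator.is_pointer_to_char above. Whether cparse's parser accepts a raw string this way depends on the accompanying cparse/lexer modules, and the module itself is Python 2 era code, so treat this as a hypothetical example only.

# Hedged sketch: parse two C declarations and print them back out.
# TransUnit(...) built from a string mirrors the commented-out usage
# `TransUnit("char *a;")` found in Declarator.is_pointer_to_char.
unit = TransUnit("char *a; int b[10];")
unit.transform()
for decl in unit:
    print(decl.name, decl.cstr())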
| 30.552405
| 117
| 0.534404
|
0d434e05acfff2841e27613c15863c99e20abe10
| 659
|
py
|
Python
|
util/bus.py
|
krisgesling/mycroft-timer
|
e4db07688d72a398223d43225be2f1b63d7500cd
|
[
"Apache-2.0"
] | null | null | null |
util/bus.py
|
krisgesling/mycroft-timer
|
e4db07688d72a398223d43225be2f1b63d7500cd
|
[
"Apache-2.0"
] | null | null | null |
util/bus.py
|
krisgesling/mycroft-timer
|
e4db07688d72a398223d43225be2f1b63d7500cd
|
[
"Apache-2.0"
] | null | null | null |
from time import sleep
# TODO remove this in v20.8 when it should be available in mycroft-core
def wait_for_message(bus, message_type, timeout=8):
"""Wait for specified Message type on the bus.
Arguments:
bus: an instance of the message bus to listen on
message_type: the Message type to wait for
timeout (int): how long to wait, defaults to 8 secs
"""
message_detected = False
def detected_speak(message=None):
nonlocal message_detected
message_detected = True
bus.on(message_type, detected_speak)
sleep(timeout)
bus.remove(message_type, detected_speak)
return message_detected
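A hedged usage sketch for the helper above; it assumes `bus` is an already-connected Mycroft messagebus client created elsewhere (for example, `self.bus` inside a skill).

# Example (assumes `bus` is a connected messagebus client):
# block for up to 5 seconds and report whether any 'speak' message went by.
heard_speech = wait_for_message(bus, "speak", timeout=5)
if not heard_speech:
    print("nothing was spoken during the wait")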
| 29.954545
| 71
| 0.705615
|
d3cde04f8f1cf49f6ec0f2fd31f38e16f1f37107
| 6,887
|
py
|
Python
|
tests/plugins/trackers/kinozal/test_kinozalplugin.py
|
mortifactor/monitorrent
|
2388ec5b82af5d078fa7e37930d3b66b4a797954
|
[
"WTFPL"
] | 465
|
2015-08-31T09:16:41.000Z
|
2022-03-12T10:33:04.000Z
|
tests/plugins/trackers/kinozal/test_kinozalplugin.py
|
mortifactor/monitorrent
|
2388ec5b82af5d078fa7e37930d3b66b4a797954
|
[
"WTFPL"
] | 340
|
2015-07-18T17:31:54.000Z
|
2022-03-30T15:16:25.000Z
|
tests/plugins/trackers/kinozal/test_kinozalplugin.py
|
mortifactor/monitorrent
|
2388ec5b82af5d078fa7e37930d3b66b4a797954
|
[
"WTFPL"
] | 87
|
2015-07-18T10:52:24.000Z
|
2022-03-27T09:52:35.000Z
|
# coding=utf-8
import pytz
from datetime import datetime
from mock import patch
from monitorrent.plugins.trackers import LoginResult, TrackerSettings
from monitorrent.plugins.trackers.kinozal import KinozalPlugin, KinozalLoginFailedException, KinozalTopic
from monitorrent.plugins.trackers.kinozal import KinozalDateParser
from tests import use_vcr, DbTestCase
from tests.plugins.trackers import TrackerSettingsMock
from tests.plugins.trackers.kinozal.kinozal_helper import KinozalHelper
helper = KinozalHelper()
# helper = KinozalHelper.login('realusername', 'realpassword')
class MockDatetime(datetime):
mock_now = None
@classmethod
def now(cls, tz=None):
return cls.mock_now
class KinozalPluginTest(DbTestCase):
def setUp(self):
super(KinozalPluginTest, self).setUp()
self.tracker_settings = TrackerSettingsMock(10, None)
self.plugin = KinozalPlugin()
self.plugin.init(self.tracker_settings)
self.urls_to_check = [
"http://kinozal.tv/details.php?id=1506818"
]
def test_can_parse_url(self):
for url in self.urls_to_check:
self.assertTrue(self.plugin.can_parse_url(url))
bad_urls = [
"http://kinozal.com/details.php?id=1506818",
"http://belzal.com/details.php?id=1506818",
]
for url in bad_urls:
self.assertFalse(self.plugin.can_parse_url(url))
@use_vcr
def test_parse_url_success(self):
parsed_url = self.plugin.parse_url("http://kinozal.tv/details.php?id=1506818")
assert parsed_url['original_name'] == u'Война против всех / War on Everyone / 2016 / ДБ / WEB-DLRip'
@use_vcr
def test_login_verify_fail(self):
assert not self.plugin.verify()
assert self.plugin.login() == LoginResult.CredentialsNotSpecified
credentials = {'username': '', 'password': ''}
assert self.plugin.update_credentials(credentials) == LoginResult.CredentialsNotSpecified
assert not self.plugin.verify()
credentials = {'username': helper.fake_login, 'password': helper.fake_password}
assert self.plugin.update_credentials(credentials) == LoginResult.IncorrentLoginPassword
assert not self.plugin.verify()
@helper.use_vcr
def test_login_verify_success(self):
credentials = {'username': helper.real_login, 'password': helper.real_password}
self.assertEqual(self.plugin.update_credentials(credentials), LoginResult.Ok)
self.assertTrue(self.plugin.verify())
def test_login_failed_exceptions_1(self):
# noinspection PyUnresolvedReferences
with patch.object(self.plugin.tracker, 'login',
side_effect=KinozalLoginFailedException(1, 'Invalid login or password')):
credentials = {'username': helper.real_login, 'password': helper.real_password}
self.assertEqual(self.plugin.update_credentials(credentials), LoginResult.IncorrentLoginPassword)
def test_login_failed_exceptions_173(self):
# noinspection PyUnresolvedReferences
with patch.object(self.plugin.tracker, 'login',
side_effect=KinozalLoginFailedException(173, 'Invalid login or password')):
credentials = {'username': helper.real_login, 'password': helper.real_password}
self.assertEqual(self.plugin.update_credentials(credentials), LoginResult.Unknown)
def test_login_unexpected_exceptions(self):
# noinspection PyUnresolvedReferences
with patch.object(self.plugin.tracker, 'login', side_effect=Exception):
credentials = {'username': helper.real_login, 'password': helper.real_password}
self.assertEqual(self.plugin.update_credentials(credentials), LoginResult.Unknown)
def test_prepare_request(self):
cookies = {'uid': helper.fake_uid, 'pass': helper.fake_pass}
# noinspection PyUnresolvedReferences
with patch.object(self.plugin.tracker, 'get_cookies', result=cookies):
url = "http://kinozal.tv/details.php?id=1506818"
request = self.plugin._prepare_request(KinozalTopic(url=url))
self.assertIsNotNone(request)
self.assertEqual(request.headers['referer'], url)
self.assertEqual(request.url, 'http://dl.kinozal.tv/download.php?id=1506818')
@use_vcr
def test_get_last_torrent_update_for_updated_yesterday_success(self):
url = 'http://kinozal.tv/details.php?id=1478373'
topic = KinozalTopic(id=1, url=url, last_torrent_update=datetime(2017, 1, 17, 10, 10, tzinfo=pytz.utc))
expected = KinozalDateParser.tz_moscow.localize(datetime(2017, 1, 19, 23, 27)).astimezone(pytz.utc)
server_now = datetime(2017, 1, 20, 12, 0, 0, tzinfo=pytz.utc)
MockDatetime.mock_now = server_now
with patch('monitorrent.plugins.trackers.kinozal.datetime.datetime', MockDatetime):
assert self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
@use_vcr
def test_get_last_torrent_update_for_updated_today_success(self):
url = 'http://kinozal.tv/details.php?id=1496310'
topic = KinozalTopic(id=1, url=url, last_torrent_update=None)
expected = KinozalDateParser.tz_moscow.localize(datetime(2017, 1, 20, 1, 30)).astimezone(pytz.utc)
server_now = datetime(2017, 1, 20, 12, 0, 0, tzinfo=pytz.utc)
MockDatetime.mock_now = server_now
with patch('monitorrent.plugins.trackers.kinozal.datetime.datetime', MockDatetime):
assert self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
@use_vcr
def test_get_last_torrent_update_for_updated_in_particular_success(self):
url = 'http://kinozal.tv/details.php?id=1508210'
topic = KinozalTopic(id=1, url=url, last_torrent_update=datetime(2017, 1, 17, 10, 10, tzinfo=pytz.utc))
expected = KinozalDateParser.tz_moscow.localize(datetime(2017, 1, 18, 21, 40)).astimezone(pytz.utc)
assert self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
@use_vcr
def test_get_last_torrent_update_for_updated_in_particular_not_changed(self):
url = 'http://kinozal.tv/details.php?id=1508210'
expected = KinozalDateParser.tz_moscow.localize(datetime(2017, 1, 18, 21, 40)).astimezone(pytz.utc)
topic = KinozalTopic(id=1, url=url, last_torrent_update=expected)
assert not self.plugin.check_changes(topic)
assert topic.last_torrent_update == expected
@use_vcr
def test_get_last_torrent_update_without_updates_success(self):
url = 'http://kinozal.tv/details.php?id=1510727'
topic = KinozalTopic(id=1, url=url, last_torrent_update=None)
assert self.plugin.check_changes(topic)
assert topic.last_torrent_update is None
| 45.309211
| 111
| 0.706113
|
0d7586a8298a739e7d2abb6b72a7b0ad6e0ddb93
| 1,367
|
py
|
Python
|
test/jpypetest/test_charSequence.py
|
yinhejianke/jpype
|
257f2b010b32bb0f18d971e0e915849642c3cf1a
|
[
"Apache-2.0"
] | 1
|
2020-01-03T06:03:14.000Z
|
2020-01-03T06:03:14.000Z
|
test/jpypetest/test_charSequence.py
|
yinhejianke/jpype
|
257f2b010b32bb0f18d971e0e915849642c3cf1a
|
[
"Apache-2.0"
] | null | null | null |
test/jpypetest/test_charSequence.py
|
yinhejianke/jpype
|
257f2b010b32bb0f18d971e0e915849642c3cf1a
|
[
"Apache-2.0"
] | null | null | null |
# *****************************************************************************
# Copyright 2019 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import jpype
import sys
import logging
import time
import common
class ConversionCharSequenceTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testAutoConvert(self):
Instant = jpype.JClass("java.time.Instant")
now = "2019-11-12T03:20:54.710948400Z"
then = Instant.parse(now)
self.assertEqual(str(then), now)
then = Instant.parse(jpype.JString(now))
self.assertEqual(str(then), now)
then = Instant.parse(jpype.JObject(now, "java.lang.CharSequence"))
self.assertEqual(str(then), now)
| 36.945946
| 79
| 0.623263
|
6a5f8ce74a376c0f86860d25ec0f5653a63d149a
| 3,490
|
py
|
Python
|
notebooks/a2/sgd.py
|
cicorias/scpd-xcs224n-general
|
783c4ed127fc06ea2cc92f05c0bb47debb73928e
|
[
"Apache-2.0"
] | null | null | null |
notebooks/a2/sgd.py
|
cicorias/scpd-xcs224n-general
|
783c4ed127fc06ea2cc92f05c0bb47debb73928e
|
[
"Apache-2.0"
] | null | null | null |
notebooks/a2/sgd.py
|
cicorias/scpd-xcs224n-general
|
783c4ed127fc06ea2cc92f05c0bb47debb73928e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Save parameters every few SGD iterations as a fail-safe
SAVE_PARAMS_EVERY = 5000
import pickle
import glob
import random
import numpy as np
import os.path as op
def load_saved_params():
"""
A helper function that loads previously saved parameters and resets
iteration start.
"""
st = 0
for f in glob.glob("saved_params_*.npy"):
iter = int(op.splitext(op.basename(f))[0].split("_")[2])
if (iter > st):
st = iter
if st > 0:
params_file = "saved_params_%d.npy" % st
state_file = "saved_state_%d.pickle" % st
params = np.load(params_file)
with open(state_file, "rb") as f:
state = pickle.load(f)
return st, params, state
else:
return st, None, None
def save_params(iter, params):
params_file = "saved_params_%d.npy" % iter
np.save(params_file, params)
with open("saved_state_%d.pickle" % iter, "wb") as f:
pickle.dump(random.getstate(), f)
def sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,
PRINT_EVERY=10):
""" Stochastic Gradient Descent
Implement the stochastic gradient descent method in this function.
Arguments:
f -- the function to optimize, it should take a single
argument and yield two outputs, a loss and the gradient
with respect to the arguments
x0 -- the initial point to start SGD from
step -- the step size for SGD
iterations -- total iterations to run SGD for
postprocessing -- postprocessing function for the parameters
if necessary. In the case of word2vec we will need to
normalize the word vectors to have unit length.
PRINT_EVERY -- specifies how many iterations to output loss
Return:
x -- the parameter value after SGD finishes
"""
# Anneal learning rate every several iterations
ANNEAL_EVERY = 20000
if useSaved:
start_iter, oldx, state = load_saved_params()
if start_iter > 0:
x0 = oldx
step *= 0.5 ** (start_iter / ANNEAL_EVERY)
if state:
random.setstate(state)
else:
start_iter = 0
x = x0
if not postprocessing:
postprocessing = lambda x: x
exploss = None
for iter in range(start_iter + 1, iterations + 1):
# You might want to print the progress every few iterations.
loss = None
        ### YOUR CODE HERE (~2 lines)
        loss, gradient = f(x)     # filled in: evaluate the loss and its gradient at the current point
        x = x - step * gradient   # standard SGD update: step against the gradient
        ### END YOUR CODE
x = postprocessing(x)
if iter % PRINT_EVERY == 0:
if not exploss:
exploss = loss
else:
exploss = .95 * exploss + .05 * loss
print("iter %d: %f" % (iter, exploss))
if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
save_params(iter, x)
if iter % ANNEAL_EVERY == 0:
step *= 0.5
return x
def sanity_check():
quad = lambda x: (np.sum(x ** 2), x * 2)
print("Running sanity checks...")
t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)
print("test 1 result:", t1)
assert abs(t1) <= 1e-6
t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)
print("test 2 result:", t2)
assert abs(t2) <= 1e-6
t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)
print("test 3 result:", t3)
assert abs(t3) <= 1e-6
print("-" * 40)
print("ALL TESTS PASSED")
print("-" * 40)
if __name__ == "__main__":
sanity_check()
| 26.439394
| 75
| 0.595129
|
40aea50e166c5ac2781c41ab41e42b8eda87d50e
| 1,508
|
py
|
Python
|
packages/pyre/framework/Dashboard.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 25
|
2018-04-23T01:45:39.000Z
|
2021-12-10T06:01:23.000Z
|
packages/pyre/framework/Dashboard.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 53
|
2018-05-31T04:55:00.000Z
|
2021-10-07T21:41:32.000Z
|
packages/pyre/framework/Dashboard.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 12
|
2018-04-23T22:50:40.000Z
|
2022-02-20T17:27:23.000Z
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
# declaration
class Dashboard:
"""
Mix-in class that provides access to the pyre executive and its managers
"""
# grab the base of all pyre exceptions
from .exceptions import PyreError
# public data
# the executive
pyre_executive = None
# framework parts
pyre_fileserver = None
pyre_nameserver = None
pyre_configurator = None
# infrastructure managers
pyre_registrar = None # the component registrar
pyre_schema = None # the database schema
# information about the runtime environment
pyre_host = None # the current host
pyre_user = None # the current user
pyre_application = None # the current application
# debugging support
@classmethod
def dashboard(cls):
"""
Dump the status of the dashboard
"""
# show me
yield "executive: {.pyre_executive}".format(cls)
yield " fileserver: {.pyre_fileserver}".format(cls)
yield " nameserver: {.pyre_nameserver}".format(cls)
yield " configurator: {.pyre_configurator}".format(cls)
yield " registrar: {.pyre_registrar}".format(cls)
yield " schema: {.pyre_schema}".format(cls)
yield " host: {.pyre_host}".format(cls)
yield " user: {.pyre_user}".format(cls)
yield " application: {.pyre_application}".format(cls)
# all done
return
# end of file
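A small sketch of the debugging helper above; outside a running pyre application the class attributes are still None, so each line simply reports that.

# Minimal usage sketch: dump the dashboard status line by line.
for line in Dashboard.dashboard():
    print(line)   # e.g. "executive: None" until the framework boots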
| 25.559322
| 76
| 0.637931
|
6e84a1a991f5d28ae363947ccea1b1c89f1f97db
| 2,657
|
py
|
Python
|
hydra/_internal/grammar/functions.py
|
sara-nl/hydra
|
8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7
|
[
"MIT"
] | 5,847
|
2019-10-03T04:20:44.000Z
|
2022-03-31T17:07:46.000Z
|
hydra/_internal/grammar/functions.py
|
sara-nl/hydra
|
8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7
|
[
"MIT"
] | 1,393
|
2019-10-04T01:03:38.000Z
|
2022-03-31T20:29:35.000Z
|
hydra/_internal/grammar/functions.py
|
sara-nl/hydra
|
8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7
|
[
"MIT"
] | 505
|
2019-10-03T19:41:42.000Z
|
2022-03-31T11:40:16.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import inspect
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
from omegaconf._utils import type_str
from hydra._internal.grammar.utils import is_type_matching
from hydra.core.override_parser.types import QuotedString
from hydra.errors import HydraException
@dataclass
class FunctionCall:
name: str
args: List[Any]
kwargs: Dict[str, Any]
@dataclass
class Functions:
definitions: Dict[str, inspect.Signature] = field(default_factory=dict)
functions: Dict[str, Callable[..., Any]] = field(default_factory=dict)
def register(self, name: str, func: Callable[..., Any]) -> None:
if name in self.definitions:
raise HydraException(f"Function named '{name}' is already registered")
self.definitions[name] = inspect.signature(func)
self.functions[name] = func
def eval(self, func: FunctionCall) -> Any:
if func.name not in self.definitions:
raise HydraException(
f"Unknown function '{func.name}'"
f"\nAvailable: {','.join(sorted(self.definitions.keys()))}\n"
)
sig = self.definitions[func.name]
# unquote strings in args
args = []
for arg in func.args:
if isinstance(arg, QuotedString):
arg = arg.text
args.append(arg)
# Unquote strings in kwargs values
kwargs = {}
for key, val in func.kwargs.items():
if isinstance(val, QuotedString):
val = val.text
kwargs[key] = val
bound = sig.bind(*args, **kwargs)
for idx, arg in enumerate(bound.arguments.items()):
name = arg[0]
value = arg[1]
expected_type = sig.parameters[name].annotation
if sig.parameters[name].kind == inspect.Parameter.VAR_POSITIONAL:
for iidx, v in enumerate(value):
if not is_type_matching(v, expected_type):
raise TypeError(
f"mismatch type argument {name}[{iidx}]:"
f" {type_str(type(v))} is incompatible with {type_str(expected_type)}"
)
else:
if not is_type_matching(value, expected_type):
raise TypeError(
f"mismatch type argument {name}:"
f" {type_str(type(value))} is incompatible with {type_str(expected_type)}"
)
return self.functions[func.name](*bound.args, **bound.kwargs)
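A hedged sketch of the register/eval cycle above; the `add` function and its registration are hypothetical and are not how Hydra wires up its real grammar functions.

# Hypothetical example: register a typed function, then evaluate a FunctionCall.
def add(a: int, b: int) -> int:
    return a + b

functions = Functions()
functions.register("add", add)
result = functions.eval(FunctionCall(name="add", args=[1, 2], kwargs={}))  # -> 3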
| 34.960526
| 98
| 0.587505
|
bc6a1fd5b133043c991687da49f403d764a91033
| 6,915
|
py
|
Python
|
SleepEDF/tensorflow_net/deep_cnn_baseline_1to3/equaldatagenerator_from_list_v2.py
|
sajjadkarimi91/MultitaskSleepNet
|
80c45e30467fde0f8303e5deabacd4988ab387b0
|
[
"MIT"
] | 29
|
2018-10-24T09:42:50.000Z
|
2022-03-18T07:03:21.000Z
|
SleepEDF/tensorflow_net/deep_cnn_baseline_1to3/equaldatagenerator_from_list_v2.py
|
sajjadkarimi91/MultitaskSleepNet
|
80c45e30467fde0f8303e5deabacd4988ab387b0
|
[
"MIT"
] | 3
|
2018-09-25T08:53:44.000Z
|
2020-08-16T12:37:15.000Z
|
SleepEDF/tensorflow_net/deep_cnn_baseline_1to3/equaldatagenerator_from_list_v2.py
|
sajjadkarimi91/MultitaskSleepNet
|
80c45e30467fde0f8303e5deabacd4988ab387b0
|
[
"MIT"
] | 17
|
2018-09-25T15:12:31.000Z
|
2022-02-23T09:38:39.000Z
|
# load data from mat files and generate batch-by-batch with equal sampling for different classes
# This is used when balancing data between different classes in a data batch for training
import numpy as np
from scipy.io import loadmat
import h5py
class EqualDataGenerator:
def __init__(self, filelist, data_shape=np.array([29, 129]),shuffle=False):
# Init params
self.shuffle = shuffle
self.filelist = filelist
self.data_shape = data_shape
self.X = np.array([])
self.y = np.array([])
self.label = np.array([])
self.boundary_index = np.array([])
self.Ncat = 5
# read from mat file
self.read_mat_filelist(self.filelist)
self.data_size = len(self.label)
# create pointers for different classes
self.nclass = self.y.shape[1]
self.data_index = []
for i in range(self.nclass):
ind = np.where(self.y[:,i] == 1)[0]
print(len(ind))
mask = np.in1d(ind,self.boundary_index, invert=True)
ind = ind[mask]
print(len(ind))
self.data_index.append(ind)
self.pointer = np.zeros([self.nclass,1])
if self.shuffle:
self.shuffle_data()
def read_mat_filelist(self,filelist):
"""
Scan the file list and read them one-by-one
"""
files = []
self.data_size = 0
with open(filelist) as f:
lines = f.readlines()
for l in lines:
items = l.split()
files.append(items[0])
self.data_size += int(items[1])
print(self.data_size)
self.X = np.ndarray([self.data_size, self.data_shape[0], self.data_shape[1]])
self.y = np.ndarray([self.data_size, self.Ncat])
self.label = np.ndarray([self.data_size])
count = 0
for i in range(len(files)):
X, y, label = self.read_mat_file(files[i].strip())
self.X[count : count + len(X)] = X
self.y[count : count + len(X)] = y
self.label[count : count + len(X)] = label
self.boundary_index = np.append(self.boundary_index, [count, (count + len(X) - 1)])
count += len(X)
print(count)
print("Boundary indices")
print(self.boundary_index)
print(self.X.shape, self.y.shape, self.label.shape)
def read_mat_file(self,filename):
"""
        Read an HDF5 .mat file and parse its X, y and label arrays
"""
# Load data
print(filename)
data = h5py.File(filename,'r')
data.keys()
X = np.array(data['X'])
X = np.transpose(X, (2, 1, 0)) # rearrange dimension
y = np.array(data['y'])
y = np.transpose(y, (1, 0)) # rearrange dimension
label = np.array(data['label'])
label = np.transpose(label, (1, 0)) # rearrange dimension
label = np.squeeze(label)
return X, y, label
def shuffle_data(self):
"""
Random shuffle the data points indexes
"""
        # create a permuted index list and shuffle each class's data according to it
for i in range(self.nclass):
data_index = self.data_index[i]
idx = np.random.permutation(len(data_index))
data_index = data_index[idx]
self.data_index[i] = data_index
def numel_per_class(self, classid):
if(classid >= 0 and classid < self.nclass):
return len(self.data_index[classid])
else:
return 0
def reset_pointer(self):
"""
reset pointer to begin of the list
"""
for i in range(self.nclass):
self.pointer[i] = 0
if self.shuffle:
self.shuffle_data()
def next_batch_per_class(self, batch_size, classid):
"""
        Get the next n (= batch_size) samples from the given class
"""
class_size = self.numel_per_class(classid)
if(self.pointer[classid] + batch_size <= class_size):
data_index = self.data_index[classid][int(self.pointer.item(classid)):int(self.pointer.item(classid)) + batch_size]
self.pointer[classid] += batch_size #update pointer
else:
data_index = self.data_index[classid][int(self.pointer.item(classid)): class_size]
leftover = batch_size - (class_size - self.pointer.item(classid))
data_index = np.concatenate([data_index, self.data_index[classid][0 : int(leftover)]])
self.pointer[classid] = leftover #update pointer
return data_index
def next_batch(self, batch_size_per_class):
"""
        Get the next n (= batch_size_per_class) samples from every class
"""
data_index = []
for i in range(self.nclass):
data_index_i = self.next_batch_per_class(batch_size_per_class, i)
data_index = np.concatenate([data_index, data_index_i])
idx = np.random.permutation(len(data_index))
data_index = data_index[idx]
batch_size = batch_size_per_class*self.nclass
batch_x = np.ndarray([batch_size, self.data_shape[0], self.data_shape[1]])
batch_y1 = np.ndarray([batch_size, self.y.shape[1]])
batch_y2 = np.ndarray([batch_size, self.y.shape[1]])
batch_y3 = np.ndarray([batch_size, self.y.shape[1]])
batch_label1 = np.ndarray([batch_size])
batch_label2 = np.ndarray([batch_size])
batch_label3 = np.ndarray([batch_size])
for i in range(len(data_index)):
batch_x[i] = self.X[int(data_index.item(i)), :, :]
batch_y2[i] = self.y[int(data_index.item(i))]
batch_label2[i] = self.label[int(data_index.item(i))]
batch_y1[i] = self.y[int(data_index.item(i) - 1)]
batch_label1[i] = self.label[int(data_index.item(i) -1)]
batch_y3[i] = self.y[int(data_index.item(i) + 1)]
batch_label3[i] = self.label[int(data_index.item(i) +1)]
            # sanity check (disabled): each one-hot label vector should sum to a positive value
#assert np.sum(batch_y[i]) > 0.0
# Get next batch of image (path) and labels
batch_x.astype(np.float32)
batch_y1.astype(np.float32)
batch_label1.astype(np.float32)
batch_y2.astype(np.float32)
batch_label2.astype(np.float32)
batch_y3.astype(np.float32)
batch_label3.astype(np.float32)
#return array of images and labels
return batch_x, batch_y1, batch_label1, batch_y2, batch_label2, batch_y3, batch_label3
def filter_with_filterbank(self, Wfb):
X = np.reshape(self.X, (self.data_size*self.data_shape[0], self.data_shape[1]))
X = np.dot(X, Wfb)
self.X = np.reshape(X, (self.data_size, self.data_shape[0], Wfb.shape[1]))
self.data_shape = self.X.shape[1:]
del X
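A hedged usage sketch for the generator above; 'train_list.txt' is a hypothetical file list in which each line holds a .mat file path followed by its number of epochs, matching what read_mat_filelist expects.

# Hypothetical usage: build balanced batches with 10 epochs per sleep stage.
gen = EqualDataGenerator("train_list.txt", data_shape=np.array([29, 129]), shuffle=True)
x, y_prev, lab_prev, y_cur, lab_cur, y_next, lab_next = gen.next_batch(batch_size_per_class=10)
gen.reset_pointer()  # call at the end of every training epoch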
| 36.97861
| 127
| 0.586985
|
a8813799a0c0c565a262533d989f36568740e542
| 4,120
|
py
|
Python
|
workspace_tools/export/uvision5.py
|
OmarValdez/mbed
|
bd78f98496fab2f02162521d7e279d7bd0f0840e
|
[
"Apache-2.0"
] | null | null | null |
workspace_tools/export/uvision5.py
|
OmarValdez/mbed
|
bd78f98496fab2f02162521d7e279d7bd0f0840e
|
[
"Apache-2.0"
] | null | null | null |
workspace_tools/export/uvision5.py
|
OmarValdez/mbed
|
bd78f98496fab2f02162521d7e279d7bd0f0840e
|
[
"Apache-2.0"
] | null | null | null |
"""
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os.path import basename, join, dirname
from project_generator_definitions.definitions import ProGenDef
from workspace_tools.export.exporters import Exporter
from workspace_tools.targets import TARGET_MAP, TARGET_NAMES
# If you wish to add a new target, add it to project_generator_definitions, and then
# define progen_target name in the target class (`` self.progen_target = 'my_target_name' ``)
# There are 2 default mbed templates (predefined settings) uvision.uvproj and uvproj_microlib.uvproj.tmpl
class Uvision5(Exporter):
"""
Exporter class for uvision5. This class uses project generator.
"""
    # These two are kept for exporter backwards compatibility
NAME = 'uVision5'
TOOLCHAIN = 'ARM'
    # PROGEN_ACTIVE tells exporter scripts that this exporter is progen-based
PROGEN_ACTIVE = True
# backward compatibility with our scripts
TARGETS = []
for target in TARGET_NAMES:
try:
if (ProGenDef('uvision5').is_supported(str(TARGET_MAP[target])) or
ProGenDef('uvision5').is_supported(TARGET_MAP[target].progen['target'])):
TARGETS.append(target)
except AttributeError:
# target is not supported yet
continue
def get_toolchain(self):
return TARGET_MAP[self.target].default_toolchain
def generate(self):
""" Generates the project files """
project_data = self.progen_get_project_data()
tool_specific = {}
# Expand tool specific settings by uvision specific settings which are required
try:
if TARGET_MAP[self.target].progen['uvision5']['template']:
tool_specific['uvision5'] = TARGET_MAP[self.target].progen['uvision5']
except KeyError:
            # fall back to the default uvision template shipped with the mbed projects
tool_specific['uvision5'] = {
'template': [join(dirname(__file__), 'uvision.uvproj.tmpl')],
}
project_data['tool_specific'] = {}
project_data['tool_specific'].update(tool_specific)
# get flags from toolchain and apply
project_data['tool_specific']['uvision5']['misc'] = {}
project_data['tool_specific']['uvision5']['misc']['asm_flags'] = list(set(self.toolchain.flags['common'] + self.toolchain.flags['asm']))
project_data['tool_specific']['uvision5']['misc']['c_flags'] = list(set(self.toolchain.flags['common'] + self.toolchain.flags['c']))
# not compatible with c99 flag set in the template
project_data['tool_specific']['uvision5']['misc']['c_flags'].remove("--c99")
project_data['tool_specific']['uvision5']['misc']['cxx_flags'] = list(set(self.toolchain.flags['common'] + self.toolchain.flags['ld']))
project_data['tool_specific']['uvision5']['misc']['ld_flags'] = self.toolchain.flags['ld']
i = 0
for macro in project_data['common']['macros']:
# armasm does not like floating numbers in macros, timestamp to int
if macro.startswith('MBED_BUILD_TIMESTAMP'):
timestamp = macro[len('MBED_BUILD_TIMESTAMP='):]
project_data['common']['macros'][i] = 'MBED_BUILD_TIMESTAMP=' + str(int(float(timestamp)))
# armasm does not even accept MACRO=string
if macro.startswith('MBED_USERNAME'):
project_data['common']['macros'].pop(i)
i += 1
project_data['common']['macros'].append('__ASSERT_MSG')
self.progen_gen_file('uvision5', project_data)
| 45.777778
| 144
| 0.673301
|
e92bb226c43a5e3e4520c9c4ba92e52ace04d4e6
| 1,002
|
py
|
Python
|
kubernetes_asyncio/test/test_v1_local_volume_source.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_v1_local_volume_source.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_v1_local_volume_source.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_local_volume_source import V1LocalVolumeSource # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1LocalVolumeSource(unittest.TestCase):
"""V1LocalVolumeSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1LocalVolumeSource(self):
"""Test V1LocalVolumeSource"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.v1_local_volume_source.V1LocalVolumeSource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.05
| 124
| 0.735529
|
104dd58ba1eb3de188d2d739e59cc6e16d17428d
| 6,442
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20190801/get_route_table.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20190801/get_route_table.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20190801/get_route_table.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetRouteTableResult',
'AwaitableGetRouteTableResult',
'get_route_table',
]
@pulumi.output_type
class GetRouteTableResult:
"""
Route table resource.
"""
def __init__(__self__, disable_bgp_route_propagation=None, etag=None, id=None, location=None, name=None, provisioning_state=None, routes=None, subnets=None, tags=None, type=None):
if disable_bgp_route_propagation and not isinstance(disable_bgp_route_propagation, bool):
raise TypeError("Expected argument 'disable_bgp_route_propagation' to be a bool")
pulumi.set(__self__, "disable_bgp_route_propagation", disable_bgp_route_propagation)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if routes and not isinstance(routes, list):
raise TypeError("Expected argument 'routes' to be a list")
pulumi.set(__self__, "routes", routes)
if subnets and not isinstance(subnets, list):
raise TypeError("Expected argument 'subnets' to be a list")
pulumi.set(__self__, "subnets", subnets)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="disableBgpRoutePropagation")
def disable_bgp_route_propagation(self) -> Optional[bool]:
"""
Whether to disable the routes learned by BGP on that route table. True means disable.
"""
return pulumi.get(self, "disable_bgp_route_propagation")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the route table resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def routes(self) -> Optional[Sequence['outputs.RouteResponse']]:
"""
Collection of routes contained within a route table.
"""
return pulumi.get(self, "routes")
@property
@pulumi.getter
def subnets(self) -> Sequence['outputs.SubnetResponse']:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetRouteTableResult(GetRouteTableResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRouteTableResult(
disable_bgp_route_propagation=self.disable_bgp_route_propagation,
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
routes=self.routes,
subnets=self.subnets,
tags=self.tags,
type=self.type)
def get_route_table(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
route_table_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteTableResult:
"""
Route table resource.
:param str expand: Expands referenced resources.
:param str resource_group_name: The name of the resource group.
:param str route_table_name: The name of the route table.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['routeTableName'] = route_table_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20190801:getRouteTable', __args__, opts=opts, typ=GetRouteTableResult).value
return AwaitableGetRouteTableResult(
disable_bgp_route_propagation=__ret__.disable_bgp_route_propagation,
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
routes=__ret__.routes,
subnets=__ret__.subnets,
tags=__ret__.tags,
type=__ret__.type)
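# Illustrative usage sketch: how `get_route_table` might be consumed from a Pulumi
# program. The resource group and route table names below are hypothetical
# placeholders, and the lookup assumes an authenticated Azure stack is configured.
def _example_get_route_table() -> AwaitableGetRouteTableResult:
    result = get_route_table(
        resource_group_name="example-rg",        # hypothetical resource group
        route_table_name="example-route-table",  # hypothetical route table
    )
    # `routes` and `subnets` are lists of typed output objects from `outputs`.
    pulumi.log.info("route table {} has {} routes".format(result.name, len(result.routes or [])))
    return result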
| 34.449198
| 183
| 0.638777
|
b71e2c5fdccbcfa58acb8ebdc0803e4557887ab4
| 8,086
|
py
|
Python
|
scripts/kernelBinaryClassifDemo.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 2
|
2021-04-10T18:12:19.000Z
|
2021-05-11T12:07:40.000Z
|
scripts/kernelBinaryClassifDemo.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 1
|
2021-04-19T12:25:26.000Z
|
2021-04-19T12:25:26.000Z
|
scripts/kernelBinaryClassifDemo.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 1
|
2021-01-17T08:46:00.000Z
|
2021-01-17T08:46:00.000Z
|
import numpy as np
import h5py
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.kernel_approximation import RBFSampler
# Relevance Vector Machine classifier using the EM algorithm of Michael E. Tipping.
### This is a Python implementation of the Relevance Vector Machine classifier; it is based on github.com/ctgk/PRML/blob/master/prml/kernel/relevance_vector_classifier.py
class RVC:
def sigmoid(self,a):
return np.tanh(a * 0.5) * 0.5 + 0.5
# Kernel matrix using rbf kernel with gamma = 0.3.
def kernel_mat(self,X, Y):
(x, y) = (np.tile(X, (len(Y), 1, 1)).transpose(1, 0, 2),
np.tile(Y, (len(X), 1, 1)))
d = np.repeat(1 / (0.3 * 0.3), X.shape[-1]) * (x - y) ** 2
return np.exp(-0.5 * np.sum(d, axis=-1))
def __init__(self, alpha=1.):
self.threshold_alpha = 1e8
self.alpha = alpha
self.iter_max = 100
self.relevance_vectors_ = []
    # pseudo-inverse estimate that also works for singular matrices.
def ps_inv(self, m):
# assuming it is a square matrix.
a = m.shape[0]
i = np.eye(a, a)
return np.linalg.lstsq(m, i, rcond=None)[0]
    '''
    For the current fixed values of alpha, the most probable
    weights are found by maximizing p(w|t,alpha) over w,
    using the Laplace approximation (a Gaussian fit around the mode via the Hessian).
    (E step)
    w = mean of the Gaussian approximation to p(w|t,alpha)
    cov = inverse of the negated Hessian of log p(w|t,alpha) at the mode
    '''
def _map_estimate(self, X, t, w, n_iter=10):
for _ in range(n_iter):
y = self.sigmoid(X @ w)
g = X.T @ (y - t) + self.alpha * w
H = (X.T * y * (1 - y)) @ X + np.diag(self.alpha) # negated Hessian of p(w/t,alpha)
w -= np.linalg.lstsq(H, g, rcond=None)[0] # works even if for singular matrices.
return w, self.ps_inv(H) # inverse of H is the covariance of the gaussian approximation.
    '''
    Fitting of input-target pairs works by
    iteratively finding the most probable weights (done by the _map_estimate method)
    and optimizing the hyperparameters (alpha) until there is no
    significant change in alpha.
    (M step)
    Optimizing alpha:
    For the given targets and the current variance (sigma^2), alpha is optimized over p(t|alpha, variance).
    This is done with MacKay's approach (ARD):
        alpha(new) = gamma / mean^2
        where gamma = 1 - alpha(old) * covariance.
    After finding the hyperparameters (alpha),
    the samples whose alpha is below the threshold (hence weight >> 0)
    are chosen as relevance vectors.
    The prediction is then y = sign(phi(X) @ mean) (mean contains the optimal weights).
    '''
def fit(self, X, y):
Phi = self.kernel_mat(X, X)
N = len(y)
self.alpha = np.zeros(N) + self.alpha
mean = np.zeros(N)
for i in range(self.iter_max):
param = np.copy(self.alpha)
mean, cov = self._map_estimate(Phi, y, mean, 10)
gamma = 1 - self.alpha * np.diag(cov)
self.alpha = gamma / np.square(mean)
np.clip(self.alpha, 0, 1e10, out=self.alpha)
if np.allclose(param, self.alpha):
break
ret_alpha = self.alpha < self.threshold_alpha
self.relevance_vectors_ = X[ret_alpha]
self.y = y[ret_alpha]
self.alpha = self.alpha[ret_alpha]
Phi = self.kernel_mat(self.relevance_vectors_, self.relevance_vectors_)
mean = mean[ret_alpha]
self.mean, self.covariance = self._map_estimate(Phi, self.y, mean, 100)
# gives probability for target to be class 0.
def predict_proba(self, X):
phi = self.kernel_mat(X, self.relevance_vectors_)
mu_a = phi @ self.mean
var_a = np.sum(phi @ self.covariance * phi, axis=1)
return 1 - self.sigmoid(mu_a / np.sqrt(1 + np.pi * var_a / 8))
def predict(self, X):
phi = self.kernel_mat(X, self.relevance_vectors_)
        return (phi @ self.mean > 0).astype(int)  # plain int: np.int was removed in newer NumPy
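# Minimal sketch of exercising the RVC class above on synthetic blobs; the blob
# centres, sample counts and random seed are arbitrary choices, not part of the original demo.
def _rvc_toy_example():
    rng = np.random.RandomState(0)
    X_toy = np.vstack([rng.randn(20, 2) + [2, 2], rng.randn(20, 2) - [2, 2]])
    y_toy = np.hstack([np.zeros(20, dtype=int), np.ones(20, dtype=int)])
    clf = RVC()
    clf.fit(X_toy, y_toy)
    # Only a small subset of the training points is kept as relevance vectors.
    acc = np.mean(clf.predict(X_toy) == y_toy)
    return len(clf.relevance_vectors_), acc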
# scipy.io loadmat doesn't seem to accept this version of the data file, so read it with h5py instead
data = {}
with h5py.File('/pyprobml/data/bishop2class.mat', 'r') as f:
for name, d in f.items():
data[name] = np.array(d)
X = data['X'].transpose()
Y = data['Y']
y = Y.flatten()
y = y - 1 # changing to {0,1}
# Feature Mapping X to rbf_features to simulate non-linear logreg using linear ones.
rbf_feature = RBFSampler(gamma=0.3, random_state=1)
X_rbf = rbf_feature.fit_transform(X)
# Using CV to find SVM regularization parameter.
C = np.power(2, np.linspace(-5, 5, 10))
mean_scores = [cross_val_score(SVC(kernel='rbf', gamma=0.3, C=c), X, y, cv=5).mean() for c in C]
c = C[np.argmax(mean_scores)]
classifiers = {
'logregL2': LogisticRegression(C=0.2, penalty='l2',
solver='saga',
multi_class='ovr',
max_iter=10000),
'logregL1': LogisticRegression(C=1, penalty='l1',
solver='saga',
multi_class='ovr',
max_iter=10000),
'RVM': RVC(),
'SVM': SVC(kernel='rbf', gamma=0.3, C=c, probability=True)
}
h = 0.05 # step size in the mesh
# Mesh to use in the boundary plotting.
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
def plot_scatters(X, y):
for class_value in range(2):
# get row indexes for samples with this class
row_ix = np.where(y == class_value)
# creating scatter of these samples
plt.scatter(X[row_ix, 0], X[row_ix, 1], cmap='Paired', marker='X', s=30)
def plot_SVs(SV):
plt.scatter(SV[:, 0], SV[:, 1], s=100, facecolor="none", edgecolor="green")
for (name, clf) in classifiers.items():
if name == 'logregL2':
clf.fit(X_rbf, y)
Z = clf.predict_proba(rbf_feature.fit_transform(np.c_[xx.ravel(), yy.ravel()]))
Z = Z[:, 0].reshape(xx.shape)
plt.title(name + ", nerr= {}".format(np.sum(y != clf.predict(X_rbf))))
plt.contour(xx, yy, Z, np.linspace(0, 1, 5), colors=['black', 'w'])
plot_scatters(X, y)
        plt.savefig("../figures/kernelBinaryClassifDemo{}.pdf".format(name), dpi=300)  # save before show so the figure is not blank
        plt.show()
elif name == 'logregL1':
clf.fit(X_rbf, y)
Z = clf.predict_proba(rbf_feature.fit_transform(np.c_[xx.ravel(), yy.ravel()]))
Z = Z[:, 0].reshape(xx.shape)
plt.title(name + ", nerr= {}".format(np.sum(y != clf.predict(X_rbf))))
plt.contour(xx, yy, Z, np.linspace(0, 1, 5), colors=['w','black', 'w'])
plot_scatters(X, y)
conf_scores = np.abs(clf.decision_function(X_rbf))
SV = X[(conf_scores > conf_scores.mean())] # samples having a higher confidence scores are taken as support vectors.
plot_SVs(SV)
        plt.savefig("../figures/kernelBinaryClassifDemo{}.pdf".format(name), dpi=300)  # save before show so the figure is not blank
        plt.show()
elif name == 'RVM':
clf.fit(X, y)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title(name + ", nerr= {}".format(np.sum(y != clf.predict(X))))
plt.contour(xx, yy, Z, np.linspace(0, 1, 5), colors=['black', 'w'])
plot_scatters(X, y)
plot_SVs(clf.relevance_vectors_)
        plt.savefig("../figures/kernelBinaryClassifDemo{}.pdf".format(name), dpi=300)  # save before show so the figure is not blank
        plt.show()
elif name == 'SVM':
clf.fit(X, y)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 0]
Z = Z.reshape(xx.shape)
plt.title(name + ", nerr= {}".format(np.sum(y != clf.predict(X))))
plt.contour(xx, yy, Z, colors=['w', 'w', 'w', 'black'])
plot_scatters(X, y)
plot_SVs(clf.support_vectors_)
        plt.savefig("../figures/kernelBinaryClassifDemo{}.pdf".format(name), dpi=300)  # save before show so the figure is not blank
        plt.show()
| 39.062802
| 165
| 0.593371
|
ef1cce3d6eef4dd0221050bfd6d3909a6597e76d
| 26,948
|
py
|
Python
|
Climate_v1.1.py
|
gschivley/co-fire
|
b364063fe36b733aee2a903b7d570d2ab11b9096
|
[
"MIT"
] | 2
|
2015-10-28T15:49:10.000Z
|
2019-09-15T06:08:10.000Z
|
Climate_v1.1.py
|
gschivley/co-fire
|
b364063fe36b733aee2a903b7d570d2ab11b9096
|
[
"MIT"
] | null | null | null |
Climate_v1.1.py
|
gschivley/co-fire
|
b364063fe36b733aee2a903b7d570d2ab11b9096
|
[
"MIT"
] | 2
|
2016-12-06T10:16:24.000Z
|
2021-05-18T12:51:21.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 16:09:30 2014
@author: Greg
"""
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import fftconvolve
from scipy.integrate import cumtrapz
# Radiative efficiencies of each gas, calculated from AR5 & AR5 SM
co2_re, ch4_re, n2o_re, sf6_re = 1.756E-15, 1.277E-13 * 1.65, 3.845E-13, 2.010E-11
# AR5 2013 IRF values
a0, a1, a2, a3 = 0.2173, 0.224, 0.2824, 0.2763
tau1, tau2, tau3 = 394.4, 36.54, 4.304
def f0(t):
return a0
def f1(t):
return a1*np.exp(-t/tau1)
def f2(t):
return a2*np.exp(-t/tau2)
def f3(t):
return a3*np.exp(-t/tau3)
def CO2_AR5(t):
return f0(t) + f1(t) + f2(t) + f3(t)
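# Quick sanity check on the AR5 CO2 impulse response defined above: at t = 0 the four
# terms sum to a0 + a1 + a2 + a3 = 0.2173 + 0.2240 + 0.2824 + 0.2763 = 1.0 (the whole
# pulse is still airborne), and the a0 term is the fraction that effectively never decays.
def _check_co2_irf():
    assert abs(CO2_AR5(0.0) - 1.0) < 1e-9
    return CO2_AR5(np.array([0.0, 20.0, 100.0]))  # airborne fraction after 0, 20, 100 years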
#Methane response function
CH4tau = 12.4
def CH4_AR5(t):
    return np.exp(-t/CH4tau)
#N2O response function
N2Otau = 121
def N2O_AR5(t):
    return np.exp(-t/N2Otau)
#SF6 response function
SF6tau = 3200
def SF6_AR5(t):
    return np.exp(-t/SF6tau)
#Temperature response function to radiative forcing
def AR5_GTP(t):
c1, c2, d1, d2 = 0.631, 0.429, 8.4, 409.5
""" The default response function for radiative forcing from AR5. Source is \
Boucher (2008). ECR is 3.9K, which is on the high side.
Convolve with radiative forcing to get temperature.
"""
return c1/d1*np.exp(-t/d1) + c2/d2*np.exp(-t/d2)
def Alt_GTP(t):
c1, c2, d1, d2 = 0.43, 0.32, 2.57, 82.24
""" The response function for radiative forcing. Taken from Olivie and Peters (2013),
Table 4, using the CMIP5 data. This has a slightly lower climate response value than
Boucher (2008), which is used in AR5.
Convolve with radiative forcing to get temperature.
"""
return c1/d1*np.exp(-t/d1) + c2/d2*np.exp(-t/d2)
def Alt_low_GTP(t):
c1, c2, d1, d2 = 0.43 / (1 + 0.29), 0.32 / (1 + 0.59), 2.57 * 1.46, 82.24 * 2.92
#c1, c2, d1, d2 = 0.48 * (1 - 0.3), 0.20 * (1 - 0.52), 7.15 * 1.35, 105.55 * 1.38
#c1, c2, d1, d2 = 0.48 * (1 - 0.3), 0.20 * (1 - 0.52), 7.15, 105.55
#c1, c2, d1, d2 = 0.631 * 0.7, 0.429 * 0.7, 8.4, 409.5
""" The response function for radiative forcing. Taken from Olivie and Peters (2013),
Table 4, using the CMIP5 data. This has a lower climate response value than AR5.
The uncertainty in Table 4 assumes lognormal distributions, which is why values less
than the median are determined by dividing by (1 + uncertainty).
Convolve with radiative forcing to get temperature.
"""
return c1/d1*np.exp(-t/d1) + c2/d2*np.exp(-t/d2)
def Alt_high_GTP(t):
c1, c2, d1, d2 = 0.43 * 1.29, 0.32 * 1.59, 2.57 / (1 + 0.46), 82.24 / (1 + 1.92)
#c1, c2, d1, d2 = 0.48 * 1.3, 0.20 * 1.52, 7.15 * (1 - 0.35), 105.55 * (1 - 0.38)
#c1, c2, d1, d2 = 0.48 * 1.2, 0.20 * 1.52, 7.15, 105.55
#c1, c2, d1, d2 = 0.631, 0.429 * 1.3, 8.4, 409.5
""" The response function for radiative forcing. Taken from Olivie and Peters (2013),
Table 4, using the CMIP5 data. This has a higher climate response value than AR5.
The uncertainty in Table 4 assumes lognormal distributions, which is why values less
than the median are determined by dividing by (1 + uncertainty).
Convolve with radiative forcing to get temperature.
"""
return c1/d1*np.exp(-t/d1) + c2/d2*np.exp(-t/d2)
def CO2_rf(emission, years, tstep=0.01, kind='linear', emiss_type='sustained'):
"""Transforms an array of CO2 emissions into radiative forcing with user-
defined time-step.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
    tstep: time step to be used in the calculations
    kind: the type of interpolation to use; can be linear or cubic
    emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
    divided by tstep. This allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
f = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
inter_emissions = f(time)
atmos = np.resize(fftconvolve(CO2_AR5(time), inter_emissions), time.size) * tstep
rf = atmos * co2_re
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return rf[fil]
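# Sketch of driving CO2_rf (values are illustrative only): a sustained emission of
# 1 kg/yr for 100 years, evaluated at the default tstep of 0.01 years and returned
# at yearly resolution in W m^-2 via the co2_re scaling above.
def _example_co2_rf():
    yrs = np.arange(0, 101)
    emiss = np.ones_like(yrs, dtype=float)  # 1 kg emitted in every year
    return yrs, CO2_rf(emiss, yrs)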
def CO2_rate(emission, years, tstep=0.01, kind='linear', emiss_type='sustained'):
"""Transforms an array of CO2 emissions into radiative forcing with user-
defined time-step.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
: time step to be used in the calculations
kind: the type of interpolation to use; can be linear or cubic
emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
divided by . this allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
f = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
inter_emissions = f(time)
atmos = np.resize(fftconvolve(CO2_AR5(time), inter_emissions), time.size) * tstep
rf = atmos * co2_re
dx = np.gradient(time)
rate = np.gradient(rf, dx)
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return rate[fil]
def CO2_crf(emission, years, tstep=0.01, kind='linear', emiss_type='sustained'):
"""Transforms an array of CO2 emissions into radiative forcing with user-
defined time-step.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
: time step to be used in the calculations
kind: the type of interpolation to use; can be linear or cubic
emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
divided by . this allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
f = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
inter_emissions = f(time)
atmos = np.resize(fftconvolve(CO2_AR5(time), inter_emissions), time.size) * tstep
rf = atmos * co2_re
crf = cumtrapz(rf, dx = tstep, initial = 0)
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return crf[fil]
def CO2_temp(emission, years, tstep=0.01, kind='linear', source='AR5',
emiss_type='sustained'):
"""Transforms an array of CO2 emissions into temperature with user-defined
time-step. Default temperature IRF is from AR5, use 'Alt_low' or 'Alt_high'
for a sensitivity test.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
    tstep: time step to be used in the calculations
    kind: the type of interpolation to use; can be linear or cubic
    source: the source of parameters for the temperature IRF. default is AR5,
    'Alt', 'Alt_low', and 'Alt_high' are also options.
    emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
    divided by tstep. This allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
f = interp1d(years, emission, kind=kind, bounds_error=False)
    time = np.linspace(min(years), end, int(round(end/tstep)) + 1)
inter_emissions = f(time)
atmos = np.resize(fftconvolve(CO2_AR5(time), inter_emissions), time.size) * tstep
rf = atmos * co2_re
if source == 'AR5':
temp = np.resize(fftconvolve(AR5_GTP(time), rf), time.size) * tstep
elif source == 'Alt':
temp = np.resize(fftconvolve(Alt_GTP(time), rf), time.size) * tstep
elif source == 'Alt_low':
temp = np.resize(fftconvolve(Alt_low_GTP(time), rf), time.size) * tstep
elif source == 'Alt_high':
temp = np.resize(fftconvolve(Alt_high_GTP(time), rf), time.size) * tstep
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return temp[fil]
def ch42co2(t, alpha=0.51):
"""As methane decays some fraction is converted to CO2. This function is
from Boucher (2009). By default it converts 51%. The convolution of this
function with the methane emission profile gives the CO2 emission profile.
t: time
alpha: fraction of methane converted to CO2
"""
ch4tau = 12.4
return 1/ch4tau * alpha * np.exp(-t/ch4tau)
def CH4_rf(emission, years, tstep=0.01, kind='linear',
decay=True, emiss_type='sustained'):
"""Transforms an array of methane emissions into radiative forcing with user-defined
time-step.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
    tstep: time step to be used in the calculations
    kind: the type of interpolation to use; can be linear or cubic
    emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
    divided by tstep. This allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
fch4 = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
ch4_inter_emissions = fch4(time)
ch4_atmos = np.resize(fftconvolve(CH4_AR5(time), ch4_inter_emissions),
time.size) * tstep
co2 = np.resize(fftconvolve(ch42co2(time), ch4_inter_emissions),
time.size) * tstep
co2_atmos = np.resize(fftconvolve(CO2_AR5(time), co2),
time.size) * tstep
if decay == True:
rf = ch4_atmos * ch4_re + co2_atmos * co2_re
else:
rf = ch4_atmos * ch4_re
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return rf[fil]
def CH4_rf_cc(emission, years, tstep=0.01, kind='linear',
decay=True, emiss_type='sustained'):
"""Transforms an array of methane emissions into radiative forcing with user-defined
time-step, accounting for climate-carbon feedbacks.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
    tstep: time step to be used in the calculations
    kind: the type of interpolation to use; can be linear or cubic
    emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
    divided by tstep. This allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
gamma = (44.0/12.0) * 10**12
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
fch4 = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
ch4_inter_emissions = fch4(time)
ch4_atmos = np.resize(fftconvolve(CH4_AR5(time), ch4_inter_emissions),
time.size) * tstep
co2 = np.resize(fftconvolve(ch42co2(time), ch4_inter_emissions),
time.size) * tstep
co2_atmos = np.resize(fftconvolve(CO2_AR5(time), co2),
time.size) * tstep
cc_co2 = CH4_cc_tempforrf(emission, years) * gamma
cc_co2_atmos = np.resize(fftconvolve(CO2_AR5(time), cc_co2),
time.size) * tstep
if decay == True:
rf = ch4_atmos * ch4_re + (co2_atmos +cc_co2_atmos) * co2_re
else:
rf = ch4_atmos * ch4_re + (cc_co2_atmos) * co2_re
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return rf[fil]
def CH4_rate(emission, years, tstep=0.01, kind='linear', emiss_type='sustained'):
"""Transforms an array of methane emissions into radiative forcing with user-defined
time-step, accounting for climate-carbon feedbacks.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
: time step to be used in the calculations
kind: the type of interpolation to use; can be linear or cubic
emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
divided by . this allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
gamma = (44.0/12.0) * 10**12
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
fch4 = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
ch4_inter_emissions = fch4(time)
ch4_atmos = np.resize(fftconvolve(CH4_AR5(time), ch4_inter_emissions),
time.size) * tstep
co2 = np.resize(fftconvolve(ch42co2(time), ch4_inter_emissions),
time.size) * tstep
co2_atmos = np.resize(fftconvolve(CO2_AR5(time), co2),
time.size) * tstep
cc_co2 = CH4_cc_tempforrf(emission, years) * gamma
cc_co2_atmos = np.resize(fftconvolve(CO2_AR5(time), cc_co2),
time.size) * tstep
rf = ch4_atmos * ch4_re + (co2_atmos +cc_co2_atmos) * co2_re
dx = np.gradient(time)
rate = np.gradient(rf, dx)
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return rate[fil]
def CH4_crf(emission, years, tstep=0.01, kind='linear',
decay=True, emiss_type='sustained'):
"""Transforms an array of methane emissions into radiative forcing with user-defined
time-step.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
: time step to be used in the calculations
kind: the type of interpolation to use; can be linear or cubic
emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
divided by . this allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
fch4 = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
ch4_inter_emissions = fch4(time)
ch4_atmos = np.resize(fftconvolve(CH4_AR5(time), ch4_inter_emissions),
time.size) * tstep
co2 = np.resize(fftconvolve(ch42co2(time), ch4_inter_emissions),
time.size) * tstep
co2_atmos = np.resize(fftconvolve(CO2_AR5(time), co2),
time.size) * tstep
if decay == True:
rf = ch4_atmos * ch4_re + co2_atmos * co2_re
else:
rf = ch4_atmos * ch4_re
crf = cumtrapz(rf, dx = tstep, initial = 0)
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return crf[fil]
def CH4_crf_cc(emission, years, tstep=0.01, kind='linear',
decay=True, emiss_type='sustained'):
"""Transforms an array of methane emissions into radiative forcing with user-defined
time-step.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
: time step to be used in the calculations
kind: the type of interpolation to use; can be linear or cubic
emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
divided by . this allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
gamma = (44.0/12.0) * 10**12
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
fch4 = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
ch4_inter_emissions = fch4(time)
ch4_atmos = np.resize(fftconvolve(CH4_AR5(time), ch4_inter_emissions),
time.size) * tstep
co2 = np.resize(fftconvolve(ch42co2(time), ch4_inter_emissions),
time.size) * tstep
co2_atmos = np.resize(fftconvolve(CO2_AR5(time), co2),
time.size) * tstep
cc_co2 = CH4_cc_tempforrf(emission, years) * gamma
cc_co2_atmos = np.resize(fftconvolve(CO2_AR5(time), cc_co2),
time.size) * tstep
if decay == True:
rf = ch4_atmos * ch4_re + (co2_atmos +cc_co2_atmos) * co2_re
else:
rf = ch4_atmos * ch4_re + (cc_co2_atmos) * co2_re
crf = cumtrapz(rf, dx = tstep, initial = 0)
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return crf[fil]
def CH4_temp(emission, years, tstep=0.01, kind='linear', source='AR5',
decay=True, emiss_type='sustained'):
"""Transforms an array of methane emissions into temperature with user-defined
time-step. Default temperature IRF is from AR5, use 'Alt_low' or 'Alt_high'
for a sensitivity test.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
    tstep: time step to be used in the calculations
    kind: the type of interpolation to use; can be linear or cubic
    source: the source of parameters for the temperature IRF. default is AR5,
    'Alt', 'Alt_low', and 'Alt_high' are also options.
    decay: a boolean variable for if methane decay to CO2 should be included
    emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
    divided by tstep. This allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
f = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
ch4_inter_emissions = f(time)
ch4_atmos = np.resize(fftconvolve(CH4_AR5(time), ch4_inter_emissions),
time.size) * tstep
co2 = np.resize(fftconvolve(ch42co2(time), ch4_inter_emissions),
time.size) * tstep
co2_atmos = np.resize(fftconvolve(CO2_AR5(time), co2),
time.size) * tstep
if decay == True:
rf = ch4_atmos * ch4_re + co2_atmos * co2_re
else:
rf = ch4_atmos * ch4_re
if source == 'AR5':
temp = np.resize(fftconvolve(AR5_GTP(time), rf), time.size) * tstep
elif source == 'Alt':
temp = np.resize(fftconvolve(Alt_GTP(time), rf), time.size) * tstep
elif source == 'Alt_low':
temp = np.resize(fftconvolve(Alt_low_GTP(time), rf), time.size) * tstep
elif source == 'Alt_high':
temp = np.resize(fftconvolve(Alt_high_GTP(time), rf), time.size) * tstep
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return temp[fil]
def CH4_cc_tempforrf(emission, years, tstep=0.01, kind='linear', source='AR5',
decay=True, emiss_type='sustained'):
"""Transforms an array of methane emissions into temperature with user-defined
time-step. Default temperature IRF is from AR5, use 'Alt_low' or 'Alt_high'
for a sensitivity test.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
    tstep: time step to be used in the calculations
    kind: the type of interpolation to use; can be linear or cubic
    source: the source of parameters for the temperature IRF. default is AR5,
    'Alt', 'Alt_low', and 'Alt_high' are also options.
    decay: a boolean variable for if methane decay to CO2 should be included
    emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
    divided by tstep. This allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
f = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
ch4_inter_emissions = f(time)
ch4_atmos = np.resize(fftconvolve(CH4_AR5(time), ch4_inter_emissions),
time.size) * tstep
co2 = np.resize(fftconvolve(ch42co2(time), ch4_inter_emissions),
time.size) * tstep
co2_atmos = np.resize(fftconvolve(CO2_AR5(time), co2),
time.size) * tstep
if decay == True:
rf = ch4_atmos * ch4_re + co2_atmos * co2_re
else:
rf = ch4_atmos * ch4_re
if source == 'AR5':
temp = np.resize(fftconvolve(AR5_GTP(time), rf), time.size) * tstep
elif source == 'Alt':
temp = np.resize(fftconvolve(Alt_GTP(time), rf), time.size) * tstep
elif source == 'Alt_low':
temp = np.resize(fftconvolve(Alt_low_GTP(time), rf), time.size) * tstep
elif source == 'Alt_high':
temp = np.resize(fftconvolve(Alt_high_GTP(time), rf), time.size) * tstep
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return temp
def CH4_temp_cc(emission, years, tstep=0.01, kind='linear', source='AR5',
decay=True, emiss_type='sustained'):
"""Transforms an array of methane emissions into temperature with user-defined
time-step. Default temperature IRF is from AR5, use 'Alt_low' or 'Alt_high'
for a sensitivity test. Accounts for climate-carbon feedbacks.
emission: an array of emissions, should be same size as years
years: an array of years at which the emissions take place
    tstep: time step to be used in the calculations
    kind: the type of interpolation to use; can be linear or cubic
    source: the source of parameters for the temperature IRF. default is AR5,
    'Alt', 'Alt_low', and 'Alt_high' are also options.
    decay: a boolean variable for if methane decay to CO2 should be included
    emiss_type: 'sustained' or 'pulse' - if 'pulse', then the emission values are
    divided by tstep. This allows a pulse of 1 kg in the first value of the array to
represent a full kg of emission, and 1 kg in all values associated with the first
year to also represent a full kg when 'sustained'.
"""
gamma = (44.0/12.0) * 10**12
#emission is a series of emission numbers, years should match up with it
if min(years) > 0:
years = years - min(years)
if emiss_type == 'pulse':
emission /= tstep
end = max(years)
f = interp1d(years, emission, kind=kind)
    time = np.linspace(years[0], end, int(round(end/tstep)) + 1)
ch4_inter_emissions = f(time)
ch4_atmos = np.resize(fftconvolve(CH4_AR5(time), ch4_inter_emissions),
time.size) * tstep
co2 = np.resize(fftconvolve(ch42co2(time), ch4_inter_emissions),
time.size) * tstep
co2_atmos = np.resize(fftconvolve(CO2_AR5(time), co2),
time.size) * tstep
cc_co2 = CH4_cc_tempforrf(emission, years) * gamma
cc_co2_atmos = np.resize(fftconvolve(CO2_AR5(time), cc_co2),
time.size) * tstep
if decay == True:
rf = ch4_atmos * ch4_re + (co2_atmos + cc_co2_atmos) * co2_re
else:
rf = ch4_atmos * ch4_re + cc_co2_atmos * co2_re
if source == 'AR5':
temp = np.resize(fftconvolve(AR5_GTP(time), rf), time.size) * tstep
elif source == 'Alt':
temp = np.resize(fftconvolve(Alt_GTP(time), rf), time.size) * tstep
elif source == 'Alt_low':
temp = np.resize(fftconvolve(Alt_low_GTP(time), rf), time.size) * tstep
elif source == 'Alt_high':
temp = np.resize(fftconvolve(Alt_high_GTP(time), rf), time.size) * tstep
fil = np.zeros_like(time, dtype=bool)
for i in time:
if i == int(i):
            fil[int(round(i/tstep))] = True
return temp[fil]
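# End-to-end sketch (illustrative magnitudes only): temperature response, using the
# AR5 IRFs above, of a 1 kg pulse of CO2 versus a 1 kg pulse of CH4 emitted in year 0
# and followed out to year 100. The .copy() calls matter because 'pulse' handling
# rescales the emission array in place.
def _example_pulse_comparison():
    yrs = np.arange(0, 101)
    pulse = np.zeros(101)
    pulse[0] = 1.0  # 1 kg emitted in year 0
    t_co2 = CO2_temp(pulse.copy(), yrs, emiss_type='pulse')
    t_ch4 = CH4_temp(pulse.copy(), yrs, emiss_type='pulse')
    return yrs, t_co2, t_ch4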
| 41.016743
| 89
| 0.639417
|
dccba77e47dd5c2a8f9b631a6c099d0742c65293
| 7,802
|
py
|
Python
|
third_party/python/Lib/test/libregrtest/runtest_mp.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
third_party/python/Lib/test/libregrtest/runtest_mp.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
third_party/python/Lib/test/libregrtest/runtest_mp.py
|
appotry/cosmopolitan
|
af4687cc3f2331a23dc336183ab58fe001cda082
|
[
"ISC"
] | null | null | null |
import faulthandler
import json
import os
import queue
import sys
import time
import traceback
import types
from test import support
try:
import _thread
import threading
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from test.libregrtest.runtest import (
runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
format_test_result)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import format_duration
# Display the running tests if nothing happened last N seconds
PROGRESS_UPDATE = 30.0 # seconds
# If interrupted, display the wait progress every N seconds
WAIT_PROGRESS = 2.0 # seconds
def run_test_in_subprocess(testname, ns):
"""Run the given test in a subprocess with --worker-args.
ns is the option Namespace parsed from command-line arguments. regrtest
is invoked in a subprocess with the --worker-args argument; when the
subprocess exits, its return code, stdout and stderr are returned as a
3-tuple.
"""
from subprocess import Popen, PIPE
ns_dict = vars(ns)
worker_args = (ns_dict, testname)
worker_args = json.dumps(worker_args)
cmd = [sys.executable, *support.args_from_interpreter_flags(),
'-u', # Unbuffered stdout and stderr
'-m', 'test.regrtest',
'--worker-args', worker_args]
if ns.pgo:
cmd += ['--pgo']
# Running the child from the same working directory as regrtest's original
# invocation ensures that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(cmd,
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
with popen:
stdout, stderr = popen.communicate()
retcode = popen.wait()
return retcode, stdout, stderr
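# Sketch of the --worker-args encoding used above (and decoded in run_tests_worker
# below): the parent serializes (vars(ns), testname) to JSON; the worker rebuilds a
# SimpleNamespace from it. The namespace fields here are illustrative, not the full
# regrtest namespace.
def _worker_args_roundtrip_example():
    ns = types.SimpleNamespace(verbose=0, pgo=False, timeout=None)
    encoded = json.dumps((vars(ns), "test_os"))
    ns_dict, testname = json.loads(encoded)
    return types.SimpleNamespace(**ns_dict), testname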
def run_tests_worker(worker_args):
ns_dict, testname = json.loads(worker_args)
ns = types.SimpleNamespace(**ns_dict)
setup_tests(ns)
try:
result = runtest(ns, testname)
except KeyboardInterrupt:
result = INTERRUPTED, '', None
except BaseException as e:
traceback.print_exc()
        result = CHILD_ERROR, str(e), None  # keep the 3-item (result, time, xml) shape the parent unpacks
print() # Force a newline (just in case)
print(json.dumps(result), flush=True)
sys.exit(0)
# We do not use a generator so multiple threads can call next().
class MultiprocessIterator:
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests):
self.interrupted = False
self.lock = threading.Lock()
self.tests = tests
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.interrupted:
raise StopIteration('tests interrupted')
return next(self.tests)
class MultiprocessThread(threading.Thread):
def __init__(self, pending, output, ns):
super().__init__()
self.pending = pending
self.output = output
self.ns = ns
self.current_test = None
self.start_time = None
def _runtest(self):
try:
test = next(self.pending)
except StopIteration:
self.output.put((None, None, None, None))
return True
try:
self.start_time = time.monotonic()
self.current_test = test
retcode, stdout, stderr = run_test_in_subprocess(test, self.ns)
finally:
self.current_test = None
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode, None)
self.output.put((test, stdout.rstrip(), stderr.rstrip(),
result))
return False
stdout, _, result = stdout.strip().rpartition("\n")
if not result:
self.output.put((None, None, None, None))
return True
result = json.loads(result)
assert len(result) == 3, f"Invalid result tuple: {result!r}"
self.output.put((test, stdout.rstrip(), stderr.rstrip(),
result))
return False
def run(self):
try:
stop = False
while not stop:
stop = self._runtest()
except BaseException:
self.output.put((None, None, None, None))
raise
def run_tests_multiprocess(regrtest):
output = queue.Queue()
pending = MultiprocessIterator(regrtest.tests)
test_timeout = regrtest.ns.timeout
use_timeout = (test_timeout is not None)
workers = [MultiprocessThread(pending, output, regrtest.ns)
for i in range(regrtest.ns.use_mp)]
print("Run tests in parallel using %s child processes"
% len(workers))
for worker in workers:
worker.start()
def get_running(workers):
running = []
for worker in workers:
current_test = worker.current_test
if not current_test:
continue
dt = time.monotonic() - worker.start_time
if dt >= PROGRESS_MIN_TIME:
text = '%s (%s)' % (current_test, format_duration(dt))
running.append(text)
return running
finished = 0
test_index = 1
get_timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
try:
while finished < regrtest.ns.use_mp:
if use_timeout:
faulthandler.dump_traceback_later(test_timeout, exit=True)
try:
item = output.get(timeout=get_timeout)
except queue.Empty:
running = get_running(workers)
if running and not regrtest.ns.pgo:
print('running: %s' % ', '.join(running), flush=True)
continue
test, stdout, stderr, result = item
if test is None:
finished += 1
continue
regrtest.accumulate_result(test, result)
# Display progress
ok, test_time, xml_data = result
text = format_test_result(test, ok)
if (ok not in (CHILD_ERROR, INTERRUPTED)
and test_time >= PROGRESS_MIN_TIME
and not regrtest.ns.pgo):
text += ' (%s)' % format_duration(test_time)
elif ok == CHILD_ERROR:
text = '%s (%s)' % (text, test_time)
running = get_running(workers)
if running and not regrtest.ns.pgo:
text += ' -- running: %s' % ', '.join(running)
regrtest.display_progress(test_index, text)
# Copy stdout and stderr from the child process
if stdout:
print(stdout, flush=True)
if stderr and not regrtest.ns.pgo:
print(stderr, file=sys.stderr, flush=True)
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
test_index += 1
except KeyboardInterrupt:
regrtest.interrupted = True
pending.interrupted = True
print()
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
# If tests are interrupted, wait until tests complete
wait_start = time.monotonic()
while True:
running = [worker.current_test for worker in workers]
running = list(filter(bool, running))
if not running:
break
dt = time.monotonic() - wait_start
line = "Waiting for %s (%s tests)" % (', '.join(running), len(running))
if dt >= WAIT_PROGRESS:
line = "%s since %.0f sec" % (line, dt)
print(line, flush=True)
for worker in workers:
worker.join(WAIT_PROGRESS)
| 31.333333
| 79
| 0.59536
|
b863adacf420596c7df16c370c5b4fbce57208ad
| 429
|
py
|
Python
|
server/src/schemas/userfile.py
|
Sheerabth/blob-system
|
808f1591247fecace4cbd121053d79205096ced3
|
[
"MIT"
] | null | null | null |
server/src/schemas/userfile.py
|
Sheerabth/blob-system
|
808f1591247fecace4cbd121053d79205096ced3
|
[
"MIT"
] | null | null | null |
server/src/schemas/userfile.py
|
Sheerabth/blob-system
|
808f1591247fecace4cbd121053d79205096ced3
|
[
"MIT"
] | null | null | null |
from typing import Optional
from pydantic import BaseModel
from src.db.models import Permissions
class UserFileBaseSchema(BaseModel):
user_id: str
access_type: Permissions
class Config:
orm_mode = True
class UserFileSchema(UserFileBaseSchema):
file_id: str
class UserFileInfoSchema(UserFileSchema):
file: Optional[object]
class UserInfoFileSchema(UserFileSchema):
user: Optional[object]
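# Sketch only (made-up ids): with orm_mode enabled these models can be built from
# keyword arguments or from ORM rows via .from_orm(). Permissions is assumed to be
# an Enum (as its use as a field type suggests); its real member names live in
# src.db.models and are not assumed here.
def _example_user_file() -> UserFileSchema:
    return UserFileSchema(user_id="user-1", file_id="file-1",
                          access_type=list(Permissions)[0])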
| 17.16
| 41
| 0.762238
|
87c40e3638021744a10188b745babc8fa206ce8d
| 562
|
py
|
Python
|
backend/src/local_settings.py
|
oopaze/portifolio-josepedro
|
6f8f84290f81ee3bef4556da690e64fbcc8f8893
|
[
"MIT"
] | 1
|
2021-12-30T12:07:43.000Z
|
2021-12-30T12:07:43.000Z
|
backend/src/local_settings.py
|
oopaze/portifolio-josepedro-backend
|
6f8f84290f81ee3bef4556da690e64fbcc8f8893
|
[
"MIT"
] | 1
|
2021-11-04T04:50:31.000Z
|
2021-11-04T04:50:31.000Z
|
backend/src/local_settings.py
|
oopaze/portifolio-josepedro
|
6f8f84290f81ee3bef4556da690e64fbcc8f8893
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from dotenv import load_dotenv
BASE_DIR = Path(__file__).resolve().parent.parent
dotenv_path = BASE_DIR / '.env.dev'
load_dotenv(dotenv_path)
SITE_URL = os.getenv("SITE_URL", default='http://localhost:8000')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWD'),
'HOST': os.getenv('DB_HOST', 'localhost'),
        'PORT': os.getenv('DB_PORT', 5432)
}
}
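# For reference, a .env.dev along these lines satisfies the lookups above
# (every value is a placeholder, not a real credential):
#
#   SITE_URL=http://localhost:8000
#   DB_NAME=portfolio
#   DB_USER=postgres
#   DB_PASSWD=change-me
#   DB_HOST=localhost
#   DB_PORT=5432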
| 23.416667
| 65
| 0.642349
|
64cd8696d555e2600dc9d27d40cfcc53faef2066
| 5,263
|
py
|
Python
|
src/cfnlint/rules/parameters/Default.py
|
sthagen/aws-cloudformation-cfn-lint
|
8628b2bab208acb2ac7843d2cadf7b56252058f7
|
[
"MIT-0"
] | 445
|
2018-04-19T14:43:33.000Z
|
2019-03-01T11:00:21.000Z
|
src/cfnlint/rules/parameters/Default.py
|
sthagen/cfn-lint
|
80c8211eb028b374fdf547f21a8e218248dedc89
|
[
"MIT-0"
] | 464
|
2018-04-19T17:29:50.000Z
|
2019-03-01T14:20:19.000Z
|
src/cfnlint/rules/parameters/Default.py
|
sthagen/cfn-lint
|
80c8211eb028b374fdf547f21a8e218248dedc89
|
[
"MIT-0"
] | 93
|
2018-04-19T14:55:35.000Z
|
2019-03-01T03:26:47.000Z
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import re
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class Default(CloudFormationLintRule):
"""Check if Parameters are configured correctly"""
id = 'E2015'
shortdesc = 'Default value is within parameter constraints'
description = 'Making sure the parameters have a default value inside AllowedValues, MinValue, MaxValue, AllowedPattern'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'
tags = ['parameters']
def check_allowed_pattern(self, allowed_value, allowed_pattern, path):
"""
Check allowed value against allowed pattern
"""
message = 'Default should be allowed by AllowedPattern'
try:
if not re.match(allowed_pattern, str(allowed_value)):
return([RuleMatch(path, message)])
except re.error as ex:
self.logger.debug('Regex pattern "%s" isn\'t supported by Python: %s',
allowed_pattern, ex)
return []
def check_min_value(self, allowed_value, min_value, path):
"""
Check allowed value against min value
"""
message = 'Default should be equal to or higher than MinValue'
if isinstance(allowed_value, int) and isinstance(min_value, int):
if allowed_value < min_value:
return([RuleMatch(path, message)])
return []
def check_max_value(self, allowed_value, max_value, path):
"""
Check allowed value against max value
"""
message = 'Default should be less than or equal to MaxValue'
if isinstance(allowed_value, int) and isinstance(max_value, int):
if allowed_value > max_value:
return([RuleMatch(path, message)])
return []
def check_allowed_values(self, allowed_value, allowed_values, path):
"""
Check allowed value against allowed values
"""
message = 'Default should be a value within AllowedValues'
if allowed_value not in allowed_values:
return([RuleMatch(path, message)])
return []
def check_min_length(self, allowed_value, min_length, path):
"""
Check allowed value against MinLength
"""
message = 'Default should have a length above or equal to MinLength'
value = allowed_value if isinstance(allowed_value, str) else str(allowed_value)
if isinstance(min_length, int):
if len(value) < min_length:
return([RuleMatch(path, message)])
return []
def check_max_length(self, allowed_value, max_length, path):
"""
Check allowed value against MaxLength
"""
message = 'Default should have a length below or equal to MaxLength'
value = allowed_value if isinstance(allowed_value, str) else str(allowed_value)
if isinstance(max_length, int):
if len(value) > max_length:
return([RuleMatch(path, message)])
return []
def match(self, cfn):
matches = []
for paramname, paramvalue in cfn.get_parameters_valid().items():
default_value = paramvalue.get('Default')
if default_value is not None:
path = ['Parameters', paramname, 'Default']
allowed_pattern = paramvalue.get('AllowedPattern')
if allowed_pattern:
matches.extend(
self.check_allowed_pattern(
default_value, allowed_pattern, path
)
)
min_value = paramvalue.get('MinValue')
if min_value:
matches.extend(
self.check_min_value(
default_value, min_value, path
)
)
max_value = paramvalue.get('MaxValue')
if max_value is not None:
matches.extend(
self.check_max_value(
default_value, max_value, path
)
)
allowed_values = paramvalue.get('AllowedValues')
if allowed_values:
matches.extend(
self.check_allowed_values(
default_value, allowed_values, path
)
)
min_length = paramvalue.get('MinLength')
if min_length is not None:
matches.extend(
self.check_min_length(
default_value, min_length, path
)
)
max_length = paramvalue.get('MaxLength')
if max_length is not None:
matches.extend(
self.check_max_length(
default_value, max_length, path
)
)
return matches
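# Sketch of what this rule flags (the template snippet is illustrative):
#
#   Parameters:
#     Env:
#       Type: String
#       AllowedValues: [dev, prod]
#       Default: staging        # E2015: Default should be a value within AllowedValues
#
# The per-constraint helpers can also be exercised directly; the parameter name,
# values and path below are made up for the example.
def _e2015_example():
    rule = Default()
    return rule.check_allowed_values('staging', ['dev', 'prod'],
                                     ['Parameters', 'Env', 'Default'])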
| 36.548611
| 124
| 0.551777
|
4944d4ae9771e03d4307d4fd90963f258c099ab8
| 8,044
|
py
|
Python
|
cacheTraceAnalysis/core/traceStat.py
|
Thesys-lab/cacheWorkloadAnalysisOSDI20
|
cfc5bbb5c8d909571546c78c247561c9db449469
|
[
"Apache-2.0"
] | 6
|
2020-11-12T07:51:02.000Z
|
2022-03-27T20:20:01.000Z
|
cacheTraceAnalysis/core/traceStat.py
|
Thesys-lab/InMemoryCachingWorkloadAnalysis
|
5f6f9f7e29a164478f3fc28eb64c170bbbafdec7
|
[
"Apache-2.0"
] | null | null | null |
cacheTraceAnalysis/core/traceStat.py
|
Thesys-lab/InMemoryCachingWorkloadAnalysis
|
5f6f9f7e29a164478f3fc28eb64c170bbbafdec7
|
[
"Apache-2.0"
] | 1
|
2021-12-31T01:16:09.000Z
|
2021-12-31T01:16:09.000Z
|
# coding=utf-8
"""
this module calculates the stat of the trace
"""
from pprint import pformat
from collections import defaultdict
class TraceStat:
"""
this class provides statistics calculation of a given trace
"""
def __init__(self, reader, top_N_popular=8):
self.reader = reader
self.top_N_popular = top_N_popular
# stat data representation:
# 0: not initialized,
# -1: error while obtaining data
self.num_of_requests = 0
self.num_of_uniq_obj = 0
self.cold_miss_ratio = 0
self.top_N_popular_obj = []
self.num_one_hit_wonders = 0
self.freq_mean = 0
self.time_span = 0
self.ttl_dict = defaultdict(int)
self.top_ttl_dict = {}
self.key_size_mean_weighted_by_req = 0
self.value_size_mean_weighted_by_req = 0
self.obj_size_mean_weighted_by_req = 0
self.req_size_mean_weighted_by_req = 0
self.key_size_mean_weighted_by_obj = 0
self.value_size_mean_weighted_by_obj = 0
self.obj_size_mean_weighted_by_obj = 0
self.req_size_mean_weighted_by_obj = 0
self.op_ratio = defaultdict(int)
self._calculate()
def _calculate(self):
"""
calculate all the stat using the reader
:return:
"""
req_cnt = defaultdict(int)
sum_key_size_req, sum_value_size_req, sum_obj_size_req, sum_req_size_req = 0, 0, 0, 0
sum_key_size_obj, sum_value_size_obj, sum_obj_size_obj, sum_req_size_obj = 0, 0, 0, 0
first_req = next(self.reader)
n_nonzero_sz_obj = 0
for req in self.reader:
if req.req_size > 0:
sum_key_size_req += req.key_size * req.cnt
sum_value_size_req += req.value_size * req.cnt
sum_obj_size_req += req.obj_size * req.cnt
sum_req_size_req += req.req_size * req.cnt
if req.obj_id not in req_cnt:
sum_key_size_obj += req.key_size
sum_value_size_obj += req.value_size
sum_obj_size_obj += req.obj_size
sum_req_size_obj += req.req_size
n_nonzero_sz_obj += 1
if req.op:
self.op_ratio[req.op] += 1
                if req.op in ("set", "add", "cas", "replace", "append", "prepend"):
ttl = req.ttl
# round up
if abs(ttl//10*10 - ttl) <= 2:
ttl = ttl // 10 * 10
if ttl < 3600:
ttl = "{}s".format(ttl)
elif 24*3600 > ttl >= 3600:
ttl = "{:.1f}h".format(ttl/3600)
elif ttl >= 24*3600:
ttl = "{:.1f}d".format(ttl/3600/24)
ttl = ttl.replace(".0", "")
self.ttl_dict[ttl] += 1
req_cnt[req.obj_id] += req.cnt
last_req = req
self.reader.reset()
self.num_of_uniq_obj = len(req_cnt)
self.num_of_requests = sum(req_cnt.values())
self.cold_miss_ratio = self.num_of_uniq_obj / self.num_of_requests
self.time_span = last_req.real_time - first_req.real_time
if n_nonzero_sz_obj == 0:
print("all requests size 0")
else:
self.key_size_mean_weighted_by_req = sum_key_size_req/self.num_of_requests
self.value_size_mean_weighted_by_req = sum_value_size_req/self.num_of_requests
self.obj_size_mean_weighted_by_req = sum_obj_size_req/self.num_of_requests
self.req_size_mean_weighted_by_req = sum_req_size_req/self.num_of_requests
self.key_size_mean_weighted_by_obj = sum_key_size_obj/n_nonzero_sz_obj
self.value_size_mean_weighted_by_obj = sum_value_size_obj/n_nonzero_sz_obj
self.obj_size_mean_weighted_by_obj = sum_obj_size_obj/n_nonzero_sz_obj
self.req_size_mean_weighted_by_obj = sum_req_size_obj/n_nonzero_sz_obj
for op, cnt in self.op_ratio.items():
self.op_ratio[op] = cnt/self.num_of_requests
# find the top ttl used in the workload
total_ttl_cnt = sum(self.ttl_dict.values())
for ttl, cnt in sorted(self.ttl_dict.items(), key=lambda x:-x[1]):
self.top_ttl_dict[ttl] = cnt/total_ttl_cnt
if len(self.top_ttl_dict) >= 10:
break
# l is a list of (obj, freq) in descending order
l = sorted(req_cnt.items(), key=lambda x: x[1], reverse=True)
self.top_N_popular_obj = l[:self.top_N_popular]
# count one-hit-wonders
for i in range(len(l)-1, -1, -1):
if l[i][1] == 1:
self.num_one_hit_wonders += 1
else:
break
        self.freq_mean = self.num_of_requests / float(self.num_of_uniq_obj)
def _gen_stat_str(self):
"""
        generate the stats string
"""
s = "dat: {}\nnumber of requests: {}\nnumber of uniq obj/blocks: {}\n" \
"cold miss ratio: {:.4f}\ntop N popular (obj, num of requests): \n{}\n" \
"number of obj/block accessed only once: {} ({:.4f})\n" \
"weighted_by_req: obj_size_mean {:.0f}, req_size_mean {:.0f}, key_size_mean {:.0f}, value_size_mean {:.0f}\n"\
"weighted_by_obj: obj_size_mean {:.0f}, req_size_mean {:.0f}, key_size_mean {:.0f}, value_size_mean {:.0f}\n"\
"frequency mean: {:.2f}\n".format(self.reader.trace_path,
self.num_of_requests, self.num_of_uniq_obj,
self.cold_miss_ratio, pformat(self.top_N_popular_obj),
self.num_one_hit_wonders, self.num_one_hit_wonders/self.num_of_uniq_obj,
self.obj_size_mean_weighted_by_req, self.req_size_mean_weighted_by_req,
self.key_size_mean_weighted_by_req, self.value_size_mean_weighted_by_req,
self.obj_size_mean_weighted_by_obj, self.req_size_mean_weighted_by_obj,
self.key_size_mean_weighted_by_obj, self.value_size_mean_weighted_by_obj,
self.freq_mean)
if self.time_span:
s += "time span: {} ({:.2f} day)\n".format(self.time_span, self.time_span/3600/24)
if len(self.op_ratio):
op_ratio_str = "op: " + ", ".join(["{}:{:.4f}".format(op, ratio) for op, ratio in self.op_ratio.items()])
s += op_ratio_str + "\n"
# s += "op ratio: {}\n".format(pformat(self.op_ratio))
if len(self.top_ttl_dict):
s += "ttl: {} ttls used, ".format(len(self.ttl_dict)) + ", ".join(["{}:{:.4f}".format(ttl, ratio) for ttl, ratio in self.top_ttl_dict.items() if ratio >= 0.01])
return s
def _gen_stat_json(self):
raise RuntimeError("not implemented")
def get_stat(self, return_format="str"):
"""
return stat in the format of string or tuple
:param return_format:
:return:
"""
if return_format == "str":
return self._gen_stat_str()
elif return_format == "tuple":
return (self.num_of_requests, self.num_of_uniq_obj, self.cold_miss_ratio, self.top_N_popular_obj,
self.num_one_hit_wonders, self.freq_mean, self.time_span)
elif return_format == "dict":
d = self.__dict__.copy()
elif return_format == "json":
return self._gen_json()
else:
raise RuntimeError("unknown return format, return string instead")
return s
def get_top_N(self):
return self.top_N_popular_obj
def __repr__(self):
return self.get_stat()
def __str__(self):
return self.get_stat()
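# Minimal runnable sketch of the calling convention: a stand-in reader exposing just
# the attributes TraceStat touches (obj_id, cnt, real_time, the four size fields, op,
# ttl, plus reset() and trace_path). Real readers come from elsewhere in this package;
# the request values below are made up.
class _FakeReq:
    def __init__(self, obj_id, t):
        self.obj_id, self.real_time, self.cnt = obj_id, t, 1
        self.key_size, self.value_size = 8, 56
        self.obj_size = self.key_size + self.value_size
        self.req_size = self.obj_size
        self.op, self.ttl = "get", 0
class _FakeReader:
    trace_path = "in-memory-example"
    def __init__(self):
        self._reqs = [_FakeReq(i % 4, i) for i in range(16)]
        self._pos = 0
    def __iter__(self):
        return self
    def __next__(self):
        if self._pos >= len(self._reqs):
            raise StopIteration
        req = self._reqs[self._pos]
        self._pos += 1
        return req
    def reset(self):
        self._pos = 0
# Usage: stat = TraceStat(_FakeReader()); print(stat.get_stat())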
| 38.304762
| 172
| 0.570985
|
663ed3c36ba367852d899279a7e335bb7dad02fc
| 3,133
|
py
|
Python
|
copula/bayes_birth_only_frank.py
|
PetropoulakisPanagiotis/copula
|
d6ff9751539e948f88dd529f4f4fe8f2d86c05bd
|
[
"MIT"
] | 8
|
2020-04-09T14:20:49.000Z
|
2021-12-24T19:17:58.000Z
|
copula/bayes_birth_only_frank.py
|
PetropoulakisPanagiotis/copula
|
d6ff9751539e948f88dd529f4f4fe8f2d86c05bd
|
[
"MIT"
] | null | null | null |
copula/bayes_birth_only_frank.py
|
PetropoulakisPanagiotis/copula
|
d6ff9751539e948f88dd529f4f4fe8f2d86c05bd
|
[
"MIT"
] | 1
|
2020-04-09T13:59:32.000Z
|
2020-04-09T13:59:32.000Z
|
from __future__ import division
from allclayton import allclayton
from allgumbel import allgumbel
from allfrank import allfrank
from pandas import read_excel
from bayes_birth_frank import *
import numpy as np
def bayes_birth_only_frank(currentModel, newModel, kn, u, v, s, q, Q, zita, chain):
current = np.sort(currentModel)
new = np.sort(newModel)
t2 = new[new != 0]
min_new = np.min(t2)
max_new = np.max(t2)
L = len(u)
l = len(current)
ss = -1
R = 1/2
if not np.any(current):
if(s[1] == 2 and s[2] == 2):
result1 = allfrank(u[:max_new], v[:max_new])
result2 = allfrank(u[max_new:L], v[max_new:L])
R = R * 3
else:
if(s[1] == 1 and s[2] == 2):
result1 = allclayton(u[:max_new], v[:max_new])
result2 = allfrank(u[max_new:L], v[max_new:L])
R = R * 3/2
else:
if(s[1] == 2 and s[2] == 1):
result1 = allfrank(u[:max_new], v[:max_new])
result2 = allclayton(u[max_new:L], v[max_new:L])
R = R * 3/2
else:
if(s[1] == 2 and s[2] == 3):
result1 = allfrank(u[:max_new], v[:max_new])
result2 = allgumbel(u[max_new:L], v[max_new:L])
R = R * 3/2
else:
if(s[1] == 3 and s[2] == 2):
result1 = allgumbel(u[:max_new], v[:max_new])
result2 = allfrank(u[max_new:L], v[max_new:L])
R = R * 3/2
resultOld = allfrank(u, v)
BFu = result1["BFu"] + result2["BFu"] - resultOld["BFu"]
if BFu.imag:
ss = -2
U2 = np.random.uniform(low=np.nextafter(0.0, 1.0))
if (np.log(U2) < min(0, ((zita ** (chain - 1)) * BFu) + np.log(R))) and not BFu.imag:
new_model = new
rejected = current
QQ = Q
w = 37
else:
new_model = current
rejected = new
QQ = q
w = 38
result = {"new_model": new_model, "rejected": rejected, "w": w, "QQ": QQ, "ss": ss}
else:
result = bayes_birth_frank(currentModel, newModel, kn, u, v, s, q, Q, zita, chain)
return result
# Test #
if __name__ == "__main__":
df = read_excel("../data/artificial_data.xlsx", sheet_name='Sheet1')
u = []
v = []
for index, row in df.iterrows():
u.append([float(row['u'])])
v.append([float(row['v'])])
u = np.asarray(u, dtype=np.float32)
v = np.asarray(v, dtype=np.float32)
dist = 30
numbrk = 5
kn = 137
s = [2, 1, 2]
q = [1, 2, 2, 3, 3, 3]
Q = [ 1, 2, 3, 2, 3, 3]
zita = 1
chain = 1
currentModel = np.zeros(numbrk, dtype=int)
newModel = np.zeros(numbrk, dtype=int)
newModel[1] = 99
newModel[2] = 137
newModel[3] = 180
newModel[4] = 250
result = bayes_birth_only_frank(currentModel, newModel, kn, u, v, s, q, Q, zita, chain)
print(result)
| 29.009259
| 95
| 0.487392
|
fc90fb47e1d78ff00bce415c2679c28db7839f64
| 10,277
|
py
|
Python
|
bw2io/strategies/ecospold2.py
|
mfastudillo/brightway2-io
|
dc383ddb6003a46e78259aeb7f87b9d80965d689
|
[
"BSD-3-Clause"
] | null | null | null |
bw2io/strategies/ecospold2.py
|
mfastudillo/brightway2-io
|
dc383ddb6003a46e78259aeb7f87b9d80965d689
|
[
"BSD-3-Clause"
] | null | null | null |
bw2io/strategies/ecospold2.py
|
mfastudillo/brightway2-io
|
dc383ddb6003a46e78259aeb7f87b9d80965d689
|
[
"BSD-3-Clause"
] | null | null | null |
from .migrations import migrate_exchanges, migrations
from ..utils import format_for_logging, es2_activity_hash
from bw2data import Database
from bw2data.logs import get_io_logger, close_log
from stats_arrays import (
LognormalUncertainty,
UndefinedUncertainty,
)
import math
import warnings
def link_biosphere_by_flow_uuid(db, biosphere="biosphere3"):
biosphere_codes = {x["code"] for x in Database(biosphere)}
for ds in db:
for exc in ds.get("exchanges", []):
if (
exc.get("type") == "biosphere"
and exc.get("flow")
and exc.get("flow") in biosphere_codes
):
exc["input"] = (biosphere, exc.get("flow"))
return db
def remove_zero_amount_coproducts(db):
"""Remove coproducts with zero production amounts from ``exchanges``"""
for ds in db:
ds[u"exchanges"] = [
exc
for exc in ds["exchanges"]
if (exc["type"] != "production" or exc["amount"])
]
return db
def remove_zero_amount_inputs_with_no_activity(db):
"""Remove technosphere exchanges with amount of zero and no uncertainty.
Input exchanges with zero amounts are the result of the ecoinvent linking algorithm, and can be safely discarded."""
for ds in db:
ds[u"exchanges"] = [
exc
for exc in ds["exchanges"]
if not (
exc["uncertainty type"] == UndefinedUncertainty.id
and exc["amount"] == 0
and exc["type"] == "technosphere"
)
]
return db
def remove_unnamed_parameters(db):
"""Remove parameters which have no name. They can't be used in formulas or referenced."""
for ds in db:
if "parameters" in ds:
ds["parameters"] = {
key: value
for key, value in ds["parameters"].items()
if not value.get("unnamed")
}
return db
def es2_assign_only_product_with_amount_as_reference_product(db):
"""If a multioutput process has one product with a non-zero amount, assign that product as reference product.
This is by default called after ``remove_zero_amount_coproducts``, which will delete the zero-amount coproducts in any case. However, we still keep the zero-amount logic in case people want to keep all coproducts."""
for ds in db:
amounted = [
prod
for prod in ds["exchanges"]
if prod["type"] == "production" and prod["amount"]
]
# OK if it overwrites existing reference product; need flow as well
if len(amounted) == 1:
ds[u"reference product"] = amounted[0]["name"]
ds[u"flow"] = amounted[0][u"flow"]
if not ds.get("unit"):
ds[u"unit"] = amounted[0]["unit"]
ds[u"production amount"] = amounted[0]["amount"]
return db
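# Illustrative sketch (not in the original module; the field values are made
# up): for a dataset whose exchanges contain exactly one non-zero production,
#   {"type": "production", "name": "electricity", "unit": "kWh",
#    "flow": "f1", "amount": 1.0},
# the strategy above sets ds["reference product"] = "electricity",
# ds["flow"] = "f1", ds["unit"] = "kWh" (if unset) and
# ds["production amount"] = 1.0.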
def assign_single_product_as_activity(db):
for ds in db:
prod_exchanges = [
exc for exc in ds.get("exchanges") if exc["type"] == "production"
]
# raise ValueError
if len(prod_exchanges) == 1:
prod_exchanges[0]["activity"] = ds["activity"]
return db
def create_composite_code(db):
"""Create composite code from activity and flow names"""
for ds in db:
ds[u"code"] = es2_activity_hash(ds["activity"], ds["flow"])
return db
def link_internal_technosphere_by_composite_code(db):
"""Link internal technosphere inputs by ``code``.
Only links to process datasets actually in the database document."""
candidates = {ds["code"] for ds in db}
for ds in db:
for exc in ds.get("exchanges", []):
if exc["type"] in {
"technosphere",
"production",
"substitution",
} and exc.get("activity"):
key = es2_activity_hash(exc["activity"], exc["flow"])
if key in candidates:
exc[u"input"] = (ds["database"], key)
return db
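# Sketch of the linking rule above (comment added; the dict values are
# placeholders): an exchange {"type": "technosphere", "activity": a, "flow": f}
# is linked as
#   exc["input"] = (ds["database"], es2_activity_hash(a, f))
# only when a dataset carrying that composite code is present in the imported
# data, so links never point outside the documents being imported.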
def delete_exchanges_missing_activity(db):
"""Delete exchanges that weren't linked correctly by ecoinvent.
These exchanges are missing the "activityLinkId" attribute, and the flow they want to consume is not produced as the reference product of any activity. See the `known data issues <http://www.ecoinvent.org/database/ecoinvent-version-3/reports-of-changes/known-data-issues/>`__ report.
"""
log, logfile = get_io_logger("Ecospold2-import-error")
count = 0
for ds in db:
exchanges = ds.get("exchanges", [])
if not exchanges:
continue
skip = []
for exc in exchanges:
if exc.get("input"):
continue
if not exc.get("activity") and exc["type"] in {
"technosphere",
"production",
"substitution",
}:
log.critical(
u"Purging unlinked exchange:\nFilename: {}\n{}".format(
ds[u"filename"], format_for_logging(exc)
)
)
count += 1
skip.append(exc)
ds[u"exchanges"] = [exc for exc in exchanges if exc not in skip]
close_log(log)
if count:
print(
(
u"{} exchanges couldn't be linked and were deleted. See the "
u"logfile for details:\n\t{}"
).format(count, logfile)
)
return db
def delete_ghost_exchanges(db):
"""Delete technosphere which can't be linked due to ecoinvent errors.
A ghost exchange is one which links to a combination of *activity* and *flow* which aren't provided in the database."""
log, logfile = get_io_logger("Ecospold2-import-error")
count = 0
for ds in db:
exchanges = ds.get("exchanges", [])
if not exchanges:
continue
skip = []
for exc in exchanges:
if exc.get("input") or exc.get("type") != "technosphere":
continue
log.critical(
u"Purging unlinked exchange:\nFilename: {}\n{}".format(
ds[u"filename"], format_for_logging(exc)
)
)
count += 1
skip.append(exc)
ds[u"exchanges"] = [exc for exc in exchanges if exc not in skip]
close_log(log)
if count:
print(
(
u"{} exchanges couldn't be linked and were deleted. See the "
u"logfile for details:\n\t{}"
).format(count, logfile)
)
return db
def remove_uncertainty_from_negative_loss_exchanges(db):
"""Remove uncertainty from negative lognormal exchanges.
There are 15699 of these in ecoinvent 3.3 cutoff.
The basic uncertainty and pedigree matrix are applied rather blindly,
    and this can produce strange net production values. It makes much more
sense to assume that these loss factors are static.
Only applies to exchanges which decrease net production.
"""
for ds in db:
production_names = {
exc["name"]
for exc in ds.get("exchanges", [])
if exc["type"] == "production"
}
for exc in ds.get("exchanges", []):
if (
exc["amount"] < 0
and exc["uncertainty type"] == LognormalUncertainty.id
and exc["name"] in production_names
):
exc["uncertainty type"] = UndefinedUncertainty.id
exc["loc"] = exc["amount"]
del exc["scale"]
return db
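# Illustrative sketch (values invented): a loss exchange such as
#   {"name": "electricity", "type": "technosphere", "amount": -0.02,
#    "uncertainty type": LognormalUncertainty.id, "scale": 0.1, ...}
# in a dataset that also produces "electricity" comes out of the strategy
# above as
#   {"uncertainty type": UndefinedUncertainty.id, "loc": -0.02, ...}
# i.e. the loss factor becomes a static value and "scale" is removed.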
def set_lognormal_loc_value(db):
"""Make sure ``loc`` value is correct for lognormal uncertainty distributions"""
for ds in db:
for exc in ds.get("exchanges", []):
if exc["uncertainty type"] == LognormalUncertainty.id:
exc["loc"] = math.log(abs(exc["amount"]))
return db
def fix_unreasonably_high_lognormal_uncertainties(db, cutoff=2.5, replacement=0.25):
"""Fix unreasonably high uncertainty values.
With the default cutoff value of 2.5 and a median of 1, the 95% confidence
    interval has a high-to-low ratio of roughly 20,000."""
for ds in db:
for exc in ds.get("exchanges", []):
if exc["uncertainty type"] == LognormalUncertainty.id:
if exc["scale"] > cutoff:
exc["scale"] = replacement
return db
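# Quick numerical check of the docstring's claim (a sketch, not original code):
# for a lognormal with shape parameter sigma, the central 95% interval runs
# from exp(-1.96 * sigma) to exp(+1.96 * sigma) times the median, so the
# high-to-low ratio is exp(2 * 1.96 * sigma); with sigma = 2.5 that is
# math.exp(2 * 1.96 * 2.5) ≈ 1.8e4, i.e. roughly 20,000.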
def fix_ecoinvent_flows_pre35(db):
if "fix-ecoinvent-flows-pre-35" in migrations:
return migrate_exchanges(db, "fix-ecoinvent-flows-pre-35")
else:
warnings.warn(
(
"Skipping migration 'fix-ecoinvent-flows-pre-35' "
"because it isn't installed"
)
)
return db
def drop_temporary_outdated_biosphere_flows(db):
"""Drop biosphere exchanges which aren't used and are outdated"""
names = {
"Fluorene_temp",
"Fluoranthene_temp",
"Dibenz(a,h)anthracene_temp",
"Benzo(k)fluoranthene_temp",
"Benzo(ghi)perylene_temp",
"Benzo(b)fluoranthene_temp",
"Benzo(a)anthracene_temp",
"Acenaphthylene_temp",
"Chrysene_temp",
"Pyrene_temp",
"Phenanthrene_temp",
"Indeno(1,2,3-c,d)pyrene_temp",
}
for ds in db:
ds["exchanges"] = [
obj
for obj in ds["exchanges"]
if not (obj.get("name") in names and obj.get("type") == "biosphere")
]
return db
def add_cpc_classification_from_single_reference_product(db):
def has_cpc(exc):
return (
"classifications" in exc
and "CPC" in exc["classifications"]
and exc["classifications"]["CPC"]
)
for ds in db:
assert "classifications" in ds
products = [exc for exc in ds["exchanges"] if exc["type"] == "production"]
if len(products) == 1 and has_cpc(products[0]):
ds["classifications"].append(
("CPC", products[0]["classifications"]["CPC"][0])
)
return db
def delete_none_synonyms(db):
for ds in db:
ds["synonyms"] = [s for s in ds["synonyms"] if s is not None]
return db
| 33.366883
| 287
| 0.5776
|