hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acea023013d681c9b5ace9a2d436f2384e26a074 | 16,425 | py | Python | sdk/python/pulumi_azure_native/sql/v20201101preview/instance_failover_group.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/sql/v20201101preview/instance_failover_group.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/sql/v20201101preview/instance_failover_group.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['InstanceFailoverGroupArgs', 'InstanceFailoverGroup']
@pulumi.input_type
class InstanceFailoverGroupArgs:
    # Generated by the Pulumi SDK generator (see file header); the
    # @pulumi.input_type decorator wires the properties below into the
    # engine's input-property machinery, so names must match the getters.
    def __init__(__self__, *,
                 location_name: pulumi.Input[str],
                 managed_instance_pairs: pulumi.Input[Sequence[pulumi.Input['ManagedInstancePairInfoArgs']]],
                 partner_regions: pulumi.Input[Sequence[pulumi.Input['PartnerRegionInfoArgs']]],
                 read_write_endpoint: pulumi.Input['InstanceFailoverGroupReadWriteEndpointArgs'],
                 resource_group_name: pulumi.Input[str],
                 failover_group_name: Optional[pulumi.Input[str]] = None,
                 read_only_endpoint: Optional[pulumi.Input['InstanceFailoverGroupReadOnlyEndpointArgs']] = None):
        """
        The set of arguments for constructing a InstanceFailoverGroup resource.
        :param pulumi.Input[str] location_name: The name of the region where the resource is located.
        :param pulumi.Input[Sequence[pulumi.Input['ManagedInstancePairInfoArgs']]] managed_instance_pairs: List of managed instance pairs in the failover group.
        :param pulumi.Input[Sequence[pulumi.Input['PartnerRegionInfoArgs']]] partner_regions: Partner region information for the failover group.
        :param pulumi.Input['InstanceFailoverGroupReadWriteEndpointArgs'] read_write_endpoint: Read-write endpoint of the failover group instance.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] failover_group_name: The name of the failover group.
        :param pulumi.Input['InstanceFailoverGroupReadOnlyEndpointArgs'] read_only_endpoint: Read-only endpoint of the failover group instance.
        """
        # Required inputs are always recorded.
        pulumi.set(__self__, "location_name", location_name)
        pulumi.set(__self__, "managed_instance_pairs", managed_instance_pairs)
        pulumi.set(__self__, "partner_regions", partner_regions)
        pulumi.set(__self__, "read_write_endpoint", read_write_endpoint)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional inputs are only recorded when explicitly provided.
        if failover_group_name is not None:
            pulumi.set(__self__, "failover_group_name", failover_group_name)
        if read_only_endpoint is not None:
            pulumi.set(__self__, "read_only_endpoint", read_only_endpoint)
    @property
    @pulumi.getter(name="locationName")
    def location_name(self) -> pulumi.Input[str]:
        """
        The name of the region where the resource is located.
        """
        return pulumi.get(self, "location_name")
    @location_name.setter
    def location_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "location_name", value)
    @property
    @pulumi.getter(name="managedInstancePairs")
    def managed_instance_pairs(self) -> pulumi.Input[Sequence[pulumi.Input['ManagedInstancePairInfoArgs']]]:
        """
        List of managed instance pairs in the failover group.
        """
        return pulumi.get(self, "managed_instance_pairs")
    @managed_instance_pairs.setter
    def managed_instance_pairs(self, value: pulumi.Input[Sequence[pulumi.Input['ManagedInstancePairInfoArgs']]]):
        pulumi.set(self, "managed_instance_pairs", value)
    @property
    @pulumi.getter(name="partnerRegions")
    def partner_regions(self) -> pulumi.Input[Sequence[pulumi.Input['PartnerRegionInfoArgs']]]:
        """
        Partner region information for the failover group.
        """
        return pulumi.get(self, "partner_regions")
    @partner_regions.setter
    def partner_regions(self, value: pulumi.Input[Sequence[pulumi.Input['PartnerRegionInfoArgs']]]):
        pulumi.set(self, "partner_regions", value)
    @property
    @pulumi.getter(name="readWriteEndpoint")
    def read_write_endpoint(self) -> pulumi.Input['InstanceFailoverGroupReadWriteEndpointArgs']:
        """
        Read-write endpoint of the failover group instance.
        """
        return pulumi.get(self, "read_write_endpoint")
    @read_write_endpoint.setter
    def read_write_endpoint(self, value: pulumi.Input['InstanceFailoverGroupReadWriteEndpointArgs']):
        pulumi.set(self, "read_write_endpoint", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="failoverGroupName")
    def failover_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the failover group.
        """
        return pulumi.get(self, "failover_group_name")
    @failover_group_name.setter
    def failover_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "failover_group_name", value)
    @property
    @pulumi.getter(name="readOnlyEndpoint")
    def read_only_endpoint(self) -> Optional[pulumi.Input['InstanceFailoverGroupReadOnlyEndpointArgs']]:
        """
        Read-only endpoint of the failover group instance.
        """
        return pulumi.get(self, "read_only_endpoint")
    @read_only_endpoint.setter
    def read_only_endpoint(self, value: Optional[pulumi.Input['InstanceFailoverGroupReadOnlyEndpointArgs']]):
        pulumi.set(self, "read_only_endpoint", value)
class InstanceFailoverGroup(pulumi.CustomResource):
    """An instance failover group (azure-native:sql/v20201101preview)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 failover_group_name: Optional[pulumi.Input[str]] = None,
                 location_name: Optional[pulumi.Input[str]] = None,
                 managed_instance_pairs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedInstancePairInfoArgs']]]]] = None,
                 partner_regions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PartnerRegionInfoArgs']]]]] = None,
                 read_only_endpoint: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverGroupReadOnlyEndpointArgs']]] = None,
                 read_write_endpoint: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverGroupReadWriteEndpointArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        An instance failover group.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] failover_group_name: The name of the failover group.
        :param pulumi.Input[str] location_name: The name of the region where the resource is located.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedInstancePairInfoArgs']]]] managed_instance_pairs: List of managed instance pairs in the failover group.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PartnerRegionInfoArgs']]]] partner_regions: Partner region information for the failover group.
        :param pulumi.Input[pulumi.InputType['InstanceFailoverGroupReadOnlyEndpointArgs']] read_only_endpoint: Read-only endpoint of the failover group instance.
        :param pulumi.Input[pulumi.InputType['InstanceFailoverGroupReadWriteEndpointArgs']] read_write_endpoint: Read-write endpoint of the failover group instance.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: InstanceFailoverGroupArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An instance failover group.
        :param str resource_name: The name of the resource.
        :param InstanceFailoverGroupArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an
        # InstanceFailoverGroupArgs bag or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(InstanceFailoverGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 failover_group_name: Optional[pulumi.Input[str]] = None,
                 location_name: Optional[pulumi.Input[str]] = None,
                 managed_instance_pairs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedInstancePairInfoArgs']]]]] = None,
                 partner_regions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PartnerRegionInfoArgs']]]]] = None,
                 read_only_endpoint: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverGroupReadOnlyEndpointArgs']]] = None,
                 read_write_endpoint: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverGroupReadWriteEndpointArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means we are adopting an existing resource; input
        # properties may only be supplied when creating a new one.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = InstanceFailoverGroupArgs.__new__(InstanceFailoverGroupArgs)
            __props__.__dict__["failover_group_name"] = failover_group_name
            # Required-property checks are skipped when rehydrating by URN.
            if location_name is None and not opts.urn:
                raise TypeError("Missing required property 'location_name'")
            __props__.__dict__["location_name"] = location_name
            if managed_instance_pairs is None and not opts.urn:
                raise TypeError("Missing required property 'managed_instance_pairs'")
            __props__.__dict__["managed_instance_pairs"] = managed_instance_pairs
            if partner_regions is None and not opts.urn:
                raise TypeError("Missing required property 'partner_regions'")
            __props__.__dict__["partner_regions"] = partner_regions
            __props__.__dict__["read_only_endpoint"] = read_only_endpoint
            if read_write_endpoint is None and not opts.urn:
                raise TypeError("Missing required property 'read_write_endpoint'")
            __props__.__dict__["read_write_endpoint"] = read_write_endpoint
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            # Output-only properties start as None and are populated by the engine.
            __props__.__dict__["name"] = None
            __props__.__dict__["replication_role"] = None
            __props__.__dict__["replication_state"] = None
            __props__.__dict__["type"] = None
        # Aliases keep URNs stable across provider renames and API versions.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/v20201101preview:InstanceFailoverGroup"), pulumi.Alias(type_="azure-native:sql:InstanceFailoverGroup"), pulumi.Alias(type_="azure-nextgen:sql:InstanceFailoverGroup"), pulumi.Alias(type_="azure-native:sql/v20171001preview:InstanceFailoverGroup"), pulumi.Alias(type_="azure-nextgen:sql/v20171001preview:InstanceFailoverGroup"), pulumi.Alias(type_="azure-native:sql/v20200202preview:InstanceFailoverGroup"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:InstanceFailoverGroup"), pulumi.Alias(type_="azure-native:sql/v20200801preview:InstanceFailoverGroup"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:InstanceFailoverGroup")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(InstanceFailoverGroup, __self__).__init__(
            'azure-native:sql/v20201101preview:InstanceFailoverGroup',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'InstanceFailoverGroup':
        """
        Get an existing InstanceFailoverGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties are None here; the engine fills them in on read.
        __props__ = InstanceFailoverGroupArgs.__new__(InstanceFailoverGroupArgs)
        __props__.__dict__["managed_instance_pairs"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["partner_regions"] = None
        __props__.__dict__["read_only_endpoint"] = None
        __props__.__dict__["read_write_endpoint"] = None
        __props__.__dict__["replication_role"] = None
        __props__.__dict__["replication_state"] = None
        __props__.__dict__["type"] = None
        return InstanceFailoverGroup(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="managedInstancePairs")
    def managed_instance_pairs(self) -> pulumi.Output[Sequence['outputs.ManagedInstancePairInfoResponse']]:
        """
        List of managed instance pairs in the failover group.
        """
        return pulumi.get(self, "managed_instance_pairs")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="partnerRegions")
    def partner_regions(self) -> pulumi.Output[Sequence['outputs.PartnerRegionInfoResponse']]:
        """
        Partner region information for the failover group.
        """
        return pulumi.get(self, "partner_regions")
    @property
    @pulumi.getter(name="readOnlyEndpoint")
    def read_only_endpoint(self) -> pulumi.Output[Optional['outputs.InstanceFailoverGroupReadOnlyEndpointResponse']]:
        """
        Read-only endpoint of the failover group instance.
        """
        return pulumi.get(self, "read_only_endpoint")
    @property
    @pulumi.getter(name="readWriteEndpoint")
    def read_write_endpoint(self) -> pulumi.Output['outputs.InstanceFailoverGroupReadWriteEndpointResponse']:
        """
        Read-write endpoint of the failover group instance.
        """
        return pulumi.get(self, "read_write_endpoint")
    @property
    @pulumi.getter(name="replicationRole")
    def replication_role(self) -> pulumi.Output[str]:
        """
        Local replication role of the failover group instance.
        """
        return pulumi.get(self, "replication_role")
    @property
    @pulumi.getter(name="replicationState")
    def replication_state(self) -> pulumi.Output[str]:
        """
        Replication state of the failover group instance.
        """
        return pulumi.get(self, "replication_state")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| 51.489028 | 735 | 0.694307 |
acea02c3a6b4b277ddf2c78707ab576f8f59f72a | 726 | py | Python | places/migrations/0006_auto_20210828_0037.py | post1blues/where_to_go | 3d81320bad57439cffcc2d0b176136e0a1adea06 | [
"MIT"
] | null | null | null | places/migrations/0006_auto_20210828_0037.py | post1blues/where_to_go | 3d81320bad57439cffcc2d0b176136e0a1adea06 | [
"MIT"
] | null | null | null | places/migrations/0006_auto_20210828_0037.py | post1blues/where_to_go | 3d81320bad57439cffcc2d0b176136e0a1adea06 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-27 21:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see header); do not hand-edit once applied.
    dependencies = [
        ('places', '0005_location'),
    ]
    operations = [
        # Location gains a required FK to Place; default=1 backfills existing
        # rows (Place pk=1 must exist) and preserve_default=False drops the
        # default from the final schema.
        migrations.AddField(
            model_name='location',
            name='place',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='places.place'),
            preserve_default=False,
        ),
        # Image.place gains related_name='imgs' for reverse lookups.
        migrations.AlterField(
            model_name='image',
            name='place',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='imgs', to='places.place'),
        ),
    ]
acea032102c6315e80563507aef47d3040f61ee1 | 671 | py | Python | gs_quant/datetime/__init__.py | skyquant2/gs-quant | b7e648fa7912b13ad1fd503b643389e34587aa1e | [
"Apache-2.0"
] | 5 | 2019-10-14T17:02:26.000Z | 2021-02-27T09:07:35.000Z | gs_quant/datetime/__init__.py | skyquant2/gs-quant | b7e648fa7912b13ad1fd503b643389e34587aa1e | [
"Apache-2.0"
] | 1 | 2020-02-15T11:58:29.000Z | 2020-02-15T11:58:29.000Z | gs_quant/datetime/__init__.py | skyquant2/gs-quant | b7e648fa7912b13ad1fd503b643389e34587aa1e | [
"Apache-2.0"
] | 1 | 2019-11-27T16:34:45.000Z | 2019-11-27T16:34:45.000Z | """
Copyright 2018 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from .date import *
from .time import *
from .gscalendar import *
from .point import *
# NOTE(review): rebinding __name__ changes how this package identifies itself
# (e.g. in logging/pickling) and shadows the real module name — presumably
# intentional to present the subpackage as 'datetime'; confirm before changing.
__name__ = 'datetime'
| 30.5 | 64 | 0.774963 |
acea04b247abbaae671755c95d4dc9027a149533 | 6,534 | py | Python | tests/70_program_swe_sphere_timestepper_convergence_ln2_std/postprocessing_convergence_test.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 6 | 2017-11-20T08:12:46.000Z | 2021-03-11T15:32:36.000Z | tests/70_program_swe_sphere_timestepper_convergence_ln2_std/postprocessing_convergence_test.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 4 | 2018-02-02T21:46:33.000Z | 2022-01-11T11:10:27.000Z | tests/70_program_swe_sphere_timestepper_convergence_ln2_std/postprocessing_convergence_test.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 12 | 2016-03-01T18:33:34.000Z | 2022-02-08T22:20:31.000Z | #! /usr/bin/env python3
import sys
import math
from mule_local.JobMule import *
from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
# Group jobs by time-stepping method and compare each error norm column.
groups = ['runtime.timestepping_method']
tagnames_y = [
    'sphere_data_diff_prog_phi_pert.res_norm_linf',
    'sphere_data_diff_prog_div.res_norm_linf',
    'sphere_data_diff_prog_vrt.res_norm_linf',
]
j = JobsData(verbosity=0)
c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
    print(" + "+key)
for tagname_y in tagnames_y:
    print("*"*80)
    print("Processing tagname "+tagname_y)
    print("*"*80)
    tagname_x = 'runtime.timestep_size'
    if True:
        """
        Use plotting format to create (x/y) data
        """
        d = JobsData_GroupsPlottingScattered(
            job_groups,
            tagname_x,
            tagname_y,
            meta_attribute_name = 'runtime.timestepping_order',
        )
        for group_name, group_data in d.get_data_float().items():
            print("*"*80)
            print("Group: "+group_name)
            # First pass: print the error ratio between consecutive timestep
            # sizes and verify the convergence order is uniform in the group.
            prev_value = -1.0
            conv = '-'
            convergence_order = None
            for (x, y, convergence_order_) in zip(group_data['x_values'], group_data['y_values'], group_data['meta_values']):
                if prev_value > 0:
                    conv = y/prev_value
                elif prev_value == 0:
                    conv = '[error=0]'
                print("\t"+str(x)+"\t=>\t"+str(y)+"\tconvergence: "+str(conv))
                prev_value = y
                if convergence_order == None:
                    convergence_order = convergence_order_
                else:
                    if convergence_order != convergence_order_:
                        raise Exception("Convergence order mismatch!!!")
            print("")
            print("Testing convergence")
            #
            # Setup default values
            #
            # 'convergence', 'error'
            test_type = 'convergence'
            # Convergence tolerance
            error_tolerance_convergence = 0.1
            # Range to check for convergence
            conv_test_range_start = 0
            conv_test_range_end = 4
            # Disabled alternative: absolute-error thresholds for exp. integrators.
            if False:
                #
                # Use convergence tests for all implementations since the nonlienearity will lead to "converging" errors
                #
                if 'vrt' in tagname_y or 'div' in tagname_y:
                    if 'exp' in group_name:
                        test_type = 'error'
                        error_tolerance_error = 1e-12
                elif 'phi' in tagname_y:
                    if 'exp' in group_name:
                        test_type = 'error'
                        error_tolerance_error = 1e-4
                else:
                    raise Exception("Tagname "+tagname_y+" unknown")
            print(" + test_type: "+test_type)
            if test_type == 'convergence':
                print(" + error_tolerance_convergence: "+str(error_tolerance_convergence))
            elif test_type == 'error':
                print(" + error_tolerance_error: "+str(error_tolerance_error))
            print(" + range start/end: "+str(conv_test_range_start)+", "+str(conv_test_range_end))
            if len(group_data['meta_values']) < conv_test_range_end:
                raise Exception("Not enough samples to run convergence test")
            for i in range(len(group_data['meta_values'])):
                if group_data['meta_values'][i] != group_data['meta_values'][0]:
                    print("FATAL: Different convergence orders in same test")
                    for i in range(len(group_data['meta_values'])):
                        print("order: "+str(group_data['meta_values']))
                    raise Exception("FATAL: Different convergence orders in same test")
            l = len(group_data['x_values'])
            if l < conv_test_range_end:
                print("There are only "+str(l)+" values, but we need at least "+str(conv_test_range_end)+" values")
                raise Exception("Not enough values to study convergence")
            # Second pass: compare the measured ratio against the expected
            # 2**order when halving the timestep, within the tolerance.
            prev_value = -1.0
            conv = '-'
            for i in range(conv_test_range_start, conv_test_range_end):
                x = group_data['x_values'][i]
                y = group_data['y_values'][i]
                meta = group_data['meta_values'][i]
                if prev_value > 0:
                    conv = y/prev_value
                elif prev_value == 0:
                    conv = '[error=0]'
                error_convergence = '-'
                if isinstance(conv, float):
                    # Convergence order is stored in meta value
                    target_conv = pow(2.0, meta)
                    error_convergence = abs(conv - target_conv)/target_conv
                print("\t"+str(x)+"\t=>\t"+str(y)+"\tconvergence: "+str(conv)+"\terror: "+str(error_convergence))
                if test_type == 'convergence':
                    # Test for convergence if exists
                    if error_convergence != '-':
                        if error_convergence > error_tolerance_convergence:
                            print("Error: "+str(error_convergence))
                            # Any command-line argument downgrades failures to prints.
                            if len(sys.argv) <= 1:
                                raise Exception("Convergence exceeds tolerance of "+str(error_tolerance_convergence))
                elif test_type == 'error':
                    # Alternate tests instead of convergence check
                    # Convergence doesn't really make sense for REXI in the way how it's applied
                    # This should be only used for l_exp and lg_exp
                    # Just ensure that the errors are below a certain level
                    if y > error_tolerance_error:
                        print("Error: "+str(y))
                        if len(sys.argv) <= 1:
                            raise Exception("Error exceeds tolerance of "+str(error_tolerance_error))
                else:
                    raise Exception("Unknown test type "+test_type)
                prev_value = y
            if len(sys.argv) <= 1:
                print("[OK]")
if len(sys.argv) <= 1:
    print("*"*80)
    print("Convergence tests successful")
    print("*"*80)
acea04ba1352dc283858f6786968b1a6c37ca2e9 | 5,160 | py | Python | kubernetes/client/models/v2beta1_cross_version_object_reference.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v2beta1_cross_version_object_reference.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v2beta1_cross_version_object_reference.py | dix000p/kubernetes-client-python | 22e473e02883aca1058606092c86311f02f42be2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V2beta1CrossVersionObjectReference(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'api_version': 'str',
        'kind': 'str',
        'name': 'str'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'name': 'name'
    }

    def __init__(self, api_version=None, kind=None, name=None):
        """
        V2beta1CrossVersionObjectReference - a model defined in Swagger

        :param api_version: API version of the referent (optional).
        :param kind: Kind of the referent (required; setter raises ValueError on None).
        :param name: Name of the referent (required; setter raises ValueError on None).
        """
        self._api_version = None
        self._kind = None
        self._name = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # kind and name are required: assigning through the property setters
        # validates them and raises ValueError when they are None.
        self.kind = kind
        self.name = name

    @property
    def api_version(self):
        """
        Gets the api_version of this V2beta1CrossVersionObjectReference.
        API version of the referent

        :return: The api_version of this V2beta1CrossVersionObjectReference.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V2beta1CrossVersionObjectReference.
        API version of the referent

        :param api_version: The api_version of this V2beta1CrossVersionObjectReference.
        :type: str
        """
        self._api_version = api_version

    @property
    def kind(self):
        """
        Gets the kind of this V2beta1CrossVersionObjectReference.
        Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"

        :return: The kind of this V2beta1CrossVersionObjectReference.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V2beta1CrossVersionObjectReference.
        Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"

        :param kind: The kind of this V2beta1CrossVersionObjectReference.
        :type: str
        """
        if kind is None:
            raise ValueError("Invalid value for `kind`, must not be `None`")

        self._kind = kind

    @property
    def name(self):
        """
        Gets the name of this V2beta1CrossVersionObjectReference.
        Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names

        :return: The name of this V2beta1CrossVersionObjectReference.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this V2beta1CrossVersionObjectReference.
        Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names

        :param name: The name of this V2beta1CrossVersionObjectReference.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")

        self._name = name

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Iterate the declared attribute names directly; six.iteritems is
        # unnecessary for plain dict iteration and this form works identically
        # on Python 2 and Python 3.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V2beta1CrossVersionObjectReference):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
acea05ba9036ace62049c0e137d67f6a034f14b5 | 12,013 | py | Python | test/unit/tools/test_collect_primary_datasets.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | null | null | null | test/unit/tools/test_collect_primary_datasets.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | test/unit/tools/test_collect_primary_datasets.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 3 | 2015-02-22T13:34:16.000Z | 2020-10-01T01:28:04.000Z | import os
import json
import unittest
import tools_support
from galaxy import model
from galaxy import util
from galaxy.tools.parameters import output_collect
DEFAULT_TOOL_OUTPUT = "out1"  # tool output name collected datasets are keyed under (see "__new_primary_file_out1|test1__")
DEFAULT_EXTRA_NAME = "test1"  # default designation used for extra primary files in these tests
class CollectPrimaryDatasetsTestCase( unittest.TestCase, tools_support.UsesApp, tools_support.UsesTools ):
    def setUp( self ):
        # Build a mock Galaxy app backed by a mock object store, initialize a
        # simple tool with one declared output, and collect outputs from the
        # job working directory.
        self.setup_app( mock_model=False )
        object_store = MockObjectStore()
        self.app.object_store = object_store
        self._init_tool( tools_support.SIMPLE_TOOL_CONTENTS )
        self._setup_test_output( )
        self.app.config.collect_outputs_from = "job_working_directory"
        # Dataset resolves file paths through this class-level object store;
        # tearDown detaches it again.
        self.app.model.Dataset.object_store = object_store
def tearDown( self ):
if self.app.model.Dataset.object_store is self.app.object_store:
self.app.model.Dataset.object_store = None
def test_empty_collect( self ):
assert len( self._collect() ) == 0
    def test_collect_multiple( self ):
        # Two extra files with distinct designations yield two primary
        # datasets, keyed by designation under the tool output name.
        path1 = self._setup_extra_file( name="test1" )
        path2 = self._setup_extra_file( name="test2" )
        datasets = self._collect()
        assert DEFAULT_TOOL_OUTPUT in datasets
        self.assertEquals( len( datasets[ DEFAULT_TOOL_OUTPUT ] ), 2 )
        created_hda_1 = datasets[ DEFAULT_TOOL_OUTPUT ][ "test1" ]
        self.app.object_store.assert_created_with_path( created_hda_1.dataset, path1 )
        created_hda_2 = datasets[ DEFAULT_TOOL_OUTPUT ][ "test2" ]
        self.app.object_store.assert_created_with_path( created_hda_2.dataset, path2 )
        # Test default metadata stuff
        assert created_hda_1.visible
        assert created_hda_1.dbkey == "?"
def test_collect_hidden( self ):
self._setup_extra_file( visible="hidden" )
created_hda = self._collect_default_extra()
assert not created_hda.visible
def test_collect_ext( self ):
self._setup_extra_file( ext="txt" )
created_hda = self._collect_default_extra()
assert created_hda.ext == "txt"
    def test_copied_to_imported_histories( self ):
        # A history that imported (copied) the tool's output HDA should also
        # receive a copy of any discovered primary dataset.
        self._setup_extra_file( )
        cloned_hda = self.hda.copy()
        history_2 = self._new_history( hdas=[ cloned_hda ])
        assert len( history_2.datasets ) == 1
        self._collect()
        # Make sure extra primary was copied to cloned history with
        # cloned output.
        assert len( history_2.datasets ) == 2
def test_dbkey_from_filename( self ):
self._setup_extra_file( dbkey="hg19" )
created_hda = self._collect_default_extra()
assert created_hda.dbkey == "hg19"
def test_dbkey_from_galaxy_json( self ):
path = self._setup_extra_file( )
self._append_job_json( dict( dbkey="hg19" ), output_path=path )
created_hda = self._collect_default_extra()
assert created_hda.dbkey == "hg19"
def test_name_from_galaxy_json( self ):
path = self._setup_extra_file( )
self._append_job_json( dict( name="test_from_json" ), output_path=path )
created_hda = self._collect_default_extra()
assert "test_from_json" in created_hda.name
def test_info_from_galaxy_json( self ):
path = self._setup_extra_file( )
self._append_job_json( dict( info="extra output info" ), output_path=path )
created_hda = self._collect_default_extra()
assert created_hda.info == "extra output info"
def test_extension_from_galaxy_json( self ):
path = self._setup_extra_file( )
self._append_job_json( dict( ext="txt" ), output_path=path )
created_hda = self._collect_default_extra()
assert created_hda.ext == "txt"
def test_new_file_path_collection( self ):
self.app.config.collect_outputs_from = "new_file_path"
self.app.config.new_file_path = self.test_directory
self._setup_extra_file( )
created_hda = self._collect_default_extra( job_working_directory="/tmp" )
assert created_hda
def test_job_param( self ):
self._setup_extra_file( )
assert len( self.job.output_datasets ) == 1
self._collect_default_extra()
assert len( self.job.output_datasets ) == 2
extra_job_assoc = filter( lambda job_assoc: job_assoc.name.startswith( "__" ), self.job.output_datasets )[ 0 ]
assert extra_job_assoc.name == "__new_primary_file_out1|test1__"
def test_pattern_override_designation( self ):
self._replace_output_collectors( '''<output><discover_datasets pattern="__designation__" directory="subdir" ext="txt" /></output>''' )
self._setup_extra_file( subdir="subdir", filename="foo.txt" )
primary_outputs = self._collect( )[ DEFAULT_TOOL_OUTPUT ]
assert len( primary_outputs ) == 1
created_hda = primary_outputs.values()[ 0 ]
assert "foo.txt" in created_hda.name
assert created_hda.ext == "txt"
def test_name_and_ext_pattern( self ):
self._replace_output_collectors( '''<output><discover_datasets pattern="__name_and_ext__" directory="subdir" /></output>''' )
self._setup_extra_file( subdir="subdir", filename="foo1.txt" )
self._setup_extra_file( subdir="subdir", filename="foo2.tabular" )
primary_outputs = self._collect( )[ DEFAULT_TOOL_OUTPUT ]
assert len( primary_outputs ) == 2
assert primary_outputs[ "foo1" ].ext == "txt"
assert primary_outputs[ "foo2" ].ext == "tabular"
def test_custom_pattern( self ):
# Hypothetical oral metagenomic classifier that populates a directory
# of files based on name and genome. Use custom regex pattern to grab
# and classify these files.
self._replace_output_collectors( '''<output><discover_datasets pattern="(?P<designation>.*)__(?P<dbkey>.*).fasta" directory="genome_breakdown" ext="fasta" /></output>''' )
self._setup_extra_file( subdir="genome_breakdown", filename="samp1__hg19.fasta" )
self._setup_extra_file( subdir="genome_breakdown", filename="samp2__lactLact.fasta" )
self._setup_extra_file( subdir="genome_breakdown", filename="samp3__hg19.fasta" )
self._setup_extra_file( subdir="genome_breakdown", filename="samp4__lactPlan.fasta" )
self._setup_extra_file( subdir="genome_breakdown", filename="samp5__fusoNucl.fasta" )
# Put a file in directory we don't care about, just to make sure
# it doesn't get picked up by pattern.
self._setup_extra_file( subdir="genome_breakdown", filename="overview.txt" )
primary_outputs = self._collect( )[ DEFAULT_TOOL_OUTPUT ]
assert len( primary_outputs ) == 5
genomes = dict( samp1="hg19", samp2="lactLact", samp3="hg19", samp4="lactPlan", samp5="fusoNucl" )
for key, hda in primary_outputs.iteritems():
assert hda.dbkey == genomes[ key ]
def test_name_versus_designation( self ):
""" This test demonstrates the difference between name and desgination
in grouping patterns and named patterns such as __designation__,
__name__, __designation_and_ext__, and __name_and_ext__.
"""
self._replace_output_collectors( '''<output>
<discover_datasets pattern="__name_and_ext__" directory="subdir_for_name_discovery" />
<discover_datasets pattern="__designation_and_ext__" directory="subdir_for_designation_discovery" />
</output>''')
self._setup_extra_file( subdir="subdir_for_name_discovery", filename="example1.txt" )
self._setup_extra_file( subdir="subdir_for_designation_discovery", filename="example2.txt" )
primary_outputs = self._collect( )[ DEFAULT_TOOL_OUTPUT ]
name_output = primary_outputs[ "example1" ]
designation_output = primary_outputs[ "example2" ]
# While name is also used for designation, designation is not the name -
# it is used in the calculation of the name however...
assert name_output.name == "example1"
assert designation_output.name == "%s (%s)" % ( self.hda.name, "example2" )
def test_cannot_read_files_outside_job_directory( self ):
self._replace_output_collectors( '''<output>
<discover_datasets pattern="__name_and_ext__" directory="../../secrets" />
</output>''')
exception_thrown = False
try:
self._collect( )
except Exception:
exception_thrown = True
assert exception_thrown
def _collect_default_extra( self, **kwargs ):
return self._collect( **kwargs )[ DEFAULT_TOOL_OUTPUT ][ DEFAULT_EXTRA_NAME ]
def _collect( self, job_working_directory=None ):
if not job_working_directory:
job_working_directory = self.test_directory
return self.tool.collect_primary_datasets( self.outputs, job_working_directory, "txt" )
def _replace_output_collectors( self, xml_str ):
# Rewrite tool as if it had been created with output containing
# supplied dataset_collector elem.
elem = util.parse_xml_string( xml_str )
self.tool.outputs[ DEFAULT_TOOL_OUTPUT ].dataset_collectors = output_collect.dataset_collectors_from_elem( elem )
def _append_job_json( self, object, output_path=None, line_type="new_primary_dataset" ):
object[ "type" ] = line_type
if output_path:
name = os.path.basename( output_path )
object[ "filename" ] = name
line = json.dumps( object )
with open( os.path.join( self.test_directory, "galaxy.json" ), "a" ) as f:
f.write( "%s\n" % line )
def _setup_extra_file( self, **kwargs ):
path = kwargs.get( "path", None )
filename = kwargs.get( "filename", None )
if not path and not filename:
name = kwargs.get( "name", DEFAULT_EXTRA_NAME )
visible = kwargs.get( "visible", "visible" )
ext = kwargs.get( "ext", "data" )
template_args = ( self.hda.id, name, visible, ext )
directory = kwargs.get( "directory", self.test_directory )
path = os.path.join( directory, "primary_%s_%s_%s_%s" % template_args )
if "dbkey" in kwargs:
path = "%s_%s" % ( path, kwargs[ "dbkey" ] )
if not path:
assert filename
subdir = kwargs.get( "subdir", "." )
path = os.path.join( self.test_directory, subdir, filename )
directory = os.path.dirname( path )
if not os.path.exists( directory ):
os.makedirs( directory )
contents = kwargs.get( "contents", "test contents" )
open( path, "w" ).write( contents )
return path
def _setup_test_output( self ):
dataset = model.Dataset()
dataset.external_filename = "example_output" # This way object store isn't asked about size...
self.hda = model.HistoryDatasetAssociation( name="test", dataset=dataset )
job = model.Job()
job.add_output_dataset( DEFAULT_TOOL_OUTPUT, self.hda )
self.app.model.context.add( job )
self.job = job
self.history = self._new_history( hdas=[ self.hda ] )
self.outputs = { DEFAULT_TOOL_OUTPUT: self.hda }
def _new_history( self, hdas=[], flush=True ):
history = model.History()
self.app.model.context.add( history )
for hda in hdas:
history.add_dataset( hda, set_hid=False )
self.app.model.context.flush( )
return history
class MockObjectStore( object ):
    """Minimal stand-in for Galaxy's object store used by these tests.

    Remembers, per dataset, the path the dataset was created from so tests
    can make assertions without touching a real object store backend.
    """

    def __init__( self ):
        # Maps dataset -> path its contents were taken from at creation.
        self.created_datasets = {}

    def update_from_file( self, dataset, file_name, create ):
        """Record the backing file, but only for creation requests."""
        if not create:
            return
        self.created_datasets[ dataset ] = file_name

    def size( self, dataset ):
        """Return the on-disk size of the dataset's backing file."""
        return os.stat( self.created_datasets[ dataset ] ).st_size

    def get_filename( self, dataset ):
        """Return the path the dataset was created from."""
        return self.created_datasets[ dataset ]

    def assert_created_with_path( self, dataset, file_name ):
        """Fail unless the dataset was created from ``file_name``."""
        assert self.created_datasets[ dataset ] == file_name
| 44.165441 | 191 | 0.666028 |
acea05e683060cda399847fadf618dab6b0eaa07 | 1,827 | py | Python | sublime_djhtml.py | jordaneremieff/sublime_djhtml | 8cc32c231746d2c25d7788904da4e4282f954eed | [
"MIT"
] | null | null | null | sublime_djhtml.py | jordaneremieff/sublime_djhtml | 8cc32c231746d2c25d7788904da4e4282f954eed | [
"MIT"
] | null | null | null | sublime_djhtml.py | jordaneremieff/sublime_djhtml | 8cc32c231746d2c25d7788904da4e4282f954eed | [
"MIT"
] | null | null | null | import os
import sys
import logging
import sublime
import sublime_plugin
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from vendor.djhtml.__main__ import verify_changed
from vendor.djhtml.modes import DjHTML
__version__ = "0.1.0"
__version_info__ = (0, 1, 0)
logger = logging.getLogger("DjHTML")
SUBLIME_SETTINGS = "sublime_djhtml.sublime-settings"
def validate_and_indent(source):
    """Indent ``source`` with DjHTML; return None when nothing changed."""
    settings = sublime.load_settings(SUBLIME_SETTINGS)
    tabwidth = settings.get("tabwidth", 4)
    indented = DjHTML(source).indent(tabwidth)
    if verify_changed(source, indented):
        return indented
    return None
def check_indent_on_save(view):
    """Run the indent command on save when enabled for the view's syntax."""
    settings = sublime.load_settings(SUBLIME_SETTINGS)
    if not settings.get("indent_on_save"):
        return
    syntax = view.settings().get("syntax")
    if syntax in settings.get("enabled_syntax", []):
        view.run_command("djhtml_indent")
class DjhtmlIndentCommand(sublime_plugin.TextCommand):
    """Reindent the current view's template with DjHTML."""

    def run(self, view):
        """Replace the buffer with its reindented form.

        NOTE(review): ``view`` here is the edit token Sublime passes to
        TextCommand.run (the buffer itself is ``self.view``); the name is
        kept for interface compatibility.
        """
        region = sublime.Region(0, self.view.size())
        source = self.view.substr(region)
        error = None
        # Initialize so the branches below never touch an unbound name if
        # validate_and_indent raises before assigning.
        formatted = None
        try:
            formatted = validate_and_indent(source)
        except Exception:
            error = (
                "DjHTML: An unknown error occurred, the template could not be processed."
            )
            logger.exception(error)
        if error:
            sublime.error_message(error)
        elif not formatted:
            sublime.status_message(
                "No indentation required, template file is unchanged."
            )
        else:
            sublime.status_message("Template has been reindented.")
            self.view.replace(view, region, formatted)
class DjhtmlIndentOnSaveListener(sublime_plugin.EventListener):
    """Event listener running the optional indent-on-save hook."""

    def on_pre_save(self, view):
        """Delegate to the module-level save-time check for this view."""
        check_indent_on_save(view)
| 27.681818 | 88 | 0.673235 |
acea06849abc5d83b2e8f819e0a7af6b7565c744 | 16,807 | py | Python | lib/multiviews/pictorial.py | CHUNYUWANG/imu-human-pose-pytorch | f4813336571789f46eabdfb520e7ed5b20ac04ea | [
"MIT"
] | 72 | 2020-03-26T13:26:39.000Z | 2022-03-16T08:45:34.000Z | lib/multiviews/pictorial.py | zhezh/imu-human-pose-pytorch | f4813336571789f46eabdfb520e7ed5b20ac04ea | [
"MIT"
] | 10 | 2020-04-05T07:17:49.000Z | 2022-03-04T05:32:12.000Z | lib/multiviews/pictorial.py | CHUNYUWANG/imu-human-pose-pytorch | f4813336571789f46eabdfb520e7ed5b20ac04ea | [
"MIT"
] | 13 | 2020-04-12T20:33:38.000Z | 2022-02-17T11:23:13.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import multiviews.cameras as cameras
from utils.transforms import get_affine_transform, affine_transform, affine_transform_pts
# F.grid_sample grew an ``align_corners`` keyword in torch 1.3.0 (with the
# default flipped to False); pin the pre-1.3 behavior explicitly on newer
# releases. Compare (major, minor) tuples instead of the old major*10+minor
# integer: that encoding misorders versions, and unpacking split('.') into
# exactly three names breaks on two-part or local ("+cuXXX") version strings.
_torch_version = tuple(
    int(part) for part in torch.__version__.split("+")[0].split(".")[:2]
)
if _torch_version >= (1, 3):
    grid_sample = functools.partial(F.grid_sample, align_corners=True)
else:
    grid_sample = F.grid_sample
def infer(unary, pairwise, body, config, **kwargs):
    """
    Max-product dynamic programming over the kinematic tree: a bottom-up
    pass (leaves to root) folds each child's best score into its parent,
    then a top-down backtracking pass reads out the best grid bin per joint.

    Args:
        unary: a list of unary terms for all JOINTS
        pairwise: a list of pairwise terms of all EDGES
        body: tree structure human body
    Returns:
        pose3d_as_cube_idx: 3d pose as cube index
    """
    # current_device = torch.device('cuda:{}'.format(pairwise.items()[0].get_device()))
    current_device = kwargs['current_device']
    skeleton = body.skeleton
    skeleton_sorted_by_level = body.skeleton_sorted_by_level
    root_idx = config.DATASET.ROOTIDX
    nbins = len(unary[root_idx])
    states_of_all_joints = {}
    # print('dev {} id unary: {}'.format(current_device, id(unary)))
    # zhe 20190104 replace torch with np
    # Bottom-up pass. skeleton_sorted_by_level is iterated so that every
    # child is processed before its parent; each node's 'Energy' absorbs the
    # maximized (pairwise * child energy) of all children, and 'State'
    # records the argmax child bin per (parent bin, child) for backtracking.
    for node in skeleton_sorted_by_level:
        # energy = []
        children_state = []
        unary_current = unary[node['idx']]
        # unary_current = torch.tensor(unary_current, dtype=torch.float32).to(current_device)
        if len(node['children']) == 0:
            # Leaf: energy is just the unary term; -1 marks "no children".
            energy = unary[node['idx']].squeeze()
            children_state = [[-1]] * len(energy)
        else:
            children = node['children']
            for child in children:
                child_energy = states_of_all_joints[child][
                    'Energy'].squeeze()
                pairwise_mat = pairwise[(node['idx'], child)]
                # if type(pairwise_mat) == scipy.sparse.csr.csr_matrix:
                #     pairwise_mat = pairwise_mat.toarray()
                # unary_child = child_energy
                unary_child = torch.tensor(child_energy, dtype=torch.float32).to(current_device).expand_as(pairwise_mat)
                # unary_child_with_pairwise = np.multiply(pairwise_mat, unary_child)
                # unary_child_with_pairwise = ne.evaluate('pairwise_mat*unary_child')
                unary_child_with_pairwise = torch.mul(pairwise_mat, unary_child)
                # max_i = np.argmax(unary_child_with_pairwise, axis=1)
                # max_v = np.max(unary_child_with_pairwise, axis=1)
                # Row r holds parent bin r; maximize over child bins (dim=1).
                max_v, max_i = torch.max(unary_child_with_pairwise, dim=1)
                unary_current = torch.mul(unary_current, max_v)
                # unary_current = np.multiply(unary_current, max_v)
                # children_state.append(max_i)
                children_state.append(max_i.detach().cpu().numpy())
        # rearrange children_state
        children_state = np.array(children_state).T # .tolist()
        res = {'Energy': unary_current.detach().cpu().numpy(), 'State': children_state}
        states_of_all_joints[node['idx']] = res
    # end here 20181225
    # Top-down backtracking: pick the best root bin, then follow the stored
    # argmax child bins breadth-first down the tree.
    pose3d_as_cube_idx = []
    energy = states_of_all_joints[root_idx]['Energy']
    cube_idx = np.argmax(energy)
    pose3d_as_cube_idx.append([root_idx, cube_idx])
    queue = pose3d_as_cube_idx.copy()
    while queue:
        joint_idx, cube_idx = queue.pop(0)
        children_state = states_of_all_joints[joint_idx]['State']
        state = children_state[cube_idx]
        children_index = skeleton[joint_idx]['children']
        if -1 not in state:
            for joint_idx, cube_idx in zip(children_index, state):
                pose3d_as_cube_idx.append([joint_idx, cube_idx])
                queue.append([joint_idx, cube_idx])
    pose3d_as_cube_idx.sort()
    return pose3d_as_cube_idx
def get_loc_from_cube_idx(grid, pose3d_as_cube_idx):
    """Convert per-joint cube bin indices into 3d joint locations.

    Args:
        grid: list of bin grids; either a single shared grid or one per joint
        pose3d_as_cube_idx: list of (joint_idx, cube_idx) pairs
    Returns:
        pose3d: ndarray of shape (njoints, 3)
    """
    shared_grid = len(grid) == 1
    pose3d = np.zeros((len(pose3d_as_cube_idx), 3))
    for joint_idx, cube_idx in pose3d_as_cube_idx:
        joint_grid = grid[0] if shared_grid else grid[joint_idx]
        pose3d[joint_idx] = joint_grid[cube_idx]
    return pose3d
def compute_grid(boxSize, boxCenter, nBins):
    """Build a dense cubic grid of nBins**3 points centered at boxCenter.

    Returns an ndarray of shape (nBins**3, 3) spanning boxSize along each
    axis.
    """
    axis = np.linspace(-boxSize / 2, boxSize / 2, nBins)
    xs, ys, zs = np.meshgrid(
        axis + boxCenter[0],
        axis + boxCenter[1],
        axis + boxCenter[2],
    )
    # Flatten each coordinate plane (C order) and stack into (nBins**3, 3).
    return np.stack((xs.ravel(), ys.ravel(), zs.ravel()), axis=1)
def compute_pairwise_constrain(skeleton, limb_length, grid, tolerance, **kwargs):
    """
    Build, for every (parent, child) edge of the skeleton, an
    (nbins_parent x nbins_child) table scoring bin pairs: 1 when the bins'
    distance matches the template limb length within ``tolerance`` (else 0),
    optionally scaled by the cosine between the bin-pair direction and the
    corresponding IMU bone vector when ``do_bone_vectors`` is passed.
    """
    do_bone_vectors = False
    if 'do_bone_vectors' in kwargs:
        if kwargs['do_bone_vectors']:
            do_bone_vectors = True
            bone_vectors = kwargs['bone_vectors']
    pairwise_constrain = {}
    for node in skeleton:
        current = node['idx']
        children = node['children']
        if do_bone_vectors:
            bone_index = node['imubone']
        for idx_child, child in enumerate(children):
            expect_length = limb_length[(current, child)]
            if do_bone_vectors:
                if bone_index[idx_child] >= 0: # if certain bone has imu
                    expect_orient_vector = bone_vectors[bone_index[idx_child]]
                    # Normalize once per edge; epsilon guards zero vectors.
                    norm_expect_orient_vector = expect_orient_vector / (np.linalg.norm(expect_orient_vector)+1e-9)
            nbin_current = len(grid[current])
            nbin_child = len(grid[child])
            constrain_array = np.zeros((nbin_current, nbin_child), dtype=np.float32)
            # Exhaustive O(nbins^2) scan over parent/child bin pairs.
            for i in range(nbin_current):
                for j in range(nbin_child):
                    actual_length = np.linalg.norm(grid[current][i] -
                                                   grid[child][j]) + 1e-9
                    offset = np.abs(actual_length - expect_length)
                    if offset <= tolerance:
                        constrain_array[i, j] = 1
                        if do_bone_vectors and bone_index[idx_child] >= 0:
                            acutal_orient_vector = (grid[current][i] - grid[child][j]) / actual_length
                            cos_theta = np.dot(-norm_expect_orient_vector, acutal_orient_vector)
                            # notice norm_expect_orient_vector is child - parent
                            # while acutal_orient_vector is parent - child
                            constrain_array[i, j] *= cos_theta
            pairwise_constrain[(current, child)] = constrain_array
    return pairwise_constrain
def compute_unary_term(heatmap, grid, bbox2D, cam, imgSize, **kwargs):
    """
    Project every grid bin into each camera view, bilinearly sample the
    joint heatmaps there, and sum the sampled scores over views to obtain
    the unary term per bin.

    Args:
        heatmap: array of size (n * k * h * w)
            -n: number of views,  -k: number of joints
            -h: heatmap height,  -w: heatmap width
        grid: list of k ndarrays of size (nbins * 3)
            -k: number of joints; 1 when the grid is shared in PSM
            -nbins: number of bins in the grid
        bbox2D: bounding box on which heatmap is computed
    Returns:
        unary_of_all_joints: a list of ndarray of size nbins
    """
    n, k = heatmap.shape[0], heatmap.shape[1]
    h, w = heatmap.shape[2], heatmap.shape[3]
    nbins = grid[0].shape[0]
    current_device = torch.device('cuda:{}'.format(heatmap.get_device()))
    # unary_of_all_joints = []
    # for j in range(k):
    #     unary = np.zeros(nbins, dtype=np.float32)
    #     for c in range(n):
    #
    #         grid_id = 0 if len(grid) == 1 else j
    #         xy = cameras.project_pose(grid[grid_id], cam[c])
    #         trans = get_affine_transform(bbox2D[c]['center'],
    #                                      bbox2D[c]['scale'], 0, imgSize)
    #
    #         xy = affine_transform_pts(xy, trans) * np.array([w, h]) / imgSize
    #         # for i in range(nbins):
    #         #     xy[i] = affine_transform(xy[i], trans) * np.array([w, h]) / imgSize
    #
    #         hmap = heatmap[c, j, :, :]
    #         point_x, point_y = np.arange(hmap.shape[0]), np.arange(
    #             hmap.shape[1])
    #         rgi = RegularGridInterpolator(
    #             points=[point_x, point_y],
    #             values=hmap.transpose(),
    #             bounds_error=False,
    #             fill_value=0)
    #         score = rgi(xy)
    #         unary = unary + np.reshape(score, newshape=unary.shape)
    #     unary_of_all_joints.append(unary)
    # return unary_of_all_joints
    # torch version
    # heatmaps = torch.tensor(heatmap, dtype=torch.float32)
    heatmaps = heatmap
    # Normalized sampling coordinates in [-1, 1] for grid_sample, one set of
    # nbins points per (view, joint).
    grid_cords = np.zeros([n, k, nbins, 2], dtype=np.float32)
    for c in range(n):
        for j in range(k):
            grid_id = 0 if len(grid) == 1 else j
            xy = cameras.project_pose(grid[grid_id], cam[c])
            trans = get_affine_transform(bbox2D[c]['center'],
                                         bbox2D[c]['scale'], 0, imgSize)
            xy = affine_transform_pts(xy, trans) * np.array([w, h]) / imgSize
            # xy of shape (4096,2)
            # xy is cord of certain view and certain joint
            if len(grid) == 1: # psm 4096bins
                grid_cords[c, 0, :, :] = xy/np.array([h-1, w-1], dtype=np.float32) * 2.0 - 1.0
                for j in range(1, k):
                    grid_cords[c, j, :, :] = grid_cords[c, 0, :, :]
                break # since all joints share same grid, no need computing for each joint, just copy it
            else:
                grid_cords[c, j, :, :] = xy/np.array([h-1, w-1], dtype=np.float32) * 2.0 - 1.0
    grid_cords_tensor = torch.as_tensor(grid_cords).to(current_device)
    unary_all_views_joints = grid_sample(heatmaps, grid_cords_tensor)
    # unary_all_views_joints -> shape(4,16,16,4096)
    # grid_sample sampled every joint's point set on every joint's heatmap;
    # take the diagonal so joint j is scored on its own heatmap only.
    unary_all_views = torch.zeros(n,k,nbins).to(current_device)
    for j in range(k):
        unary_all_views[:,j,:] = unary_all_views_joints[:, j, j, :]
    # Sum scores over the n camera views.
    unary_tensor = torch.zeros(k, nbins).to(current_device)
    for una in unary_all_views:
        unary_tensor = torch.add(unary_tensor, una)
    return unary_tensor
def recursive_infer(initpose, cams, heatmaps, boxes, img_size, heatmap_size,
                    body, limb_length, grid_size, nbins, tolerance, config, **kwargs):
    """Refine a coarse 3d pose with one finer PSM inference step.

    A small local grid is built around each joint's current estimate, then
    unary/pairwise terms are recomputed and the tree inference is rerun.
    """
    current_device = kwargs['current_device']
    # One local grid per joint, centered at that joint's current estimate.
    grids = [compute_grid(grid_size, joint, nbins) for joint in initpose]
    unary = compute_unary_term(heatmaps, grids, boxes, cams, img_size)
    pairwise_np = compute_pairwise_constrain(body.skeleton, limb_length,
                                             grids, tolerance, **kwargs)
    # Move the pairwise tables onto the active device as float tensors.
    pairwise = {
        edge: torch.as_tensor(table, dtype=torch.float32).to(current_device)
        for edge, table in pairwise_np.items()
    }
    pose3d_cube = infer(unary, pairwise, body, config, **kwargs)
    return get_loc_from_cube_idx(grids, pose3d_cube)
def rpsm(cams, heatmaps, boxes, grid_center, limb_length, pairwise_constraint,
         config, **kwargs):
    """
    Recursive Pictorial Structure Model: one coarse PSM pass on a grid
    around the root, then ``recur_depth`` refinement passes on successively
    finer grids.

    Args:
        cams : camera parameters for each view
        heatmaps: 2d pose heatmaps (n, k, h, w)
        boxes: on which the heatmaps are computed; n dictionaries
        grid_center: 3d location of the root
        limb_length: template limb length
        pairwise_constrain: pre-computed pairwise terms (iteration 0 psm only)
    Returns:
        pose3d: 3d pose
    """
    image_size = config.NETWORK.IMAGE_SIZE
    heatmap_size = config.NETWORK.HEATMAP_SIZE
    first_nbins = config.PICT_STRUCT.FIRST_NBINS
    recur_nbins = config.PICT_STRUCT.RECUR_NBINS
    recur_depth = config.PICT_STRUCT.RECUR_DEPTH
    grid_size = config.PICT_STRUCT.GRID_SIZE
    tolerance = config.PICT_STRUCT.LIMB_LENGTH_TOLERANCE
    # Iteration 1: discretizing 3d space
    # body = HumanBody()
    # current_device = torch.device('cuda:{}'.format(pairwise_constraint.values()[0].get_device()))
    current_device = kwargs['current_device']
    body = kwargs['human_body']
    grid = compute_grid(grid_size, grid_center, first_nbins)
    heatmaps = torch.as_tensor(heatmaps, dtype=torch.float32).to(current_device) # todo: do this in dataloader
    extra_kwargs = kwargs
    # PSM
    do_bone_vectors = False
    if 'do_bone_vectors' in kwargs:
        if kwargs['do_bone_vectors']:
            do_bone_vectors = True
            bone_vectors = kwargs['bone_vectors']
    if do_bone_vectors:
        # merge limb length pairwise and bone orientation/vector pairwise term
        orient_pairwise = kwargs['orient_pairwise']
        new_pairwise_constrain = {}
        for node in body.skeleton:
            current = node['idx']
            children = node['children']
            bone_index = node['imubone']
            for idx_child, child in enumerate(children):
                constrain_array = pairwise_constraint[(current, child)]
                if bone_index[idx_child] >= 0: # if certain bone has imu
                    expect_orient_vector = bone_vectors[bone_index[idx_child]]
                    expect_orient_vector = torch.as_tensor(expect_orient_vector, dtype=torch.float32).to(current_device)
                    norm_expect_orient_vector = expect_orient_vector / (torch.norm(expect_orient_vector) + 1e-9)
                    norm_expect_orient_vector = norm_expect_orient_vector.view(-1) # (3,)
                    acutal_orient_vector = orient_pairwise # (4096, 4096, 3)
                    # Cosine between each bin-pair direction and the
                    # (negated) IMU bone direction scales the length prior.
                    cos_theta = torch.matmul(acutal_orient_vector, -norm_expect_orient_vector)
                    # todo we can add cos_theta activation func here
                    # acutal_orient_vector refer to 2 bin direction
                    # norm_expect_orient_vector refer to groundtruth direction
                    constrain_array = torch.mul(constrain_array, cos_theta)
                new_pairwise_constrain[(current, child)] = constrain_array
        pairwise_constraint = new_pairwise_constrain
    unary = compute_unary_term(heatmaps, [grid], boxes, cams, image_size)
    pose3d_as_cube_idx = infer(unary, pairwise_constraint, body, config, **extra_kwargs)
    pose3d = get_loc_from_cube_idx([grid], pose3d_as_cube_idx)
    # Refinement: each pass shrinks the per-joint grid by a factor of the
    # previous pass's bin count.
    cur_grid_size = grid_size / first_nbins
    for i in range(recur_depth):
        pose3d = recursive_infer(pose3d, cams, heatmaps, boxes, image_size,
                                 heatmap_size, body, limb_length, cur_grid_size,
                                 recur_nbins, tolerance, config, **extra_kwargs)
        cur_grid_size = cur_grid_size / recur_nbins
    return pose3d
class RpsmFunc(nn.Module):
    """Module wrapper around :func:`rpsm`.

    Registers the pre-computed pairwise tables (and, optionally, the
    orientation pairwise tensor) as buffers so they follow the module across
    devices/replication, then forwards calls to ``rpsm``.
    """

    def __init__(self, pairwise_constraint, human_body, **kwargs):
        super().__init__()
        # self.pairwise_constraint = pairwise_constraint
        # self.register_parameter('pairwise_constraint', pairwise_constraint) # auto to dev when replicating
        # register pairwise constraint in buff
        self.current_device = None
        self.pairwise_constraint = dict()
        for idx, k in enumerate(pairwise_constraint):
            buff_name = 'pairwise_constraint_{}'.format(idx)
            self.register_buffer(buff_name, pairwise_constraint[k])
            # Re-read through __getattr__ so the dict holds the registered
            # buffer (which tracks device moves), not the original tensor.
            self.pairwise_constraint[k] = self.__getattr__(buff_name)
        self.human_body = human_body
        self.do_bone_vectors = kwargs['do_bone_vectors']
        if self.do_bone_vectors:
            orient_pairwise = kwargs['orient_pairwise']
            self.register_buffer('orient_pairwise', orient_pairwise)

    def __call__(self, *args, **kwargs):
        # Lazily derive the device from the first registered buffer.
        if self.current_device is None:
            self.current_device = torch.device('cuda:{}'.format(list(self.pairwise_constraint.values())[0].get_device()))
        extra_kwargs = dict()
        extra_kwargs['human_body'] = self.human_body
        extra_kwargs['current_device'] = self.current_device
        if self.do_bone_vectors:
            extra_kwargs['orient_pairwise'] = self.orient_pairwise
            # do_bone_vectors has already been in kwargs
        return rpsm(pairwise_constraint=self.pairwise_constraint, **kwargs, **extra_kwargs)
| 42.875 | 121 | 0.626049 |
acea06c923a945fea15f85271e64e07d258eaee8 | 3,771 | py | Python | XGRN_dream.py | geodimitrak/XGBoost-GRN | 8d75953e5d013cc42b9bdadd2ef84d17d82583e1 | [
"BSD-2-Clause"
] | null | null | null | XGRN_dream.py | geodimitrak/XGBoost-GRN | 8d75953e5d013cc42b9bdadd2ef84d17d82583e1 | [
"BSD-2-Clause"
] | null | null | null | XGRN_dream.py | geodimitrak/XGBoost-GRN | 8d75953e5d013cc42b9bdadd2ef84d17d82583e1 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 12:51:42 2021
@author: Georgios N. Dimitrakopoulos, geodimitrak@upatras.gr
"""
from xgboost import XGBRegressor
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error, roc_auc_score
import pickle
import random
import time
random.seed(2021)

# --- experiment configuration -------------------------------------------
DREAM = 5   # DREAM challenge edition
DS = 1      # dataset index within that edition
SV = 0.5    # supervision ratio: fraction of known targets kept for training
P = 1 - SV  # fraction of known targets held out per TF
SAVE = True

file = './data/dream' + str(DREAM) + str(DS) + '.pkl'
print(file)
print(SV)
# Load TF expression, target expression, and the gold-standard adjacency.
# Use a context manager so the handle is closed deterministically instead
# of relying on refcounting of open(...) inside the call.
with open(file, 'rb') as fh:
    (gex_tf, gex_target, adj) = pickle.load(fh)
adj[adj > 1] = 1
# Column vector of all target expression values, used for batch prediction.
gex_target_v = gex_target.reshape((gex_target.shape[0] * gex_target.shape[1], 1))
targets_per_tf = np.sum(adj, axis=1)

# Final predicted networks, one score matrix per error metric.
net_mse = np.zeros(adj.shape)
net_rsq = np.zeros(adj.shape)
net_mae = np.zeros(adj.shape)
tr = []
start = time.time()
for i in range(len(gex_tf)):
    if targets_per_tf[i] == 0:
        tr.append([])  # no known targets for this TF
        continue
    # Split this TF's known targets into P% test and the rest as train.
    known = adj[i, :] > 0
    targets = np.where(known)[0]
    n_test = np.ceil(len(targets) * P).astype('int32')
    random.shuffle(targets)
    targets_test = targets[0:n_test]
    targets_train = targets[n_test:len(targets)]
    # Fix for a small number of known targets (avoid an empty train set).
    if targets_per_tf[i] == 1:
        targets_test = []
        targets_train = targets[0:1]
    else:
        if len(targets_train) == 0:
            targets_test = targets[1:len(targets)]
            targets_train = targets[0:1]
    tr.append(targets_train)
    print(i, len(targets), len(targets_test), len(targets_train))
    y = gex_tf[i, ].reshape(-1, 1)
    mse = np.zeros((gex_target.shape[0], len(targets_train)))
    rsq = np.zeros((gex_target.shape[0], len(targets_train)))
    mae = np.zeros((gex_target.shape[0], len(targets_train)))
    for j in range(len(targets_train)):
        g2 = targets_train[j]
        x = gex_target[g2, ].reshape(-1, 1)
        model = XGBRegressor(objective='reg:squarederror', eval_metric='rmse', max_depth=5, eta=0.1, n_estimators=50, seed=1)
        model.fit(x, y)
        # Predict for all candidate targets in one step and reshape back.
        y_pred = model.predict(gex_target_v)
        y_pred = y_pred.reshape(gex_target.shape)
        for k in range(len(gex_target)):
            mse[k, j] = mean_squared_error(y, y_pred[k, ])
            mae[k, j] = mean_absolute_error(y, y_pred[k, ])
            rsq[k, j] = r2_score(y, y_pred[k, ])
    # Keep the best prediction among the training targets for each metric.
    net_mse[i, :] = np.min(mse, axis=1)
    net_rsq[i, :] = np.max(rsq, axis=1)
    net_mae[i, :] = np.min(mae, axis=1)
end = time.time()
dt = end - start
print('dt=' + str(dt))

# Calculate AUROC: vectorize predictions and ground truth. The error
# metrics (MSE/MAE) rank inversely with edge confidence, hence "-".
a_v = adj.reshape((adj.shape[0] * adj.shape[1], 1))
n_v = net_mse.reshape((net_mse.shape[0] * net_mse.shape[1], 1))
auc1_all = roc_auc_score(a_v, -n_v)  # error: smaller is better, use "-" sign
n_v = net_rsq.reshape((net_rsq.shape[0] * net_rsq.shape[1], 1))
auc2_all = roc_auc_score(a_v, n_v)
n_v = net_mae.reshape((net_mae.shape[0] * net_mae.shape[1], 1))
auc3_all = roc_auc_score(a_v, -n_v)  # error: smaller is better, use "-" sign
print(auc1_all)
print(auc2_all)
print(auc3_all)
if SAVE:
    # Close the output handle deterministically as well.
    with open('results_dream' + str(DREAM) + str(DS) + '_' + str(int(100 * SV)) + '.pkl', 'wb') as fh:
        pickle.dump((dt, net_mse, net_rsq, net_mae, tr), fh)
acea06e3fb5631bd8cce32e75c33f5706e6ea7e6 | 458 | py | Python | approved_accounting/approved_accounting/doctype/approved_purchase_invoice/approved_purchase_invoice.py | theonlynexus/erpnext_approved_accounting | 0d1882bf8fcde1bd246c76b5f888086741b9334b | [
"MIT"
] | null | null | null | approved_accounting/approved_accounting/doctype/approved_purchase_invoice/approved_purchase_invoice.py | theonlynexus/erpnext_approved_accounting | 0d1882bf8fcde1bd246c76b5f888086741b9334b | [
"MIT"
] | null | null | null | approved_accounting/approved_accounting/doctype/approved_purchase_invoice/approved_purchase_invoice.py | theonlynexus/erpnext_approved_accounting | 0d1882bf8fcde1bd246c76b5f888086741b9334b | [
"MIT"
] | 3 | 2019-11-25T20:06:41.000Z | 2020-09-14T15:49:27.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, One Asset Management and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from erpnext.accounts.doctype.purchase_invoice.purchase_invoice import PurchaseInvoice
class ApprovedPurchaseInvoice(PurchaseInvoice):
    """Purchase Invoice variant for the Approved Accounting workflow.

    Currently defers all behavior to the standard ERPNext PurchaseInvoice.
    """

    def __init__(self, *args, **kwargs):
        super(ApprovedPurchaseInvoice, self).__init__(*args, **kwargs)

    def validate(self):
        # Use the explicit super(...) form consistently with __init__: the
        # module imports unicode_literals for Python 2 compatibility, where
        # a bare super() call is invalid.
        super(ApprovedPurchaseInvoice, self).validate()
| 30.533333 | 86 | 0.781659 |
acea074c61bbd349933374b656c87f6c52780547 | 367 | py | Python | src/bowcaster/servers/__init__.py | zcutlip/bowcaster | 17d69c1ad973356b1ea42ee9cb80525bfef8c255 | [
"MIT"
] | 108 | 2015-01-17T14:04:43.000Z | 2021-03-11T12:18:46.000Z | src/bowcaster/servers/__init__.py | zcutlip/bowcaster | 17d69c1ad973356b1ea42ee9cb80525bfef8c255 | [
"MIT"
] | 1 | 2015-01-20T02:45:10.000Z | 2015-01-20T02:45:10.000Z | src/bowcaster/servers/__init__.py | zcutlip/bowcaster | 17d69c1ad973356b1ea42ee9cb80525bfef8c255 | [
"MIT"
] | 33 | 2015-01-04T21:27:53.000Z | 2021-02-14T01:26:37.000Z | # Copyright (c) 2013
# - Zachary Cutlip <uid000@gmail.com>
# - Tactical Network Solutions, LLC
#
# See LICENSE.txt for more details.
#
class ServerException(Exception):
    """Base exception for errors raised by bowcaster server classes."""
    pass

# Explicit relative imports work on Python 2.6+ and Python 3 alike; the
# previous implicit-relative form (from connectback_server import *) is a
# Python 2-only construct. Import order is preserved.
from .connectback_server import *
from .multiplexing_server import *
from .http_server import *

__all__=["ConnectbackServer","TrojanServer","MultiplexingServer","HTTPConnectbackServer"]
acea0757a99b742674ab18359f0d231bf46cd2cf | 88,826 | py | Python | sapp/ui/tests/interactive_test.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 74 | 2020-12-18T20:04:30.000Z | 2022-03-22T22:26:02.000Z | sapp/ui/tests/interactive_test.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 61 | 2020-12-21T21:33:05.000Z | 2022-01-27T21:22:20.000Z | sapp/ui/tests/interactive_test.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 20 | 2021-04-08T01:28:53.000Z | 2022-03-22T22:26:05.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os
import sys
from datetime import datetime
from io import StringIO
from typing import List
from unittest import TestCase
from unittest.mock import mock_open, patch
from sqlalchemy.orm import Session
from ...db import DB, DBType
from ...decorators import UserError
from ...models import (
DBID,
IssueInstanceSharedTextAssoc,
IssueInstanceTraceFrameAssoc,
IssueStatus,
Run,
RunStatus,
SharedText,
SharedTextKind,
SourceLocation,
TraceFrame,
TraceFrameLeafAssoc,
TraceKind,
)
from ...models import create as create_models
from ...pipeline.pysa_taint_parser import Parser
from ...tests.fake_object_generator import FakeObjectGenerator
from ..interactive import (
Interactive,
IssueQueryResult,
TraceFrameQueryResult,
TraceTuple,
)
class InteractiveTest(TestCase):
    def setUp(self) -> None:
        """Build an in-memory DB/Interactive and capture stdout/stderr.

        The captured streams let tests assert on console output; tearDown
        restores the real streams.
        """
        self.db = DB(DBType.MEMORY)
        create_models(self.db)
        self.interactive = Interactive(
            database=self.db, repository_directory="", parser_class=Parser
        )
        self.stdout = StringIO()
        self.stderr = StringIO()
        sys.stdout = self.stdout  # redirect output
        sys.stderr = self.stderr  # redirect output
        self.fakes = FakeObjectGenerator()
    def tearDown(self) -> None:
        """Restore the real stdout/stderr redirected by setUp."""
        sys.stdout = sys.__stdout__  # reset redirect
        sys.stderr = sys.__stderr__  # reset redirect
    def _clear_stdout(self):
        """Discard captured output so later assertions see a fresh buffer."""
        self.stdout = StringIO()
        sys.stdout = self.stdout
def _add_to_session(self, session, data):
if not isinstance(data, list):
session.add(data)
return
for row in data:
session.add(row)
def _frame_to_query_result(
self, session: Session, trace_frame: TraceFrame
) -> TraceFrameQueryResult:
caller = (
session.query(SharedText.contents)
.filter(SharedText.id == trace_frame.caller_id)
.scalar()
)
callee = (
session.query(SharedText.contents)
.filter(SharedText.id == trace_frame.callee_id)
.scalar()
)
filename = (
session.query(SharedText.contents)
.filter(SharedText.id == trace_frame.filename_id)
.scalar()
)
return TraceFrameQueryResult(
id=trace_frame.id,
caller=caller,
caller_port=trace_frame.caller_port,
callee=callee,
callee_port=trace_frame.callee_port,
caller_id=trace_frame.caller_id,
callee_id=trace_frame.callee_id,
callee_location=trace_frame.callee_location,
# pyre-fixme[6]: Expected `Optional[TraceKind]` for 9th param but got `str`.
kind=trace_frame.kind,
filename=filename,
)
def testState(self) -> None:
self.interactive._current_run_id = DBID(1)
self.interactive.current_issue_instance_id = DBID(2)
self.interactive.current_frame_id = DBID(3)
self.interactive.sources = {"1"}
self.interactive.sinks = {"2"}
self.interactive.state()
output = self.stdout.getvalue()
self.assertIn("Database: memory:sapp.db", output)
self.assertIn("Repository directory: ", output)
self.assertIn("Current run: 1", output)
self.assertIn("Current issue instance: 2", output)
self.assertIn("Current trace frame: 3", output)
self.assertIn("Sources filter: {'1'}", output)
self.assertIn("Sinks filter: {'2'}", output)
def testListIssuesBasic(self):
run = self.fakes.run()
self.fakes.issue()
self.fakes.instance(
message="message1", filename="file.py", callable="module.function1"
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.issues()
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertIn("Code: 6016", output)
self.assertIn("Message: message1", output)
self.assertIn("Callable: module.function1", output)
self.assertIn("Location: file.py:6|7|8", output)
    def testListIssuesFromLatestRun(self):
        """issues() only lists instances belonging to the most recent run."""
        self.fakes.issue()
        run1 = self.fakes.run()
        self.fakes.instance() # part of run1
        self.fakes.save_all(self.db) # early flush to resolve DBID's
        run2 = self.fakes.run()
        self.fakes.instance() # part of run2
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run1)
            session.add(run2)
            session.commit()
        self.interactive.setup()
        self.interactive.issues()
        output = self.stdout.getvalue().strip()
        # Only the instance from run2 (the latest run) should appear.
        self.assertNotIn("Issue 1", output)
        self.assertIn("Issue 2", output)
    def _list_issues_filter_setup(self):
        """Seed one run with three issues that differ in status, callable,
        filename, and min trace lengths (1, 2, 3) for the filter tests."""
        run = self.fakes.run()
        issue1 = self.fakes.issue(status="do_not_care")
        self.fakes.instance(
            issue_id=issue1.id,
            callable="module.sub.function1",
            filename="module/sub.py",
            min_trace_length_to_sources=1,
            min_trace_length_to_sinks=1,
        )
        self.fakes.save_all(self.db)
        issue2 = self.fakes.issue(status="valid_bug")
        self.fakes.instance(
            issue_id=issue2.id,
            callable="module.sub.function2",
            filename="module/sub.py",
            min_trace_length_to_sources=2,
            min_trace_length_to_sinks=2,
        )
        self.fakes.save_all(self.db)
        issue3 = self.fakes.issue(status="bad_practice")
        self.fakes.instance(
            issue_id=issue3.id,
            callable="module.function3",
            filename="module/__init__.py",
            min_trace_length_to_sources=3,
            min_trace_length_to_sinks=3,
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
    def testListIssuesFilterCodes(self):
        """issues(codes=...) accepts an int or list of ints and filters by code."""
        self._list_issues_filter_setup()
        self.interactive.setup()
        # A non-int argument is rejected with an error on stderr.
        self.interactive.issues(codes="a string")
        stderr = self.stderr.getvalue().strip()
        self.assertIn("'codes' should be", stderr)
        self.interactive.issues(codes=6016)
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        self.interactive.issues(codes=[6017, 6018])
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
        self.assertIn("Issue 2", output)
        self.assertIn("Issue 3", output)
    def testListIssuesFilterCallables(self):
        """issues(callables=...) takes SQL LIKE patterns (str or list of str)."""
        self._list_issues_filter_setup()
        self.interactive.setup()
        # A non-str argument is rejected with an error on stderr.
        self.interactive.issues(callables=1234)
        stderr = self.stderr.getvalue().strip()
        self.assertIn("'callables' should be", stderr)
        self.interactive.issues(callables="%sub%")
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        self.interactive.issues(callables=["%function3"])
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertIn("Issue 3", output)
    def testListIssuesFilterFilenames(self):
        """issues(filenames=...) takes SQL LIKE patterns (str or list of str)."""
        self._list_issues_filter_setup()
        self.interactive.setup()
        # A non-str argument is rejected with an error on stderr.
        self.interactive.issues(filenames=1234)
        stderr = self.stderr.getvalue().strip()
        self.assertIn("'filenames' should be", stderr)
        self.interactive.issues(filenames="module/s%")
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        self.interactive.issues(filenames=["%__init__.py"])
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertIn("Issue 3", output)
    def testListIssuesFilterMinTraceLength(self):
        """Validate type checks, mutual exclusion, and filtering behavior of
        the exact/max trace-length-to-source/sink parameters of issues().

        The fixture issues 1/2/3 have min trace lengths 1/2/3 in both
        directions (see _list_issues_filter_setup).
        """
        self._list_issues_filter_setup()
        self.interactive.setup()
        # Non-int values are rejected for all four parameters.
        self.interactive.issues(exact_trace_length_to_sources="1")
        stderr = self.stderr.getvalue().strip()
        self.assertIn("'exact_trace_length_to_sources' should be", stderr)
        self._clear_stdout()
        self.interactive.issues(exact_trace_length_to_sinks="1")
        stderr = self.stderr.getvalue().strip()
        self.assertIn("'exact_trace_length_to_sinks' should be", stderr)
        self._clear_stdout()
        self.interactive.issues(max_trace_length_to_sources="1")
        stderr = self.stderr.getvalue().strip()
        self.assertIn("'max_trace_length_to_sources' should be", stderr)
        self._clear_stdout()
        self.interactive.issues(max_trace_length_to_sinks="1")
        stderr = self.stderr.getvalue().strip()
        self.assertIn("'max_trace_length_to_sinks' should be", stderr)
        self._clear_stdout()
        # exact_* and max_* are mutually exclusive per direction.
        self.interactive.issues(
            exact_trace_length_to_sources=1, max_trace_length_to_sources=1
        )
        stderr = self.stderr.getvalue().strip()
        self.assertIn("can't be set together", stderr)
        self._clear_stdout()
        self.interactive.issues(
            exact_trace_length_to_sinks=1, max_trace_length_to_sinks=1
        )
        stderr = self.stderr.getvalue().strip()
        self.assertIn("can't be set together", stderr)
        self._clear_stdout()
        # Exact length 1 matches only issue 1.
        self.interactive.issues(exact_trace_length_to_sources=1)
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        self.interactive.issues(exact_trace_length_to_sinks=1)
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        # Max length 1 matches only issue 1; max length 2 adds issue 2.
        self.interactive.issues(max_trace_length_to_sources=1)
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        self.interactive.issues(max_trace_length_to_sinks=1)
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        self.interactive.issues(max_trace_length_to_sources=2)
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        self.interactive.issues(max_trace_length_to_sinks=2)
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        # Combining both directions intersects the constraints.
        self.interactive.issues(
            max_trace_length_to_sources=1, max_trace_length_to_sinks=1
        )
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
        self.interactive.issues(
            max_trace_length_to_sources=1, max_trace_length_to_sinks=2
        )
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertNotIn("Issue 3", output)
        self._clear_stdout()
    def testListIssuesFilterAllFeature(self):
        """issues(all_features=...) shows an issue only if it carries every
        listed feature; issue 1 is tagged feature1 and feature2."""
        self._list_issues_filter_setup()
        self.fakes.instance()
        feature1 = self.fakes.feature("via:feature1")
        feature2 = self.fakes.feature("via:feature2")
        self.fakes.feature("via:feature3")
        self.fakes.save_all(self.db)
        assocs = [
            IssueInstanceSharedTextAssoc(
                shared_text_id=feature1.id, issue_instance_id=1
            ),
            IssueInstanceSharedTextAssoc(
                shared_text_id=feature2.id, issue_instance_id=1
            ),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, assocs)
            session.commit()
        self.interactive.setup()
        self.interactive.issues(all_features="via:feature1")
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self._clear_stdout()
        self.interactive.issues(all_features=["via:feature1", "via:feature2"])
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self._clear_stdout()
        # feature3 is not attached to issue 1, so requiring it hides the issue.
        self.interactive.issues(all_features=["via:feature3"])
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
        self._clear_stdout()
        self.interactive.issues(all_features=["via:feature1", "via:feature3"])
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
    def testListIssuesFilterAnyFeature(self):
        """issues(any_features=...) shows an issue if it carries at least one
        listed feature; issue 1 is tagged feature1 and feature2."""
        self._list_issues_filter_setup()
        self.fakes.instance()
        feature1 = self.fakes.feature("via:feature1")
        feature2 = self.fakes.feature("via:feature2")
        self.fakes.feature("via:feature3")
        self.fakes.save_all(self.db)
        assocs = [
            IssueInstanceSharedTextAssoc(
                shared_text_id=feature1.id, issue_instance_id=1
            ),
            IssueInstanceSharedTextAssoc(
                shared_text_id=feature2.id, issue_instance_id=1
            ),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, assocs)
            session.commit()
        self.interactive.setup()
        self.interactive.issues(any_features="via:feature1")
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self._clear_stdout()
        self.interactive.issues(any_features=["via:feature1", "via:feature2"])
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self._clear_stdout()
        self.interactive.issues(any_features=["via:feature1", "via:feature3"])
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self._clear_stdout()
        # Only-feature3 matches nothing attached to issue 1.
        self.interactive.issues(any_features=["via:feature3"])
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
    def testListIssuesFilterExcludeFeature(self):
        """issues(exclude_features=...) hides an issue carrying any listed
        feature; issue 1 is tagged feature1 and feature2."""
        self._list_issues_filter_setup()
        self.fakes.instance()
        feature1 = self.fakes.feature("via:feature1")
        feature2 = self.fakes.feature("via:feature2")
        self.fakes.feature("via:feature3")
        self.fakes.save_all(self.db)
        assocs = [
            IssueInstanceSharedTextAssoc(
                shared_text_id=feature1.id, issue_instance_id=1
            ),
            IssueInstanceSharedTextAssoc(
                shared_text_id=feature2.id, issue_instance_id=1
            ),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, assocs)
            session.commit()
        self.interactive.setup()
        self.interactive.issues(exclude_features="via:feature1")
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
        self._clear_stdout()
        self.interactive.issues(exclude_features=["via:feature1", "via:feature2"])
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
        self._clear_stdout()
        self.interactive.issues(exclude_features=["via:feature1", "via:feature3"])
        output = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", output)
        self._clear_stdout()
        # Excluding an unattached feature leaves the issue visible.
        self.interactive.issues(exclude_features=["via:feature3"])
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
    def testListIssuesFilterAllFeatureAndAnyFeature(self):
        """all_features and any_features compose: a shown issue must carry
        every all_features entry plus at least one any_features entry."""
        self._list_issues_filter_setup()
        feature1 = self.fakes.feature("via:feature1")
        feature2 = self.fakes.feature("via:feature2")
        feature3 = self.fakes.feature("via:feature3")
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            # Issue 1 carries features 1-3; issue 2 carries features 1-2.
            self._add_to_session(
                session,
                [
                    IssueInstanceSharedTextAssoc(
                        shared_text_id=feature1.id, issue_instance_id=1
                    ),
                    IssueInstanceSharedTextAssoc(
                        shared_text_id=feature2.id, issue_instance_id=1
                    ),
                    IssueInstanceSharedTextAssoc(
                        shared_text_id=feature3.id, issue_instance_id=1
                    ),
                    IssueInstanceSharedTextAssoc(
                        shared_text_id=feature1.id, issue_instance_id=2
                    ),
                    IssueInstanceSharedTextAssoc(
                        shared_text_id=feature2.id, issue_instance_id=2
                    ),
                ],
            )
            session.commit()
        self.interactive.setup()
        self.interactive.issues(
            any_features=["via:feature2", "via:feature3"],
            all_features="via:feature1",
        )
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertIn("Issue 2", output)
    def testListIssuesFilterStatuses(self):
        """issues(statuses=...) validates the type and filters by issue status."""
        self._list_issues_filter_setup()
        self.interactive.setup()
        # A non-str argument is rejected with an error on stderr.
        self.interactive.issues(statuses=1234)
        stderr = self.stderr.getvalue().strip()
        self.assertIn("'statuses' should be", stderr)
        self.interactive.issues(statuses=["do_not_care", "bad_practice"])
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
        self.assertIn("Issue 3", output)
def testNoRunsFound(self):
self.interactive.setup()
stderr = self.stderr.getvalue().strip()
self.assertIn("No runs found.", stderr)
def testListRuns(self):
runs = [
Run(id=1, date=datetime.now(), status=RunStatus.FINISHED),
Run(id=2, date=datetime.now(), status=RunStatus.INCOMPLETE),
Run(id=3, date=datetime.now(), status=RunStatus.FINISHED),
]
with self.db.make_session() as session:
self._add_to_session(session, runs)
session.commit()
self.interactive.setup()
self.interactive.runs()
output = self.stdout.getvalue().strip()
self.assertIn("Run 1", output)
self.assertNotIn("Run 2", output)
self.assertIn("Run 3", output)
    def testSetRun(self):
        """run(id) switches the current run used by subsequent issues() calls."""
        self.fakes.issue()
        run1 = self.fakes.run()
        self.fakes.instance(message="Issue message")
        self.fakes.save_all(self.db)
        run2 = self.fakes.run()
        self.fakes.instance(message="Issue message")
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run1)
            session.add(run2)
            session.commit()
        self.interactive.setup()
        # Select the older run explicitly; only its instance should be listed.
        self.interactive.run(1)
        self.interactive.issues()
        output = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", output)
        self.assertNotIn("Issue 2", output)
    def testSetRunNonExistent(self):
        """run(id) rejects incomplete runs and ids that don't exist."""
        runs = [
            Run(id=1, date=datetime.now(), status=RunStatus.FINISHED),
            # Run 2 exists but is INCOMPLETE, so it can't be selected either.
            Run(id=2, date=datetime.now(), status=RunStatus.INCOMPLETE),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, runs)
            session.commit()
        self.interactive.setup()
        self.interactive.run(2)
        self.interactive.run(3)
        stderr = self.stderr.getvalue().strip()
        self.assertIn("Run 2 doesn't exist", stderr)
        self.assertIn("Run 3 doesn't exist", stderr)
    def testSetLatestRun(self):
        """latest_run(kind) selects the newest finished run of that kind and
        leaves the selection unchanged for unknown kinds."""
        runs = [
            Run(id=1, date=datetime.now(), status=RunStatus.FINISHED, kind="a"),
            Run(id=2, date=datetime.now(), status=RunStatus.FINISHED, kind="a"),
            Run(id=3, date=datetime.now(), status=RunStatus.FINISHED, kind="a"),
            Run(id=4, date=datetime.now(), status=RunStatus.FINISHED, kind="b"),
            Run(id=5, date=datetime.now(), status=RunStatus.FINISHED, kind="b"),
            Run(id=6, date=datetime.now(), status=RunStatus.FINISHED, kind="c"),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, runs)
            session.commit()
        self.interactive.latest_run("c")
        self.assertEqual(int(self.interactive._current_run_id), 6)
        self.interactive.latest_run("b")
        self.assertEqual(int(self.interactive._current_run_id), 5)
        self.interactive.latest_run("a")
        self.assertEqual(int(self.interactive._current_run_id), 3)
        # Unknown kind: error message, current run id stays at 3.
        self.interactive.latest_run("d")
        self.assertEqual(int(self.interactive._current_run_id), 3)
        self.assertIn("No runs with kind 'd'", self.stderr.getvalue())
    def testSetIssue(self):
        """issue(id) selects an instance and prints only that instance."""
        run = self.fakes.run()
        self.fakes.issue()
        self.fakes.instance(message="Issue message")
        self.fakes.instance(message="Issue message")
        self.fakes.instance(message="Issue message")
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive.setup()
        self.interactive.issue(2)
        self.assertEqual(int(self.interactive.current_issue_instance_id), 2)
        stdout = self.stdout.getvalue().strip()
        self.assertNotIn("Issue 1", stdout)
        self.assertIn("Issue 2", stdout)
        self.assertNotIn("Issue 3", stdout)
        # Re-selecting changes the current instance id accordingly.
        self.interactive.issue(1)
        self.assertEqual(int(self.interactive.current_issue_instance_id), 1)
        stdout = self.stdout.getvalue().strip()
        self.assertIn("Issue 1", stdout)
        self.assertNotIn("Issue 3", stdout)
def testSetIssueNonExistent(self):
run = self.fakes.run()
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.issue(1)
stderr = self.stderr.getvalue().strip()
self.assertIn("Issue 1 doesn't exist", stderr)
    def testSetIssueUpdatesRun(self):
        """Selecting an issue from an older run switches the current run."""
        self.fakes.issue()
        run1 = self.fakes.run()
        self.fakes.instance()
        self.fakes.save_all(self.db)
        run2 = self.fakes.run()
        self.fakes.instance()
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run1)
            session.add(run2)
            session.commit()
        self.interactive.setup()
        # setup() selects the latest run (2); issue 1 belongs to run 1.
        self.assertEqual(int(self.interactive._current_run_id), 2)
        self.interactive.issue(1)
        self.assertEqual(int(self.interactive._current_run_id), 1)
    def testGetSources(self):
        """_get_leaves_issue_instance returns only the sources associated
        with the given instance (source3 is unassociated)."""
        self.fakes.instance()
        source1 = self.fakes.source("source1")
        source2 = self.fakes.source("source2")
        self.fakes.source("source3")
        self.fakes.save_all(self.db)
        assocs = [
            IssueInstanceSharedTextAssoc(
                shared_text_id=source1.id, issue_instance_id=1
            ),
            IssueInstanceSharedTextAssoc(
                shared_text_id=source2.id, issue_instance_id=1
            ),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, assocs)
            session.commit()
            self.interactive.setup()
            sources = self.interactive._get_leaves_issue_instance(
                session, 1, SharedTextKind.SOURCE
            )
        self.assertEqual(len(sources), 2)
        self.assertIn("source1", sources)
        self.assertIn("source2", sources)
    def testGetSinks(self):
        """_get_leaves_issue_instance returns only the sinks associated
        with the given instance (sink3 is unassociated)."""
        self.fakes.instance()
        sink1 = self.fakes.sink("sink1")
        sink2 = self.fakes.sink("sink2")
        self.fakes.sink("sink3")
        self.fakes.save_all(self.db)
        assocs = [
            IssueInstanceSharedTextAssoc(shared_text_id=sink1.id, issue_instance_id=1),
            IssueInstanceSharedTextAssoc(shared_text_id=sink2.id, issue_instance_id=1),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, assocs)
            session.commit()
            self.interactive.setup()
            sinks = self.interactive._get_leaves_issue_instance(
                session, 1, SharedTextKind.SINK
            )
        self.assertEqual(len(sinks), 2)
        self.assertIn("sink1", sinks)
        self.assertIn("sink2", sinks)
    def testGetFeatures(self):
        """_get_leaves_issue_instance returns only the features associated
        with the given instance (feature3 is unassociated)."""
        self.fakes.instance()
        feature1 = self.fakes.feature("via:feature1")
        feature2 = self.fakes.feature("via:feature2")
        self.fakes.feature("via:feature3")
        self.fakes.save_all(self.db)
        assocs = [
            IssueInstanceSharedTextAssoc(
                shared_text_id=feature1.id, issue_instance_id=1
            ),
            IssueInstanceSharedTextAssoc(
                shared_text_id=feature2.id, issue_instance_id=1
            ),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, assocs)
            session.commit()
            self.interactive.setup()
            features = self.interactive._get_leaves_issue_instance(
                session, 1, SharedTextKind.FEATURE
            )
        self.assertEqual(len(features), 2)
        self.assertIn("via:feature1", features)
        self.assertIn("via:feature2", features)
def _basic_trace_frames(self):
return [
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="call2",
callee_port="param0",
location=(1, 1, 1),
),
self.fakes.precondition(
caller="call2",
caller_port="param0",
callee="leaf",
callee_port="sink",
location=(1, 2, 1),
),
]
    def testCreateTraceTuples(self):
        """_create_trace_tuples wraps (frame, branch_count) pairs one-to-one,
        preserving order and branch counts."""
        # reverse order
        postcondition_traces = [
            (
                TraceFrameQueryResult(
                    id=DBID(1),
                    callee="call3",
                    callee_port="result",
                    filename="file3.py",
                    callee_location=SourceLocation(1, 1, 3),
                    caller="main",
                    caller_port="root",
                ),
                1,
            ),
            (
                TraceFrameQueryResult(
                    id=DBID(2),
                    callee="call2",
                    callee_port="result",
                    caller="dummy caller",
                    caller_port="dummy caller",
                    filename="file2.py",
                    callee_location=SourceLocation(1, 1, 2),
                ),
                2,
            ),
            (
                TraceFrameQueryResult(
                    id=DBID(3),
                    callee="leaf",
                    callee_port="source",
                    caller="dummy caller",
                    caller_port="dummy caller",
                    filename="file1.py",
                    callee_location=SourceLocation(1, 1, 1),
                ),
                3,
            ),
        ]
        trace_tuples = self.interactive._create_trace_tuples(postcondition_traces)
        self.assertEqual(len(trace_tuples), 3)
        self.assertEqual(
            trace_tuples,
            [
                TraceTuple(postcondition_traces[0][0], 1),
                TraceTuple(postcondition_traces[1][0], 2),
                TraceTuple(postcondition_traces[2][0], 3),
            ],
        )
    def testOutputTraceTuples(self):
        """_output_trace_tuples renders the trace table, marks the current
        frame index with '-->', and (with the flag set) prints '--F:'
        feature annotations under frames that carry shared texts."""
        features = [
            SharedText(kind=SharedTextKind.FEATURE, contents="one"),
            SharedText(kind=SharedTextKind.FEATURE, contents="two"),
            SharedText(kind=SharedTextKind.FEATURE, contents="three"),
        ]
        trace_tuples = [
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(1),
                    caller="unused",
                    caller_port="unused",
                    callee="leaf",
                    callee_port="source",
                    filename="file1.py",
                    callee_location=SourceLocation(1, 1, 1),
                )
            ),
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(2),
                    caller="unused",
                    caller_port="unused",
                    callee="call2",
                    callee_port="result",
                    filename="file2.py",
                    callee_location=SourceLocation(1, 1, 2),
                    shared_texts=[features[0], features[1]],
                )
            ),
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(3),
                    caller="unused",
                    caller_port="unused",
                    callee="call3",
                    callee_port="result",
                    filename="file3.py",
                    callee_location=SourceLocation(1, 1, 3),
                )
            ),
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(4),
                    caller="unused",
                    caller_port="unused",
                    callee="main",
                    callee_port="root",
                    filename="file4.py",
                    callee_location=SourceLocation(1, 1, 4),
                    shared_texts=[features[1], features[2]],
                )
            ),
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(5),
                    caller="unused",
                    caller_port="unused",
                    callee="call4",
                    callee_port="param0",
                    filename="file4.py",
                    callee_location=SourceLocation(1, 1, 4),
                )
            ),
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(6),
                    caller="unused",
                    caller_port="unused",
                    callee="call5",
                    callee_port="param1",
                    filename="file5.py",
                    callee_location=SourceLocation(1, 1, 5),
                    shared_texts=[features[0], features[1], features[2]],
                )
            ),
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(7),
                    caller="unused",
                    caller_port="unused",
                    callee="leaf",
                    callee_port="sink",
                    filename="file6.py",
                    callee_location=SourceLocation(1, 1, 6),
                )
            ),
        ]
        # Cursor on frame 2, features hidden.
        self.interactive.current_trace_frame_index = 1
        self.interactive._output_trace_tuples(trace_tuples)
        output = self.stdout.getvalue()
        self.assertEqual(
            output.split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " 1 leaf source file1.py:1|1|1",
                " --> 2 call2 result file2.py:1|1|2",
                " 3 call3 result file3.py:1|1|3",
                " 4 main root file4.py:1|1|4",
                " 5 call4 param0 file4.py:1|1|4",
                " 6 call5 param1 file5.py:1|1|5",
                " 7 leaf sink file6.py:1|1|6",
                "",
            ],
        )
        self._clear_stdout()
        # Cursor on frame 2, features shown.
        self.interactive._output_trace_tuples(trace_tuples, True)
        output = self.stdout.getvalue()
        self.assertEqual(
            output.split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " 1 leaf source file1.py:1|1|1",
                " --> 2 call2 result file2.py:1|1|2",
                " --F: ['one', 'two']",
                " 3 call3 result file3.py:1|1|3",
                " 4 main root file4.py:1|1|4",
                " --F: ['two', 'three']",
                " 5 call4 param0 file4.py:1|1|4",
                " 6 call5 param1 file5.py:1|1|5",
                " --F: ['one', 'two', 'three']",
                " 7 leaf sink file6.py:1|1|6",
                "",
            ],
        )
        self._clear_stdout()
        # Cursor on frame 5, features hidden.
        self.interactive.current_trace_frame_index = 4
        self.interactive._output_trace_tuples(trace_tuples)
        output = self.stdout.getvalue()
        self.assertEqual(
            output.split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " 1 leaf source file1.py:1|1|1",
                " 2 call2 result file2.py:1|1|2",
                " 3 call3 result file3.py:1|1|3",
                " 4 main root file4.py:1|1|4",
                " --> 5 call4 param0 file4.py:1|1|4",
                " 6 call5 param1 file5.py:1|1|5",
                " 7 leaf sink file6.py:1|1|6",
                "",
            ],
        )
        self._clear_stdout()
        # Cursor on frame 5, features shown.
        self.interactive.current_trace_frame_index = 4
        self.interactive._output_trace_tuples(trace_tuples, True)
        output = self.stdout.getvalue()
        self.assertEqual(
            output.split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " 1 leaf source file1.py:1|1|1",
                " 2 call2 result file2.py:1|1|2",
                " --F: ['one', 'two']",
                " 3 call3 result file3.py:1|1|3",
                " 4 main root file4.py:1|1|4",
                " --F: ['two', 'three']",
                " --> 5 call4 param0 file4.py:1|1|4",
                " 6 call5 param1 file5.py:1|1|5",
                " --F: ['one', 'two', 'three']",
                " 7 leaf sink file6.py:1|1|6",
                "",
            ],
        )
    def testTraceFromIssue(self):
        """trace() requires a selected issue/frame, then prints the combined
        postcondition + issue-root + precondition trace."""
        run = self.fakes.run()
        self.fakes.issue()
        instance = self.fakes.instance()
        source = self.fakes.source()
        frames = [
            self.fakes.postcondition(
                caller="call1",
                caller_port="root",
                callee="leaf",
                callee_port="source",
                location=(1, 1, 1),
            ),
            self.fakes.precondition(
                caller="call1",
                caller_port="root",
                callee="leaf",
                callee_port="sink",
                location=(1, 1, 2),
            ),
        ]
        self.fakes.saver.add_all(
            [
                IssueInstanceTraceFrameAssoc.Record(
                    trace_frame_id=frames[0].id, issue_instance_id=instance.id
                ),
                IssueInstanceTraceFrameAssoc.Record(
                    trace_frame_id=frames[1].id, issue_instance_id=instance.id
                ),
            ]
        )
        self.fakes.saver.add_all(
            [
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=0
                ),
            ]
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive.setup()
        # trace() with no issue or frame selected is a user error.
        self.interactive.trace()
        stderr = self.stderr.getvalue().strip()
        self.assertIn("Use 'issue ID' or 'frame ID'", stderr)
        self.interactive.issue(1)
        self._clear_stdout()
        self.interactive.trace()
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " 1 leaf source lib/server/posts/response.py:1|1|1",
                " --> 2 Foo.barMethod root /r/some/filename.py:6|7|8",
                " 3 leaf sink lib/server/posts/request.py:1|1|2",
                "",
            ],
        )
    def testTraceFromFrame(self):
        """frame(id) selects a trace frame; trace() then expands from it and
        populates the sinks filter from the frame's leaves."""
        run = self.fakes.run()
        frames = self._basic_trace_frames()
        sink = self.fakes.sink("sink")
        self.fakes.saver.add_all(
            [
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[0].id, leaf_id=sink.id, trace_length=1
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=0
                ),
            ]
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive.setup()
        self.interactive.frame(int(frames[0].id))
        self._clear_stdout()
        self.interactive.trace()
        self.assertEqual(self.interactive.sinks, {"sink"})
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " --> 1 call1 root lib/server/posts/request.py:1|1|1",
                " 2 call2 param0 lib/server/posts/request.py:1|1|1",
                " 3 leaf sink lib/server/posts/request.py:1|2|1",
                "",
            ],
        )
    def testTraceMissingFrames(self):
        """trace() reports a broken chain when a precondition's callee has
        no continuing frame (call2:param0 is a dead end here)."""
        run = self.fakes.run()
        self.fakes.issue()
        instance = self.fakes.instance()
        source = self.fakes.source()
        frames = [
            self.fakes.postcondition(
                caller="call1",
                caller_port="root",
                callee="leaf",
                callee_port="source",
                location=(1, 1, 1),
            ),
            # Precondition into call2:param0 with no matching follow-on frame.
            self.fakes.precondition(
                caller="call1",
                caller_port="root",
                callee="call2",
                callee_port="param0",
                location=(1, 1, 1),
            ),
        ]
        self.fakes.saver.add_all(
            [
                IssueInstanceTraceFrameAssoc.Record(
                    trace_frame_id=frames[0].id, issue_instance_id=instance.id
                ),
                IssueInstanceTraceFrameAssoc.Record(
                    trace_frame_id=frames[1].id, issue_instance_id=instance.id
                ),
            ]
        )
        self.fakes.saver.add_all(
            [
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=0
                ),
            ]
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive.setup()
        self.interactive.issue(1)
        self.interactive.trace()
        stdout = self.stdout.getvalue().strip()
        self.assertIn("Missing trace frame: call2:param0", stdout)
    def testTraceCursorLocation(self):
        """next/prev_cursor_location move the trace cursor and clamp at both
        ends; callable() reflects the frame under the cursor."""
        run = self.fakes.run()
        self.fakes.issue()
        instance = self.fakes.instance(callable="Issue callable")
        source = self.fakes.source()
        frames = [
            self.fakes.postcondition(
                caller="call1",
                caller_port="root",
                callee="leaf",
                callee_port="source",
                location=(1, 1, 1),
            ),
            self.fakes.precondition(
                caller="call1",
                caller_port="root",
                callee="leaf",
                callee_port="sink",
                location=(1, 2, 1),
            ),
        ]
        self.fakes.saver.add_all(
            [
                IssueInstanceTraceFrameAssoc.Record(
                    trace_frame_id=frames[0].id, issue_instance_id=instance.id
                ),
                IssueInstanceTraceFrameAssoc.Record(
                    trace_frame_id=frames[1].id, issue_instance_id=instance.id
                ),
            ]
        )
        self.fakes.saver.add_all(
            [
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=0
                ),
            ]
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive.setup()
        # No issue selected yet: no callable.
        self.assertIsNone(self.interactive.callable())
        self.interactive.issue(1)
        self.assertEqual(self.interactive.callable(), "Issue callable")
        self.assertEqual(self.interactive.current_trace_frame_index, 1)
        self.interactive.next_cursor_location()
        self.assertEqual(self.interactive.current_trace_frame_index, 2)
        self.assertEqual(self.interactive.callable(), "leaf")
        # Clamped at the last frame.
        self.interactive.next_cursor_location()
        self.assertEqual(self.interactive.current_trace_frame_index, 2)
        self.interactive.prev_cursor_location()
        self.assertEqual(self.interactive.current_trace_frame_index, 1)
        self.interactive.prev_cursor_location()
        self.assertEqual(self.interactive.current_trace_frame_index, 0)
        # Clamped at the first frame.
        self.interactive.prev_cursor_location()
        self.assertEqual(self.interactive.current_trace_frame_index, 0)
    def testJumpToLocation(self):
        """jump(n) moves the cursor to 1-based trace position n and ignores
        out-of-range targets (0 or past the end)."""
        run = self.fakes.run()
        self.fakes.issue()
        instance = self.fakes.instance()
        source = self.fakes.source()
        frames = [
            self.fakes.postcondition(
                caller="call1", caller_port="root", callee="leaf", callee_port="source"
            ),
            self.fakes.precondition(
                caller="call1", caller_port="root", callee="leaf", callee_port="sink"
            ),
        ]
        self.fakes.saver.add_all(
            [
                IssueInstanceTraceFrameAssoc.Record(
                    trace_frame_id=frames[0].id, issue_instance_id=instance.id
                ),
                IssueInstanceTraceFrameAssoc.Record(
                    trace_frame_id=frames[1].id, issue_instance_id=instance.id
                ),
            ]
        )
        self.fakes.saver.add_all(
            [
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=0
                ),
            ]
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive.setup()
        self.interactive.issue(1)
        self.assertEqual(self.interactive.current_trace_frame_index, 1)
        self.interactive.jump(1)
        self.assertEqual(self.interactive.current_trace_frame_index, 0)
        self.interactive.jump(3)
        self.assertEqual(self.interactive.current_trace_frame_index, 2)
        # Out-of-range jumps leave the cursor unchanged.
        self.interactive.jump(4)
        self.assertEqual(self.interactive.current_trace_frame_index, 2)
        self.interactive.jump(0)
        self.assertEqual(self.interactive.current_trace_frame_index, 2)
    def testTraceNoSinks(self):
        """trace() on an issue with only a source-side frame prints the
        postcondition plus the issue root, with no sink rows."""
        run = self.fakes.run()
        self.fakes.issue()
        instance = self.fakes.instance()
        source = self.fakes.source("source1")
        frame = self.fakes.postcondition(
            caller="call1", caller_port="root", callee="leaf", callee_port="source"
        )
        self.fakes.saver.add(
            IssueInstanceTraceFrameAssoc.Record(
                trace_frame_id=frame.id, issue_instance_id=instance.id
            )
        )
        self.fakes.saver.add(
            TraceFrameLeafAssoc.Record(
                trace_frame_id=frame.id, leaf_id=source.id, trace_length=0
            )
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive.setup()
        self.interactive.sources = {"source1"}
        self.interactive.issue(1)
        self._clear_stdout()
        self.interactive.trace()
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " 1 leaf source lib/server/posts/response.py:4|5|6",
                " --> 2 Foo.barMethod root /r/some/filename.py:6|7|8",
                "",
            ],
        )
    def _set_up_branched_trace(self) -> List[TraceFrame]:
        """Build one issue whose trace branches at every hop.

        Creates six frames: two postconditions (call1 -> leaf source), two
        preconditions (call1 -> call2) attached to the issue instance, and two
        deeper preconditions (call2 -> leaf sink).  Each pair differs only in
        location and trace length, so every hop has exactly two branches.
        Returns the frames in creation order.
        """
        run = self.fakes.run()
        self.fakes.issue()
        instance = self.fakes.instance()
        source = self.fakes.source("source1")
        sink = self.fakes.sink("sink1")
        self.fakes.saver.add_all(
            [
                IssueInstanceSharedTextAssoc.Record(
                    issue_instance_id=instance.id, shared_text_id=source.id
                ),
                IssueInstanceSharedTextAssoc.Record(
                    issue_instance_id=instance.id, shared_text_id=sink.id
                ),
            ]
        )
        frames = []
        for i in range(6):
            if i < 2:  # 2 postconditions: call1 -> leaf source
                frames.append(
                    self.fakes.postcondition(
                        caller="call1",
                        caller_port="root",
                        callee="leaf",
                        callee_port="source",
                        location=(i, i, i),
                    )
                )
                self.fakes.saver.add(
                    TraceFrameLeafAssoc.Record(
                        trace_frame_id=frames[-1].id, leaf_id=source.id, trace_length=i
                    )
                )
                self.fakes.saver.add(
                    IssueInstanceTraceFrameAssoc.Record(
                        trace_frame_id=frames[-1].id, issue_instance_id=instance.id
                    )
                )
            elif i < 4:  # 2 preconditions attached to the instance: call1 -> call2
                frames.append(
                    self.fakes.precondition(
                        caller="call1",
                        caller_port="root",
                        callee="call2",
                        callee_port="param2",
                        location=(i, i, i),
                    )
                )
                self.fakes.saver.add(
                    TraceFrameLeafAssoc.Record(
                        trace_frame_id=frames[-1].id, leaf_id=sink.id, trace_length=i
                    )
                )
                self.fakes.saver.add(
                    IssueInstanceTraceFrameAssoc.Record(
                        trace_frame_id=frames[-1].id, issue_instance_id=instance.id
                    )
                )
            else:  # 2 deeper preconditions (no instance assoc): call2 -> leaf sink
                frames.append(
                    self.fakes.precondition(
                        caller="call2",
                        caller_port="param2",
                        callee="leaf",
                        callee_port="sink",
                        location=(i, i, i),
                    )
                )
                self.fakes.saver.add(
                    TraceFrameLeafAssoc.Record(
                        trace_frame_id=frames[-1].id,
                        leaf_id=sink.id,
                        trace_length=5 - i,
                    )
                )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        return frames
    def testTraceBranchNumber(self):
        """Each hop with alternative frames shows a '+N' branch count."""
        self._set_up_branched_trace()
        self.interactive.setup()
        self.interactive.issue(1)
        self.assertEqual(self.interactive.sources, {"source1"})
        self.assertEqual(self.interactive.sinks, {"sink1"})
        self._clear_stdout()
        self.interactive.trace()
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " 1 +2 leaf source lib/server/posts/response.py:0|0|0",
                " --> 2 Foo.barMethod root /r/some/filename.py:6|7|8",
                " 3 +2 call2 param2 lib/server/posts/request.py:2|2|2",
                " 4 +2 leaf sink lib/server/posts/request.py:5|5|5",
                "",
            ],
        )
    def testShowBranches(self):
        """`branch()` with no argument lists all branches at the cursor's hop,
        marking the current one with [*]."""
        self._set_up_branched_trace()
        self.interactive.setup()
        self.interactive.issue(1)
        # Parent at root
        self.interactive.prev_cursor_location()
        with patch("click.prompt", return_value=0):
            self.interactive.branch()
        output = self.stdout.getvalue().strip()
        self.assertIn(
            "[*] leaf : source\n"
            " [0 hops: source1]\n"
            " [lib/server/posts/response.py:0|0|0]",
            output,
        )
        self.assertIn(
            "[2] leaf : source\n"
            " [1 hops: source1]\n"
            " [lib/server/posts/response.py:1|1|1]",
            output,
        )
        self._clear_stdout()
        # Move to call2:param2
        self.interactive.next_cursor_location()
        self.interactive.next_cursor_location()
        with patch("click.prompt", return_value=0):
            self.interactive.branch()
        output = self.stdout.getvalue().strip()
        self.assertIn(
            "[*] call2 : param2\n"
            " [2 hops: sink1]\n"
            " [lib/server/posts/request.py:2|2|2]",
            output,
        )
        self.assertIn(
            "[2] call2 : param2\n"
            " [3 hops: sink1]\n"
            " [lib/server/posts/request.py:3|3|3]",
            output,
        )
        self._clear_stdout()
        # Move to leaf:sink
        self.interactive.next_cursor_location()
        with patch("click.prompt", return_value=0):
            self.interactive.branch()
        output = self.stdout.getvalue().strip()
        self.assertIn(
            "[*] leaf : sink\n"
            " [0 hops: sink1]\n"
            " [lib/server/posts/request.py:5|5|5]",
            output,
        )
        self.assertIn(
            "[2] leaf : sink\n"
            " [1 hops: sink1]\n"
            " [lib/server/posts/request.py:4|4|4]",
            output,
        )
    def testGetTraceFrameBranches(self):
        """_get_trace_frame_branches returns the alternative frames for the
        cursor's hop, ordered by trace length."""
        frames = self._set_up_branched_trace()
        self.interactive.setup()
        self.interactive.issue(1)
        # Parent at root
        self.interactive.prev_cursor_location()
        with self.db.make_session() as session:
            branches = self.interactive._get_trace_frame_branches(session)
            self.assertEqual(len(branches), 2)
            self.assertEqual(int(branches[0].id), int(frames[0].id))
            self.assertEqual(int(branches[1].id), int(frames[1].id))
            # Parent is no longer root
            self.interactive.next_cursor_location()
            self.interactive.next_cursor_location()
            self.interactive.next_cursor_location()
            branches = self.interactive._get_trace_frame_branches(session)
            self.assertEqual(len(branches), 2)
            # Shorter trace length sorts first, so frame 5 precedes frame 4.
            self.assertEqual(int(branches[0].id), int(frames[5].id))
            self.assertEqual(int(branches[1].id), int(frames[4].id))
    def testBranch(self):
        """`branch(n)` switches the cursor's hop to the n-th alternative frame;
        out-of-range numbers are rejected."""
        self._set_up_branched_trace()
        self.interactive.setup()
        self.interactive.issue(1)
        self.interactive.prev_cursor_location()
        # We are testing for the source location, which differs between branches
        self._clear_stdout()
        self.interactive.branch(2)  # location 0|0|0 -> 1|1|1
        output = self.stdout.getvalue().strip()
        self.assertIn(
            " --> 1 +2 leaf source lib/server/posts/response.py:1|1|1", output
        )
        self._clear_stdout()
        self.interactive.branch(1)  # location 1|1|1 -> 0|0|0
        output = self.stdout.getvalue().strip()
        self.assertIn(
            " --> 1 +2 leaf source lib/server/posts/response.py:0|0|0", output
        )
        self.interactive.next_cursor_location()
        self.interactive.next_cursor_location()
        self._clear_stdout()
        self.interactive.branch(2)  # location 2|2|2 -> 3|3|3
        output = self.stdout.getvalue().strip()
        self.assertIn(
            " --> 3 +2 call2 param2 lib/server/posts/request.py:3|3|3", output
        )
        self.interactive.next_cursor_location()
        self._clear_stdout()
        self.interactive.branch(2)  # location 5|5|5 -> 4|4|4
        output = self.stdout.getvalue().strip()
        self.assertIn(
            " 3 +2 call2 param2 lib/server/posts/request.py:3|3|3", output
        )
        self.assertIn(
            " --> 4 +2 leaf sink lib/server/posts/request.py:4|4|4", output
        )
        self.interactive.branch(3)  # invalid: only 2 branches exist at this hop
        stderr = self.stderr.getvalue().strip()
        self.assertIn("Branch number invalid", stderr)
    def testBranchPrefixLengthChanges(self):
        """Switching to a branch with a longer call chain inserts the extra
        frames into the trace and renumbers the hops."""
        run = self.fakes.run()
        self.fakes.issue()
        instance = self.fakes.instance()
        source = self.fakes.source("source1")
        sink = self.fakes.sink("sink1")
        # frames[0]: direct source; frames[1]+frames[2]: same source reached
        # through prev_call (one extra hop); frames[3]: the sink side.
        frames = [
            self.fakes.postcondition(
                caller="call1", caller_port="root", callee="leaf", callee_port="source"
            ),
            self.fakes.postcondition(
                caller="call1",
                caller_port="root",
                callee="prev_call",
                callee_port="result",
            ),
            self.fakes.postcondition(
                caller="prev_call",
                caller_port="result",
                callee="leaf",
                callee_port="source",
            ),
            self.fakes.precondition(
                caller="call1", caller_port="root", callee="leaf", callee_port="sink"
            ),
        ]
        self.fakes.saver.add_all(
            [
                IssueInstanceSharedTextAssoc.Record(
                    issue_instance_id=instance.id, shared_text_id=source.id
                ),
                IssueInstanceSharedTextAssoc.Record(
                    issue_instance_id=instance.id, shared_text_id=sink.id
                ),
            ]
        )
        self.fakes.saver.add_all(
            [
                IssueInstanceTraceFrameAssoc.Record(
                    issue_instance_id=instance.id, trace_frame_id=frames[0].id
                ),
                IssueInstanceTraceFrameAssoc.Record(
                    issue_instance_id=instance.id, trace_frame_id=frames[1].id
                ),
                IssueInstanceTraceFrameAssoc.Record(
                    issue_instance_id=instance.id, trace_frame_id=frames[3].id
                ),
            ]
        )
        self.fakes.saver.add_all(
            [
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=1
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[2].id, leaf_id=source.id, trace_length=0
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[3].id, leaf_id=sink.id, trace_length=0
                ),
            ]
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive.setup()
        self.interactive.issue(1)
        self._clear_stdout()
        self.interactive.prev_cursor_location()
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " --> 1 +2 leaf source lib/server/posts/response.py:4|5|6",
                " 2 Foo.barMethod root /r/some/filename.py:6|7|8",
                " 3 leaf sink lib/server/posts/request.py:4|5|6",
                "",
            ],
        )
        self._clear_stdout()
        # Take the prev_call branch: the trace gains a hop.
        self.interactive.branch(2)
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                " # ⎇ [callable] [port] [location]",
                " 1 leaf source lib/server/posts/response.py:4|5|6",
                " --> 2 +2 prev_call result lib/server/posts/response.py:4|5|6",
                " 3 Foo.barMethod root /r/some/filename.py:6|7|8",
                " 4 leaf sink lib/server/posts/request.py:4|5|6",
                "",
            ],
        )
        self._clear_stdout()
        with patch("click.prompt", return_value=0):
            self.interactive.branch()
        output = self.stdout.getvalue().strip()
        self.assertIn("[*] prev_call : result", output)
        self.assertIn(" [1 hops: source1]", output)
def testCurrentBranchIndex(self):
trace_frames = [TraceFrame(id=1), TraceFrame(id=2), TraceFrame(id=3)]
self.interactive.current_trace_frame_index = 0
self.interactive.trace_tuples = [TraceTuple(trace_frame=TraceFrame(id=1))]
self.assertEqual(0, self.interactive._current_branch_index(trace_frames))
self.interactive.trace_tuples[0].trace_frame.id = 2
self.assertEqual(1, self.interactive._current_branch_index(trace_frames))
self.interactive.trace_tuples[0].trace_frame.id = 3
self.assertEqual(2, self.interactive._current_branch_index(trace_frames))
self.interactive.trace_tuples[0].trace_frame.id = 4
self.assertEqual(-1, self.interactive._current_branch_index(trace_frames))
def testVerifyEntrypointSelected(self) -> None:
self.interactive.current_issue_instance_id = DBID(-1)
self.interactive.current_frame_id = DBID(-1)
with self.assertRaises(UserError):
self.interactive._verify_entrypoint_selected()
self.interactive.current_issue_instance_id = DBID(1)
try:
self.interactive._verify_entrypoint_selected()
except UserError:
self.fail("Unexpected UserError")
self.interactive.current_issue_instance_id = DBID(-1)
self.interactive.current_frame_id = DBID(1)
try:
self.interactive._verify_entrypoint_selected()
except UserError:
self.fail("Unexpected UserError")
self.interactive.current_issue_instance_id = DBID(1)
with self.assertRaises(AssertionError):
self.interactive._verify_entrypoint_selected()
def testVerifyMultipleBranches(self):
self.interactive.current_trace_frame_index = 0
self.interactive.trace_tuples = [
TraceTuple(trace_frame=TraceFrame(id=1), branches=1),
TraceTuple(trace_frame=TraceFrame(id=2), branches=2),
]
with self.assertRaises(UserError):
self.interactive._verify_multiple_branches()
self.interactive.current_trace_frame_index = 1
try:
self.interactive._verify_multiple_branches()
except UserError:
self.fail("Unexpected UserError")
def testAddListOrElementFilterErrors(self):
with self.assertRaises(UserError):
self.interactive._add_list_or_element_filter_to_query(
"not a list", None, None, "arg0", int
)
with self.assertRaises(UserError):
self.interactive._add_list_or_element_filter_to_query(
[], None, None, "arg0", str
)
    def testAddListOrStringFilterToQuery(self):
        """Plain strings filter by exact match; %-wrapped strings use SQL LIKE."""
        shared_texts = [
            SharedText(id=1, contents="prefix"),
            SharedText(id=2, contents="suffix"),
            SharedText(id=3, contents="prefix_suffix"),
            SharedText(id=4, contents="fix"),
        ]
        with self.db.make_session() as session:
            self._add_to_session(session, shared_texts)
            session.commit()
            query = session.query(SharedText.contents)
            # Exact matches only.
            self.assertEqual(
                self.interactive._add_list_or_string_filter_to_query(
                    ["prefix", "suffix"], query, SharedText.contents, "contents"
                ).all(),
                [("prefix",), ("suffix",)],
            )
            # LIKE pattern: anything containing "prefix".
            self.assertEqual(
                self.interactive._add_list_or_string_filter_to_query(
                    ["%prefix%"], query, SharedText.contents, "contents"
                ).all(),
                [("prefix",), ("prefix_suffix",)],
            )
            # LIKE pattern: anything containing "fix" matches all rows.
            self.assertEqual(
                self.interactive._add_list_or_string_filter_to_query(
                    ["%fix%"], query, SharedText.contents, "contents"
                ).all(),
                [("prefix",), ("suffix",), ("prefix_suffix",), ("fix",)],
            )
def testCreateIssueOutputStringNoSourcesNoSinks(self):
issue = IssueQueryResult(
issue_id=1,
issue_instance_id=1,
filename="module.py",
location=SourceLocation(1, 2, 3),
code=1000,
callable="module.function1",
message="root",
min_trace_length_to_sources=1,
min_trace_length_to_sinks=1,
features=set(),
is_new_issue=False,
source_names=None,
source_kinds=None,
sink_names=None,
sink_kinds=["sink1", "sink2"],
status=IssueStatus.UNCATEGORIZED,
first_seen="2001-02-24 16:31:27.1234",
similar_issues={(2, "0.24")},
run_id=1,
)
sources = []
sinks = ["sink1", "sink2"]
features = []
result = self.interactive._create_issue_output_string(
issue, sources, sinks, features
)
self.assertIn("Sources: No sources", result)
self.assertIn("Sinks: sink1", result)
sources = ["source1", "source2"]
sinks = []
result = self.interactive._create_issue_output_string(
issue, sources, sinks, features
)
self.assertIn("Sources: source1", result)
self.assertIn("Sinks: No sinks", result)
def testCreateIssueOutputStringNoFeatures(self):
issue = IssueQueryResult(
issue_id=1,
issue_instance_id=1,
filename="module.py",
location=SourceLocation(1, 2, 3),
code=1000,
callable="module.function1",
message="root",
min_trace_length_to_sources=1,
min_trace_length_to_sinks=1,
features=set(),
is_new_issue=False,
source_names=None,
source_kinds=None,
sink_names=None,
sink_kinds=["sink1"],
status=IssueStatus.UNCATEGORIZED,
first_seen="2001-02-24 16:31:27.1234",
similar_issues={(2, "0.24")},
run_id=1,
)
sources = []
sinks = ["sink1"]
features = []
result = self.interactive._create_issue_output_string(
issue, sources, sinks, features
)
self.assertIn("Features: No features", result)
sources = []
sinks = ["sink1"]
features = ["via:feature1"]
result = self.interactive._create_issue_output_string(
issue, sources, sinks, features
)
self.assertIn("Features: via:feature1", result)
def testCreateIssueOutputStringTraceLength(self):
issue1 = IssueQueryResult(
issue_id=1,
issue_instance_id=1,
filename="module.py",
location=SourceLocation(1, 2, 3),
code=1000,
callable="module.function1",
message="root",
min_trace_length_to_sources=0,
min_trace_length_to_sinks=6,
features=set(),
is_new_issue=False,
source_names=None,
source_kinds=None,
sink_names=None,
sink_kinds=["sink1", "sink2"],
status=IssueStatus.UNCATEGORIZED,
first_seen="2001-02-24 16:31:27.1234",
similar_issues={(2, "0.24")},
run_id=1,
)
sources = []
sinks = ["sink1", "sink2"]
features = []
result = self.interactive._create_issue_output_string(
issue1, sources, sinks, features
)
self.assertIn("Min Trace Length: Source (0) | Sink (6)", result)
issue2 = IssueQueryResult(
issue_id=1,
issue_instance_id=1,
filename="module.py",
location=SourceLocation(1, 2, 3),
code=1000,
callable="module.function1",
message="root",
min_trace_length_to_sources=3,
min_trace_length_to_sinks=1,
features=set(),
is_new_issue=False,
source_names=None,
source_kinds=None,
sink_names=None,
sink_kinds=["sink1", "sink2"],
status=IssueStatus.UNCATEGORIZED,
first_seen="2001-02-24 16:31:27.1234",
similar_issues={(2, "0.24")},
run_id=1,
)
sources = []
sinks = ["sink1", "sink2"]
result = self.interactive._create_issue_output_string(
issue2, sources, sinks, features
)
self.assertIn("Min Trace Length: Source (3) | Sink (1)", result)
    def testListSourceCode(self):
        """list_source_code prints the file around the callee location with a
        cursor arrow and column underline; the argument sets context lines."""
        mock_data = """if this_is_true:
    print("This was true")
else:
    print("This was false")
        """
        self.interactive.setup()
        self.interactive.current_issue_instance_id = 1
        self.interactive.current_trace_frame_index = 0
        self.interactive.trace_tuples = [
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(0),
                    filename="file.py",
                    caller="",
                    caller_port="",
                    callee="callee",
                    callee_port="",
                    callee_location=SourceLocation(2, 10, 25),
                ),
                placeholder=True,
            )
        ]
        with patch("builtins.open", mock_open(read_data=mock_data)) as mock_file:
            self._clear_stdout()
            # Two lines of context around line 2.
            self.interactive.list_source_code(2)
            mock_file.assert_called_once_with(f"{os.getcwd()}/file.py", "r")
            output = self.stdout.getvalue()
            self.assertEqual(
                output.split("\n"),
                [
                    "In callee [file.py:2|10|25]",
                    " 1 if this_is_true:",
                    ' --> 2 print("This was true")',
                    " ^^^^^^^^^^^^^^^",
                    " 3 else:",
                    ' 4 print("This was false")',
                    "",
                ],
            )
            mock_file.reset_mock()
            self._clear_stdout()
            # One line of context: line 4 drops out of the window.
            self.interactive.list_source_code(1)
            mock_file.assert_called_once_with(f"{os.getcwd()}/file.py", "r")
            output = self.stdout.getvalue()
            self.assertEqual(
                output.split("\n"),
                [
                    "In callee [file.py:2|10|25]",
                    " 1 if this_is_true:",
                    ' --> 2 print("This was true")',
                    " ^^^^^^^^^^^^^^^",
                    " 3 else:",
                    "",
                ],
            )
    def testListSourceCodeFileNotFound(self):
        """A missing source file is reported on stderr instead of raising."""
        self.interactive.setup()
        self.interactive.current_issue_instance_id = 1
        self.interactive.current_trace_frame_index = 0
        self.interactive.trace_tuples = [
            TraceTuple(
                trace_frame=TraceFrameQueryResult(
                    id=DBID(0),
                    caller="",
                    caller_port="",
                    callee="",
                    callee_port="",
                    filename="file.py",
                    callee_location=SourceLocation(2, 1, 1),
                )
            )
        ]
        with patch("builtins.open", mock_open(read_data="not read")) as mock_file:
            mock_file.side_effect = FileNotFoundError()
            self.interactive.list_source_code()
            self.assertIn("Couldn't open", self.stderr.getvalue())
            self.assertNotIn("file.py", self.stdout.getvalue())
def testGroupTraceFrames(self):
trace_frames = [
TraceFrameQueryResult(
id=DBID(1),
caller="caller1",
caller_port="port1",
callee="",
callee_port="",
),
TraceFrameQueryResult(
id=DBID(2),
caller="caller1",
caller_port="port1",
callee="",
callee_port="",
),
TraceFrameQueryResult(
id=DBID(3),
caller="caller2",
caller_port="port2",
callee="",
callee_port="",
),
TraceFrameQueryResult(
id=DBID(4),
caller="caller2",
caller_port="port2",
callee="",
callee_port="",
),
TraceFrameQueryResult(
id=DBID(5),
caller="caller2",
caller_port="port3",
callee="",
callee_port="",
),
]
buckets = self.interactive._group_trace_frames(trace_frames, 5)
self.assertEqual(3, len(buckets.keys()))
self.assertIn(("caller1", "port1"), buckets.keys())
self.assertIn(("caller2", "port2"), buckets.keys())
self.assertIn(("caller2", "port3"), buckets.keys())
self.assertEqual(
[1, 2], [int(frame.id) for frame in buckets[("caller1", "port1")]]
)
self.assertEqual(
[3, 4], [int(frame.id) for frame in buckets[("caller2", "port2")]]
)
self.assertEqual(
[5], [int(frame.id) for frame in buckets[("caller2", "port3")]]
)
def testListTracesBasic(self):
self.fakes.run()
post1 = self.fakes.postcondition(
caller="caller1", caller_port="port1", callee="callee1", callee_port="port1"
)
post2 = self.fakes.postcondition(
caller="caller1", caller_port="port1", callee="callee2", callee_port="port2"
)
post3 = self.fakes.postcondition(
caller="caller2", caller_port="port2", callee="callee3", callee_port="port3"
)
post4 = self.fakes.postcondition(
caller="caller2", caller_port="port2", callee="callee4", callee_port="port4"
)
post5 = self.fakes.postcondition(
caller="caller2", caller_port="port3", callee="callee5", callee_port="port5"
)
self.fakes.save_all(self.db)
self.interactive._current_run_id = 1
self._clear_stdout()
self.interactive.frames(kind=TraceKind.POSTCONDITION)
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
"[id] [caller:caller_port -> callee:callee_port]",
"---- caller1:port1 ->",
f"{post1.id} callee1:port1",
f"{post2.id} callee2:port2",
"---- caller2:port2 ->",
f"{post3.id} callee3:port3",
f"{post4.id} callee4:port4",
"---- caller2:port3 ->",
f"{post5.id} callee5:port5",
"",
],
)
self._clear_stdout()
self.interactive.frames(kind=TraceKind.PRECONDITION)
self.assertEqual(self.stdout.getvalue().strip(), "No trace frames found.")
    def testListTracesFilterCallersCallees(self):
        """`frames` can be filtered by caller names or by callee names."""
        run = self.fakes.run()
        frames = self._basic_trace_frames()
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        self.interactive._current_run_id = 1
        self._clear_stdout()
        self.interactive.frames(callers=["call2"])
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                "[id] [caller:caller_port -> callee:callee_port]",
                "---- call2:param0 ->",
                f"{frames[1].id} leaf:sink",
                "",
            ],
        )
        self._clear_stdout()
        self.interactive.frames(callees=["call2"])
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                "[id] [caller:caller_port -> callee:callee_port]",
                "---- call1:root ->",
                f"{frames[0].id} call2:param0",
                "",
            ],
        )
    def testListFramesWithLimit(self):
        """`frames(limit=N)` truncates the listing and prints how many of the
        matching frames were shown."""
        frames = self._set_up_branched_trace()
        self.interactive.run(1)
        self._clear_stdout()
        self.interactive.frames(limit=3)
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                "[id] [caller:caller_port -> callee:callee_port]",
                "---- call1:root ->",
                f"{frames[3].id} call2:param2",
                f"{frames[2].id} call2:param2",
                f"{frames[1].id} leaf:source",
                "...",
                "Showing 3/6 matching frames. To see more, call 'frames' with "
                "the 'limit' argument.",
                "",
            ],
        )
    def testSetFrame(self):
        """`frame(id)` rejects unknown ids and selects exactly one existing
        frame by id."""
        frames = self._basic_trace_frames()
        sink = self.fakes.sink("sink")
        self.fakes.saver.add_all(
            [
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[0].id, leaf_id=sink.id, trace_length=1
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=0
                ),
            ]
        )
        self.fakes.save_all(self.db)
        self.interactive.setup()
        # Frame 0 does not exist: error on stderr.
        self.interactive.frame(0)
        self.assertIn("Trace frame 0 doesn't exist.", self.stderr.getvalue())
        self._clear_stdout()
        self.interactive.frame(1)
        self.assertIn("Trace frame 1", self.stdout.getvalue())
        self.assertNotIn("Trace frame 2", self.stdout.getvalue())
        self._clear_stdout()
        self.interactive.frame(2)
        self.assertNotIn("Trace frame 1", self.stdout.getvalue())
        self.assertIn("Trace frame 2", self.stdout.getvalue())
    def testSetFrameUpdatesRun(self):
        """Selecting a frame that belongs to an older run switches the current
        run to that frame's run."""
        run1 = self.fakes.run()
        frames = [
            self.fakes.precondition(
                caller="call1",
                caller_port="root",
                callee="call2",
                callee_port="param0",
                location=(1, 1, 1),
            ),
            self.fakes.precondition(
                caller="call2",
                caller_port="param1",
                callee="call3",
                callee_port="param2",
                location=(1, 1, 1),
            ),
        ]
        # run2 is created after the frames, so they belong to run1.
        run2 = self.fakes.run()
        sink = self.fakes.sink("sink1")
        self.fakes.saver.add_all(
            [
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[0].id, leaf_id=sink.id, trace_length=1
                ),
                TraceFrameLeafAssoc.Record(
                    trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=0
                ),
            ]
        )
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run1)
            session.add(run2)
            session.commit()
        self.interactive.setup()
        # setup() picks the latest run (run2)...
        self.assertEqual(int(self.interactive._current_run_id), 2)
        # ...but selecting a run1 frame moves the current run back to run1.
        self.interactive.frame(int(frames[0].id))
        self.assertEqual(int(self.interactive._current_run_id), 1)
def testIsBeforeRoot(self):
self.interactive.trace_tuples = [
TraceTuple(trace_frame=TraceFrame(kind=TraceKind.POSTCONDITION)),
TraceTuple(trace_frame=TraceFrame(kind=TraceKind.PRECONDITION)),
]
self.interactive.current_trace_frame_index = 0
self.assertTrue(self.interactive._is_before_root())
self.interactive.current_trace_frame_index = 1
self.assertFalse(self.interactive._is_before_root())
def testIsRootTraceTuple(self):
trace_tuple = TraceTuple(trace_frame=TraceFrame(callee_port="root"))
self.assertTrue(self.interactive._is_root_trace_tuple(trace_tuple))
trace_tuple = TraceTuple(trace_frame=TraceFrame(callee_port="not_root"))
self.assertFalse(self.interactive._is_root_trace_tuple(trace_tuple))
    def testParents(self):
        """`parents` lists callers of the current frame; root and leaf
        positions are rejected with an explanation."""
        self._set_up_branched_trace()
        self.interactive.setup()
        self.interactive.frame(3)
        self.interactive.current_trace_frame_index = 1
        self._clear_stdout()
        with patch("click.prompt", return_value=0):
            self.interactive.parents()
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            ["[1] call1 : root", "[2] call1 : root", ""],
        )
        self._clear_stdout()
        # At the root there is nothing calling into it.
        self.interactive.current_trace_frame_index = 0
        self.interactive.parents()
        self.assertIn("No parents calling", self.stdout.getvalue())
        # At a leaf, `parents` is not meaningful.
        self.interactive.current_trace_frame_index = 2
        self.interactive.parents()
        self.assertIn("Try running from a non-leaf node", self.stderr.getvalue())
    def testParentsSelectParent(self):
        """Choosing a parent from the `parents` prompt re-roots the trace at
        that parent and reprints it."""
        self._set_up_branched_trace()
        self.interactive.setup()
        self.interactive.frame(3)
        self.interactive.current_trace_frame_index = 1
        self._clear_stdout()
        with patch("click.prompt", return_value=1):
            self.interactive.parents()
        self.assertEqual(
            self.stdout.getvalue().split("\n"),
            [
                "[1] call1 : root",
                "[2] call1 : root",
                "",
                " # ⎇ [callable] [port] [location]",
                " --> 1 call1 root lib/server/posts/request.py:2|2|2",
                " 2 call2 param2 lib/server/posts/request.py:2|2|2",
                " 3 +2 leaf sink lib/server/posts/request.py:5|5|5",
                "",
            ],
        )
    def testUpdateTraceTuplesNewParent(self):
        """Re-rooting at a new parent truncates the trace on the far side of
        the cursor and appends a placeholder tuple for the parent.

        For a postcondition parent the suffix after the cursor is replaced
        (placeholder at the end); for a precondition parent the prefix before
        the cursor is replaced (placeholder at the start).
        """
        frames = [
            self.fakes.postcondition(callee="A"),
            self.fakes.postcondition(callee="B"),
            self.fakes.postcondition(callee="C"),
            self.fakes.postcondition(callee="D"),
            self.fakes.postcondition(callee="E"),
        ]
        self.fakes.save_all(self.db)
        self.interactive.setup()
        # Test postcondition
        self.interactive.current_trace_frame_index = 2
        with self.db.make_session() as session:
            self.interactive.trace_tuples = [
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[0])),
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[1])),
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[2])),
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[3])),
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[4])),
            ]
            trace_frame = TraceFrameQueryResult(
                id=DBID(0),
                caller="caller",
                caller_port="caller_port",
                callee="F",
                callee_port="callee_port",
                filename="file.py",
                callee_location=SourceLocation(1, 1, 1),
                kind=TraceKind.POSTCONDITION,
            )
            self.interactive._update_trace_tuples_new_parent(trace_frame)
            self.assertEqual(self.interactive.current_trace_frame_index, 3)
            self.assertEqual(
                [
                    self.interactive._get_callable_from_trace_tuple(trace_tuple)[0]
                    for trace_tuple in self.interactive.trace_tuples
                ],
                ["A", "B", "F", "caller"],
            )
            self.assertTrue(self.interactive.trace_tuples[-1].placeholder)
        # Test precondition
        self.interactive.current_trace_frame_index = 2
        with self.db.make_session() as session:
            self.interactive.trace_tuples = [
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[0])),
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[1])),
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[2])),
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[3])),
                TraceTuple(trace_frame=self._frame_to_query_result(session, frames[4])),
            ]
            trace_frame = TraceFrameQueryResult(
                id=DBID(0),
                caller="caller",
                caller_port="caller_port",
                callee="F",
                callee_port="callee_port",
                filename="file.py",
                callee_location=SourceLocation(1, 1, 1),
                kind=TraceKind.PRECONDITION,
            )
            self.interactive._update_trace_tuples_new_parent(trace_frame)
            self.assertEqual(self.interactive.current_trace_frame_index, 0)
            self.assertEqual(
                [
                    self.interactive._get_callable_from_trace_tuple(trace_tuple)[0]
                    for trace_tuple in self.interactive.trace_tuples
                ],
                ["caller", "F", "D", "E"],
            )
            self.assertTrue(self.interactive.trace_tuples[0].placeholder)
def testDetails(self):
run = self.fakes.run()
frames = [
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="call2",
callee_port="param0",
location=(1, 1, 1),
),
self.fakes.precondition(
caller="call2",
caller_port="param1",
callee="call3",
callee_port="param2",
location=(1, 1, 1),
),
]
issues = [self.fakes.issue(), self.fakes.issue(), self.fakes.issue()]
self.fakes.instance(issue_id=issues[0].id, callable="call2"),
self.fakes.instance(issue_id=issues[1].id, callable="call3"),
self.fakes.instance(issue_id=issues[2].id, callable="call2"),
self.fakes.save_all(self.db)
with self.db.make_session(expire_on_commit=False) as session:
session.add(run)
session.commit()
self.interactive.setup()
with self.db.make_session() as session:
self.interactive.trace_tuples = [
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[0]))
]
self.interactive.current_issue_instance_id = 1
self.interactive.current_trace_frame_index = 0
self._clear_stdout()
self.interactive.details()
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
f"Trace frame {frames[0].id}",
" Caller: call1 : root",
" Callee: call2 : param0",
" Kind: TraceKind.precondition",
" Sinks: ",
" Location: lib/server/posts/request.py:1|1|1",
"",
"Issues in callable (call2): 2",
"",
"Postconditions with caller (call2):",
"No trace frames found.",
"",
"Preconditions with caller (call2):",
"[id] [caller:caller_port -> callee:callee_port]",
"---- call2:param1 ->",
f"{frames[1].id} call3:param2",
"",
],
)
    def testListLeaves(self) -> None:
        """`leaves` prints the sink details associated with the current issue."""
        run = self.fakes.run()
        self.fakes.issue()
        instance = self.fakes.instance()
        sink_detail_1 = self.fakes.sink_detail("sink_detail_1")
        sink_detail_2 = self.fakes.sink_detail("sink_detail_2")
        self.fakes.save_all(self.db)
        assocs = [
            IssueInstanceSharedTextAssoc(
                shared_text_id=sink_detail_1.id,
                issue_instance_id=instance.id,
            ),
            IssueInstanceSharedTextAssoc(
                shared_text_id=sink_detail_2.id,
                issue_instance_id=instance.id,
            ),
        ]
        with self.db.make_session() as session:
            session.add(run)
            self._add_to_session(session, assocs)
            session.commit()
        self.interactive.setup()
        self.interactive.leaves()
        output = self.stdout.getvalue()
        self.assertIn("sink_detail_1", output)
        self.assertIn("sink_detail_2", output)
    def mock_pager(self, output_string):
        """Stand-in for IPython's pager that only counts how often it is called."""
        self.pager_calls += 1
    def testPager(self):
        """The pager is invoked only when use_pager=True is requested."""
        run = self.fakes.run()
        self.fakes.issue()
        self.fakes.instance()
        self.fakes.save_all(self.db)
        with self.db.make_session() as session:
            session.add(run)
            session.commit()
        # Default is no pager in tests
        self.pager_calls = 0
        with patch("IPython.core.page.page", self.mock_pager):
            self.interactive.setup()
            self.interactive.issues(use_pager=False)
            self.interactive.runs(use_pager=False)
            self.assertEqual(self.pager_calls, 0)
        self.pager_calls = 0
        with patch("IPython.core.page.page", self.mock_pager):
            self.interactive.setup()
            # One pager call per paged command: issues() and runs().
            self.interactive.issues(use_pager=True)
            self.interactive.runs(use_pager=True)
            self.assertEqual(self.pager_calls, 2)
| 35.687425 | 88 | 0.538108 |
acea07ab2a61cb9c75701a28fb06d8e45e55e75c | 2,201 | py | Python | src/garage/tf/policies/discrete_qf_derived_policy.py | researchai/unsupervised_meta_rl | 9ca4b41438277ef6cfea047482b98de9da07815a | [
"MIT"
] | 1 | 2019-07-31T06:53:38.000Z | 2019-07-31T06:53:38.000Z | src/garage/tf/policies/discrete_qf_derived_policy.py | researchai/unsupervised_meta_rl | 9ca4b41438277ef6cfea047482b98de9da07815a | [
"MIT"
] | null | null | null | src/garage/tf/policies/discrete_qf_derived_policy.py | researchai/unsupervised_meta_rl | 9ca4b41438277ef6cfea047482b98de9da07815a | [
"MIT"
] | null | null | null | """
Discrete QfDerived policy.
This policy chooses the action that yields to the largest q-value.
"""
import akro
import numpy as np
import tensorflow as tf
from garage.misc.overrides import overrides
from garage.tf.policies.base2 import Policy2
class DiscreteQfDerivedPolicy(Policy2):
    """Policy that deterministically picks the argmax action of a Q-function.

    Args:
        env_spec (garage.envs.env_spec.EnvSpec): Environment specification;
            the action space must be `akro.Discrete`.
        qf (garage.q_functions.QFunction): The Q-function used.
        name (str): Name of the policy.
    """
    def __init__(self, env_spec, qf, name='DiscreteQfDerivedPolicy'):
        super().__init__(name, env_spec)
        assert isinstance(env_spec.action_space, akro.Discrete)
        self._env_spec = env_spec
        self._qf = qf
        self._initialize()
    def _initialize(self):
        # Pre-compile a feed/fetch callable mapping observations to Q-values.
        # NOTE(review): assumes a default TF session exists at this point —
        # confirm against the construction/unpickling call sites.
        self._f_qval = tf.compat.v1.get_default_session().make_callable(
            self._qf.q_vals,
            feed_list=[self._qf.model.networks['default'].input])
    @property
    def vectorized(self):
        """bool: True, since `get_actions` accepts batched observations."""
        return True
    @overrides
    def get_action(self, observation):
        """
        Get action from this policy for the input observation.

        Args:
            observation (numpy.ndarray): Observation from environment.

        Returns:
            The action with the highest Q-value for this observation
            (ties broken toward the lowest index, per `np.argmax`).
        """
        q_vals = self._f_qval([observation])
        opt_action = np.argmax(q_vals)
        return opt_action
    @overrides
    def get_actions(self, observations):
        """
        Get actions from this policy for the input observations.

        Args:
            observations (numpy.ndarray): Observations from environment.

        Returns:
            Per-observation argmax actions (argmax over axis 1 of the
            batched Q-values).
        """
        q_vals = self._f_qval(observations)
        opt_actions = np.argmax(q_vals, axis=1)
        return opt_actions
    def __getstate__(self):
        """Object.__getstate__: drop the unpicklable TF session callable."""
        new_dict = self.__dict__.copy()
        del new_dict['_f_qval']
        return new_dict
    def __setstate__(self, state):
        """Object.__setstate__: restore state and rebuild the session callable."""
        self.__dict__.update(state)
        self._initialize()
| 25.298851 | 75 | 0.634257 |
acea095814c8f10c81cf44e12983324f30857989 | 31 | py | Python | testsuite/modulegraph-dir/alias_to_package_import_from.py | xoviat/modulegraph2 | 766d00bdb40e5b2fe206b53a87b1bce3f9dc9c2a | [
"MIT"
] | 9 | 2020-03-22T14:48:01.000Z | 2021-05-30T12:18:12.000Z | testsuite/modulegraph-dir/alias_to_package_import_from.py | xoviat/modulegraph2 | 766d00bdb40e5b2fe206b53a87b1bce3f9dc9c2a | [
"MIT"
] | 15 | 2020-01-06T10:02:32.000Z | 2021-05-28T12:22:44.000Z | testsuite/modulegraph-dir/alias_to_package_import_from.py | ronaldoussoren/modulegraph2 | b6ab1766b0098651b51083235ff8a18a5639128b | [
"MIT"
] | 4 | 2020-05-10T18:51:41.000Z | 2021-04-07T14:03:12.000Z | from the_package import submod
| 15.5 | 30 | 0.870968 |
acea09eb0f71e65d15629e4957dd5141b765b829 | 1,282 | py | Python | tests/base/sdk/python/setup.py | rajeshnokia/monolithe | f07d1a53ed1ff82c293ebac6fc6f02bafdf6dfb6 | [
"BSD-3-Clause"
] | 18 | 2015-06-24T18:35:20.000Z | 2022-01-19T19:04:00.000Z | tests/base/sdk/python/setup.py | rajeshnokia/monolithe | f07d1a53ed1ff82c293ebac6fc6f02bafdf6dfb6 | [
"BSD-3-Clause"
] | 63 | 2015-11-03T18:57:12.000Z | 2020-09-30T02:54:49.000Z | tests/base/sdk/python/setup.py | rajeshnokia/monolithe | f07d1a53ed1ff82c293ebac6fc6f02bafdf6dfb6 | [
"BSD-3-Clause"
] | 38 | 2015-10-23T19:04:44.000Z | 2021-06-04T08:13:33.000Z | # -*- coding: utf-8 -*-
#
# __code_header example
# put your license header here
# it will be added to all the generated files
#
from setuptools import setup
import os
packages = ['tdldk', 'tdldk.cli']
resources = []
api_version_path = "./tdldk"

# Discover the generated API-version sub-packages; skip plain files and
# the CLI package (already listed above).
for version_folder in os.listdir(api_version_path):
    if os.path.isfile("%s/%s" % (api_version_path, version_folder)):
        continue
    if version_folder == "cli":
        continue
    packages.append("tdldk.%s" % version_folder)
    packages.append("tdldk.%s.fetchers" % version_folder)
    if os.path.exists('tdldk/%s/resources' % version_folder):
        resources.append(('tdldk/%s/resources' % version_folder, ['tdldk/%s/resources/attrs_defaults.ini' % version_folder]))

# FIX: the original passed bare open(...) calls into setup(), leaking file
# handles, and kept trailing newlines / blank lines in install_requires.
# Read both files with context managers and strip the requirement lines.
with open('README.md') as readme_file:
    long_description = readme_file.read()
with open('requirements.txt') as requirements_file:
    install_requires = [line.strip() for line in requirements_file if line.strip()]

setup(
    name='tdldk',
    version="1.0",
    url='www.mycompany.net/mysdk',
    author='someone',
    author_email='someone@yourcompany.com',
    packages=packages,
    description='SDK for the My Product',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license='BSD',
    include_package_data=True,
    install_requires=install_requires,
    data_files=resources,
    entry_points={
        'console_scripts': [
            'tdl = tdldk.cli.cli:main']
    }
)
| 26.708333 | 125 | 0.673167 |
acea0aa3a226130e7453bb222b39be3538c258e0 | 3,315 | py | Python | todo_app/settings.py | DanielG-H/todo_app | 1f888c70fc345ac03b27872e02cffdcd290f39d2 | [
"MIT"
] | null | null | null | todo_app/settings.py | DanielG-H/todo_app | 1f888c70fc345ac03b27872e02cffdcd290f39d2 | [
"MIT"
] | null | null | null | todo_app/settings.py | DanielG-H/todo_app | 1f888c70fc345ac03b27872e02cffdcd290f39d2 | [
"MIT"
] | null | null | null | """
Django settings for todo_app project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '_5p-6w(rf#7bocy(j&4nlm9$x1-9u0udc25h@g-+un+#o%(3gz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable only while DEBUG is True; list real hostnames otherwise.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'toDo.apps.TodoConfig',    # the to-do list application
    'users.apps.UsersConfig',  # user registration / profile application
    'crispy_forms',            # bootstrap-styled form rendering
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_app.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,  # templates are discovered inside each app's templates/ dir
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'todo_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# SQLite file database — development only; swap for Postgres/MySQL in prod.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# User-uploaded files live under <project>/media and are served at /media/.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# django-crispy-forms template pack.
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Redirect targets for the auth views.
LOGIN_REDIRECT_URL = 'toDo-home'
LOGIN_URL = 'login'
| 24.924812 | 91 | 0.697134 |
acea0bfd5246d81bbb483e84f777b7b80788cf42 | 105 | py | Python | server/player/mahjong_soul/apps.py | eIGato/mahjong-portal | 550a2a872c4287adab6ce30c3440dc2141430a20 | [
"MIT"
] | 10 | 2018-02-12T10:30:22.000Z | 2020-06-29T21:06:15.000Z | server/player/mahjong_soul/apps.py | eIGato/mahjong-portal | 550a2a872c4287adab6ce30c3440dc2141430a20 | [
"MIT"
] | 62 | 2018-01-05T04:52:38.000Z | 2021-04-10T07:14:45.000Z | server/player/mahjong_soul/apps.py | MahjongRepository/mahjong-leaderboard | 77dfd26cb812c12fa7c2b11e862bb80a9135ccb0 | [
"MIT"
] | 8 | 2018-05-11T11:05:41.000Z | 2021-03-10T08:10:50.000Z | from django.apps import AppConfig
class MahjongSoulConfig(AppConfig):
    """Django app configuration for the Mahjong Soul player integration."""
    # Dotted module path Django uses to locate this application package.
    name = "player.mahjong_soul"
| 17.5 | 35 | 0.780952 |
acea0c964208846adbeb0af4905ab96c5329dbfd | 9,165 | py | Python | main_functions/train.py | sebag90/easymt | 4e401136f4e82f7cc1f8b466d8b1765240abdc57 | [
"MIT"
] | null | null | null | main_functions/train.py | sebag90/easymt | 4e401136f4e82f7cc1f8b466d8b1765240abdc57 | [
"MIT"
] | null | null | null | main_functions/train.py | sebag90/easymt | 4e401136f4e82f7cc1f8b466d8b1765240abdc57 | [
"MIT"
] | null | null | null | """
train a model from scratch or resume training
"""
import datetime
import math
import os
from pathlib import Path
import time
import torch
import torch.nn as nn
from model.model_generator import ModelGenerator
from model.loss import MaskedLoss
from model.optimizers import get_optimizer
from utils.lang import Language
from utils.dataset import (
DataLoader, BatchedData,
RNNDataConverter,
TransformerDataConverter
)
from utils.parameters import Parameters
class Memory:
    """
    class to keep track of loss
    during training

    Accumulates a running sum and count so that ``print_loss`` yields
    the mean loss collected since the last ``print_reset()``.
    """
    def __init__(self):
        self._print_loss = 0      # running sum of batch losses
        self._print_counter = 0   # number of losses accumulated

    @property
    def print_loss(self):
        """Mean loss since the last reset.

        FIX: returns 0.0 when nothing has been accumulated yet instead
        of raising ZeroDivisionError.
        """
        if self._print_counter == 0:
            return 0.0
        return self._print_loss / self._print_counter

    def add(self, loss):
        """Accumulate one batch loss value."""
        self._print_loss += loss
        self._print_counter += 1

    def print_reset(self):
        """Clear the accumulated sum and count."""
        self._print_loss = 0
        self._print_counter = 0
class Trainer:
    """Drives NMT model training: data loading, model creation/resume,
    the optimization loop, periodic validation and checkpointing.

    Args:
        resume (str | None): path to a checkpoint to resume from, or None.
        batched (bool): if True, load pre-batched training data from disk.
        params (Parameters): parsed configuration object.
    """
    def __init__(self, resume, batched, params):
        self.resume = resume
        self.batched = batched
        self.params = params
        self.model_generator = ModelGenerator(self.params.model.type)
        # pick device
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        torch.set_num_threads(os.cpu_count())

    def read_data(self):
        """
        read and prepare train and eval dataset
        """
        if self.resume is None:
            # create language objects
            self.src_language = Language(self.params.model.source)
            self.tgt_language = Language(self.params.model.target)
            # read vocabulary from file
            self.src_language.read_vocabulary(
                Path(f"data/vocab.{self.src_language.name}")
            )
            self.tgt_language.read_vocabulary(
                Path(f"data/vocab.{self.tgt_language.name}")
            )
        else:
            # load from file; languages come from the checkpointed model
            self.checkpoint = torch.load(
                Path(self.resume),
                map_location=self.device
            )
            self.model = self.checkpoint["model"]
            self.src_language = self.model.src_lang
            self.tgt_language = self.model.tgt_lang
        # load eval dataset
        self.eval_data = DataLoader.from_files(
            "eval", self.src_language, self.tgt_language,
            self.params.model.max_length, self.params.training.batch_size
        )
        # load train dataset
        if self.batched:
            self.train_data = BatchedData(Path("data/batched"))
        else:
            self.train_data = DataLoader.from_files(
                "train", self.src_language, self.tgt_language,
                self.params.model.max_length, self.params.training.batch_size
            )

    def create_model(self):
        """
        create a model, either from scratch or load it from file
        """
        if self.resume is None:
            self.model = self.model_generator.generate_model(
                self.params,
                self.src_language,
                self.tgt_language
            )
            # initialize parameters uniformly (embeddings excluded)
            for name, param in self.model.named_parameters():
                if "embedding" not in name:
                    param.data.uniform_(
                        - self.params.model.uniform_init,
                        self.params.model.uniform_init
                    )
        # define data converter matching the architecture
        if self.model.type == "rnn":
            self.data_converter = RNNDataConverter()
        elif self.model.type == "transformer":
            self.data_converter = TransformerDataConverter()
        print(self.model, flush=True)
        # move model to device
        self.model.to(self.device)
        # set training mode
        self.model.train()
        # loss (masked on padding index 0, optional label smoothing)
        self.criterion = MaskedLoss(
            padding_idx=0,
            smoothing=self.params.training.label_smoothing
        )
        self.optimizer = get_optimizer(self.model, self.params)
        # load optimizer
        if self.resume:
            self.optimizer.load_state_dict(
                self.checkpoint["optimizer"]
            )
            # remove checkpoint attribute from trainer
            delattr(self, "checkpoint")

    def save_model(self):
        # save model and optimizer state under pretrained_models/
        os.makedirs("pretrained_models", exist_ok=True)
        l1 = self.model.src_lang.name
        l2 = self.model.tgt_lang.name
        st = self.model.steps
        path = Path(f"pretrained_models/{self.model.type}_{l1}-{l2}_{st}.pt")
        torch.save({
            "model": self.model,
            "optimizer": self.optimizer.state_dict()
            },
            path
        )
        print("Model saved", flush=True)

    @torch.no_grad()
    def evaluate(self):
        """
        evaluate the model on the evaluation data set

        Returns the mean loss over the eval batches as a tensor.
        """
        self.model.eval()
        losses = list()
        for batch in self.eval_data:
            input_batch = self.data_converter(
                *batch,
                self.model.max_len,
                self.tgt_language.word2index["<sos>"]
            )
            loss = self.model(
                input_batch,
                self.device,
                1,  # with teacher for consistent results
                self.criterion
            )
            losses.append(loss)
        self.model.train()
        return torch.tensor(losses).mean()

    def train_loop(self):
        """
        main function of the train loop
        """
        t_init = time.time()
        training = True
        steps = 0
        self.model.to(self.device)
        while training:
            # initialize variables for monitoring
            loss_memory = Memory()
            # shuffle data
            self.train_data.shuffle()
            # start training loop over batches
            for batch in self.train_data:
                self.optimizer.zero_grad()
                # convert batch to model input
                input_batch = self.data_converter(
                    *batch,
                    self.model.max_len,
                    self.tgt_language.word2index["<sos>"]
                )
                # process batch
                loss = self.model(
                    input_batch,
                    self.device,
                    self.params.training.teacher_ratio,
                    self.criterion
                )
                loss_memory.add(loss.item())
                # calculate gradient
                loss.backward()
                # gradient clipping
                nn.utils.clip_grad_norm_(self.model.parameters(), 5)
                # optimizer step
                self.optimizer.step()
                steps += 1
                self.model.steps += 1
                # print every x steps
                if steps % self.params.training.print_every == 0:
                    t_1 = time.time()
                    ts = int(t_1 - t_init)
                    print_loss = loss_memory.print_loss
                    ppl = math.exp(print_loss)
                    lr = self.optimizer.lr
                    print_time = datetime.timedelta(seconds=ts)
                    to_print = (
                        f"Step: {steps}/{self.params.training.steps} | "
                        f"lr: {round(lr, 5)} | "
                        f"Loss: {round((print_loss), 5):.5f} | "
                        f"ppl: {round(ppl, 5):.5f} | "
                        f"Time: {print_time}"
                    )
                    print(to_print, flush=True)
                    # reset loss
                    loss_memory.print_reset()
                # validation step
                # NOTE(review): assumes valid_steps is a multiple of
                # print_every — otherwise `to_print` may be unbound here.
                if steps % self.params.training.valid_steps == 0:
                    eval_loss = self.evaluate()
                    self.optimizer.scheduler_step(eval_loss)
                    print("-"*len(to_print), flush=True)
                    print(
                        f"Validation loss: {round((eval_loss.item()), 5):.5f}",
                        flush=True
                    )
                    print("-"*len(to_print), flush=True)
                # save model
                if self.params.training.save_every != 0:
                    if steps % self.params.training.save_every == 0:
                        self.save_model()
                # check if end of training
                if steps == self.params.training.steps:
                    training = False
                    break
        # calculate and print total time for training
        t_end = time.time()
        ts = int(t_end - t_init)
        print_time = datetime.timedelta(seconds=ts)
        print(f"Training completed in: {print_time}", flush=True)
def train(args):
    """Entry point: build a Trainer from the CLI arguments and run it.

    Saves the model on completion, and also on KeyboardInterrupt so a
    manual abort never loses training progress.
    """
    params = Parameters.from_config(args.path)
    trainer = Trainer(args.resume, args.batched, params)
    trainer.read_data()
    trainer.create_model()
    try:
        trainer.train_loop()
    except KeyboardInterrupt:
        print("Aborting...")
    trainer.save_model()
acea0d796a81664c124fb6c56a643eea7f17b67c | 1,581 | py | Python | backend/test/pecas_testes/fachada_testes.py | Joaohigor/JCUsinagem | c0e48129a9d1607fb896fe9d66975182b9402933 | [
"MIT"
] | null | null | null | backend/test/pecas_testes/fachada_testes.py | Joaohigor/JCUsinagem | c0e48129a9d1607fb896fe9d66975182b9402933 | [
"MIT"
] | null | null | null | backend/test/pecas_testes/fachada_testes.py | Joaohigor/JCUsinagem | c0e48129a9d1607fb896fe9d66975182b9402933 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from config.template_middleware import TemplateResponse
from gaebusiness.business import CommandExecutionException
from gaepermission.model import MainUser
from mommygae import mommy
from pecas_app import pecas_facade
class SalvarPecasTestes(GAETestCase):
    """Integration tests for saving and listing pecas through the facade."""

    def teste_sucesso(self):
        """Happy path: a valid peca is saved and appears in the listing
        with its fields coerced to the expected types."""
        usuario = mommy.save_one(MainUser)
        salvar_cmd = pecas_facade.save_peca_cmd(**{'title': 'Testando Teste', 'price': '3.44', 'amount': '23'})
        peca = salvar_cmd()
        listar_pecas_cmd = pecas_facade.list_pecas_cmd(usuario)
        pecas = listar_pecas_cmd()
        self.assertEqual(1, len(pecas))
        # re-fetch the saved entity from the listing before asserting
        peca = pecas[0]
        self.assertEqual('Testando Teste', peca.title)
        self.assertEqual(float('3.44'), peca.price)
        self.assertEqual(int('23'), peca.amount)

    def teste_com_erros_validacao(self):
        """Validation: saving with no fields raises CommandExecutionException
        and reports every required field in the command's errors dict."""
        usuario = mommy.save_one(MainUser)
        salvar_cmd = pecas_facade.save_peca_cmd()
        self.assertRaises(CommandExecutionException,salvar_cmd)
        erros=salvar_cmd.errors
        self.assertIn('title',erros)
        self.assertIn('price',erros)
        self.assertIn('amount',erros)
        # resposta = pecas_facade.save_peca_cmd(**{'title': '', 'price': '', 'amount': ''})
        # self.assertIsInstance(resposta, TemplateResponse)
        # self.assert_can_render(resposta)
        # self.assertDictEqual({'title':u'Required field','price':u'Required field','amount':u'Required field'}, resposta.context['errors'])
acea0dacabcc85a7b239a8463352c2217b63d6ef | 29,897 | py | Python | toontown/golf/PhysicsWorldBase.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | toontown/golf/PhysicsWorldBase.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | toontown/golf/PhysicsWorldBase.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
from math import *
import math
from direct.fsm.FSM import FSM
from toontown.minigame import ArrowKeys
from direct.showbase import PythonUtil
from direct.task import Task
from direct.distributed.ClockDelta import *
import BuildGeometry
from toontown.golf import GolfGlobals
import random, time
def scalp(vec, scal):
    """Return *vec* uniformly scaled by *scal* as a new Vec3.

    FIX: the original built the scaled Vec3 and then rebound the LOCAL
    name ``vec`` to it, so the result was silently discarded and every
    call was a no-op.  Returning the new vector makes the result usable
    and stays backward compatible (callers that ignored the old None
    return are unaffected).
    """
    return Vec3(vec[0] * scal, vec[1] * scal, vec[2] * scal)
def length(vec):
    # Euclidean (L2) norm of a 3-component vector.
    squared = vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]
    return sqrt(squared)
class PhysicsWorldBase:
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhysicsWorld')
    def __init__(self, canRender = 0):
        # canRender: nonzero on clients that draw geometry; the AI server
        # passes 0 and skips all Panda render-node creation.
        self.canRender = canRender
        self.world = OdeWorld()
        self.space = OdeSimpleSpace()
        self.contactgroup = OdeJointGroup()  # per-step contact joints
        self.bodyList = []
        self.geomList = []
        self.massList = []
        self.rayList = []
        self.showContacts = 0        # debug: draw contact-point markers
        self.jointMarkers = []
        self.jointMarkerCount = 64
        self.meshDataList = []
        self.geomDataList = []
        self.commonObjectInfoDict = {}
        self.maxColCount = 0
        if self.canRender:
            # on clients, bodyList doubles as the (nodePath, body) pair list
            self.odePandaRelationList = self.bodyList
            self.root = render.attachNewNode('physics root node')
        else:
            self.root = NodePath('physics root node')
        self.placerNode = self.root.attachNewNode('Placer')
        self.subPlacerNode = self.placerNode.attachNewNode('Placer Sub Node')
        self.commonObjectDict = {}
        self.commonId = 0
        self.worldAttach = self.root.attachNewNode('physics geom attach point')
        # Repeating cycle (seconds) that drives scripted moving objects.
        self.timingCycleLength = 10.0
        self.timingCycleOffset = 0.0
        self.timingSimTime = 0.0
        # Fixed physics step rate; DTAStep is the per-step delta time.
        self.FPS = 90.0
        self.refFPS = 60.0
        self.DTAStep = 1.0 / self.FPS
        self.refCon = 1.2  # reference dampening constant for surface table
    def delete(self):
        # Tear down the simulation: stop the task, destroy every ODE
        # object and remove all render nodes.  Order matters — geoms and
        # bodies must go before the space/world that own them.
        self.notify.debug('Max Collision Count was %s' % self.maxColCount)
        self.stopSim()
        self.commonObjectDict = None
        if self.canRender:
            for pair in self.odePandaRelationList:
                pair[0].removeNode()
                pair[1].destroy()
            self.odePandaRelationList = None
        else:
            for body in self.bodyList:
                body[1].destroy()
            self.bodyList = None
        for mass in self.massList:
            mass = None
        for geom in self.geomList:
            geom.destroy()
            geom = None
        for ray in self.rayList:
            ray.destroy()
            ray = None
        self.placerNode.removeNode()
        self.root.removeNode()
        for marker in self.jointMarkers:
            marker.removeNode()
        self.jointMarkers = None
        for data in self.geomDataList:
            data.destroy()
        for data in self.meshDataList:
            data.destroy()
        self.floor.destroy()
        self.floor = None
        self.contactgroup.empty()
        self.world.destroy()
        self.space.destroy()
        self.world = None
        self.space = None
        return
    def setupSimulation(self):
        # Configure world-level ODE parameters and the surface-pair table
        # (friction mu, bounce, soft ERP/CFM, slip, dampening per pair of
        # surface-type ids), then create the kill-plane floor.
        self.world.setAutoDisableFlag(0)
        self.world.setAutoDisableLinearThreshold(0.15)
        self.world.setAutoDisableAngularThreshold(0.15)
        self.world.setAutoDisableSteps(2)
        self.world.setGravity(0, 0, -25)
        self.world.setErp(0.8)
        self.world.setCfm(1e-05)
        self.world.initSurfaceTable(5)
        self.world.setSurfaceEntry(0, 0, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
        self.world.setSurfaceEntry(1, 1, 1500, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
        self.world.setSurfaceEntry(2, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
        self.world.setSurfaceEntry(0, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
        self.world.setSurfaceEntry(0, 3, 150, 0.0, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
        self.world.setSurfaceEntry(1, 3, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 1.0 / self.refCon)
        self.world.setSurfaceEntry(2, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
        self.world.setSurfaceEntry(3, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
        self.world.setSurfaceEntry(4, 4, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
        self.world.setSurfaceEntry(1, 4, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
        self.world.setSurfaceEntry(pos1=0, pos2=1, mu=80, bounce=0.15, bounce_vel=0.1, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.35 / self.refCon)
        self.world.setSurfaceEntry(pos1=2, pos2=1, mu=1500, bounce=0.9, bounce_vel=0.01, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.001 / self.refCon)
        # Catch-all plane well below the course so lost balls still collide.
        self.floor = OdePlaneGeom(self.space, Vec4(0.0, 0.0, 1.0, -20.0))
        self.floor.setCollideBits(BitMask32(0))
        self.floor.setCategoryBits(BitMask32(3840))
        self.space.setAutoCollideWorld(self.world)
        self.space.setAutoCollideJointGroup(self.contactgroup)
        self.world.setQuickStepNumIterations(8)
        self.DTA = 0.0
        self.frameCounter = 0
        if self.canRender:
            # Pre-build debug marker spheres used by postStep().
            for count in range(self.jointMarkerCount):
                testMarker = render.attachNewNode('Joint Marker')
                ballmodel = loader.loadModel('phase_3/models/misc/sphere')
                ballmodel.reparentTo(testMarker)
                ballmodel.setScale(0.1)
                testMarker.setPos(0.0, 0.0, -100.0)
                self.jointMarkers.append(testMarker)
    def setTimingCycleLength(self, time):
        # Set the period (seconds) of the repeating cycle driving moving objects.
        self.timingCycleLength = time
    def getTimingCycleLength(self):
        # Accessor for the timing cycle period (seconds).
        return self.timingCycleLength
def getCycleTime(self, doprint = 0):
cycleTime = (globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength
if doprint:
print 'Get Cycle Time %s' % cycleTime
return cycleTime
def setTimeIntoCycle(self, time, doprint = 0):
trueCycleTime = globalClock.getRealTime() % self.timingCycleLength
self.timingCycleOffset = time - trueCycleTime
if doprint:
self.notify.debug('Set Cycle Time %s' % self.timingCycleOffset)
self.notify.debug('SET cycle time %s' % ((globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength))
def getSimCycleTime(self):
return
return self.timingSimTime % self.timingCycleLength
    def startSim(self):
        # Begin stepping the ODE world every frame via the task manager.
        taskMgr.add(self.__simulationTask, 'simulation task')
    def stopSim(self):
        # Remove the per-frame physics stepping task.
        taskMgr.remove('simulation task')
    def __simulationTask(self, task):
        # Fixed-timestep driver: accumulate real frame dt into DTA and run
        # as many DTAStep-sized physics steps as fit, then sync render nodes.
        self.DTA += globalClock.getDt()
        self.frameCounter += 1
        if self.frameCounter >= 10:
            self.frameCounter = 0
        startTime = globalClock.getRealTime()
        colCount = 0  # NOTE(review): leftover profiling local, never used
        while self.DTA >= self.DTAStep:
            self.DTA -= self.DTAStep
            self.preStep()
            self.simulate()
            self.postStep()
        if self.canRender:
            self.placeBodies()
        if self.frameCounter == 0:
            # NOTE(review): endTime is computed but never used (old profiling)
            endTime = globalClock.getRealTime() - startTime
        return task.cont
    def simulate(self):
        # One physics step: collide, integrate, apply dampening, then
        # advance the scripted common objects and the simulated clock.
        self.colCount = self.space.autoCollide()
        if self.maxColCount < self.colCount:
            self.maxColCount = self.colCount
            self.notify.debug('New Max Collision Count %s' % self.maxColCount)
        self.world.quickStep(self.DTAStep)
        for bodyPair in self.bodyList:
            self.world.applyDampening(self.DTAStep, bodyPair[1])
        self.contactgroup.empty()
        self.commonObjectControl()
        self.timingSimTime = self.timingSimTime + self.DTAStep
def placeBodies(self):
for pair in self.odePandaRelationList:
pandaNodePathGeom = pair[0]
odeBody = pair[1]
if pandaNodePathGeom:
pandaNodePathGeom.setPos(odeBody.getPosition())
rotation = odeBody.getRotation() * (180.0 / math.pi)
pandaNodePathGeom.setQuat(Quat(odeBody.getQuaternion()[0], odeBody.getQuaternion()[1], odeBody.getQuaternion()[2], odeBody.getQuaternion()[3]))
    def preStep(self):
        # Hook invoked before every physics step; subclasses override.
        pass
    def postStep(self):
        # Debug visualization: move marker spheres onto active contact
        # points; unused markers are parked far below the floor.
        if self.showContacts and self.canRender:
            for count in range(self.jointMarkerCount):
                pandaNodePathGeom = self.jointMarkers[count]
                if count < self.colCount:
                    pandaNodePathGeom.setPos(self.space.getContactData(count * 3 + 0), self.space.getContactData(count * 3 + 1), self.space.getContactData(count * 3 + 2))
                else:
                    pandaNodePathGeom.setPos(0.0, 0.0, -100.0)
    def commonObjectControl(self):
        # Drive motorized common objects (types 2 and 4): pick the motor
        # velocity for the current cycle time from the entry's keyframe
        # (timeData, forceData) tables and fire an event on transitions.
        time = self.getCycleTime()
        for key in self.commonObjectDict:
            if key not in self.commonObjectInfoDict:
                self.commonObjectInfoDict[key] = None
            entry = self.commonObjectDict[key]
            if entry[1] in [2, 4]:
                type = entry[1]
                body = entry[2]
                motor = entry[3]
                timeData = entry[4]
                forceData = entry[5]
                eventData = entry[6]
                model = entry[7]
                force = 0.0
                for index in range(len(timeData)):
                    # last keyframe holds until the cycle wraps; otherwise
                    # pick the keyframe whose interval contains `time`
                    if index == len(timeData) - 1 and timeData[index] < time or timeData[index] < time and timeData[index + 1] > time:
                        force = forceData[index]
                        event = eventData[index]
                        if event != self.commonObjectInfoDict[key]:
                            self.commonObjectEvent(key, model, type, force, event)
                            self.commonObjectInfoDict[key] = event
                motor.setParamVel(force)
        return
def commonObjectEvent(self, key, model, type, force, event):
self.notify.debug('commonObjectForceEvent %s %s %s %s %s' % (key,
model,
type,
force,
event))
    def getCommonObjectData(self):
        # Serialize every common object's dynamic state for network sync.
        # Each 15-tuple: (id, type, pos xyz, quat wxyz, angVel xyz, linVel xyz).
        # Slot 0 is a timestamp record carrying the cycle time in field 2.
        objectStream = [(0,
         0,
         self.getCycleTime(),
         0,
         0,
         0,
         0,
         0,
         0,
         0,
         0,
         0,
         0,
         0,
         0)]
        for key in self.commonObjectDict:
            objectPair = self.commonObjectDict[key]
            object = objectPair[2]
            pos3 = object.getPosition()
            quat4 = object.getQuaternion()
            anV3 = object.getAngularVel()
            lnV3 = object.getLinearVel()
            data = (objectPair[0],
             objectPair[1],
             pos3[0],
             pos3[1],
             pos3[2],
             quat4[0],
             quat4[1],
             quat4[2],
             quat4[3],
             anV3[0],
             anV3[1],
             anV3[2],
             lnV3[0],
             lnV3[1],
             lnV3[2])
            objectStream.append(data)
        if len(objectStream) <= 1:
            # sentinel record (type 99) meaning "no common objects"
            data = (0, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
            objectStream.append(data)
        return objectStream
    def useCommonObjectData(self, objectData, enable = 1):
        # Apply a state stream produced by getCommonObjectData(): sync the
        # cycle clock, then overwrite each body's pose and velocities.
        if not objectData:
            return
        if objectData[1][1] == 99:
            # sentinel stream — nothing to apply
            return
        time = objectData[0]
        self.setTimeIntoCycle(time[2])
        if time[2] > self.timingCycleLength:
            pass
        for dataIndex in range(1, len(objectData)):
            data = objectData[dataIndex]
            commonObject = self.commonObjectDict[data[0]]
            commonObject[2].setPosition(data[2], data[3], data[4])
            commonObject[2].setQuaternion(Quat(data[5], data[6], data[7], data[8]))
            commonObject[2].setAngularVel(data[9], data[10], data[11])
            commonObject[2].setLinearVel(data[12], data[13], data[14])
            if enable:
                commonObject[2].enable()
            else:
                commonObject[2].disable()
    def createCommonObject(self, type, commonId, pos, hpr, sizeX = 0, sizeY = 0, moveDistance = 0):
        # Factory for scripted course obstacles.  Types:
        #   0 = static box, 1 = spinning cross, 2 = sliding box,
        #   3 = windmill (mesh base + hinge-driven fan), 4 = moving platform.
        # Returns the argument list so it can be replicated over the network.
        if commonId == None:
            commonId = self.commonId
            self.commonId += 1
        vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
        vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
        rHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
        self.placerNode.setHpr(vHpr)
        self.placerNode.setPos(vPos)
        if type == 0:
            # static box obstacle
            model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0)
            box.setPosition(vPos)
            self.placerNode.setHpr(vHpr)
            box.setQuaternion(self.placerNode.getQuat())
            self.commonObjectDict[commonId] = (commonId, type, box)
        elif type == 1:
            # cross spinning around its local Z axis via a hinge motor
            model, cross = self.createCross(self.world, self.space, 1.0, 3.0, 12.0, 2.0, 2)
            motor = OdeHingeJoint(self.world)
            cross.setPosition(vPos)
            cross.setQuaternion(self.placerNode.getQuat())
            ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
            motor.setParamVel(1.5)
            motor.setParamFMax(500000000.0)
            boxsize = Vec3(1.0, 1.0, 1.0)
            motor.attach(0, cross)
            motor.setAnchor(vPos)
            motor.setAxis(ourAxis)
            self.cross = cross
            cross.enable()
            self.commonObjectDict[commonId] = (commonId, type, cross)
        elif type == 2:
            # box oscillating on a slider joint, driven by commonObjectControl
            ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
            model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0, 2)
            box.setPosition(vPos)
            box.setQuaternion(self.placerNode.getQuat())
            motor = OdeSliderJoint(self.world)
            motor.attach(box, 0)
            motor.setAxis(ourAxis)
            motor.setParamVel(3.0)
            motor.setParamFMax(5000000.0)
            motor.setParamHiStop(10.0)
            motor.setParamLoStop(-10.0)
            timeData = (0.0, 5.0)
            forceData = (3.0, -3.0)
            eventData = (1, 2)
            self.commonObjectDict[commonId] = (commonId,
             type,
             box,
             motor,
             timeData,
             forceData,
             eventData,
             model)
        elif type == 3:
            # windmill: static trimesh base plus a hinge-motored pinwheel fan
            vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
            vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
            self.placerNode.setHpr(vHpr)
            self.placerNode.setPos(vPos)
            self.subPlacerNode.setPos(0, 0, 0)
            if self.canRender:
                myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b')
            else:
                myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b.bam')
            myModel.reparentTo(self.root)
            myModel.setPos(vPos)
            myModel.setHpr(vHpr)
            millFan = myModel.find('**/windmillFan0')
            millBase = myModel.find('**/arm')
            rod = myModel.find('**/rod')
            rod.wrtReparentTo(millBase)
            self.windmillFanNodePath = millFan
            self.windmillBaseNodePath = millBase
            millData = OdeTriMeshData(millBase)
            millGeom = OdeTriMeshGeom(self.space, millData)
            self.meshDataList.append(millData)
            millGeom.setPosition(self.subPlacerNode.getPos(self.root))
            millGeom.setQuaternion(self.subPlacerNode.getQuat())
            millGeom.setCollideBits(BitMask32(251658240))
            millGeom.setCategoryBits(BitMask32(8388608))
            self.space.setCollideId(millGeom, 8)
            # re-place for the fan, raised 5 units and rotated to face outward
            vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]) + 5)
            vHpr = Vec3(float(hpr[0]), float(hpr[1] + 90), float(hpr[2]) - 90)
            self.placerNode.setHpr(vHpr)
            self.placerNode.setPos(vPos)
            self.subPlacerNode.setPos(-1, 0, 0.0)
            model, cross = self.createPinWheel(self.world, self.space, 10.0, 1.6, 4.0, 0.6, 5, 3.7, 1.2, 1, millFan, (0, 0, 90), (-4.6, -0.5, -0.25), 20)
            self.placerNode.setHpr(vHpr)
            self.placerNode.setPos(vPos)
            self.subPlacerNode.setPos(-1, 0, 0.0)
            motor = OdeHingeJoint(self.world)
            cross.setPosition(self.subPlacerNode.getPos(self.root))
            cross.setQuaternion(self.placerNode.getQuat())
            ourAxis = self.root.getRelativeVector(self.subPlacerNode, Vec3(0, 0, 1))
            motor.setParamVel(1.0)
            motor.setParamFMax(50000.0)
            boxsize = Vec3(1.0, 1.0, 1.0)
            motor.attach(0, cross)
            motor.setAnchor(self.subPlacerNode.getPos(self.root))
            motor.setAxis(ourAxis)
            self.cross = cross
            cross.enable()
            self.commonObjectDict[commonId] = (commonId, type, cross)
        elif type == 4:
            # flat platform sliding back and forth along its local Y axis
            ourAxis = self.root.getRelativeVector(self.placerNode, Vec3(0, 1, 0))
            model, box = self.createBox(self.world, self.space, 50.0, sizeX, sizeY, 1.0, 2)
            box.setPosition(vPos)
            box.setQuaternion(self.placerNode.getQuat())
            motor = OdeSliderJoint(self.world)
            motor.attach(box, 0)
            motor.setAxis(ourAxis)
            motor.setParamVel(moveDistance / 4.0)
            motor.setParamFMax(25000.0)
            motor.setParamHiStop(moveDistance)
            motor.setParamLoStop(0)
            timeData = (0.0, 1.0, 5.0, 6.0)
            forceData = (-moveDistance / 4.0,
             moveDistance / 4.0,
             moveDistance / 4.0,
             -moveDistance / 4.0)
            eventData = (-1, 1, -2, 2)
            radius = moveDistance + sizeY * 0.5
            self.commonObjectDict[commonId] = (commonId,
             type,
             box,
             motor,
             timeData,
             forceData,
             eventData,
             model,
             radius)
        return [type,
         commonId,
         (pos[0], pos[1], pos[2]),
         (hpr[0], hpr[1], hpr[2]),
         sizeX,
         sizeY,
         moveDistance]
    def createSphere(self, world, space, density, radius, ballIndex = None):
        # Create a golf-ball body + sphere geom.  ballIndex (1-4) selects
        # the per-player collision masks and the render tint color.
        self.notify.debug('create sphere index %s' % ballIndex)
        body = OdeBody(world)
        M = OdeMass()
        M.setSphere(density, radius)
        body.setMass(M)
        body.setPosition(0, 0, -100)
        geom = OdeSphereGeom(space, radius)
        self.space.setSurfaceType(geom, 1)
        self.notify.debug('collide ID is %s' % self.space.setCollideId(geom, 42))
        self.massList.append(M)
        self.geomList.append(geom)
        # NOTE(review): all four ballIndex branches currently set identical
        # masks — presumably placeholders for per-player collision filtering.
        if ballIndex == 1:
            self.notify.debug('1')
            geom.setCollideBits(BitMask32(16777215))
            geom.setCategoryBits(BitMask32(4278190080L))
        elif ballIndex == 2:
            self.notify.debug('2')
            geom.setCollideBits(BitMask32(16777215))
            geom.setCategoryBits(BitMask32(4278190080L))
        elif ballIndex == 3:
            self.notify.debug('3')
            geom.setCollideBits(BitMask32(16777215))
            geom.setCategoryBits(BitMask32(4278190080L))
        elif ballIndex == 4:
            self.notify.debug('4')
            geom.setCollideBits(BitMask32(16777215))
            geom.setCategoryBits(BitMask32(4278190080L))
        else:
            geom.setCollideBits(BitMask32(4294967295L))
            geom.setCategoryBits(BitMask32(4294967295L))
        geom.setBody(body)
        if self.notify.getDebug():
            self.notify.debug('golf ball geom id')
            geom.write()
            self.notify.debug(' -')
            self.notify.debug('Collide Bits %s' % geom.getCollideBits())
        if self.canRender:
            testball = render.attachNewNode('Ball Holder')
            ballmodel = loader.loadModel('phase_6/models/golf/golf_ball')
            ballmodel.reparentTo(testball)
            ballmodel.setColor(*GolfGlobals.PlayerColors[ballIndex - 1])
            testball.setPos(0, 0, -100)
            self.odePandaRelationList.append((testball, body))
        else:
            testball = None
            self.bodyList.append((None, body))
        return (testball, body, geom)
    def createBox(self, world, space, density, lx, ly, lz, colOnlyBall = 0):
        # Create a dynamic box body + geom of dimensions lx/ly/lz.
        # colOnlyBall selects collision masks (see NOTE below).
        body = OdeBody(self.world)
        M = OdeMass()
        M.setSphere(density, 0.3 * (lx + ly + lz))
        body.setMass(M)
        boxsize = Vec3(lx, ly, lz)
        geom = OdeBoxGeom(space, boxsize)
        geom.setBody(body)
        self.space.setSurfaceType(geom, 0)
        self.space.setCollideId(geom, 7)
        self.massList.append(M)
        self.geomList.append(geom)
        # NOTE(review): the elif below is unreachable — colOnlyBall == 2 is
        # truthy, so the first branch handles it (compare createCross, which
        # tests == 1 / == 2 explicitly).  Do NOT "fix" this blindly: the
        # moving platforms (types 2/4) are created with colOnlyBall=2 and
        # rely on getting the ball-colliding mask from the first branch.
        if colOnlyBall:
            geom.setCollideBits(BitMask32(251658240))
            geom.setCategoryBits(BitMask32(0))
        elif colOnlyBall == 2:
            geom.setCollideBits(BitMask32(0))
            geom.setCategoryBits(BitMask32(0))
        if self.canRender:
            color = random.choice([Vec4(1.0, 0.0, 0.5, 1.0), Vec4(0.5, 0.5, 1.0, 1.0), Vec4(0.5, 1.0, 0.5, 1.0)])
            boxsize = Vec3(lx, ly, lz)
            boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, color, 1)
            boxNodePathGeom.setPos(0, 0, -100)
            self.odePandaRelationList.append((boxNodePathGeom, body))
        else:
            boxNodePathGeom = None
            self.bodyList.append((None, body))
        return (boxNodePathGeom, body)
def createCross(self, world, space, density, lx, ly, lz, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly, lz)
boxsize2 = Vec3(ly, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 26)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
self.odePandaRelationList.append((boxNodePathGeom, body))
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
if self.canRender:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(0, 0, -100)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(boxNodePathGeom, ly, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(0, 0, 0)
if attachedGeo:
attachedGeo.reparentTo(boxNodePathGeom)
attachedGeo.setHpr(0, 0, 90)
attachedGeo.setPos(-4.8, 0, -2.0)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross2(self, world, space, density, lx, ly, lz, latSlide, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(-latSlide, ly * 0.25, 0)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
geom2.setOffsetPosition(ly * 0.25, latSlide, 0)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 13)
geom3 = OdeBoxGeom(space, boxsize)
geom3.setBody(body)
geom3.setOffsetPosition(latSlide, -ly * 0.25, 0)
self.space.setSurfaceType(geom3, 0)
self.space.setCollideId(geom3, 13)
geom4 = OdeBoxGeom(space, boxsize2)
geom4.setBody(body)
geom4.setOffsetPosition(-ly * 0.25, -latSlide, 0)
self.space.setSurfaceType(geom4, 0)
self.space.setCollideId(geom4, 13)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
self.geomList.append(geom3)
self.geomList.append(geom4)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(251658240))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(251658240))
geom4.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(0))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(0))
geom4.setCategoryBits(BitMask32(0))
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
if attachedGeo:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(-latSlide, ly * 0.25, 0)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(ly * 0.25, latSlide, 0)
boxNodePathGeom3, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom3.setPos(latSlide, -ly * 0.25, 0)
boxNodePathGeom4, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom4.setPos(-ly * 0.25, -latSlide, 0)
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
    def createPinWheel(self, world, space, density, lx, ly, lz, numBoxes, disV, disH, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None, offRot = 0):
        """Create a pinwheel obstacle: ``numBoxes`` ODE boxes evenly spun
        around one body, each displaced by (disH, disV) from the hub.

        The placement is computed with the helper scene-graph nodes
        self.placerNode / self.subPlacerNode: the placer is rotated to each
        spoke angle and the sub-placer's resulting world transform (relative
        to self.root) gives the geom offset.
        colOnlyBall: 0 = normal collisions, 1 = collide only with golf
        balls, 2 = collide with nothing.
        Returns (nodePath-or-None, body).
        """
        body = OdeBody(self.world)
        M = OdeMass()
        M.setBox(density, lx, ly, lz)
        body.setMass(M)
        body.setFiniteRotationMode(1)
        boxsize = Vec3(lx, ly * 0.5, lz)
        boxsize2 = Vec3(ly * 0.5, lx, lz)
        self.massList.append(M)
        # Reset the placer rig before measuring spoke transforms.
        self.placerNode.setPos(0, 0, 0)
        self.placerNode.setHpr(0, 0, 0)
        self.subPlacerNode.setHpr(0, 0, 0)
        self.subPlacerNode.setPos(disH, disV, 0)
        # Headless servers still need a node to parent debug geometry under.
        if self.canRender:
            someNodePathGeom = render.attachNewNode('pinwheel')
        else:
            someNodePathGeom = self.root.attachNewNode('pinwheel')
        for num in range(numBoxes):
            # Evenly distribute the spokes, with an optional starting offset.
            spin = 360.0 * float(num) / float(numBoxes) + float(offRot)
            self.placerNode.setH(spin)
            geom = OdeBoxGeom(space, boxsize)
            geom.setBody(body)
            # Copy the sub-placer's transform (relative to root) onto the geom.
            geom.setOffsetPosition(self.subPlacerNode.getPos(self.root))
            geom.setOffsetQuaternion(self.subPlacerNode.getQuat(self.root))
            self.geomList.append(geom)
            self.space.setSurfaceType(geom, 0)
            self.space.setCollideId(geom, 13)
            if colOnlyBall == 1:
                geom.setCollideBits(BitMask32(251658240))
                geom.setCategoryBits(BitMask32(0))
            elif colOnlyBall == 2:
                geom.setCollideBits(BitMask32(0))
                geom.setCategoryBits(BitMask32(0))
            if not attachedGeo:
                # No custom model supplied: draw a plain white box per spoke.
                boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
                boxNodePathGeom.setPos(self.subPlacerNode.getPos(self.root))
                boxNodePathGeom.setHpr(self.subPlacerNode.getHpr(self.root))
        if attachedGeo and self.canRender:
            # Use the supplied model for the whole wheel instead.
            attachedGeo.reparentTo(someNodePathGeom)
            attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
            attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
        if self.canRender:
            self.odePandaRelationList.append((someNodePathGeom, body))
        else:
            someNodePathGeom = None
            self.bodyList.append((None, body))
        return (someNodePathGeom, body)
def attachMarker(self, body):
if self.canRender:
testMarker = render.attachNewNode('Joint Marker')
ballmodel = loader.loadModel('models/misc/sphere')
ballmodel.reparentTo(testMarker)
ballmodel.setScale(0.25)
testMarker.setPos(0.0, 0.0, -100.0)
self.odePandaRelationList.append((testMarker, body))
| 41.523611 | 170 | 0.581363 |
acea0e3e637964eccb95f74d87dc44b555c07af3 | 647 | py | Python | tests/conftest.py | doreenfan/vacation_router | 638e5ae6fbed4d93cb89d5c801172c439e98f79d | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | doreenfan/vacation_router | 638e5ae6fbed4d93cb89d5c801172c439e98f79d | [
"Apache-2.0"
] | 1 | 2021-12-26T01:49:37.000Z | 2021-12-26T01:49:37.000Z | tests/conftest.py | doreenfan/vacation_router | 638e5ae6fbed4d93cb89d5c801172c439e98f79d | [
"Apache-2.0"
] | 1 | 2021-12-26T01:30:53.000Z | 2021-12-26T01:30:53.000Z | import pytest
from pathlib import Path
from vacation_router.parser import get_distance_graph, parse_user_inputs_to_df
@pytest.fixture
def user_files():
    """Paths (as strings) to the checked-in sample input files."""
    # The test_files directory sits next to the package root.
    test_files_dir = Path(__file__).parent.parent / "test_files"
    return {
        "kml": str(test_files_dir / "Toronto2021.kml"),
        "loc_data": str(test_files_dir / "interest_and_time.csv"),
    }
@pytest.fixture
def user_input_df(user_files):
    """DataFrame parsed from the sample KML + CSV inputs."""
    df = parse_user_inputs_to_df(
        user_files["kml"], user_input_csv=user_files["loc_data"]
    )
    # Sanity check: the sample data set contains exactly 24 locations.
    assert df.shape[0] == 24
    return df
@pytest.fixture
def ex_graph(user_input_df):
    """Distance graph built from the sample user-input DataFrame."""
    # Fix: extraction garbage ("| 26.958333 | 78 | ...") was fused onto the
    # end of this return line, breaking the module's syntax.
    return get_distance_graph(user_input_df)
acea0e749401124e528ebbea217b93961704e6d9 | 5,532 | py | Python | PDF.co Web API/PDF To JSON API/Python/Advanced Conversion Options/ConvertPdfToJSONFromUploadedFile.py | atkins126/ByteScout-SDK-SourceCode | cc4bc9e779ad95f85be0a8630c17878006059684 | [
"Apache-2.0"
] | 24 | 2017-01-13T13:43:21.000Z | 2021-12-23T07:57:19.000Z | PDF.co Web API/PDF To JSON API/Python/Advanced Conversion Options/ConvertPdfToJSONFromUploadedFile.py | atkins126/ByteScout-SDK-SourceCode | cc4bc9e779ad95f85be0a8630c17878006059684 | [
"Apache-2.0"
] | 1 | 2017-03-29T08:22:18.000Z | 2017-05-13T12:27:02.000Z | PDF.co Web API/PDF To JSON API/Python/Advanced Conversion Options/ConvertPdfToJSONFromUploadedFile.py | atkins126/ByteScout-SDK-SourceCode | cc4bc9e779ad95f85be0a8630c17878006059684 | [
"Apache-2.0"
] | 35 | 2016-08-03T19:15:44.000Z | 2022-03-27T16:38:58.000Z | import os
import requests # pip install requests
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co/documentation/api
API_KEY = "******************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Source PDF file
SourceFile = ".\\sample.pdf"
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination JSON file name
DestinationFile = ".\\result.json"
def main(args = None):
    """Upload the sample PDF and, if the upload succeeded, convert it."""
    source_url = uploadFile(SourceFile)
    if source_url is not None:
        convertPdfToJson(source_url, DestinationFile)
def convertPdfToJson(uploadedFileUrl, destinationFile):
    """Converts PDF To Json using PDF.co Web API.

    uploadedFileUrl: URL of the already-uploaded source PDF.
    destinationFile: local path the resulting JSON is written to.
    Errors are reported via print(); nothing is returned.
    """
    # Advanced conversion options are passed through "profiles"; keys map to
    # properties of the JSONExtractor class from the ByteScout JSON Extractor
    # SDK used in the back-end:
    # https://cdn.bytescout.com/help/BytescoutPDFExtractorSDK/html/84356d44-6249-3251-2da8-83c1f34a2f39.htm
    # Other supported keys include SaveImages, ImageFormat, SaveVectors,
    # ExtractInvisibleText, LineGroupingMode, ColumnDetectionMode,
    # CustomExtractionColumns, CheckPermissions, etc.
    profiles = '{ "profiles": [ { "profile1": { "TrimSpaces": "False", "PreserveFormattingOnTextExtraction": "True" } } ] }'

    # Prepare request params; see documentation: https://apidocs.pdf.co
    parameters = {}
    parameters["name"] = os.path.basename(destinationFile)
    parameters["password"] = Password
    parameters["pages"] = Pages
    parameters["url"] = uploadedFileUrl
    parameters["profiles"] = profiles

    # Prepare URL for 'PDF To Json' API request
    url = "{}/pdf/convert/to/json".format(BASE_URL)

    # Execute request and get response as JSON
    response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
    if (response.status_code == 200):
        json = response.json()
        if json["error"] == False:
            # Get URL of result file
            resultFileUrl = json["url"]
            # Download result file
            r = requests.get(resultFileUrl, stream=True)
            if (r.status_code == 200):
                with open(destinationFile, 'wb') as file:
                    for chunk in r:
                        file.write(chunk)
                print(f"Result file saved as \"{destinationFile}\" file.")
            else:
                # Bugfix: this message previously reported the status of the
                # conversion POST (``response``, always 200 on this path)
                # instead of the failing download GET (``r``).
                print(f"Request error: {r.status_code} {r.reason}")
        else:
            # Show service reported error
            print(json["message"])
    else:
        print(f"Request error: {response.status_code} {response.reason}")
def uploadFile(fileName):
    """Uploads file to the cloud.

    Returns the public URL of the uploaded file, or None on any error
    (which is reported via print()).
    """
    # 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
    url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
        BASE_URL, os.path.basename(fileName))
    response = requests.get(url, headers={ "x-api-key": API_KEY })
    if response.status_code != 200:
        print(f"Request error: {response.status_code} {response.reason}")
        return None
    json = response.json()
    if json["error"] != False:
        # Show service reported error
        print(json["message"])
        return None
    # URL to use for the actual upload, and the permanent reference URL.
    uploadUrl = json["presignedUrl"]
    uploadedFileUrl = json["url"]
    # 2. UPLOAD FILE TO CLOUD.
    with open(fileName, 'rb') as file:
        requests.put(uploadUrl, data=file, headers={ "x-api-key": API_KEY, "content-type": "application/octet-stream" })
    return uploadedFileUrl
if __name__ == '__main__':
main() | 42.883721 | 154 | 0.61081 |
acea109ab9ccf8f9f87a88289ea2bbb553de792f | 6,458 | py | Python | .history/feature_engineering_20200506140108.py | shenglinqian/quant_feature_engineering | 4040a6aea8f78be2a12a382c9cafcf2888c0f6c7 | [
"MIT"
] | null | null | null | .history/feature_engineering_20200506140108.py | shenglinqian/quant_feature_engineering | 4040a6aea8f78be2a12a382c9cafcf2888c0f6c7 | [
"MIT"
] | null | null | null | .history/feature_engineering_20200506140108.py | shenglinqian/quant_feature_engineering | 4040a6aea8f78be2a12a382c9cafcf2888c0f6c7 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import asyncio
import os,sys,datetime
import scipy as sp
import statsmodels.tsa.stattools as sts
import statsmodels.api as sm
import matplotlib.pyplot as plt
import talib
import pickle
import xgboost as xgb
from sklearn import metrics
from sklearn.impute import KNNImputer
from sklearn.pipeline import FeatureUnion,Pipeline
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, TransformerMixin
#用于生成特征的函数
# Collection of rolling-window feature functions.  Each takes the five OHLCV
# series for one window and returns a single scalar feature value.
class my_features_functions:
    """Namespace of per-window feature functions for OHLCV bars.

    Every feature method has the signature
    ``f(self, open_price, high_price, low_price, close_price, vol)`` and
    returns one float computed over the whole window.
    """
    def __init__(self):
        #print(self.__str__())
        return
    # Price/volume correlation (close vs. volume).
    def p2vol(self,open_price,high_price,low_price,close_price,vol):
        price=np.array(close_price)
        vol=np.array(vol)
        # Pearson correlation between close and volume.
        return np.corrcoef(price,vol)[0][1]
    # OLS regression coefficient of high on low.
    def low2high(self,open_price,high_price,low_price,close_price,vol):
        low=np.array(low_price)
        high=np.array(high_price)
        X=sm.add_constant(low)
        model = sm.OLS(high,X)
        results = model.fit()
        # Slope of high ~ const + low.
        return results.params[1]
    # Low-volume momentum minus high-volume momentum.
    def lowmom_highrev(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,close_price,vol],index=["open","close","vol"]).T
        df.sort_values(by="vol",inplace=True)
        df["ret"]=df["close"]/df["open"]-1
        # Use the bottom/top quartile by volume, at least 2 bars each.
        count_num=int(len(df)/4)
        count_num=max(2,count_num)
        return (df.head(count_num)["ret"].mean()-df.tail(count_num)["ret"].mean())
    # Trend efficiency: net move divided by the sum of absolute bar moves.
    def trend_eff(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,high_price,low_price,close_price],index=["open","high","low","close"]).T
        total_chg=df["close"].iloc[-1]/df["open"].iloc[0]-1
        df["abs_chg"]=np.abs(df["close"]/df["open"]-1)
        # NOTE(review): divides by zero if every bar has close == open.
        return total_chg/df["abs_chg"].sum()
    # Highest high relative to the window's opening price.
    def max_high_open(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,high_price,low_price,close_price],index=["open","high","low","close"]).T
        return df["high"].max()/df["open"].iloc[0]-1
    # Lowest low relative to the window's opening price.
    def min_low_open(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,high_price,low_price,close_price],index=["open","high","low","close"]).T
        return df["low"].min()/df["open"].iloc[0]-1
    # Maximum high-low range relative to the open-to-close change.
    def maxvola(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,high_price,low_price,close_price],index=["open","high","low","close"]).T
        maxvolatility=df["high"].max()/df["low"].min()-1
        close2open=df["close"].iloc[-1]/df["open"].iloc[0]-1
        if close2open!=0:
            return maxvolatility/close2open-1
        else:
            # Flat window: fall back to the raw range.
            return maxvolatility
    # Net proportion of up bars: (#up - #down) / (#up + #down).
    def up_percent(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,high_price,low_price,close_price],index=["open","high","low","close"]).T
        up_count=len(df[df["close"]>df["open"]])
        down_count=len(df[df["close"]<df["open"]])
        if (up_count+down_count)>0:
            return (up_count-down_count)/(up_count+down_count)
        else:
            return 0
    # Close relative to an inverse-log-volume-weighted moving average.
    def vol_weighted_ma(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,high_price,low_price,close_price,vol],index=["open","high","low","close","vol"]).T
        # Lower volume => larger weight (1 / log(vol + 1)).
        df["vol_reverse"]=1/np.log(df["vol"]+1)
        df["weight"]=df["vol_reverse"]/(df["vol_reverse"].sum())
        weight_ma=(df["close"]*df["weight"]).sum()/(df["weight"].sum())
        return df["close"].iloc[-1]/weight_ma-1
    # Plain arithmetic mean of the closes.
    def normal_ma(self,open_price,high_price,low_price,close_price,vol):
        return np.mean(np.array(close_price))
    # Close relative to the lowest low of the window.
    def close2_minlow(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,high_price,low_price,close_price],index=["open","high","low","close"]).T
        return df["close"].iloc[-1]/np.min(low_price)-1
    # Close relative to the highest high of the window.
    def close2_maxhigh(self,open_price,high_price,low_price,close_price,vol):
        df=pd.DataFrame([open_price,high_price,low_price,close_price],index=["open","high","low","close"]).T
        return df["close"].iloc[-1]/np.max(high_price)-1
    # Return the names of all feature methods defined on this class.
    # NOTE(review): defined without ``self`` and without @staticmethod, so it
    # only works when called on the class itself
    # (my_features_functions.get_all_methold()), not on an instance --
    # confirm intended usage.  ("methold" is a typo kept for compatibility.)
    def get_all_methold():
        method_list=[]
        for func in my_features_functions.__dict__:
            method_list.append(func)
        #print(method_list)
        # Drop dunder names and this helper itself.
        my_func_list=filter(lambda m: not m.startswith("__") and not m.endswith("__"),method_list)
        my_func_list=list(my_func_list)
        my_func_list.remove("get_all_methold")
        return my_func_list
# Generate rolling features from time-series (candlestick) data.
class feature_generator(BaseEstimator, TransformerMixin):
    """
    sklearn works mainly with numpy arrays, so the dataframe result is
    converted to a numpy array.  The feature function to apply is supplied
    at construction time.
    """
    def __init__(self, func_obj,rolling_window=20):
        # Callable applied to each rolling window of OHLCV columns.
        # NOTE(review): transform() calls it with five positional series
        # (open, high, low, close, volume) -- presumably a bound method of
        # my_features_functions; confirm how func_obj is produced.
        self.func_obj=func_obj
        # Number of rows per rolling window.
        self.rolling_window=rolling_window
    def fit(self, open_price,high_price,low_price,close_price,vol, y=None):
        # Stateless transformer: nothing to fit.
        return self
    def transform(self,df):
        """Return a numpy array of the rolling feature values.

        *df* must have "open", "high", "low", "close" and "volume" columns;
        the first rolling_window - 1 entries are NaN.
        """
        window_len=self.rolling_window
        # Roll over "close" only to obtain the window's index, then pull all
        # five columns for that index when invoking the feature function.
        result=df["close"].rolling(window=window_len). \
            apply(lambda x:self.func_obj(df.loc[x.index,"open"], \
            df.loc[x.index,"high"],df.loc[x.index,"low"], \
            df.loc[x.index,"close"],df.loc[x.index,"volume"]))
        return np.array(result)
class feature_engineering():
    """Build a rolling-feature matrix by applying each feature function over
    a sliding window of OHLCV data via a sklearn FeatureUnion."""
    def __init__(self,func_list):
        # Feature functions; each takes (open, high, low, close, vol) series.
        self.func_list=func_list
        # Rolling window length shared by every feature.
        self.rolling_window=10
    def generate_transform_list(self,func_list,my_fea_generator):
        """Build ([name, transformer] pairs, names) for FeatureUnion.

        ``my_fea_generator`` must be a callable mapping one feature function
        to a transformer instance.
        """
        my_transform_list=[]
        name_list=[]
        for func in func_list:
            my_transform_list.append([func.__name__,my_fea_generator(func)])
            name_list.append(func.__name__)
        return my_transform_list,name_list
    def output_feature(self,data):
        """Return a DataFrame of rolling features, one column per function."""
        # Bugfix: the original built a single feature_generator *instance*
        # from the whole func_list and passed it to generate_transform_list,
        # which then tried to call it per function -- feature_generator is
        # not callable, so output_feature always raised TypeError.  Pass a
        # factory that creates one transformer per feature function instead.
        make_generator = lambda func: feature_generator(func, self.rolling_window)
        my_transform_list,name_list=self.generate_transform_list(self.func_list,make_generator)
        features_pipline = FeatureUnion(transformer_list=my_transform_list,n_jobs=-1)
        pip_result=features_pipline.transform(data)
        pip_df_result=pd.DataFrame(pip_result.reshape(len(data),len(self.func_list)),columns=name_list)
        return pip_df_result
| 38.670659 | 118 | 0.671415 |
acea10aacfb8cd30c62abb1f626c0a459e00bbfe | 696 | py | Python | tests/system/action/resource/test_delete.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | tests/system/action/resource/test_delete.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | tests/system/action/resource/test_delete.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | from tests.system.action.base import BaseActionTestCase
class ResourceDeleteActionTest(BaseActionTestCase):
    """System tests for the resource.delete backend action."""

    def test_delete_correct(self) -> None:
        """Deleting an existing resource removes it from the datastore."""
        self.create_model("resource/111", {"token": "srtgb123"})
        delete_response = self.request("resource.delete", {"id": 111})
        self.assert_status_code(delete_response, 200)
        self.assert_model_deleted("resource/111")

    def test_delete_wrong_id(self) -> None:
        """Deleting a missing id fails and leaves other resources intact."""
        self.create_model("resource/112", {"token": "srtgb123"})
        delete_response = self.request("resource.delete", {"id": 111})
        self.assert_status_code(delete_response, 400)
        # The unrelated resource must be untouched.
        surviving = self.get_model("resource/112")
        assert surviving.get("token") == "srtgb123"
| 38.666667 | 64 | 0.675287 |
acea12076bac3f67991b6a04a784653ac7a94bf4 | 31,225 | py | Python | desktop/core/ext-py/zope.interface-4.5.0/src/zope/interface/declarations.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/zope.interface-4.5.0/src/zope/interface/declarations.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/zope.interface-4.5.0/src/zope/interface/declarations.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | ##############################################################################
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
##############################################################################
"""Implementation of interface declarations
There are three flavors of declarations:
- Declarations are used to simply name declared interfaces.
- ImplementsDeclarations are used to express the interfaces that a
class implements (that instances of the class provides).
Implements specifications support inheriting interfaces.
- ProvidesDeclarations are used to express interfaces directly
provided by objects.
"""
__docformat__ = 'restructuredtext'
import sys
from types import FunctionType
from types import MethodType
from types import ModuleType
import weakref
from zope.interface.advice import addClassAdvisor
from zope.interface.interface import InterfaceClass
from zope.interface.interface import SpecificationBase
from zope.interface.interface import Specification
from zope.interface._compat import CLASS_TYPES as DescriptorAwareMetaClasses
from zope.interface._compat import PYTHON3
# Registry of class-implementation specifications
BuiltinImplementationSpecifications = {}
_ADVICE_ERROR = ('Class advice impossible in Python3. '
'Use the @%s class decorator instead.')
_ADVICE_WARNING = ('The %s API is deprecated, and will not work in Python3 '
'Use the @%s class decorator instead.')
class named(object):
    """Decorator that stamps a component name onto the decorated object.

    ``@named('foo')`` sets ``ob.__component_name__ = 'foo'`` and returns
    the object otherwise unchanged.
    """

    def __init__(self, name):
        # The component name to assign on call.
        self.name = name

    def __call__(self, ob):
        # Stamp the name, then hand the object back so this works as a
        # decorator.
        setattr(ob, '__component_name__', self.name)
        return ob
class Declaration(Specification):
    """Interface declarations

    A simple named collection of interfaces; the base Specification
    provides the resolution-order machinery (__iro__, extends, ...).
    """

    def __init__(self, *interfaces):
        # Accept interfaces, nested sequences, or other specifications;
        # _normalizeargs flattens them into a plain interface list.
        Specification.__init__(self, _normalizeargs(interfaces))

    def changed(self, originally_changed):
        # Invalidate the cached attribute map (rebuilt lazily) whenever the
        # specification changes.
        Specification.changed(self, originally_changed)
        try:
            del self._v_attrs
        except AttributeError:
            pass

    def __contains__(self, interface):
        """Test whether an interface is in the specification
        """
        # Must be both extended by this spec and directly listed in it.
        return self.extends(interface) and interface in self.interfaces()

    def __iter__(self):
        """Return an iterator for the interfaces in the specification
        """
        return self.interfaces()

    def flattened(self):
        """Return an iterator of all included and extended interfaces
        """
        return iter(self.__iro__)

    def __sub__(self, other):
        """Remove interfaces from a specification
        """
        # Keep only interfaces that do not (even non-strictly) extend any
        # interface in *other*.
        return Declaration(
            *[i for i in self.interfaces()
                if not [j for j in other.interfaces()
                        if i.extends(j, 0)]
              ]
              )

    def __add__(self, other):
        """Add two specifications or a specification and an interface
        """
        # Concatenate, deduplicating while preserving declaration order
        # (self's interfaces first).
        seen = {}
        result = []
        for i in self.interfaces():
            seen[i] = 1
            result.append(i)
        for i in other.interfaces():
            if i not in seen:
                seen[i] = 1
                result.append(i)

        return Declaration(*result)

    # Addition is symmetric, so reuse __add__ for reflected operands.
    __radd__ = __add__
__radd__ = __add__
##############################################################################
#
# Implementation specifications
#
# These specify interfaces implemented by instances of classes
class Implements(Declaration):
    """The declaration of interfaces implemented by a class's instances.

    Instances are created and cached by implementedBy(); __reduce__ makes
    pickling round-trip through that cache.
    """

    # class whose specification should be used as additional base
    inherit = None

    # interfaces actually declared for a class
    declared = ()

    # Placeholder; implementedBy() installs the real dotted name.
    __name__ = '?'

    @classmethod
    def named(cls, name, *interfaces):
        # Implementation method: Produce an Implements interface with
        # a fully fleshed out __name__ before calling the constructor, which
        # sets bases to the given interfaces and which may pass this object to
        # other objects (e.g., to adjust dependents). If they're sorting or comparing
        # by name, this needs to be set.
        inst = cls.__new__(cls)
        inst.__name__ = name
        inst.__init__(*interfaces)
        return inst

    def __repr__(self):
        return '<implementedBy %s>' % (self.__name__)

    def __reduce__(self):
        # Unpickle by re-deriving the spec from the class, so the cached
        # instance is reused rather than duplicated.
        return implementedBy, (self.inherit, )

    def __cmp(self, other):
        # Yes, I did mean to name this __cmp, rather than __cmp__.
        # It is a private method used by __lt__ and __gt__.
        # This is based on, and compatible with, InterfaceClass.
        # (The two must be mutually comparable to be able to work in e.g., BTrees.)
        # Instances of this class generally don't have a __module__ other than
        # `zope.interface.declarations`, whereas they *do* have a __name__ that is the
        # fully qualified name of the object they are representing.

        # Note, though, that equality and hashing are still identity based. This
        # accounts for things like nested objects that have the same name (typically
        # only in tests) and is consistent with pickling. As far as comparisons to InterfaceClass
        # goes, we'll never have equal name and module to those, so we're still consistent there.

        # Instances of this class are essentially intended to be unique and are
        # heavily cached (note how our __reduce__ handles this) so having identity
        # based hash and eq should also work.
        if other is None:
            return -1

        n1 = (self.__name__, self.__module__)
        n2 = (getattr(other, '__name__', ''), getattr(other, '__module__', ''))

        # This spelling works under Python3, which doesn't have cmp().
        return (n1 > n2) - (n1 < n2)

    def __hash__(self):
        # Identity-based, inherited behavior; see the comment in __cmp.
        return Declaration.__hash__(self)

    # We want equality to be based on identity. However, we can't actually
    # implement __eq__/__ne__ to do this because sometimes we get wrapped in a proxy.
    # We need to let the proxy types implement these methods so they can handle unwrapping
    # and then rely on: (1) the interpreter automatically changing `implements == proxy` into
    # `proxy == implements` (which will call proxy.__eq__ to do the unwrapping) and then
    # (2) the default equality semantics being identity based.

    def __lt__(self, other):
        c = self.__cmp(other)
        return c < 0

    def __le__(self, other):
        c = self.__cmp(other)
        return c <= 0

    def __gt__(self, other):
        c = self.__cmp(other)
        return c > 0

    def __ge__(self, other):
        c = self.__cmp(other)
        return c >= 0
def _implements_name(ob):
# Return the __name__ attribute to be used by its __implemented__
# property.
# This must be stable for the "same" object across processes
# because it is used for sorting. It needn't be unique, though, in cases
# like nested classes named Foo created by different functions, because
# equality and hashing is still based on identity.
# It might be nice to use __qualname__ on Python 3, but that would produce
# different values between Py2 and Py3.
return (getattr(ob, '__module__', '?') or '?') + \
'.' + (getattr(ob, '__name__', '?') or '?')
def implementedByFallback(cls):
    """Return the interfaces implemented for a class' instances

      The value returned is an IDeclaration.

      On first call the Implements spec is computed, cached on the class
      (or in BuiltinImplementationSpecifications for classes that reject
      attribute assignment), and returned unchanged on later calls.
    """
    try:
        spec = cls.__dict__.get('__implemented__')
    except AttributeError:
        # we can't get the class dict. This is probably due to a
        # security proxy.  If this is the case, then probably no
        # descriptor was installed for the class.

        # We don't want to depend directly on zope.security in
        # zope.interface, but we'll try to make reasonable
        # accommodations in an indirect way.

        # We'll check to see if there's an implements:

        spec = getattr(cls, '__implemented__', None)
        if spec is None:
            # There's no spec stored in the class. Maybe its a builtin:
            spec = BuiltinImplementationSpecifications.get(cls)
            if spec is not None:
                return spec
            return _empty

        if spec.__class__ == Implements:
            # we defaulted to _empty or there was a spec. Good enough.
            # Return it.
            return spec

        # TODO: need old style __implements__ compatibility?
        # Hm, there's an __implemented__, but it's not a spec. Must be
        # an old-style declaration. Just compute a spec for it
        return Declaration(*_normalizeargs((spec, )))

    if isinstance(spec, Implements):
        # Cached spec found directly in the class dict: the common fast path.
        return spec

    if spec is None:
        spec = BuiltinImplementationSpecifications.get(cls)
        if spec is not None:
            return spec

    # TODO: need old style __implements__ compatibility?
    spec_name = _implements_name(cls)
    if spec is not None:
        # old-style __implemented__ = foo declaration
        spec = (spec, ) # tuplefy, as it might be just an int
        spec = Implements.named(spec_name, *_normalizeargs(spec))
        spec.inherit = None    # old-style implies no inherit
        del cls.__implemented__ # get rid of the old-style declaration
    else:
        # No declaration at all: derive the spec from the base classes.
        try:
            bases = cls.__bases__
        except AttributeError:
            if not callable(cls):
                raise TypeError("ImplementedBy called for non-factory", cls)
            bases = ()

        spec = Implements.named(spec_name, *[implementedBy(c) for c in bases])
        spec.inherit = cls

    try:
        # Cache the computed spec on the class and make sure the provided-by
        # descriptors are in place.
        cls.__implemented__ = spec
        if not hasattr(cls, '__providedBy__'):
            cls.__providedBy__ = objectSpecificationDescriptor
        if (isinstance(cls, DescriptorAwareMetaClasses)
            and
            '__provides__' not in cls.__dict__):
            # Make sure we get a __provides__ descriptor
            cls.__provides__ = ClassProvides(
                cls,
                getattr(cls, '__class__', type(cls)),
                )
    except TypeError:
        # Builtins reject attribute assignment; cache the spec externally.
        if not isinstance(cls, type):
            raise TypeError("ImplementedBy called for non-type", cls)
        BuiltinImplementationSpecifications[cls] = spec

    return spec
implementedBy = implementedByFallback
def classImplementsOnly(cls, *interfaces):
    """Declare the only interfaces implemented by instances of a class

      The arguments after the class are one or more interfaces or interface
      specifications (``IDeclaration`` objects).

      The interfaces given (including the interfaces in the specifications)
      replace any previous declarations.
    """
    spec = implementedBy(cls)
    # Wipe any previously declared interfaces and drop inheritance from the
    # base classes' specs, then re-declare from scratch.
    spec.declared = ()
    spec.inherit = None
    classImplements(cls, *interfaces)
def classImplements(cls, *interfaces):
    """Declare additional interfaces implemented for instances of a class

      The arguments after the class are one or more interfaces or
      interface specifications (``IDeclaration`` objects).

      The interfaces given (including the interfaces in the specifications)
      are added to any interfaces previously declared.
    """
    spec = implementedBy(cls)
    spec.declared += tuple(_normalizeargs(interfaces))

    # Recompute the specification's bases: the declared interfaces first,
    # then the specs implemented by the base classes, deduplicated while
    # preserving encounter order.
    seen = set()
    bases = []

    def _add_unique(base):
        if base not in seen:
            seen.add(base)
            bases.append(base)

    for declared in spec.declared:
        _add_unique(declared)

    if spec.inherit is not None:
        for base_class in spec.inherit.__bases__:
            _add_unique(implementedBy(base_class))

    spec.__bases__ = tuple(bases)
def _implements_advice(cls):
interfaces, classImplements = cls.__dict__['__implements_advice_data__']
del cls.__implements_advice_data__
classImplements(cls, *interfaces)
return cls
class implementer:
    """Declare the interfaces implemented by instances of a class.

    This function is called as a class decorator.

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    The interfaces given (including the interfaces in the
    specifications) are added to any interfaces previously
    declared.  Previous declarations include declarations for base
    classes unless implementsOnly was used.

    This is a convenience wrapper around classImplements::

      @implementer(I1)
      class C(object):
          pass

    is equivalent to calling ``classImplements(C, I1)`` after the class
    has been created.
    """

    def __init__(self, *interfaces):
        # Interfaces to declare when the decorator is applied.
        self.interfaces = interfaces

    def __call__(self, ob):
        if isinstance(ob, DescriptorAwareMetaClasses):
            # A real class: record the declaration via classImplements.
            classImplements(ob, *self.interfaces)
            return ob

        # A non-class factory (e.g. a function): attach a spec directly.
        spec_name = _implements_name(ob)
        spec = Implements.named(spec_name, *self.interfaces)
        try:
            ob.__implemented__ = spec
        except AttributeError:
            raise TypeError("Can't declare implements", ob)
        return ob
class implementer_only:
    """Class decorator declaring the *only* interfaces implemented.

    Usage::

        @implementer_only(I1)
        class C(object): pass

    is equivalent to calling ``classImplementsOnly(C, I1)`` after the
    class has been created.  Previous declarations, including those
    inherited from base classes, are overridden.
    """

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        if isinstance(ob, (FunctionType, MethodType)):
            # Interfaces cannot be inherited on a plain function or
            # method, so this decorator only makes sense for classes.
            raise ValueError('The implementer_only decorator is not '
                             'supported for methods or functions.')
        # Assume it's a class:
        classImplementsOnly(ob, *self.interfaces)
        return ob
def _implements(name, interfaces, classImplements):
    """Shared implementation behind ``implements``/``implementsOnly``.

    Stashes *interfaces* and the declaration function in the enclosing
    class body's local namespace, then registers a class advisor that
    applies them once the class object exists (Python 2 only).

    :param name: the caller's public name, used in error messages.
    :param interfaces: interfaces / ``IDeclaration`` objects to declare.
    :param classImplements: declaration function to invoke on the class.
    :raises TypeError: when not called (exactly once) from a class body.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    # Frame 2 is the class body that called implements()/implementsOnly().
    frame = sys._getframe(2)
    locals = frame.f_locals
    # Try to make sure we were called from a class def. In 2.2.0 we can't
    # check for __module__ since it doesn't seem to be added to the locals
    # until later on.
    if locals is frame.f_globals or '__module__' not in locals:
        raise TypeError(name+" can be used only from a class definition.")
    if '__implements_advice_data__' in locals:
        raise TypeError(name+" can be used only once in a class definition.")
    # The advisor fires after class creation and consumes this marker.
    locals['__implements_advice_data__'] = interfaces, classImplements
    addClassAdvisor(_implements_advice, depth=3)
def implements(*interfaces):
    """Declare interfaces implemented by instances of a class

    This function is called in a class definition (Python 2 only; on
    Python 3 use the ``@implementer`` class decorator instead).

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    The interfaces given (including the interfaces in the
    specifications) are added to any interfaces previously
    declared.

    Previous declarations include declarations for base classes
    unless implementsOnly was used.

    This function is provided for convenience. It provides a more
    convenient way to call classImplements. For example::

        implements(I1)

    is equivalent to calling::

        classImplements(C, I1)

    after the class has been created.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'implementer')
    # Defer the declaration via the class-advice machinery.
    _implements("implements", interfaces, classImplements)
def implementsOnly(*interfaces):
    """Declare the only interfaces implemented by instances of a class

    This function is called in a class definition (Python 2 only; on
    Python 3 use the ``@implementer_only`` class decorator instead).

    The arguments are one or more interfaces or interface
    specifications (IDeclaration objects).

    Previous declarations including declarations for base classes
    are overridden.

    This function is provided for convenience. It provides a more
    convenient way to call classImplementsOnly. For example::

        implementsOnly(I1)

    is equivalent to calling::

        classImplementsOnly(I1)

    after the class has been created.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'implementer_only')
    # Defer the declaration via the class-advice machinery.
    _implements("implementsOnly", interfaces, classImplementsOnly)
##############################################################################
#
# Instance declarations
class Provides(Declaration):  # Really named ProvidesClass
    """Implement __provides__, the instance-specific specification

    When an object is pickled, we pickle the interfaces that it implements.
    """

    def __init__(self, cls, *interfaces):
        # Keep the constructor arguments so __reduce__ can recreate an
        # equivalent declaration when unpickling.
        self.__args = (cls, ) + interfaces
        self._cls = cls
        # The full declaration is the directly provided interfaces plus
        # whatever the class itself implements.
        Declaration.__init__(self, *(interfaces + (implementedBy(cls), )))

    def __reduce__(self):
        # Pickle as a call to the (caching) Provides factory function.
        return Provides, self.__args

    __module__ = 'zope.interface'

    def __get__(self, inst, cls):
        """Make sure that a class __provides__ doesn't leak to an instance
        """
        if inst is None and cls is self._cls:
            # We were accessed through a class, so we are the class'
            # provides spec. Just return this object, but only if we are
            # being called on the same class that we were defined for:
            return self
        raise AttributeError('__provides__')
# Keep the class reachable under its "real" name; the Provides name is
# rebound to a caching factory function below.
ProvidesClass = Provides

# Registry of instance declarations
# This is a memory optimization to allow objects to share specifications.
InstanceDeclarations = weakref.WeakValueDictionary()
def Provides(*interfaces):
    """Cache instance declarations

    Instance declarations are shared among instances that have the same
    declaration. The declarations are cached in a weak value dictionary.
    """
    # NOTE: this factory deliberately shadows the Provides class above
    # (still available as ProvidesClass) so that equal declarations are
    # shared through the weak-value cache.
    spec = InstanceDeclarations.get(interfaces)
    if spec is None:
        spec = ProvidesClass(*interfaces)
        InstanceDeclarations[interfaces] = spec
    return spec
Provides.__safe_for_unpickling__ = True
def directlyProvides(object, *interfaces):
    """Declare interfaces declared directly for an object

    The arguments after the object are one or more interfaces or interface
    specifications (``IDeclaration`` objects).

    The interfaces given (including the interfaces in the specifications)
    replace interfaces previously declared for the object.
    """
    cls = getattr(object, '__class__', None)
    if cls is not None and getattr(cls, '__class__', None) is cls:
        # It's a meta class (well, at least it it could be an extension class)
        # Note that we can't get here from Py3k tests: there is no normal
        # class which isn't descriptor aware.
        if not isinstance(object,
                          DescriptorAwareMetaClasses):
            raise TypeError("Attempt to make an interface declaration on a "
                            "non-descriptor-aware class")

    interfaces = _normalizeargs(interfaces)
    if cls is None:
        cls = type(object)

    # Determine whether the object itself is a class/type.
    issub = False
    for damc in DescriptorAwareMetaClasses:
        if issubclass(cls, damc):
            issub = True
            break
    if issub:
        # we have a class or type. We'll use a special descriptor
        # that provides some extra caching
        object.__provides__ = ClassProvides(object, cls, *interfaces)
    else:
        object.__provides__ = Provides(cls, *interfaces)
def alsoProvides(object, *interfaces):
    """Declare interfaces declared directly for an object

    The arguments after the object are one or more interfaces or interface
    specifications (``IDeclaration`` objects).

    The interfaces given (including the interfaces in the specifications) are
    added to the interfaces previously declared for the object.
    """
    # Re-declare the union of the existing direct declarations and the
    # new interfaces.
    directlyProvides(object, directlyProvidedBy(object), *interfaces)
def noLongerProvides(object, interface):
    """ Removes a directly provided interface from an object.

    Raises ValueError when the interface is still provided after the
    removal: that means it comes from the class (or a base), not from a
    direct declaration, and so cannot be removed from the instance.
    """
    # Remove first, then verify it is actually gone.
    directlyProvides(object, directlyProvidedBy(object) - interface)
    if interface.providedBy(object):
        raise ValueError("Can only remove directly provided interfaces.")
class ClassProvidesBaseFallback(object):
    """Descriptor base for class-level ``__provides__`` attributes.

    When looked up on the class it was defined for, it returns itself;
    when looked up on an *instance* of that class, it returns the cached
    ``_implements`` specification.  Access through any other class fails
    with AttributeError, so the declaration never leaks to subclasses.
    """

    def __get__(self, inst, cls):
        if cls is not self._cls:
            # Not the class we were defined for: behave as if absent.
            raise AttributeError('__provides__')
        if inst is None:
            # Class access: expose the provides spec itself.
            return self
        # Instance access: fall back to the implemented-by spec.
        return self._implements
# Backwards-compatibility alias for the pure-Python implementation.
ClassProvidesBasePy = ClassProvidesBaseFallback # BBB
ClassProvidesBase = ClassProvidesBaseFallback

# Try to get C base:
try:
    import zope.interface._zope_interface_coptimizations
except ImportError:
    pass
else:
    from zope.interface._zope_interface_coptimizations import ClassProvidesBase
class ClassProvides(Declaration, ClassProvidesBase):
    """Special descriptor for class __provides__

    The descriptor caches the implementedBy info, so that
    we can get declarations for objects without instance-specific
    interfaces a bit quicker.
    """

    def __init__(self, cls, metacls, *interfaces):
        self._cls = cls
        # Cache the implemented-by spec; ClassProvidesBase.__get__
        # hands it out on instance access.
        self._implements = implementedBy(cls)
        # Keep the constructor arguments so __reduce__ can rebuild an
        # equivalent declaration when unpickling.
        self.__args = (cls, metacls, ) + interfaces
        # The declaration combines the direct interfaces with what the
        # metaclass implements.
        Declaration.__init__(self, *(interfaces + (implementedBy(metacls), )))

    def __reduce__(self):
        return self.__class__, self.__args

    # Copy base-class method for speed
    __get__ = ClassProvidesBase.__get__
def directlyProvidedBy(object):
    """Return the interfaces directly provided by the given object

    The value returned is an ``IDeclaration``.
    """
    provides = getattr(object, "__provides__", None)
    if (provides is None # no spec
        or
        # We might have gotten the implements spec, as an
        # optimization. If so, it's like having only one base, that we
        # lop off to exclude class-supplied declarations:
        isinstance(provides, Implements)
        ):
        return _empty

    # Strip off the class part of the spec:
    # (the class-supplied spec is always the last base -- see Provides)
    return Declaration(provides.__bases__[:-1])
def classProvides(*interfaces):
    """Declare interfaces provided directly by a class

    This function is called in a class definition (Python 2 only; on
    Python 3 use the ``@provider`` class decorator instead).

    The arguments are one or more interfaces or interface specifications
    (``IDeclaration`` objects).

    The given interfaces (including the interfaces in the specifications)
    are used to create the class's direct-object interface specification.

    An error will be raised if the module class has an direct interface
    specification. In other words, it is an error to call this function more
    than once in a class definition.

    Note that the given interfaces have nothing to do with the interfaces
    implemented by instances of the class.

    This function is provided for convenience. It provides a more convenient
    way to call directlyProvides for a class. For example::

        classProvides(I1)

    is equivalent to calling::

        directlyProvides(theclass, I1)

    after the class has been created.
    """
    # This entire approach is invalid under Py3K. Don't even try to fix
    # the coverage for this block there. :(
    if PYTHON3:
        raise TypeError(_ADVICE_ERROR % 'provider')

    # Frame 1 is the class body that called classProvides().
    frame = sys._getframe(1)
    locals = frame.f_locals

    # Try to make sure we were called from a class def
    if (locals is frame.f_globals) or ('__module__' not in locals):
        raise TypeError("classProvides can be used only from a "
                        "class definition.")
    if '__provides__' in locals:
        raise TypeError(
            "classProvides can only be used once in a class definition.")

    # Stash the interfaces; the advisor below turns them into a real
    # declaration once the class object exists.
    locals["__provides__"] = _normalizeargs(interfaces)
    addClassAdvisor(_classProvides_advice, depth=2)
def _classProvides_advice(cls):
    """Class advisor: apply the declaration stashed by ``classProvides``.

    Pops the interface list that ``classProvides`` left in the class
    namespace and applies it via ``directlyProvides`` (Python 2 only).
    Returns the class unchanged.
    """
    pending = cls.__dict__['__provides__']
    del cls.__provides__
    directlyProvides(cls, *pending)
    return cls
class provider:
    """Class decorator version of classProvides.

    The decorated class directly provides the given interfaces; this is
    unrelated to what its *instances* implement.
    """

    def __init__(self, *interfaces):
        self.interfaces = interfaces

    def __call__(self, ob):
        directlyProvides(ob, *self.interfaces)
        return ob
def moduleProvides(*interfaces):
    """Declare interfaces provided by a module

    This function is used in a module definition.

    The arguments are one or more interfaces or interface specifications
    (``IDeclaration`` objects).

    The given interfaces (including the interfaces in the specifications) are
    used to create the module's direct-object interface specification. An
    error will be raised if the module already has an interface specification.
    In other words, it is an error to call this function more than once in a
    module definition.

    This function is provided for convenience. It provides a more convenient
    way to call directlyProvides. For example::

        moduleProvides(I1)

    is equivalent to::

        directlyProvides(sys.modules[__name__], I1)
    """
    # Frame 1 is the module body that called moduleProvides().
    frame = sys._getframe(1)
    locals = frame.f_locals

    # Try to make sure we were called from a class def
    if (locals is not frame.f_globals) or ('__name__' not in locals):
        raise TypeError(
            "moduleProvides can only be used from a module definition.")
    if '__provides__' in locals:
        raise TypeError(
            "moduleProvides can only be used once in a module definition.")

    # Modules are plain objects, so a Provides declaration (keyed on
    # ModuleType) is attached directly.
    locals["__provides__"] = Provides(ModuleType,
                                      *_normalizeargs(interfaces))
##############################################################################
#
# Declaration querying support
# XXX: is this a fossil? Nobody calls it, no unit tests exercise it, no
# doctests import it, and the package __init__ doesn't import it.
def ObjectSpecification(direct, cls):
    """Provide object specifications

    These combine information for the object and for it's classes.

    Apparently unused (see the fossil note above); kept for backwards
    compatibility only.
    """
    return Provides(cls, direct) # pragma: no cover fossil
def getObjectSpecificationFallback(ob):
    """Return the interface specification for *ob*.

    Prefers an instance ``__provides__`` (when it is a real
    specification); otherwise falls back to what the object's class
    implements, or the empty declaration when there is no class.
    """
    provides = getattr(ob, '__provides__', None)
    if provides is not None:
        if isinstance(provides, SpecificationBase):
            return provides

    try:
        cls = ob.__class__
    except AttributeError:
        # We can't get the class, so just consider provides
        return _empty
    return implementedBy(cls)

getObjectSpecification = getObjectSpecificationFallback
def providedByFallback(ob):
    """Return the interfaces provided by *ob* (pure-Python fallback).

    Handles objects whose classes do or don't support descriptors,
    carefully distinguishing instance-level ``__provides__`` from one
    supplied by the class.
    """
    # Here we have either a special object, an old-style declaration
    # or a descriptor

    # Try to get __providedBy__
    try:
        r = ob.__providedBy__
    except AttributeError:
        # Not set yet. Fall back to lower-level thing that computes it
        return getObjectSpecification(ob)

    try:
        # We might have gotten a descriptor from an instance of a
        # class (like an ExtensionClass) that doesn't support
        # descriptors.  We'll make sure we got one by trying to get
        # the only attribute, which all specs have.
        r.extends
    except AttributeError:

        # The object's class doesn't understand descriptors.
        # Sigh. We need to get an object descriptor, but we have to be
        # careful.  We want to use the instance's __provides__, if
        # there is one, but only if it didn't come from the class.
        try:
            r = ob.__provides__
        except AttributeError:
            # No __provides__, so just fall back to implementedBy
            return implementedBy(ob.__class__)

        # We need to make sure we got the __provides__ from the
        # instance. We'll do this by making sure we don't get the same
        # thing from the class:
        try:
            cp = ob.__class__.__provides__
        except AttributeError:
            # The ob doesn't have a class or the class has no
            # provides, assume we're done:
            return r

        if r is cp:
            # Oops, we got the provides from the class. This means
            # the object doesn't have it's own. We should use implementedBy
            return implementedBy(ob.__class__)

    return r

providedBy = providedByFallback
class ObjectSpecificationDescriptorFallback(object):
    """Descriptor implementing the ``__providedBy__`` attribute.

    Computes the interfaces provided by an object: an instance's own
    ``__provides__`` wins; otherwise the declaration falls back to
    what the class implements.
    """

    def __get__(self, inst, cls):
        """Return the interface specification for *inst* (or *cls*)."""
        if inst is None:
            # Accessed on the class itself.
            return getObjectSpecification(cls)
        direct = getattr(inst, '__provides__', None)
        if direct is None:
            return implementedBy(cls)
        return direct
ObjectSpecificationDescriptor = ObjectSpecificationDescriptorFallback
##############################################################################
def _normalizeargs(sequence, output = None):
    """Flatten declaration arguments into a flat list of interfaces.

    *sequence* may contain individual interfaces, ``Implements``
    specs, tuples, or ``Declaration`` objects; anything that is not
    itself an interface or implements spec is expanded recursively.

    Returns the accumulator *output* (a fresh list when none is given).
    """
    if output is None:
        output = []
    mro = sequence.__class__.__mro__
    if InterfaceClass in mro or Implements in mro:
        # A single interface (or implements spec): keep it as-is.
        output.append(sequence)
    else:
        # Anything else is treated as a nested sequence and expanded.
        for item in sequence:
            _normalizeargs(item, output)
    return output
# The empty declaration, shared wherever "no interfaces" is meant.
_empty = Declaration()

# Replace the pure-Python fallbacks with the C implementations when the
# optimization extension is importable.
try:
    import zope.interface._zope_interface_coptimizations
except ImportError:
    pass
else:
    from zope.interface._zope_interface_coptimizations import implementedBy
    from zope.interface._zope_interface_coptimizations import providedBy
    from zope.interface._zope_interface_coptimizations import (
        getObjectSpecification)
    from zope.interface._zope_interface_coptimizations import (
        ObjectSpecificationDescriptor)

objectSpecificationDescriptor = ObjectSpecificationDescriptor()
| 33.575269 | 97 | 0.661906 |
acea123f8f972cdac7afecaa7ce9aeb7f54e55ad | 2,596 | py | Python | pipeline/est/natl_state_estimates/main.py | COVID-IWG/covid-metrics-infra | 55c0827ffdcaadf9df85762314483c8d27b1612b | [
"MIT"
] | null | null | null | pipeline/est/natl_state_estimates/main.py | COVID-IWG/covid-metrics-infra | 55c0827ffdcaadf9df85762314483c8d27b1612b | [
"MIT"
] | null | null | null | pipeline/est/natl_state_estimates/main.py | COVID-IWG/covid-metrics-infra | 55c0827ffdcaadf9df85762314483c8d27b1612b | [
"MIT"
] | null | null | null | from warnings import simplefilter
import numpy as np
import pandas as pd
from epimargin.estimators import analytical_MPVS
from epimargin.smoothing import notched_smoothing
from google.cloud import storage
simplefilter("ignore")
# model details
gamma = 0.1 # 10 day infectious period
smoothing = 7
CI = 0.95
lookback = 120 # how many days back to start estimation
cutoff = 2 # most recent data to use
# cloud details
bucket_name = "daily_pipeline"
def run_estimates(_):
    """Cloud Function entry point: compute Rt estimates for India.

    Downloads the national case time series from GCS, estimates the
    reproduction number nationally and per state, writes CSVs, and
    uploads them back to the bucket.

    NOTE(review): several names used below are not defined in this
    module (``get_time_series``, ``state_df``, ``state_name_lookup``,
    ``gamma_prior``, ``project``, ``estimates``, ``timeseries``,
    ``data``, ``bucket``) — this function appears incomplete or to be
    missing imports/initializations (e.g. ``estimates = []``); verify
    before deploying.
    """
    # Fetch the raw national case time series from the pipeline bucket.
    storage.Client()\
        .bucket(bucket_name)\
        .blob("pipeline/raw/india_case_timeseries.csv")\
        .download_to_filename("/tmp/india_case_timeseries.csv")
    india_ts = pd.read_csv("/tmp/india_case_timeseries.csv")

    # country level
    (dates, RR_pred, RR_CI_upper, RR_CI_lower, *_) =\
        analytical_MPVS(india_ts["Hospitalized"].iloc[-(lookback+cutoff):-cutoff], CI = CI, smoothing = notched_smoothing(window = smoothing))

    # state level rt estimates
    state_time_series = get_time_series(state_df, 'state')
    states = list(state_time_series.index.get_level_values(level=0).unique())
    for state in states:
        state_code = state_name_lookup[state]
        try:
            (dates, RR_pred, RR_CI_upper, RR_CI_lower, *_) = gamma_prior(state_time_series.loc[state]['Hospitalized'], CI = CI, smoothing = notched_smoothing(window = smoothing))
            for row in zip(dates, RR_pred, RR_CI_upper, RR_CI_lower):
                timeseries.append((state_code, *row))
            estimates.append((state_code, RR_pred[-1], RR_CI_lower[-1], RR_CI_upper[-1], project(dates, RR_pred, smoothing)))
            print(f"{state_code}: success")
        except (IndexError, ValueError):
            # Estimation failed for this state; record NaNs and move on.
            estimates.append((state, np.nan, np.nan, np.nan, np.nan))
            print(f"{state_code}: error")

    # save out estimates
    estimates = pd.DataFrame(estimates)
    estimates.columns = ["state", "Rt", "Rt_CI_lower", "Rt_CI_upper", "Rt_proj"]
    estimates.set_index("state", inplace=True)
    estimates.to_csv(data/"Rt_estimates.csv")

    timeseries = pd.DataFrame(timeseries)
    timeseries.columns = ["state", "date", "Rt", "Rt_upper", "Rt_lower"]
    timeseries.set_index("state", inplace=True)
    timeseries.to_csv(data/"Rt_timeseries_india.csv")

    # upload to cloud
    bucket.blob("estimates/Rt_estimates.csv") .upload_from_filename(str(data/"Rt_estimates.csv"), content_type = "text/csv")
    bucket.blob("estimates/Rt_timeseries_india.csv").upload_from_filename(str(data/"Rt_timeseries_india.csv"), content_type = "text/csv")
acea12bfbd2dccfb045685c3fb8c0bdb21f92bd3 | 9,543 | py | Python | ue-loc-gather-cmd.py | xabk/ue-loc-tools | 2d01f5b2ef20e7d7f7dd3c9e12f80a2418e2b7b6 | [
"CC0-1.0"
] | null | null | null | ue-loc-gather-cmd.py | xabk/ue-loc-tools | 2d01f5b2ef20e7d7f7dd3c9e12f80a2418e2b7b6 | [
"CC0-1.0"
] | null | null | null | ue-loc-gather-cmd.py | xabk/ue-loc-tools | 2d01f5b2ef20e7d7f7dd3c9e12f80a2418e2b7b6 | [
"CC0-1.0"
] | null | null | null | #
# Engine\Binaries\Win64\UE4Editor-cmd.exe Games\FactoryGame\FactoryGame.uproject
# -run=GatherText
# -config="Config\Localization\Game_Gather.ini;Config\Localization\Game_Export.ini"
# -SCCProvider=None
# -Unattended
# -LogLocalizationConflict
# -Log="PyCmdLocGatherAndExport.log"
# Engine\Binaries\Win64\UE4Editor-cmd.exe Games\FactoryGame\FactoryGame.uproject
# -run=GatherText
# -config="Config\Localization\Game_Gather.ini;Config\Localization\Game_ExportIO.ini"
# -SCCProvider=None
# -Unattended
# -LogLocalizationConflict
# -Log="PyCmdLocGatherAndExport.log"
# Engine\Binaries\Win64\UE4Editor-cmd.exe Games\FactoryGame\FactoryGame.uproject
# -run=GatherText
# -config="Config\Localization\Game_Import.ini;Config\Localization\Game_Compile.ini"
# -SCCProvider=None
# -Unattended
# -LogLocalizationConflicts
# -Log="PyCmdLocGatherAndImport.log"
#
# add "capture_output=True, text=True" to make it silent and catch output into result
#
# TODO: Use all loc targets by default
# TODO: Add config file support
# TODO: Move parameters to config file
import subprocess as subp
import re
from pathlib import Path
from loguru import logger
from dataclasses import dataclass, field
from libraries.utilities import LocTask
@dataclass
class UnrealLocGatherCommandlet(LocTask):
    """Run the Unreal GatherText commandlet for the configured targets.

    Locates the project (.uproject) and engine directories, optionally
    patches engine manifest-dependency paths in *_Gather.ini files,
    then launches UE4Editor-cmd with the Config/Localization ini files
    matching the configured tasks, relaying its log output.
    """

    # TODO: Process all loc targets if none are specified
    # TODO: Change lambda to None to process all loc targets when implemented
    loc_targets: list = field(
        default_factory=lambda: ['Game']
    ) # Localization targets, empty = process all targets

    tasks: list = field(
        default_factory=lambda: ['Gather', 'Export']
    ) # Steps to perform. Config/Localization .ini file suffixes:
    # Gather, Export, Import, Сompile, GenerateReports, etc.
    # Set this in task lists in config. Good combinations for text:
    # ['Gather', 'Export']
    # ['Import', 'Compile', 'GenerateReports']
    # ['Gather', 'Import', 'Compile', 'GenerateReports']

    # TODO: Do I need this here? Or rather in smth from uetools lib?
    content_dir: str = '../'
    project_dir: str = None # Will try to find it if None or empty
    engine_dir: str = None # Will try to find it if None or empty

    # TODO: Use uetools to find the directories?

    try_patch_dependencies: bool = True
    # Should we patch dependencies in *_Gather.ini files?
    # This seems to be needed if the project and engine
    # are in completely separate directories

    _unreal_binary: str = 'Engine/Binaries/Win64/UE4Editor-cmd.exe'
    _config_pattern: str = 'Config/Localization/{loc_target}_{task}.ini'

    # Derived paths and the commandlet -config string, filled in by
    # post_update().
    _content_path: Path = None
    _project_path: Path = None
    _uproject_path: Path = None
    _engine_path: Path = None
    _unreal_binary_path: Path = None

    _config_str: str = None

    def post_update(self):
        """Resolve project/engine paths and build the -config argument.

        Returns False (after logging the reason) when the .uproject,
        solution file, or Unreal binary cannot be located; True on
        success.
        """
        super().post_update()
        self._content_path = Path(self.content_dir).resolve()
        if self.project_dir:
            self._project_path = Path(self.project_dir).resolve()
        else:
            # Default assumption: content dir lives inside the project.
            self._project_path = self._content_path.parent.resolve()
        try:
            self._uproject_path = next(self._project_path.glob('*.uproject'))
        except Exception as err:
            logger.error(
                f'Seems like no .uproject file found in {self._project_path}. '
                'Wrong path?'
            )
            logger.error(err)
            return False
        if self.engine_dir:
            self._engine_path = Path(self.engine_dir).resolve()
        else:
            # Try to find it as if we're in Games/..
            logger.info('Checking if engine path is ../../ from project directory.')
            self._engine_path = self._project_path.parent.parent
            self._unreal_binary_path = self._engine_path / self._unreal_binary
            if not self._unreal_binary_path.exists():
                # Try to find it in the .sln file
                # NOTE(review): next() raises StopIteration when no .sln
                # exists, so the .exists() check below can never fire —
                # TODO confirm and guard with a default.
                solution_file = next(self._project_path.glob('*.sln'))
                logger.info(
                    f'Trying to find the engine path from solution file: '
                    f'{solution_file}'
                )
                if not solution_file.exists():
                    logger.error(
                        f'No solution file found in {self._project_path}. Aborting. '
                        'Try setting engine directory explicitely in config.'
                    )
                    return False
                with open(solution_file, mode='r') as file:
                    s = file.read()
                    # Extract the relative engine root from the
                    # UnrealBuildTool project reference.
                    engine_path = re.findall(
                        r'"UnrealBuildTool", "(.*?)Engine\\Source\\Programs'
                        r'\\UnrealBuildTool\\UnrealBuildTool.csproj"',
                        s,
                    )
                if len(engine_path) == 0:
                    logger.error(
                        f'Couldn\'t find Engine path in the project solution file: '
                        '{solution_file}. Aborting. '
                        'Try setting engine directory explicitely in config.'
                    )
                    return False
                # TODO: .sln path absolute if game and engine on different disks?..
                self._engine_path = (self._project_path / engine_path[0]).resolve()
        self._unreal_binary_path = self._engine_path / self._unreal_binary
        if not (self._unreal_binary_path and self._unreal_binary_path.exists()):
            logger.error(
                f'No unreal binary found for engine path {self._engine_path}. '
                'Wrong path?'
            )
            return False
        # Build the ;-separated list of Config/Localization ini files:
        # one per (loc target, task) combination.
        self._config_str = ';'.join(
            [
                ';'.join(
                    [
                        self._config_pattern.format(loc_target=loc_target, task=t)
                        for t in self.tasks
                    ]
                )
                for loc_target in self.loc_targets
            ]
        )
        logger.info(f'Project path: {self._project_path}.')
        logger.info(f'Engine path: {self._engine_path}.')
        return True

    def patch_dependencies(self, loc_target: str):
        """Rewrite ManifestDependencies entries in {loc_target}_Gather.ini
        so they point at this engine's Content/Localization manifests."""
        # Patching the gather.ini to fix paths to engine manifest dependencies
        logger.info('Trying to patch manifest dependencies...')
        with open(
            self._project_path / f'Config/Localization/{loc_target}_Gather.ini', 'r'
        ) as file:
            gather_ini = file.read()
            # Normalize backslashes so the paths match the ini format.
            engine_path = re.subn(r'\\', '/', str(self._engine_path))[0]
            gather_ini, patched_dependencies = re.subn(
                r'(?<=ManifestDependencies=)[^\r\n]*?(?=Engine/Content/Localization/)',
                engine_path,
                gather_ini,
            )
        if patched_dependencies > 0:
            with open(
                self._project_path / f'Config/Localization/{loc_target}_Gather.ini', 'w'
            ) as file:
                file.write(gather_ini)
            logger.info(f'Patched dependencies: {patched_dependencies}')
        else:
            logger.info('No dependencies patched.')
        return

    def run_tasks(self):
        """Launch the GatherText commandlet and stream its log output.

        Returns the commandlet process exit code (0 on success).
        """
        logger.info(
            f'Processing targets ({len(self.loc_targets)}): '
            f'{self.loc_targets}. Tasks ({len(self.tasks)}): {self.tasks}'
        )
        if 'Gather' in self.tasks and self.try_patch_dependencies:
            for loc_target in self.loc_targets:
                self.patch_dependencies(loc_target)
        logger.info(
            f'Running Unreal loc gather commandlet with following config value: '
            f'{self._config_str}'
        )
        with subp.Popen(
            [
                self._unreal_binary_path,
                self._uproject_path,
                '-run=GatherText',
                f'-config="{self._config_str}"',
                '-SCCProvider=None',
                '-Unattended',
                '-LogLocalizationConflict',
                '-Log="PyCmdLocGatherAndExport.log"',
            ],
            stdout=subp.PIPE,
            stderr=subp.STDOUT,
            cwd=self._engine_path,
            universal_newlines=True,
        ) as process:
            # Relay the commandlet log to our logger at matching levels,
            # stripping the leading [timestamp] bracket.
            while True:
                for line in process.stdout:
                    line = re.sub(r"^\[[^]]+]", "", line.strip())
                    if 'Error: ' in line:
                        logger.error(f'| UE | {line.strip()}')
                    elif 'Warning: ' in line:
                        logger.warning(f'| UE | {line.strip()}')
                    else:
                        logger.info(f'| UE | {line.strip()}')
                if process.poll() != None:
                    break
            returncode = process.returncode

        return returncode
def main():
    """Entry point: configure file logging and run the gather commandlet."""
    logger.add(
        'logs/locsync.log',
        rotation='10MB',
        retention='1 month',
        enqueue=True,
        format='{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}',
        level='INFO',
    )

    for banner in ('', '--- Unreal gather text commandlet script ---', ''):
        logger.info(banner)

    task = UnrealLocGatherCommandlet()
    task.read_config(Path(__file__).name, logger)

    if task.run_tasks() != 0:
        logger.error('Error occured, please see the Content/Python/Logs/locsync.log')
        return 1

    for banner in ('', '--- Unreal gather text commandlet script end ---', ''):
        logger.info(banner)
    return 0


# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 34.701818 | 88 | 0.580111 |
acea14a7d54124d5212d1bf12e99d9f1eba170dd | 1,758 | py | Python | lichess_client/abstract_endpoints/abstract_boards.py | amrout/lichess_python_SDK | 1e2a545b65111bfc58bb963c44ad56be9b4d0835 | [
"Apache-2.0"
] | 8 | 2020-03-14T23:01:59.000Z | 2021-04-02T16:02:32.000Z | lichess_client/abstract_endpoints/abstract_boards.py | amrout/lichess_python_SDK | 1e2a545b65111bfc58bb963c44ad56be9b4d0835 | [
"Apache-2.0"
] | 13 | 2020-03-08T23:38:53.000Z | 2020-03-14T20:51:16.000Z | lichess_client/abstract_endpoints/abstract_boards.py | amrout/lichess_python_SDK | 1e2a545b65111bfc58bb963c44ad56be9b4d0835 | [
"Apache-2.0"
] | 7 | 2020-04-11T16:54:43.000Z | 2021-07-18T21:24:15.000Z | from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from lichess_client.utils.enums import VariantTypes, ColorType, RoomTypes
class AbstractBoards(ABC):
    """An abstract class for the Board API endpoint.

    (The original summary said "Bots API"; the method docstrings below
    consistently refer to the Board API.)
    """

    @abstractmethod
    def stream_incoming_events(self):
        """
        Stream the events reaching a lichess user in real time.
        """
        pass

    @abstractmethod
    def create_a_seek(self,
                      time: int,
                      increment: int,
                      variant: 'VariantTypes',
                      color: 'ColorType',
                      rated: bool,
                      rating_range):
        """
        Create a public seek, to start a game with a random player.

        time/increment set the clock; variant, color, rated and
        rating_range constrain the sought game.
        """
        pass

    @abstractmethod
    def stream_game_state(self, game_id: str):
        """
        Stream the state of a game being played with the Board API
        """
        pass

    @abstractmethod
    def make_move(self, game_id: str, move: str, draw: bool):
        """Make a move in a game being played with the Board API.

        move: presumably UCI notation — verify against the lichess API
        docs. draw: presumably offers/agrees to a draw with the move.
        """
        pass

    @abstractmethod
    def abort_game(self, game_id: str):
        """Abort a game being played with the Board API."""
        pass

    @abstractmethod
    def resign_game(self, game_id: str):
        """Resign a game being played with the Board API."""
        pass

    @abstractmethod
    def write_in_chat(self, game_id: str, room: 'RoomTypes', message: str):
        """Post a message to the player or spectator chat, in a game being played with the Board API."""
        pass

    @abstractmethod
    def handle_draw(self, game_id: str, accept: bool = True):
        """Create/accept/decline draw offers."""
        pass
acea1500cdcf1715a2aca01753e864dc70b4866b | 6,617 | py | Python | wxsplitsecrets.py | mrwizard82d1/pydiceware | c9a09f5d6d06b4203dcb6ce37d638f240d3db14e | [
"Apache-2.0"
] | null | null | null | wxsplitsecrets.py | mrwizard82d1/pydiceware | c9a09f5d6d06b4203dcb6ce37d638f240d3db14e | [
"Apache-2.0"
] | null | null | null | wxsplitsecrets.py | mrwizard82d1/pydiceware | c9a09f5d6d06b4203dcb6ce37d638f240d3db14e | [
"Apache-2.0"
] | null | null | null | #! /cygdrive/c/Python25/python.exe
"""GUI application (using wxPython) to create passphrases."""
import wx
import diceware
class SplitSecretFrame(wx.Frame):
    """The application frame.

    Left pane ("Split"): enter a password/passphrase and split it into
    N shares via diceware.splitSecret. Right pane ("Shares"): paste
    shares (one per line) and restore the secret via
    diceware.restoreSecret.
    """

    def __init__(self):
        """Initialize the application frame."""
        wx.Frame.__init__(self, parent=None,
                          title='Diceware Passphrases')
        self.menuBar = self.CreateMenu()
        self.statusBar = self.CreateStatusBar()
        self.CreateView()
        self.LayoutView()
        self.view.SetSizer(self.viewSizer)
        self.view.Fit()

    def CreateMenu(self):
        """Create the application menu (File > Exit)."""
        self.menuBar = wx.MenuBar()
        self.fileMenu = wx.Menu()
        self.fileMenu.AppendSeparator()
        exitItem = self.fileMenu.Append(-1,
                                        'E&xit', 'Exits the program.')
        self.Bind(wx.EVT_MENU, self.OnExit, exitItem)
        self.menuBar.Append(self.fileMenu, '&File')
        self.SetMenuBar(self.menuBar)

    def CreateView(self):
        """Create the controls of the split and shares panes."""
        self.view = wx.Panel(self, -1)

        # Create box for split information.
        self.splitBox = wx.StaticBox(self.view, -1, 'Split')

        # Create password control.
        self.passwordLabel = wx.StaticText\
            (self.view, -1,
             'Enter password or passphrase:')
        generatedTextWidth = 233
        self.passwordText = wx.TextCtrl(self.view, -1,
                                        size=(generatedTextWidth, -1),
                                        style=wx.TE_MULTILINE)
        self.passwordText.Bind(wx.EVT_TEXT, self.OnPassword)

        # Create split controls.
        self.countLabel = wx.StaticText(self.view, -1, 'Share count:')
        self.countSpinner = wx.SpinCtrl(self.view, -1)
        self.countSpinner.SetRange(2, 10)
        self.countSpinner.SetValue(5)

        # Create split button; disabled until a password is entered.
        self.splitButton = wx.Button(self.view, -1, 'Split')
        self.splitButton.Disable()
        self.Bind(wx.EVT_BUTTON, self.OnSplit, self.splitButton)

        # Create box for shares information.
        self.sharesBox = wx.StaticBox(self.view, -1, 'Shares')
        self.sharesLabel = wx.StaticText(self.view, -1,
                                         'Enter shares (one per line):')
        generatedTextWidth = 73
        generatedTextHeight = 450
        self.sharesText = wx.TextCtrl(self.view, -1,
                                      size=(generatedTextWidth,
                                            generatedTextHeight),
                                      style=wx.TE_MULTILINE)
        self.sharesText.Bind(wx.EVT_TEXT, self.OnShares)

        # Create restore button; disabled until shares are entered.
        self.restoreButton = wx.Button(self.view, -1, 'Restore')
        self.restoreButton.Disable()
        self.Bind(wx.EVT_BUTTON, self.OnRestore, self.restoreButton)

    def LayoutView(self):
        """Layout the application view."""
        self.viewSizer = wx.BoxSizer(wx.HORIZONTAL)

        # Layout the split view.
        self.splitView = wx.StaticBoxSizer(self.splitBox,
                                           wx.VERTICAL)
        self.splitView.Add(self.passwordLabel,
                           proportion=0, flag=wx.EXPAND)
        self.splitView.Add(self.passwordText,
                           proportion=1, flag=wx.EXPAND)
        self.countView = wx.BoxSizer(wx.VERTICAL)
        self.countView.Add(self.countLabel, proportion=0)
        self.countView.Add(self.countSpinner, proportion=0)
        self.actionView = wx.BoxSizer(wx.HORIZONTAL)
        self.actionView.Add(self.countView,
                            proportion=0, flag=wx.ALIGN_BOTTOM)
        self.actionView.Add((10, 10), proportion=1,
                            flag=(wx.ALIGN_BOTTOM |
                                  wx.ALIGN_CENTER_HORIZONTAL))
        self.actionView.Add(self.splitButton, proportion=0,
                            flag=(wx.ALIGN_RIGHT | wx.ALIGN_BOTTOM))
        self.splitView.Add(self.actionView,
                           proportion=0, flag=wx.EXPAND)

        # Layout the shares view.
        self.sharesView = wx.StaticBoxSizer(self.sharesBox,
                                            wx.VERTICAL)
        self.sharesView.Add(self.sharesLabel,
                            proportion=0,
                            flag=wx.EXPAND)
        self.sharesView.Add(self.sharesText,
                            proportion=1,
                            flag=wx.EXPAND)
        self.sharesView.Add(self.restoreButton,
                            proportion=0, flag=wx.ALIGN_RIGHT)

        # Layout the view.
        self.viewSizer.Add(self.splitView,
                           flag=wx.EXPAND)
        self.viewSizer.Add(self.sharesView,
                           proportion=1, flag=wx.EXPAND)

    def OnExit(self, event):
        """Handle the user selecting the exit menu item."""
        self.Close()

    def OnPassword(self, event):
        """Enable the split button only when the password box is non-empty."""
        self.splitButton.Enable(len(self.passwordText.\
                                    GetValue().strip()) != 0)

    def OnRestore(self, event):
        """Handle the user pressing the restore button."""
        # Get the shares to restore (one per line).
        theSharesText = self.sharesText.GetValue()
        theShares = theSharesText.split('\n')

        # Restore the shares to generate the password.
        thePassword = diceware.restoreSecret(theShares)
        self.passwordText.SetValue(thePassword)

    def OnShares(self, event):
        """Enable the restore button only when the shares box is non-empty."""
        self.restoreButton.Enable(len(self.sharesText.\
                                      GetValue().strip()) != 0)

    def OnSplit(self, event):
        """Handle the user pressing the split button."""
        # Split the password into shares.
        password = self.passwordText.GetValue()
        theCount = int(self.countSpinner.GetValue())
        theShares = list(diceware.splitSecret(password, theCount))

        # Display the shares, one per line.
        theSharesText = '\n'.join(theShares)
        self.sharesText.SetValue(theSharesText)
if __name__ == '__main__':
    # wx.PySimpleApp is deprecated (removed in wxPython 4); wx.App(False) is
    # the modern equivalent (False: don't redirect stdout/stderr to a window).
    app = wx.App(False)
    frame = SplitSecretFrame()
    frame.Show(True)
    app.MainLoop()
| 36.558011 | 72 | 0.554783 |
acea15730042ddd0226fd79fe66496e3d9807683 | 17,631 | py | Python | nengo/simulator.py | seankmartin/nengo | de345f6d201ac5063fc4c5a7e56c0b16c26785c1 | [
"BSD-2-Clause"
] | null | null | null | nengo/simulator.py | seankmartin/nengo | de345f6d201ac5063fc4c5a7e56c0b16c26785c1 | [
"BSD-2-Clause"
] | null | null | null | nengo/simulator.py | seankmartin/nengo | de345f6d201ac5063fc4c5a7e56c0b16c26785c1 | [
"BSD-2-Clause"
] | null | null | null | """Reference simulator for nengo models.
This backend is relatively fast, and works on general purpose computers.
Other Nengo backends provide more specialized Simulators for custom platforms.
"""
import logging
import warnings
from collections.abc import Mapping
import numpy as np
import nengo.utils.numpy as npext
from nengo.builder import Model
from nengo.builder.optimizer import optimize as opmerge_optimize
from nengo.builder.signal import SignalDict
from nengo.cache import get_default_decoder_cache
from nengo.exceptions import ReadonlyError, SimulatorClosed, ValidationError
from nengo.utils.graphs import toposort
from nengo.utils.progress import Progress, ProgressTracker
from nengo.utils.simulator import operator_dependency_graph
logger = logging.getLogger(__name__)
class Simulator:
r"""Reference simulator for Nengo models.
The simulator takes a `.Network` and builds internal data structures to
run the model defined by that network. Run the simulator with the
`~.Simulator.run` method, and access probed data through the
``data`` attribute.
Building and running the simulation may allocate resources like files
and sockets. To properly free these resources, call the `.Simulator.close`
method. Alternatively, `.Simulator.close` will automatically be called
if you use the ``with`` syntax:
.. testcode::
with nengo.Network() as net:
ensemble = nengo.Ensemble(10, 1)
with nengo.Simulator(net, progress_bar=False) as sim:
sim.run(0.1)
Note that the ``data`` attribute is still accessible even when a simulator
has been closed. Running the simulator, however, will raise an error.
When debugging or comparing models, it can be helpful to see the full ordered
list of operators that the simulator will execute on each timestep.
.. testcode::
with nengo.Simulator(nengo.Network(), progress_bar=False) as sim:
print('\n'.join("* %s" % op for op in sim.step_order))
.. testoutput::
* TimeUpdate{}
The diff of two simulators' sorted ops tells us how two built models differ.
We can use ``difflib`` in the Python standard library to see the differences.
.. testcode::
# Original model
with nengo.Network() as net:
ensemble = nengo.Ensemble(10, 1, label="Ensemble")
sim1 = nengo.Simulator(net, progress_bar=False)
# Add a node
with net:
node = nengo.Node(output=0, label="Node")
nengo.Connection(node, ensemble)
sim2 = nengo.Simulator(net, progress_bar=False)
import difflib
print("".join(difflib.unified_diff(
sorted("%s: %s\n" % (type(op).__name__, op.tag) for op in sim1.step_order),
sorted("%s: %s\n" % (type(op).__name__, op.tag) for op in sim2.step_order),
fromfile="sim1",
tofile="sim2",
n=0,
)).strip())
sim1.close()
sim2.close()
.. testoutput::
:options:
--- sim1
+++ sim2
@@ -0,0 +1 @@
+Copy: <Connection from <Node "Node"> to <Ensemble "Ensemble">>
@@ -4,0 +6 @@
+SimProcess: Lowpass(tau=0.005)
Parameters
----------
network : Network or None
A network object to be built and then simulated. If None,
then a `.Model` with the build model must be provided instead.
dt : float, optional
The length of a simulator timestep, in seconds.
seed : int, optional
A seed for all stochastic operators used in this simulator.
Will be set to ``network.seed + 1`` if not given.
model : Model, optional
A `.Model` that contains build artifacts to be simulated.
Usually the simulator will build this model for you; however, if you
want to build the network manually, or you want to inject build
artifacts in the model before building the network, then you can
pass in a `.Model` instance.
progress_bar : bool or ProgressBar, optional
Progress bar for displaying build and simulation progress.
If ``True``, the default progress bar will be used.
If ``False``, the progress bar will be disabled.
For more control over the progress bar, pass in a ``ProgressBar``
instance.
optimize : bool, optional
If ``True``, the builder will run an additional optimization step
that can speed up simulations significantly at the cost of slower
builds. If running models for very small amounts of time,
pass ``False`` to disable the optimizer.
Attributes
----------
closed : bool
Whether the simulator has been closed.
Once closed, it cannot be reopened.
data : SimulationData
The `.SimulationData` mapping from Nengo objects to the data associated
with those objects. In particular, each `.Probe` maps to the data
probed while running the simulation.
dg : dict
A dependency graph mapping from each `.Operator` to the operators
that depend on that operator.
model : Model
The `.Model` containing the signals and operators necessary to
simulate the network.
signals : SignalDict
The `.SignalDict` mapping from `.Signal` instances to NumPy arrays.
"""
def __init__(
self, network, dt=0.001, seed=None, model=None, progress_bar=True, optimize=True
):
self.closed = True # Start closed in case constructor raises exception
self.progress_bar = progress_bar
self.optimize = optimize
if model is None:
self.model = Model(
dt=float(dt),
label="%s, dt=%f" % (network, dt),
decoder_cache=get_default_decoder_cache(),
)
else:
self.model = model
pt = ProgressTracker(progress_bar, Progress("Building", "Build"))
with pt:
if network is not None:
# Build the network into the model
self.model.build(network, progress=pt.next_stage("Building", "Build"))
# Order the steps (they are made in `Simulator.reset`)
self.dg = operator_dependency_graph(self.model.operators)
if optimize:
with pt.next_stage("Building (running optimizer)", "Optimization"):
opmerge_optimize(self.model, self.dg)
self._step_order = [op for op in toposort(self.dg) if hasattr(op, "make_step")]
# -- map from Signal.base -> ndarray
self.signals = SignalDict()
for op in self.model.operators:
op.init_signals(self.signals)
# Add built states to the raw simulation data dictionary
self._sim_data = self.model.params
# Provide a nicer interface to simulation data
self.data = SimulationData(self._sim_data)
if seed is None:
if network is not None and network.seed is not None:
seed = network.seed + 1
else:
seed = np.random.randint(npext.maxint)
self.closed = False
self.reset(seed=seed)
def __del__(self):
"""Raise a ResourceWarning if we are deallocated while open."""
if not self.closed:
warnings.warn(
"Simulator with model=%s was deallocated while open. Please "
"close simulators manually to ensure resources are properly "
"freed." % self.model,
ResourceWarning,
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getstate__(self):
signals = (
{k: v for k, v in self.signals.items() if not k.readonly}
if self.signals is not None
else {}
)
probe_outputs = {probe: self._sim_data[probe] for probe in self.model.probes}
return dict(
model=self.model,
signals=signals,
probe_outputs=probe_outputs,
dt=self.dt,
seed=self.seed,
progress_bar=self.progress_bar,
optimize=self.optimize,
closed=self.closed,
)
def __setstate__(self, state):
self.__init__(
network=None,
model=state["model"],
dt=state["dt"],
seed=state["seed"],
progress_bar=state["progress_bar"],
optimize=False, # The pickled Sim will have already been optimized
)
for key, value in state["signals"].items():
self.signals[key] = value
for key, value in state["probe_outputs"].items():
self._sim_data[key].extend(value)
# Set whether it had originally been optimized
self.optimize = state["optimize"]
if state["closed"]:
self.close()
@property
def dt(self):
"""(float) The time step of the simulator."""
return self.model.dt
@dt.setter
def dt(self, dummy):
raise ReadonlyError(attr="dt", obj=self)
@property
def n_steps(self):
"""(int) The current time step of the simulator."""
return self._n_steps
@property
def step_order(self):
"""(list) The ordered list of step functions run by this simulator."""
return self._step_order
@property
def time(self):
"""(float) The current time of the simulator."""
return self._time
def clear_probes(self):
"""Clear all probe histories.
.. versionadded:: 3.0.0
"""
for probe in self.model.probes:
self._sim_data[probe] = []
self.data.reset() # clear probe cache
def close(self):
"""Closes the simulator.
Any call to `.Simulator.run`, `.Simulator.run_steps`,
`.Simulator.step`, and `.Simulator.reset` on a closed simulator raises
a `.SimulatorClosed` exception.
"""
self.closed = True
self.signals = None # signals may no longer exist on some backends
    def _probe(self):
        """Copy all probed signals to buffers."""
        self._probe_step_time()
        for probe in self.model.probes:
            # sample_every is converted to a (possibly fractional) period in
            # timesteps; `n_steps % period < 1` then samples the closest steps.
            period = 1 if probe.sample_every is None else probe.sample_every / self.dt
            if self.n_steps % period < 1:
                # Copy so later in-place signal updates don't mutate history.
                tmp = self.signals[self.model.sig[probe]["in"]].copy()
                self._sim_data[probe].append(tmp)
    def _probe_step_time(self):
        # Cache the step counter and simulated time out of the signal store so
        # the `n_steps` / `time` properties can read them cheaply.
        self._n_steps = self.signals[self.model.step].item()
        self._time = self.signals[self.model.time].item()
def reset(self, seed=None):
"""Reset the simulator state.
Parameters
----------
seed : int, optional
A seed for all stochastic operators used in the simulator.
This will change the random sequences generated for noise
or inputs (e.g. from processes), but not the built objects
(e.g. ensembles, connections).
"""
if self.closed:
raise SimulatorClosed("Cannot reset closed Simulator.")
if seed is not None:
self.seed = seed
# reset signals
for key in self.signals:
self.signals.reset(key)
# rebuild steps (resets ops with their own state, like Processes)
self.rng = np.random.RandomState(self.seed)
self._steps = [
op.make_step(self.signals, self.dt, self.rng) for op in self._step_order
]
self.clear_probes()
self._probe_step_time()
def run(self, time_in_seconds, progress_bar=None):
"""Simulate for the given length of time.
If the given length of time is not a multiple of ``dt``,
it will be rounded to the nearest ``dt``. For example, if ``dt``
is 0.001 and ``run`` is called with ``time_in_seconds=0.0006``,
the simulator will advance one timestep, resulting in the actual
simulator time being 0.001.
The given length of time must be positive. The simulator cannot
be run backwards.
Parameters
----------
time_in_seconds : float
Amount of time to run the simulation for. Must be positive.
progress_bar : bool or ProgressBar, optional
Progress bar for displaying the progress of the simulation run.
If True, the default progress bar will be used.
If False, the progress bar will be disabled.
For more control over the progress bar, pass in a ``ProgressBar``
instance.
"""
if time_in_seconds < 0:
raise ValidationError(
"Must be positive (got %g)" % (time_in_seconds,), attr="time_in_seconds"
)
steps = int(np.round(float(time_in_seconds) / self.dt))
if steps == 0:
warnings.warn(
"%g results in running for 0 timesteps. Simulator "
"still at time %g." % (time_in_seconds, self.time)
)
else:
logger.info(
"Running %s for %f seconds, or %d steps",
self.model.label,
time_in_seconds,
steps,
)
self.run_steps(steps, progress_bar=progress_bar)
def run_steps(self, steps, progress_bar=None):
"""Simulate for the given number of ``dt`` steps.
Parameters
----------
steps : int
Number of steps to run the simulation for.
progress_bar : bool or ProgressBar, optional
Progress bar for displaying the progress of the simulation run.
If True, the default progress bar will be used.
If False, the progress bar will be disabled.
For more control over the progress bar, pass in a ``ProgressBar``
instance.
"""
if progress_bar is None:
progress_bar = self.progress_bar
with ProgressTracker(
progress_bar, Progress("Simulating", "Simulation", steps)
) as pt:
for i in range(steps):
self.step()
pt.total_progress.step()
def step(self):
"""Advance the simulator by 1 step (``dt`` seconds)."""
if self.closed:
raise SimulatorClosed("Simulator cannot run because it is closed.")
old_err = np.seterr(invalid="raise", divide="ignore")
try:
for step_fn in self._steps:
step_fn()
finally:
np.seterr(**old_err)
self._probe()
def trange(self, dt=None, sample_every=None):
"""Create a vector of times matching probed data.
Note that the range does not start at 0 as one might expect, but at
the first timestep (i.e., ``dt``).
Parameters
----------
sample_every : float, optional
The sampling period of the probe to create a range for.
If None, a time value for every ``dt`` will be produced.
.. versionchanged:: 3.0.0
Renamed from dt to sample_every
"""
if dt is not None:
if sample_every is not None:
raise ValidationError(
"Cannot specify both `dt` and `sample_every`. "
"Use `sample_every` only.",
attr="dt",
obj=self,
)
warnings.warn(
"`dt` is deprecated. Use `sample_every` instead.", DeprecationWarning
)
sample_every = dt
period = 1 if sample_every is None else sample_every / self.dt
steps = np.arange(1, self.n_steps + 1)
return self.dt * steps[steps % period < 1]
class SimulationData(Mapping):
    """Read-only view of the raw data recorded during a simulation.

    Mainly used to access probe data (``sim.data[my_probe]``) as well as the
    built parameters of model objects (``sim.data[my_ens].encoders``).

    Probe data is stored internally as plain Python lists for speed; this
    view converts them to cached, read-only NumPy arrays on access.

    .. versionchanged:: 3.0.0
       Renamed from ProbeDict to SimulationData
    """

    def __init__(self, raw):
        super().__init__()
        self.raw = raw
        self._cache = {}

    def __getitem__(self, key):
        """Return simulation data for ``key``, converting lists to arrays."""
        # Re-convert whenever the raw list has grown since it was cached.
        if key not in self._cache or len(self._cache[key]) != len(self.raw[key]):
            value = self.raw[key]
            if isinstance(value, list):
                value = np.asarray(value)
                value.setflags(write=False)  # guard recorded data from edits
            self._cache[key] = value
        return self._cache[key]

    def __iter__(self):
        return iter(self.raw)

    def __len__(self):
        return len(self.raw)

    def __repr__(self):
        return repr(self.raw)

    def __str__(self):
        return str(self.raw)

    def reset(self):
        """Drop all cached arrays (e.g. after probe histories are cleared)."""
        self._cache.clear()
class ProbeDict(SimulationData):
    """Deprecated alias of `SimulationData`; warns on construction."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "ProbeDict has been renamed to SimulationData. This alias "
            "will be removed in Nengo 3.1."
        )
        super().__init__(*args, **kwargs)
| 34.301556 | 88 | 0.603085 |
acea157d5c5f566287b2348968b42766226ab5fa | 4,069 | py | Python | ppq/parser/ppl.py | wdian/ppq | 58bd1271ea6f0dfaf602eb72bdca63ea79f191b8 | [
"Apache-2.0"
] | null | null | null | ppq/parser/ppl.py | wdian/ppq | 58bd1271ea6f0dfaf602eb72bdca63ea79f191b8 | [
"Apache-2.0"
] | null | null | null | ppq/parser/ppl.py | wdian/ppq | 58bd1271ea6f0dfaf602eb72bdca63ea79f191b8 | [
"Apache-2.0"
] | null | null | null | import json
from typing import Union
import numpy as np
import torch
from ppq.core import (QuantizationProperty, QuantizationStates, TargetPlatform,
TensorQuantizationConfig, convert_any_to_numpy,
ppq_legacy)
from ppq.IR import BaseGraph
from ppq.IR.quantize import QuantableOperation
from .onnx_exporter import OnnxExporter
def convert_value(value: Union[int, float, np.ndarray, torch.Tensor]) -> str:
    """Render a quantization parameter as a JSON-friendly value.

    Plain ints/floats pass through unchanged; tensors/arrays are converted
    to (nested) Python lists. Exact `type` check is deliberate so bools and
    numpy scalars go through the tensor path, as before.
    """
    if type(value) in (int, float):
        return value
    converted = convert_any_to_numpy(value, accepet_none=False)
    return converted.tolist()
def convert_type(platform: TargetPlatform) -> str:
    """Map a PPQ target platform to the precision tag used in the export.

    Returns None for platforms exported without a precision tag.
    """
    if platform == TargetPlatform.PPL_CUDA_INT8:
        return "INT8"
    if platform in (TargetPlatform.SHAPE_OR_INDEX, TargetPlatform.FP32):
        return None
    raise TypeError(f'Unsupported platform type. ({str(platform)})')
class PPLBackendExporter(OnnxExporter):
    """Exports a quantized PPQ graph plus a JSON quantization config for PPL."""

    def export_quantization_config(self, config_path: str, graph: BaseGraph):
        """Write per-variable quant params and per-op precision tags to JSON.

        Raises PermissionError if any quantization config is in a state that
        cannot be exported (i.e. not fully initialized / wrongly deactivated).
        """
        var_quant_info_recorder, op_platform_recorder = {}, {}
        # Pass 1: collect one TensorQuantizationConfig per quantized variable.
        for operation in graph.operations.values():
            if not isinstance(operation, QuantableOperation): continue
            for config, var in operation.config_with_variable:
                if not QuantizationStates.can_export(config.state):
                    raise PermissionError(
                        'Can not export quant config cause not all quantization configurations '
                        'have been correctly initialized(or some of them has been deactivated). '
                        f'Operation {operation.name} has an invalid quantization state({config.state}) '
                        f'at variable {var.name}.')
                # PATCH 2021.11.25
                # REMOVE BIAS FROM CONFIGURATION
                if config.num_of_bits > 8: continue
                # Skip configs that carry no usable quant params.
                if config.state in {
                    QuantizationStates.SOI,
                    QuantizationStates.DEACTIVATED,
                    QuantizationStates.DEQUANTIZED,
                    QuantizationStates.FP32
                }: continue
                # Simply override recorder is acceptable here,
                # we do not support mix presicion quantization for CUDA backend now.
                # All configurations for this variable should keep identical towards each other.
                if config.state == QuantizationStates.JOINT and var.name in var_quant_info_recorder: continue
                var_quant_info_recorder[var.name] = config
        # ready to render config to json.
        # Pass 2: flatten each config into a plain-JSON dict.
        for var in var_quant_info_recorder:
            config = var_quant_info_recorder[var]
            assert isinstance(config, TensorQuantizationConfig)
            var_quant_info_recorder[var] = {
                'bit_width'  : config.num_of_bits,
                'per_channel': config.policy.has_property(QuantizationProperty.PER_CHANNEL),
                'quant_flag' : True,
                'sym'        : config.policy.has_property(QuantizationProperty.SYMMETRICAL),
                'scale'      : convert_value(config.scale),
                'zero_point' : convert_value(config.offset),
                'tensor_min' : convert_value(config.scale * (config.quant_min - config.offset)),
                'tensor_max' : convert_value(config.scale * (config.quant_max - config.offset)),
                'q_min'      : config.quant_min,
                'q_max'      : config.quant_max,
                'hash'       : config.__hash__(),
                'dominator'  : config.dominated_by.__hash__()
            }
        # Pass 3: record a data_type tag for every op whose platform has one.
        for op in graph.operations.values():
            if convert_type(op.platform) is not None:
                op_platform_recorder[op.name] = {
                    'data_type': convert_type(op.platform)
                }
        exports = {
            "quant_info": var_quant_info_recorder,
            "op_info": op_platform_recorder}
        with open(file=config_path, mode='w') as file:
            json.dump(exports, file, indent=4)
| 46.238636 | 109 | 0.615385 |
acea157df0d24188f77b85248e2e3de5e06719fa | 2,687 | py | Python | squareconnect/models/location_type.py | xethorn/connect-python-sdk | a0543b2f7ea498865c6d916de0b10370f89ebc77 | [
"Apache-2.0"
] | 53 | 2016-08-06T17:12:16.000Z | 2020-08-02T19:43:58.000Z | squareconnect/models/location_type.py | xethorn/connect-python-sdk | a0543b2f7ea498865c6d916de0b10370f89ebc77 | [
"Apache-2.0"
] | 32 | 2016-08-19T16:32:30.000Z | 2020-01-14T18:01:37.000Z | squareconnect/models/location_type.py | xethorn/connect-python-sdk | a0543b2f7ea498865c6d916de0b10370f89ebc77 | [
"Apache-2.0"
] | 45 | 2016-09-05T11:58:09.000Z | 2020-11-15T16:26:41.000Z | # coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class LocationType(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    def __init__(self):
        """
        LocationType - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
          and the value is attribute type.
        :param dict attributeMap: The key is attribute name
          and the value is json key in definition.
        """
        # This model declares no attributes, so both maps are empty.
        self.swagger_types = {
        }

        self.attribute_map = {
        }

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialize nested models inside lists and dicts.
        # (.items() works on both py2.7 and py3 dicts; six.iteritems was
        # unnecessary here.)
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against unrelated types; comparing to e.g. an int previously
        # raised AttributeError because `other` has no __dict__ to read.
        if not isinstance(other, LocationType):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        # `==` resolves NotImplemented to False, so this stays consistent.
        return not self == other
| 27.989583 | 77 | 0.551172 |
acea15bd96d71e42f4cd9baed7c46c8f9e810f20 | 34,769 | py | Python | user_home/.config/qtile/config.py | Dunkmania101/Dotfiles | 8fed008965767eb8ce5a340d314530a57d15f4e1 | [
"MIT"
] | null | null | null | user_home/.config/qtile/config.py | Dunkmania101/Dotfiles | 8fed008965767eb8ce5a340d314530a57d15f4e1 | [
"MIT"
] | null | null | null | user_home/.config/qtile/config.py | Dunkmania101/Dotfiles | 8fed008965767eb8ce5a340d314530a57d15f4e1 | [
"MIT"
] | null | null | null | # ================================= #
# --------------------------------- #
# -- Dunkmania101's Qtile Config -- #
# --------------------------------- #
# ================================= #
import os, subprocess, imaplib, re #, gi
#gi.require_version("Gdk", "3.0")
#from gi.repository import Gdk
from json import loads as jloads #, dumps as jdumps
from time import sleep
from shutil import which
from threading import Thread
from random import choice
from libqtile import qtile, layout, hook, bar, widget, extension
from libqtile.backend.x11.core import get_keys
# from libqtile.backend.wayland.core import keyboard as wl_kbd
from libqtile.config import Key, KeyChord, Drag, Screen, Match, Group, ScratchPad, DropDown #, Click
from libqtile.widget import base as widget_base
from libqtile.lazy import lazy
# --------------------- #
# -- Basic Functions -- #
# --------------------- #
def shcmd_exists(cmd):
    """Return True if `cmd` resolves to an executable on PATH."""
    return bool(which(cmd))
def sub_run_cmd(cmd, cwd):
    """Run `cmd` through the shell in `cwd`, printing (not raising) errors."""
    try:
        subprocess.run(cmd, cwd=cwd, shell=True)
    except Exception as e:
        # Best effort: autostart helpers must never crash the WM config.
        print(str(e))
def run_cmd(cmd, cwd=None, thread=True):
    """Run a shell command, by default on a background thread so the window
    manager's event loop is never blocked."""
    if not thread:
        sub_run_cmd(cmd, cwd)
        return
    Thread(target=sub_run_cmd, args=(cmd, cwd)).start()
def get_cmd_output(cmd, cwd=None):
    """Run `cmd` through the shell and return its stdout ('' on failure)."""
    try:
        proc = subprocess.run(cmd, cwd=cwd, shell=True, stdout=subprocess.PIPE)
        return str(proc.stdout.decode('utf-8'))
    except Exception as e:
        print(str(e))
        return ""
def exec_func_no_qtile(_, func, args):
    """Call `func(*args)` if callable, ignoring the qtile instance argument."""
    if not callable(func):
        return
    func(*args)
def is_wayland():
    # True when Qtile is running on the Wayland backend (vs X11).
    return qtile.core.name == "wayland"
def gen_jgmenu_cmd(fmt="uxterm"):
    """Build a shell pipeline that feeds `fmt` to jgmenu in simple mode."""
    return "echo '{}' | jgmenu --simple".format(fmt)
# -------------------- #
# -- Base Variables -- #
# -------------------- #
cfg_dir = os.path.expanduser("~/.config/qtile")
#run_cmd(cfg_dir + "/autostart.sh")
rofi_dir = cfg_dir + "/rofi"
env_file = os.path.expanduser("~/.private/data/env.json")
env_data = {}
if os.path.isfile(env_file):
with open(env_file, "r") as f:
try:
env_data = jloads(f.read())
except:
pass
# Themes
my_font = "Iosevka"
my_term_font = "Iosevka Term"
# One Dark
#bg_color = "#282c34"
#fg_color = "#5c6370"
#dark_bg_color = "#222222"
#bg_line_color = "#5c6370"
#fg_line_color = "#61afef"
#bg_line_color_alt = "#504d4d"
#fg_line_color_alt = "#4b5263"
#bg_txt_color = "#abb2bf"
#fg_txt_color = "#61afef"
#green_color = "#504945"
# Gruvbox
bg_color = "#1B2229"
fg_color = "#4f443a"
dark_bg_color = "#222222"
bg_line_color = "#3c3836"
fg_line_color = "#4f4347"
bg_line_color_alt = "#aa5000"
fg_line_color_alt = "#ff8000"
bg_txt_color = "#3c3836"
fg_txt_color = "#ebdbb2"
green_color = "#687b01"
red_color = "#cc241d"
# Base Groups (copied to each monitor)
my_base_groups = "~ 1 2 3 4 5 6 7 8 9 0 - =".split(" ")
# Screen to put the systray widget on
my_systray_screen = 0
# Gap and border sizes
my_border_width = 4
my_margin = 1
# Directories
my_wallpapers = os.path.expanduser("~/Wallpapers") # Can point to a directory or a single image file.
my_screenshots_dir = os.path.expanduser("~/Screenshots")
# Details
my_distro = "Guix"
my_check_updates_cmd = ""
#if shcmd_exists("pip"):
# my_check_updates_cmd += "; pip list --outdated --format=freeze"
if shcmd_exists("paru"):
my_check_updates_cmd += "; paru --query --upgrades"
if shcmd_exists("guix"):
my_check_updates_cmd += "; guix refresh"
if is_wayland():
my_get_monitors_cmd = "wlr-randr"
else:
my_get_monitors_cmd = "xrandr --query | grep \" connected\" | cut -d\" \" -f1"
my_mouse_move_cmd = "xdotool mousemove_relative -- "
my_mouse_move_dist = "10"
my_mouse_click_cmd = "xdotool click "
my_gmail_username = env_data.get("gmail.username", "")
my_gmail_pass = env_data.get("gmail.pass", "")
# Applications
#my_terminal = "kitty -e tmux"
#my_terminal_tmux = f"kitty -e \'{cfg_dir}/scripts/run/run-tmux-session.sh\'"
my_terminal = "kitty"
#my_terminal = f"uxterm -si -fs 10 -fa \"{my_term_font}\" -bg \'#212121\' -bd \'#212111\'"
#my_terminal_alt = "kitty"
my_terminal_alt = f"uxterm -si -fs 10 -fa \"{my_term_font}\" -bg \'#212121\' -bd \'#212111\'"
#my_terminal_alt = "st"
#my_terminal_alt = "cool-retro-term"
#my_terminal_alt = "darktile"
#my_terminal_alt = "extraterm"
#my_terminal_alt1 = "kitty -e tmux"
#my_terminal_alt1 = "kitty"
my_terminal_alt1 = "cool-retro-term"
my_terminal_alt2 = "extraterm"
my_terminal_alt3 = "urxvt"
my_terminal_alt4 = f"uxterm -si -fa \"{my_font}\""
#my_editor = cfg_dir + "/scripts/run/run-emacs.sh"
my_editor = "emacsclient -a '' -c"
my_editor_alt = "neovide --multigrid"
#my_editor_alt = "vscodium"
# my_editor_alt = "notepadqq"
my_editor_alt1 = "emacs"
my_editor_alt2 = "kitty zsh -c \'source ~/.zshrc; nvim\'"
my_browser = "nyxt -S"
#my_browser_alt = os.path.expanduser("~/.nix-profile/bin/vivaldi")
my_browser_alt = "firefox-developer-edition"
#my_browser_alt = "vivaldi"
#my_browser_alt = "vivaldi-stable"
my_browser_alt1 = "firedragon"
my_browser_alt2 = "qutebrowser"
# my_browser_alt2 = "min"
# my_browser_alt2 = "luakit"
my_browser_alt3 = "brave-beta"
my_private_browser = "nyxt --data-profile nosave"
#my_private_browser_alt = "vivaldi --incognito"
my_private_browser_alt = "vivaldi-stable --incognito"
my_browser_profile_menu = rofi_dir + "/nyxt_profile_menu/nyxt_profile_menu.sh"
my_file_manager = "pcmanfm"
my_file_manager_alt = "filezilla"
my_file_manager_alt1 = "thunar"
#my_mp = "deadbeef"
#my_mp = "kawaii-player"
#my_mp = "lollypop"
#my_mp = "celluloid"
my_mp = rofi_dir + "/mpvselect/mpvselect.sh"
my_mp_alt = rofi_dir + "/ytfzf/ytfzf.sh --savefile"
my_mp_alt1 = rofi_dir + "/ytfzf/ytfzf.sh"
#my_mp_alt1 = rofi_dir + "/notflix/notflix.sh"
my_mp_alt2 = "freetube"
#my_mp_alt = "motionbox"
#my_mp_alt = "freetube"
#my_mp_alt = "vlc"
#my_mp_private = rofi_dir + "/mpvselect/mpvselect.sh"
my_mp_private = rofi_dir + "/mpvselect/mpvselect.sh --nosave"
my_package_manager = "pamac-manager"
my_package_manager_alt = "pamac-manager"
my_calculator = "qalculate-gtk"
my_calculator_alt = "qalculate-gtk"
my_control_panel = my_terminal + " -e btop"
#my_control_panel = "kitty -e btop"
my_control_panel_alt = "stacer"
my_audio_mixer = my_terminal + " -e pulsemixer"
my_audio_mixer_alt = "easyeffects"
my_audio_mixer_alt1 = my_terminal + " -e alsamixer"
# Menus (Rofi Scripts, etc...)
my_launcher = rofi_dir + "/launcher/launcher.sh"
my_launcher_alt = "jgmenu"
my_clipmenu = rofi_dir + "/clipmenu/clipmenu.sh"
my_clipmenu_alt = "copyq toggle"
my_powermenu = rofi_dir + "/powermenu/powermenu.sh"
my_handy = rofi_dir + "/handy/handy.sh"
my_window_pager = rofi_dir + "/window/window.sh"
my_player_ctrl = rofi_dir + "/player/player.sh"
my_workspaces = rofi_dir + "/workspaces/workspaces.sh"
my_emoji = rofi_dir + "/emoji/emoji.sh"
my_window_killer = f"{my_terminal} -e xkill"
# ---------- #
# -- Keys -- #
# ---------- #
left = "Left"
right = "Right"
up = "Up"
down = "Down"
sup = "mod4"
alt = "mod1"
ctrl = "control"
shift = "shift"
space = "space"
ret = "Return"
tab = "Tab"
grave = "grave"
semicolon = "semicolon"
apostrophe = "apostrophe"
period = "period"
minus = "minus"
equal = "equal"
# quote = "quoteright"
monitors = get_cmd_output(my_get_monitors_cmd).split("\n")
#gdkdsp = Gdk.Screen.get_default()
#monitors = [gdkdsp.get_monitor_plug_name(i) for i in range(gdkdsp.get_n_monitors())]
def take_screenshot(cmd="scrot", cwd=my_screenshots_dir):
    """Run a screenshot command inside `cwd`, creating the directory first."""
    os.makedirs(cwd, exist_ok=True)
    run_cmd(cmd, cwd)
def run_keysboard(start=True):
    """Start (or, with start=False, tear down) the keysboard bar helper."""
    if start:
        run_cmd(cfg_dir + "/scripts/run/run-keysboard.sh")
        return
    run_cmd('tmux kill-session -t keysboard-bar; rm -f /tmp/tmux-bar-keysboard-pipe')
def run_kmonad(start=True):
    """Start (or, with start=False, tear down) the kmonad bar helper."""
    if start:
        run_cmd(cfg_dir + "/scripts/run/run-kmonad.sh")
        return
    run_cmd('tmux kill-session -t kmonad-bar; rm -f /tmp/tmux-bar-kmonad-pipe')
#def run_plank(start=True):
# if start:
# for _ in monitors:
# run_cmd(f"plank")
# else:
# run_cmd("killall -q plank")
# ------------------------------ #
# -- Binds & Functions Galore -- #
# ------------------------------ #
def get_full_group_name(screen_name, base_name):
    """Qualify a base group name with its screen, e.g. (0, '1') -> '0:1'."""
    return "{}:{}".format(screen_name, base_name)
def get_full_group_names_for_screen(i):
    """All screen-qualified group names for screen index `i`."""
    return [get_full_group_name(i, base) for base in my_base_groups]
def get_current_screen_index(qtile):
    """Index of the currently focused screen within qtile.screens."""
    screens = qtile.screens
    return screens.index(qtile.current_screen)
def get_screen_by_offset(qtile, offset=1):
    """Screen index `offset` steps from the current one, wrapping around."""
    idx = get_current_screen_index(qtile) + offset
    return idx % len(qtile.screens)
def get_current_group_index(qtile):
    """Index of the currently focused group within qtile.groups."""
    groups = qtile.groups
    return groups.index(qtile.current_group)
def get_group_obj_by_name(qtile, g):
    # Resolve a full group name (e.g. "0:1") to its Group object, or None.
    return qtile.groups_map.get(g)
def get_current_group_index_on_current_screen(qtile):
    # Position of the current group relative to its screen's group list.
    # NOTE(review): subtracting len(screens)*len(my_base_groups) only matches
    # "current screen's offset" when focus is on the last screen — verify; the
    # `% len(my_base_groups)` in the by_offset caller wraps it regardless.
    return get_current_group_index(qtile) - ((len(qtile.screens) * len(my_base_groups)))
def get_group_on_current_screen(qtile, g):
    """Qualify base group `g` with the currently focused screen's index."""
    screen = get_current_screen_index(qtile)
    return get_full_group_name(screen, g)
def get_group_index_on_current_screen_by_offset(qtile, offset=1):
    # Wrap `offset` within the current screen's groups, then shift back into
    # the flat qtile.groups index space for that screen.
    return ((get_current_group_index_on_current_screen(qtile) + offset) % len(my_base_groups)) + (len(my_base_groups) * get_current_screen_index(qtile))
def get_group_on_current_screen_by_offset(qtile, offset=1):
    """Group object `offset` positions away on the current screen (wraps)."""
    idx = get_group_index_on_current_screen_by_offset(qtile, offset)
    return qtile.groups[idx]
def set_screen(qtile, screen, move_focus=True, move_window=True):
    # Move the focused window and/or the input focus to screen index `screen`.
    if move_window:
        qtile.current_window.cmd_toscreen(screen)
    if move_focus:
        qtile.cmd_to_screen(screen)
def cycle_screen(qtile, offset=1, move_focus=True, move_window=True):
    """Like set_screen, but targeting the screen `offset` steps away."""
    target = get_screen_by_offset(qtile, offset)
    set_screen(qtile, target, move_focus, move_window)
def set_current_screen_group(qtile, g, toggle=True):
    """Show group `g` on the current screen, toggling back if already shown."""
    screen = qtile.current_screen
    if toggle:
        screen.cmd_toggle_group(g)
    else:
        screen.set_group(get_group_obj_by_name(qtile, g))
def set_current_screen_group_on_current_screen(qtile, g, toggle=True):
    # Qualify base group ``g`` for the focused screen, then show/toggle it there.
    set_current_screen_group(qtile, get_group_on_current_screen(qtile, g), toggle)
def set_current_screen_group_on_current_screen_no_toggle(qtile, g):
    # Same as above, but always switch (never toggle back).
    set_current_screen_group_on_current_screen(qtile, g, toggle=False)
def set_current_screen_group_by_offset(qtile, offset=1):
    # Switch the focused screen to the group ``offset`` steps away on it.
    set_current_screen_group(qtile, get_group_on_current_screen_by_offset(qtile, offset).name)
def send_current_win_to_group(qtile, g, switch_group=True):
    """Move the focused window into group ``g``; optionally follow it there."""
    window = qtile.current_window
    window.togroup(g, switch_group=switch_group)
def send_current_win_to_group_on_current_screen_switch(qtile, g):
    # Send the window to base group ``g`` on this screen and follow it.
    send_current_win_to_group(qtile, get_group_on_current_screen(qtile, g))
def send_current_win_to_group_on_current_screen_noswitch(qtile, g):
    # Send the window to base group ``g`` on this screen without following.
    send_current_win_to_group(qtile, get_group_on_current_screen(qtile, g), False)
def win_cycle_group_next_switch(qtile):
    # Push the window to the next group on this screen and follow it.
    send_current_win_to_group(qtile, get_group_on_current_screen_by_offset(qtile).name, switch_group=True)
def win_cycle_group_prev_switch(qtile):
    # Push the window to the previous group on this screen and follow it.
    send_current_win_to_group(qtile, get_group_on_current_screen_by_offset(qtile, -1).name, switch_group=True)
def win_cycle_group_next_noswitch(qtile):
    # Push the window to the next group on this screen, stay put.
    send_current_win_to_group(qtile, get_group_on_current_screen_by_offset(qtile).name, switch_group=False)
def win_cycle_group_prev_noswitch(qtile):
    # Push the window to the previous group on this screen, stay put.
    send_current_win_to_group(qtile, get_group_on_current_screen_by_offset(qtile, -1).name, switch_group=False)
# Thin keybinding adapters over cycle_screen/set_current_screen_group_by_offset:
# "switch" variants carry focus along, "noswitch" variants move only the window.
def cycle_screen_next(qtile):
    cycle_screen(qtile, 1, True, False)
def cycle_screen_prev(qtile):
    cycle_screen(qtile, -1, True, False)
def cycle_group_next(qtile):
    set_current_screen_group_by_offset(qtile)
def cycle_group_prev(qtile):
    set_current_screen_group_by_offset(qtile, -1)
def win_cycle_screen_next_switch(qtile):
    cycle_screen(qtile, 1, True, True)
def win_cycle_screen_prev_switch(qtile):
    cycle_screen(qtile, -1, True, True)
def win_cycle_screen_next_noswitch(qtile):
    cycle_screen(qtile, 1, False, True)
def win_cycle_screen_prev_noswitch(qtile):
    cycle_screen(qtile, -1, False, True)
# ----------
keys = [
# Menus
Key([sup], space, lazy.spawn(my_launcher)),
Key([sup, shift], space, lazy.spawn(my_launcher_alt)),
Key([sup], tab, lazy.spawn(my_window_pager)),
Key([sup, shift], tab, lazy.run_extension(
extension.WindowList (
foreground=fg_color,
background=bg_color,
selected_foreground=fg_txt_color,
selected_background=bg_txt_color
)
)),
Key([sup], "v", lazy.spawn(my_clipmenu)),
Key([sup, shift], "v", lazy.spawn(my_clipmenu_alt)),
Key([sup], "q", lazy.spawn(my_powermenu)),
Key([sup], "p", lazy.spawn(my_player_ctrl)),
Key([sup], "y", lazy.spawn(my_workspaces)),
Key([sup], "r", lazy.spawn(my_handy)),
Key([sup], "i", lazy.spawn(my_emoji)),
# Window / Layout Management
Key([sup], "f", lazy.window.toggle_fullscreen()),
Key([sup], "t", lazy.window.toggle_floating()),
Key([sup], "F4", lazy.window.kill()),
Key([sup, shift], "F4", lazy.spawn(my_window_killer)),
Key([sup, shift], "q", lazy.window.kill()),
Key([sup], "j", lazy.layout.down()),
Key([sup], "k", lazy.layout.up()),
Key([sup], "h", lazy.layout.left()),
Key([sup], "l", lazy.layout.right()),
Key([sup, alt], "j", lazy.layout.shuffle_down()),
Key([sup, alt], "k", lazy.layout.shuffle_up()),
Key([sup, alt], "h", lazy.layout.shuffle_left()),
Key([sup, alt], "l", lazy.layout.shuffle_right()),
Key([sup, shift], "j", lazy.layout.grow_down()),
Key([sup, shift], "k", lazy.layout.grow_up()),
Key([sup, shift], "h", lazy.layout.grow_left()),
Key([sup, shift], "l", lazy.layout.grow_right()),
Key([sup, alt, shift], "h", lazy.layout.swap_column_left()),
Key([sup, alt, shift], "l", lazy.layout.swap_column_right()),
Key([sup], "g", lazy.layout.toggle_split()),
Key([sup, shift], "g", lazy.layout.normalize()),
# Key([sup], left, lazy.layout.shrink_main()),
# Key([sup], right, lazy.layout.grow_main()),
# Key([sup], down, lazy.layout.down()),
# Key([sup], up, lazy.layout.up()),
# Key([sup, shift], down, lazy.layout.shuffle_down()),
# Key([sup, shift], up, lazy.layout.shuffle_up()),
# Key([sup], "h", lazy.layout.shrink_main()),
# Key([sup], "l", lazy.layout.grow_main()),
# Key([sup], "j", lazy.layout.down()),
# Key([sup], "k", lazy.layout.up()),
# Key([sup, shift], "j", lazy.layout.shuffle_down()),
# Key([sup, shift], "k", lazy.layout.shuffle_up()),
# Groups
# Key([sup], "n", lazy.screen.prev_group()),
# Key([sup], "m", lazy.screen.next_group()),
Key([sup], "n", lazy.function(cycle_group_prev)),
Key([sup], "m", lazy.function(cycle_group_next)),
Key([sup, alt], "n", lazy.function(win_cycle_group_prev_switch)),
Key([sup, alt], "m", lazy.function(win_cycle_group_next_switch)),
Key([sup, shift, alt], "n", lazy.function(win_cycle_group_prev_noswitch)),
Key([sup, shift, alt], "m", lazy.function(win_cycle_group_next_noswitch)),
Key([sup, ctrl], "n", lazy.function(cycle_screen_prev)),
Key([sup, ctrl], "m", lazy.function(cycle_screen_next)),
Key([sup, ctrl, alt], "n", lazy.function(win_cycle_screen_prev_switch)),
Key([sup, ctrl, alt], "m", lazy.function(win_cycle_screen_next_switch)),
Key([sup, shift, ctrl, alt], "n", lazy.function(win_cycle_screen_prev_noswitch)),
Key([sup, shift, ctrl, alt], "m", lazy.function(win_cycle_screen_next_noswitch)),
# WM Cmds
Key([sup, shift], "r", lazy.restart()),
Key([sup, shift, ctrl, alt], "q", lazy.shutdown()),
# Mouse Emulation
Key([sup, ctrl], "h", lazy.spawn(my_mouse_move_cmd + f"-{my_mouse_move_dist} 0")),
Key([sup, ctrl], "j", lazy.spawn(my_mouse_move_cmd + f"0 {my_mouse_move_dist}")),
Key([sup, ctrl], "k", lazy.spawn(my_mouse_move_cmd + f"0 -{my_mouse_move_dist}")),
Key([sup, ctrl], "l", lazy.spawn(my_mouse_move_cmd + f"{my_mouse_move_dist} 0")),
Key([sup, ctrl], "a", lazy.spawn(my_mouse_click_cmd + "1")), # LC
Key([sup, ctrl], "d", lazy.spawn(my_mouse_click_cmd + "3")), # RC
Key([sup, ctrl], "x", lazy.spawn(my_mouse_click_cmd + "2")), # MC
Key([sup, ctrl], "s", lazy.spawn(my_mouse_click_cmd + "5")), # WU
Key([sup, ctrl], "w", lazy.spawn(my_mouse_click_cmd + "4")), # WD
# Apps
Key([sup], period, lazy.spawn(my_audio_mixer)),
Key([sup, shift], period, lazy.spawn(my_audio_mixer_alt)),
KeyChord([sup, ctrl, shift], period, [
Key([], period, lazy.spawn(my_audio_mixer)),
Key([shift], period, lazy.spawn(my_audio_mixer_alt)),
Key([], "1", lazy.spawn(my_audio_mixer_alt1)),
]),
#Key([sup], apostrophe, lazy.function(exec_func_no_qtile, run_keysboard, [True])),
Key([sup], apostrophe, lazy.function(exec_func_no_qtile, run_kmonad, [True])),
#Key([sup, shift], apostrophe, lazy.function(exec_func_no_qtile, run_keysboard, [False])),
Key([sup, shift], apostrophe, lazy.function(exec_func_no_qtile, run_kmonad, [False])),
Key([sup], ret, lazy.spawn(my_terminal)),
Key([sup, shift], ret, lazy.spawn(my_terminal_alt)),
KeyChord([sup, ctrl, shift], ret, [
Key([], ret, lazy.spawn(my_terminal)),
Key([shift], ret, lazy.spawn(my_terminal_alt)),
Key([], "1", lazy.spawn(my_terminal_alt1)),
Key([], "2", lazy.spawn(my_terminal_alt2)),
Key([], "3", lazy.spawn(my_terminal_alt3)),
Key([], "4", lazy.spawn(my_terminal_alt4)),
]),
Key([sup], "w", lazy.spawn(my_editor)),
Key([sup, shift], "w", lazy.spawn(my_editor_alt)),
KeyChord([sup, ctrl, shift], "w", [
Key([], "w", lazy.spawn(my_editor)),
Key([shift], "w", lazy.spawn(my_editor_alt)),
Key([], "1", lazy.spawn(my_editor_alt1)),
Key([], "2", lazy.spawn(my_editor_alt2)),
]),
Key([sup], "b", lazy.spawn(my_browser)),
Key([sup, alt], "b", lazy.spawn(my_browser_profile_menu)),
Key([sup, shift], "b", lazy.spawn(my_browser_alt)),
KeyChord([sup, ctrl, shift], "b", [
Key([], "b", lazy.spawn(my_browser)),
Key([shift], "b", lazy.spawn(my_browser_alt)),
Key([], "1", lazy.spawn(my_browser_alt1)),
Key([], "2", lazy.spawn(my_browser_alt2)),
Key([], "3", lazy.spawn(my_browser_alt3)),
]),
Key([sup, ctrl], "b", lazy.spawn(my_private_browser)),
Key([sup, ctrl, alt], "b", lazy.spawn(my_private_browser_alt)),
Key([sup], "e", lazy.spawn(my_file_manager)),
Key([sup, shift], "e", lazy.spawn(my_file_manager_alt)),
KeyChord([sup, ctrl, shift], "e", [
Key([], "e", lazy.spawn(my_file_manager)),
Key([shift], "e", lazy.spawn(my_file_manager_alt)),
Key([], "1", lazy.spawn(my_file_manager_alt1)),
]),
Key([sup], "x", lazy.spawn(my_mp)),
Key([sup, alt], "x", lazy.spawn(my_mp_private)),
Key([sup, shift], "x", lazy.spawn(my_mp_alt)),
KeyChord([sup, ctrl, shift], "x", [
Key([], "x", lazy.spawn(my_mp)),
Key([shift], "x", lazy.spawn(my_mp_alt)),
Key([], "1", lazy.spawn(my_mp_alt1)),
Key([], "2", lazy.spawn(my_mp_alt2)),
]),
Key([sup], "s", lazy.spawn(my_package_manager)),
Key([sup, shift], "s", lazy.spawn(my_package_manager_alt)),
Key([sup], "c", lazy.spawn(my_calculator)),
Key([sup, shift], "c", lazy.spawn(my_calculator_alt)),
Key([sup], semicolon, lazy.spawn(my_control_panel)),
Key([sup, shift], semicolon, lazy.spawn(my_control_panel_alt)),
# DropDown
KeyChord([sup], "d", [
Key([], ret, lazy.group['main-scratchpad'].dropdown_toggle('term')),
Key([], 'x', lazy.group['main-scratchpad'].dropdown_toggle('media')),
]),
# System
# Key([sup, shift, ctrl], "F11", lazy.spawn("sudo hibernate-reboot")),
# Key([sup, shift, ctrl], "F12", lazy.spawn("systemctl hibernate")),
Key([], "Print", lazy.function(exec_func_no_qtile, take_screenshot)),
# Special Keys
Key([], 'XF86AudioRaiseVolume', lazy.spawn('amixer sset Master 1%+')),
Key([], 'XF86AudioLowerVolume', lazy.spawn('amixer sset Master 1%-')),
Key([shift], 'XF86AudioRaiseVolume', lazy.spawn('amixer sset Master 1%+')),
Key([shift], 'XF86AudioLowerVolume', lazy.spawn('amixer sset Master 1%-')),
Key([], 'XF86AudioMute', lazy.spawn('amixer sset Master toggle')),
Key([], 'XF86AudioPause', lazy.spawn('playerctl play-pause')),
Key([], 'XF86AudioPlay', lazy.spawn('playerctl play-pause')),
Key([ctrl], 'XF86AudioPause', lazy.spawn('playerctl -a play-pause')),
Key([ctrl], 'XF86AudioPlay', lazy.spawn('playerctl -a play-pause')),
Key([], 'XF86AudioNext', lazy.spawn('playerctl position 1+')),
Key([], 'XF86AudioPrev', lazy.spawn('playerctl position 1-')),
Key([shift], 'XF86AudioNext', lazy.spawn('playerctl position 1+')),
Key([shift], 'XF86AudioPrev', lazy.spawn('playerctl position 1-')),
Key([], 'XF86MonBrightnessUp', lazy.spawn('brightnessctl set 1%+')),
Key([], 'XF86MonBrightnessDown', lazy.spawn('brightnessctl set 1%-')),
]
# Bind each base group to a key. Groups whose own name is not a bindable key
# fall back to grave / minus / equal by position (US keyboard number row).
for i, g in enumerate(my_base_groups):
    g_key = g
    # Idiom fix: 'not x in y' -> 'x not in y'.
    if g_key != "1" and g_key not in get_keys():
        if i == 0:
            g_key = grave
        elif i == 11:
            g_key = minus
        elif i == 12:
            g_key = equal
    if g_key in get_keys():
        keys.extend(
            [
                Key([sup], g_key, lazy.function(set_current_screen_group_on_current_screen, g)),
                Key([sup, shift], g_key, lazy.function(set_current_screen_group_on_current_screen_no_toggle, g)),
                Key([sup, alt], g_key, lazy.function(send_current_win_to_group_on_current_screen_switch, g)),
                Key([sup, shift, alt], g_key, lazy.function(send_current_win_to_group_on_current_screen_noswitch, g)),
            ]
        )
# Super + left-drag moves floating windows; Super + right-drag resizes them.
mouse = [
    # Drag([sup], "Button1", lazy.window.set_position(),
    # start=lazy.window.get_position()),
    # Drag([sup], "Button3", lazy.window.set_size(),
    # start=lazy.window.get_size()),
    Drag([sup], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
    Drag([sup], "Button3", lazy.window.set_size_floating(),
         start=lazy.window.get_size()),
]
# -------------------------------- #
# -- Widgets & Screens & Groups -- #
# -------------------------------- #
# Defaults applied to every bar widget: font, padding, and the color theme.
widget_defaults = dict(
    font=my_font,
    fontsize=14,
    padding=2,
    margin=my_margin,
    background=[dark_bg_color, dark_bg_color],
    foreground=[fg_txt_color, fg_txt_color],
    graph_color=[fg_txt_color, fg_txt_color],
    fill_color=[bg_txt_color, bg_txt_color],
)
class DividerWidget(widget.TextBox):
    """TextBox rendering a padded separator (default ``|``) between bar widgets."""
    def __init__(self, div_mid="|", div_padding_left=1, div_padding_right=1, **config):
        super().__init__(f"{' ' * div_padding_left}{div_mid}{' ' * div_padding_right}", **config)
class FileReaderWidget(widget_base.ThreadPoolText):
    """Bar widget that periodically displays the last line of a file.

    The watched path must be passed as ``read_file=`` -- any other keyword is
    forwarded to ThreadPoolText. When the file is missing, empty, or
    unreadable, ``empty_msg`` is shown instead.
    """
    def __init__(self, msg_base="", empty_msg="No Data", read_file="", **config):
        self.msg_base = msg_base      # prefix prepended to whatever is read
        self.empty_msg = empty_msg    # fallback text when nothing could be read
        self.read_file = read_file    # path of the file to tail
        widget_base.ThreadPoolText.__init__(self, "", **config)
    def poll(self):
        """Return msg_base + last line of read_file (or empty_msg / error text)."""
        msg = ""
        try:
            if os.path.isfile(self.read_file):
                # 'with' closes the handle; the old explicit f.close() inside
                # the block was redundant.
                with open(self.read_file, 'r') as f:
                    lines = f.readlines()
                if lines:
                    msg = str(lines[-1])
        except Exception as e:
            msg = f"Error: {e}"
        if msg == "":
            msg = self.empty_msg
        return self.msg_base + msg
class OpenWidgetBox(widget.WidgetBox):
    # WidgetBox variant that opens itself automatically once qtile has
    # configured it.
    def __init__(self, _widgets: list[widget_base._Widget] | None = None, **config):
        super().__init__(_widgets=_widgets, **config)
        # Opening must wait for configuration to finish, so a daemon thread
        # polls the 'configured' flag in the background.
        Thread(target=self.wait_open, daemon=True).start()
    def wait_open(self):
        # Busy-wait (0.1 s poll) until the widget is configured, then open.
        if not self.box_is_open:
            while not self.configured:
                sleep(0.1)
            self.cmd_toggle()
class ColorGmailChecker(widget.GmailChecker):
    # GmailChecker that colors its text: clear_foreground (green) when there
    # are no unseen messages, unseen_foreground (red) otherwise or on errors.
    def __init__(self, clear_foreground=green_color, unseen_foreground=red_color, **config):
        super().__init__(**config)
        self.clear_foreground=clear_foreground
        self.unseen_foreground=unseen_foreground
    def poll(self):
        # NOTE(review): a fresh IMAP connection is opened on every poll and is
        # never logged out or closed -- presumably relying on GC; confirm this
        # does not leak sockets over long sessions.
        self.gmail = imaplib.IMAP4_SSL("imap.gmail.com")
        self.gmail.login(self.username, self.password)
        answer, raw_data = self.gmail.status(self.email_path, "(MESSAGES UNSEEN)")
        if answer == "OK":
            dec = raw_data[0].decode()
            messages = int(re.search(r"MESSAGES\s+(\d+)", dec).group(1))
            unseen = int(re.search(r"UNSEEN\s+(\d+)", dec).group(1))
            # Recolor before returning so the bar reflects inbox state.
            if unseen == 0:
                self.foreground = self.clear_foreground
            else:
                self.foreground = self.unseen_foreground
            if self.status_only_unseen:
                return self.display_fmt.format(unseen)
            else:
                return self.display_fmt.format(messages, unseen)
        else:
            self.foreground = self.unseen_foreground
            qtile.logger.exception(
                "GmailChecker UNKNOWN error, answer: %s, raw_data: %s", answer, raw_data
            )
            return "UNKNOWN ERROR"
def get_sys_stat_widgets():
    """Widgets for the collapsible system-stats box: CPU, memory, network."""
    return [
        widget.Spacer(length=5),
        widget.TextBox("cpu:"),
        widget.CPUGraph(
            width=30,
            border_width=1,
            border_color=dark_bg_color,
            frequency=5,
            line_width=1,
            samples=50,
        ),
        widget.TextBox("mem:"),
        widget.MemoryGraph(
            width=30,
            border_width=1,
            border_color=dark_bg_color,
            line_width=1,
            frequency=5,
        ),
        widget.Memory(
            measure_mem = "G",
            measure_swap = "G",
        ),
        widget.Spacer(length=15),
        widget.TextBox("net:"),
        widget.Net(
            format = '{down} ↓↑ {up}',
            padding = 0
        ),
    ]
def get_widgets_1(i):
    """Widgets for the top bar of screen ``i``.

    The systray can only live on one screen; on every other screen it is
    filtered out before returning.
    """
    widgets = [
        widget.Spacer(length=15),
        widget.TextBox(
            fontsize=16,
            fmt='',
            mouse_callbacks={'Button1': lambda: qtile.cmd_spawn(my_launcher)},
        ),
        DividerWidget(),
        OpenWidgetBox(
            widgets=[
                widget.GroupBox(
                    border_width=my_border_width,
                    disable_drag=True,
                    rounded=True,
                    active=[fg_txt_color, fg_txt_color],
                    inactive=[bg_txt_color, bg_txt_color],
                    highlight_method="line",
                    this_current_screen_border=fg_line_color_alt,
                    this_screen_border=bg_line_color_alt,
                    highlight_color=[fg_color, fg_color],
                    visible_groups=get_full_group_names_for_screen(i),
                    spacing=0,
                ),
            ],
        ),
        DividerWidget(),
        # widget.TextBox(
        #     fontsize=16,
        #     fmt='',
        #     mouse_callbacks={
        #         'Button1': lambda: qtile.current_window.kill(),
        #         'Button3': lambda: qtile.cmd_spawn(my_window_killer),
        #     },
        # ),
        widget.Spacer(),
        widget.Systray(icon_size=24),
        widget.Spacer(),
        DividerWidget(),
        widget.Clock(
            format='%a %b %d %Y, %I:%M:%S',
        ),
        DividerWidget(),
        OpenWidgetBox(
            widgets=[
                widget.CheckUpdates(
                    distro=my_distro,
                    custom_command=my_check_updates_cmd,
                    no_update_string="",
                    colour_no_updates=green_color,
                    colour_have_updates=red_color,
                ),
                widget.Spacer(length=5),
                widget.Canto(),
                widget.Spacer(length=5),
                ColorGmailChecker(
                    username=my_gmail_username,
                    password=my_gmail_pass,
                ),
            ]
        ),
        DividerWidget(),
        widget.CapsNumLockIndicator(
            frequency=0.1,
        ),
        DividerWidget(),
        widget.WidgetBox(widgets=get_sys_stat_widgets()),
        DividerWidget(),
        widget.TextBox(
            fmt='',
            mouse_callbacks={'Button1': lambda: qtile.cmd_spawn('playerctl position 2-')},
        ),
        widget.Spacer(length=7),
        widget.TextBox(
            fmt='',
            mouse_callbacks={'Button1': lambda: qtile.cmd_spawn('playerctl position 2+')},
        ),
        widget.Spacer(length=7),
        widget.TextBox(
            fmt='',
            mouse_callbacks={
                'Button1': lambda: qtile.cmd_spawn('playerctl -a pause'),
                'Button3': lambda: qtile.cmd_spawn('playerctl play'),
            },
        ),
        widget.Spacer(length=7),
        widget.TextBox("vol:"),
        widget.Volume(update_interval=0.1, step=1),
        # widget.CurrentLayoutIcon(scale=0.70),
        DividerWidget(),
        widget.TextBox(
            fontsize=16,
            fmt='',
            mouse_callbacks={'Button1': lambda: qtile.cmd_spawn(my_powermenu)},
        ),
        widget.Spacer(length=15),
    ]
    if i != my_systray_screen:
        # Rebuild instead of calling widgets.remove() while iterating the same
        # list -- mutating a list during iteration skips elements.
        widgets = [w for w in widgets if not isinstance(w, widget.Systray)]
    return widgets
def get_widgets_2(i):
    """Widgets for the bottom bar of screen ``i``: task list plus kmonad status."""
    widgets = [
        DividerWidget(),
        widget.TaskList(
            border=fg_line_color,
            unfocused_border=bg_line_color,
            rounded=True,
        ),
        DividerWidget(),
        FileReaderWidget(
            # FileReaderWidget's parameter is 'read_file'; the previous
            # 'file=' keyword fell into **config, leaving read_file empty so
            # the widget was permanently stuck on its empty message.
            read_file="/tmp/tmux-bar-kmonad-pipe",
            msg_base="Kmonad: ",
            margin_y=4,
            padding_y=4,
            update_interval=0.3,
            mouse_callbacks={
                'Button1': lambda: run_kmonad(True),
                'Button3': lambda: run_kmonad(False),
            },
        ),
        DividerWidget(),
    ]
    return widgets
# Seed the group list with a scratchpad holding dropdown terminal/media
# windows; per-screen groups are appended later in the monitor loop.
groups = [
    ScratchPad(
        "main-scratchpad", [
            DropDown("term", my_terminal, opacity=0.8),
            DropDown("media", my_mp, opacity=1.0),
        ]
    )
]
screens = []
img_fmts = (".png", ".jpeg", ".jpg")
# Build the wallpaper pool: either a single image file, or every image found
# directly inside a directory.
if os.path.isfile(my_wallpapers) and my_wallpapers.endswith(img_fmts):
    wallpapers = [my_wallpapers]
elif os.path.isdir(my_wallpapers):
    wallpapers = []
    for f in os.listdir(my_wallpapers):
        img = my_wallpapers + f"/{f}"
        # Hidden-file test must look at the entry name, not the full path --
        # the full path never starts with '.', so the old check was a no-op.
        if not f.startswith(".") and img.endswith(img_fmts) and os.path.isfile(img):
            wallpapers.append(img)
else:
    wallpapers = []
# One Screen (top + bottom bar) plus a set of groups per connected monitor.
i = 0
for monitor in monitors:
    if len(monitor) > 0 and monitor != "\n":
        # Random wallpaper per screen (was a redundant chained assignment).
        wallpaper = choice(wallpapers) if wallpapers else None
        screens.append(
            Screen(
                top=bar.Bar(get_widgets_1(i), 30, background=bg_color, border_color=bg_line_color, border_width=my_border_width),
                bottom=bar.Bar(get_widgets_2(i), 30, background=bg_color, border_color=bg_line_color, border_width=my_border_width),
                wallpaper=wallpaper,
                wallpaper_mode="stretch",
            )
        )
        for g in get_full_group_names_for_screen(i):
            groups.append(Group(g))
        # Bind Super+Ctrl+<n> to focus screen n, Super+Ctrl+Alt+<n> to also
        # carry the window along.
        m_key = str(i)
        if m_key in get_keys():
            keys.extend(
                [
                    Key([sup, ctrl], m_key, lazy.function(set_screen, i, True, False)),
                    Key([sup, ctrl, alt], m_key, lazy.function(set_screen, i, True, True)),
                ]
            )
        i += 1
# ---------- #
# -- Vars -- #
# ---------- #
#dgroups_key_binder = None
#dgroups_app_rules = []
#extentions = []
# Qtile top-level behavior flags.
reconfigure_screens = True
follow_mouse_focus = False
bring_front_click = True
cursor_warp = False
auto_fullscreen = True
focus_on_window_activation = "smart"
wmname = "LG3D"  # presumably for Java GUI compatibility -- TODO confirm
# --------------------- #
# -- Layouts & Hooks -- #
# --------------------- #
# Single tiling layout: a themed two-column layout.
layouts = [
    layout.Columns(
        border_normal=bg_line_color,
        border_focus=fg_line_color,
        border_normal_stack=bg_line_color,
        border_focus_stack=fg_line_color,
        border_on_single=True,
        border_width=my_border_width,
        margin=my_margin,
        num_columns=2,
        ratio=0.70,
    )
]
# Floating layout: stock float rules plus a few dialog-style windows.
floating_layout = layout.Floating(
    float_rules=[
        *layout.Floating.default_float_rules,
        Match(wm_class='confirmreset'),  # gitk
        Match(wm_class='makebranch'),  # gitk
        Match(wm_class='maketag'),  # gitk
        Match(wm_class='ssh-askpass'),  # ssh-askpass
        Match(title='branchdialog'),  # gitk
        Match(title='pinentry'),  # GPG key password entry
    ],
    border_normal=bg_line_color_alt,
    border_focus=fg_line_color_alt,
    border_width=my_border_width,
)
# Window classes exempt from the auto-float client_new hook.
dont_auto_float_rules = []
@hook.subscribe.client_new
def floating_dialogs_hook(window):
    """Auto-float dialog and transient windows unless their class is exempt."""
    w = window.window
    is_dialog = w.get_wm_type() == 'dialog'
    is_transient = w.get_wm_transient_for()
    # De Morgan: 'all(c not in ...)' == 'not any(c in ...)'.
    blocked = any(c in dont_auto_float_rules for c in w.get_wm_class())
    if not blocked and (is_dialog or is_transient):
        window.floating = True
@hook.subscribe.screen_change
def screen_change_hook(qtile):
    """Re-run the monitor layout script whenever the screen set changes."""
    # Every other call site joins cfg_dir with a leading '/'; without it the
    # path is wrong unless cfg_dir happens to end in '/'.
    run_cmd(cfg_dir + "/scripts/run/run-monitors.sh")
@hook.subscribe.startup_complete
def autostart_hook():
    # Run the user's autostart script once qtile has fully started.
    run_cmd(cfg_dir + "/autostart.sh")
| 34.769 | 152 | 0.606862 |
acea167496a7281e831209aa95255570778a1d3a | 987 | py | Python | Aula_17/desafio1.py | alefks/logica_programacao_T3C5_Blueedtech | 8eb60e072e957db36961d520cfd64befb8430e64 | [
"MIT"
] | null | null | null | Aula_17/desafio1.py | alefks/logica_programacao_T3C5_Blueedtech | 8eb60e072e957db36961d520cfd64befb8430e64 | [
"MIT"
] | null | null | null | Aula_17/desafio1.py | alefks/logica_programacao_T3C5_Blueedtech | 8eb60e072e957db36961d520cfd64befb8430e64 | [
"MIT"
] | null | null | null | # Utilizando os conceitos de Orientação a Objetos (OO) vistos na aula anterior, crie um lançador de dados e moedas em que o usuário deve escolher o objeto a ser lançado. Não esqueça que os lançamentos são feitos de forma randômica.
import random
sorteio_moedas = ["Cara","Coroa"]
escolha_usuario = ["Dados","Moedas"]
sorteio = ""
class lançador:
def __init__(self, escolha_objeto):
None
def sorteiomoedas(self):
sorteio = random.choice(sorteio_moedas)
return f"O programa sorteou: {sorteio}!"
def sorteiodados(self):
sorteio = random.randint(1,6)
return f"O programa sorteou: {sorteio}"
# Read the user's choice and dispatch to the matching draw method.
a = input("Opções:\n- Dados\n- Moedas\nR: ").lower()
escolha = lançador(a)
r = "s"
# NOTE(review): every branch ends in 'break', so this loop runs at most once
# and 'r' is never updated -- effectively a single if/elif/else.
while r == "s":
    if a == "moedas":
        print(escolha.sorteiomoedas())
        break
    elif a == "dados":
        print(escolha.sorteiodados())
        break
    else:
        print("Você digitou algo errado!")
        break
acea173a1de8cd1334161c8c3f81d1a656467180 | 151,644 | py | Python | gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py | baotingfang/gpdb | 7f4774e4a02d48df142820ad69c5ac71d1ded078 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2021-12-15T06:51:31.000Z | 2021-12-15T06:51:31.000Z | gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py | baotingfang/gpdb | 7f4774e4a02d48df142820ad69c5ac71d1ded078 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/test/behave/mgmt_utils/steps/mgmt_utils.py | baotingfang/gpdb | 7f4774e4a02d48df142820ad69c5ac71d1ded078 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | import math
import fnmatch
import glob
import json
import os
import re
import pipes
import shutil
import socket
import tempfile
import _thread
import time
from subprocess import check_output, Popen, PIPE
import subprocess
from collections import defaultdict
import psutil
from behave import given, when, then
from datetime import datetime, timedelta
from os import path
from contextlib import closing
from gppylib.gparray import GpArray, ROLE_PRIMARY, ROLE_MIRROR
from gppylib.commands.gp import SegmentStart, GpStandbyStart, CoordinatorStop
from gppylib.commands import gp
from gppylib.commands.unix import findCmdInPath, Scp
from gppylib.operations.startSegments import MIRROR_MODE_MIRRORLESS
from gppylib.operations.unix import ListRemoteFilesByPattern, CheckRemoteFile
from test.behave_utils.gpfdist_utils.gpfdist_mgmt import Gpfdist
from test.behave_utils.utils import *
from test.behave_utils.cluster_setup import TestCluster, reset_hosts
from test.behave_utils.cluster_expand import Gpexpand
from test.behave_utils.gpexpand_dml import TestDML
from gppylib.commands.base import Command, REMOTE
from gppylib import pgconf
from gppylib.operations.package import linux_distribution_id, linux_distribution_version
# These steps require a configured cluster; fail fast if the coordinator data
# directory is not set in the environment.
coordinator_data_dir = gp.get_coordinatordatadir()
if coordinator_data_dir is None:
    raise Exception('Please set COORDINATOR_DATA_DIRECTORY in environment')
def show_all_installed(gphome):
    """Shell command listing every installed gppkg under ``gphome``, per distro."""
    distro = (linux_distribution_id(), linux_distribution_version())
    flavor = distro[0].lower()
    if 'ubuntu' in flavor:
        return "dpkg --get-selections --admindir=%s/share/packages/database/deb | awk '{print $1}'" % gphome
    if 'centos' in flavor or 'rhel' in flavor:
        return "rpm -qa --dbpath %s/share/packages/database" % gphome
    raise Exception('UNKNOWN platform: %s' % str(distro))
def remove_native_package_command(gphome, full_gppkg_name):
    """Shell command removing a gppkg's native package, per distro."""
    distro = (linux_distribution_id(), linux_distribution_version())
    flavor = distro[0].lower()
    if 'ubuntu' in flavor:
        return 'fakeroot dpkg --force-not-root --log=/dev/null --instdir=%s --admindir=%s/share/packages/database/deb -r %s' % (gphome, gphome, full_gppkg_name)
    if 'centos' in flavor or 'rhel' in flavor:
        return 'rpm -e %s --dbpath %s/share/packages/database' % (full_gppkg_name, gphome)
    raise Exception('UNKNOWN platform: %s' % str(distro))
def remove_gppkg_archive_command(gphome, gppkg_name):
    """Shell command deleting the cached gppkg archive from ``gphome``."""
    archive = '%s/share/packages/archive/%s.gppkg' % (gphome, gppkg_name)
    return 'rm -f ' + archive
def create_local_demo_cluster(context, extra_config='', with_mirrors='true', with_standby='true', num_primaries=None):
    """Tear down any running cluster and build a fresh gpdemo cluster.

    extra_config is prefixed to the demo_cluster.sh invocation (e.g. env
    overrides); with_mirrors/with_standby are the string flags the script
    expects; num_primaries defaults to NUM_PRIMARY_MIRROR_PAIRS (or 3).
    Raises on any non-zero exit from the shell pipeline.
    """
    stop_database_if_started(context)
    if num_primaries is None:
        num_primaries = os.getenv('NUM_PRIMARY_MIRROR_PAIRS', 3)
    # The demo cluster always listens on the default demo coordinator port.
    os.environ['PGPORT'] = '15432'
    cmd = """
        cd ../gpAux/gpdemo &&
        export DEMO_PORT_BASE={port_base} &&
        export NUM_PRIMARY_MIRROR_PAIRS={num_primary_mirror_pairs} &&
        export WITH_STANDBY={with_standby} &&
        export WITH_MIRRORS={with_mirrors} &&
        ./demo_cluster.sh -d && ./demo_cluster.sh -c &&
        {extra_config} ./demo_cluster.sh
    """.format(port_base=os.getenv('PORT_BASE', 15432),
               num_primary_mirror_pairs=num_primaries,
               with_mirrors=with_mirrors,
               with_standby=with_standby,
               extra_config=extra_config)
    run_command(context, cmd)

    if context.ret_code != 0:
        raise Exception('%s' % context.error_message)
def _cluster_contains_standard_demo_segments():
    """
    Returns True iff a cluster contains a coordinator, a standby, and three
    primary/mirror pairs, and each segment is in the correct role.
    """
    # One (content, role, preferred_role) tuple per expected segment: the four
    # demo content IDs, each with a primary and a mirror in its home role.
    expected_segments = {
        (contentid, role, role)
        for contentid in (-1, 0, 1, 2)
        for role in ('p', 'm')
    }
    # Compare against what the catalog actually reports.
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    actual_segments = {
        (seg.content, seg.role, seg.preferred_role)
        for seg in gparray.getDbList()
    }
    return expected_segments == actual_segments
# Reuse a running standard demo cluster when possible; otherwise rebuild it.
@given('a standard local demo cluster is running')
def impl(context):
    if (check_database_is_running(context)
        and coordinator_data_dir.endswith("demoDataDir-1")
        and _cluster_contains_standard_demo_segments()
        and are_segments_running()):
        return

    create_local_demo_cluster(context, num_primaries=3)

# Always rebuild a fresh standard demo cluster.
@given('a standard local demo cluster is created')
def impl(context):
    create_local_demo_cluster(context, num_primaries=3)

# Generate the demo cluster config only, without starting a database.
@given('create demo cluster config')
def impl(context):
    create_local_demo_cluster(context, extra_config='ONLY_PREPARE_CLUSTER_ENV=true')

# Generate config with HBA_HOSTNAMES toggled on or off.
@given('the cluster config is generated with HBA_HOSTNAMES "{hba_hostnames_toggle}"')
def impl(context, hba_hostnames_toggle):
    extra_config = 'env EXTRA_CONFIG="HBA_HOSTNAMES={}" ONLY_PREPARE_CLUSTER_ENV=true'.format(hba_hostnames_toggle)
    create_local_demo_cluster(context, extra_config=extra_config)

# Generate config with data checksums toggled on or off.
@given('the cluster config is generated with data_checksums "{checksum_toggle}"')
def impl(context, checksum_toggle):
    extra_config = 'env EXTRA_CONFIG="HEAP_CHECKSUM={}" ONLY_PREPARE_CLUSTER_ENV=true'.format(checksum_toggle)
    create_local_demo_cluster(context, extra_config=extra_config)
# Build a mirror-less, standby-less cluster with the requested primary count.
@given('the cluster is generated with "{num_primaries}" primaries only')
def impl(context, num_primaries):
    os.environ['PGPORT'] = '15432'
    demoDir = os.path.abspath("%s/../gpAux/gpdemo" % os.getcwd())
    os.environ['COORDINATOR_DATA_DIRECTORY'] = "%s/datadirs/qddir/demoDataDir-1" % demoDir

    create_local_demo_cluster(context, with_mirrors='false', with_standby='false', num_primaries=num_primaries)

    # Later gpexpand steps check this flag to know mirrors are absent.
    context.gpexpand_mirrors_enabled = False
# Run an arbitrary psql command against a database, failing the step on error.
@given('the user runs psql with "{psql_cmd}" against database "{dbname}"')
@when('the user runs psql with "{psql_cmd}" against database "{dbname}"')
@then('the user runs psql with "{psql_cmd}" against database "{dbname}"')
def impl(context, dbname, psql_cmd):
    cmd = "psql -d %s %s" % (dbname, psql_cmd)

    run_command(context, cmd)

    if context.ret_code != 0:
        raise Exception('%s' % context.error_message)

# Open (or replace) a named connection cached on the behave context.
@given('the user connects to "{dbname}" with named connection "{cname}"')
def impl(context, dbname, cname):
    if not hasattr(context, 'named_conns'):
        context.named_conns = {}
    if cname in context.named_conns:
        context.named_conns[cname].close()
        del context.named_conns[cname]
    context.named_conns[cname] = dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)

# Create a writable external table pointing at a dummy gpfdist location.
# NOTE(review): the parameter is misspelled 'conetxt' (unused here, harmless).
@given('the user create a writable external table with name "{tabname}"')
def impl(conetxt, tabname):
    dbname = 'gptest'
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        sql = ("create writable external table {tabname}(a int) location "
               "('gpfdist://host.invalid:8000/file') format 'text'").format(tabname=tabname)
        dbconn.execSQL(conn, sql)
# Build a list-partitioned table and swap one partition for an external table.
@given('the user create an external table with name "{tabname}" in partition table t')
def impl(conetxt, tabname):
    dbname = 'gptest'
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        sql = ("create external table {tabname}(i int, j int) location "
               "('gpfdist://host.invalid:8000/file') format 'text'").format(tabname=tabname)
        dbconn.execSQL(conn, sql)
        sql = "create table t(i int, j int) partition by list(i) (values(2018), values(1218))"
        dbconn.execSQL(conn, sql)
        sql = ("alter table t exchange partition for (2018) with table {tabname} without validation").format(tabname=tabname)
        dbconn.execSQL(conn, sql)
        conn.commit()

# Create and populate a 10k-row range-partitioned table.
@given('the user create a partition table with name "{tabname}"')
def impl(conetxt, tabname):
    dbname = 'gptest'
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        sql = "create table {tabname}(i int) partition by range(i) (start(0) end(10001) every(1000)) distributed by (i)".format(tabname=tabname)
        dbconn.execSQL(conn, sql)
        sql = "INSERT INTO {tabname} SELECT generate_series(1, 10000)".format(tabname=tabname)
        dbconn.execSQL(conn, sql)
        conn.commit()

# Execute raw SQL on a previously opened named connection.
@given('the user executes "{sql}" with named connection "{cname}"')
def impl(context, cname, sql):
    conn = context.named_conns[cname]
    dbconn.execSQL(conn, sql)

# Close and forget a named connection.
@then('the user drops the named connection "{cname}"')
def impl(context, cname):
    if cname in context.named_conns:
        context.named_conns[cname].close()
        del context.named_conns[cname]
# Ensure the cluster is up before the scenario continues.
@given('the database is running')
@then('the database is running')
def impl(context):
    start_database_if_not_started(context)
    if has_exception(context):
        raise context.exception
@given('the database is initialized with checksum "{checksum_toggle}"')
def impl(context, checksum_toggle):
    """Ensure a running cluster whose data_checksums setting matches the
    toggle, rebuilding the demo cluster when it does not."""
    is_ok = check_database_is_running(context)

    if is_ok:
        run_command(context, "gpconfig -s data_checksums")
        if context.ret_code != 0:
            raise Exception("cannot run gpconfig: %s, stdout: %s" % (context.error_message, context.stdout_message))

        try:
            # will throw
            check_stdout_msg(context, "Values on all segments are consistent")
            check_stdout_msg(context, "Coordinator value: %s" % checksum_toggle)
            check_stdout_msg(context, "Segment value: %s" % checksum_toggle)
        except Exception:
            # Was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; only genuine check failures should
            # trigger a cluster rebuild.
            is_ok = False

    if not is_ok:
        stop_database(context)

        os.environ['PGPORT'] = '15432'
        port_base = os.getenv('PORT_BASE', 15432)

        cmd = """
        cd ../gpAux/gpdemo; \
            export DEMO_PORT_BASE={port_base} && \
            export NUM_PRIMARY_MIRROR_PAIRS={num_primary_mirror_pairs} && \
            export WITH_MIRRORS={with_mirrors} && \
            ./demo_cluster.sh -d && ./demo_cluster.sh -c && \
            env EXTRA_CONFIG="HEAP_CHECKSUM={checksum_toggle}" ./demo_cluster.sh
        """.format(port_base=port_base,
                   num_primary_mirror_pairs=os.getenv('NUM_PRIMARY_MIRROR_PAIRS', 3),
                   with_mirrors='true',
                   checksum_toggle=checksum_toggle)

        run_command(context, cmd)

        if context.ret_code != 0:
            raise Exception('%s' % context.error_message)

        if ('PGDATABASE' in os.environ):
            run_command(context, "createdb %s" % os.getenv('PGDATABASE'))
@given('the database is not running')
@when('the database is not running')
@then('the database is not running')
def impl(context):
stop_database_if_started(context)
if has_exception(context):
raise context.exception
@given('database "{dbname}" exists')
@then('database "{dbname}" exists')
def impl(context, dbname):
create_database_if_not_exists(context, dbname)
@given('database "{dbname}" is created if not exists on host "{HOST}" with port "{PORT}" with user "{USER}"')
@then('database "{dbname}" is created if not exists on host "{HOST}" with port "{PORT}" with user "{USER}"')
def impl(context, dbname, HOST, PORT, USER):
    """Create *dbname* on the host/port/user read from the named env vars."""
    host = os.environ.get(HOST)
    port_value = os.environ.get(PORT)
    port = int(port_value) if port_value is not None else 0
    user = os.environ.get(USER)
    create_database_if_not_exists(context, dbname, host, port, user)
@when('the database "{dbname}" does not exist')
@given('the database "{dbname}" does not exist')
@then('the database "{dbname}" does not exist')
def impl(context, dbname):
    """Drop *dbname* when present so that it no longer exists."""
    drop_database_if_exists(context, dbname)
@when('the database "{dbname}" does not exist on host "{HOST}" with port "{PORT}" with user "{USER}"')
@given('the database "{dbname}" does not exist on host "{HOST}" with port "{PORT}" with user "{USER}"')
@then('the database "{dbname}" does not exist on host "{HOST}" with port "{PORT}" with user "{USER}"')
def impl(context, dbname, HOST, PORT, USER):
    """Drop *dbname* on the host/port/user read from the named env vars.

    Tolerate an unset PORT env var (default to 0) instead of crashing on
    int(None), matching the companion "is created if not exists" step.
    """
    host = os.environ.get(HOST)
    port_value = os.environ.get(PORT)
    port = int(port_value) if port_value is not None else 0
    user = os.environ.get(USER)
    drop_database_if_exists(context, dbname, host, port, user)
def get_segment_hostlist():
    """Return the sorted list of segment hostnames (coordinator excluded)."""
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    hosts = gparray.get_hostlist(includeCoordinator=False)
    if not hosts:
        raise Exception('segment_hostlist was empty')
    return sorted(hosts)
@given('the user truncates "{table_list}" tables in "{dbname}"')
@when('the user truncates "{table_list}" tables in "{dbname}"')
@then('the user truncates "{table_list}" tables in "{dbname}"')
def impl(context, table_list, dbname):
    """Truncate each comma-separated table name in *dbname*."""
    if not table_list:
        raise Exception('Table list is empty')
    for name in table_list.split(','):
        truncate_table(dbname, name.strip())
@given(
    'there is a partition table "{tablename}" has external partitions of gpfdist with file "{filename}" on port "{port}" in "{dbname}" with data')
def impl(context, tablename, dbname, filename, port):
    """Create *dbname* if needed and build *tablename* with a gpfdist external
    partition backed by *filename* served on *port*.

    The step pattern must name all four parameters ({tablename}, {filename},
    {port}, {dbname}) so behave can bind them to the function arguments.
    """
    create_database_if_not_exists(context, dbname)
    drop_table_if_exists(context, table_name=tablename, dbname=dbname)
    create_external_partition(context, tablename, dbname, port, filename)
@given('"{dbname}" does not exist')
def impl(context, dbname):
    """Drop *dbname* unconditionally."""
    drop_database(context, dbname)
@given('{env_var} environment variable is not set')
def impl(context, env_var):
    """Unset *env_var*, remembering its prior value on context.orig_env
    (None when it was already unset) so it can be restored later."""
    if not hasattr(context, 'orig_env'):
        context.orig_env = dict()
    # pop both records the previous value and removes the variable
    context.orig_env[env_var] = os.environ.pop(env_var, None)
@then('{env_var} environment variable should be restored')
def impl(context, env_var):
    """Restore *env_var* to the value saved by the companion 'is not set' step.

    If the variable was unset originally (saved value is None), leave it
    unset: assigning None into os.environ raises TypeError.
    """
    if not hasattr(context, 'orig_env'):
        raise Exception('%s can not be reset' % env_var)
    if env_var not in context.orig_env:
        raise Exception('%s can not be reset.' % env_var)

    original = context.orig_env[env_var]
    if original is None:
        os.environ.pop(env_var, None)
    else:
        os.environ[env_var] = original
    del context.orig_env[env_var]
@given('the user {action} the walsender on the {segment} on content {content}')
@then('the user {action} the walsender on the {segment} on content {content}')
def impl(context, action, segment, content):
    """Inject an infinite fault ({action}) into the walsender loop of the
    chosen segment role for the given content id."""
    role_by_segment = {'primary': "'p'", 'mirror': "'m'"}
    if segment not in role_by_segment:
        raise Exception('segment role can only be primary or mirror')
    role = role_by_segment[segment]

    execute_sql('postgres', "CREATE EXTENSION IF NOT EXISTS gp_inject_fault;")
    inject_fault_query = "SELECT gp_inject_fault_infinite('wal_sender_loop', '%s', dbid) FROM gp_segment_configuration WHERE content=%s AND role=%s;" % (action, content, role)
    execute_sql('postgres', inject_fault_query)
    return
@given('the user skips walreceiver flushing on the {segment} on content {content}')
@then('the user skips walreceiver flushing on the {segment} on content {content}')
def impl(context, segment, content):
    """Inject an infinite 'skip' fault into walreceiver flushing on the chosen
    segment role for the given content id."""
    role_by_segment = {'primary': "'p'", 'mirror': "'m'"}
    if segment not in role_by_segment:
        raise Exception('segment role can only be primary or mirror')
    role = role_by_segment[segment]

    execute_sql('postgres', "CREATE EXTENSION IF NOT EXISTS gp_inject_fault;")
    inject_fault_query = "SELECT gp_inject_fault_infinite('walrecv_skip_flush', 'skip', dbid) FROM gp_segment_configuration WHERE content=%s AND role=%s;" % (content, role)
    execute_sql('postgres', inject_fault_query)
    return
@given('the user waits until all bytes are sent to mirror on content {content}')
@then('the user waits until all bytes are sent to mirror on content {content}')
def impl(context, content):
    """Block until the primary for *content* reports zero unsent WAL bytes."""
    primary_host, primary_port = get_primary_segment_host_port_for_content(content)
    lag_query = "SELECT pg_current_wal_lsn() - sent_lsn FROM pg_stat_replication;"
    wait_for_desired_query_result_on_segment(primary_host, primary_port, lag_query, 0)
def backup_bashrc():
    """Save a copy of ~/.bashrc to ~/.bashrc.backup if one exists.

    Fixes in this version: the original passed an unexpanded '~/.bashrc' to
    os.path.isfile (which never expands '~', so the check was always False)
    and appended '.backup' to a name that already ended in '.backup'.
    Companion to restore_bashrc().
    """
    bashrc = os.path.expanduser('~/.bashrc')
    backup = bashrc + '.backup'
    if os.path.isfile(bashrc):
        command = "cp -f %s %s" % (bashrc, backup)
        result = run_cmd(command)
        if (result[0] != 0):
            raise Exception("Error while backing up bashrc file. STDERR:%s" % (result[2]))
    return
def restore_bashrc():
    """Put ~/.bashrc back from ~/.bashrc.backup, or remove it when no backup exists.

    Fixes in this version: os.path.isfile is given an expanded path (the
    original passed '~/...' which never matches) and the backup filename is
    the single-suffix '~/.bashrc.backup' written by backup_bashrc().
    """
    bashrc = os.path.expanduser('~/.bashrc')
    backup = bashrc + '.backup'
    if os.path.isfile(backup):
        command = "mv -f %s %s" % (backup, bashrc)
    else:
        command = "rm -f %s" % (bashrc)
    result = run_cmd(command)
    if (result[0] != 0):
        raise Exception('Error while restoring up bashrc file. ')
@given('the user runs "{command}"')
@when('the user runs "{command}"')
@then('the user runs "{command}"')
def impl(context, command):
    """Execute a Greenplum utility command, recording results on context."""
    run_gpcommand(context, command)
@when('the user sets banner on host')
def impl(context):
    """Append a one-line echo banner to ~/.bashrc and re-source it.

    The error message now includes stderr; the original format string ended
    with 'STDERR:' but never interpolated result[2].
    """
    bashrc = '~/.bashrc'
    command = "echo 'echo \"banner test\"' >> %s; source %s" % (bashrc, bashrc)
    result = run_cmd(command)
    if (result[0] != 0):
        raise Exception("Error while updating the bashrc file:%s. STDERR:%s" % (bashrc, result[2]))
@when('the user sets multi-line banner on host')
def impl(context):
    """Append a multi-line echo banner to ~/.bashrc and re-source it.

    The error message now includes stderr; the original format string ended
    with 'STDERR:' but never interpolated result[2].
    """
    bashrc = '~/.bashrc'
    command = "echo 'echo -e \"banner test1\\nbanner test2\\nbanner test-3\\nbanner test4\"' >> %s; source %s" % (bashrc, bashrc)
    result = run_cmd(command)
    if (result[0] != 0):
        raise Exception("Error while updating the bashrc file:%s. STDERR:%s" % (bashrc, result[2]))
@when('the user sets banner with separator token on host')
def impl(context):
    """Append a banner containing the GP separator token to ~/.bashrc and re-source it.

    The error message now includes stderr; the original format string ended
    with 'STDERR:' but never interpolated result[2].
    """
    bashrc = '~/.bashrc'
    token = 'GP_DELIMITER_FOR_IGNORING_BASH_BANNER'
    command = "echo 'echo -e \"banner test1\\nbanner %s test2\\nbanner test-3\\nbanner test4\\nbanner test5 %s\"' >> %s; source %s" % (token, token, bashrc, bashrc)
    result = run_cmd(command)
    if (result[0] != 0):
        raise Exception("Error while updating the bashrc file:%s. STDERR:%s" % (bashrc, result[2]))
@given('source gp_bash_functions and run simple echo')
@then('source gp_bash_functions and run simple echo')
@when('source gp_bash_functions and run simple echo')
def impl(context):
    """Run a simple echo through REMOTE_EXECUTE_AND_GET_OUTPUT and verify its output."""
    gp_bash_functions = os.getenv("GPHOME") + '/bin/lib/gp_bash_functions.sh'
    message = 'Hello World. This is a simple command output'
    command = "source %s; REMOTE_EXECUTE_AND_GET_OUTPUT localhost \"echo %s\"" %(gp_bash_functions, message)
    result = run_cmd(command)
    rc, out, err = result[0], result[1], result[2]
    if rc != 0:
        raise Exception ("Expected error code is 0. Command returned error code:%s.\nStderr:%s\n" % (rc, err))
    if out.strip() != message:
        raise Exception ("Expected output is: [%s] while received output is: [%s] Return code:%s" %(message, out, rc))
@given('source gp_bash_functions and run complex command')
@then('source gp_bash_functions and run complex command')
@when('source gp_bash_functions and run complex command')
def impl(context):
    """Run a compound pipeline through REMOTE_EXECUTE_AND_GET_OUTPUT and verify the two-line output."""
    gp_bash_functions = os.getenv("GPHOME") + '/bin/lib/gp_bash_functions.sh'
    message = 'Hello World. This is a simple command output'
    command = "source %s; REMOTE_EXECUTE_AND_GET_OUTPUT localhost \"echo %s; hostname | wc -w | xargs\"" %(gp_bash_functions, message)
    result = run_cmd(command)
    rc, out, err = result[0], result[1], result[2]
    if rc != 0:
        raise Exception ("Expected error code is 0. Command returned error code:%s.\nStderr:%s\n" % (rc, err))
    # second expected line is 'hostname | wc -w' which always prints 1
    expected = message + "\n1"
    if out.strip() != expected:
        raise Exception ("Expected output is: [%s] while received output is:[%s] Return code:%s" %(expected, out, rc))
@given('source gp_bash_functions and run echo with separator token')
@then('source gp_bash_functions and run echo with separator token')
@when('source gp_bash_functions and run echo with separator token')
def impl(context):
    """Verify the banner separator token passes through REMOTE_EXECUTE_AND_GET_OUTPUT intact."""
    gp_bash_functions = os.getenv("GPHOME") + '/bin/lib/gp_bash_functions.sh'
    message = 'Hello World. This is a simple command output'
    token = 'GP_DELIMITER_FOR_IGNORING_BASH_BANNER'
    command = "source %s; REMOTE_EXECUTE_AND_GET_OUTPUT localhost \"echo %s; echo %s; echo %s\"" %(gp_bash_functions, message, token, message)
    result = run_cmd(command)
    rc, out, err = result[0], result[1], result[2]
    if rc != 0:
        raise Exception ("Expected error code is 0. Command returned error code:%s.\nStderr:%s\n" % (rc, err))
    expected = "%s\n%s\n%s" %(message, token, message)
    if out.strip() != expected:
        raise Exception ("Expected output is: [%s] while received output is:[%s] Return code:%s" %(expected, out, rc))
@given('the user asynchronously sets up to end {process_name} process in {secs} seconds')
@when('the user asynchronously sets up to end {process_name} process in {secs} seconds')
def impl(context, process_name, secs):
    """Schedule a background kill: 'that' targets the saved async process,
    anything else kills by name via ps/grep."""
    delay = int(secs)
    if process_name == 'that':
        command = "sleep %d; kill -9 %d" % (delay, context.asyncproc.pid)
    else:
        command = "sleep %d; ps ux | grep %s | awk '{print $2}' | xargs kill" % (delay, process_name)
    run_async_command(context, command)
@when('the user asynchronously sets up to end gpinitsystem process when {log_msg} is printed in the logs')
def impl(context, log_msg):
    """Poll gpinitsystem's logs in the background and kill gpinitsystem as soon
    as *log_msg* appears in them."""
    watcher = ("while sleep 0.1; "
               "do if egrep --quiet %s ~/gpAdminLogs/gpinitsystem*log ; "
               "then ps ux | grep bin/gpinitsystem |awk '{print $2}' | xargs kill ;break 2; "
               "fi; done" % (log_msg))
    run_async_command(context, watcher)
@when('the user asynchronously sets up to end gpcreateseg process when it starts')
def impl(context):
    """Arm a background watcher (bounded to 10 minutes) that kills gpcreateseg
    shortly after it appears in the process list."""
    # We keep trying to find the gpcreateseg process using ps,grep
    # and when we find it, we want to kill it only after the trap for ERROR_EXIT is setup (hence the sleep 1)
    command = """timeout 10m
        bash -c "while sleep 0.1;
        do if ps ux | grep [g]pcreateseg ;
        then sleep 1 && ps ux | grep [g]pcreateseg |awk '{print \$2}' | xargs kill ;
        break 2; fi; done" """
    run_async_command(context, command)
@given('the user asynchronously runs "{command}" and the process is saved')
@when('the user asynchronously runs "{command}" and the process is saved')
@then('the user asynchronously runs "{command}" and the process is saved')
def impl(context, command):
    """Launch *command* asynchronously, keeping its process handle on context."""
    run_gpcommand_async(context, command)
@given('the async process finished with a return code of {ret_code}')
@when('the async process finished with a return code of {ret_code}')
@then('the async process finished with a return code of {ret_code}')
def impl(context, ret_code):
    """Wait for the saved async process and verify its exit status."""
    rc, out, err = context.asyncproc.communicate2()
    if rc != int(ret_code):
        raise Exception("return code of the async proccess didn't match:\n"
                        "rc: %s\n"
                        "stdout: %s\n"
                        "stderr: %s" % (rc, out, err))
@when('the user waits until saved async process is completed')
def impl(context):
    """Block until the saved async process finishes."""
    context.asyncproc.communicate2()
@when('the user waits until {process_name} process is completed')
def impl(context, process_name):
    """Poll ps until no process matching *process_name* remains."""
    poll_loop = "while ps ux | grep %s | grep -v grep; do sleep 0.1; done;" % process_name
    run_cmd(poll_loop)
@given('a user runs "{command}" with gphome "{gphome}"')
@when('a user runs "{command}" with gphome "{gphome}"')
@then('a user runs "{command}" with gphome "{gphome}"')
def impl(context, command, gphome):
    """Run *command* remotely on the coordinator host under the given GPHOME,
    storing the return code on context."""
    coordinator_host = get_coordinator_hostname()[0][0]
    remote_cmd = Command(name='Remove archive gppkg',
                         cmdStr=command,
                         ctxt=REMOTE,
                         remoteHost=coordinator_host,
                         gphome=gphome)
    remote_cmd.run()
    context.ret_code = remote_cmd.get_return_code()
@given('the user runs command "{command}"')
@when('the user runs command "{command}"')
@then('the user runs command "{command}"')
def impl(context, command):
    """Run a shell command, failing the step if it raised."""
    run_command(context, command)
    if has_exception(context):
        raise context.exception
@given('the user runs remote command "{command}" on host "{hostname}"')
@when('the user runs remote command "{command}" on host "{hostname}"')
@then('the user runs remote command "{command}" on host "{hostname}"')
def impl(context, command, hostname):
    """Run *command* on *hostname* with the greenplum environment sourced."""
    source_file = os.getenv("GPHOME") + '/greenplum_path.sh'
    env_setup = 'export COORDINATOR_DATA_DIRECTORY=%s' % coordinator_data_dir
    run_command_remote(context, command, hostname, source_file, env_setup)
    if has_exception(context):
        raise context.exception
@given('the user runs command "{command}" eok')
@when('the user runs command "{command}" eok')
@then('the user runs command "{command}" eok')
def impl(context, command):
    """Run a shell command; errors are tolerated ('eok')."""
    run_command(context, command)
@when('the user runs async command "{command}"')
def impl(context, command):
    """Start *command* in the background without waiting for it."""
    run_async_command(context, command)
@given('the user runs workload under "{dir}" with connection "{dbconn}"')
@when('the user runs workload under "{dir}" with connection "{dbconn}"')
def impl(context, dir, dbconn):
    """Execute every .sql file under *dir* through the given psql connection string."""
    sql_files = [f for f in os.listdir(dir) if f.endswith('.sql')]
    for sql_file in sql_files:
        run_command(context, '%s -f %s' % (dbconn, os.path.join(dir, sql_file)))
@given('the user modifies the external_table.sql file "{filepath}" with host "{HOST}" and port "{port}"')
@when('the user modifies the external_table.sql file "{filepath}" with host "{HOST}" and port "{port}"')
def impl(context, filepath, HOST, port):
    """Rewrite the external-table SQL file to point at HOST:port (host read from env)."""
    location = os.environ.get(HOST) + ':' + port
    modify_sql_file(filepath, location)
@given('the user starts the gpfdist on host "{HOST}" and port "{port}" in work directory "{dir}" from remote "{ctxt}"')
@then('the user starts the gpfdist on host "{HOST}" and port "{port}" in work directory "{dir}" from remote "{ctxt}"')
def impl(context, HOST, port, dir, ctxt):
    """Start a gpfdist instance on HOST:port serving files from *dir*.

    A non-absolute *dir* is treated as the name of an env var holding the path.
    """
    host = os.environ.get(HOST)
    remote_gphome = os.environ.get('GPHOME')
    work_dir = dir if dir.startswith("/") else os.environ.get(dir)
    gp_source_file = os.path.join(remote_gphome, 'greenplum_path.sh')
    gpfdist = Gpfdist('gpfdist on host %s' % host, work_dir, port,
                      os.path.join(work_dir, 'gpfdist.pid'), int(ctxt), host, gp_source_file)
    gpfdist.startGpfdist()
@given('the user stops the gpfdist on host "{HOST}" and port "{port}" in work directory "{dir}" from remote "{ctxt}"')
@then('the user stops the gpfdist on host "{HOST}" and port "{port}" in work directory "{dir}" from remote "{ctxt}"')
def impl(context, HOST, port, dir, ctxt):
    """Stop the gpfdist instance on HOST:port that serves files from *dir*.

    A non-absolute *dir* is treated as the name of an env var holding the path.
    """
    host = os.environ.get(HOST)
    remote_gphome = os.environ.get('GPHOME')
    work_dir = dir if dir.startswith("/") else os.environ.get(dir)
    gp_source_file = os.path.join(remote_gphome, 'greenplum_path.sh')
    gpfdist = Gpfdist('gpfdist on host %s' % host, work_dir, port,
                      os.path.join(work_dir, 'gpfdist.pid'), int(ctxt), host, gp_source_file)
    gpfdist.cleanupGpfdist()
@then('{command} should print "{err_msg}" error message')
def impl(context, command, err_msg):
    """Verify *err_msg* appears in the captured stderr."""
    check_err_msg(context, err_msg)
@then('{command} {state} print "{err_msg}" error message')
def impl(context, command, state, err_msg):
    """Assert presence ("should") or absence ("should not") of *err_msg* in stderr.

    Any other *state* wording now raises instead of silently passing.
    """
    if state == "should not":
        check_string_not_present_err_msg(context, err_msg)
    elif state == "should":
        check_err_msg(context, err_msg)
    else:
        raise Exception('Invalid state "%s" in step; use "should" or "should not"' % state)
@when('{command} should print "{out_msg}" escaped to stdout')
@then('{command} should print "{out_msg}" escaped to stdout')
@then('{command} should print a "{out_msg}" escaped warning')
def impl(context, command, out_msg):
    """Verify the regex-escaped *out_msg* appears in captured stdout."""
    check_stdout_msg(context, out_msg, True)
@when('{command} should print "{out_msg}" to stdout')
@then('{command} should print "{out_msg}" to stdout')
@then('{command} should print a "{out_msg}" warning')
def impl(context, command, out_msg):
    """Verify *out_msg* appears in captured stdout."""
    check_stdout_msg(context, out_msg)
@then('{command} should not print "{out_msg}" to stdout')
def impl(context, command, out_msg):
    """Verify *out_msg* is absent from captured stdout."""
    check_string_not_present_stdout(context, out_msg)
@then('{command} should print "{out_msg}" to stdout {num} times')
def impl(context, command, out_msg, num):
    """Verify *out_msg* occurs on exactly *num* lines of captured stdout."""
    stripped_lines = [line.strip() for line in context.stdout_message.split('\n')]
    occurrences = sum(1 for line in stripped_lines if out_msg in line)
    if occurrences != int(num):
        raise Exception("Expected %s to occur %s times. Found %d. stdout: %s" % (out_msg, num, occurrences, stripped_lines))
def lines_matching_both(in_str, str_1, str_2):
    """Return the stripped lines of *in_str* that contain both substrings."""
    stripped = (raw.strip() for raw in in_str.split('\n'))
    return [line for line in stripped if str_1 in line and str_2 in line]
@then('check if {command} ran "{called_command}" {num} times with args "{args}"')
def impl(context, command, called_command, num, args):
    """Verify the log shows *called_command* run *num* times with *args*."""
    run_marker = "Running Command: %s" % called_command
    matches = lines_matching_both(context.stdout_message, run_marker, args)
    if len(matches) != int(num):
        raise Exception("Expected %s to occur with %s args %s times. Found %d. \n %s"
                        % (called_command, args, num, len(matches), context.stdout_message))
@then('{command} should only spawn up to {num} workers in WorkerPool')
def impl(context, command, num):
    """Verify no WorkerPool for *command* was initialized with more than *num* workers.

    *num* arrives from behave as a string; it is converted once up front —
    the original passed the raw string to a "%d" format in the failure path,
    which raised TypeError instead of the intended exception.
    """
    max_workers = int(num)
    workerPool_out = "WorkerPool() initialized with"
    matches = lines_matching_both(context.stdout_message, workerPool_out, command)
    for matched_line in matches:
        iw_re = re.search(r'initialized with (\d+) workers', matched_line)
        init_workers = int(iw_re.group(1))
        if init_workers > max_workers:
            raise Exception("Expected Workerpool for %s to be initialized with %d workers. Found %d. \n %s"
                            % (command, max_workers, init_workers, context.stdout_message))
@given('{command} should return a return code of {ret_code}')
@when('{command} should return a return code of {ret_code}')
@then('{command} should return a return code of {ret_code}')
def impl(context, command, ret_code):
    """Verify the last command's return code."""
    check_return_code(context, ret_code)
@given('the segments are synchronized')
@when('the segments are synchronized')
@then('the segments are synchronized')
def impl(context):
    """Poll (up to 600 seconds) for all mirrors to reach synchronized state."""
    attempts, pause = 60, 10
    for _ in range(attempts):
        if are_segments_synchronized():
            return
        time.sleep(pause)
    raise Exception('segments are not in sync after %d seconds' % (attempts * pause))
@then('verify that there is no table "{tablename}" in "{dbname}"')
def impl(context, tablename, dbname):
    """Fail if *tablename* still exists in *dbname* (env vars in names are expanded)."""
    db = replace_special_char_env(dbname)
    table = replace_special_char_env(tablename)
    if check_table_exists(context, dbname=db, table_name=table):
        raise Exception("Table '%s' still exists when it should not" % table)
@then('verify that there is a "{table_type}" table "{tablename}" in "{dbname}"')
def impl(context, table_type, tablename, dbname):
    """Fail unless *tablename* of *table_type* exists in *dbname*."""
    exists = check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type)
    if not exists:
        raise Exception("Table '%s' of type '%s' does not exist when expected" % (tablename, table_type))
@then('verify that there is a "{table_type}" table "{tablename}" in "{dbname}" with "{numrows}" rows')
def impl(context, table_type, tablename, dbname, numrows):
    """Verify *tablename* exists with the given type and exact row count."""
    if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type):
        raise Exception("Table '%s' of type '%s' does not exist when expected" % (tablename, table_type))
    expected = int(numrows)
    conn = dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)
    try:
        rowcount = dbconn.querySingleton(conn, "SELECT count(*) FROM %s" % tablename)
        if rowcount != expected:
            raise Exception("Expected to find %d rows in table %s, found %d" % (expected, tablename, rowcount))
    finally:
        conn.close()
@then(
    'data for partition table "{table_name}" with leaf partition distributed across all segments on "{dbname}"')
def impl(context, table_name, dbname):
    """Verify leaf-partition data distribution across all segments."""
    validate_part_table_data_on_segments(context, table_name, dbname)
@then('verify that table "{tname}" in "{dbname}" has "{nrows}" rows')
def impl(context, tname, dbname, nrows):
    """Verify the exact row count of *tname* in *dbname*."""
    check_row_count(context, tname, dbname, int(nrows))
@given('schema "{schema_list}" exists in "{dbname}"')
@then('schema "{schema_list}" exists in "{dbname}"')
def impl(context, schema_list, dbname):
    """(Re)create each comma-separated schema in *dbname* from a clean slate."""
    for raw_name in schema_list.split(','):
        schema = raw_name.strip()
        drop_schema_if_exists(context, schema, dbname)
        create_schema(context, schema, dbname)
@then('the temporary file "{filename}" is removed')
def impl(context, filename):
    """Delete *filename* if present; no-op when it is already gone.

    The step pattern must name the {filename} parameter so behave can bind
    it to the function argument.
    """
    if os.path.exists(filename):
        os.remove(filename)
def create_table_file_locally(context, filename, table_list, location=None):
    """Write '|'-separated table names from *table_list*, one per line, to
    location/filename and record the resulting path on context.filename.

    *location* defaults to the current working directory at call time; the
    original used a `location=os.getcwd()` default, which froze the directory
    at import time (the mutable-default-style pitfall for computed defaults).
    """
    if location is None:
        location = os.getcwd()
    tables = table_list.split('|')
    file_path = os.path.join(location, filename)
    with open(file_path, 'w') as fp:
        for t in tables:
            fp.write(t + '\n')
    context.filename = file_path
@given('there is a file "{filename}" with tables "{table_list}"')
@then('there is a file "{filename}" with tables "{table_list}"')
def impl(context, filename, table_list):
    """Create *filename* locally containing the '|'-separated *table_list*.

    The step pattern must name the {filename} parameter so behave can bind
    it to the function argument.
    """
    create_table_file_locally(context, filename, table_list)
@given('the row "{row_values}" is inserted into "{table}" in "{dbname}"')
def impl(context, row_values, table, dbname):
    """Insert the given row values into *table* in *dbname*."""
    insert_row(context, row_values, table, dbname)
@then('verify that database "{dbname}" does not exist')
def impl(context, dbname):
    """Fail if *dbname* appears in pg_database.

    The original tested `dbname in dbs` against the raw query cursor, whose
    items are row tuples, so a bare database name never matched and the step
    could never fail. Compare against the extracted datname column instead.
    """
    conn = dbconn.connect(dbconn.DbURL(dbname='template1'), unsetSearchPath=False)
    try:
        sql = """SELECT datname FROM pg_database"""
        existing = [row[0] for row in dbconn.query(conn, sql).fetchall()]
        if dbname in existing:
            raise Exception('Database exists when it shouldnt "%s"' % dbname)
    finally:
        conn.close()
@given('the file "{filepath}" exists under coordinator data directory')
def impl(context, filepath):
    """Touch *filepath* under the coordinator data dir, creating parent dirs as needed."""
    full_path = os.path.join(coordinator_data_dir, filepath)
    parent = os.path.dirname(full_path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    open(full_path, 'a').close()
@then('the file "{filepath}" does not exist under standby coordinator data directory')
def impl(context, filepath):
    """Fail if *filepath* is visible under the standby's data directory."""
    full_path = os.path.join(context.standby_data_dir, filepath)
    try:
        # ls fails (and validateAfter raises) when the file is absent,
        # which is the expected outcome here
        run_command_remote(context,
                           "ls -al %s" % full_path,
                           context.standby_hostname,
                           os.getenv("GPHOME") + '/greenplum_path.sh',
                           'export COORDINATOR_DATA_DIRECTORY=%s' % context.standby_data_dir,
                           validateAfter=True)
    except:
        pass
    else:
        raise Exception("file '%s' should not exist in standby coordinator data directory" % full_path)
@given('results of the sql "{sql}" db "{dbname}" are stored in the context')
@when( 'results of the sql "{sql}" db "{dbname}" are stored in the context')
def impl(context, sql, dbname):
    """Run *sql* against *dbname* and stash every row on context.stored_sql_results."""
    context.stored_sql_results = []
    conn = dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)
    try:
        context.stored_sql_results = dbconn.query(conn, sql).fetchall()
    finally:
        conn.close()
@then('validate that following rows are in the stored rows')
def impl(context):
    """Check that every row of the step's expected table appears somewhere in
    context.stored_rows.

    Comparison is positional and string-based. Boolean columns in the stored
    rows are matched against 't'/'f' literals from the feature table.
    """
    for row in context.table:
        found_match = False

        for stored_row in context.stored_rows:
            match_this_row = True

            for i in range(len(stored_row)):
                value = row[i]

                # stored booleans are compared against 't'/'f' in the feature table
                if isinstance(stored_row[i], bool):
                    value = str(True if row[i] == 't' else False)

                if value != str(stored_row[i]):
                    match_this_row = False
                    break

            if match_this_row:
                found_match = True
                break

        if not found_match:
            print(context.stored_rows)
            raise Exception("'%s' not found in stored rows" % row)
@then('validate that first column of first stored row has "{numlines}" lines of raw output')
def impl(context, numlines):
    """Verify the line count of the first cell of the first stored row."""
    expected = int(numlines)
    actual = len(context.stored_rows[0][0].splitlines())
    if actual != expected:
        raise Exception("Found %d of stored query result but expected %d records" % (actual, expected))
def get_standby_host():
    """Return the standby coordinator's hostname, or [] when none is configured."""
    segments = GpArray.initFromCatalog(dbconn.DbURL()).getDbList()
    standby_hosts = [seg.getSegmentHostName() for seg in segments if seg.isSegmentStandby()]
    # note: callers rely on the empty-list (not None) sentinel when no standby exists
    return standby_hosts[0] if standby_hosts else []
def run_gpinitstandby(context, hostname, port, standby_data_dir, options='', remote=False):
    """Run gpinitstandby, preparing a clean remote data directory when needed."""
    if '-n' in options:
        cmd = "gpinitstandby -a"
    elif remote:
        #if standby_data_dir exists on $hostname, remove it
        remove_dir(hostname, standby_data_dir)
        # create the data dir on $hostname
        create_dir(hostname, os.path.dirname(standby_data_dir))
        # We do not set port nor data dir here to test gpinitstandby's ability to autogather that info
        cmd = "gpinitstandby -a -s %s" % hostname
    else:
        cmd = "gpinitstandby -a -s %s -P %s -S %s" % (hostname, port, standby_data_dir)
    run_gpcommand(context, "%s %s" % (cmd, options))
@when('the user initializes a standby on the same host as coordinator with same port')
def impl(context):
    """Attempt gpinitstandby on the coordinator host reusing the coordinator port."""
    coordinator_host = get_coordinator_hostname('postgres')[0][0]
    standby_dir = tempfile.mkdtemp() + "/standby_datadir"
    run_gpinitstandby(context, coordinator_host, os.environ.get("PGPORT"), standby_dir)
@when('the user initializes a standby on the same host as coordinator and the same data directory')
def impl(context):
    """Attempt gpinitstandby on the coordinator host with a shifted port but the
    same (implicit) data directory."""
    coordinator_host = get_coordinator_hostname('postgres')[0][0]
    standby_port = int(os.environ.get("PGPORT")) + 1
    run_gpcommand(context, "gpinitstandby -a -s %s -P %d" % (coordinator_host, standby_port))
def init_standby(context, coordinator_hostname, options, segment_hostname):
    """Initialize a standby coordinator and record its coordinates on context.

    Expects context.standby_hostname/standby_port to be pre-populated (see
    get_standby_variables_and_set_on_context).
    """
    remote = (coordinator_hostname != segment_hostname)
    # -n option assumes gpinitstandby already ran and put standby in catalog
    if "-n" not in options:
        context.standby_data_dir = (coordinator_data_dir if remote
                                    else tempfile.mkdtemp() + "/standby_datadir")
    run_gpinitstandby(context, context.standby_hostname, context.standby_port,
                      context.standby_data_dir, options, remote)
    context.coordinator_hostname = coordinator_hostname
    context.coordinator_port = os.environ.get("PGPORT")
    context.standby_was_initialized = True
@when('running gpinitstandby on host "{coordinator}" to create a standby on host "{standby}"')
@given('running gpinitstandby on host "{coordinator}" to create a standby on host "{standby}"')
def impl(context, coordinator, standby):
    """Create a standby on *standby* by running gpinitstandby remotely on
    *coordinator*, recording both hosts' coordinates on context."""
    # XXX This code was cribbed from init_standby and modified to support remote
    # execution.
    context.coordinator_hostname = coordinator
    context.standby_hostname = standby
    context.standby_port = os.environ.get("PGPORT")
    context.standby_data_dir = coordinator_data_dir

    # start from a clean data directory on the standby host
    remove_dir(standby, context.standby_data_dir)
    create_dir(standby, os.path.dirname(context.standby_data_dir))

    # We do not set port nor data dir here to test gpinitstandby's ability to autogather that info
    cmd = "gpinitstandby -a -s %s" % standby

    run_command_remote(context,
                       cmd,
                       context.coordinator_hostname,
                       os.getenv("GPHOME") + '/greenplum_path.sh',
                       'export COORDINATOR_DATA_DIRECTORY=%s' % context.standby_data_dir)

    context.stdout_position = 0
    context.coordinator_port = os.environ.get("PGPORT")
    context.standby_was_initialized = True
def get_standby_variables_and_set_on_context(context):
    """Pick a standby host and port, store them on context, and return
    (coordinator_hostname, segment_hostname).

    On a multi-host cluster the standby goes on a segment host and can reuse
    the coordinator's port; on a single-host cluster it needs a fresh port.
    """
    dbname = 'postgres'
    with closing(dbconn.connect(dbconn.DbURL(port=os.environ.get("PGPORT"), dbname=dbname), unsetSearchPath=False)) as conn:
        query = """select distinct content, hostname from gp_segment_configuration order by content limit 2;"""
        cursor = dbconn.query(conn, query)
        try:
            _, coordinator_hostname = cursor.fetchone()
            _, segment_hostname = cursor.fetchone()
        except Exception:
            # fetchone() returns None when fewer than two rows exist; the
            # unpack then raises TypeError. The original bare `except:` also
            # swallowed SystemExit/KeyboardInterrupt, so it is narrowed here.
            raise Exception("Did not get two rows from query: %s" % query)

    if coordinator_hostname != segment_hostname:
        context.standby_hostname = segment_hostname
        context.standby_port = os.environ.get("PGPORT")
    else:
        context.standby_hostname = coordinator_hostname
        context.standby_port = get_open_port()

    return coordinator_hostname, segment_hostname
@when('the user runs gpinitstandby with options "{options}"')
@then('the user runs gpinitstandby with options "{options}"')
@given('the user runs gpinitstandby with options "{options}"')
def impl(context, options):
    """Choose a standby host/port and run gpinitstandby with *options*."""
    coordinator_hostname, standby_hostname = get_standby_variables_and_set_on_context(context)
    init_standby(context, coordinator_hostname, options, standby_hostname)
def _handle_sigpipe():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
@when('the user runs gpinitstandby and {action} the unreachable host prompt')
def impl(context, action):
    """Run gpinitstandby interactively, feeding canned answers to its
    unreachable-host prompt, and record stdout/stderr/return code on context.

    *action* is "accepts" or "rejects". Fixed: the invalid-action message was
    missing its closing parenthesis.
    """
    coordinator_hostname, standby_hostname = get_standby_variables_and_set_on_context(context)
    remote = (coordinator_hostname != standby_hostname)

    context.coordinator_hostname = coordinator_hostname
    context.coordinator_port = os.environ.get("PGPORT")
    context.standby_was_initialized = True

    if action == "accepts":
        answers = "y\ny\n"
    elif action == "rejects":
        answers = "y\nn\n"
    else:
        raise Exception('Invalid action for the unreachable host prompt (valid options are "accepts" and "rejects")')

    if remote:
        context.standby_data_dir = coordinator_data_dir
        cmd = ["ssh", standby_hostname]
    else:
        context.standby_data_dir = tempfile.mkdtemp() + "/standby_datadir"
        cmd = ["bash", "-c"]

    cmd.append("printf '%s' | gpinitstandby -s %s -S %s -P %s" % (answers, standby_hostname, context.standby_data_dir, context.standby_port))
    # restore default SIGPIPE in the child so the printf pipe behaves normally
    p = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        preexec_fn=_handle_sigpipe,
    )

    context.stdout_message, context.stderr_message = p.communicate()
    context.ret_code = p.returncode
    if context.ret_code != 0:
        context.error_message = context.stderr_message
@when('the user runs gpactivatestandby with options "{options}"')
@then('the user runs gpactivatestandby with options "{options}"')
def impl(context, options):
    """Activate the standby coordinator by running gpactivatestandby there."""
    context.execute_steps('''Then the user runs command "gpactivatestandby -a %s" from standby coordinator''' % options)
    context.standby_was_activated = True
@then('gpintsystem logs should {contain} lines about running backout script')
def impl(context, contain):
    """Check gpinitsystem logs for the backout-script instruction; when found,
    capture the backout command on context.gpinit_backout_command."""
    string_to_find = 'Run command bash .*backout_gpinitsystem.* on coordinator to remove these changes$'
    run_command(context, "egrep '{}' ~/gpAdminLogs/gpinitsystem*log".format(string_to_find))
    if contain == "contain":
        if has_exception(context):
            raise context.exception
        backout_match = re.search('Run command(.*)on coordinator', context.stdout_message)
        context.gpinit_backout_command = backout_match.group(1)
    elif contain == "not contain":
        if not has_exception(context):
            raise Exception("Logs contain lines about running backout script")
    else:
        raise Exception("Incorrect step name, only use 'should contain' and 'should not contain'")
@then('the user runs the gpinitsystem backout script')
def impl(context):
    """Execute the backout command previously captured from the gpinitsystem logs."""
    run_command(context, context.gpinit_backout_command)
    if has_exception(context):
        raise context.exception
@when('the user runs command "{command}" from standby coordinator')
@then('the user runs command "{command}" from standby coordinator')
def impl(context, command):
    """Run *command* on the standby host with PGPORT pointed at the standby."""
    standby_cmd = "PGPORT=%s %s" % (context.standby_port, command)
    run_command_remote(context,
                       standby_cmd,
                       context.standby_hostname,
                       os.getenv("GPHOME") + '/greenplum_path.sh',
                       'export COORDINATOR_DATA_DIRECTORY=%s' % context.standby_data_dir,
                       validateAfter=False)
@when('the coordinator goes down')
@then('the coordinator goes down')
def impl(context):
    """Immediately stop the local coordinator."""
    stop_cmd = CoordinatorStop("Stopping Coordinator", coordinator_data_dir, mode='immediate')
    stop_cmd.run()
@when('the standby coordinator goes down')
def impl(context):
    """Immediately stop the standby coordinator on its remote host."""
    stop_cmd = CoordinatorStop("Stopping Coordinator Standby", context.standby_data_dir, mode='immediate',
                               ctxt=REMOTE, remoteHost=context.standby_hostname)
    stop_cmd.run(validateAfter=True)
@when('the coordinator goes down on "{host}"')
def impl(context, host):
    """Immediately stop the coordinator instance running on *host*."""
    # The stop targets the coordinator data directory, not a standby; the
    # original label "Stopping Coordinator Standby" was a copy-paste leftover.
    coordinator = CoordinatorStop("Stopping Coordinator", coordinator_data_dir, mode='immediate', ctxt=REMOTE,
                                  remoteHost=host)
    coordinator.run(validateAfter=True)
@then('clean up and revert back to original coordinator')
def impl(context):
    """Re-create a standby on the original coordinator host, stop the acting
    coordinator, and activate the new standby so the original host serves as
    coordinator again."""
    # TODO: think about preserving the coordinator data directory for debugging
    shutil.rmtree(coordinator_data_dir, ignore_errors=True)

    if context.coordinator_hostname != context.standby_hostname:
        # We do not set port nor data dir here to test gpinitstandby's ability to autogather that info
        cmd = "gpinitstandby -a -s %s" % context.coordinator_hostname
    else:
        cmd = "gpinitstandby -a -s %s -P %s -S %s" % (context.coordinator_hostname, context.coordinator_port, coordinator_data_dir)
    context.execute_steps('''Then the user runs command "%s" from standby coordinator''' % cmd)

    # stop the currently-acting coordinator (running from the standby data dir)
    coordinator = CoordinatorStop("Stopping current coordinator", context.standby_data_dir, mode='immediate', ctxt=REMOTE,
                                  remoteHost=context.standby_hostname)
    coordinator.run()

    # promote the freshly-created standby on the original coordinator host
    cmd = "gpactivatestandby -a -d %s" % coordinator_data_dir
    run_gpcommand(context, cmd)
# from https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python/2838309#2838309
def get_open_port():
    """Ask the OS for a currently-free TCP port by binding port 0, then release it."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(("", 0))
        sock.listen(1)
        return sock.getsockname()[1]
    finally:
        sock.close()
@given('"{path}" has its permissions set to "{perm}"')
def impl(context, path, perm):
    """chmod *path* to octal *perm*, saving the prior mode on context so
    environment.py can restore it after the scenario."""
    target = os.path.expandvars(path)
    if not os.path.exists(target):
        raise Exception('Path does not exist! "%s"' % target)
    # keep it as a number that has a meaningful representation in octal
    previous_mode = os.stat(target).st_mode
    # accept string input with octal semantics and convert to a raw number
    os.chmod(target, int(perm, 8))
    context.path_for_which_to_restore_the_permissions = target
    context.permissions_to_restore_path_to = previous_mode
@then('rely on environment.py to restore path permissions')
def impl(context):
    """Intentional no-op: the actual restore happens in environment.py cleanup."""
    print("go look in environment.py to see how it uses the path and permissions on context to make sure it's cleaned up")
@when('the user runs pg_controldata against the standby data directory')
def impl(context):
    """Run pg_controldata on the standby host against its data directory."""
    run_command_remote(context,
                       "pg_controldata " + context.standby_data_dir,
                       context.standby_hostname,
                       os.getenv("GPHOME") + '/greenplum_path.sh',
                       'export COORDINATOR_DATA_DIRECTORY=%s' % context.standby_data_dir)
def _process_exists(pid, host):
    """
    Returns True if a process of the given PID exists on the given host, and
    False otherwise. If host is None, this check is done locally instead of
    remotely.
    """
    if host is None:
        # Local case is easy.
        return psutil.pid_exists(pid)
    # Remote case: `ps -p` exits 0 only when the pid is alive.
    remote_check = Command(name="check for pid %d" % pid,
                           cmdStr="ps -p %d > /dev/null" % pid,
                           ctxt=REMOTE,
                           remoteHost=host)
    remote_check.run()
    return remote_check.get_return_code() == 0
@given('user stops all {segment_type} processes')
@when('user stops all {segment_type} processes')
@then('user stops all {segment_type} processes')
def stop_all_primary_or_mirror_segments(context, segment_type):
    """Fast-stop every non-coordinator segment of the requested role."""
    if segment_type not in ("primary", "mirror"):
        raise Exception("Expected segment_type to be 'primary' or 'mirror', but found '%s'." % segment_type)
    wanted_role = ROLE_PRIMARY if segment_type == 'primary' else ROLE_MIRROR
    stop_segments(context, lambda seg: seg.getSegmentRole() == wanted_role and seg.content != -1)
@given('the {role} on content {contentID} is stopped')
def stop_segments_on_contentID(context, role, contentID):
    """Fast-stop the primary or mirror that serves the given content id."""
    if role not in ("primary", "mirror"):
        raise Exception("Expected segment_type to be 'primary' or 'mirror', but found '%s'." % role)
    wanted_role = ROLE_PRIMARY if role == 'primary' else ROLE_MIRROR
    wanted_content = int(contentID)
    stop_segments(context, lambda seg: seg.getSegmentRole() == wanted_role and seg.content == wanted_content)
# where_clause is a lambda that takes a segment to select what segments to stop
def stop_segments(context, where_clause):
    """Run `pg_ctl stop -m fast` over ssh on every segment matching where_clause."""
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    for seg in filter(where_clause, gparray.getDbList()):
        # For demo_cluster tests that run on the CI gives the error 'bash: pg_ctl: command not found'
        # Thus, need to add pg_ctl to the path when ssh'ing to a demo cluster.
        subprocess.check_call(['ssh', seg.getSegmentHostName(),
                               'source %s/greenplum_path.sh && pg_ctl stop -m fast -D %s -w' % (
                                   pipes.quote(os.environ.get("GPHOME")), pipes.quote(seg.getSegmentDataDirectory()))
                               ])
@given('user immediately stops all {segment_type} processes')
@when('user immediately stops all {segment_type} processes')
@then('user immediately stops all {segment_type} processes')
def stop_all_primary_or_mirror_segments(context, segment_type):
    """Immediate-stop every non-coordinator segment of the requested role."""
    if segment_type not in ("primary", "mirror"):
        raise Exception("Expected segment_type to be 'primary' or 'mirror', but found '%s'." % segment_type)
    wanted_role = ROLE_PRIMARY if segment_type == 'primary' else ROLE_MIRROR
    stop_segments_immediate(context, lambda seg: seg.getSegmentRole() == wanted_role and seg.content != -1)
# where_clause is a lambda that takes a segment to select what segments to stop
def stop_segments_immediate(context, where_clause):
    """Run `pg_ctl stop -m immediate` over ssh on every matching segment."""
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    for seg in filter(where_clause, gparray.getDbList()):
        # For demo_cluster tests that run on the CI gives the error 'bash: pg_ctl: command not found'
        # Thus, need to add pg_ctl to the path when ssh'ing to a demo cluster.
        subprocess.check_call(['ssh', seg.getSegmentHostName(),
                               'source %s/greenplum_path.sh && pg_ctl stop -m immediate -D %s -w' % (
                                   pipes.quote(os.environ.get("GPHOME")), pipes.quote(seg.getSegmentDataDirectory()))
                               ])
@given('user can start transactions')
@when('user can start transactions')
@then('user can start transactions')
def impl(context):
    # Block until the cluster accepts new transactions (e.g. after a failover).
    wait_for_unblocked_transactions(context)
@given('the environment variable "{var}" is set to "{val}"')
def impl(context, var, val):
    # Save the previous value (None if unset) so it can be restored later.
    # NOTE(review): only one prior value is kept; using this step twice in a
    # scenario overwrites context.env_var — confirm scenarios set one var only.
    context.env_var = os.environ.get(var)
    os.environ[var] = val
@given('below sql is executed in "{dbname}" db')
@when('below sql is executed in "{dbname}" db')
def impl(context, dbname):
    """Execute the step's multi-line text block as SQL against *dbname*."""
    execute_sql(dbname, context.text)
@given('sql "{sql}" is executed in "{dbname}" db')
@when('sql "{sql}" is executed in "{dbname}" db')
@then('sql "{sql}" is executed in "{dbname}" db')
def impl(context, sql, dbname):
execute_sql(dbname, sql)
@when('execute following sql in db "{dbname}" and store result in the context')
def impl(context, dbname):
    """Run the step's text as a query and stash all rows on context.stored_rows."""
    context.stored_rows = []
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        cursor = dbconn.query(conn, context.text)
        context.stored_rows = cursor.fetchall()
@when('execute sql "{sql}" in db "{dbname}" and store result in the context')
def impl(context, sql, dbname):
    """Run the inline query and stash all result rows on context.stored_rows."""
    context.stored_rows = []
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        cursor = dbconn.query(conn, sql)
        context.stored_rows = cursor.fetchall()
@then('validate that "{message}" is in the stored rows')
def impl(context, message):
    """Pass if any column of any previously-stored row contains *message*."""
    for row in context.stored_rows:
        for field in row:
            if message in field:
                return
    # Dump what we actually saw before failing, to help debugging.
    print(context.stored_rows)
    print(message)
    raise Exception("'%s' not found in stored rows" % message)
@then('verify that file "{filename}" exists under "{path}"')
def impl(context, filename, path):
    """Assert that <path>/<filename> exists after environment-variable expansion.

    Fix: the step pattern previously lacked the {filename} placeholder, so
    behave could never bind the function's filename argument; also corrected
    the error-message grammar.
    """
    fullpath = "%s/%s" % (path, filename)
    fullpath = os.path.expandvars(fullpath)
    if not os.path.exists(fullpath):
        raise Exception('file "%s" does not exist' % fullpath)
@given('waiting "{second}" seconds')
@when('waiting "{second}" seconds')
@then('waiting "{second}" seconds')
def impl(context, second):
time.sleep(float(second))
def get_opened_files(filename, pidfile):
    # Count how many open file descriptors of the process whose pid is stored
    # in *pidfile* refer to *filename*; returns the (exit_status, output)
    # tuple from the shell pipeline.
    cmd = "PATH=$PATH:/usr/bin:/usr/sbin lsof -p `cat %s` | grep %s | wc -l" % (
        pidfile, filename)
    return subprocess.getstatusoutput(cmd)
@when('table "{tablename}" is dropped in "{dbname}"')
@then('table "{tablename}" is dropped in "{dbname}"')
@given('table "{tablename}" is dropped in "{dbname}"')
def impl(context, tablename, dbname):
drop_table_if_exists(context, table_name=tablename, dbname=dbname)
@given('all the segments are running')
@when('all the segments are running')
@then('all the segments are running')
def impl(context):
    """Fail unless every segment in the cluster is currently up."""
    if are_segments_running():
        return
    raise Exception("all segments are not currently running")
@given('the "{seg}" segment information is saved')
@when('the "{seg}" segment information is saved')
@then('the "{seg}" segment information is saved')
def impl(context, seg):
gparray = GpArray.initFromCatalog(dbconn.DbURL())
if seg == "primary":
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
context.pseg = primary_segs[0]
context.pseg_data_dir = context.pseg.getSegmentDataDirectory()
context.pseg_hostname = context.pseg.getSegmentHostName()
context.pseg_dbid = context.pseg.getSegmentDbId()
elif seg == "mirror":
mirror_segs = [seg for seg in gparray.getDbList() if seg.isSegmentMirror()]
context.mseg = mirror_segs[0]
context.mseg_hostname = context.mseg.getSegmentHostName()
context.mseg_dbid = context.mseg.getSegmentDbId()
context.mseg_data_dir = context.mseg.getSegmentDataDirectory()
@given('the cluster configuration has no segments where "{filter}"')
def impl(context, filter):
    """Poll (forcing FTS probes) until no gp_segment_configuration row matches
    *filter*, failing after roughly MAX_DURATION seconds."""
    SLEEP_PERIOD = 5
    MAX_DURATION = 300
    MAX_TRIES = MAX_DURATION // SLEEP_PERIOD
    num_tries = 0
    # Non-zero sentinel so the loop runs at least once.
    num_matching = 10
    while num_matching and num_tries < MAX_TRIES:
        num_tries += 1
        time.sleep(SLEEP_PERIOD)
        context.execute_steps(u'''
        Given the user runs psql with "-c 'SELECT gp_request_fts_probe_scan()'" against database "postgres"
        ''')
        with closing(dbconn.connect(dbconn.DbURL(), unsetSearchPath=False)) as conn:
            sql = "SELECT count(*) FROM gp_segment_configuration WHERE %s" % filter
            num_matching = dbconn.querySingleton(conn, sql)
    if num_matching:
        raise Exception("could not achieve desired state")
    # Sanity-check that the cluster still accepts a trivial transaction.
    context.execute_steps(u'''
    Given the user runs psql with "-c 'BEGIN; CREATE TEMP TABLE tempt(a int); COMMIT'" against database "postgres"
    ''')
@given('the cluster configuration is saved for "{when}"')
@then('the cluster configuration is saved for "{when}"')
def impl(context, when):
    """Snapshot the current GpArray under the label *when* for later comparison."""
    try:
        snapshots = context.saved_array
    except AttributeError:
        snapshots = context.saved_array = {}
    snapshots[when] = GpArray.initFromCatalog(dbconn.DbURL())
@when('we run a sample background script to generate a pid on "{seg}" segment')
def impl(context, seg):
    """Copy a helper script to the target host, run it in the background, and
    record its pid on context.bg_pid for later kill/verify steps."""
    # NOTE(review): hostname is only assigned for seg in ("primary", "smdw");
    # any other value raises NameError below — confirm only these are used.
    if seg == "primary":
        if not hasattr(context, 'pseg_hostname'):
            raise Exception("primary seg host is not saved in the context")
        hostname = context.pseg_hostname
    elif seg == "smdw":
        if not hasattr(context, 'standby_host'):
            raise Exception("Standby host is not saved in the context")
        hostname = context.standby_host
    filename = os.path.join(os.getcwd(), './test/behave/mgmt_utils/steps/data/pid_background_script.py')
    cmd = Command(name="Remove background script on remote host", cmdStr='rm -f /tmp/pid_background_script.py',
                  remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
    cmd = Command(name="Copy background script to remote host", cmdStr='scp %s %s:/tmp' % (filename, hostname))
    cmd.run(validateAfter=True)
    cmd = Command(name="Run Bg process to save pid",
                  cmdStr='sh -c "/tmp/pid_background_script.py /tmp/bgpid" &>/dev/null &', remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
    # Wait for the script to write its pid file, then read it back.
    cmd = Command(name="get Bg process PID",
                  cmdStr='until [ -f /tmp/bgpid ]; do sleep 1; done; cat /tmp/bgpid', remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
    context.bg_pid = cmd.get_stdout()
    if not context.bg_pid:
        raise Exception("Unable to obtain the pid of the background script. Seg Host: %s, get_results: %s" %
                        (hostname, cmd.get_stdout()))
@when('the background pid is killed on "{seg}" segment')
@then('the background pid is killed on "{seg}" segment')
def impl(context, seg):
    """SIGKILL the background-script pid saved on context.bg_pid, on the host
    chosen by *seg* ("primary" or "smdw")."""
    # NOTE(review): hostname stays unassigned for other seg values (NameError).
    if seg == "primary":
        if not hasattr(context, 'pseg_hostname'):
            raise Exception("primary seg host is not saved in the context")
        hostname = context.pseg_hostname
    elif seg == "smdw":
        if not hasattr(context, 'standby_host'):
            raise Exception("Standby host is not saved in the context")
        hostname = context.standby_host
    cmd = Command(name="killbg pid", cmdStr='kill -9 %s' % context.bg_pid, remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
@when('we generate the postmaster.pid file with the background pid on "{seg}" segment')
def impl(context, seg):
    """Restore the saved postmaster.pid backup on the segment, then rewrite its
    first line to the background-script pid (context.bg_pid) so the pid looks
    like a live, foreign process."""
    if seg == "primary":
        if not hasattr(context, 'pseg_hostname'):
            raise Exception("primary seg host is not saved in the context")
        hostname = context.pseg_hostname
        data_dir = context.pseg_data_dir
    elif seg == "smdw":
        if not hasattr(context, 'standby_host'):
            raise Exception("Standby host is not saved in the context")
        hostname = context.standby_host
        data_dir = context.standby_host_data_dir
    pid_file = os.path.join(data_dir, 'postmaster.pid')
    pid_file_orig = pid_file + '.orig'
    # Start from the pristine backup made by the "is saved" step.
    cmd = Command(name="Copy pid file", cmdStr='cp %s %s' % (pid_file_orig, pid_file), remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
    # Edit locally: fetch the file, replace line 1 with the bg pid, push it back.
    cpCmd = Command(name='copy pid file to coordinator for editing', cmdStr='scp %s:%s /tmp' % (hostname, pid_file))
    cpCmd.run(validateAfter=True)
    with open('/tmp/postmaster.pid', 'r') as fr:
        lines = fr.readlines()
    lines[0] = "%s\n" % context.bg_pid
    with open('/tmp/postmaster.pid', 'w') as fw:
        fw.writelines(lines)
    cpCmd = Command(name='copy pid file to segment after editing',
                    cmdStr='scp /tmp/postmaster.pid %s:%s' % (hostname, pid_file))
    cpCmd.run(validateAfter=True)
@when('we generate the postmaster.pid file with a non running pid on the same "{seg}" segment')
def impl(context, seg):
    """Restore the saved postmaster.pid backup on the segment, then rewrite its
    first line to a pid that is no longer running on that host."""
    if seg == "primary":
        data_dir = context.pseg_data_dir
        hostname = context.pseg_hostname
    elif seg == "mirror":
        data_dir = context.mseg_data_dir
        hostname = context.mseg_hostname
    elif seg == "smdw":
        if not hasattr(context, 'standby_host'):
            raise Exception("Standby host is not saved in the context")
        hostname = context.standby_host
        data_dir = context.standby_host_data_dir
    pid_file = os.path.join(data_dir, 'postmaster.pid')
    pid_file_orig = pid_file + '.orig'
    cmd = Command(name="Copy pid file", cmdStr='cp %s %s' % (pid_file_orig, pid_file), remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
    cpCmd = Command(name='copy pid file to coordinator for editing', cmdStr='scp %s:%s /tmp' % (hostname, pid_file))
    cpCmd.run(validateAfter=True)
    # Since Command creates a short-lived SSH session, we observe the PID given
    # a throw-away remote process. Assume that the PID is unused and available on
    # the remote in the near future.
    # This pid is no longer associated with a
    # running process and won't be recycled for long enough that tests
    # have finished.
    cmd = Command(name="get non-existing pid", cmdStr="echo $$", remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
    pid = cmd.get_results().stdout.strip()
    # Edit locally and push back, as in the background-pid variant above.
    with open('/tmp/postmaster.pid', 'r') as fr:
        lines = fr.readlines()
    lines[0] = "%s\n" % pid
    with open('/tmp/postmaster.pid', 'w') as fw:
        fw.writelines(lines)
    cpCmd = Command(name='copy pid file to segment after editing',
                    cmdStr='scp /tmp/postmaster.pid %s:%s' % (hostname, pid_file))
    cpCmd.run(validateAfter=True)
@when('the user starts one "{seg}" segment')
def impl(context, seg):
    """Start the previously-saved primary or mirror segment in isolation
    (mirrorless, not part of a coordinated cluster start)."""
    if seg == "primary":
        dbid = context.pseg_dbid
        hostname = context.pseg_hostname
        segment = context.pseg
    elif seg == "mirror":
        dbid = context.mseg_dbid
        hostname = context.mseg_hostname
        segment = context.mseg
    segStartCmd = SegmentStart(name="Starting new segment dbid %s on host %s." % (str(dbid), hostname)
                               , gpdb=segment
                               , numContentsInCluster=0  # Starting seg on it's own.
                               , era=None
                               , mirrormode=MIRROR_MODE_MIRRORLESS
                               , utilityMode=False
                               , ctxt=REMOTE
                               , remoteHost=hostname
                               , pg_ctl_wait=True
                               , timeout=300)
    segStartCmd.run(validateAfter=True)
@when('the postmaster.pid file on "{seg}" segment is saved')
def impl(context, seg):
    """Back up the segment's postmaster.pid to postmaster.pid.orig so later
    steps can restore/modify it."""
    if seg == "primary":
        data_dir = context.pseg_data_dir
        hostname = context.pseg_hostname
    elif seg == "mirror":
        data_dir = context.mseg_data_dir
        hostname = context.mseg_hostname
    elif seg == "smdw":
        if not hasattr(context, 'standby_host'):
            raise Exception("Standby host is not saved in the context")
        hostname = context.standby_host
        data_dir = context.standby_host_data_dir
    pid_file = os.path.join(data_dir, 'postmaster.pid')
    pid_file_orig = pid_file + '.orig'
    cmd = Command(name="Copy pid file", cmdStr='cp %s %s' % (pid_file, pid_file_orig), remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
@then('the backup pid file is deleted on "{seg}" segment')
def impl(context, seg):
    """Remove the postmaster.pid.orig backup created by the save step."""
    if seg == "primary":
        data_dir = context.pseg_data_dir
        hostname = context.pseg_hostname
    elif seg == "mirror":
        data_dir = context.mseg_data_dir
        hostname = context.mseg_hostname
    elif seg == "smdw":
        data_dir = context.standby_host_data_dir
        hostname = context.standby_host
    cmd = Command(name="Remove pid file", cmdStr='rm -f %s' % (os.path.join(data_dir, 'postmaster.pid.orig')),
                  remoteHost=hostname, ctxt=REMOTE)
    cmd.run(validateAfter=True)
@given('the standby is not initialized')
@then('the standby is not initialized')
def impl(context):
    """Remove the standby coordinator if one exists, remembering it on context."""
    standby = get_standby_host()
    if not standby:
        return
    context.cluster_had_standby = True
    context.standby_host = standby
    run_gpcommand(context, 'gpinitstandby -ra')
@given('the catalog has a standby coordinator entry')
@then('verify the standby coordinator entries in catalog')
def impl(context):
    """Check that exactly one standby coordinator is registered in
    gp_segment_configuration and streaming in pg_stat_replication; save its
    dbid on context.standby_dbid."""
    check_segment_config_query = "SELECT * FROM gp_segment_configuration WHERE content = -1 AND role = 'm'"
    check_stat_replication_query = "SELECT * FROM pg_stat_replication"
    with closing(dbconn.connect(dbconn.DbURL(dbname='postgres'), unsetSearchPath=False)) as conn:
        segconfig = dbconn.query(conn, check_segment_config_query).fetchall()
        statrep = dbconn.query(conn, check_stat_replication_query).fetchall()
    if len(segconfig) != 1:
        raise Exception("gp_segment_configuration did not have standby coordinator")
    if len(statrep) != 1:
        raise Exception("pg_stat_replication did not have standby coordinator")
    # First column of the row is the dbid.
    context.standby_dbid = segconfig[0][0]
@then('verify the standby coordinator is now acting as coordinator')
def impl(context):
    """Connect to the (promoted) standby and verify its catalog row shows it
    acting as the primary coordinator under the dbid saved earlier."""
    check_segment_config_query = "SELECT * FROM gp_segment_configuration WHERE content = -1 AND role = 'p' AND preferred_role = 'p' AND dbid = %s" % context.standby_dbid
    with closing(dbconn.connect(dbconn.DbURL(hostname=context.standby_hostname, dbname='postgres', port=context.standby_port), unsetSearchPath=False)) as conn:
        segconfig = dbconn.query(conn, check_segment_config_query).fetchall()
    if len(segconfig) != 1:
        raise Exception("gp_segment_configuration did not have standby coordinator acting as new coordinator")
@then('verify that the schema "{schema_name}" exists in "{dbname}"')
def impl(context, schema_name, dbname):
    """Fail unless *schema_name* exists in database *dbname*."""
    if not check_schema_exists(context, schema_name, dbname):
        raise Exception("Schema '%s' does not exist in the database '%s'" % (schema_name, dbname))
@then('verify that the utility {utilname} ever does logging into the user\'s "{dirname}" directory')
def impl(context, utilname, dirname):
    """Assert that at least one <utilname>_*.log file exists under ~/<dirname>."""
    log_dir = "%s/%s" % (os.path.expanduser("~"), dirname)
    if not os.path.exists(log_dir):
        raise Exception('No such directory: %s' % log_dir)
    pattern = "%s/%s_*.log" % (log_dir, utilname)
    if not glob.glob(pattern):
        raise Exception('Logs matching "%s" were not created' % pattern)
@then('verify that a log was created by {utilname} in the "{dirname}" directory')
def impl(context, utilname, dirname):
    """Assert that at least one <utilname>_*.log file exists under *dirname*."""
    if not os.path.exists(dirname):
        raise Exception('No such directory: %s' % dirname)
    pattern = "%s/%s_*.log" % (dirname, utilname)
    if not glob.glob(pattern):
        raise Exception('Logs matching "%s" were not created' % pattern)
@then('drop the table "{tablename}" with connection "{dbconn}"')
def impl(context, tablename, dbconn):
    # *dbconn* here is a psql invocation string from the feature file, not the
    # dbconn module (the parameter deliberately shadows it in this scope).
    command = "%s -c \'drop table if exists %s\'" % (dbconn, tablename)
    run_gpcommand(context, command)
def _get_gpAdminLogs_directory():
return "%s/gpAdminLogs" % os.path.expanduser("~")
@given('an incomplete map file is created')
def impl(context):
    """Write a map file whose entries reference hosts that do not exist."""
    with open('/tmp/incomplete_map_file', 'w') as map_file:
        map_file.write('nonexistent_host,nonexistent_host')
@then('verify that function "{func_name}" exists in database "{dbname}"')
def impl(context, func_name, dbname):
    """Fail unless a function named *func_name* exists in *dbname*.

    Fix: the result was previously compared against the hard-coded name
    'test_function', so the step only ever worked for that one function, and
    an empty result set raised IndexError instead of a clear failure.
    """
    SQL = """SELECT proname FROM pg_proc WHERE proname = '%s';""" % func_name
    rows = getRows(dbname, SQL)
    if not rows or rows[0][0] != func_name:
        raise Exception('Function %s does not exist in %s"' % (func_name, dbname))
@then('verify that sequence "{seq_name}" last value is "{last_value}" in database "{dbname}"')
@when('verify that sequence "{seq_name}" last value is "{last_value}" in database "{dbname}"')
@given('verify that sequence "{seq_name}" last value is "{last_value}" in database "{dbname}"')
def impl(context, seq_name, last_value, dbname):
    """Check that the sequence's last_value equals the expected integer."""
    SQL = """SELECT last_value FROM %s;""" % seq_name
    actual = getRows(dbname, SQL)[0][0]
    if actual != int(last_value):
        raise Exception('Sequence %s last value is not %s in %s"' % (seq_name, last_value, dbname))
@given('the user runs the command "{cmd}" in the background')
@when('the user runs the command "{cmd}" in the background')
def impl(context, cmd):
    # Launch cmd on a worker thread, then give it a head start before the
    # scenario continues.
    _thread.start_new_thread(run_command, (context, cmd))
    time.sleep(10)
@given('the user runs the command "{cmd}" in the background without sleep')
@when('the user runs the command "{cmd}" in the background without sleep')
def impl(context, cmd):
    # Fire-and-forget variant: no delay before the next step runs.
    _thread.start_new_thread(run_command, (context, cmd))
# For any pg_hba.conf line with `host ... trust`, its address should only contain FQDN
@then('verify that the file "{filename}" contains FQDN only for trusted host')
def impl(context, filename):
    """Scan a pg_hba.conf-style file: every trusted `host` entry must use an
    FQDN address, not a CIDR (localhost CIDRs are exempt).

    Fix: the step pattern previously lacked the {filename} placeholder, so
    behave could not bind the function's filename argument.
    """
    with open(filename) as hba:
        for line in hba:
            contents = line.strip()
            # for example: host all all hostname trust
            if not (contents.startswith("host") and contents.endswith("trust")):
                continue
            tokens = contents.split()
            if len(tokens) != 5:
                raise Exception("failed to parse pg_hba.conf line '%s'" % contents)
            hostname = tokens[3]
            if "/" in hostname:
                # Exempt localhost. They are part of the stock config and harmless
                net = hostname.split("/")[0]
                if net == "127.0.0.1" or net == "::1":
                    continue
                raise Exception("'%s' is not valid FQDN" % hostname)
# For any pg_hba.conf line with `host ... trust`, its address should only contain CIDR
@then('verify that the file "{filename}" contains CIDR only for trusted host')
def impl(context, filename):
    """Scan a pg_hba.conf-style file: every trusted `host` entry must use a
    CIDR address (or samenet/samehost).

    Fix: the step pattern previously lacked the {filename} placeholder, so
    behave could not bind the function's filename argument.
    """
    with open(filename) as hba:
        for line in hba:
            contents = line.strip()
            # for example: host all all hostname trust
            if not (contents.startswith("host") and contents.endswith("trust")):
                continue
            tokens = contents.split()
            if len(tokens) != 5:
                raise Exception("failed to parse pg_hba.conf line '%s'" % contents)
            cidr = tokens[3]
            if "/" not in cidr and cidr not in ["samenet", "samehost"]:
                raise Exception("'%s' is not valid CIDR" % cidr)
@then('verify that the file "{filename}" contains the string "{output}"')
def impl(context, filename, output):
    """Print each line of *filename* for debugging, then delegate the actual
    check to check_stdout_msg against the scenario context.

    Fix: the step pattern previously lacked the {filename} placeholder, so
    behave could not bind the function's filename argument.
    """
    contents = ''
    with open(filename) as fr:
        for line in fr:
            contents = line.strip()
            print(contents)
    check_stdout_msg(context, output)
@then('verify that the last line of the file "{filename}" in the coordinator data directory {contain} the string "{output}"{escape}')
def impl(context, filename, contain, output, escape):
    """Parse the contain/escape phrasing and delegate to
    find_string_in_coordinator_data_directory.

    Fixes: the step pattern lacked the {filename} placeholder, and the error
    message named 'contains'/'does not contain' although the accepted inputs
    are 'should contain'/'should not contain'.
    """
    if contain == 'should contain':
        valuesShouldExist = True
    elif contain == 'should not contain':
        valuesShouldExist = False
    else:
        raise Exception("only 'should contain' and 'should not contain' are valid inputs")
    find_string_in_coordinator_data_directory(context, filename, output, valuesShouldExist, (escape == ' escaped'))
def find_string_in_coordinator_data_directory(context, filename, output, valuesShouldExist, escapeStr=False):
    """Regex-search the LAST line of <coordinator_data_dir>/<filename> for
    *output* and raise unless its presence matches *valuesShouldExist*.

    Note: the loop overwrites `contents` on every iteration, so only the
    file's final line is actually inspected — matching the "last line of the
    file" wording used by the calling steps.
    """
    contents = ''
    file_path = os.path.join(coordinator_data_dir, filename)
    with open(file_path) as f:
        for line in f:
            contents = line.strip()
    if escapeStr:
        # Treat *output* as a literal string rather than a regex.
        output = re.escape(output)
    pat = re.compile(output)
    if valuesShouldExist and (not pat.search(contents)):
        err_str = "Expected stdout string '%s' and found: '%s'" % (output, contents)
        raise Exception(err_str)
    if (not valuesShouldExist) and pat.search(contents):
        err_str = "Did not expect stdout string '%s' but found: '%s'" % (output, contents)
        raise Exception(err_str)
@given('verify that the file "{filename}" in the coordinator data directory has "{some}" line starting with "{output}"')
@then('verify that the file "{filename}" in the coordinator data directory has "{some}" line starting with "{output}"')
def impl(context, filename, some, output):
    """Assert presence ("some") or absence ("no") of a line in the coordinator
    data directory file that starts (after leading whitespace) with *output*.

    Fixes: the step pattern lacked the {filename} placeholder, and the regex
    prefix now uses a raw string to avoid the invalid "\\s" escape warning.
    """
    if some == 'some':
        valuesShouldExist = True
    elif some == 'no':
        valuesShouldExist = False
    else:
        raise Exception("only 'some' and 'no' are valid inputs")
    regexStr = r"^[\s]*" + output
    pat = re.compile(regexStr)
    file_path = os.path.join(coordinator_data_dir, filename)
    with open(file_path) as fr:
        for line in fr:
            contents = line.strip()
            match = pat.search(contents)
            if not valuesShouldExist:
                if match:
                    err_str = "Expected no stdout string '%s' and found: '%s'" % (regexStr, contents)
                    raise Exception(err_str)
            else:
                if match:
                    return
    if valuesShouldExist:
        err_str = "xx Expected stdout string '%s' and found: '%s'" % (regexStr, contents)
        raise Exception(err_str)
@given('verify that the file "{filename}" in each segment data directory has "{some}" line starting with "{output}"')
@then('verify that the file "{filename}" in each segment data directory has "{some}" line starting with "{output}"')
def impl(context, filename, some, output):
    """On every primary segment, `grep -c` the file for a line starting with
    *output* and assert presence ("some") or absence ("no").

    Fixes: the step pattern lacked the {filename} placeholder; the grep regex
    was built from "^[%s]*" with the inner %s never substituted (now an
    explicit whitespace class); Python 3 exceptions have no .message; and the
    bare `except` used to re-raise genuine failures as a "parse error".
    """
    conn = dbconn.connect(dbconn.DbURL(dbname='template1'), unsetSearchPath=False)
    try:
        curs = dbconn.query(conn, "SELECT hostname, datadir FROM gp_segment_configuration WHERE role='p' AND content > -1;")
        segment_info = [(row[0], row[1]) for row in curs.fetchall()]
    except Exception as e:
        raise Exception("Could not retrieve segment information: %s" % str(e))
    finally:
        conn.close()
    if some == 'some':
        valuesShouldExist = True
    elif some == 'no':
        valuesShouldExist = False
    else:
        raise Exception("only 'some' and 'no' are valid inputs")
    for host, datadir in segment_info:
        filepath = os.path.join(datadir, filename)
        regex = r"^[\s]*%s" % output
        cmd_str = 'ssh %s "grep -c %s %s"' % (host, regex, filepath)
        cmd = Command(name='Running remote command: %s' % cmd_str, cmdStr=cmd_str)
        cmd.run(validateAfter=False)
        try:
            val = int(cmd.get_stdout().strip())
        except ValueError:
            raise Exception('File %s on host %s does start with "%s"(parse error)' % (filepath, host, output))
        if not valuesShouldExist:
            if val:
                raise Exception('File %s on host %s does start with "%s"(val error: %s)' % (filepath, host, output, val))
        else:
            if not val:
                raise Exception('File %s on host %s does not start with "%s"(val error: %s)' % (filepath, host, output, val))
@then('verify that the last line of the file "{filename}" in each segment data directory {contain} the string "{output}"')
def impl(context, filename, contain, output):
    """On every primary segment, `tail -n1` the file and assert the last line
    contains (or does not contain) *output*.

    Fixes: the step pattern lacked the {filename} placeholder, and Python 3
    exceptions have no .message attribute.
    """
    if contain == 'should contain':
        valuesShouldExist = True
    elif contain == 'should not contain':
        valuesShouldExist = False
    else:
        raise Exception("only 'should contain' and 'should not contain' are valid inputs")
    segment_info = []
    conn = dbconn.connect(dbconn.DbURL(dbname='template1'), unsetSearchPath=False)
    try:
        curs = dbconn.query(conn, "SELECT hostname, datadir FROM gp_segment_configuration WHERE role='p' AND content > -1;")
        segment_info = [(row[0], row[1]) for row in curs.fetchall()]
    except Exception as e:
        raise Exception("Could not retrieve segment information: %s" % str(e))
    finally:
        conn.close()
    for host, datadir in segment_info:
        filepath = os.path.join(datadir, filename)
        cmd_str = 'ssh %s "tail -n1 %s"' % (host, filepath)
        cmd = Command(name='Running remote command: %s' % cmd_str, cmdStr=cmd_str)
        cmd.run(validateAfter=True)
        actual = cmd.get_stdout()
        if valuesShouldExist and (output not in actual):
            raise Exception('File %s on host %s does not contain "%s"' % (filepath, host, output))
        if (not valuesShouldExist) and (output in actual):
            raise Exception('File %s on host %s contains "%s"' % (filepath, host, output))
@given('the gpfdists occupying port {port} on host "{hostfile}"')
def impl(context, port, hostfile):
    """Start a gpfdist on the given port on every host listed in the map file
    named by the *hostfile* environment variable."""
    remote_gphome = os.environ.get('GPHOME')
    gp_source_file = os.path.join(remote_gphome, 'greenplum_path.sh')
    source_map_file = os.environ.get(hostfile)
    dir = '/tmp'
    # ctxt 2 == remote execution, 1 == local.
    ctxt = 2
    with open(source_map_file, 'r') as f:
        for line in f:
            host = line.strip().split(',')[0]
            if host in ('localhost', '127.0.0.1', socket.gethostname()):
                ctxt = 1
            # NOTE(review): ctxt is never reset to 2, so a local host entry
            # makes all later hosts run locally too — confirm this is intended.
            gpfdist = Gpfdist('gpfdist on host %s' % host, dir, port, os.path.join('/tmp', 'gpfdist.pid'),
                              ctxt, host, gp_source_file)
            gpfdist.startGpfdist()
@then('the gpfdists running on port {port} get cleaned up from host "{hostfile}"')
def impl(context, port, hostfile):
    """Stop the gpfdist on the given port on every host listed in the map file
    named by the *hostfile* environment variable."""
    remote_gphome = os.environ.get('GPHOME')
    gp_source_file = os.path.join(remote_gphome, 'greenplum_path.sh')
    source_map_file = os.environ.get(hostfile)
    dir = '/tmp'
    # ctxt 2 == remote execution, 1 == local (same sticky behavior as startup).
    ctxt = 2
    with open(source_map_file, 'r') as f:
        for line in f:
            host = line.strip().split(',')[0]
            if host in ('localhost', '127.0.0.1', socket.gethostname()):
                ctxt = 1
            gpfdist = Gpfdist('gpfdist on host %s' % host, dir, port, os.path.join('/tmp', 'gpfdist.pid'),
                              ctxt, host, gp_source_file)
            gpfdist.cleanupGpfdist()
@then('verify that the query "{query}" in database "{dbname}" returns "{nrows}"')
def impl(context, dbname, query, nrows):
    # behave binds step parameters by name, so the parameter order differing
    # from the pattern order is harmless.
    check_count_for_specific_query(dbname, query, int(nrows))
@then('verify that the file "{filepath}" contains "{line}"')
def impl(context, filepath, line):
    """Assert that *line* occurs in the first file matching glob *filepath*.

    Fix: read the file via a context manager so the handle is closed
    deterministically instead of leaked.
    """
    filepath = glob.glob(filepath)[0]
    with open(filepath) as fp:
        if line not in fp.read():
            raise Exception("The file '%s' does not contain '%s'" % (filepath, line))
@then('verify that the file "{filepath}" does not contain "{line}"')
def impl(context, filepath, line):
    """Assert that *line* does NOT occur in the first file matching glob *filepath*.

    Fix: read the file via a context manager so the handle is closed
    deterministically instead of leaked.
    """
    filepath = glob.glob(filepath)[0]
    with open(filepath) as fp:
        if line in fp.read():
            raise Exception("The file '%s' does contain '%s'" % (filepath, line))
@given('database "{dbname}" is dropped and recreated')
@when('database "{dbname}" is dropped and recreated')
@then('database "{dbname}" is dropped and recreated')
def impl(context, dbname):
drop_database_if_exists(context, dbname)
create_database(context, dbname)
@then('validate gpcheckcat logs contain skipping ACL and Owner tests')
def imp(context):
    """Grep the gpcheckcat logs under ~/gpAdminLogs for the messages that say
    the acl and owner tests were skipped by default."""
    dirname = 'gpAdminLogs'
    absdirname = "%s/%s" % (os.path.expanduser("~"), dirname)
    if not os.path.exists(absdirname):
        raise Exception('No such directory: %s' % absdirname)
    pattern = "%s/gpcheckcat_*.log" % (absdirname)
    logs_for_a_util = glob.glob(pattern)
    if not logs_for_a_util:
        raise Exception('Logs matching "%s" were not created' % pattern)
    # grep's exit code is non-zero when the message is absent (or on error).
    rc, error, output = run_cmd("grep 'Default skipping test:acl' %s" % pattern)
    if rc:
        raise Exception("Error executing grep on gpcheckcat logs while finding ACL: %s" % error)
    rc, error, output = run_cmd("grep 'Default skipping test:owner' %s" % pattern)
    if rc:
        raise Exception("Error executing grep on gpcheckcat logs while finding Owner: %s" % error)
@then('validate and run gpcheckcat repair')
def impl(context):
    """Verify gpcheckcat produced a repair directory, run all repair scripts
    in it, and remove the directory afterwards."""
    context.execute_steps('''
        Then gpcheckcat should print "repair script\(s\) generated in dir gpcheckcat.repair.*" to stdout
        Then the path "gpcheckcat.repair.*" is found in cwd "1" times
        Then run all the repair scripts in the dir "gpcheckcat.repair.*"
        And the path "gpcheckcat.repair.*" is removed from current working directory
        ''')
@given('there is a "{tabletype}" table "{tablename}" in "{dbname}" with "{numrows}" rows')
def impl(context, tabletype, tablename, dbname, numrows):
populate_regular_table_data(context, tabletype, tablename, 'None', dbname, with_data=True, rowcount=int(numrows))
@given('there is a "{tabletype}" table "{tablename}" in "{dbname}" with data')
@then('there is a "{tabletype}" table "{tablename}" in "{dbname}" with data')
@when('there is a "{tabletype}" table "{tablename}" in "{dbname}" with data')
def impl(context, tabletype, tablename, dbname):
populate_regular_table_data(context, tabletype, tablename, 'None', dbname, with_data=True)
@given('there is a "{tabletype}" partition table "{table_name}" in "{dbname}" with data')
@then('there is a "{tabletype}" partition table "{table_name}" in "{dbname}" with data')
@when('there is a "{tabletype}" partition table "{table_name}" in "{dbname}" with data')
def impl(context, tabletype, table_name, dbname):
create_partition(context, tablename=table_name, storage_type=tabletype, dbname=dbname, with_data=True)
@then('read pid from file "{filename}" and kill the process')
@when('read pid from file "{filename}" and kill the process')
@given('read pid from file "{filename}" and kill the process')
def impl(context, filename):
    """Poll *filename* briefly until it yields a pid, then SIGKILL that process.

    Fixes: the step patterns lacked the {filename} placeholder; the retry
    counter was only incremented when open() raised, so an existing-but-empty
    file spun forever; and the bare except is narrowed to OSError.
    """
    pid = None
    for attempt in range(1, 6):
        try:
            with open(filename) as fr:
                pid = fr.readline().strip()
        except OSError:
            pid = None
        if pid:
            break
        time.sleep(attempt * 0.1)  # 100 millis, 200 millis, etc.
    if not pid:
        raise Exception("process id '%s' not found in the file '%s'" % (pid, filename))
    cmd = Command(name="killing pid", cmdStr='kill -9 %s' % pid)
    cmd.run(validateAfter=True)
@then('an attribute of table "{table}" in database "{dbname}" is deleted on segment with content id "{segid}"')
@when('an attribute of table "{table}" in database "{dbname}" is deleted on segment with content id "{segid}"')
def impl(context, table, dbname, segid):
    """Deliberately corrupt the catalog on one primary segment by deleting the
    table's pg_attribute rows via a utility-mode connection (for gpcheckcat tests)."""
    # Look up the target primary's port and host from the coordinator.
    local_cmd = 'psql %s -t -c "SELECT port,hostname FROM gp_segment_configuration WHERE content=%s and role=\'p\';"' % (
        dbname, segid)
    run_command(context, local_cmd)
    port, host = context.stdout_message.split("|")
    port = port.strip()
    host = host.strip()
    user = os.environ.get('USER')
    source_file = os.path.join(os.environ.get('GPHOME'), 'greenplum_path.sh')
    # Yes, the below line is ugly. It looks much uglier when done with separate strings, given the multiple levels of escaping required.
    remote_cmd = """
    ssh %s "source %s; export PGUSER=%s; export PGPORT=%s; export PGOPTIONS=\\\"-c gp_role=utility\\\"; psql -d %s -c \\\"SET allow_system_table_mods=true; DELETE FROM pg_attribute where attrelid=\'%s\'::regclass::oid;\\\""
    """ % (host, source_file, user, port, dbname, table)
    run_command(context, remote_cmd.strip())
@then('The user runs sql "{query}" in "{dbname}" on first primary segment')
@when('The user runs sql "{query}" in "{dbname}" on first primary segment')
@given('The user runs sql "{query}" in "{dbname}" on first primary segment')
def impl(context, query, dbname):
    """Run *query* in utility mode directly against the first primary segment."""
    host, port = get_primary_segment_host_port()
    psql_cmd = ("PGDATABASE='{0}' PGOPTIONS='-c gp_role=utility' "
                "psql -h {1} -p {2} -c \"{3}\"; ").format(dbname, host, port, query)
    Command(name='Running Remote command: %s' % psql_cmd, cmdStr=psql_cmd).run(validateAfter=True)
@then('The user runs sql "{query}" in "{dbname}" on all the segments')
@when('The user runs sql "{query}" in "{dbname}" on all the segments')
@given('The user runs sql "{query}" in "{dbname}" on all the segments')
def impl(context, query, dbname):
    """Run *query* in utility mode on the coordinator and every primary segment."""
    for seg in GpArray.initFromCatalog(dbconn.DbURL()).getDbList():
        # Only primaries and the coordinator accept utility-mode connections here.
        if not (seg.isSegmentPrimary() or seg.isSegmentCoordinator()):
            continue
        psql_cmd = ("PGDATABASE='{0}' PGOPTIONS='-c gp_role=utility' "
                    "psql -h {1} -p {2} -c \"{3}\"; ").format(
                        dbname, seg.getSegmentHostName(), seg.getSegmentPort(), query)
        Command(name='Running Remote command: %s' % psql_cmd, cmdStr=psql_cmd).run(validateAfter=True)
@then('The user runs sql file "{file}" in "{dbname}" on all the segments')
@when('The user runs sql file "{file}" in "{dbname}" on all the segments')
@given('The user runs sql file "{file}" in "{dbname}" on all the segments')
def impl(context, file, dbname):
    """Run the SQL stored in *file* in utility mode on the coordinator and every primary."""
    with open(file) as fd:
        query = fd.read().strip()
    for seg in GpArray.initFromCatalog(dbconn.DbURL()).getDbList():
        if not (seg.isSegmentPrimary() or seg.isSegmentCoordinator()):
            continue
        psql_cmd = ("PGDATABASE='{0}' PGOPTIONS='-c gp_role=utility' "
                    "psql -h {1} -p {2} -c \"{3}\"; ").format(
                        dbname, seg.getSegmentHostName(), seg.getSegmentPort(), query)
        Command(name='Running Remote command: %s' % psql_cmd, cmdStr=psql_cmd).run(validateAfter=True)
@then('The user runs sql "{query}" in "{dbname}" on specified segment {host}:{port} in utility mode')
@when('The user runs sql "{query}" in "{dbname}" on specified segment {host}:{port} in utility mode')
@given('The user runs sql "{query}" in "{dbname}" on specified segment {host}:{port} in utility mode')
def impl(context, query, dbname, host, port):
    """Run *query* in utility mode on one segment and save its stdout on the context."""
    psql_cmd = ("PGDATABASE='{0}' PGOPTIONS='-c gp_role=utility' "
                "psql -h {1} -p {2} -c \"{3}\"; ").format(dbname, host, port, query)
    cmd = Command(name='Running Remote command: %s' % psql_cmd, cmdStr=psql_cmd)
    cmd.run(validateAfter=True)
    context.stdout_message = cmd.get_stdout()
@when('The user runs psql "{psql_cmd}" against database "{dbname}" when utility mode is set to {utility_mode}')
@then('The user runs psql "{psql_cmd}" against database "{dbname}" when utility mode is set to {utility_mode}')
@given('The user runs psql "{psql_cmd}" against database "{dbname}" when utility mode is set to {utility_mode}')
def impl(context, psql_cmd, dbname, utility_mode):
    """Run psql against *dbname*, optionally prefixed with utility-mode PGOPTIONS."""
    prefix = "PGOPTIONS='-c gp_role=utility' " if utility_mode == "True" else ""
    run_command(context, "{0}psql -d '{1}' {2};".format(prefix, dbname, psql_cmd))
@then('table {table_name} exists in "{dbname}" on specified segment {host}:{port}')
@when('table {table_name} exists in "{dbname}" on specified segment {host}:{port}')
@given('table {table_name} exists in "{dbname}" on specified segment {host}:{port}')
def impl(context, table_name, dbname, host, port):
    """Assert *table_name* has a pg_class entry on the given segment.

    Fixes:
    - The old check looked for the substring "1 row" in default psql output,
      but a COUNT(*) query always returns exactly one row (footer "(1 row)"),
      so the check could never fail.  Use tuples-only, unaligned output
      (-t -A) and compare the count itself.
    - On failure the old code raised context.stdout_message, which is stale
      output from some earlier run_command step, not this command's output.
    """
    query = "SELECT COUNT(*) FROM pg_class WHERE relname = '%s'" % table_name
    psql_cmd = "PGDATABASE=\'%s\' PGOPTIONS=\'-c gp_role=utility\' psql -t -A -h %s -p %s -c \"%s\"; " % (
        dbname, host, port, query)
    cmd = Command(name='Running Remote command: %s' % psql_cmd, cmdStr=psql_cmd)
    cmd.run(validateAfter=True)
    if cmd.get_stdout().strip() != "1":
        raise Exception("table %s does not exist in database %s on segment %s:%s\n%s"
                        % (table_name, dbname, host, port, cmd.get_stdout()))
@then('The path "{path}" is removed from current working directory')
@when('The path "{path}" is removed from current working directory')
@given('The path "{path}" is removed from current working directory')
def impl(context, path):
    """Delete *path* (relative to the cwd) via the shared helper."""
    remove_local_path(path)
@given('the path "{path}" is found in cwd "{num}" times')
@then('the path "{path}" is found in cwd "{num}" times')
@when('the path "{path}" is found in cwd "{num}" times')
def impl(context, path, num):
    """Assert that *path* matches exactly *num* entries in the cwd."""
    found = validate_local_path(path)
    if found != int(num):
        raise Exception("expected %s items but found %s items in path %s" % (num, found, path))
@when('the user runs all the repair scripts in the dir "{dir}"')
@then('run all the repair scripts in the dir "{dir}"')
def impl(context, dir):
    """Execute every gpcheckcat repair *.sh script under *dir*, failing on error."""
    for script in glob.glob("%s/*.sh" % dir):
        run_command(context, "bash %s" % script)
        if context.ret_code != 0:
            raise Exception("Error running repair script %s: %s" % (script, context.stdout_message))
@when(
    'the entry for the table "{user_table}" is removed from "{catalog_table}" with key "{primary_key}" in the database "{db_name}"')
def impl(context, user_table, catalog_table, primary_key, db_name):
    """Delete *user_table*'s row from *catalog_table* (catalog-corruption setup)."""
    delete_qry = "delete from %s where %s='%s'::regclass::oid;" % (catalog_table, primary_key, user_table)
    with closing(dbconn.connect(dbconn.DbURL(dbname=db_name), unsetSearchPath=False)) as conn:
        # Catalog writes require these session GUCs to be enabled first.
        dbconn.execSQL(conn, "set allow_system_table_mods=true;")
        dbconn.execSQL(conn, "set allow_segment_dml=true;")
        dbconn.execSQL(conn, delete_qry)
@when('the entry for the table "{user_table}" is removed from "{catalog_table}" with key "{primary_key}" in the database "{db_name}" on the first primary segment')
@given('the entry for the table "{user_table}" is removed from "{catalog_table}" with key "{primary_key}" in the database "{db_name}" on the first primary segment')
def impl(context, user_table, catalog_table, primary_key, db_name):
    """Delete *user_table*'s catalog row directly on the first primary segment."""
    host, port = get_primary_segment_host_port()
    delete_qry = "delete from %s where %s='%s'::regclass::oid;" % (catalog_table, primary_key, user_table)
    with closing(dbconn.connect(dbconn.DbURL(dbname=db_name, port=port, hostname=host), utility=True,
                                allowSystemTableMods=True, unsetSearchPath=False)) as conn:
        dbconn.execSQL(conn, delete_qry)
@given('the timestamps in the repair dir are consistent')
@when('the timestamps in the repair dir are consistent')
@then('the timestamps in the repair dir are consistent')
def impl(_):
    """Verify every file in the (last-matching) gpcheckcat repair dir shares its timestamp."""
    repair_dir = ""
    timestamp = ""
    for entry in os.listdir('.'):
        if fnmatch.fnmatch(entry, "gpcheckcat.repair.*"):
            repair_dir = entry
            # Directory name format: gpcheckcat.repair.<timestamp>...
            timestamp = entry.split('.')[2]
    if not timestamp:
        raise Exception("Timestamp was not found")
    for entry in os.listdir(repair_dir):
        if timestamp not in entry:
            raise Exception("file found containing inconsistent timestamp")
@when('wait until the process "{proc}" goes down')
@then('wait until the process "{proc}" goes down')
@given('wait until the process "{proc}" goes down')
def impl(context, proc):
    """Block until *proc* disappears from the process table, failing on timeout."""
    stopped = has_process_eventually_stopped(proc)
    context.ret_code = 0 if stopped else 1
    if not stopped:
        context.error_message = 'The process %s is still running after waiting' % proc
    check_return_code(context, 0)
@when('wait until the process "{proc}" is up')
@then('wait until the process "{proc}" is up')
@given('wait until the process "{proc}" is up')
def impl(context, proc):
    """Poll pgrep for up to ~120s until a process matching *proc* exists.

    Fix: the internal-error message misspelled "pgrep" as "gprep".
    """
    cmd = Command(name='pgrep for %s' % proc, cmdStr="pgrep %s" % proc)
    start_time = current_time = datetime.now()
    while (current_time - start_time).seconds < 120:
        cmd.run()
        # pgrep exit status: 0 = matched, 1 = no match, >1 = internal error
        if cmd.get_return_code() > 1:
            raise Exception("unexpected problem with pgrep, return code: %s" % cmd.get_return_code())
        if cmd.get_return_code() != 1:  # 0 means match
            break
        time.sleep(2)
        current_time = datetime.now()
    context.ret_code = cmd.get_return_code()
    context.error_message = ''
    if context.ret_code > 1:
        context.error_message = 'pgrep internal error'
    check_return_code(context, 0)  # 0 means one or more processes were matched
@given('the user creates an index for table "{table_name}" in database "{db_name}"')
@when('the user creates an index for table "{table_name}" in database "{db_name}"')
@then('the user creates an index for table "{table_name}" in database "{db_name}"')
def impl(context, table_name, db_name):
    """Create *table_name* with a bitmap index on its varchar column.

    Fix: the index was created on a hard-coded table called "index_table"
    instead of the table this step just created, so the step failed for any
    other *table_name*.
    """
    index_qry = ("create table {0}(i int primary key, j varchar); "
                 "create index test_index on {0} using bitmap(j)").format(table_name)
    with closing(dbconn.connect(dbconn.DbURL(dbname=db_name), unsetSearchPath=False)) as conn:
        dbconn.execSQL(conn, index_qry)
@then('the file with the fake timestamp no longer exists')
def impl(context):
    """Assert the previously recorded fake-timestamp file has been removed."""
    path = context.fake_timestamp_file
    if os.path.exists(path):
        raise Exception("expected no file at: %s" % path)
@then('"{gppkg_name}" gppkg files exist on all hosts')
def impl(context, gppkg_name):
    """Verify *gppkg_name* is reported installed on every host in the cluster.

    Fix: removed a dead ``gparray`` local that opened a pointless catalog
    connection on every invocation and was never used.
    """
    remote_gphome = os.environ.get('GPHOME')
    hostlist = get_all_hostnames_as_list(context, 'template1')
    # We can assume the GPDB is installed at the same location for all hosts
    command_list_all = show_all_installed(remote_gphome)
    for hostname in set(hostlist):
        cmd = Command(name='check if internal gppkg is installed',
                      cmdStr=command_list_all,
                      ctxt=REMOTE,
                      remoteHost=hostname)
        cmd.run(validateAfter=True)
        if not gppkg_name in cmd.get_stdout():
            raise Exception( '"%s" gppkg is not installed on host: %s. \nInstalled packages: %s' % (gppkg_name, hostname, cmd.get_stdout()))
@given('the user runs command "{command}" on all hosts without validation')
@when('the user runs command "{command}" on all hosts without validation')
@then('the user runs command "{command}" on all hosts without validation')
def impl(context, command):
    """Fire *command* on every cluster host, ignoring each command's exit status."""
    for hostname in set(get_all_hostnames_as_list(context, 'template1')):
        Command(name='running command:%s' % command,
                cmdStr=command,
                ctxt=REMOTE,
                remoteHost=hostname).run(validateAfter=False)
@given('"{gppkg_name}" gppkg files do not exist on any hosts')
@when('"{gppkg_name}" gppkg files do not exist on any hosts')
@then('"{gppkg_name}" gppkg files do not exist on any hosts')
def impl(context, gppkg_name):
    """Verify *gppkg_name* is absent from every host in the cluster."""
    remote_gphome = os.environ.get('GPHOME')
    # We can assume the GPDB is installed at the same location for all hosts
    list_cmd = show_all_installed(remote_gphome)
    for hostname in set(get_all_hostnames_as_list(context, 'template1')):
        cmd = Command(name='check if internal gppkg is installed',
                      cmdStr=list_cmd,
                      ctxt=REMOTE,
                      remoteHost=hostname)
        cmd.run(validateAfter=True)
        if gppkg_name in cmd.get_stdout():
            raise Exception( '"%s" gppkg is installed on host: %s. \nInstalled packages: %s' % (gppkg_name, hostname, cmd.get_stdout()))
def _remove_gppkg_from_host(context, gppkg_name, is_coordinator_host):
    """Uninstall the gppkg matching *gppkg_name* from one host.

    The host is either the coordinator (is_coordinator_host=True) or the
    first segment host.  Removes both the installed native package and the
    archived gppkg file; raises if no matching installed package is found.
    """
    remote_gphome = os.environ.get('GPHOME')
    if is_coordinator_host:
        hostname = get_coordinator_hostname()[0][0] # returns a list of list
    else:
        hostlist = get_segment_hostlist()
        if not hostlist:
            raise Exception("Current GPDB setup is not a multi-host cluster.")
        # Let's just pick whatever is the first host in the list, it shouldn't
        # matter which one we remove from
        hostname = hostlist[0]
    # List installed packages remotely so we can resolve the full package name.
    command_list_all = show_all_installed(remote_gphome)
    cmd = Command(name='get all from the host',
                  cmdStr=command_list_all,
                  ctxt=REMOTE,
                  remoteHost=hostname)
    cmd.run(validateAfter=True)
    installed_gppkgs = cmd.get_stdout_lines()
    if not installed_gppkgs:
        raise Exception("Found no packages installed")
    # *gppkg_name* is a substring match against the full versioned name.
    full_gppkg_name = next((gppkg for gppkg in installed_gppkgs if gppkg_name in gppkg), None)
    if not full_gppkg_name:
        raise Exception("Found no matches for gppkg '%s'\n"
                        "gppkgs installed:\n%s" % (gppkg_name, installed_gppkgs))
    # Remove the installed native package first ...
    remove_command = remove_native_package_command(remote_gphome, full_gppkg_name)
    cmd = Command(name='Cleanly remove from the remove host',
                  cmdStr=remove_command,
                  ctxt=REMOTE,
                  remoteHost=hostname)
    cmd.run(validateAfter=True)
    # ... then delete the archived gppkg file so reinstall tests start clean.
    remove_archive_gppkg = remove_gppkg_archive_command(remote_gphome, gppkg_name)
    cmd = Command(name='Remove archive gppkg',
                  cmdStr=remove_archive_gppkg,
                  ctxt=REMOTE,
                  remoteHost=hostname)
    cmd.run(validateAfter=True)
@when('gppkg "{gppkg_name}" is removed from a segment host')
def impl(context, gppkg_name):
    """Uninstall *gppkg_name* from the first segment host."""
    _remove_gppkg_from_host(context, gppkg_name, is_coordinator_host=False)
@when('gppkg "{gppkg_name}" is removed from coordinator host')
def impl(context, gppkg_name):
    """Uninstall *gppkg_name* from the coordinator host."""
    _remove_gppkg_from_host(context, gppkg_name, is_coordinator_host=True)
@given('a gphome copy is created at {location} on all hosts')
def impl(context, location):
    """
    Copies the contents of GPHOME from the local machine into a different
    directory location for all hosts in the cluster.

    The copied tree's greenplum_path.sh is rewritten so that GPHOME points
    at *location*; the local original is restored afterwards.
    """
    gphome = os.environ["GPHOME"]
    greenplum_path = path.join(gphome, 'greenplum_path.sh')
    # First replace the GPHOME envvar in greenplum_path.sh.
    subprocess.check_call([
        'sed',
        '-i.bak', # we use this backup later
        '-e', r's|^GPHOME=.*$|GPHOME={}|'.format(location),
        greenplum_path,
    ])
    try:
        # Now copy all the files over.
        hosts = set(get_all_hostnames_as_list(context, 'template1'))
        host_opts = []
        for host in hosts:
            host_opts.extend(['-h', host])
        # gpscp '=:' syntax expands to the same path on every target host.
        subprocess.check_call([
            'gpscp',
            '-rv',
        ] + host_opts + [
            os.getenv('GPHOME'),
            '=:{}'.format(location),
        ])
    finally:
        # Put greenplum_path.sh back the way it was.
        subprocess.check_call([
            'mv', '{}.bak'.format(greenplum_path), greenplum_path
        ])
@given('all files in gpAdminLogs directory are deleted')
@then('all files in gpAdminLogs directory are deleted')
def impl(context):
    """Empty the gpAdminLogs directory."""
    for entry in glob.glob('%s/*' % _get_gpAdminLogs_directory()):
        os.remove(entry)
@then('gpAdminLogs directory {has} "{expected_file}" files')
def impl(context, has, expected_file):
    """Assert the presence ('has') or absence ('has no') of matching log files."""
    log_dir = _get_gpAdminLogs_directory()
    matches = glob.glob('%s/%s' % (log_dir, expected_file))
    if has == 'has no' and matches:
        raise Exception("expected no %s files in %s, but found %s" % (expected_file, log_dir, matches))
    if has == 'has' and not matches:
        raise Exception("expected %s file in %s, but not found" % (expected_file, log_dir))
@given('"{filepath}" is copied to the install directory')
def impl(context, filepath):
    """Copy *filepath* into $GPHOME/bin."""
    gphome = os.getenv("GPHOME")
    if not gphome:
        raise Exception("GPHOME must be set")
    destination = os.path.join(gphome, "bin")
    shutil.copy(filepath, destination)
@then('{command} should print "{target}" to logfile')
def impl(context, command, target):
    """Assert *target* appears in the first matching gpAdminLogs logfile for *command*."""
    log_dir = _get_gpAdminLogs_directory()
    logfile = glob.glob('%s/%s_*.log' % (log_dir, command))[0]
    with open(logfile) as fr:
        contents = fr.read()
    if target not in contents:
        raise Exception("cannot find %s in %s" % (target, logfile))
@given('verify that a role "{role_name}" exists in database "{dbname}"')
@then('verify that a role "{role_name}" exists in database "{dbname}"')
def impl(context, role_name, dbname):
    """Verify that *role_name* exists in pg_roles of *dbname*.

    Fixes:
    - The old code opened a database connection it never used nor closed,
      leaking a connection on every invocation.
    - The bare except has been narrowed to the IndexError raised by getRows
      when the query returns no rows.
    """
    query = "select rolname from pg_roles where rolname = '%s'" % role_name
    try:
        result = getRows(dbname, query)[0][0]
    except IndexError:
        raise Exception("Role %s does not exist in database %s." % (role_name, dbname))
    if result != role_name:
        raise Exception("Role %s does not exist in database %s." % (role_name, dbname))
@given('the system timezone is saved')
def impl(context):
    """Store the OS timezone abbreviation (from `date +%Z`) on the context."""
    tz_cmd = Command(name='Get system timezone', cmdStr='date +"%Z"')
    tz_cmd.run(validateAfter=True)
    context.system_timezone = tz_cmd.get_stdout()
@then('the database timezone is saved')
def impl(context):
    """Store the database's abbreviated timezone on the context."""
    tz_cmd = Command(name='Get database timezone',
                     cmdStr='psql -d template1 -c "show time zone" -t')
    tz_cmd.run(validateAfter=True)
    tz = tz_cmd.get_stdout()
    # Translate the full timezone name into its abbreviation via the catalog.
    abbrev_cmd = Command(name='Get abbreviated database timezone',
                         cmdStr='psql -d template1 -c "select abbrev from pg_timezone_names where name=\'%s\';" -t' % tz)
    abbrev_cmd.run(validateAfter=True)
    context.database_timezone = abbrev_cmd.get_stdout()
@then('the database timezone matches the system timezone')
def step_impl(context):
    """Compare the saved database timezone against the saved system timezone."""
    if context.database_timezone != context.system_timezone:
        raise Exception("Expected database timezone to be %s, but it was %s"
                        % (context.system_timezone, context.database_timezone))
@then('the database timezone matches "{abbreviated_timezone}"')
def step_impl(context, abbreviated_timezone):
    """Compare the saved database timezone against an expected abbreviation."""
    if context.database_timezone != abbreviated_timezone:
        raise Exception("Expected database timezone to be %s, but it was %s"
                        % (abbreviated_timezone, context.database_timezone))
@then('the startup timezone is saved')
def step_impl(context):
    """Parse the timezone abbreviation out of the first line of startup.log."""
    logfile = "%s/log/startup.log" % gp.get_coordinatordatadir()
    with open(logfile) as fh:
        # The timestamp is the first comma-separated field; its last three
        # characters are the timezone abbreviation.
        timestamp = fh.readline().split(",")[0]
    timezone = timestamp[-3:]
    if timezone == "":
        raise Exception("Could not find timezone information in startup.log")
    context.startup_timezone = timezone
@then('the startup timezone matches the system timezone')
def step_impl(context):
    """Compare the saved startup.log timezone against the saved system timezone."""
    if context.startup_timezone != context.system_timezone:
        raise Exception("Expected timezone in startup.log to be %s, but it was %s"
                        % (context.system_timezone, context.startup_timezone))
@then('the startup timezone matches "{abbreviated_timezone}"')
def step_impl(context, abbreviated_timezone):
    """Compare the saved startup.log timezone against an expected abbreviation."""
    if context.startup_timezone != abbreviated_timezone:
        raise Exception("Expected timezone in startup.log to be %s, but it was %s"
                        % (abbreviated_timezone, context.startup_timezone))
@given("a working directory of the test as '{working_directory}' with mode '{mode}'")
def impl(context, working_directory, mode):
    """Create a fresh working directory with the given octal mode string."""
    _create_working_directory(context, working_directory, mode)
@given("a working directory of the test as '{working_directory}'")
def impl(context, working_directory):
    """Create a fresh working directory with default permissions."""
    _create_working_directory(context, working_directory)
def _create_working_directory(context, working_directory, mode=''):
context.working_directory = working_directory
# Don't fail if directory already exists, which can occur for the first scenario
shutil.rmtree(context.working_directory, ignore_errors=True)
if (mode != ''):
os.mkdir(context.working_directory, int(mode,8))
else:
os.mkdir(context.working_directory)
def _create_cluster(context, coordinator_host, segment_host_list, hba_hostnames='0', with_mirrors=False, mirroring_configuration='group'):
    """Build (or reuse) a test cluster rooted under context.working_directory.

    If a cluster is already running and its mirror configuration matches the
    request (mirrorless requested and no mirrors present, or mirrored
    requested and mirrors present), creation is skipped.  Sets the module
    global ``coordinator_data_dir`` and exports COORDINATOR_DATA_DIRECTORY.

    segment_host_list: comma-separated host names; empty string means none.
    """
    if segment_host_list == "":
        segment_host_list = []
    else:
        segment_host_list = segment_host_list.split(",")
    global coordinator_data_dir
    coordinator_data_dir = os.path.join(context.working_directory, 'data/coordinator/gpseg-1')
    os.environ['COORDINATOR_DATA_DIRECTORY'] = coordinator_data_dir
    try:
        # Probe the running cluster (if any) for mirrors to decide reuse.
        with closing(dbconn.connect(dbconn.DbURL(dbname='template1'), unsetSearchPath=False)) as conn:
            count = dbconn.querySingleton(conn, "select count(*) from gp_segment_configuration where role='m';")
            if not with_mirrors and count == 0:
                print("Skipping creating a new cluster since the cluster is primary only already.")
                return
            elif with_mirrors and count > 0:
                print("Skipping creating a new cluster since the cluster has mirrors already.")
                return
    except:
        # NOTE(review): a failed connect presumably means no cluster is
        # running, so fall through and create one — confirm this is the
        # only expected failure here.
        pass
    testcluster = TestCluster(hosts=[coordinator_host]+segment_host_list, base_dir=context.working_directory,hba_hostnames=hba_hostnames)
    testcluster.reset_cluster()
    testcluster.create_cluster(with_mirrors=with_mirrors, mirroring_configuration=mirroring_configuration)
    context.gpexpand_mirrors_enabled = with_mirrors
@then('a cluster is created with no mirrors on "{coordinator_host}" and "{segment_host_list}"')
@given('a cluster is created with no mirrors on "{coordinator_host}" and "{segment_host_list}"')
def impl(context, coordinator_host, segment_host_list):
    """Build a mirrorless test cluster on the given hosts."""
    _create_cluster(context, coordinator_host, segment_host_list, with_mirrors=False)
@given('with HBA_HOSTNAMES "{hba_hostnames}" a cluster is created with no mirrors on "{coordinator_host}" and "{segment_host_list}"')
@when('with HBA_HOSTNAMES "{hba_hostnames}" a cluster is created with no mirrors on "{coordinator_host}" and "{segment_host_list}"')
@when('with HBA_HOSTNAMES "{hba_hostnames}" a cross-subnet cluster without a standby is created with no mirrors on "{coordinator_host}" and "{segment_host_list}"')
def impl(context, coordinator_host, segment_host_list, hba_hostnames):
    """Build a mirrorless test cluster with the given HBA_HOSTNAMES setting."""
    _create_cluster(context, coordinator_host, segment_host_list, hba_hostnames, with_mirrors=False)
@given('a cross-subnet cluster without a standby is created with mirrors on "{coordinator_host}" and "{segment_host_list}"')
@given('a cluster is created with mirrors on "{coordinator_host}" and "{segment_host_list}"')
def impl(context, coordinator_host, segment_host_list):
    """Build a test cluster with group mirroring on the given hosts."""
    _create_cluster(context, coordinator_host, segment_host_list, with_mirrors=True, mirroring_configuration='group')
@given('a cluster is created with "{mirroring_configuration}" segment mirroring on "{coordinator_host}" and "{segment_host_list}"')
def impl(context, mirroring_configuration, coordinator_host, segment_host_list):
    """Build a mirrored test cluster with the requested mirroring layout."""
    _create_cluster(context, coordinator_host, segment_host_list, with_mirrors=True, mirroring_configuration=mirroring_configuration)
@given('the user runs gpexpand interview to add {num_of_segments} new segment and {num_of_hosts} new host "{hostnames}"')
@when('the user runs gpexpand interview to add {num_of_segments} new segment and {num_of_hosts} new host "{hostnames}"')
def impl(context, num_of_segments, num_of_hosts, hostnames):
    """Drive the interactive gpexpand interview for the requested expansion."""
    num_of_segments = int(num_of_segments)
    num_of_hosts = int(num_of_hosts)
    hosts = hostnames.split(',') if num_of_hosts > 0 else []
    if num_of_hosts != len(hosts):
        raise Exception("Incorrect amount of hosts. number of hosts:%s\nhostnames: %s" % (num_of_hosts, hosts))

    # Prefer a scenario-specific base dir when one has been set up.
    if hasattr(context, "temp_base_dir"):
        base_dir = context.temp_base_dir
    elif hasattr(context, "working_directory"):
        base_dir = context.working_directory
    else:
        base_dir = "/tmp"
    primary_dir = os.path.join(base_dir, 'data', 'primary')
    mirror_dir = os.path.join(base_dir, 'data', 'mirror') if context.gpexpand_mirrors_enabled else ''

    # we need to create the tuples for the interview to work.
    directory_pairs = [(primary_dir, mirror_dir) for _ in range(num_of_segments)]

    gpexpand = Gpexpand(context, working_directory=context.working_directory)
    output, returncode = gpexpand.do_interview(hosts=hosts,
                                               num_of_segments=num_of_segments,
                                               directory_pairs=directory_pairs,
                                               has_mirrors=context.gpexpand_mirrors_enabled)
    if returncode != 0:
        raise Exception("*****An error occured*****:\n %s" % output)
@given('there are no gpexpand_inputfiles')
def impl(context):
    """Remove any gpexpand input files left over in the cwd."""
    for leftover in glob.glob("gpexpand_inputfile*"):
        os.remove(leftover)
@when('the user runs gpexpand with the latest gpexpand_inputfile with additional parameters {additional_params}')
def impl(context, additional_params=''):
    """Run gpexpand initialization with extra CLI parameters, failing on error."""
    expander = Gpexpand(context, working_directory=context.working_directory)
    ret_code, std_err, std_out = expander.initialize_segments(additional_params)
    if ret_code != 0:
        raise Exception("gpexpand exited with return code: %d.\nstderr=%s\nstdout=%s" % (ret_code, std_err, std_out))
@when('the user runs gpexpand with the latest gpexpand_inputfile without ret code check')
def impl(context):
    """Run gpexpand initialization, deliberately ignoring its exit status."""
    Gpexpand(context, working_directory=context.working_directory).initialize_segments()
@when('the user runs gpexpand to redistribute with duration "{duration}"')
def impl(context, duration):
    """Run the redistribution phase bounded by a duration."""
    _gpexpand_redistribute(context, duration)
@when('the user runs gpexpand to redistribute with the --end flag')
def impl(context):
    """Run the redistribution phase bounded by an end time."""
    _gpexpand_redistribute(context, endtime=True)
@when('the user runs gpexpand to redistribute')
def impl(context):
    """Run the redistribution phase to completion."""
    _gpexpand_redistribute(context)
def _gpexpand_redistribute(context, duration=False, endtime=False):
    """Run gpexpand's redistribution phase.

    When *duration* or *endtime* is given, a non-zero exit is expected
    (gpexpand stopped at the time bound) and a zero exit is an error;
    otherwise a zero exit is required.
    """
    gpexpand = Gpexpand(context, working_directory=context.working_directory)
    context.command = gpexpand
    ret_code, std_err, std_out = gpexpand.redistribute(duration, endtime)
    if duration or endtime:
        if ret_code == 0:
            raise Exception("gpexpand didn't stop at duration / endtime.\nstderr=%s\nstdout=%s" % (std_err, std_out))
        # gpexpand exited on time, it's expected
        return
    if ret_code != 0:
        raise Exception("gpexpand exited with return code: %d.\nstderr=%s\nstdout=%s" % (ret_code, std_err, std_out))
@given('expanded preferred primary on segment "{segment_id}" has failed')
def step_impl(context, segment_id):
    """Stop the given primary segment and wait for transactions to unblock."""
    stop_primary(context, int(segment_id))
    wait_for_unblocked_transactions(context)
@given('the user runs gpexpand with a static inputfile for a two-node cluster with mirrors')
def impl(context):
    """Write a fixed two-node mirrored gpexpand input file and run initialization."""
    contents = """
sdw1|sdw1|20502|/tmp/gpexpand_behave/two_nodes/data/primary/gpseg2|6|2|p
sdw2|sdw2|21502|/tmp/gpexpand_behave/two_nodes/data/mirror/gpseg2|8|2|m
sdw2|sdw2|20503|/tmp/gpexpand_behave/two_nodes/data/primary/gpseg3|7|3|p
sdw1|sdw1|21503|/tmp/gpexpand_behave/two_nodes/data/mirror/gpseg3|9|3|m"""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    inputfile_name = "%s/gpexpand_inputfile_%s" % (context.working_directory, stamp)
    with open(inputfile_name, 'w') as out:
        out.write(contents)

    expander = Gpexpand(context, working_directory=context.working_directory)
    ret_code, std_err, std_out = expander.initialize_segments()
    if ret_code != 0:
        raise Exception("gpexpand exited with return code: %d.\nstderr=%s\nstdout=%s" % (ret_code, std_err, std_out))
@when('the user runs gpexpand with a static inputfile for a single-node cluster with mirrors')
def impl(context):
    """Write a fixed single-node mirrored gpexpand input file and run initialization."""
    contents = """sdw1|sdw1|20502|/tmp/gpexpand_behave/data/primary/gpseg2|6|2|p
sdw1|sdw1|21502|/tmp/gpexpand_behave/data/mirror/gpseg2|8|2|m
sdw1|sdw1|20503|/tmp/gpexpand_behave/data/primary/gpseg3|7|3|p
sdw1|sdw1|21503|/tmp/gpexpand_behave/data/mirror/gpseg3|9|3|m"""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    inputfile_name = "%s/gpexpand_inputfile_%s" % (context.working_directory, stamp)
    with open(inputfile_name, 'w') as out:
        out.write(contents)

    expander = Gpexpand(context, working_directory=context.working_directory)
    ret_code, std_err, std_out = expander.initialize_segments()
    if ret_code != 0:
        raise Exception("gpexpand exited with return code: %d.\nstderr=%s\nstdout=%s" % (ret_code, std_err, std_out))
@when('the user runs gpexpand with a static inputfile for a single-node cluster with mirrors without ret code check')
def impl(context):
    """Write a fixed gpexpand input file and run initialization, ignoring exit status."""
    contents = """sdw1|sdw1|20502|/data/gpdata/gpexpand/data/primary/gpseg2|7|2|p
sdw1|sdw1|21502|/data/gpdata/gpexpand/data/mirror/gpseg2|8|2|m"""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    inputfile_name = "%s/gpexpand_inputfile_%s" % (context.working_directory, stamp)
    with open(inputfile_name, 'w') as out:
        out.write(contents)

    Gpexpand(context, working_directory=context.working_directory).initialize_segments()
@given('the coordinator pid has been saved')
def impl(context):
    """Remember the coordinator postmaster pid for a later comparison."""
    data_dir = os.path.join(context.working_directory,
                            'data/coordinator/gpseg-1')
    context.coordinator_pid = gp.get_postmaster_pid_locally(data_dir)
@then('verify that the coordinator pid has not been changed')
def impl(context):
    """Fail if the coordinator postmaster pid differs from the saved one."""
    data_dir = os.path.join(context.working_directory,
                            'data/coordinator/gpseg-1')
    current_coordinator_pid = gp.get_postmaster_pid_locally(data_dir)
    if context.coordinator_pid != current_coordinator_pid:
        raise Exception("The coordinator pid has been changed.\nprevious: %s\ncurrent: %s"
                        % (context.coordinator_pid, current_coordinator_pid))
@then('the numsegments of table "{tabname}" is {numsegments}')
def impl(context, tabname, numsegments):
    """Check gp_distribution_policy.numsegments for *tabname* in gptest."""
    with closing(dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)) as conn:
        query = "select numsegments from gp_distribution_policy where localoid = '{tabname}'::regclass".format(tabname=tabname)
        ns = dbconn.querySingleton(conn, query)
    if ns != int(numsegments):
        raise Exception("The numsegments of the writable external table {tabname} is {ns} (expected to be {numsegments})".format(
            tabname=tabname, ns=str(ns), numsegments=str(numsegments)))
@given('the number of segments have been saved')
@then('the number of segments have been saved')
def impl(context):
    """Record the current count of data segments on the context."""
    with closing(dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)) as conn:
        context.start_data_segments = dbconn.querySingleton(
            conn, """SELECT count(*) from gp_segment_configuration where -1 < content""")
@given('the gp_segment_configuration have been saved')
@when('the gp_segment_configuration have been saved')
@then('the gp_segment_configuration have been saved')
def impl(context):
    """Snapshot gp_segment_configuration (data segments only), keyed by dbid."""
    backup = {}
    with closing(dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)) as conn:
        segment_count = int(dbconn.querySingleton(
            conn, """SELECT count(*) from gp_segment_configuration where -1 < content"""))
        cursor = dbconn.query(
            conn, """SELECT * from gp_segment_configuration where -1 < content order by dbid""")
        for _ in range(segment_count):
            (dbid, content, role, preferred_role, mode, status,
             port, hostname, address, datadir) = cursor.fetchone()
            backup[dbid] = {'content': content,
                            'role': role,
                            'preferred_role': preferred_role,
                            'mode': mode,
                            'status': status,
                            'port': port,
                            'hostname': hostname,
                            'address': address,
                            'datadir': datadir}
    context.gp_segment_conf_backup = backup
@given('verify the gp_segment_configuration has been restored')
@when('verify the gp_segment_configuration has been restored')
@then('verify the gp_segment_configuration has been restored')
def impl(context):
    """Re-read gp_segment_configuration and compare against the saved snapshot."""
    current = {}
    with closing(dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)) as conn:
        segment_count = int(dbconn.querySingleton(
            conn, """SELECT count(*) from gp_segment_configuration where -1 < content"""))
        cursor = dbconn.query(
            conn, """SELECT * from gp_segment_configuration where -1 < content order by dbid""")
        for _ in range(segment_count):
            (dbid, content, role, preferred_role, mode, status,
             port, hostname, address, datadir) = cursor.fetchone()
            current[dbid] = {'content': content,
                             'role': role,
                             'preferred_role': preferred_role,
                             'mode': mode,
                             'status': status,
                             'port': port,
                             'hostname': hostname,
                             'address': address,
                             'datadir': datadir}
    if context.gp_segment_conf_backup != current:
        raise Exception("gp_segment_configuration has not been restored")
@given('user has created {table_name} table')
def impl(context, table_name):
    """Create a single-int-column table named *table_name* in gptest."""
    with closing(dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)) as conn:
        dbconn.execSQL(conn, """CREATE TABLE %s(a INT)""" % table_name)
@given('a long-run read-only transaction exists on {table_name}')
def impl(context, table_name):
    """Open a connection, snapshot *table_name*'s rows and record the current xid."""
    conn = dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)
    # Keep the connection alive on the context so the transaction stays open.
    context.long_run_select_only_conn = conn
    select_query = """SELECT gp_segment_id, * from %s order by 1, 2""" % table_name
    context.long_run_select_only_data_result = dbconn.query(conn, select_query).fetchall()
    context.long_run_select_only_xid = dbconn.querySingleton(conn, """SELECT txid_current()""")
@then('verify that long-run read-only transaction still exists on {table_name}')
def impl(context, table_name):
    """Re-check the long-running read-only transaction's xid and snapshot.

    Fix: the error format string contained '%)' instead of '%s)', so the
    failure path raised a TypeError instead of the intended diagnostic.
    Also dropped an unused local ('dbname').
    """
    conn = context.long_run_select_only_conn
    query = """SELECT gp_segment_id, * from %s order by 1, 2""" % table_name
    data_result = dbconn.query(conn, query).fetchall()
    xid = dbconn.querySingleton(conn, """SELECT txid_current()""")
    if (xid != context.long_run_select_only_xid or
            data_result != context.long_run_select_only_data_result):
        error_str = ("Incorrect xid or select result of long run read-only transaction: "
                     "xid(before %s, after %s), result(before %s, after %s)")
        raise Exception(error_str % (context.long_run_select_only_xid, xid,
                                     context.long_run_select_only_data_result, data_result))
@given('a long-run transaction starts')
def impl(context):
    """Open a connection to 'gptest' and record its transaction id on the context."""
    conn = dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)
    context.long_run_conn = conn
    context.long_run_xid = dbconn.querySingleton(conn, """SELECT txid_current()""")
@then('verify that long-run transaction aborted for changing the catalog by creating table {table_name}')
def impl(context, table_name):
    """Verify the saved long-run transaction is aborted on catalog change.

    After a cluster expansion, a pre-expansion transaction that tries to
    change the catalog (here: CREATE TABLE) is expected to fail with a
    "cluster is expanded" FATAL error.
    """
    dbname = 'gptest'
    conn = context.long_run_conn
    # First confirm we are still inside the same transaction as the @given step.
    query = """SELECT txid_current()"""
    xid = dbconn.querySingleton(conn, query)
    if context.long_run_xid != xid:
        raise Exception("Incorrect xid of long run transaction: before %s, after %s" %
                        (context.long_run_xid, xid));
    query = """CREATE TABLE %s (a INT)""" % table_name
    try:
        data_result = dbconn.query(conn, query)
    except Exception as msg:
        # Only this specific FATAL message counts as the expected abort.
        key_msg = "FATAL: cluster is expanded"
        if key_msg not in msg.__str__():
            raise Exception("transaction not abort correctly, errmsg:%s" % msg)
    else:
        # The CREATE TABLE succeeding means the abort never happened.
        raise Exception("transaction not abort, result:%s" % data_result)
@when('verify that the cluster has {num_of_segments} new segments')
@then('verify that the cluster has {num_of_segments} new segments')
def impl(context, num_of_segments):
    """Count 'up' data segments and compare the growth against the saved baseline.

    Assumes a prior step stored the pre-expansion count in
    context.start_data_segments.
    """
    dbname = 'gptest'
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        query = """SELECT dbid, content, role, preferred_role, mode, status, port, hostname, address, datadir from gp_segment_configuration;"""
        rows = dbconn.query(conn, query).fetchall()
        end_data_segments = 0
        for row in rows:
            content = row[1]
            status = row[5]
            # content > -1 excludes the coordinator/standby; 'u' means up.
            if content > -1 and status == 'u':
                end_data_segments += 1
    if int(num_of_segments) == int(end_data_segments - context.start_data_segments):
        return
    raise Exception("Incorrect amount of segments.\nprevious: %s\ncurrent:"
                    "%s\ndump of gp_segment_configuration: %s" %
                    (context.start_data_segments, end_data_segments, rows))
@given('the cluster is setup for an expansion on hosts "{hostnames}"')
def impl(context, hostnames):
    """Create primary/mirror data directories on every expansion host.

    The base directory is taken from context.temp_base_dir, then
    context.working_directory, then falls back to /tmp.
    """
    if hasattr(context, "temp_base_dir"):
        base_dir = context.temp_base_dir
    elif hasattr(context, "working_directory"):
        base_dir = context.working_directory
    else:
        base_dir = "/tmp"
    for host in hostnames.split(","):
        mkdirs = "mkdir -p %s/data/primary; mkdir -p %s/data/mirror" % (base_dir, base_dir)
        Command(name='create data directories for expansion',
                cmdStr=mkdirs,
                ctxt=REMOTE,
                remoteHost=host).run(validateAfter=True)
@given("a temporary directory under '{tmp_base_dir}' with mode '{mode}' is created")
@given('a temporary directory under "{tmp_base_dir}" to expand into')
def make_temp_dir(context,tmp_base_dir, mode=''):
if not tmp_base_dir:
raise Exception("tmp_base_dir cannot be empty")
if not os.path.exists(tmp_base_dir):
os.mkdir(tmp_base_dir)
context.temp_base_dir = tempfile.mkdtemp(dir=tmp_base_dir)
if mode:
os.chmod(path.normpath(path.join(tmp_base_dir, context.temp_base_dir)),
int(mode,8))
@given('the new host "{hostnames}" is ready to go')
def impl(context, hostnames):
    """Reset expansion data directories on the given hosts.

    Resets under whichever base directories earlier steps recorded on the
    context (both may apply).
    """
    hosts = hostnames.split(',')
    if hasattr(context, "working_directory"):
        reset_hosts(hosts, context.working_directory)
    if hasattr(context, "temp_base_dir"):
        reset_hosts(hosts, context.temp_base_dir)
@given('user has created expansiontest tables')
@then('user has created expansiontest tables')
def impl(context):
    """(Re)create tables expansiontest0..expansiontest2 in the 'gptest' database."""
    with closing(dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)) as conn:
        for idx in range(3):
            dbconn.execSQL(conn, """drop table if exists expansiontest%s""" % (idx))
            dbconn.execSQL(conn, """create table expansiontest%s(a int)""" % (idx))
@then('the tables have finished expanding')
def impl(context):
    """Fail if gpexpand.status_detail still lists a table with no expansion-finished time."""
    sql = """select fq_name from gpexpand.status_detail WHERE expansion_finished IS NULL"""
    with closing(dbconn.connect(dbconn.DbURL(dbname='postgres'), unsetSearchPath=False)) as conn:
        unfinished = dbconn.query(conn, sql).fetchone()
        if unfinished:
            raise Exception("table %s has not finished expanding" % unfinished[0])
@given('an FTS probe is triggered')
@when('an FTS probe is triggered')
@then('an FTS probe is triggered')
def impl(context):
    # Ask the fault-tolerance server (FTS) to scan segment health immediately
    # instead of waiting for its next polling interval.
    with closing(dbconn.connect(dbconn.DbURL(dbname='postgres'), unsetSearchPath=False)) as conn:
        dbconn.querySingleton(conn, "SELECT gp_request_fts_probe_scan()")
@then('verify that gpstart on original coordinator fails due to lower Timeline ID')
def step_impl(context):
    ''' This assumes that gpstart still checks for Timeline ID if a standby coordinator is present '''
    # Delegates to nested steps: the old coordinator must refuse to start
    # (rc 2) once the standby has been activated and owns a newer timeline.
    context.execute_steps('''
                            When the user runs "gpstart -a"
                            Then gpstart should return a return code of 2
                            And gpstart should print "Standby activated, this node no more can act as coordinator." to stdout
                            ''')
@then('verify gpstate with options "{options}" output is correct')
def step_impl(context, options):
    """Spot-check the saved gpstate output (context.stdout_message) for one flag.

    Supported flags: -f (standby details), -s (coordinator details),
    -Q (up/down segment counts), -m (mirror datadir/port listing).
    Raises for any unrecognized option string.
    """
    if '-f' in options:
        # -f must show the standby coordinator's host, datadir and port.
        if context.standby_hostname not in context.stdout_message or \
            context.standby_data_dir not in context.stdout_message or \
            str(context.standby_port) not in context.stdout_message:
            raise Exception("gpstate -f output is missing expected standby coordinator information")
    elif '-s' in options:
        if context.standby_hostname not in context.stdout_message or \
            context.standby_data_dir not in context.stdout_message or \
            str(context.standby_port) not in context.stdout_message:
            raise Exception("gpstate -s output is missing expected coordinator information")
    elif '-Q' in options:
        # Parse the "up/down segments, from configuration table" summary lines.
        # NOTE(review): "\s" in a non-raw string is an invalid escape sequence
        # (DeprecationWarning on newer Pythons) — consider raw strings.
        for stdout_line in context.stdout_message.split('\n'):
            if 'up segments, from configuration table' in stdout_line:
                segments_up = int(re.match(".*of up segments, from configuration table\s+=\s+([0-9]+)", stdout_line).group(1))
                if segments_up <= 1:
                    raise Exception("gpstate -Q output does not match expectations of more than one segment up")
            if 'down segments, from configuration table' in stdout_line:
                segments_down = int(re.match(".*of down segments, from configuration table\s+=\s+([0-9]+)", stdout_line).group(1))
                if segments_down != 0:
                    raise Exception("gpstate -Q output does not match expectations of all segments up")
                break   ## down segments comes after up segments, so we can break here
    elif '-m' in options:
        # -m is checked against the catalog on the (activated) standby.
        dbname = 'postgres'
        conn = dbconn.connect(dbconn.DbURL(hostname=context.standby_hostname, port=context.standby_port, dbname=dbname), unsetSearchPath=False)
        try:
            query = """select datadir, port from pg_catalog.gp_segment_configuration where role='m' and content <> -1;"""
            cursor = dbconn.query(conn, query)
            for i in range(cursor.rowcount):
                datadir, port = cursor.fetchone()
                if datadir not in context.stdout_message or \
                    str(port) not in context.stdout_message:
                    raise Exception("gpstate -m output missing expected mirror info, datadir %s port %d" %(datadir, port))
        finally:
            conn.close()
    else:
        raise Exception("no verification for gpstate option given")
@given('ensure the standby directory does not exist')
def impl(context):
    # Clean up any leftover standby datadir and gpinitsystem log directory
    # from a previous run.
    run_command(context, 'rm -rf $COORDINATOR_DATA_DIRECTORY/newstandby')
    run_command(context, 'rm -rf /tmp/gpinitsystemtest && mkdir /tmp/gpinitsystemtest')
@when('initialize a cluster with standby using "{config_file}"')
def impl(context, config_file):
    # -I: use an existing (generated) input config; -s/-P/-S configure the standby.
    run_gpcommand(context, 'gpinitsystem -a -I %s -l /tmp/gpinitsystemtest -s localhost -P 21100 -S $COORDINATOR_DATA_DIRECTORY/newstandby -h ../gpAux/gpdemo/hostfile' % config_file)
    check_return_code(context, 0)
@when('initialize a cluster using "{config_file}"')
def impl(context, config_file):
    # Initialize from a previously generated input config (no standby).
    run_gpcommand(context, 'gpinitsystem -a -I %s -l /tmp/' % config_file)
    check_return_code(context, 0)
@when('generate cluster config file "{config_file}"')
def impl(context, config_file):
    # -O writes the resolved input configuration to config_file without
    # actually initializing a cluster.
    run_gpcommand(context, 'gpinitsystem -a -c ../gpAux/gpdemo/clusterConfigFile -O %s' % config_file)
    check_return_code(context, 0)
@when('check segment conf: postgresql.conf')
@then('check segment conf: postgresql.conf')
def step_impl(context):
    """Verify each data segment's postgresql.conf port matches the catalog.

    Copies every segment's postgresql.conf to the coordinator datadir and
    compares its 'port' GUC against gp_segment_configuration.
    """
    query = "select dbid, port, hostname, datadir from gp_segment_configuration where content >= 0"
    conn = dbconn.connect(dbconn.DbURL(dbname='postgres'), unsetSearchPath=False)
    segments = dbconn.query(conn, query).fetchall()
    for segment in segments:
        dbid = "'%s'" % segment[0]
        # NOTE(review): port is deliberately wrapped in single quotes —
        # presumably to match how pgconf.readfile renders the value; confirm
        # against gppylib.gpparseopts/pgconf behavior.
        port = "'%s'" % segment[1]
        hostname = segment[2]
        datadir = segment[3]

        ## check postgresql.conf
        remote_postgresql_conf = "%s/%s" % (datadir, 'postgresql.conf')
        local_conf_copy = os.path.join(gp.get_coordinatordatadir(), "%s.%s" % ('postgresql.conf', hostname))
        cmd = Command(name="Copy remote conf to local to diff",
                    cmdStr='scp %s:%s %s' % (hostname, remote_postgresql_conf, local_conf_copy))
        cmd.run(validateAfter=True)

        dic = pgconf.readfile(filename=local_conf_copy)
        if str(dic['port']) != port:
            raise Exception("port value in postgresql.conf of %s is incorrect. Expected:%s, given:%s" %
                            (hostname, port, dic['port']))
@given('the transactions are started for dml')
def impl(context):
    """Start concurrent insert/update/delete background jobs against 'gptest'.

    The (operation, job) pairs are kept on the context so later steps can stop
    and re-verify them.
    """
    target_db = 'gptest'
    context.dml_jobs = []
    for operation in ('insert', 'update', 'delete'):
        runner = TestDML.create(target_db, operation)
        runner.start()
        context.dml_jobs.append((operation, runner))
@then('verify the dml results and commit')
def impl(context):
    """Stop each background DML job and fail with its message if it did not succeed."""
    # (removed an unused local 'dbname' left over from copy/paste)
    for dml, job in context.dml_jobs:
        code, message = job.stop()
        if not code:
            raise Exception(message)
@then('verify the dml results again in a new transaction')
def impl(context):
    """Re-verify each DML job's effects on a brand-new connection."""
    with closing(dbconn.connect(dbconn.DbURL(dbname='gptest'), unsetSearchPath=False)) as conn:
        for dml, job in context.dml_jobs:
            ok, message = job.reverify(conn)
            if not ok:
                raise Exception(message)
@given('the "{table_name}" table row count in "{dbname}" is saved')
def impl(context, table_name, dbname):
if 'table_row_count' not in context:
context.table_row_count = {}
if table_name not in context.table_row_count:
context.table_row_count[table_name] = _get_row_count_per_segment(table_name, dbname)
@given('distribution information from table "{table}" with data in "{dbname}" is saved')
def impl(context, table, dbname):
    # Snapshot per-segment row counts and per-partition distribution policies
    # so a post-expansion step can compare against them.
    context.pre_redistribution_row_count = _get_row_count_per_segment(table, dbname)
    context.pre_redistribution_dist_policy = _get_dist_policy_per_partition(table, dbname)
@then('distribution information from table "{table}" with data in "{dbname}" is verified against saved data')
def impl(context, table, dbname):
    """Verify *table* was redistributed correctly after a cluster expansion.

    Checks, against the snapshot taken before redistribution:
      * data now lives on more segments than before, and on every segment;
      * the total row count is unchanged;
      * rows are spread evenly (relative standard error <= 1%);
      * each partition's numsegments changed while distkey/distclass did not.
    """
    pre_distribution_row_count = context.pre_redistribution_row_count
    pre_redistribution_dist_policy = context.pre_redistribution_dist_policy
    post_distribution_row_count = _get_row_count_per_segment(table, dbname)
    post_distribution_dist_policy = _get_dist_policy_per_partition(table, dbname)

    if len(pre_distribution_row_count) >= len(post_distribution_row_count):
        raise Exception("Failed to redistribute table. Expected to have more than %d segments, got %d segments" % (len(pre_distribution_row_count), len(post_distribution_row_count)))

    post_distribution_num_segments = 0
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        query = "SELECT count(DISTINCT content) FROM gp_segment_configuration WHERE content != -1;"
        cursor = dbconn.query(conn, query)
        post_distribution_num_segments = cursor.fetchone()[0]

    if len(post_distribution_row_count) != post_distribution_num_segments:
        raise Exception("Failed to redistribute table %s. Expected table to have data on %d segments, but found %d segments" % (table, post_distribution_num_segments, len(post_distribution_row_count)))

    if sum(pre_distribution_row_count) != sum(post_distribution_row_count):
        raise Exception("Redistributed data does not match pre-redistribution data. Actual: %d, Expected: %d" % (sum(post_distribution_row_count), sum(pre_distribution_row_count)))

    # Evenness check: relative standard error of the per-segment counts.
    mean = sum(post_distribution_row_count) / len(post_distribution_row_count)
    variance = sum(pow(row_count - mean, 2) for row_count in post_distribution_row_count) / len(post_distribution_row_count)
    std_deviation = math.sqrt(variance)
    std_error = std_deviation / math.sqrt(len(post_distribution_row_count))
    relative_std_error = std_error / mean
    tolerance = 0.01
    if relative_std_error > tolerance:
        raise Exception("Unexpected variance for redistributed data in table %s. Relative standard error %f exceeded tolerance factor of %f." %
                        (table, relative_std_error, tolerance))

    # Policy rows are (numsegments, distkey, distclass): numsegments must have
    # changed (expansion grew the table), the key/class must be unchanged.
    for i in range(len(post_distribution_dist_policy)):
        if(post_distribution_dist_policy[i][0] == pre_redistribution_dist_policy[i][0] or \
           post_distribution_dist_policy[i][1] != pre_redistribution_dist_policy[i][1] or \
           post_distribution_dist_policy[i][2] != pre_redistribution_dist_policy[i][2]):
            raise Exception("""Redistributed policy does not match pre-redistribution policy.
                            before expanded: %s, after expanded: %s""" % (",".join(map(str, pre_redistribution_dist_policy[i])), \
                            ",".join(map(str, post_distribution_dist_policy[i]))))
@then('the row count from table "{table_name}" in "{dbname}" is verified against the saved data')
def impl(context, table_name, dbname):
    """Compare current per-segment row counts of *table_name* with the saved snapshot."""
    saved_row_count = context.table_row_count[table_name]
    current_row_count = _get_row_count_per_segment(table_name, dbname)
    if saved_row_count != current_row_count:
        # Bug fix: both values are lists of per-segment counts, so the old
        # "%d" conversions raised TypeError instead of reporting the mismatch.
        raise Exception("%s table in %s has %s rows, expected %s rows." % (table_name, dbname, current_row_count, saved_row_count))
@then('distribution information from table "{table1}" and "{table2}" in "{dbname}" are the same')
def impl(context, table1, table2, dbname):
    """Fail unless both tables have identical per-segment row counts."""
    counts_one = _get_row_count_per_segment(table1, dbname)
    counts_two = _get_row_count_per_segment(table2, dbname)
    if counts_one == counts_two:
        return
    raise Exception("%s and %s have different distribution. Row count of %s is %s and row count of %s is %s" % (table1, table2, table1, counts_one, table2, counts_two))
def _get_row_count_per_segment(table, dbname):
    """Return *table*'s row counts as a list ordered by gp_segment_id.

    Indices correspond to segment ids, so the ids themselves are not stored.
    """
    sql = "SELECT gp_segment_id,COUNT(i) FROM %s GROUP BY gp_segment_id ORDER BY gp_segment_id;" % table
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        return [count for _seg_id, count in dbconn.query(conn, sql).fetchall()]
def _get_dist_policy_per_partition(table, dbname):
    """Return (numsegments, distkey, distclass) per partition of *table*, ordered by oid."""
    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        query = "select * from gp_distribution_policy where localoid::regclass::text like '%s%%' order by localoid;" % table
        cursor = dbconn.query(conn, query)
        rows = cursor.fetchall()

    return [row[2:5] for row in rows]  # we only need numsegments, distkey and distclass
@given('run rollback')
@then('run rollback')
@when('run rollback')
def impl(context):
    # Roll back an in-progress gpexpand and fail loudly on a non-zero exit.
    gpexpand = Gpexpand(context, working_directory=context.working_directory)
    ret_code, std_err, std_out = gpexpand.rollback()
    if ret_code != 0:
        raise Exception("rollback exited with return code: %d.\nstderr=%s\nstdout=%s" % (ret_code, std_err, std_out))
@given('create database schema table with special character')
@then('create database schema table with special character')
def impl(context):
    """Create a database, schema, tables and partition tables whose names all
    contain spaces, dots, single/double quotes and backslashes.

    Used to exercise identifier-quoting paths (e.g. in gpexpand); the literals
    below are escape-sensitive — do not "simplify" them.
    """
    dbname = ' a b."\'\\\\'
    escape_dbname = dbname.replace('\\', '\\\\').replace('"', '\\"')
    createdb_cmd = "createdb \"%s\"" % escape_dbname
    run_command(context, createdb_cmd)

    with closing(dbconn.connect(dbconn.DbURL(dbname=dbname), unsetSearchPath=False)) as conn:
        #special char table
        query = 'create table " a b.""\'\\\\"(c1 int);'
        dbconn.execSQL(conn, query)

        query = 'create schema " a b.""\'\\\\";'
        dbconn.execSQL(conn, query)
        #special char schema and table
        query = 'create table " a b.""\'\\\\"." a b.""\'\\\\"(c1 int);'
        dbconn.execSQL(conn, query)

        #special char partition table
        query = """
        CREATE TABLE \" a b.'\"\"\\\\\" (id int, year int, month int, day int,
        region text)
        DISTRIBUTED BY (id)
        PARTITION BY RANGE (year)
        SUBPARTITION BY RANGE (month)
        SUBPARTITION TEMPLATE (
        START (1) END (13) EVERY (4),
        DEFAULT SUBPARTITION other_months )
        ( START (2008) END (2016) EVERY (1),
        DEFAULT PARTITION outlying_years);
        """
        dbconn.execSQL(conn, query)

        #special char schema and partition table
        query = """
        CREATE TABLE \" a b.\"\"'\\\\\".\" a b.'\"\"\\\\\" (id int, year int, month int, day int,
        region text)
        DISTRIBUTED BY (id)
        PARTITION BY RANGE (year)
        SUBPARTITION BY RANGE (month)
        SUBPARTITION TEMPLATE (
        START (1) END (13) EVERY (4),
        DEFAULT SUBPARTITION other_months )
        ( START (2008) END (2016) EVERY (1),
        DEFAULT PARTITION outlying_years);
        """
        dbconn.execSQL(conn, query)
@given('the database "{dbname}" is broken with "{broken}" orphaned toast tables only on segments with content IDs "{contentIDs}"')
def break_orphaned_toast_tables(context, dbname, broken, contentIDs=None):
    """Recreate *dbname* and deliberately corrupt its TOAST catalog entries.

    *broken* selects one corruption scenario (bad reference, mismatched
    cyclic/non-cyclic reltoastrelid chains, missing pg_depend rows, or the
    "double orphan" variants).  When *contentIDs* is given, the corruption is
    applied in utility mode directly on those primary segments only;
    otherwise it is applied through the coordinator.
    """
    drop_database_if_exists(context, dbname)
    create_database(context, dbname)

    sql = ''
    if broken == 'bad reference':
        sql = '''
        DROP TABLE IF EXISTS bad_reference;
        CREATE TABLE bad_reference (a text);
        UPDATE pg_class SET reltoastrelid = 0 WHERE relname = 'bad_reference';'''
    if broken == 'mismatched non-cyclic':
        sql = '''
        DROP TABLE IF EXISTS mismatch_one;
        CREATE TABLE mismatch_one (a text);
        DROP TABLE IF EXISTS mismatch_two;
        CREATE TABLE mismatch_two (a text);
        DROP TABLE IF EXISTS mismatch_three;
        CREATE TABLE mismatch_three (a text);

        -- 1 -> 2 -> 3
        UPDATE pg_class SET reltoastrelid = (SELECT reltoastrelid FROM pg_class WHERE relname = 'mismatch_two') WHERE relname = 'mismatch_one';
        UPDATE pg_class SET reltoastrelid = (SELECT reltoastrelid FROM pg_class WHERE relname = 'mismatch_three') WHERE relname = 'mismatch_two';'''
    if broken == 'mismatched cyclic':
        sql = '''
        DROP TABLE IF EXISTS mismatch_fixed;
        CREATE TABLE mismatch_fixed (a text);
        DROP TABLE IF EXISTS mismatch_one;
        CREATE TABLE mismatch_one (a text);
        DROP TABLE IF EXISTS mismatch_two;
        CREATE TABLE mismatch_two (a text);
        DROP TABLE IF EXISTS mismatch_three;
        CREATE TABLE mismatch_three (a text);

        -- fixed -> 1 -> 2 -> 3 -> 1
        UPDATE pg_class SET reltoastrelid = (SELECT reltoastrelid FROM pg_class WHERE relname = 'mismatch_one') WHERE relname = 'mismatch_fixed'; -- "save" the reltoastrelid
        UPDATE pg_class SET reltoastrelid = (SELECT reltoastrelid FROM pg_class WHERE relname = 'mismatch_two') WHERE relname = 'mismatch_one';
        UPDATE pg_class SET reltoastrelid = (SELECT reltoastrelid FROM pg_class WHERE relname = 'mismatch_three') WHERE relname = 'mismatch_two';
        UPDATE pg_class SET reltoastrelid = (SELECT reltoastrelid FROM pg_class WHERE relname = 'mismatch_fixed') WHERE relname = 'mismatch_three';'''
    if broken == "bad dependency":
        sql = '''
        DROP TABLE IF EXISTS bad_dependency;
        CREATE TABLE bad_dependency (a text);

        DELETE FROM pg_depend WHERE refobjid = 'bad_dependency'::regclass;'''
    if broken == "double orphan - no parent":
        sql = '''
        DROP TABLE IF EXISTS double_orphan_no_parent;
        CREATE TABLE double_orphan_no_parent (a text);

        DELETE FROM pg_depend WHERE refobjid = 'double_orphan_no_parent'::regclass;
        DROP TABLE double_orphan_no_parent;'''
    if broken == "double orphan - valid parent":
        sql = '''
        DROP TABLE IF EXISTS double_orphan_valid_parent;
        CREATE TABLE double_orphan_valid_parent (a text);

        -- save double_orphan_valid_parent toast table oid
        CREATE TEMP TABLE first_orphan_toast AS
            SELECT oid, relname FROM pg_class WHERE oid = (SELECT reltoastrelid FROM pg_class WHERE oid = 'double_orphan_valid_parent'::regclass);

        -- create a orphan toast table
        DELETE FROM pg_depend WHERE objid = (SELECT oid FROM first_orphan_toast);
        DROP TABLE double_orphan_valid_parent;

        -- recreate double_orphan_valid_parent table to create a second valid toast table
        CREATE TABLE double_orphan_valid_parent (a text);

        -- save the second toast table oid from recreating double_orphan_valid_parent
        CREATE TEMP TABLE second_orphan_toast AS
            SELECT oid, relname FROM pg_class WHERE oid = (SELECT reltoastrelid FROM pg_class WHERE oid = 'double_orphan_valid_parent'::regclass);

        -- swap the first_orphan_toast table with a temp name
        UPDATE pg_class SET relname = (SELECT relname || '_temp' FROM second_orphan_toast)
            WHERE oid = (SELECT oid FROM first_orphan_toast);

        -- swap second_orphan_toast table with the original name of valid_parent toast table
        UPDATE pg_class SET relname = (SELECT relname FROM first_orphan_toast)
            WHERE oid = (SELECT oid FROM second_orphan_toast);

        -- swap the temp name with the first_orphan_toast table
        UPDATE pg_class SET relname = (SELECT relname FROM second_orphan_toast)
            WHERE oid = (SELECT oid FROM first_orphan_toast);'''
    if broken == "double orphan - invalid parent":
        sql = '''
        DROP TABLE IF EXISTS double_orphan_invalid_parent;
        CREATE TABLE double_orphan_invalid_parent (a text);

        DELETE FROM pg_depend
        WHERE objid = (SELECT reltoastrelid FROM pg_class WHERE relname = 'double_orphan_invalid_parent')
            AND refobjid = (SELECT oid FROM pg_class where relname = 'double_orphan_invalid_parent');

        UPDATE pg_class SET reltoastrelid = 0 WHERE relname = 'double_orphan_invalid_parent';'''

    # Default: run through the coordinator; with contentIDs, connect straight
    # to each matching primary instead.
    dbURLs = [dbconn.DbURL(dbname=dbname)]

    if contentIDs:
        dbURLs = []

        seg_config_sql = '''SELECT port,hostname FROM gp_segment_configuration WHERE role='p' AND content IN (%s);''' % contentIDs
        for port, hostname in getRows(dbname, seg_config_sql):
            dbURLs.append(dbconn.DbURL(dbname=dbname, hostname=hostname, port=port))

    for dbURL in dbURLs:
        # utility mode is required when talking to a segment directly.
        utility = True if contentIDs else False
        with closing(dbconn.connect(dbURL, allowSystemTableMods=True, utility=utility, unsetSearchPath=False)) as conn:
            dbconn.execSQL(conn, sql)
@given('the database "{dbname}" is broken with "{broken}" orphaned toast tables')
def impl(context, dbname, broken):
    # Cluster-wide variant: corrupt via the coordinator (no content ID filter).
    break_orphaned_toast_tables(context, dbname, broken)
@given('the database "{dbname}" has a table that is orphaned in multiple ways')
def impl(context, dbname):
    """Create table 'borked' and corrupt it differently on two primaries.

    Segment 0 loses its pg_depend rows; segment 1 gets reltoastrelid zeroed —
    so repair tooling must cope with inconsistent corruption across segments.
    """
    drop_database_if_exists(context, dbname)
    create_database(context, dbname)

    coordinator = dbconn.DbURL(dbname=dbname)
    gparray = GpArray.initFromCatalog(coordinator)

    primary0 = gparray.segmentPairs[0].primaryDB
    primary1 = gparray.segmentPairs[1].primaryDB

    seg0 = dbconn.DbURL(dbname=dbname, hostname=primary0.hostname, port=primary0.port)
    seg1 = dbconn.DbURL(dbname=dbname, hostname=primary1.hostname, port=primary1.port)

    with closing(dbconn.connect(coordinator, allowSystemTableMods=True, unsetSearchPath=False)) as conn:
        dbconn.execSQL(conn, """
            DROP TABLE IF EXISTS borked;
            CREATE TABLE borked (a text);
        """)

    with closing(dbconn.connect(seg0, utility=True, allowSystemTableMods=True, unsetSearchPath=False)) as conn:
        dbconn.execSQL(conn, """
            DELETE FROM pg_depend WHERE refobjid = 'borked'::regclass;
        """)

    with closing(dbconn.connect(seg1, utility=True, allowSystemTableMods=True, unsetSearchPath=False)) as conn:
        dbconn.execSQL(conn, """
            UPDATE pg_class SET reltoastrelid = 0 WHERE oid = 'borked'::regclass;
        """)
@then('verify status file and gp_segment_configuration backup file exist on standby')
def impl(context):
    """Copy gpexpand's status and catalog-backup files from the standby and
    verify both exist.

    gpexpand writes 'gpexpand.status' and a gp_segment_configuration backup
    into the coordinator datadir; both must also be present on the standby.
    Fixes in this revision: the catalog connection is now closed (it leaked),
    the duplicated scp/exists logic is folded into one loop, and the error
    message reads "does not exist" instead of "is not exist".
    """
    status_file = 'gpexpand.status'
    gp_segment_configuration_backup = 'gpexpand.gp_segment_configuration'

    # content = -1 rows are the coordinator (dbid 1) and the standby (dbid 2+).
    query = "select hostname, datadir from gp_segment_configuration where content = -1 order by dbid"
    with closing(dbconn.connect(dbconn.DbURL(dbname='postgres'), unsetSearchPath=False)) as conn:
        res = dbconn.query(conn, query).fetchall()
    coordinator_datadir = res[0][1]
    standby_host = res[1][0]
    standby_datadir = res[1][1]

    for filename in (status_file, gp_segment_configuration_backup):
        remote_path = "%s:%s/%s" % (standby_host, standby_datadir, filename)
        local_path = "%s/%s.standby" % (coordinator_datadir, filename)
        cmd = Command(name="Copy standby file to coordinator",
                      cmdStr='scp %s %s' % (remote_path, local_path))
        cmd.run(validateAfter=True)
        if not os.path.exists(local_path):
            raise Exception('file "%s" does not exist' % remote_path)
@when('the user runs {command} and selects {input}')
def impl(context, command, input):
    """Run *command*, feed *input* on stdin, and record rc/stdout/stderr on the context."""
    p = Popen(command.split(), stdout=PIPE, stdin=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate(input=input.encode())
    # communicate() waits for the process and closes stdin itself, so the
    # explicit p.stdin.close() that used to follow was redundant.

    context.ret_code = p.returncode
    context.stdout_message = stdout.decode()
    context.error_message = stderr.decode()
def are_on_different_subnets(primary_hostname, mirror_hostname):
    """Return True when the two hosts report different broadcast addresses.

    NOTE(review): assumes the relevant interface is named eth0 on both hosts
    and that passwordless ssh works — confirm for the CI environment.
    check_output returns bytes; the two byte strings are compared directly.
    """
    primary_broadcast = check_output(['ssh', '-n', primary_hostname, "/sbin/ip addr show eth0 | grep 'inet .* brd' | awk '{ print $4 }'"])
    mirror_broadcast = check_output(['ssh', '-n', mirror_hostname, "/sbin/ip addr show eth0 | grep 'inet .* brd' | awk '{ print $4 }'"])
    if not primary_broadcast:
        raise Exception("primary hostname %s has no broadcast address" % primary_hostname)
    if not mirror_broadcast:
        raise Exception("mirror hostname %s has no broadcast address" % mirror_hostname)

    return primary_broadcast != mirror_broadcast
@then('the primaries and mirrors {including} coordinatorStandby are on different subnets')
def impl(context, including):
    """Assert every primary/mirror pair (and optionally coordinator/standby)
    straddles a subnet boundary.

    *including* is the literal word "including" when the coordinator/standby
    pair must be checked too.
    """
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    if including == "including":
        if not gparray.standbyCoordinator:
            raise Exception("no standby found for coordinator")
        if not are_on_different_subnets(gparray.coordinator.hostname, gparray.standbyCoordinator.hostname):
            raise Exception("coordinator %s and its standby %s are on same the subnet" % (gparray.coordinator, gparray.standbyCoordinator))

    for segPair in gparray.segmentPairs:
        if not segPair.mirrorDB:
            raise Exception("no mirror found for segPair: %s" % segPair)

        if not are_on_different_subnets(segPair.primaryDB.hostname, segPair.mirrorDB.hostname):
            raise Exception("segmentPair on same subnet: %s" % segPair)
@then('content {content} is {desired_state}')
def impl(context, content, desired_state):
    """Check whether the segment pair for *content* is balanced.

    "balanced" means role == preferred_role for every row of that content id;
    "unbalanced" means at least one row differs.
    """
    acceptable_states = ["balanced", "unbalanced"]
    if desired_state not in acceptable_states:
        # Bug fix: the message used to be passed as a second positional
        # Exception argument instead of being %-formatted into the string.
        raise Exception("expected desired state to be one of %s" % acceptable_states)
    # Balanced state is proven by rows where preferred_role = role (and vice versa).
    role_operator = "=" if desired_state == "balanced" else "<>"
    with closing(dbconn.connect(dbconn.DbURL(dbname="template1"), unsetSearchPath=False)) as conn:
        rows = dbconn.query(conn, "SELECT role, preferred_role FROM gp_segment_configuration WHERE content = %s and preferred_role %s role" % (content, role_operator)).fetchall()
        if len(rows) == 0:
            raise Exception("Expected content %s to be %s." % (content, desired_state))
@given('the system locale is saved')
def impl(context):
    """Record the system locale on the context.

    Prefers $LANG; falls back to the first entry of `locale -a`.
    """
    lang = os.environ.get("LANG")
    if lang is not None:
        context.system_locale = lang
        return
    probe = Command(name='Get system locale', cmdStr='locale -a | head -1')
    probe.run(validateAfter=True)
    context.system_locale = probe.get_stdout()
@then('the database locales are saved')
def impl(context):
    # Save every lc_* GUC as {name: setting} for later comparison steps.
    with closing(dbconn.connect(dbconn.DbURL())) as conn:
        rows = dbconn.query(conn, "SELECT name, setting FROM pg_settings WHERE name LIKE 'lc_%'").fetchall()
        context.database_locales = {row.name: row.setting for row in rows}
def check_locales(database_locales, locale_names, expected):
    """Assert each comma-separated name in *locale_names* maps to *expected*.

    *database_locales* is a {setting_name: locale} mapping.  Raises when a
    name is absent or its locale differs from *expected*.
    """
    for name in locale_names.split(','):
        try:
            actual = database_locales[name]
        except KeyError:
            raise Exception("Locale %s is invalid" % name)
        if actual != expected:
            raise Exception("Expected %s to be %s, but it was %s" % (name, expected, actual))
def get_en_utf_locale():
    """Return the first installed English UTF-8 locale name, or raise if none exists."""
    cmd = Command(name='Get installed US UTF locale', cmdStr='locale -a | grep -i "en[_-]..\.utf.*8" | head -1')
    cmd.run(validateAfter=True)
    locale = cmd.get_stdout()
    if locale == "":
        raise Exception("This test requires at least one English UTF-8 locale to be installed on this system")
    return locale
@then('the database locales "{locale_names}" match the locale "{expected}"')
def step_impl(context, locale_names, expected):
    # Compare the saved lc_* settings against an explicit locale string.
    check_locales(context.database_locales, locale_names, expected)
@then('the database locales "{locale_names}" match the system locale')
def step_impl(context, locale_names):
    # Compare against the locale saved by 'the system locale is saved'.
    check_locales(context.database_locales, locale_names, context.system_locale)
@then('the database locales "{locale_names}" match the installed UTF locale')
def step_impl(context, locale_names):
    # Compare against the first installed English UTF-8 locale.
    locale = get_en_utf_locale()
    check_locales(context.database_locales, locale_names, locale)
@when('a demo cluster is created using gpinitsystem args "{args}"')
def impl(context, args):
    """Wipe the gpdemo data directories and run gpinitsystem with extra *args*."""
    context.execute_steps('''
    Given the user runs command "rm -rf ../gpAux/gpdemo/datadirs/*"
    And the user runs command "mkdir ../gpAux/gpdemo/datadirs/qddir; mkdir ../gpAux/gpdemo/datadirs/dbfast1; mkdir ../gpAux/gpdemo/datadirs/dbfast2; mkdir ../gpAux/gpdemo/datadirs/dbfast3"
    And the user runs command "mkdir ../gpAux/gpdemo/datadirs/dbfast_mirror1; mkdir ../gpAux/gpdemo/datadirs/dbfast_mirror2; mkdir ../gpAux/gpdemo/datadirs/dbfast_mirror3"
    And the user runs command "rm -rf /tmp/gpinitsystemtest && mkdir /tmp/gpinitsystemtest"
    When the user runs "gpinitsystem -a %s -c ../gpAux/gpdemo/clusterConfigFile -l /tmp/gpinitsystemtest -P 21100 -h ../gpAux/gpdemo/hostfile"
    ''' % args)
@when('a demo cluster is created using the installed UTF locale')
def impl(context):
    # Reuse the generic demo-cluster step with --lc-ctype set to the first
    # installed English UTF-8 locale.
    locale = get_en_utf_locale()
    context.execute_steps('''When a demo cluster is created using gpinitsystem args "--lc-ctype=%s"''' % locale)
| 44.108202 | 219 | 0.680673 |
acea19daafabfe9ca389f9378fee87d0c87d0473 | 275 | py | Python | yc42/48.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | yc42/48.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | yc42/48.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | X, Y, L = map(int, open(0).read().split())
result = 0
if Y > 0:
result += (Y + (L - 1)) // L
if X != 0:
result += 1
result += (abs(X) + (L - 1)) // L
if Y < 0:
if X == 0:
result += 1
result += 1
result += (-Y + (L - 1)) // L
print(result)
| 15.277778 | 42 | 0.403636 |
acea1a1e68bec3e91b085d5405cd1f5a57f1761e | 24 | py | Python | expertise/models/bm25/__init__.py | matt-gardner/openreview-expertise | 561f3bae251bdfa32acd3febd3a3b0c8ba4b2ffc | [
"MIT"
] | 12 | 2020-02-25T18:45:32.000Z | 2022-02-07T22:24:48.000Z | expertise/models/bm25/__init__.py | matt-gardner/openreview-expertise | 561f3bae251bdfa32acd3febd3a3b0c8ba4b2ffc | [
"MIT"
] | 52 | 2019-12-19T08:14:43.000Z | 2022-03-23T19:40:02.000Z | expertise/models/bm25/__init__.py | matt-gardner/openreview-expertise | 561f3bae251bdfa32acd3febd3a3b0c8ba4b2ffc | [
"MIT"
] | 3 | 2019-10-24T16:52:34.000Z | 2021-09-27T21:40:39.000Z | from .bm25 import Model
| 12 | 23 | 0.791667 |
acea1a60d7f1cb6951cd417cb58b236d7e0e9835 | 14,546 | py | Python | frappe/hooks.py | Ektai-Solution-Pty-Ltd/ekt-frappe | 8d60ca017e9ec96dd821714447b7e053815eac26 | [
"MIT"
] | null | null | null | frappe/hooks.py | Ektai-Solution-Pty-Ltd/ekt-frappe | 8d60ca017e9ec96dd821714447b7e053815eac26 | [
"MIT"
] | null | null | null | frappe/hooks.py | Ektai-Solution-Pty-Ltd/ekt-frappe | 8d60ca017e9ec96dd821714447b7e053815eac26 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from . import __version__ as app_version
# --- App metadata consumed by Frappe's app/hook loader ---
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Frappe Technologies"
app_description = "Full stack web framework with Python, Javascript, MariaDB, Redis, Node"
app_icon = "octicon octicon-circuit-board"
app_color = "orange"
source_link = "https://github.com/Ektai-Solution-Pty-Ltd/ekt-frappe"
app_license = "MIT"
app_logo_url = '/assets/frappe/images/frappe-framework-logo.svg'
develop_version = '13.x.x-develop'
app_email = "info@frappe.io"
docs_app = "frappe_io"
translator_url = "https://translate.erpnext.com"
# Install lifecycle hooks (dotted paths resolved by the installer).
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"
# JS loaded only on specific desk pages.
page_js = {
    "setup-wizard": "public/js/frappe/setup_wizard.js"
}
# website
# Asset bundles injected into every desk page.
app_include_js = [
    "libs.bundle.js",
    "desk.bundle.js",
    "list.bundle.js",
    "form.bundle.js",
    "controls.bundle.js",
    "report.bundle.js",
]
app_include_css = [
    "desk.bundle.css",
    "report.bundle.css",
]
# Extra client scripts attached to particular DocType forms.
doctype_js = {
    "Web Page": "public/js/frappe/utils/web_template.js",
    "Website Settings": "public/js/frappe/utils/web_template.js"
}
# Assets injected into public website pages.
web_include_js = [
    "website_script.js"
]
web_include_css = []
email_css = ['email.bundle.css']
# Dynamic website routes: pattern -> target page/DocType.
website_route_rules = [
    {"from_route": "/blog/<category>", "to_route": "Blog Post"},
    {"from_route": "/kb/<category>", "to_route": "Help Article"},
    {"from_route": "/newsletters", "to_route": "Newsletter"},
    {"from_route": "/profile", "to_route": "me"},
    {"from_route": "/app/<path:app_path>", "to_route": "app"},
]
# Legacy /desk URLs are redirected to /app (regex source -> target).
website_redirects = [
    {"source": r"/desk(.*)", "target": r"/app\1"},
]
base_template = "templates/base.html"
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
# DocTypes that accept replies appended from incoming email.
email_append_to = ["Event", "ToDo", "Communication"]
get_rooms = 'frappe.chat.doctype.chat_room.chat_room.get_rooms'
calendars = ["Event"]
leaderboards = "frappe.desk.leaderboard.get_leaderboards"
# login
# Handlers invoked when a user session starts / ends.
on_session_creation = [
    "frappe.core.doctype.activity_log.feed.login_feed",
    "frappe.core.doctype.user.user.notify_admin_access_to_system_manager"
]
on_logout = "frappe.core.doctype.session_default_settings.session_default_settings.clear_session_defaults"
# permissions
# Per-DocType SQL condition builders used when listing records.
permission_query_conditions = {
    "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
    "ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
    "User": "frappe.core.doctype.user.user.get_permission_query_conditions",
    "Dashboard Settings": "frappe.desk.doctype.dashboard_settings.dashboard_settings.get_permission_query_conditions",
    "Notification Log": "frappe.desk.doctype.notification_log.notification_log.get_permission_query_conditions",
    "Dashboard": "frappe.desk.doctype.dashboard.dashboard.get_permission_query_conditions",
    "Dashboard Chart": "frappe.desk.doctype.dashboard_chart.dashboard_chart.get_permission_query_conditions",
    "Number Card": "frappe.desk.doctype.number_card.number_card.get_permission_query_conditions",
    "Notification Settings": "frappe.desk.doctype.notification_settings.notification_settings.get_permission_query_conditions",
    "Note": "frappe.desk.doctype.note.note.get_permission_query_conditions",
    "Kanban Board": "frappe.desk.doctype.kanban_board.kanban_board.get_permission_query_conditions",
    "Contact": "frappe.contacts.address_and_contact.get_permission_query_conditions_for_contact",
    "Address": "frappe.contacts.address_and_contact.get_permission_query_conditions_for_address",
    "Communication": "frappe.core.doctype.communication.communication.get_permission_query_conditions_for_communication",
    "Workflow Action": "frappe.workflow.doctype.workflow_action.workflow_action.get_permission_query_conditions",
    "Prepared Report": "frappe.core.doctype.prepared_report.prepared_report.get_permission_query_condition"
}
# Per-document permission checks (single record access).
has_permission = {
    "Event": "frappe.desk.doctype.event.event.has_permission",
    "ToDo": "frappe.desk.doctype.todo.todo.has_permission",
    "User": "frappe.core.doctype.user.user.has_permission",
    "Note": "frappe.desk.doctype.note.note.has_permission",
    "Dashboard Chart": "frappe.desk.doctype.dashboard_chart.dashboard_chart.has_permission",
    "Number Card": "frappe.desk.doctype.number_card.number_card.has_permission",
    "Kanban Board": "frappe.desk.doctype.kanban_board.kanban_board.has_permission",
    "Contact": "frappe.contacts.address_and_contact.has_permission",
    "Address": "frappe.contacts.address_and_contact.has_permission",
    "Communication": "frappe.core.doctype.communication.communication.has_permission",
    "Workflow Action": "frappe.workflow.doctype.workflow_action.workflow_action.has_permission",
    "File": "frappe.core.doctype.file.file.has_permission",
    "Prepared Report": "frappe.core.doctype.prepared_report.prepared_report.has_permission"
}
has_website_permission = {
    "Address": "frappe.contacts.doctype.address.address.has_website_permission"
}
# Extra globals/filters registered into the Jinja environment.
jinja = {
    "methods": "frappe.utils.jinja_globals",
    "filters": [
        "frappe.utils.data.global_date_format",
        "frappe.utils.markdown",
        "frappe.website.utils.get_shade",
        "frappe.website.utils.abs_url",
    ]
}
standard_queries = {
    "User": "frappe.core.doctype.user.user.user_query"
}
# Document lifecycle hooks. Keys are DocType names ("*" applies to all
# DocTypes); values map event names to one or more dotted-path handlers.
doc_events = {
    "*": {
        "after_insert": [
            "frappe.event_streaming.doctype.event_update_log.event_update_log.notify_consumers"
        ],
        "on_update": [
            "frappe.desk.notifications.clear_doctype_notifications",
            "frappe.core.doctype.activity_log.feed.update_feed",
            "frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions",
            "frappe.automation.doctype.assignment_rule.assignment_rule.apply",
            "frappe.core.doctype.file.file.attach_files_to_document",
            "frappe.event_streaming.doctype.event_update_log.event_update_log.notify_consumers",
            "frappe.automation.doctype.assignment_rule.assignment_rule.update_due_date",
            "frappe.core.doctype.user_type.user_type.apply_permissions_for_non_standard_user_type"
        ],
        "after_rename": "frappe.desk.notifications.clear_doctype_notifications",
        "on_cancel": [
            "frappe.desk.notifications.clear_doctype_notifications",
            "frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions"
        ],
        "on_trash": [
            "frappe.desk.notifications.clear_doctype_notifications",
            "frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions",
            "frappe.event_streaming.doctype.event_update_log.event_update_log.notify_consumers"
        ],
        "on_change": [
            "frappe.social.doctype.energy_point_rule.energy_point_rule.process_energy_points",
            "frappe.automation.doctype.milestone_tracker.milestone_tracker.evaluate_milestone"
        ]
    },
    # Google Calendar / Google Contacts two-way sync hooks.
    "Event": {
        "after_insert": "frappe.integrations.doctype.google_calendar.google_calendar.insert_event_in_google_calendar",
        "on_update": "frappe.integrations.doctype.google_calendar.google_calendar.update_event_in_google_calendar",
        "on_trash": "frappe.integrations.doctype.google_calendar.google_calendar.delete_event_from_google_calendar",
    },
    "Contact": {
        "after_insert": "frappe.integrations.doctype.google_contacts.google_contacts.insert_contacts_to_google_contacts",
        "on_update": "frappe.integrations.doctype.google_contacts.google_contacts.update_contacts_to_google_contacts",
    },
    # NOTE(review): "restriced" below matches the actual function name in
    # frappe.cache_manager (typo preserved upstream) -- do not "fix" the string.
    "DocType": {
        "after_insert": "frappe.cache_manager.build_domain_restriced_doctype_cache",
        "after_save": "frappe.cache_manager.build_domain_restriced_doctype_cache",
    },
    "Page": {
        "after_insert": "frappe.cache_manager.build_domain_restriced_page_cache",
        "after_save": "frappe.cache_manager.build_domain_restriced_page_cache",
    }
}
# Background jobs, keyed by frequency bucket (the "_long" variants run on the
# long worker queue); "cron" maps cron expressions to handler lists.
scheduler_events = {
    "cron": {
        "0/15 * * * *": [
            "frappe.oauth.delete_oauth2_data",
            "frappe.website.doctype.web_page.web_page.check_publish_status",
            "frappe.twofactor.delete_all_barcodes_for_users"
        ]
    },
    "all": [
        "frappe.email.queue.flush",
        "frappe.email.doctype.email_account.email_account.pull",
        "frappe.email.doctype.email_account.email_account.notify_unreplied",
        "frappe.integrations.doctype.razorpay_settings.razorpay_settings.capture_payment",
        'frappe.utils.global_search.sync_global_search',
        "frappe.monitor.flush",
    ],
    "hourly": [
        "frappe.model.utils.link_count.update_link_count",
        'frappe.model.utils.user_settings.sync_user_settings',
        "frappe.utils.error.collect_error_snapshots",
        "frappe.desk.page.backups.backups.delete_downloadable_backups",
        "frappe.deferred_insert.save_to_db",
        "frappe.desk.form.document_follow.send_hourly_updates",
        "frappe.integrations.doctype.google_calendar.google_calendar.sync",
        "frappe.email.doctype.newsletter.newsletter.send_scheduled_email"
    ],
    "daily": [
        "frappe.email.queue.set_expiry_for_email_queue",
        "frappe.desk.notifications.clear_notifications",
        "frappe.core.doctype.error_log.error_log.set_old_logs_as_seen",
        "frappe.desk.doctype.event.event.send_event_digest",
        "frappe.sessions.clear_expired_sessions",
        "frappe.email.doctype.notification.notification.trigger_daily_alerts",
        "frappe.utils.scheduler.restrict_scheduler_events_if_dormant",
        "frappe.email.doctype.auto_email_report.auto_email_report.send_daily",
        "frappe.website.doctype.personal_data_deletion_request.personal_data_deletion_request.remove_unverified_record",
        "frappe.desk.form.document_follow.send_daily_updates",
        "frappe.social.doctype.energy_point_settings.energy_point_settings.allocate_review_points",
        "frappe.integrations.doctype.google_contacts.google_contacts.sync",
        "frappe.automation.doctype.auto_repeat.auto_repeat.make_auto_repeat_entry",
        "frappe.automation.doctype.auto_repeat.auto_repeat.set_auto_repeat_as_completed",
        "frappe.email.doctype.unhandled_email.unhandled_email.remove_old_unhandled_emails",
        "frappe.core.doctype.prepared_report.prepared_report.delete_expired_prepared_reports",
        "frappe.core.doctype.log_settings.log_settings.run_log_clean_up"
    ],
    "daily_long": [
        "frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backups_daily",
        "frappe.utils.change_log.check_for_update",
        "frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_daily",
        "frappe.integrations.doctype.google_drive.google_drive.daily_backup"
    ],
    "weekly_long": [
        "frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backups_weekly",
        "frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_weekly",
        "frappe.desk.doctype.route_history.route_history.flush_old_route_records",
        "frappe.desk.form.document_follow.send_weekly_updates",
        "frappe.social.doctype.energy_point_log.energy_point_log.send_weekly_summary",
        "frappe.integrations.doctype.google_drive.google_drive.weekly_backup"
    ],
    "monthly": [
        "frappe.email.doctype.auto_email_report.auto_email_report.send_monthly",
        "frappe.social.doctype.energy_point_log.energy_point_log.send_monthly_summary"
    ],
    "monthly_long": [
        "frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_monthly"
    ]
}
# Extra translation dictionaries merged in for specific doctypes/pages.
get_translated_dict = {
    ("doctype", "System Settings"): "frappe.geo.country_info.get_translated_dict",
    ("page", "setup-wizard"): "frappe.geo.country_info.get_translated_dict"
}
# Named UI sound effects available to the desk client.
sounds = [
    {"name": "email", "src": "/assets/frappe/sounds/email.mp3", "volume": 0.1},
    {"name": "submit", "src": "/assets/frappe/sounds/submit.mp3", "volume": 0.1},
    {"name": "cancel", "src": "/assets/frappe/sounds/cancel.mp3", "volume": 0.1},
    {"name": "delete", "src": "/assets/frappe/sounds/delete.mp3", "volume": 0.05},
    {"name": "click", "src": "/assets/frappe/sounds/click.mp3", "volume": 0.05},
    {"name": "error", "src": "/assets/frappe/sounds/error.mp3", "volume": 0.1},
    {"name": "alert", "src": "/assets/frappe/sounds/alert.mp3", "volume": 0.2},
    # {"name": "chime", "src": "/assets/frappe/sounds/chime.mp3"},
    # frappe.chat sounds
    { "name": "chat-message", "src": "/assets/frappe/sounds/chat-message.mp3", "volume": 0.1 },
    { "name": "chat-notification", "src": "/assets/frappe/sounds/chat-notification.mp3", "volume": 0.1 }
    # frappe.chat sounds
]
bot_parsers = [
    'frappe.utils.bot.ShowNotificationBot',
    'frappe.utils.bot.GetOpenListBot',
    'frappe.utils.bot.ListBot',
    'frappe.utils.bot.FindBot',
    'frappe.utils.bot.CountBot'
]
# Handlers called when the setup wizard raises an exception.
setup_wizard_exception = [
    "frappe.desk.page.setup_wizard.setup_wizard.email_setup_wizard_exception",
    "frappe.desk.page.setup_wizard.setup_wizard.log_setup_wizard_exception"
]
before_migrate = ['frappe.patches.v11_0.sync_user_permission_doctype_before_migrate.execute']
after_migrate = ['frappe.website.doctype.website_theme.website_theme.after_migrate']
# Two-factor-auth delivery methods offered to users.
otp_methods = ['OTP App','Email','SMS']
# NOTE(review): presumably consumed by the personal-data (privacy) tooling --
# each entry names a DocType holding user data; "redact_fields" are blanked,
# "filter_by" selects the user's records, "strict"/"partial" tune matching.
# Confirm against frappe's personal data deletion/download implementation.
user_data_fields = [
    {"doctype": "Access Log", "strict": True},
    {"doctype": "Activity Log", "strict": True},
    {"doctype": "Comment", "strict": True},
    {
        "doctype": "Contact",
        "filter_by": "email_id",
        "redact_fields": ["first_name", "last_name", "phone", "mobile_no"],
        "rename": True,
    },
    {"doctype": "Contact Email", "filter_by": "email_id"},
    {
        "doctype": "Address",
        "filter_by": "email_id",
        "redact_fields": [
            "address_title",
            "address_line1",
            "address_line2",
            "city",
            "county",
            "state",
            "pincode",
            "phone",
            "fax",
        ],
    },
    {
        "doctype": "Communication",
        "filter_by": "sender",
        "redact_fields": ["sender_full_name", "phone_no", "content"],
    },
    {"doctype": "Communication", "filter_by": "recipients"},
    {"doctype": "Email Group Member", "filter_by": "email"},
    {"doctype": "Email Unsubscribe", "filter_by": "email", "partial": True},
    {"doctype": "Email Queue", "filter_by": "sender"},
    {"doctype": "Email Queue Recipient", "filter_by": "recipient"},
    {
        "doctype": "File",
        "filter_by": "attached_to_name",
        "redact_fields": ["file_name", "file_url"],
    },
    {
        "doctype": "User",
        "filter_by": "name",
        "redact_fields": [
            "email",
            "username",
            "first_name",
            "middle_name",
            "last_name",
            "full_name",
            "birth_date",
            "user_image",
            "phone",
            "mobile_no",
            "location",
            "banner_image",
            "interest",
            "bio",
            "email_signature",
        ],
    },
    {"doctype": "Version", "strict": True},
]
# DocTypes indexed for global search, grouped by domain ("Default" = always).
global_search_doctypes = {
    "Default": [
        {"doctype": "Contact"},
        {"doctype": "Address"},
        {"doctype": "ToDo"},
        {"doctype": "Note"},
        {"doctype": "Event"},
        {"doctype": "Blog Post"},
        {"doctype": "Dashboard"},
        {"doctype": "Country"},
        {"doctype": "Currency"},
        {"doctype": "Newsletter"},
        {"doctype": "Letter Head"},
        {"doctype": "Workflow"},
        {"doctype": "Web Page"},
        {"doctype": "Web Form"}
    ]
}
| 37.489691 | 124 | 0.760965 |
acea1a9a340ab03e1dc4df399161ff7a4744ce98 | 1,060 | py | Python | superai/utils/apikey_manager.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 1 | 2020-12-03T18:18:16.000Z | 2020-12-03T18:18:16.000Z | superai/utils/apikey_manager.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 13 | 2021-02-22T18:27:58.000Z | 2022-02-10T08:14:10.000Z | superai/utils/apikey_manager.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 1 | 2021-04-27T12:38:47.000Z | 2021-04-27T12:38:47.000Z | import os
import warnings
from superai.config import add_secret_settings, get_config_dir, remove_secret_settings, settings
from superai.log import logger
# Directory that holds the superai config/secrets files.
BASE_FOLDER = get_config_dir()
# Module-level logger for this helper module.
log = logger.get_logger(__name__)
def _save_api_key_secrets(api_key: str, username: str = None):
    """Write the api key (and optional username) into the current env's secret settings."""
    env = settings.current_env
    payload = {env: {"user": {"api_key": api_key, "username": username}}}
    add_secret_settings(payload)
    log.info(f"Api key added to env {env}")
def save_api_key(api_key: str, username: str = None):
    """Public wrapper: persist the api key for the current environment."""
    _save_api_key_secrets(api_key, username=username)
def load_api_key() -> str:
    """Return the configured api key, warning the caller when it is missing."""
    key = settings.get("user", {}).get("api_key")
    if key:
        return key
    warnings.warn("Api key is not initialized. Run superai login --username <email> to retrieve your api key")
    return key
def remove_api_key():
    """Drop both stored user secrets (api key and username) for the current environment."""
    env = settings.current_env
    for field in ("api_key", "username"):
        remove_secret_settings(f"{env}__user__{field}")
    log.debug(f"Api key deleted from env {env}")
| 27.894737 | 114 | 0.727358 |
acea1bacf974c777c9498cbe5c8afb4b641a8698 | 14,641 | py | Python | sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/aio/operations/_services_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/aio/operations/_services_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/aio/operations/_services_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServicesOperations:
    """ServicesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    NOTE: this class is produced by the AutoRest code generator (see the file
    header); prefer regenerating over hand-editing.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.deploymentmanager.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client and (de)serializers are shared with the parent
        # service client; this operation group owns no other state.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def create_or_update(
        self,
        resource_group_name: str,
        service_topology_name: str,
        service_name: str,
        service_info: "_models.ServiceResource",
        **kwargs: Any
    ) -> "_models.ServiceResource":
        """Creates or updates a service in the service topology.
        Synchronously creates a new service or updates an existing service.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param service_topology_name: The name of the service topology .
        :type service_topology_name: str
        :param service_name: The name of the service resource.
        :type service_name: str
        :param service_info: The service object.
        :type service_info: ~azure.mgmt.deploymentmanager.models.ServiceResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceResource, or the result of cls(response)
        :rtype: ~azure.mgmt.deploymentmanager.models.ServiceResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceResource"]
        # Default ARM error mapping; callers may extend/override it by passing
        # an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        # The relative URL template lives on the method's 'metadata' attribute
        # (attached below, after the method body).
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(service_info, 'ServiceResource')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # This synchronous PUT only signals success with 201 Created.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ServiceResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services/{serviceName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        service_topology_name: str,
        service_name: str,
        **kwargs: Any
    ) -> "_models.ServiceResource":
        """Gets the service.
        Gets the service.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param service_topology_name: The name of the service topology .
        :type service_topology_name: str
        :param service_name: The name of the service resource.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceResource, or the result of cls(response)
        :rtype: ~azure.mgmt.deploymentmanager.models.ServiceResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 OK is the only accepted status for a GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ServiceResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services/{serviceName}'}  # type: ignore
    async def delete(
        self,
        resource_group_name: str,
        service_topology_name: str,
        service_name: str,
        **kwargs: Any
    ) -> None:
        """Deletes the service.
        Deletes the service.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param service_topology_name: The name of the service topology .
        :type service_topology_name: str
        :param service_name: The name of the service resource.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Both 200 OK and 204 No Content count as a successful delete.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services/{serviceName}'}  # type: ignore
    async def list(
        self,
        resource_group_name: str,
        service_topology_name: str,
        **kwargs: Any
    ) -> List["_models.ServiceResource"]:
        """Lists the services in the service topology.
        Lists the services in the service topology.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param service_topology_name: The name of the service topology .
        :type service_topology_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ServiceResource, or the result of cls(response)
        :rtype: list[~azure.mgmt.deploymentmanager.models.ServiceResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.ServiceResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Deserialized as a plain list -- this endpoint returns no paging
        # envelope / nextLink.
        deserialized = self._deserialize('[ServiceResource]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services'}  # type: ignore
| 48.966555 | 226 | 0.678915 |
acea1cf0fca0a40feff85018ba92e13b0bced2cf | 13,641 | py | Python | hoomd/md/dihedral.py | EdwardZX/hoomd-blue | c87ac3f136534e8a80359a2faceeb730f445da21 | [
"BSD-3-Clause"
] | 204 | 2018-11-26T21:15:14.000Z | 2022-03-31T17:17:21.000Z | hoomd/md/dihedral.py | EdwardZX/hoomd-blue | c87ac3f136534e8a80359a2faceeb730f445da21 | [
"BSD-3-Clause"
] | 769 | 2019-02-15T08:58:04.000Z | 2022-03-31T17:36:48.000Z | hoomd/md/dihedral.py | YMWani/hoomd-blue | e574b49f0c2c6df3a1eac9cbb86fe612f1ee4c18 | [
"BSD-3-Clause"
] | 91 | 2018-10-04T21:07:46.000Z | 2022-03-26T02:44:11.000Z | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
r"""Dihedral potentials.
Dihedrals add forces between specified quadruplets of particles and used to
model rotation about chemical bonds.
By themselves, dihedrals that have been specified in an input file do nothing.
Only when you specify an dihedral force (e.g. `dihedral.Harmonic`), are forces
actually calculated between the listed particles.
Important: There are multiple conventions pertaining to the dihedral angle (phi)
in the literature. HOOMD utilizes the convention shown in the following figure,
where vectors are defined from the central particles to the outer particles.
These vectors correspond to a stretched state (:math:`\phi = 180` degrees) when
they are anti-parallel and a compact state (:math:`\phi = 0`) when they are
parallel.
.. image:: dihedral-angle-definition.png
:width: 400 px
:align: center
:alt: Dihedral angle definition
"""
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import force
from hoomd.md.force import Force
from hoomd.data.parameterdicts import TypeParameterDict
from hoomd.data.typeparam import TypeParameter
import hoomd
import math
class Dihedral(Force):
    """Constructs the dihedral bond potential.
    Note:
        :py:class:`Dihedral` is the base class for all dihedral potentials.
        Users should not instantiate this class directly.
    """
    def _attach(self):
        # Warn when no dihedrals exist globally; the force would be a no-op.
        sys_def = self._simulation.state._cpp_sys_def
        if sys_def.getDihedralData().getNGlobal() == 0:
            self._simulation.device._cpp_msg.warning(
                "No dihedrals are defined.\n")
        # Resolve the C++ mirror class: plain name on CPU devices, with a
        # "GPU" suffix otherwise, then instantiate it.
        suffix = "" if isinstance(self._simulation.device,
                                  hoomd.device.CPU) else "GPU"
        cpp_class = getattr(_md, self._cpp_class_name + suffix)
        self._cpp_obj = cpp_class(sys_def)
        super()._attach()
class Harmonic(Dihedral):
    r"""Harmonic dihedral potential.

    :py:class:`Harmonic` applies the torsional potential energy

    .. math::
        V(\phi) = \frac{1}{2}k \left( 1 + d \cos\left(n \phi - \phi_0 \right)
        \right)

    to every defined dihedral quadruplet of particles in the simulation,
    where :math:`\phi` is the angle between the two sides of the dihedral.

    Attributes:
        params (`TypeParameter` [``dihedral type``, `dict`]):
            Per-dihedral-type parameters. The dictionary has the
            following keys:

            * ``k`` (`float`, **required**) - potential constant :math:`k`
              :math:`[\mathrm{energy}]`
            * ``d`` (`float`, **required**) - sign factor :math:`d`
            * ``n`` (`int`, **required**) - angle multiplicity factor :math:`n`
            * ``phi0`` (`float`, **required**) - phase shift :math:`\phi_0`
              :math:`[\mathrm{radians}]`

    Examples::

        harmonic = dihedral.Harmonic()
        harmonic.params['polymer'] = dict(k=3.0, d=-1, n=3, phi0=0)
        harmonic.params['backbone'] = dict(k=100.0, d=1, n=4, phi0=math.pi/2)
    """
    _cpp_class_name = "HarmonicDihedralForceCompute"

    def __init__(self):
        # one (k, d, n, phi0) record per dihedral type
        spec = TypeParameterDict(k=float, d=float, n=int, phi0=float,
                                 len_keys=1)
        self._add_typeparam(TypeParameter('params', 'dihedral_types', spec))
def _table_eval(theta, V, T, width):
dth = (2 * math.pi) / float(width - 1)
i = int(round((theta + math.pi) / dth))
return (V[i], T[i])
class table(force._force):  # noqa
    r"""Tabulated dihedral potential.

    Args:
        width (int): Number of points to use to interpolate V and T (see
            documentation above)
        name (str): Name of the force instance

    :py:class:`table` specifies that a tabulated dihedral force should be
    applied to every defined dihedral.

    :math:`T_{\mathrm{user}}(\theta)` and :math:`V_{\mathrm{user}}(\theta)` are
    evaluated on *width* grid points between :math:`-\pi` and :math:`\pi`.
    Values are interpolated linearly between grid points. For correctness, you
    must specify the derivative of the potential with respect to the dihedral
    angle, defined by: :math:`T = -\frac{\partial V}{\partial \theta}`.

    Parameters:
    - :math:`T_{\mathrm{user}}(\theta)` and :math:`V_{\mathrm{user}} (\theta)` -
      evaluated by ``func`` (see example)
    - coefficients passed to ``func`` - ``coeff`` (see example)

    .. rubric:: Set table from a given function

    When you have a functional form for V and T, you can enter that directly
    into python. :py:class:`table` will evaluate the given function over *width*
    points between :math:`-\pi` and :math:`\pi` and use the resulting values in
    the table::

        def harmonic(theta, kappa, theta0):
            V = 0.5 * kappa * (theta-theta0)**2;
            F = -kappa*(theta-theta0);
            return (V, F)

        dtable = dihedral.table(width=1000)
        dtable.dihedral_coeff.set('dihedral1', func=harmonic,
                                  coeff=dict(kappa=330, theta_0=0.0))
        dtable.dihedral_coeff.set('dihedral2', func=harmonic,
                                  coeff=dict(kappa=30, theta_0=1.0))

    .. rubric:: Set a table from a file

    When you have no function for *V* or *T*, or you otherwise have the data
    listed in a file, dihedral.table can use the given values directly. You must
    first specify the number of rows in your tables when initializing
    :py:class:`table`. Then use :py:meth:`set_from_file()` to read the file.

        dtable = dihedral.table(width=1000)
        dtable.set_from_file('polymer', 'dihedral.dat')
    """

    def __init__(self, width, name=None):
        # initialize the base class
        force._force.__init__(self, name)

        # create the c++ mirror class, matching the device (CPU or GPU)
        if not hoomd.context.current.device.cpp_exec_conf.isCUDAEnabled():
            self.cpp_force = _md.TableDihedralForceCompute(
                hoomd.context.current.system_definition, int(width), self.name)
        else:
            self.cpp_force = _md.TableDihedralForceComputeGPU(
                hoomd.context.current.system_definition, int(width), self.name)

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name)

        # setup the coefficient matrix
        self.dihedral_coeff = coeff()  # noqa

        # stash the width for later use
        self.width = width

    def update_dihedral_table(self, atype, func, coeff):  # noqa
        """Evaluate ``func`` on the angular grid and upload the V/T tables
        for dihedral type index ``atype`` to the C++ compute."""
        # allocate arrays to store V and F
        Vtable = _hoomd.std_vector_scalar()
        Ttable = _hoomd.std_vector_scalar()

        # grid spacing over [-pi, pi] with self.width samples
        dth = 2.0 * math.pi / float(self.width - 1)

        # evaluate each point of the function
        for i in range(0, self.width):
            theta = -math.pi + dth * i
            (V, T) = func(theta, **coeff)

            # fill out the tables
            Vtable.append(V)
            Ttable.append(T)

        # pass the tables on to the underlying cpp compute
        self.cpp_force.setTable(atype, Vtable, Ttable)

    def update_coeffs(self):  # noqa
        """Validate coefficients and rebuild the tables for every
        dihedral type defined in the system."""
        # check that the dihedral coefficients are valid
        if not self.dihedral_coeff.verify(["func", "coeff"]):
            hoomd.context.current.device.cpp_msg.error(
                "Not all dihedral coefficients are set for dihedral.table\n")
            raise RuntimeError("Error updating dihedral coefficients")

        # collect the names of all dihedral types
        ntypes = hoomd.context.current.system_definition.getDihedralData(
        ).getNTypes()
        type_list = []
        for i in range(0, ntypes):
            type_list.append(hoomd.context.current.system_definition
                             .getDihedralData().getNameByType(i))

        # loop through all of the unique type dihedrals and evaluate the table
        for i in range(0, ntypes):
            func = self.dihedral_coeff.get(type_list[i], "func")
            coeff = self.dihedral_coeff.get(type_list[i], "coeff")
            self.update_dihedral_table(i, func, coeff)

    def set_from_file(self, dihedralname, filename):
        r"""Set a dihedral pair interaction from a file.

        Args:
            dihedralname (str): Name of dihedral
            filename (str): Name of the file to read

        The provided file specifies V and F at equally spaced theta values.

        Example::

            #t V T
            -3.141592653589793 2.0 -3.0
            -1.5707963267948966 3.0 -4.0
            0.0 2.0 -3.0
            1.5707963267948966 3.0 -4.0
            3.141592653589793 2.0 -3.0

        Note:
            The theta values are not used by the code. It is assumed that a
            table that has N rows will start at :math:`-\pi`, end at :math:`\pi`
            and that :math:`\delta \theta = 2\pi/(N-1)`. The table is read
            directly into the grid points used to evaluate
            :math:`T_{\mathrm{user}}(\theta)` and
            :math:`V_{\mathrm{user}}(\theta)`.

        Raises:
            RuntimeError: if the file is malformed (wrong column count,
                wrong row count, or unevenly spaced theta values).
        """
        theta_table = []
        V_table = []
        T_table = []

        # 'with' guarantees the handle is closed even when a parse error is
        # raised part-way through (the original leaked the open file, and
        # also shadowed the handle 'f' in the comprehension below)
        with open(filename) as table_file:
            for line in table_file:
                line = line.strip()

                # skip blank lines and comment lines (a blank line previously
                # raised IndexError on line[0])
                if not line or line[0] == '#':
                    continue

                # split out the columns
                cols = line.split()
                values = [float(col) for col in cols]

                # validate the input
                if len(values) != 3:
                    hoomd.context.current.device.cpp_msg.error(
                        "dihedral.table: file must have exactly 3 columns\n")
                    raise RuntimeError("Error reading table file")

                # append to the tables
                theta_table.append(values[0])
                V_table.append(values[1])
                T_table.append(values[2])

        # validate input
        if self.width != len(T_table):
            hoomd.context.current.device.cpp_msg.error(
                "dihedral.table: file must have exactly " + str(self.width)
                + " rows\n")
            raise RuntimeError("Error reading table file")

        # check for even spacing
        dth = 2.0 * math.pi / float(self.width - 1)
        for i in range(0, self.width):
            theta = -math.pi + dth * i
            if math.fabs(theta - theta_table[i]) > 1e-3:
                hoomd.context.current.device.cpp_msg.error(
                    "dihedral.table: theta must be monotonically increasing and"
                    "evenly spaced, going from -pi to pi\n")
                hoomd.context.current.device.cpp_msg.error("row: " + str(i)
                                                           + " expected: "
                                                           + str(theta)
                                                           + " got: "
                                                           + str(theta_table[i])
                                                           + "\n")
                # abort: the original reported the error but carried on and
                # silently installed the malformed table
                raise RuntimeError("Error reading table file")

        self.dihedral_coeff.set(dihedralname,
                                func=_table_eval,
                                coeff=dict(V=V_table,
                                           T=T_table,
                                           width=self.width))

    def get_metadata(self):  # noqa
        data = force._force.get_metadata(self)

        # make sure coefficients are up-to-date
        self.update_coeffs()

        data['dihedral_coeff'] = self.dihedral_coeff
        return data
class OPLS(Dihedral):
    r"""OPLS dihedral force.

    :py:class:`OPLS` applies an OPLS-style dihedral potential energy

    .. math::
        V(\phi) = \frac{1}{2}k_1 \left( 1 + \cos\left(\phi \right) \right) +
                  \frac{1}{2}k_2 \left( 1 - \cos\left(2 \phi \right) \right) +
                  \frac{1}{2}k_3 \left( 1 + \cos\left(3 \phi \right) \right) +
                  \frac{1}{2}k_4 \left( 1 - \cos\left(4 \phi \right) \right)

    to every defined dihedral, where :math:`\phi` is the angle between two
    sides of the dihedral and :math:`k_n` are the force coefficients in the
    Fourier series (in energy units).

    Attributes:
        params (`TypeParameter` [``dihedral type``, `dict`]):
            Per-dihedral-type parameters. The dictionary has the
            following keys:

            * ``k1`` (`float`, **required**) - force constant of the
              first term :math:`[\mathrm{energy}]`
            * ``k2`` (`float`, **required**) - force constant of the
              second term :math:`[\mathrm{energy}]`
            * ``k3`` (`float`, **required**) - force constant of the
              third term :math:`[\mathrm{energy}]`
            * ``k4`` (`float`, **required**) - force constant of the
              fourth term :math:`[\mathrm{energy}]`

    Examples::

        opls = dihedral.OPLS()
        opls.params['backbone'] = dict(k1=1.0, k2=1.0, k3=1.0, k4=1.0)
    """
    _cpp_class_name = "OPLSDihedralForceCompute"

    def __init__(self):
        # one (k1..k4) Fourier-coefficient record per dihedral type
        fourier_spec = TypeParameterDict(k1=float,
                                         k2=float,
                                         k3=float,
                                         k4=float,
                                         len_keys=1)
        self._add_typeparam(
            TypeParameter('params', 'dihedral_types', fourier_spec))
| 36.96748 | 80 | 0.582802 |
acea1d8a739be54728f383700d58bcfffc38e277 | 24,603 | py | Python | pyqg/changed_f_beta_nk0_filter_tilde_newdomain2_sph/layered_model.py | wanyingkang/pyqg | ffcb48573a4a66d7c48f64c69734a567547e0962 | [
"MIT"
] | null | null | null | pyqg/changed_f_beta_nk0_filter_tilde_newdomain2_sph/layered_model.py | wanyingkang/pyqg | ffcb48573a4a66d7c48f64c69734a567547e0962 | [
"MIT"
] | null | null | null | pyqg/changed_f_beta_nk0_filter_tilde_newdomain2_sph/layered_model.py | wanyingkang/pyqg | ffcb48573a4a66d7c48f64c69734a567547e0962 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
from numpy import pi
from . import model
try:
import mkl
np.use_fastnumpy = True
except ImportError:
pass
try:
import pyfftw
pyfftw.interfaces.cache.enable()
except ImportError:
pass
class LayeredModel(model.Model):
    r"""Layered quasigeostrophic model.
    This model is meant to represent flows driven by baroclinic instability of a
    base-state shear. The potential vorticity anomalies qi are related to the
    streamfunction psii through
    .. math::
        {q_i} = \nabla^2\psi_i + \frac{f_0^2}{H_i} \left(\frac{\psi_{i-1}-
        \psi_i}{g'_{i-1}}- \frac{\psi_{i}-\psi_{i+1}}{g'_{i}}\right)\,,
        \qquad i = 2,\textsf{N}-1\,,
        {q_1} = \nabla^2\psi_1 + \frac{f_0^2}{H_1} \left(\frac{\psi_{2}-
        \psi_1}{g'_{1}}\right)\,, \qquad i =1\,,
        {q_\textsf{N}} = \nabla^2\psi_\textsf{N} +
        \frac{f_0^2}{H_\textsf{N}} \left(\frac{\psi_{\textsf{N}-1}-
        \psi_\textsf{N}}{g'_{\textsf{N}}}\right) + \frac{f_0}{H_\textsf{N}}h_b\,,
        \qquad i =\textsf{N}\,,
    where the reduced gravity, or buoyancy jump, is
    .. math::
        g'_i \equiv g \frac{\pt_{i+1}-\pt_i}{\pt_i}\,.
    The evolution equations are
    .. math::
        \,{q_{i}}_t + \mathsf{J}\left(\psi_i\,, q_i\right) + \textsf{Q}_y {\psi_i}_x
        - \textsf{Q}_x {\psi_i}_y = \text{ssd} -
        r_{ek} \delta_{i\textsf{N}} \nabla^2 \psi_i\,, \qquad i = 1,\textsf{N}\,,
    where the mean potential vorticity gradients are
    .. math::
        \textsf{Q}_x = \textsf{S}\textsf{V}\,,
    and
    .. math::
        \textsf{Q}_y = \beta\,\textsf{I} - \textsf{S}\textsf{U}\,\,,
    where S is the stretching matrix, I is the identity matrix,
    and the background velocity is
    :math:`\vec{\textsf{V}}(z) = \left(\textsf{U},\textsf{V}\right)`.
    """
    def __init__(
        self,
        g = 9.81,
        beta=1.5e-11, #? gradient of coriolis parameter
        nz = 4, # number of layers
        rd=15000.0, # deformation radius
        H = None, # layer thickness. If a scalar number, then copy the same H for all layers
        U=None, # zonal base state flow. If None, use U=0 for all layers
        V=None, # meridional base state flow. If None, use V=0 for all layers
        pt = None, # potential temperature
        c2 = None, # vertical-structure coefficient used in the stretching matrix; presumably 1/(Hi*gpi)-like units -- TODO confirm
        delta = None, # only used for nz=2, can leave blanck if use multi-layer model
        H0 = 7750, # standard atm height scale
        R = 287., # gas constant [J kg^-1 K^-1]
        kappa = 2./7., # R/cp exponent
        tau = 40, # time scale for restoring terms, units in day
        **kwargs
    ):
        """
        Parameters
        ----------
        nz : integer number
            Number of layers (> 1)
        beta : number
            Gradient of coriolis parameter. Units: meters :sup:`-1`
            seconds :sup:`-1`
        rd : number
            Deformation radius. Units: meters. Only necessary for
            the two-layer (nz=2) case.
        delta : number
            Layer thickness ratio (H1/H2). Only necessary for the
            two-layer (nz=2) case. Unitless.
        U : list of size nz
            Base state zonal velocity. Units: meters s :sup:`-1`
        V : array of size nz
            Base state meridional velocity. Units: meters s :sup:`-1`
        H : array of size nz
            Layer thickness. Units: meters
        pt: array of size nz.
            Layer Potential Temperature. Units: Kelvin
        """
        # physical
        if U is None:
            U=np.zeros([nz])
        if V is None:
            V=np.zeros([nz])
        # NOTE(review): len() on a 0-d np.array raises TypeError, so a true
        # Python scalar H would fail here; pass a length-1 sequence -- confirm.
        if len(np.array(H))==1 and nz!=1:
            H=np.tile(np.array(H),nz)
        self.nz = nz
        self.g = g
        self.beta = beta
        self.rd = rd
        self.delta = delta
        self.R = R
        self.kappa = kappa
        self.tau = tau
        self.Ubg = np.array(U)
        self.Vbg = np.array(V)
        self.Hi = np.array(H)
        self.pti = np.array(pt)
        self.c2 = np.array(c2)
        self.H0 = H0
        super(LayeredModel, self).__init__(nz=nz, **kwargs)
        self.vertical_modes()
        print("nx:{}".format(self.nx))
        print("ny:{}".format(self.ny))
        print("nz:{}".format(self.nz))
    ### PRIVATE METHODS - not meant to be called by user ###
    def _initialize_stretching_matrix(self):
        """ Set up the stretching matrix """
        self.S = np.zeros((self.nz, self.nz))
        if (self.nz==2) and (self.rd) and (self.delta):
            # classic two-layer shortcut parameterized by rd and delta
            self.del1 = self.delta/(self.delta+1.)
            self.del2 = (self.delta+1.)**-1
            self.Us = self.Ubg[0]-self.Ubg[1]
            self.F1 = self.rd**-2 / (1.+self.delta)
            self.F2 = self.delta*self.F1
            self.S[0,0], self.S[0,1] = -self.F1, self.F1
            self.S[1,0], self.S[1,1] = self.F2, -self.F2
        else:
            for i in range(self.nz):
                # Adding other statification terms by Wanying Kang @ Feb 14 2017
                # All following S element, the second half of expression terms
                # are added to represent stratification 1/H term.
                # Would still have terms represent boundary conditions at top and bottom.
                # q1 = q1 + (self.f*self.g/self.gpi[i]*(1-self.Hi[i]/self.H0/2))*(self.T1(x,y)/self.T0) ,i=0
                # qN = qN + (self.f*self.g/self.gpi[i]*(-1-self.Hi[i]/self.H0/2))*(self.TN(x,y)/self.T0) ,i=nz-1
                # delete the Hi terms at i=0 and i=nz-1 by assuming \psi_zz=0 at top and bottom
                # This assumption means vertical T gradient is zero. T = -f/R*\psi_{z^*}
                if i == 0:
                    # 1. assume \Psi_zz|_{top/bot}=0
                    #self.S[i,i] = (-self.f2/self.H0/self.gpi[i])
                    #self.S[i,i+1] = (self.f2/self.H0/self.gpi[i])
                    # 2. assume \Psi_z|_{out_of_range}=0, need to substract constant term to represent the constant temperature when invert \Psi.
                    # self.S[i,i] = (-self.f2/self.Hi[i]/self.gpi[i]-
                    #                self.f2/self.H0/self.gpi[i]/2.)
                    # self.S[i,i+1] = (self.f2/self.Hi[i]/self.gpi[i]+
                    #                self.f2/self.H0/self.gpi[i]/2.)
                    # 3. transform \Psi -> \tilde \Psi, use BC \Psi_zz|_{top/bot}=0
                    self.S[i,i] = -self.f2*self.c2[i]
                    # 4. transform \Psi -> \tilde \Psi, use BC \Psi_z|_{out_of_range}=0, need to substract constant term when invert \Psi.
                    #self.S[i,i] = -self.f2/self.Hi[i]/self.gpi[i]-self.f2*self.c2[i]
                    #self.S[i,i+1] = self.f2/self.Hi[i]/self.gpi[i]
                elif i == self.nz-1:
                    # 1.
                    #self.S[i,i] = (self.f2/self.H0/self.gpi[i-1])
                    #self.S[i,i-1] = (-self.f2/self.H0/self.gpi[i-1])
                    # 2.
                    # self.S[i,i] = (-self.f2/self.Hi[i]/self.gpi[i-1]+
                    #                self.f2/self.H0/self.gpi[i-1]/2.)
                    # self.S[i,i-1] = (self.f2/self.Hi[i]/self.gpi[i-1]-
                    #                self.f2/self.H0/self.gpi[i-1]/2.)
                    # 3.
                    self.S[i,i] = -self.f2*self.c2[i]
                    # 4.
                    #self.S[i,i] = -self.f2/self.Hi[i]/self.gpi[i-1]-self.f2*self.c2[i]
                    #self.S[i,i-1] = self.f2/self.Hi[i]/self.gpi[i-1]
                else:
                    # 1. or 2.
                    #self.S[i,i-1] = (self.f2/self.Hi[i]/self.gpi[i-1]-
                    #                 self.f2/self.H0/self.gpi[i-1]/2.)
                    #self.S[i,i] = (-(self.f2/self.Hi[i]/self.gpi[i] +
                    #                 self.f2/self.Hi[i]/self.gpi[i-1])-
                    #                (self.f2/self.H0/self.gpi[i]/2.-
                    #                 self.f2/self.H0/self.gpi[i-1]/2.))
                    #self.S[i,i+1] = (self.f2/self.Hi[i]/self.gpi[i]+
                    #                 self.f2/self.H0/self.gpi[i]/2.)
                    # 3. or 4.
                    self.S[i,i-1] = self.f2/self.Hi[i]/self.gpi[i-1]
                    self.S[i,i] = (-(self.f2/self.Hi[i]/self.gpi[i] +
                                     self.f2/self.Hi[i]/self.gpi[i-1])
                                   -self.f2*self.c2[i])
                    self.S[i,i+1] = self.f2/self.Hi[i]/self.gpi[i]
    def _initialize_background(self):
        """Set up background state (zonal flow and PV gradients)."""
        self.H = self.Hi.sum()
        if not (self.nz==2):
            #self.gpi = -self.g*(self.pti[1:]-self.pti[:-1])/self.pti[:-1]
            self.gpi = -(self.pti[1:]-self.pti[:-1])/self.H0*self.R*np.exp(-self.kappa/self.H0*np.asarray(self.z[:-1]))
            self.f2gpi = (self.f2/self.gpi)[:,np.newaxis,np.newaxis]
            assert self.gpi.size == self.nz-1, "Invalid size of gpi"
            assert np.all(self.gpi>0.), "Buoyancy jump has negative sign!"
            assert self.Hi.size == self.nz, self.logger.error('size of Hi does not' +
                    'match number of vertical levels nz')
            assert self.pti.size == self.nz, self.logger.error('size of pti does not' +
                    'match number of vertical levels nz')
            assert self.Ubg.size == self.nz, self.logger.error('size of Ubg does not' +
                    'match number of vertical levels nz')
            assert self.Vbg.size == self.nz, self.logger.error('size of Vbg does not' +
                    'match number of vertical levels nz')
        else:
            self.f2gpi = np.array(self.rd**-2 *
                (self.Hi[0]*self.Hi[1])/self.H)[np.newaxis,np.newaxis]
        ## Initialize stretching matrix
        self._initialize_stretching_matrix()
        ## the meridional PV gradients in each layer
        ## Original version
        #self.Qy = self.beta - np.dot(self.S,self.Ubg)
        #self.Qx = np.dot(self.S,self.Vbg)
        ## complex versions, multiplied by k, speeds up computations to precompute
        #self.ikQy = self.Qy[:,np.newaxis,np.newaxis]*1j*self.k
        #self.ilQx = self.Qx[:,np.newaxis,np.newaxis]*1j*self.l
        ## Set the meridional PV gradients in each layer
        # Wanying Kang add lat dependent on beta.
        # Qy is nz*nl*nl matrix, convolution matrix takes the nl*nl dimension
        # The kernel calculate _ikQy from Qy, instead of using ikQy here.
        # _ikQy is originally nz*nk matrix, different from original ikQy which is a nz*nl*nk matrix. After my modificatino, they are the same.
        # This ikQy is used in stability analysis in model.py
        #b_lat = np.asarray(self.coslat)**2.*(np.asarray(self.coslat)**2.-2.*np.asarray(self.sinlat)**2.)
        b_lat = np.asarray(self.coslat)**2.
        # flip sign on the second half of the meridional grid points
        b_lat[int(self.nl/2):,:] = -b_lat[int(self.nl/2):,:]
        b_lat1 = np.squeeze(b_lat[:,0])
        b_lat = np.tile(b_lat[np.newaxis,:,:], (self.nz,1,1))
        bh_lat = self.fft(b_lat)/(self.nl**2)/(self.nl)
        bh_lat = np.squeeze(bh_lat[0,:,0]) # uniform in x direction, so pick k=0
        #Cbh1 = (self.convmtx( bh_lat[:int(self.nl/2)] , self.nl ))[:int(self.nl/2),:]
        #Cbh2 = (self.convmtx( bh_lat[int(self.nl/2):] , self.nl ))[-int(self.nl/2):,:]
        #Cbh = np.concatenate( [Cbh1, Cbh2] , 0 )
        # build the spectral convolution matrix for the lat-dependent beta
        order = np.concatenate([range(int(self.nl/2),self.nl),range(0,int(self.nl/2))])
        Cbh_shift = self.convmtx( bh_lat[order] , self.nl )
        Cbh_shift = Cbh_shift[int(self.nl/2):-int(self.nl/2)+1,:]
        Cbh = Cbh_shift[order,:]
        Cbh = Cbh[:,order]
        # Test Wanying Kang's convolution
        #b_test1 = np.arange(self.nl)/2.
        #b_test = np.tile(b_test1[np.newaxis,:,np.newaxis], (self.nz,1,self.nx))
        #bh_test = self.fft(b_test)
        #bh_test1 = np.squeeze(bh_test[0,:,0])
        #b_result = b_test1*b_lat1
        #bh_result = np.dot(Cbh,bh_test1)
        #bh_result = self.ifft(np.tile(bh_result[np.newaxis,:,np.newaxis], (self.nz,1,self.nk)))
        #bh_result = np.squeeze(bh_result[0,:,0])
        #print(b_result)
        #print(bh_result)
        # real space version of Qy Qx:
        #self.Qy = np.tile(self.beta*b_lat1[np.newaxis,:],[self.nz,1]) - np.tile((np.dot(self.S,self.Ubg))[:,np.newaxis],[1,self.nl])
        #self.Qx = np.tile(np.dot(self.S,self.Vbg)[:,np.newaxis],[1,self.nl])
        # spectra space version of Qy Qx:
        self.Qy = np.tile(self.beta*Cbh[np.newaxis,:,:],[self.nz,1,1]) - np.tile((np.dot(self.S,self.Ubg))[:,np.newaxis,np.newaxis],[1,self.nl,self.nl])
        self.Qx = np.dot(self.S,self.Vbg)
        # complex versions, multiplied by k, speeds up computations to precompute
        # Wanying Kang: add lat dependent on beta. ikQy is nz*nl*nl*nk matrix
        self.ikQy = self.Qy[:,:,:,np.newaxis]*1j*self.kk[np.newaxis,np.newaxis,np.newaxis,:]
        self.ilQx = self.Qx[:,np.newaxis,np.newaxis]*1j*self.l #Original version
        ## lat-dependent restoring terms
        g_lat1 = (1.+50.*(1-np.tanh(7*self.sinlat1)))
        # multiply sinlat1*coslat1**2 like qM, sinlat1 will weaken the drag, so maybe not..
        g_lat = np.tile(g_lat1[np.newaxis,:,np.newaxis], (self.nz,1,self.nx))
        gh_lat = self.fft(g_lat)/(self.nl**2)/(self.nl)
        gh_lat = np.squeeze(gh_lat[0,:,0])
        Cgh_shift = self.convmtx( gh_lat[order] , self.nl )
        Cgh_shift = Cgh_shift[int(self.nl/2):-int(self.nl/2)+1,:]
        Cgh = Cgh_shift[order,:]
        Cgh = Cgh[:,order]
        # restoring coefficient gamma = (1/tau) * convolution matrix, tau in days
        self.gamma = np.tile(1./self.tau/86400.*Cgh[np.newaxis,:,:],[self.nz,1,1])
    # def _initialize_inversion_matrix(self):
    #     # Original Version
    #     a = np.ma.zeros((self.nz, self.nz, self.nl, self.nk), np.dtype('float64'))
    #
    #     if (self.nz==2):
    #         det_inv = np.ma.masked_equal(
    #                 ( (self.S[0,0]-self.wv2)*(self.S[1,1]-self.wv2) -\
    #                         self.S[0,1]*self.S[1,0] ), 0.)**-1
    #         a[0,0] = (self.S[1,1]-self.wv2)*det_inv
    #         a[0,1] = -self.S[0,1]*det_inv
    #         a[1,0] = -self.S[1,0]*det_inv
    #         a[1,1] = (self.S[0,0]-self.wv2)*det_inv
    #     else:
    #         I = np.eye(self.nz)[:,:,np.newaxis,np.newaxis]
    #         M = self.S[:,:,np.newaxis,np.newaxis]-I*self.wv2
    #         M[:,:,0,0] = np.nan # avoids singular matrix in inv()
    #         a = np.linalg.inv(M.T).T
    #         print(a[a!=0])
    #     self.a = np.ma.masked_invalid(a).filled(0.)
    def _initialize_inversion_matrix(self):
        """Build the PV -> streamfunction inversion operator ``self.a``.

        Shape is (nz, nz, nl, nl, nk0): with a latitude-dependent f the
        inversion couples meridional wavenumbers, so an extra nl dimension
        (a spectral convolution) appears relative to standard pyqg.
        """
        # Wanying Kang: Do convolution if f has lat-stucture as
        # f=f0*cos(lat)*sin(lat), f2=f0^2*cos^2(lat)*sin^2(lat)
        a = np.ma.zeros((self.nz, self.nz, self.nl, self.nl, self.nk0), np.dtype(np.complex128))
        if (self.nz==2):
            Ij = np.eye(self.nl)[np.newaxis,np.newaxis,:,:,np.newaxis]
            det_inv = np.ma.masked_equal(
                    ( (self.S[0,0]-self.wv2)*(self.S[1,1]-self.wv2) -\
                            self.S[0,1]*self.S[1,0] ), 0.)**-1
            for j in range(self.nl):
                a[0,0,j,j] = (self.S[1,1]-self.wv2)*det_inv
                a[0,1,j,j] = -self.S[0,1]*det_inv
                a[1,0,j,j] = -self.S[1,0]*det_inv
                a[1,1,j,j] = (self.S[0,0]-self.wv2)*det_inv
        else:
            Izl = np.multiply.outer(np.eye(self.nz),np.eye(self.nl))
            Iz = np.eye(self.nz)
            # Wanying Kang: Do convolution if f has lat-stucture as
            # f=f0*cos(lat)*sin(lat), f2=f0^2*cos^2(lat)*sin^2(lat)
            f_lat = np.asarray(self.coslat)**2.*np.asarray(self.sinlat)**2.
            f_lat = np.tile(f_lat[np.newaxis,:,:], (self.nz,1,1))
            fh_lat = self.fft(f_lat)/(self.nl**2)/(self.nl)
            fh_lat = np.squeeze(fh_lat[0,:,0]) # uniform in x direction, so pick k=0
            #Cfh1 = (self.convmtx( fh_lat[:int(self.nl/2)] , self.nl ))[:int(self.nl/2),:]
            #Cfh2 = (self.convmtx( fh_lat[int(self.nl/2):] , self.nl ))[-int(self.nl/2):,:]
            #Cfh = np.concatenate( [Cfh1, Cfh2] , 0 )
            #Cfh = np.eye(self.nl) # compare with non-lat dependent case
            order = np.concatenate([range(int(self.nl/2),self.nl),range(0,int(self.nl/2))])
            Cfh_shift = self.convmtx( fh_lat[order] , self.nl )
            Cfh_shift = Cfh_shift[int(self.nl/2):-int(self.nl/2)+1,:]
            Cfh = Cfh_shift[order,:]
            Cfh = Cfh[:,order]
            # Wanying Kang: Add spherical term -cos^2(lat)/sin(lat)*Psi_y/a
            sp_lat=-np.asarray(self.coslat1)**2./np.asarray(self.sinlat1)/self.re
            sp_lat = np.tile(sp_lat[np.newaxis,:,np.newaxis],(self.nz,1,self.nx))
            sph_lat = self.fft(sp_lat)/(self.nl**2)/(self.nl)
            sph_lat = np.squeeze(sph_lat[0,:,0])
            Csph_shift = self.convmtx( sph_lat[order] , self.nl )
            Csph_shift = Csph_shift[int(self.nl/2):-int(self.nl/2)+1,:]
            Csph = Csph_shift[order,:]
            Csph = Csph[:,order]
            Csph = Csph*1j*self.ll
            # Wanying Kang: Make up poisson operator, M
            M =( (np.multiply.outer(self.S,Cfh))[:,:,:,:,np.newaxis]
                    -Izl[:,:,:,:,np.newaxis]*self.wv2[np.newaxis,np.newaxis,np.newaxis,:,:]
                    + Izl[:,:,:,:,np.newaxis]*Csph[np.newaxis,np.newaxis,:,:,np.newaxis])
            # Wanying Kang: Add BC by modifying the poisson operator M,
            # give up the equation for high k wavenumber, need totally nz*nk0+nz*(nk0-1) slots.
            # 1. NP: p|_{NP}=0
            # For all k, wave#k component has no Amp at NP.
            #M[:,:,int(self.nl/2),:,0:self.nk0]=(Iz[:,:,np.newaxis,np.newaxis])*((np.exp(2*pi*1j*self.ll*(self.ny-1)/self.nl)/self.nl)[np.newaxis,np.newaxis,:,np.newaxis])
            #M[:,:,int(self.nl/2),int(self.nl/2),0:self.nk0]=0.
            #M[:,:,int(self.nl/2),int(self.nl*3/4),0:self.nk0]=0.
            # 2. SP: p_x|_{SP}=0. 1j*k*ph|_{SP}=0 where ph is fourier transformed p in x dir.
            # For k=0, the equation is automatically satisfied; For k/=0, this means wave#k component has no Amp at SP.
            #M[:,:,int(self.nl*3/4),:,1:self.nk0]=(Iz[:,:,np.newaxis,np.newaxis])*(1/self.nl)*self.kk[1:self.nk0]
            #M[:,:,int(self.nl/2),int(self.nl/2),1:self.nk0]=0.
            #M[:,:,int(self.nl/2),int(self.nl*3/4),1:self.nk0]=0.
            # Wanying Kang: calculate matrix inversion
            Mt = np.ascontiguousarray(np.transpose(M,[4,0,2,1,3]))
            Mt.shape=(self.nk,self.nz*self.nl,self.nz*self.nl)
            #Mt[0,:,:]=np.nan # avoids singular matrix in inv(), however filterred out k=0 components.
            for ik in range(self.nk0):
                at = np.linalg.inv(Mt[ik,:,:])
                at.shape = (self.nz,self.nl,self.nz,self.nl)
                a[:,:,:,:,ik] = np.transpose(at,[0,2,1,3])
            #a[:,:,0,:,0]=0.
            # zero the l=0 row of the inversion
            a[:,:,0,:,:]=0.
        #self.a = np.ma.masked_invalid(a).filled(0.)
        self.a = a
        # Wanying Kang add b matrix to invert k=0 component,
        # now this is not necessary since I changed the way I calculate a above.
        #Mb = np.multiply.outer(self.S,Cfh)-Izl*(self.ll**2)
        #Mb[:,:,int(self.nl/2)-1,:]=(Iz[:,:,np.newaxis])*((np.exp(2*pi*1j*self.ll*(self.ny-1)/self.nl)/self.nl)[np.newaxis,np.newaxis,:])
        #Mb = M[:,:,:,:,0]
        #Mb[:,:,0,0]=np.nan
        #Mbt = np.ascontiguousarray(np.transpose(Mb,[0,2,1,3]))
        #Mbt.shape=(self.nl*self.nz,self.nl*self.nz)
        #bt = np.linalg.inv(Mbt)
        #bt.shape = (self.nz,self.nl,self.nz,self.nl)
        #b = np.transpose(bt,[0,2,1,3])
        #b [:,:,0,0]=0.+0j
        #self.a[:,:,:,:,0]=b
    def _initialize_forcing(self):
        """No-op: the frictional filter below is disabled in this fork."""
        pass
        #"""Set up frictional filter."""
        # this defines the spectral filter (following Arbic and Flierl, 2003)
        # cphi=0.65*pi
        # wvx=np.sqrt((self.k*self.dx)**2.+(self.l*self.dy)**2.)
        # self.filtr = np.exp(-self.filterfac*(wvx-cphi)**4.)
        # self.filtr[wvx<=cphi] = 1.
    ### All the diagnostic stuff follows. ###
    def _calc_cfl(self):
        """Return the advective CFL number based on total (eddy + mean) speed."""
        return np.abs(
            np.hstack([self.u + self.Ubg[:,np.newaxis,np.newaxis], self.v])
        ).max()*self.dt/self.dx
    # calculate KE: this has units of m^2 s^{-2}
    # (should also multiply by H1 and H2...)
    def _calc_ke(self):
        """Return depth-weighted mean kinetic energy."""
        ke = 0.
        for j in range(self.nz):
            ke += .5*self.Hi[j]*self.spec_var(self.wv*self.ph[j])
        return ke.sum() / self.H
    # calculate eddy turn over time
    # (perhaps should change to fraction of year...)
    def _calc_eddy_time(self):
        """ estimate the eddy turn-over time in days """
        ens = 0.
        for j in range(self.nz):
            # NOTE(review): '=' overwrites each layer so only the last layer's
            # enstrophy is kept; _calc_ke uses '+=' -- confirm intent.
            ens = .5*self.Hi[j] * self.spec_var(self.wv2*self.ph[j])
        return 2.*pi*np.sqrt( self.H / ens.sum() ) / 86400
    def _calc_derived_fields(self):
        """Compute real-space fields and advective terms used by diagnostics."""
        self.p = self.ifft(self.ph)
        self.xi =self.ifft(-self.wv2*self.ph)
        self.Jpxi = self._advect(self.xi, self.u, self.v)
        self.Jq = self._advect(self.q, self.u, self.v)
        self.Sph = np.einsum("ij,jkl->ikl",self.S,self.ph)
        self.Sp = self.ifft(self.Sph)
        self.JSp = self._advect(self.Sp,self.u,self.v)
        self.phn = self.modal_projection(self.ph)
    def _initialize_model_diagnostics(self):
        """ Extra diagnostics for layered model """
        self.add_diagnostic('entspec',
                description='barotropic enstrophy spectrum',
                function= (lambda self:
                    np.abs((self.Hi[:,np.newaxis,np.newaxis]*self.qh).sum(axis=0))**2/self.H) )
        self.add_diagnostic('KEspec_modal',
                description='modal KE spectra',
                function= (lambda self:
                    self.wv2*(np.abs(self.phn)**2)/self.M**2 ))
        self.add_diagnostic('PEspec_modal',
                description='modal PE spectra',
                function= (lambda self:
                    self.kdi2[1:,np.newaxis,np.newaxis]*(np.abs(self.phn[1:,:,:])**2)/self.M**2 ))
        self.add_diagnostic('APEspec',
                description='available potential energy spectrum',
                function= (lambda self:
                    (self.f2gpi*
                        np.abs(self.ph[:-1]-self.ph[1:])**2).sum(axis=0)/self.H))
        self.add_diagnostic('KEflux',
                description='spectral divergence of flux of kinetic energy',
                function =(lambda self: (self.Hi[:,np.newaxis,np.newaxis]*
                    (self.ph.conj()*self.Jpxi).real).sum(axis=0)/self.H))
        self.add_diagnostic('APEflux',
                description='spectral divergence of flux of available potential energy',
                function =(lambda self: (self.Hi[:,np.newaxis,np.newaxis]*
                    (self.ph.conj()*self.JSp).real).sum(axis=0)/self.H))
        self.add_diagnostic('APEgenspec',
                description='the spectrum of the rate of generation of available potential energy',
                function =(lambda self: (self.Hi[:,np.newaxis,np.newaxis]*
                    (self.Ubg[:,np.newaxis,np.newaxis]*self.k +
                     self.Vbg[:,np.newaxis,np.newaxis]*self.l)*
                    (1j*self.ph.conj()*self.Sph).real).sum(axis=0)/self.H))
        self.add_diagnostic('ENSflux',
                description='barotropic enstrophy flux',
                function = (lambda self: (-self.Hi[:,np.newaxis,np.newaxis]*
                    (self.qh.conj()*self.Jq).real).sum(axis=0)/self.H))
        # # Wanying Kang: this function cannot be used since I change the dimension of ikQy
        # self.add_diagnostic('ENSgenspec',
        #         description='the spectrum of the rate of generation of barotropic enstrophy',
        #         function = (lambda self:
        #             -(self.Hi[:,np.newaxis,np.newaxis]*((self.ikQy -
        #                 self.ilQx)*(self.Sph.conj()*self.ph)).real).sum(axis=0)/self.H))
| 44.895985 | 171 | 0.520018 |
acea1e2263fe727f5736d46cf452c652429a6502 | 8,114 | py | Python | riboviz/process_utils.py | rasilab/riboviz | a2ebaa7dac383e6ce0972626bf57fdba105d1780 | [
"Apache-2.0"
] | 13 | 2020-10-20T13:03:11.000Z | 2022-02-17T02:07:41.000Z | riboviz/process_utils.py | rasilab/riboviz | a2ebaa7dac383e6ce0972626bf57fdba105d1780 | [
"Apache-2.0"
] | 306 | 2020-03-04T14:23:34.000Z | 2022-02-26T14:51:02.000Z | riboviz/process_utils.py | rasilab/riboviz | a2ebaa7dac383e6ce0972626bf57fdba105d1780 | [
"Apache-2.0"
] | 9 | 2020-04-26T20:27:02.000Z | 2022-02-01T13:16:52.000Z | """
Python ``subprocess``-related functions.
"""
import subprocess
import sys
from riboviz import utils
def run_command(cmd, out=sys.stdout, err=sys.stderr):
    """
    Run operating system command via Python ``subprocess``.

    :param cmd: Commnand and arguments
    :type cmd: list(str or unicode)
    :param out: Standard output desination (``sys.stdout`` or file)
    :type out: _io.TextIOWrapper
    :param err: Standard error desination (``sys.stderr`` or file)
    :type err: _io.TextIOWrapper
    :raise AssertionError: If the command returns a non-zero exit code
    """
    status = subprocess.call(cmd, stdout=out, stderr=err)
    assert status == 0, "%s failed with exit code %d" % (cmd, status)
def run_redirect_command(cmd, out, err=sys.stderr):
    """
    Run operating system command via Python ``subprocess`` and
    redirect standard output to a file. Uses a pattern suggested by:
    https://www.saltycrane.com/blog/2008/09/how-get-stdout-and-stderr-using-python-subprocess-module/

    :param cmd: Commnand and arguments
    :type cmd: list(str or unicode)
    :param out: Output file name
    :type out: str or unicode
    :param err: Standard error desination (``sys.stderr`` or file)
    :type err: _io.TextIOWrapper
    :raise FileNotFoundError: if the command to run cannot be found
    :raise AssertionError: If the command returns a non-zero exit code
    """
    process = subprocess.Popen(cmd,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    captured_out, captured_err = process.communicate()
    status = process.returncode
    with open(out, "wb") as handle:
        handle.write(captured_out)
    # communicate() returns bytes, so convert to string before writing.
    err.write(captured_err.decode('utf-8'))
    assert status == 0, "%s failed with exit code %d" % (cmd, status)
def run_pipe_command(cmd1, cmd2, out=sys.stdout, err=sys.stderr):
    """
    Run operating system command via Python ``subprocess`` and pipe
    its output into a second command. Uses pattern suggested by:
    https://docs.python.org/2/library/subprocess.html#replacing-shell-pipeline

    :param cmd1: Commnand and arguments
    :type cmd1: list(str or unicode)
    :param cmd2: Commnand and arguments
    :type cmd2: list(str or unicode)
    :param out: Standard output desination (``sys.stdout`` or file)
    :type out: _io.TextIOWrapper
    :param err: Standard error desination (``sys.stderr`` or file)
    :type err: _io.TextIOWrapper
    :raise FileNotFoundError: if the commands to run cannot be found
    :raise AssertionError: If the commands return a non-zero exit code
    """
    producer = subprocess.Popen(cmd1,
                                stdout=subprocess.PIPE,
                                stderr=err)
    consumer = subprocess.Popen(cmd2,
                                stdin=producer.stdout,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    # Close our copy of the pipe so the producer sees SIGPIPE if the
    # consumer exits early.
    producer.stdout.close()
    piped_out, piped_err = consumer.communicate()
    out.write(piped_out.decode('utf-8'))
    err.write(piped_err.decode('utf-8'))
    status = consumer.returncode
    assert status == 0, ("%s | %s failed with exit code %d"
                         % (cmd1, cmd2, status))
def run_logged_command(cmd,
                       log_file,
                       cmd_file=None,
                       dry_run=False,
                       cmd_to_log=None):
    """
    Run an operating system command via Python ``subprocess``, capturing
    standard output and standard error into a log file. Uses
    :py:func:`run_command`.

    If ``cmd_file`` is not ``None`` the command submitted to the
    operating system is appended to ``cmd_file``.

    If ``dry_run`` is ``True`` the command is recorded (if requested)
    but never submitted, so a record of the command that *would* be
    submitted can be made.

    ``cmd_to_log`` supplies an alternative form of the command to record
    in ``cmd_file``, for cases where the command differs between
    command-line and ``subprocess`` invocation.

    :param cmd: Command and arguments
    :type cmd: list(str or unicode)
    :param log_file: Log file
    :type log_file: str or unicode
    :param cmd_file: Bash commands file
    :type cmd_file: str or unicode
    :param dry_run: Do not submit command to operating system?
    :type dry_run: bool
    :param cmd_to_log: Command to log
    :type cmd_to_log: list(str or unicode)
    :raise FileNotFoundError: if the command to run cannot be found
    :raise AssertionError: If the command returns a non-zero exit code
    """
    if cmd_file is not None:
        recorded = cmd if cmd_to_log is None else cmd_to_log
        with open(cmd_file, "a") as commands:
            commands.write(utils.list_to_str(recorded) + "\n")
    if dry_run:
        return
    with open(log_file, "a") as log:
        run_command(cmd, log, log)
def run_logged_redirect_command(cmd,
                                out,
                                log_file,
                                cmd_file=None,
                                dry_run=False):
    """
    Run an operating system command via Python ``subprocess``,
    redirecting its output into a file and capturing standard error into
    a log file. Uses :py:func:`run_redirect_command`.

    If ``cmd_file`` is not ``None`` a shell-style ``cmd > out`` line is
    appended to ``cmd_file``.

    If ``dry_run`` is ``True`` the command is recorded (if requested)
    but never submitted, so a record of the command that *would* be
    submitted can be made.

    :param cmd: Command and arguments
    :type cmd: list(str or unicode)
    :param out: Output file name
    :type out: str or unicode
    :param log_file: Log file
    :type log_file: str or unicode
    :param cmd_file: Bash commands file
    :type cmd_file: str or unicode
    :param dry_run: Do not submit command to operating system?
    :type dry_run: bool
    :raise FileNotFoundError: if the command to run cannot be found
    :raise AssertionError: If the command returns a non-zero exit code
    """
    if cmd_file is not None:
        with open(cmd_file, "a") as commands:
            commands.write("%s > %s\n" % (utils.list_to_str(cmd), out))
    if dry_run:
        return
    with open(log_file, "a") as log:
        run_redirect_command(cmd, out, log)
def run_logged_pipe_command(cmd1,
                            cmd2,
                            log_file,
                            cmd_file=None,
                            dry_run=False):
    """
    Run an operating system command via Python ``subprocess``, pipe its
    output into a second command, and capture standard output and
    standard error into a log file. Uses :py:func:`run_pipe_command`.

    If ``cmd_file`` is not ``None`` a shell-style ``cmd1 | cmd2`` line
    is appended to ``cmd_file``.

    If ``dry_run`` is ``True`` the commands are recorded (if requested)
    but never submitted, so a record of the commands that *would* be
    submitted can be made.

    :param cmd1: Command and arguments of the producing process
    :type cmd1: list(str or unicode)
    :param cmd2: Command and arguments of the consuming process
    :type cmd2: list(str or unicode)
    :param log_file: Log file
    :type log_file: str or unicode
    :param cmd_file: Bash commands file
    :type cmd_file: str or unicode
    :param dry_run: Do not submit command to operating system?
    :type dry_run: bool
    :raise FileNotFoundError: if the commands to run cannot be found
    :raise AssertionError: If the commands return a non-zero exit code
    """
    if cmd_file is not None:
        with open(cmd_file, "a") as commands:
            commands.write("%s | %s\n" % (utils.list_to_str(cmd1),
                                          utils.list_to_str(cmd2)))
    if dry_run:
        return
    with open(log_file, "a") as log:
        run_pipe_command(cmd1, cmd2, log, log)
| 38.638095 | 101 | 0.635815 |
acea1eac44329de9354fb470b26a1353e00a1434 | 10,976 | py | Python | scipy/misc/common.py | zeehio/scipy | 426619468ee712bbeffdb20cb2baf570273f541c | [
"BSD-3-Clause"
] | 2 | 2015-11-28T17:03:28.000Z | 2016-06-24T15:36:44.000Z | scipy/misc/common.py | zeehio/scipy | 426619468ee712bbeffdb20cb2baf570273f541c | [
"BSD-3-Clause"
] | 1 | 2016-03-13T20:18:16.000Z | 2016-03-13T22:39:20.000Z | scipy/misc/common.py | zeehio/scipy | 426619468ee712bbeffdb20cb2baf570273f541c | [
"BSD-3-Clause"
] | 5 | 2016-05-18T21:47:17.000Z | 2021-08-09T11:48:30.000Z | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
import numpy
import numpy as np
from numpy import (exp, log, asarray, arange, newaxis, hstack, product, array,
zeros, eye, poly1d, r_, sum, fromstring, isfinite,
squeeze, amax, reshape, sign, broadcast_arrays)
__all__ = ['logsumexp', 'central_diff_weights', 'derivative', 'pade', 'lena',
'ascent', 'face']
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed. Tuple of ints is not accepted if NumPy
version is lower than 1.7.0.
.. versionadded:: 0.11.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
.. versionadded:: 0.15.0
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`. These values may be negative in order to
implement subtraction.
.. versionadded:: 0.12.0
return_sign : bool, optional
If this is set to True, the result will be a pair containing sign
information; if False, results that are negative will be returned
as NaN. Default is False (no sign information).
.. versionadded:: 0.16.0
Returns
-------
res : ndarray
The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
is returned.
sgn : ndarray
If return_sign is True, this will be an array of floating-point
numbers matching res and +1, 0, or -1 depending on the sign
of the result. If False, only one result is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2
Notes
-----
Numpy has a logaddexp function which is very similar to `logsumexp`, but
only handles two arguments. `logaddexp.reduce` is similar to this
function, but may be less stable.
Examples
--------
>>> from scipy.misc import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
With weights
>>> a = np.arange(10)
>>> b = np.arange(10, 0, -1)
>>> logsumexp(a, b=b)
9.9170178533034665
>>> np.log(np.sum(b*np.exp(a)))
9.9170178533034647
Returning a sign flag
>>> logsumexp([1,2],b=[1,-1],return_sign=True)
(1.5413248546129181, -1.0)
"""
a = asarray(a)
if b is not None:
a, b = broadcast_arrays(a,b)
if np.any(b == 0):
a = a + 0. # promote to at least float
a[b == 0] = -np.inf
a_max = amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
b = asarray(b)
tmp = b * exp(a - a_max)
else:
tmp = exp(a - a_max)
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
s = sum(tmp, axis=axis, keepdims=keepdims)
if return_sign:
sgn = sign(s)
s *= sgn # /= makes more sense but we need zero -> zero
out = log(s)
if not keepdims:
a_max = squeeze(a_max, axis=axis)
out += a_max
if return_sign:
return out, sgn
else:
return out
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""
Return Pade approximation to a polynomial as the ratio of two polynomials.
Parameters
----------
an : (N,) array_like
Taylor series coefficients.
m : int
The order of the returned approximating polynomials.
Returns
-------
p, q : Polynomial class
The pade approximation of the polynomial defined by `an` is
`p(x)/q(x)`.
Examples
--------
>>> from scipy import misc
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
>>> p, q = misc.pade(e_exp, 2)
>>> e_exp.reverse()
>>> e_poly = np.poly1d(e_exp)
Compare ``e_poly(x)`` and the pade approximation ``p(x)/q(x)``
>>> e_poly(1)
2.7166666666666668
>>> p(1)/q(1)
2.7179487179487181
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N - m
if n < 0:
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1, n+1)
Bkj = zeros((N+1, m), 'd')
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
def lena():
"""
Function that previously returned an example image
.. note:: Removed in 0.17
Parameters
----------
None
Returns
-------
None
Raises
------
RuntimeError
This functionality has been removed due to licensing reasons.
Notes
-----
The image previously returned by this function has an incompatible license
and has been removed from SciPy. Please use `face` or `ascent` instead.
See Also
--------
face, ascent
"""
raise RuntimeError('lena() is no longer included in SciPy, please use '
'ascent() or face() instead')
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True then return color image, otherwise return an 8-bit gray-scale
Returns
-------
face : ndarray
image of a racoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
| 26.770732 | 93 | 0.567147 |
acea1eac7e2c3cb6279190f7902959a51901457c | 819 | py | Python | var/spack/repos/builtin/packages/py-more-itertools/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/py-more-itertools/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/py-more-itertools/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMoreItertools(PythonPackage):
    """Additions to the standard Python itertools package."""
    homepage = "https://github.com/erikrose/more-itertools"
    url = "https://pypi.io/packages/source/m/more-itertools/more-itertools-4.3.0.tar.gz"
    # Modules that Spack's install-time sanity check attempts to import.
    import_modules = ['more_itertools', 'more_itertools.tests']
    # 32-hex checksums (presumably MD5 — TODO confirm) of the release tarballs.
    version('4.3.0', '42157ef9b677bdf6d3609ed6eadcbd4a')
    version('4.1.0', '246f46686d95879fbad37855c115dc52')
    version('2.2', 'b8d328a33f966bf40bb829bcf8da35ce')
    depends_on('py-setuptools', type='build')
    # six is required both to build and at run time.
    depends_on('py-six@1.0.0:1.999', type=('build', 'run'))
acea1f325172383b951ab027ea1a96b1d266da60 | 4,349 | py | Python | declarative_parser/types.py | jeking3/declarative-parser | 47cb98bc8750260ce4e09a511bf47f353e46da06 | [
"MIT"
] | 37 | 2017-11-25T18:42:04.000Z | 2021-07-20T00:24:07.000Z | declarative_parser/types.py | jeking3/declarative-parser | 47cb98bc8750260ce4e09a511bf47f353e46da06 | [
"MIT"
] | 5 | 2017-12-06T03:17:07.000Z | 2020-05-19T13:51:00.000Z | declarative_parser/types.py | jeking3/declarative-parser | 47cb98bc8750260ce4e09a511bf47f353e46da06 | [
"MIT"
] | 3 | 2018-03-18T18:17:05.000Z | 2019-03-25T00:16:02.000Z | from abc import ABC, abstractmethod
from argparse import ArgumentTypeError
from typing import Iterable, Any
def abstract_property(method):
    """Wrap *method* as a read-only property that subclasses must override."""
    abstract = abstractmethod(method)
    return property(abstract)
class StringHandlingMixin(ABC):
    """Turn string provided on initialization into `data_type`."""
    @abstract_property
    def separator(self):
        """Separator for split operation"""
        pass
    @abstract_property
    def item_type(self):
        """Callable converting each split fragment (e.g. ``int``)."""
        pass
    @abstract_property
    def data_type(self):
        """Callable building the final container (e.g. ``set``, ``tuple``)."""
        pass
    @property
    def require_separator(self):
        """If True and the string has no separator ArgumentTypeError will be raised."""
        return False
    def __init__(self, string):
        # Fail fast when a separator is mandatory but absent from the input.
        if self.require_separator and self.separator not in string:
            name = self.__class__.__name__
            raise ArgumentTypeError(
                f'Given string {string} does not look like a '
                f'{name} (no {self.separator}, which is required)'
            )
        try:
            # With a separator: split, convert each non-empty fragment with
            # `item_type` (empty fragments become None), then build the
            # container with `data_type`. Without one: convert the whole
            # string and hand the single value to `data_type`.
            self.data = self.data_type(
                [
                    self.item_type(value) if value != '' else None
                    for value in string.split(self.separator)
                ]
                if self.separator else
                self.item_type(string)
            )
        except (TypeError, ValueError) as e:
            # Re-raise as argparse's error type so parsers report it nicely.
            raise ArgumentTypeError(*e.args)
class Subset(ABC):
    """Base class for objects that select a subset of an iterable."""

    @abstractmethod
    def get_iterator(self, iterable: Iterable[Any]) -> Iterable:
        """Yield the selected elements of *iterable* lazily."""
        return iterable

    def get(self, iterable: Iterable[Any]):
        """Return the selected elements of *iterable* as a list."""
        selected = self.get_iterator(iterable)
        return list(selected)
def positive_int(value):
    """Cast *value* to int, rejecting negative numbers.

    :raise ValueError: if the value is negative or not parseable as int.
    """
    number = int(value)
    if number < 0:
        raise ValueError('Indices need to be positive integers')
    return number
def n_tuple(n):
    """Return a converter that casts a sized iterable to an *n*-tuple.

    The returned callable raises ``TypeError`` when the input does not
    hold exactly *n* items.
    """
    def custom_tuple(data):
        received = len(data)
        if received == n:
            return tuple(data)
        raise TypeError(
            f'{n}-tuple requires exactly {n} items '
            f'({received} received).'
        )
    return custom_tuple
def dsv(value_type, delimiter=','):
    """Delimiter Separated Values.

    Return a converter that splits a string on *delimiter* and casts
    each piece with *value_type*.
    """
    def closure(value):
        pieces = value.split(delimiter)
        return [value_type(piece) for piece in pieces]
    return closure
def one_of(*types):
    """Create a function which attempts to cast input to any of provided types.

    The order of provided `types` is meaningful - if two types accept given
    input value, the first one on list will be used. Types should be able
    to accept a string (if correct) as input value for their constructors.
    """
    def one_of_types(string):
        failures = []
        for candidate in types:
            try:
                return candidate(string)
            except (ArgumentTypeError, TypeError, ValueError) as e:
                failures.append(f'{candidate.__name__}: {e}')
        # Nothing accepted the input: report every attempted type and
        # the exception it raised.
        names = ', '.join(t.__name__ for t in types)
        details = ''.join(f'\n\t{failure}' for failure in failures)
        raise ArgumentTypeError(
            f'Argument {string} does not match any of allowed types: {names}.\n'
            f'Following exceptions has been raised: {details}'
        )
    return one_of_types
# Short alias so the class bodies below can mark converter callables as
# static methods without repeating the long builtin name.
static = staticmethod
class Indices(Subset, StringHandlingMixin):
    """Comma-separated set of positive indices, e.g. ``"0,2,5"``."""
    separator = ','
    # negative indices may be ambiguous
    item_type = static(positive_int)
    # each column should be used once
    data_type = set
    def get_iterator(self, iterable):
        # Yield only the elements whose position was listed.
        for i, value in enumerate(iterable):
            if i in self.data:
                yield value
class Slice(Subset, StringHandlingMixin):
    """Python-style slice notation, e.g. ``"1:5"`` or ``"1:10:2"``."""
    require_separator = True
    separator = ':'
    item_type = int
    # Accept either start:stop or start:stop:step.
    data_type = static(one_of(n_tuple(2), n_tuple(3)))
    def get_iterator(self, iterable):
        return iterable[slice(*self.data)]
class Range(Subset, StringHandlingMixin):
    """Simplified slice with '-' as separator.

    Handles only start and end, does not support negative numbers.
    """
    require_separator = True
    separator = '-'
    item_type = int
    # if user provides '1-3-5' or '1--3' we will not handle that
    # (such values are ambiguous, possibly typos)
    data_type = static(n_tuple(2))
    def get_iterator(self, iterable):
        return iterable[slice(*self.data)]
| 26.198795 | 87 | 0.615544 |
acea1f9ff717bb628013c310e5cfa08ece74cee2 | 298 | py | Python | example.py | cheydrick/PyVLChttp | b3a3f670859f7dc93d13f726b76c73fb56936807 | [
"MIT"
] | 3 | 2019-05-10T16:09:34.000Z | 2019-11-01T17:38:35.000Z | example.py | cheydrick/PyVLChttp | b3a3f670859f7dc93d13f726b76c73fb56936807 | [
"MIT"
] | 2 | 2019-08-04T11:05:54.000Z | 2019-08-05T12:01:45.000Z | example.py | cheydrick/PyVLChttp | b3a3f670859f7dc93d13f726b76c73fb56936807 | [
"MIT"
] | 1 | 2019-08-04T17:43:09.000Z | 2019-08-04T17:43:09.000Z | from pyvlchttp import VLCHTTPAPI
# MRL of a local audio file used to exercise the API end-to-end.
test_mrl = 'file:///Users/chrisheydrick/Music/MojoFrankenstein/superstitious final.mp3'
if __name__ == '__main__':
    # Connect to a local VLC instance exposing its HTTP interface.
    vlc = VLCHTTPAPI('127.0.0.1', '8080', 'password')
    vlc.add_to_playlist(test_mrl)
    vlc.toggle_repeat()
    vlc.play()
# TODO
acea1fe53252e4bd430f28c6e1fd3138db5ccb8f | 2,096 | py | Python | setup.py | ellisbrown/dowel | 31bc544cc6db13b73a6748fce3b1556cd2cd01b4 | [
"MIT"
] | null | null | null | setup.py | ellisbrown/dowel | 31bc544cc6db13b73a6748fce3b1556cd2cd01b4 | [
"MIT"
] | null | null | null | setup.py | ellisbrown/dowel | 31bc544cc6db13b73a6748fce3b1556cd2cd01b4 | [
"MIT"
] | null | null | null | """dowel setuptools script."""
from setuptools import find_packages
from setuptools import setup
# Required dependencies
required = [
# Please keep alphabetized
'colorama',
'matplotlib',
'numpy',
'python-dateutil',
'scipy',
'tabulate',
'tensorboardX',
'termcolor',
]
extras = dict()
extras['tensorflow'] = ['tensorflow']
extras['all'] = list(set(sum(extras.values(), [])))
# Development dependencies (*not* included in "all")
extras['dev'] = [
# Please keep alphabetized
'coverage',
'flake8',
'flake8-docstrings>=1.5.0',
'flake8-import-order',
'pep8-naming',
'pre-commit',
'pycodestyle>=2.5.0',
'pydocstyle>=4.0.0',
'pylint',
'pytest>=4.4.0', # Required for pytest-xdist
'pytest-cov',
'pytest-xdist',
'sphinx',
'recommonmark',
'yapf',
]
with open('README.md') as f:
readme = f.read()
with open('VERSION') as v:
version = v.read().strip()
setup(
name='dowel',
version=version,
author='Reinforcement Learning Working Group',
author_email='dowel@noreply.github.com',
description='A logger for machine learning research',
url='https://github.com/rlworkgroup/dowel',
packages=find_packages(where='src'),
package_dir={'': 'src'},
python_requires='>=3.5',
install_requires=required,
extras_require=extras,
license='MIT',
long_description=readme,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries',
],
)
| 26.2 | 69 | 0.62166 |
acea212b999a1243c201bdf3f837102f184f748e | 1,954 | py | Python | models/hole_history.py | xuanthuong/golfgame | 64914e5af0290504ac7b71e0416285241660301c | [
"MIT"
] | null | null | null | models/hole_history.py | xuanthuong/golfgame | 64914e5af0290504ac7b71e0416285241660301c | [
"MIT"
] | null | null | null | models/hole_history.py | xuanthuong/golfgame | 64914e5af0290504ac7b71e0416285241660301c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Description: hole_history table
# By Thuong.Tran
# Date: 31 Aug 2017
from sqlalchemy import create_engine, Table, Column, MetaData, Integer, Text, DateTime, Float
from sqlalchemy import select, func
class hole_history():
    """Data-access object for the ``gmf_hole_his`` table (SQLAlchemy Core)."""
    def __init__(self, db_url):
        """Connect to the database at *db_url* and create the table if absent."""
        _engine = create_engine(db_url)
        _connection = _engine.connect()
        _metadata = MetaData()
        # Column names look abbreviated Korean-golf-domain codes
        # (e.g. CLSS_NO, ACTR_ID) — TODO confirm their exact meaning.
        _hole_history = Table("gmf_hole_his", _metadata,
                              Column("IDX", Integer, primary_key=True),
                              Column("HOLE_ID", Integer),
                              Column("CLSS_NO", Integer),
                              Column("ORD_NO", Integer),
                              Column("ACTR_ID", Integer),
                              Column("ACT_NM", Text),
                              Column("RSLT_NM", Text),
                              Column("ACT_SCRE", Float),
                              Column("DIST_NO", Float))
        # create_all is a no-op when the table already exists.
        _metadata.create_all(_engine)
        self.connection = _connection
        self.hole_history = _hole_history
        pass
    def insert_to(self, data):
        """Insert *data* (dict or list of dicts) into the table."""
        # NOTE(review): is_valid is always True; the validation below was
        # disabled and is kept only as a reference.
        is_valid = True
        # for item in data:
        #   if not item:
        #     is_valid = False
        #     raise DropItem("Missing %s!" % item)
        if is_valid:
            ins_query = self.hole_history.insert().values(data)
            r = self.connection.execute(ins_query)
    def get_all(self):
        """Return all rows ordered by HOLE_ID (SQLAlchemy result proxy)."""
        s = select([self.hole_history]).order_by('HOLE_ID')
        result = self.connection.execute(s)
        return result
    def get_by_holeid(self, hole_id):
        """Return the rows whose HOLE_ID equals *hole_id*."""
        s = select([self.hole_history]).where(self.hole_history.c.HOLE_ID == hole_id)
        result = self.connection.execute(s)
        return result
    # def get_total_distance_of_hole(self, hole_id):
    #   dist = select([func.count()]).select_from([self.hole_history]).where(self.hole_history.c.HOLE_ID == hole_id)
    #   return dist
    # https://stackoverflow.com/questions/12941416/how-to-count-rows-with-select-count-with-sqlalchemy
# https://stackoverflow.com/questions/12941416/how-to-count-rows-with-select-count-with-sqlalchemy | 34.892857 | 114 | 0.605425 |
acea2191850291738b68b9dddca92f65d9592ba8 | 2,209 | py | Python | superset/db_engine_specs/kylin.py | whelan9453/incubator-superset | 4e3cea45a5136a28442eea50fddc6cf423a9ddd5 | [
"Apache-2.0"
] | 2 | 2019-10-15T01:37:05.000Z | 2019-10-15T09:55:42.000Z | superset/db_engine_specs/kylin.py | whelan9453/incubator-superset | 4e3cea45a5136a28442eea50fddc6cf423a9ddd5 | [
"Apache-2.0"
] | 8 | 2020-03-24T17:59:51.000Z | 2022-03-29T22:27:47.000Z | superset/db_engine_specs/kylin.py | whelan9453/incubator-superset | 4e3cea45a5136a28442eea50fddc6cf423a9ddd5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from superset.db_engine_specs.base import BaseEngineSpec
class KylinEngineSpec(BaseEngineSpec):  # pylint: disable=abstract-method
    """Dialect for Apache Kylin"""

    engine = "kylin"

    # SQL templates that truncate a timestamp column to each supported
    # time grain (ISO-8601 duration keys).
    _time_grain_functions = {
        None: "{col}",
        "PT1S": "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO SECOND) AS TIMESTAMP)",
        "PT1M": "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MINUTE) AS TIMESTAMP)",
        "PT1H": "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO HOUR) AS TIMESTAMP)",
        "P1D": "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO DAY) AS DATE)",
        "P1W": "CAST(TIMESTAMPADD(WEEK, WEEK(CAST({col} AS DATE)) - 1, \
               FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)",
        "P1M": "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MONTH) AS DATE)",
        "P0.25Y": "CAST(TIMESTAMPADD(QUARTER, QUARTER(CAST({col} AS DATE)) - 1, \
                  FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)",
        "P1Y": "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO YEAR) AS DATE)",
    }

    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
        """Render *dttm* as a Kylin SQL literal for *target_type*."""
        type_upper = target_type.upper()
        if type_upper == "DATE":
            # Date-only literal: keep just the YYYY-MM-DD part.
            return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10])
        formatted = dttm.strftime("%Y-%m-%d %H:%M:%S")
        if type_upper == "TIMESTAMP":
            return "CAST('{}' AS TIMESTAMP)".format(formatted)
        # Fallback: plain quoted datetime string.
        return "'{}'".format(formatted)
acea21ada01b8cc486f558e936298f595fb41c64 | 1,385 | py | Python | src/livecli/plugins/seemeplay.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | 1 | 2019-12-04T11:54:52.000Z | 2019-12-04T11:54:52.000Z | src/livecli/plugins/seemeplay.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | null | null | null | src/livecli/plugins/seemeplay.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | null | null | null | import re
from livecli.compat import urlparse
from livecli.plugin import Plugin
from livecli.plugin.api import http, validate
from livecli.stream import HLSStream, HTTPStream
__livecli_docs__ = {
"domains": [
"seemeplay.ru",
],
"geo_blocked": [],
"notes": "",
"live": True,
"vod": True,
"last_update": "2014-08-27",
}
_url_re = re.compile(r"http(s)?://(\w+\.)?seemeplay.ru/")
_player_re = re.compile(r"""
SMP.(channel|video).player.init\({
\s+file:\s+"([^"]+)"
""", re.VERBOSE)
_schema = validate.Schema(
validate.transform(_player_re.search),
validate.any(
None,
validate.union({
"type": validate.get(1),
"url": validate.all(
validate.get(2),
validate.url(scheme="http"),
),
})
)
)
class SeeMePlay(Plugin):
    """Livecli plugin for seemeplay.ru live channels and VOD."""
    @classmethod
    def can_handle_url(cls, url):
        # Any seemeplay.ru URL (subdomains included) is claimed.
        return _url_re.match(url)
    def _get_streams(self):
        # _schema scrapes the player type and stream URL from the page.
        res = http.get(self.url, schema=_schema)
        if not res:
            return
        if res["type"] == "channel" and urlparse(res["url"]).path.endswith("m3u8"):
            # Live channels expose an HLS variant playlist.
            return HLSStream.parse_variant_playlist(self.session, res["url"])
        elif res["type"] == "video":
            # VOD is served as a plain progressive HTTP stream.
            stream = HTTPStream(self.session, res["url"])
            return dict(video=stream)
__plugin__ = SeeMePlay
| 23.87931 | 83 | 0.578339 |
acea21d5662b3cbb8cecdbaa3c7792e90ec474b5 | 14,288 | py | Python | colour/plotting/common.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | 1 | 2019-06-27T11:32:48.000Z | 2019-06-27T11:32:48.000Z | colour/plotting/common.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | null | null | null | colour/plotting/common.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Common Plotting
===============
Defines the common plotting objects:
- :func:`colour_cycle`
- :func:`figure_size`
- :func:`aspect`
- :func:`bounding_box`
- :func:`display`
- :func:`colour_parameter`
- :func:`colour_parameters_plot`
- :func:`single_colour_plot`
- :func:`multi_colour_plot`
"""
from __future__ import division
import functools
import itertools
import os
from collections import namedtuple
import matplotlib
import matplotlib.image
import matplotlib.path
import matplotlib.pyplot
import matplotlib.ticker
import numpy as np
import pylab
from colour.utilities import Structure
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = [
'PLOTTING_RESOURCES_DIRECTORY',
'DEFAULT_FIGURE_SIZE',
'DEFAULT_COLOUR_CYCLE',
'ColourParameter',
'colour_cycle',
'figure_size',
'aspect',
'bounding_box',
'display',
'colour_parameter',
'colour_parameters_plot',
'single_colour_plot',
'multi_colour_plot']
PLOTTING_RESOURCES_DIRECTORY = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'resources')
"""
Resources directory.
RESOURCES_DIRECTORY : unicode
"""
DEFAULT_FIGURE_SIZE = 14, 7
"""
Default plots figure size.
DEFAULT_FIGURE_SIZE : tuple
"""
DEFAULT_COLOUR_CYCLE = ('r', 'g', 'b', 'c', 'm', 'y', 'k')
ColourParameter = namedtuple('ColourParameter',
('name', 'RGB', 'x', 'y0', 'y1'))
# Defining default figure size.
pylab.rcParams['figure.figsize'] = DEFAULT_FIGURE_SIZE
# Defining an alternative font that can display scientific notations.
matplotlib.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
def colour_cycle(colour_map='hsv', count=len(DEFAULT_COLOUR_CYCLE)):
    """
    Returns a colour cycle iterator using given colour map.

    Parameters
    ----------
    colour_map : unicode, optional
        Matplotlib colour map. ``None`` falls back to
        :attr:`DEFAULT_COLOUR_CYCLE`.
    count : int, optional
        Cycle length.

    Returns
    -------
    cycle
        Colour cycle iterator.
    """
    if colour_map is None:
        colours = DEFAULT_COLOUR_CYCLE
    else:
        colour_map_callable = getattr(matplotlib.pyplot.cm, colour_map)
        colours = colour_map_callable(np.linspace(0, 1, count))
    return itertools.cycle(colours)
def figure_size(size=DEFAULT_FIGURE_SIZE):
    """
    Sets figures sizes.

    Parameters
    ----------
    size : tuple, optional
        Figure size.

    Returns
    -------
    object
        Callable object.
    """
    def figure_size_decorator(callable):
        """
        Sets figures sizes.

        Parameters
        ----------
        callable : object
            Callable object to decorate.

        Returns
        -------
        object
            Callable object.
        """
        @functools.wraps(callable)
        def figure_size_wrapper(*args, **kwargs):
            """
            Sets figures sizes.

            Parameters
            ----------
            \*args : \*
                Arguments.
            \*\*kwargs : \*\*
                Keywords arguments.

            Returns
            -------
            object
                Callable object.
            """
            # A 'figure_size' keyword on the wrapped call overrides the
            # size bound at decoration time.
            pylab.rcParams['figure.figsize'] = (
                kwargs.get('figure_size')
                if kwargs.get('figure_size') is not None else
                size)
            try:
                return callable(*args, **kwargs)
            finally:
                # Always restore the global default, even on exception.
                pylab.rcParams['figure.figsize'] = DEFAULT_FIGURE_SIZE
        return figure_size_wrapper
    return figure_size_decorator
def aspect(**kwargs):
    """
    Sets the figure aspect.

    Parameters
    ----------
    \*\*kwargs : \*\*
        Keywords arguments: title, x_label, y_label, legend,
        legend_location, x_ticker, y_ticker, x_ticker_locator,
        y_ticker_locator, no_ticks, no_x_ticks, no_y_ticks, grid,
        axis_grid, x_axis_line, y_axis_line, aspect.

    Returns
    -------
    bool
        Definition success.
    """

    # Merge caller keywords over the defaults; unknown keys are kept so the
    # same settings dict can be shared with bounding_box() / display().
    settings = Structure(
        **{'title': None,
           'x_label': None,
           'y_label': None,
           'legend': False,
           'legend_location': 'upper right',
           'x_ticker': False,
           'y_ticker': False,
           'x_ticker_locator': matplotlib.ticker.AutoMinorLocator(2),
           'y_ticker_locator': matplotlib.ticker.AutoMinorLocator(2),
           'no_ticks': False,
           'no_x_ticks': False,
           'no_y_ticks': False,
           'grid': False,
           'axis_grid': 'both',
           'x_axis_line': False,
           'y_axis_line': False,
           'aspect': None})
    settings.update(kwargs)

    if settings.title:
        pylab.title(settings.title)
    if settings.x_label:
        pylab.xlabel(settings.x_label)
    if settings.y_label:
        pylab.ylabel(settings.y_label)
    if settings.legend:
        pylab.legend(loc=settings.legend_location)
    # Minor tick locators are opt-in.
    if settings.x_ticker:
        matplotlib.pyplot.gca().xaxis.set_minor_locator(
            settings.x_ticker_locator)
    if settings.y_ticker:
        matplotlib.pyplot.gca().yaxis.set_minor_locator(
            settings.y_ticker_locator)
    # 'no_ticks' hides both axes; the next two switches hide one axis each.
    if settings.no_ticks:
        matplotlib.pyplot.gca().set_xticks([])
        matplotlib.pyplot.gca().set_yticks([])
    if settings.no_x_ticks:
        matplotlib.pyplot.gca().set_xticks([])
    if settings.no_y_ticks:
        matplotlib.pyplot.gca().set_yticks([])
    if settings.grid:
        pylab.grid(which=settings.axis_grid)
    # Optional dashed lines marking the x == 0 / y == 0 axes.
    if settings.x_axis_line:
        pylab.axvline(color='black', linestyle='--')
    if settings.y_axis_line:
        pylab.axhline(color='black', linestyle='--')
    if settings.aspect:
        matplotlib.pyplot.axes().set_aspect(settings.aspect)

    return True
def bounding_box(**kwargs):
    """
    Sets the plot bounding box.

    Parameters
    ----------
    \*\*kwargs : \*\*
        Keywords arguments: bounding_box, x_tighten, y_tighten, limits,
        margins.

    Returns
    -------
    bool
        Definition success.
    """

    settings = Structure(
        **{'bounding_box': None,
           'x_tighten': False,
           'y_tighten': False,
           'limits': [0, 1, 0, 1],
           'margins': [0, 0, 0, 0]})
    settings.update(kwargs)

    if settings.bounding_box is None:
        # Derive the box from the limits, padded by the margins; each axis
        # is only constrained when its tighten flag is set.
        x_min, x_max, y_min, y_max = settings.limits
        x_pad_min, x_pad_max, y_pad_min, y_pad_max = settings.margins
        if settings.x_tighten:
            pylab.xlim(x_min + x_pad_min, x_max + x_pad_max)
        if settings.y_tighten:
            pylab.ylim(y_min + y_pad_min, y_max + y_pad_max)
    else:
        # An explicit bounding box overrides limits / margins entirely.
        box = settings.bounding_box
        pylab.xlim(box[0], box[1])
        pylab.ylim(box[2], box[3])

    return True
def display(**kwargs):
    """
    Sets the figure display.

    Parameters
    ----------
    \*\*kwargs : \*\*
        Keywords arguments: standalone, filename.

    Returns
    -------
    bool
        Definition success.
    """

    settings = Structure(
        **{'standalone': True,
           'filename': None})
    settings.update(kwargs)

    if settings.standalone:
        # Save to disk when a filename was given, otherwise show the figure
        # interactively, then release it.
        if settings.filename is None:
            pylab.show()
        else:
            pylab.savefig(**kwargs)
        pylab.close()

    return True
def colour_parameter(name=None, RGB=None, x=None, y0=None, y1=None):
    """
    Convenience factory for
    :attr:`colour.plotting.plots.ColourParameter` instances.

    Parameters
    ----------
    name : unicode, optional
        Colour name.
    RGB : array_like, optional
        RGB Colour.
    x : numeric, optional
        X data.
    y0 : numeric, optional
        Y0 data.
    y1 : numeric, optional
        Y1 data.

    Returns
    -------
    ColourParameter
        ColourParameter.
    """

    return ColourParameter(name=name, RGB=RGB, x=x, y0=y0, y1=y1)
def colour_parameters_plot(colour_parameters,
                           y0_plot=True,
                           y1_plot=True,
                           **kwargs):
    """
    Plots given colour colour_parameters.

    Parameters
    ----------
    colour_parameters : list
        ColourParameter sequence.
    y0_plot : bool, optional
        Plot y0 line.
    y1_plot : bool, optional
        Plot y1 line.
    \*\*kwargs : \*\*
        Keywords arguments.

    Returns
    -------
    bool
        Definition success.

    Examples
    --------
    >>> cp1 = colour_parameter(x=390, RGB=[0.03009021, 0, 0.12300545])
    >>> cp2 = colour_parameter(x=391, RGB=[0.03434063, 0, 0.13328537], y0=0, y1=0.25)  # noqa
    >>> cp3 = colour_parameter(x=392, RGB=[0.03826312, 0, 0.14276247], y0=0, y1=0.35)  # noqa
    >>> colour_parameters_plot([cp1, cp2, cp3])  # doctest: +SKIP
    True
    """

    for i in range(len(colour_parameters) - 1):
        x0 = colour_parameters[i].x
        x01 = colour_parameters[i + 1].x
        y0 = (0
              if colour_parameters[i].y0 is None else
              colour_parameters[i].y0)
        y1 = (1
              if colour_parameters[i].y1 is None else
              colour_parameters[i].y1)
        # Bug fix: the *i + 1* element must be checked for undefined data
        # here; previously element *i* was checked, which could let *None*
        # slip into the polygon coordinates below.
        y01 = (0
               if colour_parameters[i + 1].y0 is None else
               colour_parameters[i + 1].y0)
        y11 = (1
               if colour_parameters[i + 1].y1 is None else
               colour_parameters[i + 1].y1)

        # One filled quad per segment between consecutive parameters.
        x_polygon = [x0, x01, x01, x0]
        y_polygon = [y0, y01, y11, y1]
        pylab.fill(x_polygon,
                   y_polygon,
                   color=colour_parameters[i].RGB,
                   edgecolor=colour_parameters[i].RGB)

    # The y0 / y1 outlines are only drawn when every element defines them.
    if all([x.y0 is not None for x in colour_parameters]):
        if y0_plot:
            pylab.plot([x.x for x in colour_parameters],
                       [x.y0 for x in colour_parameters],
                       color='black',
                       linewidth=2)

    if all([x.y1 is not None for x in colour_parameters]):
        if y1_plot:
            pylab.plot([x.x for x in colour_parameters],
                       [x.y1 for x in colour_parameters],
                       color='black',
                       linewidth=2)

    # Vertical limits span from the smallest y0 to the largest y1; the two
    # previously computed-but-unused intermediate extrema were dropped.
    y_limit_min0 = min(
        [0 if x.y0 is None else x.y0 for x in colour_parameters])
    y_limit_max1 = max(
        [1 if x.y1 is None else x.y1 for x in colour_parameters])

    settings = {'x_label': 'Parameter',
                'y_label': 'Colour',
                'limits': [min([0 if x.x is None else x.x
                                for x in colour_parameters]),
                           max([1 if x.x is None else x.x
                                for x in colour_parameters]),
                           y_limit_min0,
                           y_limit_max1]}
    settings.update(kwargs)

    bounding_box(**settings)
    aspect(**settings)

    return display(**settings)
def single_colour_plot(colour_parameter, **kwargs):
    """
    Plots given colour.

    Parameters
    ----------
    colour_parameter : ColourParameter
        ColourParameter.
    \*\*kwargs : \*\*
        Keywords arguments, forwarded to :func:`multi_colour_plot`.

    Returns
    -------
    bool
        Definition success.

    Examples
    --------
    >>> RGB = (0.32315746, 0.32983556, 0.33640183)
    >>> single_colour_plot(colour_parameter(RGB))  # doctest: +SKIP
    True
    """

    # Delegate to the multi colour plot with a one-element sequence.
    return multi_colour_plot([colour_parameter], **kwargs)
def multi_colour_plot(colour_parameters,
                      width=1,
                      height=1,
                      spacing=0,
                      across=3,
                      text_display=True,
                      text_size='large',
                      text_offset=0.075,
                      **kwargs):
    """
    Plots given colours as a grid of filled rectangles.

    Parameters
    ----------
    colour_parameters : list
        ColourParameter sequence.
    width : numeric, optional
        Colour polygon width.
    height : numeric, optional
        Colour polygon height.
    spacing : numeric, optional
        Colour polygons spacing.
    across : int, optional
        Colour polygons count per row.
    text_display : bool, optional
        Display colour text.
    text_size : numeric, optional
        Colour text size.
    text_offset : numeric, optional
        Colour text offset.
    \*\*kwargs : \*\*
        Keywords arguments.

    Returns
    -------
    bool
        Definition success.

    Examples
    --------
    >>> cp1 = colour_parameter(RGB=(0.45293517, 0.31732158, 0.26414773))
    >>> cp2 = colour_parameter(RGB=(0.77875824, 0.5772645, 0.50453169))
    >>> multi_colour_plot([cp1, cp2])  # doctest: +SKIP
    True
    """

    offset_x = offset_y = 0
    x_limit_min, x_limit_max, y_limit_min, y_limit_max = 0, width, 0, height

    for i, cparam in enumerate(colour_parameters):
        if i % across == 0 and i != 0:
            # Row is full: wrap to the start of the next (lower) row.
            offset_x = 0
            offset_y -= height + spacing

        x0, x1 = offset_x, offset_x + width
        y0, y1 = offset_y, offset_y + height

        pylab.fill([x0, x1, x1, x0], [y0, y0, y1, y1], color=cparam.RGB)

        if cparam.name is not None and text_display:
            pylab.text(x0 + text_offset, y0 + text_offset,
                       cparam.name, clip_on=True, size=text_size)

        offset_x += width + spacing

    # Horizontal extent covers the widest row, vertical extent the last row.
    columns = min(len(colour_parameters), across)
    x_limit_max = columns * width + columns * spacing - spacing
    y_limit_min = offset_y

    settings = {'x_tighten': True,
                'y_tighten': True,
                'no_ticks': True,
                'limits': [x_limit_min, x_limit_max, y_limit_min, y_limit_max],
                'aspect': 'equal'}
    settings.update(kwargs)

    bounding_box(**settings)
    aspect(**settings)

    return display(**settings)
| 26.216514 | 93 | 0.567609 |
acea21f8fbde3fd188c3feb7e8034e6b3499ef43 | 7,295 | py | Python | fish_feerder.py | mapo243/Fish-Feeder | 969fe80bb72c8722be2f90cc824e3526a50c3ce1 | [
"MIT"
] | null | null | null | fish_feerder.py | mapo243/Fish-Feeder | 969fe80bb72c8722be2f90cc824e3526a50c3ce1 | [
"MIT"
] | null | null | null | fish_feerder.py | mapo243/Fish-Feeder | 969fe80bb72c8722be2f90cc824e3526a50c3ce1 | [
"MIT"
] | null | null | null | #imports needed
import time
import urllib
import urllib.parse
import urllib.request

import numpy as np
import pandas as pd
import serial
def read_field_thingspeak(channel, api_key, field, results):
    """Fetch a ThingSpeak channel field as CSV and return the latest value.

    The 'created_at' column is discarded; the value is taken from the second
    remaining column of the first row.
    """
    url = ('https://api.thingspeak.com/channels/%s/fields/%s.csv?api_key=%s&results=%s'
           % (channel, field, api_key, results))
    frame = pd.read_csv(urllib.request.urlopen(url))
    frame.drop(['created_at'], axis=1, inplace=True)
    return np.array(frame)[0][1]
def read_status_thingspeak(channel, api_key, results):
    """Open the ThingSpeak status CSV feed of a channel.

    Returns the raw HTTP response object so the caller can parse it
    (e.g. with pandas.read_csv).
    """
    url = ('https://api.thingspeak.com/channels/%s/status.csv?api_key=%s&results=%s'
           % (channel, api_key, results))
    return urllib.request.urlopen(url)
def write_to_thingspeak(api_key, field1, value1, field2, value2):
    """Write two field values to a ThingSpeak channel in a single update.

    The query string is now built with urllib.parse.urlencode() so values
    containing spaces or other reserved characters are properly escaped
    instead of being concatenated into an invalid URL.
    """
    params = {
        'api_key': api_key,
        'field' + str(field1): value1,
        'field' + str(field2): value2,
    }
    urllib.request.urlopen(
        'https://api.thingspeak.com/update?' + urllib.parse.urlencode(params))
def write_to_thingspeak_status(api_key, field1, value1, field2, value2, status):
    """Write two field values plus a status message in one ThingSpeak update.

    Bug fix: the previous string concatenation produced an invalid request
    URL whenever the status contained a space (e.g. 'Food Dispensed');
    urllib.parse.urlencode() percent-encodes all values correctly.
    """
    params = {
        'api_key': api_key,
        'field' + str(field1): value1,
        'field' + str(field2): value2,
        'status': status,
    }
    urllib.request.urlopen(
        'https://api.thingspeak.com/update?' + urllib.parse.urlencode(params))
# Interactive console front-end for the fish feeder.  Two roles share one
# ThingSpeak channel: field 1 = feeder status (0=EMPTY, 1=READY) and
# field 2 = command (0=IDLE, 1=RUN).  The transmitter writes commands,
# the receiver relays them over serial to the Arduino feeder hardware.

#declare variables related for specific thingspeak channel
channel = '720608'
read_api_key = 'RRGPJE5YQ8JJPRIR'
write_api_key='C7MKD36DWMJS69UL'
#declare variable for COM port for serial communication
com = '/dev/cu.usbserial-DN051CFK'
#declare variables to validate user input
function_selection = ''
action = ''
print('Fish Feeder Transceiver')
while(function_selection != '0'):
    print('***Main Menu***')
    #prompt user to choose between transmitter or receiver functionality
    function_selection = input('Choose the following:\n1: Transmitter\n2: Receiver\n0: Exit\n')
    if function_selection == '1':
        #while loop that runs for duration of program to prompt for user input
        while(action != '0'):
            #initialization delay to prevent multiple thingspeak write attempts within a short period.
            print('\nInitializing', end='')
            for i in range(15):
                print('.', end='')
                time.sleep(1)
            #prompt user for input and move through program accordingly
            action = input('\n\n***Transmitter***\nChoose the following:\n1: Feed Fish!\n2: Last Time Fish Fed?\n3: Feeder Status\n0: Exit\n')
            if action == '1':
                #read initial feeder status
                status = read_field_thingspeak(channel,read_api_key,1,1)
                if(status != 0):
                    #if status not empty write the run command to thingspeak
                    write_to_thingspeak(write_api_key,1,1,2,1)
                    #communication delay to ensure time for arduino to process command and respond to thingspeak
                    print('Communicating', end='')
                    for i in range(20):
                        print('.', end='')
                        time.sleep(1)
                    #read feeder status to check if feeding was successful
                    status = read_field_thingspeak(channel,read_api_key,1,1)
                    if(status==0):
                        print('\nFeeder Empty!')
                        continue
                    elif(status == 1):
                        #read feeder command, if command is still 1 then hardware never read command, if 0 then hardware ran command
                        command = read_field_thingspeak(channel,read_api_key,2,1)
                        if(command == 1):
                            print('\nFeeder Not Responding!')
                        else:
                            print('\nFood Dispensed.')
                else:
                    #if status is 0 then the feeder is empty
                    print('\nFeeder Empty!')
            elif action == '2':
                #query time last fed and report to user
                df = pd.read_csv(read_status_thingspeak(channel, read_api_key, 1))
                print('Fish last fed', np.array(df)[0][0])
                continue
            elif action == '3':
                #query feeder status and report to user
                #field 1:status(0=EMPTY, 1=READY)
                #field 2:command(0=IDLE, 1=RUN)
                status = read_field_thingspeak(channel,read_api_key,1,1)
                command = read_field_thingspeak(channel,read_api_key,2,1)
                if(status==0):
                    print('Feeder Empty!')
                elif(status==1):
                    print('Feeder Ready.')
            elif action == '0':
                #terminate program
                print("Transmitter Terminated.")
                break
            else:
                print('Invalid Input!\n\n')
                continue
    elif function_selection == '2':
        print('***Receiver***')
        try:
            #initialize serial communication
            ser = serial.Serial(com, 9600, timeout=1)
            time.sleep(2)
        # NOTE(review): bare except hides the actual serial failure; consider
        # catching serial.SerialException and reporting the error details.
        except:
            print('Serial Communication Error!\nReturning to Main Menu.\n\n')
        else:
            #while loop that runs while serial communication is open
            while ser.is_open:
                #query thingspeak for feeder status and command
                #field 1:status(0=EMPTY, 1=READY)
                #field 2:command(0=IDLE, 1=RUN)
                status = read_field_thingspeak(channel,read_api_key,1,1)
                command = read_field_thingspeak(channel,read_api_key,2,1)
                #reset input buffer to ensure latest serial output from board is read
                ser.reset_input_buffer()
                board_status = ser.readline()
                #board is communicating empty status
                if(board_status == b'EMPTY\r\n'):
                    if(status!=0):
                        write_to_thingspeak(write_api_key,1,0,2,0)
                        time.sleep(15)
                #board is communicating ready status
                if(board_status == b'READY\r\n'):
                    #command is IDLE
                    if(command == 0):
                        if(status != 1):
                            write_to_thingspeak(write_api_key,1,1,2,0)
                            time.sleep(15)
                    #command is RUN
                    if(command == 1):
                        ser.write(b'1')
                        time.sleep(15)
                        write_to_thingspeak_status(write_api_key,1,1,2,0,'Food Dispensed')
                #delay
                time.sleep(1)
            #close serial communication and terminate program
            ser.close()
            print('Serial Communication Closed. Receiver Terminated.')
    elif(function_selection =='0'):
        print('Program Terminated.')
        break
    else:
        print('Invalid Input!\n\n')
        continue
| 45.880503 | 182 | 0.559973 |
acea228ba60ca53432b8805dfd7050a4e151c618 | 1,400 | py | Python | xlsxwriter/test/comparison/test_chart_size02.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_chart_size02.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_chart_size02.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_size01.xlsx')
def test_create_file(self):
"""Test XlsxWriter chartarea properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [61355904, 61365248]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_size({'x_scale': 1.066666666, 'y_scale': 1.11111111})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 25.454545 | 79 | 0.569286 |
acea22ca3d5d9ee6d16ff01cbbc9e38ba7c89c8d | 7,127 | py | Python | src/zeep/wsdl/messages/mime.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | [
"MIT"
] | 3 | 2017-04-01T16:05:52.000Z | 2019-07-26T14:32:26.000Z | src/zeep/wsdl/messages/mime.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | [
"MIT"
] | 3 | 2021-03-31T19:37:08.000Z | 2021-12-13T20:32:23.000Z | src/zeep/wsdl/messages/mime.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | [
"MIT"
] | 2 | 2020-11-18T09:49:46.000Z | 2021-07-08T14:02:03.000Z | """
zeep.wsdl.messages.mime
~~~~~~~~~~~~~~~~~~~~~~~
"""
import six
from defusedxml.lxml import fromstring
from lxml import etree
from zeep import ns, xsd
from zeep.helpers import serialize_object
from zeep.wsdl.messages.base import ConcreteMessage, SerializedMessage
from zeep.wsdl.utils import etree_to_string
__all__ = ["MimeContent", "MimeXML", "MimeMultipart"]
class MimeMessage(ConcreteMessage):
    """Base class for the MIME concrete message bindings.

    Resolves the abstract message parts into a single XSD element stored on
    ``self.body``, which subclasses use to serialize / deserialize.
    """

    _nsmap = {"mime": ns.MIME}

    def __init__(self, wsdl, name, operation, part_name):
        super(MimeMessage, self).__init__(wsdl, name, operation)
        self.part_name = part_name

    def resolve(self, definitions, abstract_message):
        """Resolve the body element

        The specs are (again) not really clear how to handle the message
        parts in relation the message element vs type. The following strategy
        is chosen, which seems to work:

        - If the message part has a name and it matches then set it as body
        - If the message part has a name but it doesn't match but there are no
          other message parts, then just use that one.
        - If the message part has no name then handle it like an rpc call,
          in other words, each part is an argument.

        """
        self.abstract = abstract_message

        if self.part_name and self.abstract.parts:
            if self.part_name in self.abstract.parts:
                message = self.abstract.parts[self.part_name]
            elif len(self.abstract.parts) == 1:
                # Single part: tolerate a non-matching part name.
                message = list(self.abstract.parts.values())[0]
            else:
                raise ValueError(
                    "Multiple parts for message %r while no matching part found"
                    % self.part_name
                )

            if message.element:
                self.body = message.element
            else:
                # The part references a type: wrap it in an element named
                # after the operation so it can be rendered as a document.
                elm = xsd.Element(self.part_name, message.type)
                self.body = xsd.Element(
                    self.operation.name, xsd.ComplexType(xsd.Sequence([elm]))
                )
        else:
            # RPC style: each part becomes one child element (argument).
            children = []
            for name, message in self.abstract.parts.items():
                if message.element:
                    elm = message.element.clone(name)
                else:
                    elm = xsd.Element(name, message.type)
                children.append(elm)
            self.body = xsd.Element(
                self.operation.name, xsd.ComplexType(xsd.Sequence(children))
            )
class MimeContent(MimeMessage):
    """WSDL includes a way to bind abstract types to concrete messages in
    some MIME format.

    Bindings are defined for multipart/related, text/xml,
    application/x-www-form-urlencoded and arbitrary MIME type strings; the
    set of MIME types is large and evolving, so WSDL does not define an XML
    grammar for each one.

    :param wsdl: The main wsdl document
    :type wsdl: zeep.wsdl.wsdl.Document
    :param name:
    :param operation: The operation to which this message belongs
    :type operation: zeep.wsdl.bindings.soap.SoapOperation
    :param part_name:
    :type type: str

    """

    def __init__(self, wsdl, name, operation, content_type, part_name):
        super(MimeContent, self).__init__(wsdl, name, operation, part_name)
        self.content_type = content_type

    def serialize(self, *args, **kwargs):
        """Render the body value into the configured MIME content type."""
        value = self.body(*args, **kwargs)

        if self.content_type == "application/x-www-form-urlencoded":
            data = six.moves.urllib.parse.urlencode(serialize_object(value))
        elif self.content_type == "text/xml":
            root = etree.Element("root")
            self.body.render(root, value)
            data = etree_to_string(list(root)[0])
        else:
            # Unknown content types are sent with an empty payload.
            data = ""

        return SerializedMessage(
            path=self.operation.location,
            headers={"Content-Type": self.content_type},
            content=data)

    def deserialize(self, node):
        """Parse a raw XML string using the type of the first message part."""
        parsed = fromstring(node)
        first_part = list(self.abstract.parts.values())[0]
        return first_part.type.parse_xmlelement(parsed)

    @classmethod
    def parse(cls, definitions, xmlelement, operation):
        """Create an instance from a wsdl ``mime:content`` binding element."""
        name = xmlelement.get("name")

        content_type = part_name = None
        content_node = xmlelement.find("mime:content", namespaces=cls._nsmap)
        if content_node is not None:
            content_type = content_node.get("type")
            part_name = content_node.get("part")

        return cls(definitions.wsdl, name, operation, content_type, part_name)
class MimeXML(MimeMessage):
    """Binding for XML payloads that are not SOAP compliant (no SOAP
    Envelope) but do have a particular schema, declared through the
    ``mime:mimeXml`` element.

    The ``part`` attribute names the message part defining the concrete
    schema of the root XML element; it may be omitted when the message has
    only one part.

    :param wsdl: The main wsdl document
    :type wsdl: zeep.wsdl.wsdl.Document
    :param name:
    :param operation: The operation to which this message belongs
    :type operation: zeep.wsdl.bindings.soap.SoapOperation
    :param part_name:
    :type type: str

    """

    def serialize(self, *args, **kwargs):
        # Sending mime:mimeXml messages is not supported.
        raise NotImplementedError()

    def deserialize(self, node):
        """Parse the raw XML response with the first part's element."""
        parsed = fromstring(node)
        first_part = next(iter(self.abstract.parts.values()), None)
        return first_part.element.parse(parsed, self.wsdl.types)

    @classmethod
    def parse(cls, definitions, xmlelement, operation):
        """Create an instance from a wsdl ``mime:mimeXml`` binding element."""
        name = xmlelement.get("name")

        part_name = None
        content_node = xmlelement.find("mime:mimeXml", namespaces=cls._nsmap)
        if content_node is not None:
            part_name = content_node.get("part")

        return cls(definitions.wsdl, name, operation, part_name)
class MimeMultipart(MimeMessage):
    """The multipart/related MIME type aggregates an arbitrary set of MIME
    formatted parts into one message using the MIME type "multipart/related".

    The mime:multipartRelated element describes the concrete format of such a
    message::

        <mime:multipartRelated>
            <mime:part> *
                <-- mime element -->
            </mime:part>
        </mime:multipartRelated>

    The mime:part element describes each part of a multipart/related message.
    MIME elements appear within mime:part to specify the concrete MIME type
    for the part. If more than one MIME element appears inside a mime:part,
    they are alternatives.

    :param wsdl: The main wsdl document
    :type wsdl: zeep.wsdl.wsdl.Document
    :param name:
    :param operation: The operation to which this message belongs
    :type operation: zeep.wsdl.bindings.soap.SoapOperation
    :param part_name:
    :type type: str

    """
    # NOTE(review): serialization/deserialization of multipart messages is
    # not implemented yet; instances only inherit the MimeMessage behaviour.
    pass
| 34.936275 | 80 | 0.644872 |
acea22eb078da517089b26ff195227ed3d8de71a | 501 | py | Python | BSPQ20E3/build/lib/panel/cache.py | SPQ19-20/BSPQ20-E3 | 5866fd3a3914252f47516c168ba984fe6cf15d0b | [
"MIT"
] | null | null | null | BSPQ20E3/build/lib/panel/cache.py | SPQ19-20/BSPQ20-E3 | 5866fd3a3914252f47516c168ba984fe6cf15d0b | [
"MIT"
] | 10 | 2020-05-05T17:44:56.000Z | 2022-03-12T00:29:33.000Z | BSPQ20E3/build/lib/panel/cache.py | SPQ19-20/BSPQ20-E3 | 5866fd3a3914252f47516c168ba984fe6cf15d0b | [
"MIT"
] | 1 | 2021-05-26T17:12:15.000Z | 2021-05-26T17:12:15.000Z |
class Cache(object):
    """Process-wide singleton for cached lookup data.

    Calling ``Cache()`` always yields the very same shared state object, so
    attributes set through one reference are visible through every other.
    """

    class __Cache:
        # The single real state holder handed out by Cache().
        def __init__(self):
            self.COUNTRIES = ()
            self.DATE_CHOICES = []
            self.COUNTRY_CHOICES = []

    instance = None

    def __new__(cls):
        # Lazily create the shared state object on first use and return it
        # directly instead of a Cache wrapper.
        if Cache.instance is None:
            Cache.instance = Cache.__Cache()
        return Cache.instance

    def __getattr__(self, attribute):
        # Delegate reads to the shared state object.
        return getattr(self.instance, attribute)

    def __setattr__(self, attribute, value):
        # Delegate writes to the shared state object.
        return setattr(self.instance, attribute, value)
acea23cb1aa157a2a1e434256b6cb9a33796d2d5 | 296 | py | Python | samplescript.py | VanDavv/codesynth | 675815c862e0140d415baa0e6e5861dfa284de28 | [
"MIT"
] | null | null | null | samplescript.py | VanDavv/codesynth | 675815c862e0140d415baa0e6e5861dfa284de28 | [
"MIT"
] | null | null | null | samplescript.py | VanDavv/codesynth | 675815c862e0140d415baa0e6e5861dfa284de28 | [
"MIT"
] | null | null | null | # just for testing
import os
import sys
from pydub import AudioSegment
from pydub.playback import play
from gtts import gTTS
from io import BytesIO
# Synthesize the command-line arguments as Polish speech via Google TTS and
# play the resulting MP3 from an in-memory buffer (no temporary file).
out = BytesIO()
gTTS(' '.join(sys.argv[1:]), lang='pl', slow=False).write_to_fp(out)
out.seek(0)  # rewind so pydub reads the MP3 from the beginning
play(AudioSegment.from_file(out, format="mp3"))
| 19.733333 | 68 | 0.75 |
acea23cbe45a88579586f47c399376532277aae0 | 1,917 | py | Python | gitup/migrate.py | hr157/git-repo-updater | 4ad20a6979226bf066740287accc8239d82a89ec | [
"MIT"
] | 772 | 2015-01-17T09:11:07.000Z | 2022-03-23T08:50:31.000Z | gitup/migrate.py | hr157/git-repo-updater | 4ad20a6979226bf066740287accc8239d82a89ec | [
"MIT"
] | 50 | 2015-03-12T14:33:51.000Z | 2022-03-10T07:58:54.000Z | gitup/migrate.py | hr157/git-repo-updater | 4ad20a6979226bf066740287accc8239d82a89ec | [
"MIT"
] | 110 | 2015-01-30T07:27:23.000Z | 2021-12-15T07:22:20.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2018 Ben Kurtovic <ben.kurtovic@gmail.com>
# Released under the terms of the MIT License. See LICENSE for details.
import os
try:
from configparser import ConfigParser, NoSectionError
PY3K = True
except ImportError: # Python 2
from ConfigParser import SafeConfigParser as ConfigParser, NoSectionError
PY3K = False
__all__ = ["run_migrations"]
def _get_old_path():
"""Return the old default path to the configuration file."""
xdg_cfg = os.environ.get("XDG_CONFIG_HOME") or os.path.join("~", ".config")
return os.path.join(os.path.expanduser(xdg_cfg), "gitup", "config.ini")
def _migrate_old_path():
    """Migrate the old config location (~/.gitup) to the new one."""
    legacy_path = os.path.expanduser(os.path.join("~", ".gitup"))
    if not os.path.exists(legacy_path):
        # Nothing to migrate.
        return

    destination = _get_old_path()
    destination_dir = os.path.dirname(destination)
    if not os.path.exists(destination_dir):
        os.makedirs(destination_dir)
    os.rename(legacy_path, destination)
def _migrate_old_format():
    """Migrate the old config file format (.INI) to our custom list format."""
    old_path = _get_old_path()
    if not os.path.exists(old_path):
        return

    # Read the legacy INI file; optionxform keeps option names (bookmark
    # paths) case-sensitive instead of lowercasing them.
    config = ConfigParser(delimiters="=") if PY3K else ConfigParser()
    config.optionxform = lambda opt: opt
    config.read(old_path)

    try:
        bookmarks = [path for path, _ in config.items("bookmarks")]
    except NoSectionError:
        # No bookmarks section means an empty bookmark list.
        bookmarks = []

    if PY3K:
        # The new file is written in binary mode, so encode on Python 3.
        bookmarks = [path.encode("utf8") for path in bookmarks]

    # Replace config.ini with a plain newline-separated "bookmarks" file in
    # the same directory.
    new_path = os.path.join(os.path.split(old_path)[0], "bookmarks")
    os.rename(old_path, new_path)
    with open(new_path, "wb") as handle:
        handle.write(b"\n".join(bookmarks))
def run_migrations():
    """Run any necessary migrations to ensure the config file is up-to-date."""
    # Order matters: first move the file to its new location, then rewrite
    # its contents in the new format.
    _migrate_old_path()
    _migrate_old_format()
| 31.42623 | 79 | 0.6771 |
acea244b0e8e3b497e54baff6e11610d8031a3c4 | 139 | py | Python | LuoguCodes/AT899.py | Anguei/OI-Codes | 0ef271e9af0619d4c236e314cd6d8708d356536a | [
"MIT"
] | null | null | null | LuoguCodes/AT899.py | Anguei/OI-Codes | 0ef271e9af0619d4c236e314cd6d8708d356536a | [
"MIT"
] | null | null | null | LuoguCodes/AT899.py | Anguei/OI-Codes | 0ef271e9af0619d4c236e314cd6d8708d356536a | [
"MIT"
] | null | null | null | from collections import *
# AtCoder/Luogu AT899: read n strings and print the one that occurs most
# often.  NOTE: Python 2 syntax (raw_input / print statement).
n = int(raw_input())
ss = []
for _ in range(n):
    ss.append(raw_input())
# most_common()[0] is the (string, count) pair with the highest count.
print Counter(ss).most_common()[0][0]
| 19.857143 | 37 | 0.661871 |
acea25368ad620099a968954335875939c2ea102 | 6,116 | py | Python | adapi.py | seanthegeek/easyad-web | 821751249ce798a1341c481c56ccf5d3ff671471 | [
"Apache-2.0"
] | null | null | null | adapi.py | seanthegeek/easyad-web | 821751249ce798a1341c481c56ccf5d3ff671471 | [
"Apache-2.0"
] | null | null | null | adapi.py | seanthegeek/easyad-web | 821751249ce798a1341c481c56ccf5d3ff671471 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from os import urandom
import binascii
from datetime import datetime
from functools import wraps
from flask import Flask, jsonify, request, render_template
from ldap import LDAPError
from peewee import *
from easyad import EasyAD
# Flask application, configured from the local config.py file.
app = Flask(__name__)
app.config.from_pyfile("config.py")
# Active Directory client and local SQLite database for API bookkeeping.
ad = EasyAD(app.config)
db = SqliteDatabase('easyad.db')
def generate_api_key(byte_size=16):
    """Return a random lowercase-hex API key of byte_size random bytes
    (i.e. 2 * byte_size hex characters)."""
    return urandom(byte_size).hex()
class BaseModel(Model):
    """Base peewee model binding all tables to the module-level database."""
    class Meta:
        database = db
class APIUser(BaseModel):
    """A named API client identified by a secret key.

    All mutators stamp ``updated`` and persist the row; that shared logic
    is factored into the private ``_touch`` helper instead of being
    repeated in each method.
    """
    name = CharField(unique=True)
    api_key = CharField(default=generate_api_key, unique=True)
    enabled = BooleanField(default=True)
    created = DateTimeField(default=datetime.now)
    updated = DateTimeField(default=datetime.now)

    def _touch(self):
        # Record the modification time and save; shared by all mutators.
        self.updated = datetime.now()
        self.save()

    def enable(self):
        """Allow requests made with this user's key again."""
        self.enabled = True
        self._touch()

    def disable(self):
        """Reject requests made with this user's key."""
        self.enabled = False
        self._touch()

    def rename(self, new_name):
        """Change the display name of this API user."""
        self.name = new_name
        self._touch()

    def reset(self):
        """Generate, store and return a fresh API key."""
        self.api_key = generate_api_key()
        self._touch()
        return self.api_key
class APIAuditRecord(BaseModel):
    """A single audited action performed by an API user."""
    user = ForeignKeyField(APIUser, related_name="audit_records")
    timestamp = DateTimeField(default=datetime.now)
    action = CharField()
# Create the tables at import time if they do not exist yet (safe=True).
db.create_tables([APIUser, APIAuditRecord], safe=True)
def parse_ldap_error(e):
    """Build a human-readable message from an LDAPError's 'desc' field."""
    description = e.args[0]["desc"]
    return "An LDAP error occurred - %s" % description
def api_call(function):
    """Decorator for API views.

    Requires a valid, enabled ``api_key`` query parameter and translates
    common failures into JSON error responses with appropriate HTTP status
    codes (401 missing key, 403 invalid/disabled key, 404 lookup failure,
    500 LDAP error).
    """
    @wraps(function)
    def process_api_call(*args, **kwargs):
        if "api_key" not in request.args:
            return jsonify(dict(error="You must supply an api_key parameter")), 401
        try:
            user = APIUser.get(api_key=request.args["api_key"])
            if not user.enabled:
                return jsonify(dict(error="This API key is disabled")), 403
            # The view itself runs inside the try block, so its ValueError /
            # LDAPError exceptions are mapped below as well.
            return function(*args, **kwargs)
        except DoesNotExist:
            # No APIUser row matches the supplied key.
            return jsonify(dict(error="The API key is invalid")), 403
        except ValueError as e:
            # Presumably raised by easyad when a user/group is not found —
            # hence the 404 mapping; TODO confirm against easyad.
            return jsonify(dict(error=str(e))), 404
        except LDAPError as e:
            return jsonify(dict(error=parse_ldap_error(e))), 500
    return process_api_call
@app.route("/")
def index():
    """Render the index.html template."""
    return render_template("index.html")
@app.route("/authenticate", methods=["GET", "POST"])
@api_call
def authenticate_user():
    """Check a username/password pair against Active Directory.

    Returns a JSON object with an ``authenticated`` boolean.  Bug fix: the
    easyad result was previously returned as a bare bool, which Flask cannot
    turn into a response (TypeError -> 500); it is now wrapped in jsonify().
    """
    if request.method != "POST":
        return jsonify(dict(error="Must be sent as a POST request")), 400
    if "username" not in request.args or "password" not in request.args:
        return jsonify(dict(error="Must provide a username and password")), 400
    # NOTE(review): credentials are read from the query string; consider also
    # accepting them in the POST body (request.form) so passwords do not end
    # up in server/proxy logs.
    return jsonify(dict(authenticated=ad.authenticate_user(
        request.args["username"], request.args["password"])))
@app.route("/user/<user_string>")
@api_call
def get_user(user_string):
    """Look up a single AD user and return its attributes as JSON."""
    base = request.args.get("base")
    attributes = request.args.get("attributes")
    if attributes is not None:
        attributes = attributes.split(",")
    return jsonify(ad.get_user(user_string, base=base, attributes=attributes, json_safe=True))
@app.route("/user/<user_string>/groups")
@api_call
def user_groups(user_string):
    """Return every AD group the given user belongs to as JSON."""
    base = request.args.get("base")
    return jsonify(ad.get_all_user_groups(user_string, base=base, json_safe=True))
@app.route("/user/<user_string>/member-of/<group_string>")
@api_call
def user_is_member_of_group(user_string, group_string):
    """Return {"member": bool} indicating group membership of the user."""
    base = request.args.get("base")
    membership = ad.user_is_member_of_group(user_string, group_string, base=base)
    return jsonify(dict(member=membership))
@app.route("/group/<group_string>")
@api_call
def get_group(group_string):
    """Look up a single AD group and return its attributes as JSON."""
    base = request.args.get("base")
    attributes = request.args.get("attributes")
    if attributes is not None:
        attributes = attributes.split(",")
    return jsonify(ad.get_group(group_string, base=base, attributes=attributes, json_safe=True))
@app.route("/group/<group_string>/users")
@api_call
def get_group_members(group_string):
    """Return every user in the given AD group as JSON."""
    base = request.args.get("base")
    return jsonify(ad.get_all_users_in_group(group_string, base=base, json_safe=True))
@app.route("/search/users/<user_string>")
@api_call
def search_for_users(user_string):
    """Search AD for users matching user_string and return matches as JSON.

    Optional query parameters: base, plus comma-separated
    search_attributes / return_attributes lists.
    """
    base = request.args.get("base")
    search_attributes = request.args.get("search_attributes")
    if search_attributes is not None:
        search_attributes = search_attributes.split(",")
    return_attributes = request.args.get("return_attributes")
    if return_attributes is not None:
        return_attributes = return_attributes.split(",")
    return jsonify(ad.search_for_users(user_string,
                                       base=base,
                                       search_attributes=search_attributes,
                                       return_attributes=return_attributes,
                                       json_safe=True))
@app.route("/search/groups/<group_string>")
@api_call
def search_for_groups(group_string):
    """Search the directory for groups matching *group_string*.

    Optional query parameters mirror :func:`search_for_users`:
    ``base``, ``search_attributes`` and ``return_attributes``.
    """
    base = request.args.get("base")
    raw_search = request.args.get("search_attributes")
    raw_return = request.args.get("return_attributes")
    search_attributes = raw_search.split(",") if raw_search is not None else None
    return_attributes = raw_return.split(",") if raw_return is not None else None
    return jsonify(ad.search_for_groups(
        group_string,
        base=base,
        search_attributes=search_attributes,
        return_attributes=return_attributes,
        json_safe=True))
if __name__ == "__main__":
    # Development entry point only: binds to localhost with the Flask
    # debugger enabled; not suitable for production deployment.
    app.run(host="127.0.0.1", port=8080, debug=True)
| 30.427861 | 97 | 0.654349 |
acea257cd013b07a1e1f983ea0fbf71ff3483f27 | 4,865 | py | Python | src/trunk/apps/fdsnws/fdsnws/reqtrack.py | thefroid/seiscomp3 | 0b05d5550dcea000a93c7d9a39c5347d8786a91a | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2015-09-17T22:43:50.000Z | 2017-11-29T20:27:11.000Z | src/trunk/apps/fdsnws/fdsnws/reqtrack.py | thefroid/seiscomp3 | 0b05d5550dcea000a93c7d9a39c5347d8786a91a | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2016-04-26T00:03:09.000Z | 2017-12-05T02:24:50.000Z | src/trunk/apps/fdsnws/fdsnws/reqtrack.py | salichon/seiscomp3 | 4f7715f9ff9a35e7912c379ebf10446d0bceaeb2 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | from twisted.internet import reactor
from seiscomp3 import Core, Communication, DataModel
def callFromThread(f):
    """Decorator: schedule *f* to run on the Twisted reactor thread.

    The wrapped call returns immediately (any return value of *f* is
    discarded by ``reactor.callFromThread``).
    """
    def _scheduled(*args, **kwargs):
        reactor.callFromThread(f, *args, **kwargs)
    return _scheduled
def enableNotifier(f):
    """Decorator: run *f* with the DataModel notifier enabled, then restore
    the previous notifier state.

    BUGFIX: the previous state is now restored in a ``finally`` block, so an
    exception raised by *f* no longer leaves the notifier stuck enabled.
    """
    def wrap(*args, **kwargs):
        saveState = DataModel.Notifier.IsEnabled()
        DataModel.Notifier.SetEnabled(True)
        try:
            f(*args, **kwargs)
        finally:
            DataModel.Notifier.SetEnabled(saveState)
    return wrap
class RequestTrackerDB(object):
    """Accumulates per-line and per-volume status for a single Arclink
    request and publishes the finished ArclinkRequest record (as a notifier
    message) to the "LOGGING" messaging group.
    """

    def __init__(self, appName, msgConn, req_id, req_type, user, header, label, user_ip, client_ip):
        """Create the ArclinkRequest record and initialize running totals.

        msgConn is the messaging connection later used by :meth:`send`;
        the remaining arguments populate the request metadata.
        """
        self.msgConn = msgConn
        self.arclinkRequest = DataModel.ArclinkRequest.Create()
        self.arclinkRequest.setCreated(Core.Time.GMT())
        self.arclinkRequest.setRequestID(req_id)
        self.arclinkRequest.setUserID(str(user))
        self.arclinkRequest.setClientID(appName)
        if user_ip: self.arclinkRequest.setUserIP(user_ip)
        if client_ip: self.arclinkRequest.setClientIP(client_ip)
        self.arclinkRequest.setType(req_type)
        self.arclinkRequest.setLabel(label)
        self.arclinkRequest.setHeader(header)
        # Running aggregates used to build the request summary.
        self.averageTimeWindow = Core.TimeSpan(0.)
        self.totalLineCount = 0
        self.okLineCount = 0
        self.requestLines = []
        self.statusLines = []

    def send(self):
        """Send any pending notifier message to the LOGGING group."""
        msg = DataModel.Notifier.GetMessage(True)
        if msg:
            self.msgConn.send("LOGGING", msg)

    def line_status(self, start_time, end_time, network, station, channel,
            location, restricted, net_class, shared, constraints, volume, status, size, message):
        """Record the outcome of one request line (one stream/time window).

        Empty stream identifiers are normalized to ".", and missing
        volume/size/message default to "NODATA"/0/"".
        """
        if network is None or network == "":
            network = "."
        if station is None or station == "":
            station = "."
        if channel is None or channel == "":
            channel = "."
        if location is None or location == "":
            location = "."
        if volume is None:
            volume = "NODATA"
        if size is None:
            size = 0
        if message is None:
            message = ""
        if isinstance(constraints, list):
            constr = " ".join(constraints)
        else:
            # BUGFIX: dict.items() instead of the Python 2-only iteritems();
            # behaves identically on Python 2 and keeps the code working on 3.
            constr = " ".join([ a+"="+b for (a, b) in constraints.items() ])
        arclinkRequestLine = DataModel.ArclinkRequestLine()
        arclinkRequestLine.setStart(start_time)
        arclinkRequestLine.setEnd(end_time)
        # Stream identifier fields are truncated to 8 characters each.
        arclinkRequestLine.setStreamID(DataModel.WaveformStreamID(network[:8], station[:8], location[:8], channel[:8], ""))
        arclinkRequestLine.setConstraints(constr)
        if isinstance(restricted, bool): arclinkRequestLine.setRestricted(restricted)
        arclinkRequestLine.setNetClass(net_class)
        if isinstance(shared, bool): arclinkRequestLine.setShared(shared)
        arclinkStatusLine = DataModel.ArclinkStatusLine()
        arclinkStatusLine.setVolumeID(volume)
        arclinkStatusLine.setStatus(status)
        arclinkStatusLine.setSize(size)
        arclinkStatusLine.setMessage(message)
        arclinkRequestLine.setStatus(arclinkStatusLine)
        self.requestLines.append(arclinkRequestLine)
        # Update summary aggregates.
        self.averageTimeWindow += end_time - start_time
        self.totalLineCount += 1
        if status == "OK": self.okLineCount += 1

    def volume_status(self, volume, status, size, message):
        """Record the outcome of one delivered volume."""
        if volume is None:
            volume = "NODATA"
        if size is None:
            size = 0
        if message is None:
            message = ""
        arclinkStatusLine = DataModel.ArclinkStatusLine()
        arclinkStatusLine.setVolumeID(volume)
        arclinkStatusLine.setStatus(status)
        arclinkStatusLine.setSize(size)
        arclinkStatusLine.setMessage(message)
        self.statusLines.append(arclinkStatusLine)

    @callFromThread
    @enableNotifier
    def request_status(self, status, message):
        """Finalize the request: build the summary, attach all recorded
        lines, and submit the whole record via :meth:`send`.

        Runs on the reactor thread with the notifier enabled (decorators).
        """
        if message is None:
            message = ""
        self.arclinkRequest.setStatus(status)
        self.arclinkRequest.setMessage(message)
        ars = DataModel.ArclinkRequestSummary()
        tw = self.averageTimeWindow.seconds()
        if self.totalLineCount > 0:
            tw = self.averageTimeWindow.seconds() / self.totalLineCount # avarage request time window
        if tw >= 2**31: tw = -1 # prevent 32bit int overflow
        ars.setAverageTimeWindow(tw)
        ars.setTotalLineCount(self.totalLineCount)
        ars.setOkLineCount(self.okLineCount)
        self.arclinkRequest.setSummary(ars)
        al = DataModel.ArclinkLog()
        al.add(self.arclinkRequest)
        for obj in self.requestLines:
            self.arclinkRequest.add(obj)
        for obj in self.statusLines:
            self.arclinkRequest.add(obj)
        self.send()

    def __verseed_errors(self, volume):
        # Placeholder -- intentionally not implemented.
        pass

    def verseed(self, volume, file):
        # Placeholder -- intentionally not implemented.
        pass
| 32.871622 | 123 | 0.641932 |
acea27bb4f3338635abd878fbd6bcd806fd7443a | 1,393 | py | Python | RenamingTool_alpha.py | spencerpomme/coconuts-on-fire | 407d61b3583c472707a4e7b077a9a3ab12743996 | [
"Apache-2.0"
] | 1 | 2015-04-23T11:43:26.000Z | 2015-04-23T11:43:26.000Z | RenamingTool_alpha.py | spencerpomme/coconuts-on-fire | 407d61b3583c472707a4e7b077a9a3ab12743996 | [
"Apache-2.0"
] | null | null | null | RenamingTool_alpha.py | spencerpomme/coconuts-on-fire | 407d61b3583c472707a4e7b077a9a3ab12743996 | [
"Apache-2.0"
] | null | null | null | '''
A module that automatically change file names of leetcode problems only.
Copy this file to LeetCode local repository to use.
'''
import re, os, shutil
# Working directory: the directory the script is launched from; all
# renaming below operates on the files found here.
current_dir = os.path.abspath('.')
print(current_dir)
def addprefix(filename):
    """Zero-pad the leading problem number of *filename* to three digits.

    Expects names like ``12#Problem_1.py`` and returns ``012#Problem_1.py``.
    Raises AttributeError when the name does not match the expected pattern
    (``search`` returns None) or the number is already three digits -- the
    caller uses that exception to mean "nothing to rename".
    """
    pattern = re.compile(r'(\d+)(#\S*._\d*\.\w+)')
    match = pattern.search(filename)
    number = match.group(1)
    tail = match.group(2)
    value = int(number)
    if 9 < value < 100 and len(number) < 3:
        number = '0' + number
    elif value < 10 and len(number) < 3:
        number = '00' + number
    else:
        raise AttributeError
    return number + tail
def getNameList(directory):
    """Return the file names directly inside *directory* (no recursion).

    Only the first tuple yielded by os.walk (the top level) is used.
    NOTE(review): if os.walk yields nothing (e.g. nonexistent directory)
    this raises NameError, matching the original behavior.
    """
    for _dirpath, _dirnames, filenames in os.walk(directory):
        top_level_files = filenames
        break
    return top_level_files
def changeName(directory):
    """Rename every matching file in *directory* to its zero-padded form.

    Files whose names do not match addprefix's pattern (or are already
    padded) are reported and left alone. The actual shutil.move calls are
    commented out, so this currently only prints what it would do.
    """
    for name in getNameList(directory):
        old_name = os.path.join(directory, name)
        try:
            padded = addprefix(name)
        except AttributeError:
            print('File:', old_name, 'has no need to be changed.\n')
            #shutil.move(old_name, old_name)
        else:
            new_name = os.path.join(directory, padded)
            print('Renaming "%s" to "%s"...\n' % (old_name, new_name))
            #shutil.move(old_name, new_name)
if __name__ == '__main__':
    # Entry point: process the directory the script was launched from.
    changeName(current_dir)
| 24.017241 | 72 | 0.605887 |
acea294f8866913ad4071c8bc793a8ec6cb08474 | 34,791 | py | Python | netbox/dcim/choices.py | HumanEquivalentUnit/netbox | 9c6938e7ae8e85d83d34f1b4b10145b4f629bc86 | [
"Apache-2.0"
] | null | null | null | netbox/dcim/choices.py | HumanEquivalentUnit/netbox | 9c6938e7ae8e85d83d34f1b4b10145b4f629bc86 | [
"Apache-2.0"
] | null | null | null | netbox/dcim/choices.py | HumanEquivalentUnit/netbox | 9c6938e7ae8e85d83d34f1b4b10145b4f629bc86 | [
"Apache-2.0"
] | null | null | null | from utilities.choices import ChoiceSet
#
# Sites
#
class SiteStatusChoices(ChoiceSet):
    """Lifecycle status values for a Site.

    CSS_CLASSES maps each status slug to a CSS class name (presumably for
    UI badge styling -- confirm against the templates that consume it).
    """
    STATUS_PLANNED = 'planned'
    STATUS_STAGING = 'staging'
    STATUS_ACTIVE = 'active'
    STATUS_DECOMMISSIONING = 'decommissioning'
    STATUS_RETIRED = 'retired'
    CHOICES = (
        (STATUS_PLANNED, 'Planned'),
        (STATUS_STAGING, 'Staging'),
        (STATUS_ACTIVE, 'Active'),
        (STATUS_DECOMMISSIONING, 'Decommissioning'),
        (STATUS_RETIRED, 'Retired'),
    )
    CSS_CLASSES = {
        STATUS_PLANNED: 'info',
        STATUS_STAGING: 'primary',
        STATUS_ACTIVE: 'success',
        STATUS_DECOMMISSIONING: 'warning',
        STATUS_RETIRED: 'danger',
    }
#
# Racks
#
class RackTypeChoices(ChoiceSet):
    """Physical form-factor choices for a Rack."""
    TYPE_2POST = '2-post-frame'
    TYPE_4POST = '4-post-frame'
    TYPE_CABINET = '4-post-cabinet'
    TYPE_WALLFRAME = 'wall-frame'
    TYPE_WALLCABINET = 'wall-cabinet'
    CHOICES = (
        (TYPE_2POST, '2-post frame'),
        (TYPE_4POST, '4-post frame'),
        (TYPE_CABINET, '4-post cabinet'),
        (TYPE_WALLFRAME, 'Wall-mounted frame'),
        (TYPE_WALLCABINET, 'Wall-mounted cabinet'),
    )
class RackWidthChoices(ChoiceSet):
    """Rail-to-rail width choices for a Rack; values are integer inches."""
    WIDTH_10IN = 10
    WIDTH_19IN = 19
    WIDTH_21IN = 21
    WIDTH_23IN = 23
    CHOICES = (
        (WIDTH_10IN, '10 inches'),
        (WIDTH_19IN, '19 inches'),
        (WIDTH_21IN, '21 inches'),
        (WIDTH_23IN, '23 inches'),
    )
class RackStatusChoices(ChoiceSet):
    """Lifecycle status values for a Rack.

    CSS_CLASSES maps each status slug to a CSS class name (presumably for
    UI badge styling -- confirm against the templates that consume it).
    """
    STATUS_RESERVED = 'reserved'
    STATUS_AVAILABLE = 'available'
    STATUS_PLANNED = 'planned'
    STATUS_ACTIVE = 'active'
    STATUS_DEPRECATED = 'deprecated'
    CHOICES = (
        (STATUS_RESERVED, 'Reserved'),
        (STATUS_AVAILABLE, 'Available'),
        (STATUS_PLANNED, 'Planned'),
        (STATUS_ACTIVE, 'Active'),
        (STATUS_DEPRECATED, 'Deprecated'),
    )
    CSS_CLASSES = {
        STATUS_RESERVED: 'warning',
        STATUS_AVAILABLE: 'success',
        STATUS_PLANNED: 'info',
        STATUS_ACTIVE: 'primary',
        STATUS_DEPRECATED: 'danger',
    }
class RackDimensionUnitChoices(ChoiceSet):
    """Units of measure for rack outer dimensions."""
    UNIT_MILLIMETER = 'mm'
    UNIT_INCH = 'in'
    CHOICES = (
        (UNIT_MILLIMETER, 'Millimeters'),
        (UNIT_INCH, 'Inches'),
    )
class RackElevationDetailRenderChoices(ChoiceSet):
    """Output formats for rendering a rack elevation (JSON data or SVG)."""
    RENDER_JSON = 'json'
    RENDER_SVG = 'svg'
    CHOICES = (
        (RENDER_JSON, 'json'),
        (RENDER_SVG, 'svg')
    )
#
# DeviceTypes
#
class SubdeviceRoleChoices(ChoiceSet):
    """Parent/child role of a device type within a chassis hierarchy."""
    ROLE_PARENT = 'parent'
    ROLE_CHILD = 'child'
    CHOICES = (
        (ROLE_PARENT, 'Parent'),
        (ROLE_CHILD, 'Child'),
    )
#
# Devices
#
class DeviceFaceChoices(ChoiceSet):
    """Rack face (front or rear) on which a device is mounted."""
    FACE_FRONT = 'front'
    FACE_REAR = 'rear'
    CHOICES = (
        (FACE_FRONT, 'Front'),
        (FACE_REAR, 'Rear'),
    )
class DeviceStatusChoices(ChoiceSet):
    """Operational status values for a Device.

    CSS_CLASSES maps each status slug to a CSS class name (presumably for
    UI badge styling -- confirm against the templates that consume it).
    """
    STATUS_OFFLINE = 'offline'
    STATUS_ACTIVE = 'active'
    STATUS_PLANNED = 'planned'
    STATUS_STAGED = 'staged'
    STATUS_FAILED = 'failed'
    STATUS_INVENTORY = 'inventory'
    STATUS_DECOMMISSIONING = 'decommissioning'
    CHOICES = (
        (STATUS_OFFLINE, 'Offline'),
        (STATUS_ACTIVE, 'Active'),
        (STATUS_PLANNED, 'Planned'),
        (STATUS_STAGED, 'Staged'),
        (STATUS_FAILED, 'Failed'),
        (STATUS_INVENTORY, 'Inventory'),
        (STATUS_DECOMMISSIONING, 'Decommissioning'),
    )
    CSS_CLASSES = {
        STATUS_OFFLINE: 'warning',
        STATUS_ACTIVE: 'success',
        STATUS_PLANNED: 'info',
        STATUS_STAGED: 'primary',
        STATUS_FAILED: 'danger',
        STATUS_INVENTORY: 'secondary',
        STATUS_DECOMMISSIONING: 'warning',
    }
#
# ConsolePorts
#
class ConsolePortTypeChoices(ChoiceSet):
    """Physical connector types for console ports, grouped by family."""
    TYPE_DE9 = 'de-9'
    TYPE_DB25 = 'db-25'
    TYPE_RJ11 = 'rj-11'
    TYPE_RJ12 = 'rj-12'
    TYPE_RJ45 = 'rj-45'
    TYPE_USB_A = 'usb-a'
    TYPE_USB_B = 'usb-b'
    TYPE_USB_C = 'usb-c'
    TYPE_USB_MINI_A = 'usb-mini-a'
    TYPE_USB_MINI_B = 'usb-mini-b'
    TYPE_USB_MICRO_A = 'usb-micro-a'
    TYPE_USB_MICRO_B = 'usb-micro-b'
    TYPE_USB_MICRO_AB = 'usb-micro-ab'
    TYPE_OTHER = 'other'
    CHOICES = (
        ('Serial', (
            (TYPE_DE9, 'DE-9'),
            (TYPE_DB25, 'DB-25'),
            (TYPE_RJ11, 'RJ-11'),
            (TYPE_RJ12, 'RJ-12'),
            (TYPE_RJ45, 'RJ-45'),
        )),
        ('USB', (
            (TYPE_USB_A, 'USB Type A'),
            (TYPE_USB_B, 'USB Type B'),
            (TYPE_USB_C, 'USB Type C'),
            (TYPE_USB_MINI_A, 'USB Mini A'),
            (TYPE_USB_MINI_B, 'USB Mini B'),
            (TYPE_USB_MICRO_A, 'USB Micro A'),
            (TYPE_USB_MICRO_B, 'USB Micro B'),
            (TYPE_USB_MICRO_AB, 'USB Micro AB'),
        )),
        ('Other', (
            (TYPE_OTHER, 'Other'),
        )),
    )
class ConsolePortSpeedChoices(ChoiceSet):
    """Serial line speed choices for console ports; values are bits/second."""
    SPEED_1200 = 1200
    SPEED_2400 = 2400
    SPEED_4800 = 4800
    SPEED_9600 = 9600
    SPEED_19200 = 19200
    SPEED_38400 = 38400
    SPEED_57600 = 57600
    SPEED_115200 = 115200
    CHOICES = (
        (SPEED_1200, '1200 bps'),
        (SPEED_2400, '2400 bps'),
        (SPEED_4800, '4800 bps'),
        (SPEED_9600, '9600 bps'),
        (SPEED_19200, '19.2 kbps'),
        (SPEED_38400, '38.4 kbps'),
        (SPEED_57600, '57.6 kbps'),
        (SPEED_115200, '115.2 kbps'),
    )
#
# PowerPorts
#
class PowerPortTypeChoices(ChoiceSet):
    """Physical connector types for power ports (device-side plugs/inlets),
    grouped by standard family.

    The slug values are persisted identifiers -- do not rename them.
    """
    # IEC 60320
    TYPE_IEC_C6 = 'iec-60320-c6'
    TYPE_IEC_C8 = 'iec-60320-c8'
    TYPE_IEC_C14 = 'iec-60320-c14'
    TYPE_IEC_C16 = 'iec-60320-c16'
    TYPE_IEC_C20 = 'iec-60320-c20'
    TYPE_IEC_C22 = 'iec-60320-c22'
    # IEC 60309
    TYPE_IEC_PNE4H = 'iec-60309-p-n-e-4h'
    TYPE_IEC_PNE6H = 'iec-60309-p-n-e-6h'
    TYPE_IEC_PNE9H = 'iec-60309-p-n-e-9h'
    TYPE_IEC_2PE4H = 'iec-60309-2p-e-4h'
    TYPE_IEC_2PE6H = 'iec-60309-2p-e-6h'
    TYPE_IEC_2PE9H = 'iec-60309-2p-e-9h'
    TYPE_IEC_3PE4H = 'iec-60309-3p-e-4h'
    TYPE_IEC_3PE6H = 'iec-60309-3p-e-6h'
    TYPE_IEC_3PE9H = 'iec-60309-3p-e-9h'
    TYPE_IEC_3PNE4H = 'iec-60309-3p-n-e-4h'
    TYPE_IEC_3PNE6H = 'iec-60309-3p-n-e-6h'
    TYPE_IEC_3PNE9H = 'iec-60309-3p-n-e-9h'
    # NEMA non-locking
    TYPE_NEMA_115P = 'nema-1-15p'
    TYPE_NEMA_515P = 'nema-5-15p'
    TYPE_NEMA_520P = 'nema-5-20p'
    TYPE_NEMA_530P = 'nema-5-30p'
    TYPE_NEMA_550P = 'nema-5-50p'
    TYPE_NEMA_615P = 'nema-6-15p'
    TYPE_NEMA_620P = 'nema-6-20p'
    TYPE_NEMA_630P = 'nema-6-30p'
    TYPE_NEMA_650P = 'nema-6-50p'
    TYPE_NEMA_1030P = 'nema-10-30p'
    TYPE_NEMA_1050P = 'nema-10-50p'
    TYPE_NEMA_1420P = 'nema-14-20p'
    TYPE_NEMA_1430P = 'nema-14-30p'
    TYPE_NEMA_1450P = 'nema-14-50p'
    TYPE_NEMA_1460P = 'nema-14-60p'
    TYPE_NEMA_1515P = 'nema-15-15p'
    TYPE_NEMA_1520P = 'nema-15-20p'
    TYPE_NEMA_1530P = 'nema-15-30p'
    TYPE_NEMA_1550P = 'nema-15-50p'
    TYPE_NEMA_1560P = 'nema-15-60p'
    # NEMA locking
    TYPE_NEMA_L115P = 'nema-l1-15p'
    TYPE_NEMA_L515P = 'nema-l5-15p'
    TYPE_NEMA_L520P = 'nema-l5-20p'
    TYPE_NEMA_L530P = 'nema-l5-30p'
    TYPE_NEMA_L550P = 'nema-l5-50p'
    TYPE_NEMA_L615P = 'nema-l6-15p'
    TYPE_NEMA_L620P = 'nema-l6-20p'
    TYPE_NEMA_L630P = 'nema-l6-30p'
    TYPE_NEMA_L650P = 'nema-l6-50p'
    TYPE_NEMA_L1030P = 'nema-l10-30p'
    TYPE_NEMA_L1420P = 'nema-l14-20p'
    TYPE_NEMA_L1430P = 'nema-l14-30p'
    TYPE_NEMA_L1450P = 'nema-l14-50p'
    TYPE_NEMA_L1460P = 'nema-l14-60p'
    TYPE_NEMA_L1520P = 'nema-l15-20p'
    TYPE_NEMA_L1530P = 'nema-l15-30p'
    TYPE_NEMA_L1550P = 'nema-l15-50p'
    TYPE_NEMA_L1560P = 'nema-l15-60p'
    TYPE_NEMA_L2120P = 'nema-l21-20p'
    TYPE_NEMA_L2130P = 'nema-l21-30p'
    # California style
    TYPE_CS6361C = 'cs6361c'
    TYPE_CS6365C = 'cs6365c'
    TYPE_CS8165C = 'cs8165c'
    TYPE_CS8265C = 'cs8265c'
    TYPE_CS8365C = 'cs8365c'
    TYPE_CS8465C = 'cs8465c'
    # ITA/international
    TYPE_ITA_C = 'ita-c'
    TYPE_ITA_E = 'ita-e'
    TYPE_ITA_F = 'ita-f'
    TYPE_ITA_EF = 'ita-ef'
    TYPE_ITA_G = 'ita-g'
    TYPE_ITA_H = 'ita-h'
    TYPE_ITA_I = 'ita-i'
    TYPE_ITA_J = 'ita-j'
    TYPE_ITA_K = 'ita-k'
    TYPE_ITA_L = 'ita-l'
    TYPE_ITA_M = 'ita-m'
    TYPE_ITA_N = 'ita-n'
    TYPE_ITA_O = 'ita-o'
    # USB
    TYPE_USB_A = 'usb-a'
    TYPE_USB_B = 'usb-b'
    TYPE_USB_C = 'usb-c'
    TYPE_USB_MINI_A = 'usb-mini-a'
    TYPE_USB_MINI_B = 'usb-mini-b'
    TYPE_USB_MICRO_A = 'usb-micro-a'
    TYPE_USB_MICRO_B = 'usb-micro-b'
    TYPE_USB_MICRO_AB = 'usb-micro-ab'
    TYPE_USB_3_B = 'usb-3-b'
    TYPE_USB_3_MICROB = 'usb-3-micro-b'
    # Direct current (DC)
    TYPE_DC = 'dc-terminal'
    # Proprietary
    TYPE_SAF_D_GRID = 'saf-d-grid'
    # Other
    TYPE_HARDWIRED = 'hardwired'
    CHOICES = (
        ('IEC 60320', (
            (TYPE_IEC_C6, 'C6'),
            (TYPE_IEC_C8, 'C8'),
            (TYPE_IEC_C14, 'C14'),
            (TYPE_IEC_C16, 'C16'),
            (TYPE_IEC_C20, 'C20'),
            (TYPE_IEC_C22, 'C22'),
        )),
        ('IEC 60309', (
            (TYPE_IEC_PNE4H, 'P+N+E 4H'),
            (TYPE_IEC_PNE6H, 'P+N+E 6H'),
            (TYPE_IEC_PNE9H, 'P+N+E 9H'),
            (TYPE_IEC_2PE4H, '2P+E 4H'),
            (TYPE_IEC_2PE6H, '2P+E 6H'),
            (TYPE_IEC_2PE9H, '2P+E 9H'),
            (TYPE_IEC_3PE4H, '3P+E 4H'),
            (TYPE_IEC_3PE6H, '3P+E 6H'),
            (TYPE_IEC_3PE9H, '3P+E 9H'),
            (TYPE_IEC_3PNE4H, '3P+N+E 4H'),
            (TYPE_IEC_3PNE6H, '3P+N+E 6H'),
            (TYPE_IEC_3PNE9H, '3P+N+E 9H'),
        )),
        ('NEMA (Non-locking)', (
            (TYPE_NEMA_115P, 'NEMA 1-15P'),
            (TYPE_NEMA_515P, 'NEMA 5-15P'),
            (TYPE_NEMA_520P, 'NEMA 5-20P'),
            (TYPE_NEMA_530P, 'NEMA 5-30P'),
            (TYPE_NEMA_550P, 'NEMA 5-50P'),
            (TYPE_NEMA_615P, 'NEMA 6-15P'),
            (TYPE_NEMA_620P, 'NEMA 6-20P'),
            (TYPE_NEMA_630P, 'NEMA 6-30P'),
            (TYPE_NEMA_650P, 'NEMA 6-50P'),
            (TYPE_NEMA_1030P, 'NEMA 10-30P'),
            (TYPE_NEMA_1050P, 'NEMA 10-50P'),
            (TYPE_NEMA_1420P, 'NEMA 14-20P'),
            (TYPE_NEMA_1430P, 'NEMA 14-30P'),
            (TYPE_NEMA_1450P, 'NEMA 14-50P'),
            (TYPE_NEMA_1460P, 'NEMA 14-60P'),
            (TYPE_NEMA_1515P, 'NEMA 15-15P'),
            (TYPE_NEMA_1520P, 'NEMA 15-20P'),
            (TYPE_NEMA_1530P, 'NEMA 15-30P'),
            (TYPE_NEMA_1550P, 'NEMA 15-50P'),
            (TYPE_NEMA_1560P, 'NEMA 15-60P'),
        )),
        ('NEMA (Locking)', (
            (TYPE_NEMA_L115P, 'NEMA L1-15P'),
            (TYPE_NEMA_L515P, 'NEMA L5-15P'),
            (TYPE_NEMA_L520P, 'NEMA L5-20P'),
            (TYPE_NEMA_L530P, 'NEMA L5-30P'),
            (TYPE_NEMA_L550P, 'NEMA L5-50P'),
            (TYPE_NEMA_L615P, 'NEMA L6-15P'),
            (TYPE_NEMA_L620P, 'NEMA L6-20P'),
            (TYPE_NEMA_L630P, 'NEMA L6-30P'),
            (TYPE_NEMA_L650P, 'NEMA L6-50P'),
            (TYPE_NEMA_L1030P, 'NEMA L10-30P'),
            (TYPE_NEMA_L1420P, 'NEMA L14-20P'),
            (TYPE_NEMA_L1430P, 'NEMA L14-30P'),
            (TYPE_NEMA_L1450P, 'NEMA L14-50P'),
            (TYPE_NEMA_L1460P, 'NEMA L14-60P'),
            (TYPE_NEMA_L1520P, 'NEMA L15-20P'),
            (TYPE_NEMA_L1530P, 'NEMA L15-30P'),
            (TYPE_NEMA_L1550P, 'NEMA L15-50P'),
            (TYPE_NEMA_L1560P, 'NEMA L15-60P'),
            (TYPE_NEMA_L2120P, 'NEMA L21-20P'),
            (TYPE_NEMA_L2130P, 'NEMA L21-30P'),
        )),
        ('California Style', (
            (TYPE_CS6361C, 'CS6361C'),
            (TYPE_CS6365C, 'CS6365C'),
            (TYPE_CS8165C, 'CS8165C'),
            (TYPE_CS8265C, 'CS8265C'),
            (TYPE_CS8365C, 'CS8365C'),
            (TYPE_CS8465C, 'CS8465C'),
        )),
        ('International/ITA', (
            (TYPE_ITA_C, 'ITA Type C (CEE 7/16)'),
            (TYPE_ITA_E, 'ITA Type E (CEE 7/5)'),
            (TYPE_ITA_F, 'ITA Type F (CEE 7/4)'),
            (TYPE_ITA_EF, 'ITA Type E/F (CEE 7/7)'),
            (TYPE_ITA_G, 'ITA Type G (BS 1363)'),
            (TYPE_ITA_H, 'ITA Type H'),
            (TYPE_ITA_I, 'ITA Type I'),
            (TYPE_ITA_J, 'ITA Type J'),
            (TYPE_ITA_K, 'ITA Type K'),
            (TYPE_ITA_L, 'ITA Type L (CEI 23-50)'),
            (TYPE_ITA_M, 'ITA Type M (BS 546)'),
            (TYPE_ITA_N, 'ITA Type N'),
            (TYPE_ITA_O, 'ITA Type O'),
        )),
        ('USB', (
            (TYPE_USB_A, 'USB Type A'),
            (TYPE_USB_B, 'USB Type B'),
            (TYPE_USB_C, 'USB Type C'),
            (TYPE_USB_MINI_A, 'USB Mini A'),
            (TYPE_USB_MINI_B, 'USB Mini B'),
            (TYPE_USB_MICRO_A, 'USB Micro A'),
            (TYPE_USB_MICRO_B, 'USB Micro B'),
            (TYPE_USB_MICRO_AB, 'USB Micro AB'),
            (TYPE_USB_3_B, 'USB 3.0 Type B'),
            (TYPE_USB_3_MICROB, 'USB 3.0 Micro B'),
        )),
        ('DC', (
            (TYPE_DC, 'DC Terminal'),
        )),
        ('Proprietary', (
            (TYPE_SAF_D_GRID, 'Saf-D-Grid'),
        )),
        ('Other', (
            (TYPE_HARDWIRED, 'Hardwired'),
        )),
    )
#
# PowerOutlets
#
class PowerOutletTypeChoices(ChoiceSet):
    """Physical connector types for power outlets (receptacles), grouped by
    standard family.

    The slug values are persisted identifiers -- do not rename them.
    NOTE(review): the California-style slugs are uppercase ('CS6360C', ...)
    unlike the lowercase power-port slugs; this inconsistency is preserved
    deliberately because the slugs are stored data.
    """
    # IEC 60320
    TYPE_IEC_C5 = 'iec-60320-c5'
    TYPE_IEC_C7 = 'iec-60320-c7'
    TYPE_IEC_C13 = 'iec-60320-c13'
    TYPE_IEC_C15 = 'iec-60320-c15'
    TYPE_IEC_C19 = 'iec-60320-c19'
    TYPE_IEC_C21 = 'iec-60320-c21'
    # IEC 60309
    TYPE_IEC_PNE4H = 'iec-60309-p-n-e-4h'
    TYPE_IEC_PNE6H = 'iec-60309-p-n-e-6h'
    TYPE_IEC_PNE9H = 'iec-60309-p-n-e-9h'
    TYPE_IEC_2PE4H = 'iec-60309-2p-e-4h'
    TYPE_IEC_2PE6H = 'iec-60309-2p-e-6h'
    TYPE_IEC_2PE9H = 'iec-60309-2p-e-9h'
    TYPE_IEC_3PE4H = 'iec-60309-3p-e-4h'
    TYPE_IEC_3PE6H = 'iec-60309-3p-e-6h'
    TYPE_IEC_3PE9H = 'iec-60309-3p-e-9h'
    TYPE_IEC_3PNE4H = 'iec-60309-3p-n-e-4h'
    TYPE_IEC_3PNE6H = 'iec-60309-3p-n-e-6h'
    TYPE_IEC_3PNE9H = 'iec-60309-3p-n-e-9h'
    # NEMA non-locking
    TYPE_NEMA_115R = 'nema-1-15r'
    TYPE_NEMA_515R = 'nema-5-15r'
    TYPE_NEMA_520R = 'nema-5-20r'
    TYPE_NEMA_530R = 'nema-5-30r'
    TYPE_NEMA_550R = 'nema-5-50r'
    TYPE_NEMA_615R = 'nema-6-15r'
    TYPE_NEMA_620R = 'nema-6-20r'
    TYPE_NEMA_630R = 'nema-6-30r'
    TYPE_NEMA_650R = 'nema-6-50r'
    TYPE_NEMA_1030R = 'nema-10-30r'
    TYPE_NEMA_1050R = 'nema-10-50r'
    TYPE_NEMA_1420R = 'nema-14-20r'
    TYPE_NEMA_1430R = 'nema-14-30r'
    TYPE_NEMA_1450R = 'nema-14-50r'
    TYPE_NEMA_1460R = 'nema-14-60r'
    TYPE_NEMA_1515R = 'nema-15-15r'
    TYPE_NEMA_1520R = 'nema-15-20r'
    TYPE_NEMA_1530R = 'nema-15-30r'
    TYPE_NEMA_1550R = 'nema-15-50r'
    TYPE_NEMA_1560R = 'nema-15-60r'
    # NEMA locking
    TYPE_NEMA_L115R = 'nema-l1-15r'
    TYPE_NEMA_L515R = 'nema-l5-15r'
    TYPE_NEMA_L520R = 'nema-l5-20r'
    TYPE_NEMA_L530R = 'nema-l5-30r'
    TYPE_NEMA_L550R = 'nema-l5-50r'
    TYPE_NEMA_L615R = 'nema-l6-15r'
    TYPE_NEMA_L620R = 'nema-l6-20r'
    TYPE_NEMA_L630R = 'nema-l6-30r'
    TYPE_NEMA_L650R = 'nema-l6-50r'
    TYPE_NEMA_L1030R = 'nema-l10-30r'
    TYPE_NEMA_L1420R = 'nema-l14-20r'
    TYPE_NEMA_L1430R = 'nema-l14-30r'
    TYPE_NEMA_L1450R = 'nema-l14-50r'
    TYPE_NEMA_L1460R = 'nema-l14-60r'
    TYPE_NEMA_L1520R = 'nema-l15-20r'
    TYPE_NEMA_L1530R = 'nema-l15-30r'
    TYPE_NEMA_L1550R = 'nema-l15-50r'
    TYPE_NEMA_L1560R = 'nema-l15-60r'
    TYPE_NEMA_L2120R = 'nema-l21-20r'
    TYPE_NEMA_L2130R = 'nema-l21-30r'
    # California style
    TYPE_CS6360C = 'CS6360C'
    TYPE_CS6364C = 'CS6364C'
    TYPE_CS8164C = 'CS8164C'
    TYPE_CS8264C = 'CS8264C'
    TYPE_CS8364C = 'CS8364C'
    TYPE_CS8464C = 'CS8464C'
    # ITA/international
    TYPE_ITA_E = 'ita-e'
    TYPE_ITA_F = 'ita-f'
    TYPE_ITA_G = 'ita-g'
    TYPE_ITA_H = 'ita-h'
    TYPE_ITA_I = 'ita-i'
    TYPE_ITA_J = 'ita-j'
    TYPE_ITA_K = 'ita-k'
    TYPE_ITA_L = 'ita-l'
    TYPE_ITA_M = 'ita-m'
    TYPE_ITA_N = 'ita-n'
    TYPE_ITA_O = 'ita-o'
    # USB
    TYPE_USB_A = 'usb-a'
    TYPE_USB_MICROB = 'usb-micro-b'
    TYPE_USB_C = 'usb-c'
    # Direct current (DC)
    TYPE_DC = 'dc-terminal'
    # Proprietary
    TYPE_HDOT_CX = 'hdot-cx'
    TYPE_SAF_D_GRID = 'saf-d-grid'
    # Other
    TYPE_HARDWIRED = 'hardwired'
    CHOICES = (
        ('IEC 60320', (
            (TYPE_IEC_C5, 'C5'),
            (TYPE_IEC_C7, 'C7'),
            (TYPE_IEC_C13, 'C13'),
            (TYPE_IEC_C15, 'C15'),
            (TYPE_IEC_C19, 'C19'),
            (TYPE_IEC_C21, 'C21'),
        )),
        ('IEC 60309', (
            (TYPE_IEC_PNE4H, 'P+N+E 4H'),
            (TYPE_IEC_PNE6H, 'P+N+E 6H'),
            (TYPE_IEC_PNE9H, 'P+N+E 9H'),
            (TYPE_IEC_2PE4H, '2P+E 4H'),
            (TYPE_IEC_2PE6H, '2P+E 6H'),
            (TYPE_IEC_2PE9H, '2P+E 9H'),
            (TYPE_IEC_3PE4H, '3P+E 4H'),
            (TYPE_IEC_3PE6H, '3P+E 6H'),
            (TYPE_IEC_3PE9H, '3P+E 9H'),
            (TYPE_IEC_3PNE4H, '3P+N+E 4H'),
            (TYPE_IEC_3PNE6H, '3P+N+E 6H'),
            (TYPE_IEC_3PNE9H, '3P+N+E 9H'),
        )),
        ('NEMA (Non-locking)', (
            (TYPE_NEMA_115R, 'NEMA 1-15R'),
            (TYPE_NEMA_515R, 'NEMA 5-15R'),
            (TYPE_NEMA_520R, 'NEMA 5-20R'),
            (TYPE_NEMA_530R, 'NEMA 5-30R'),
            (TYPE_NEMA_550R, 'NEMA 5-50R'),
            (TYPE_NEMA_615R, 'NEMA 6-15R'),
            (TYPE_NEMA_620R, 'NEMA 6-20R'),
            (TYPE_NEMA_630R, 'NEMA 6-30R'),
            (TYPE_NEMA_650R, 'NEMA 6-50R'),
            (TYPE_NEMA_1030R, 'NEMA 10-30R'),
            (TYPE_NEMA_1050R, 'NEMA 10-50R'),
            (TYPE_NEMA_1420R, 'NEMA 14-20R'),
            (TYPE_NEMA_1430R, 'NEMA 14-30R'),
            (TYPE_NEMA_1450R, 'NEMA 14-50R'),
            (TYPE_NEMA_1460R, 'NEMA 14-60R'),
            (TYPE_NEMA_1515R, 'NEMA 15-15R'),
            (TYPE_NEMA_1520R, 'NEMA 15-20R'),
            (TYPE_NEMA_1530R, 'NEMA 15-30R'),
            (TYPE_NEMA_1550R, 'NEMA 15-50R'),
            (TYPE_NEMA_1560R, 'NEMA 15-60R'),
        )),
        ('NEMA (Locking)', (
            (TYPE_NEMA_L115R, 'NEMA L1-15R'),
            (TYPE_NEMA_L515R, 'NEMA L5-15R'),
            (TYPE_NEMA_L520R, 'NEMA L5-20R'),
            (TYPE_NEMA_L530R, 'NEMA L5-30R'),
            (TYPE_NEMA_L550R, 'NEMA L5-50R'),
            (TYPE_NEMA_L615R, 'NEMA L6-15R'),
            (TYPE_NEMA_L620R, 'NEMA L6-20R'),
            (TYPE_NEMA_L630R, 'NEMA L6-30R'),
            (TYPE_NEMA_L650R, 'NEMA L6-50R'),
            (TYPE_NEMA_L1030R, 'NEMA L10-30R'),
            (TYPE_NEMA_L1420R, 'NEMA L14-20R'),
            (TYPE_NEMA_L1430R, 'NEMA L14-30R'),
            (TYPE_NEMA_L1450R, 'NEMA L14-50R'),
            (TYPE_NEMA_L1460R, 'NEMA L14-60R'),
            (TYPE_NEMA_L1520R, 'NEMA L15-20R'),
            (TYPE_NEMA_L1530R, 'NEMA L15-30R'),
            (TYPE_NEMA_L1550R, 'NEMA L15-50R'),
            (TYPE_NEMA_L1560R, 'NEMA L15-60R'),
            (TYPE_NEMA_L2120R, 'NEMA L21-20R'),
            (TYPE_NEMA_L2130R, 'NEMA L21-30R'),
        )),
        ('California Style', (
            (TYPE_CS6360C, 'CS6360C'),
            (TYPE_CS6364C, 'CS6364C'),
            (TYPE_CS8164C, 'CS8164C'),
            (TYPE_CS8264C, 'CS8264C'),
            (TYPE_CS8364C, 'CS8364C'),
            (TYPE_CS8464C, 'CS8464C'),
        )),
        ('ITA/International', (
            (TYPE_ITA_E, 'ITA Type E (CEE7/5)'),
            (TYPE_ITA_F, 'ITA Type F (CEE7/3)'),
            (TYPE_ITA_G, 'ITA Type G (BS 1363)'),
            (TYPE_ITA_H, 'ITA Type H'),
            (TYPE_ITA_I, 'ITA Type I'),
            (TYPE_ITA_J, 'ITA Type J'),
            (TYPE_ITA_K, 'ITA Type K'),
            (TYPE_ITA_L, 'ITA Type L (CEI 23-50)'),
            (TYPE_ITA_M, 'ITA Type M (BS 546)'),
            (TYPE_ITA_N, 'ITA Type N'),
            (TYPE_ITA_O, 'ITA Type O'),
        )),
        ('USB', (
            (TYPE_USB_A, 'USB Type A'),
            (TYPE_USB_MICROB, 'USB Micro B'),
            (TYPE_USB_C, 'USB Type C'),
        )),
        ('DC', (
            (TYPE_DC, 'DC Terminal'),
        )),
        ('Proprietary', (
            (TYPE_HDOT_CX, 'HDOT Cx'),
            (TYPE_SAF_D_GRID, 'Saf-D-Grid'),
        )),
        ('Other', (
            (TYPE_HARDWIRED, 'Hardwired'),
        )),
    )
class PowerOutletFeedLegChoices(ChoiceSet):
    """Feed-leg (A/B/C) designation for a power outlet -- presumably which
    phase leg of the upstream feed the outlet draws from; confirm against
    the PowerOutlet model's usage."""
    FEED_LEG_A = 'A'
    FEED_LEG_B = 'B'
    FEED_LEG_C = 'C'
    CHOICES = (
        (FEED_LEG_A, 'A'),
        (FEED_LEG_B, 'B'),
        (FEED_LEG_C, 'C'),
    )
#
# Interfaces
#
class InterfaceTypeChoices(ChoiceSet):
    """Interface form-factor / media-type choices, grouped by technology.

    The slug values are persisted identifiers -- do not rename them.
    """
    # Virtual
    TYPE_VIRTUAL = 'virtual'
    TYPE_LAG = 'lag'
    # Ethernet
    TYPE_100ME_FIXED = '100base-tx'
    TYPE_1GE_FIXED = '1000base-t'
    TYPE_1GE_GBIC = '1000base-x-gbic'
    TYPE_1GE_SFP = '1000base-x-sfp'
    TYPE_2GE_FIXED = '2.5gbase-t'
    TYPE_5GE_FIXED = '5gbase-t'
    TYPE_10GE_FIXED = '10gbase-t'
    TYPE_10GE_CX4 = '10gbase-cx4'
    TYPE_10GE_SFP_PLUS = '10gbase-x-sfpp'
    TYPE_10GE_XFP = '10gbase-x-xfp'
    TYPE_10GE_XENPAK = '10gbase-x-xenpak'
    TYPE_10GE_X2 = '10gbase-x-x2'
    TYPE_25GE_SFP28 = '25gbase-x-sfp28'
    TYPE_50GE_SFP56 = '50gbase-x-sfp56'
    TYPE_40GE_QSFP_PLUS = '40gbase-x-qsfpp'
    # NOTE(review): slug says 'sfp28' although the name says QSFP28 -- the
    # slug is stored data and must not be changed here; confirm upstream.
    TYPE_50GE_QSFP28 = '50gbase-x-sfp28'
    TYPE_100GE_CFP = '100gbase-x-cfp'
    TYPE_100GE_CFP2 = '100gbase-x-cfp2'
    TYPE_100GE_CFP4 = '100gbase-x-cfp4'
    TYPE_100GE_CPAK = '100gbase-x-cpak'
    TYPE_100GE_QSFP28 = '100gbase-x-qsfp28'
    TYPE_200GE_CFP2 = '200gbase-x-cfp2'
    TYPE_200GE_QSFP56 = '200gbase-x-qsfp56'
    TYPE_400GE_QSFP_DD = '400gbase-x-qsfpdd'
    TYPE_400GE_OSFP = '400gbase-x-osfp'
    # Wireless
    TYPE_80211A = 'ieee802.11a'
    TYPE_80211G = 'ieee802.11g'
    TYPE_80211N = 'ieee802.11n'
    TYPE_80211AC = 'ieee802.11ac'
    TYPE_80211AD = 'ieee802.11ad'
    TYPE_80211AX = 'ieee802.11ax'
    # Cellular
    TYPE_GSM = 'gsm'
    TYPE_CDMA = 'cdma'
    TYPE_LTE = 'lte'
    # SONET
    TYPE_SONET_OC3 = 'sonet-oc3'
    TYPE_SONET_OC12 = 'sonet-oc12'
    TYPE_SONET_OC48 = 'sonet-oc48'
    TYPE_SONET_OC192 = 'sonet-oc192'
    TYPE_SONET_OC768 = 'sonet-oc768'
    TYPE_SONET_OC1920 = 'sonet-oc1920'
    TYPE_SONET_OC3840 = 'sonet-oc3840'
    # Fibrechannel
    TYPE_1GFC_SFP = '1gfc-sfp'
    TYPE_2GFC_SFP = '2gfc-sfp'
    TYPE_4GFC_SFP = '4gfc-sfp'
    TYPE_8GFC_SFP_PLUS = '8gfc-sfpp'
    TYPE_16GFC_SFP_PLUS = '16gfc-sfpp'
    TYPE_32GFC_SFP28 = '32gfc-sfp28'
    TYPE_64GFC_QSFP_PLUS = '64gfc-qsfpp'
    # NOTE(review): slug says 'sfp28' although the name says QSFP28 -- the
    # slug is stored data and must not be changed here; confirm upstream.
    TYPE_128GFC_QSFP28 = '128gfc-sfp28'
    # InfiniBand
    TYPE_INFINIBAND_SDR = 'infiniband-sdr'
    TYPE_INFINIBAND_DDR = 'infiniband-ddr'
    TYPE_INFINIBAND_QDR = 'infiniband-qdr'
    TYPE_INFINIBAND_FDR10 = 'infiniband-fdr10'
    TYPE_INFINIBAND_FDR = 'infiniband-fdr'
    TYPE_INFINIBAND_EDR = 'infiniband-edr'
    TYPE_INFINIBAND_HDR = 'infiniband-hdr'
    TYPE_INFINIBAND_NDR = 'infiniband-ndr'
    TYPE_INFINIBAND_XDR = 'infiniband-xdr'
    # Serial
    TYPE_T1 = 't1'
    TYPE_E1 = 'e1'
    TYPE_T3 = 't3'
    TYPE_E3 = 'e3'
    # ATM/DSL
    TYPE_XDSL = 'xdsl'
    # Stacking
    TYPE_STACKWISE = 'cisco-stackwise'
    TYPE_STACKWISE_PLUS = 'cisco-stackwise-plus'
    TYPE_FLEXSTACK = 'cisco-flexstack'
    TYPE_FLEXSTACK_PLUS = 'cisco-flexstack-plus'
    TYPE_JUNIPER_VCP = 'juniper-vcp'
    TYPE_SUMMITSTACK = 'extreme-summitstack'
    TYPE_SUMMITSTACK128 = 'extreme-summitstack-128'
    TYPE_SUMMITSTACK256 = 'extreme-summitstack-256'
    TYPE_SUMMITSTACK512 = 'extreme-summitstack-512'
    # Other
    TYPE_OTHER = 'other'
    CHOICES = (
        (
            'Virtual interfaces',
            (
                (TYPE_VIRTUAL, 'Virtual'),
                (TYPE_LAG, 'Link Aggregation Group (LAG)'),
            ),
        ),
        (
            'Ethernet (fixed)',
            (
                (TYPE_100ME_FIXED, '100BASE-TX (10/100ME)'),
                (TYPE_1GE_FIXED, '1000BASE-T (1GE)'),
                (TYPE_2GE_FIXED, '2.5GBASE-T (2.5GE)'),
                (TYPE_5GE_FIXED, '5GBASE-T (5GE)'),
                (TYPE_10GE_FIXED, '10GBASE-T (10GE)'),
                (TYPE_10GE_CX4, '10GBASE-CX4 (10GE)'),
            )
        ),
        (
            'Ethernet (modular)',
            (
                (TYPE_1GE_GBIC, 'GBIC (1GE)'),
                (TYPE_1GE_SFP, 'SFP (1GE)'),
                (TYPE_10GE_SFP_PLUS, 'SFP+ (10GE)'),
                (TYPE_10GE_XFP, 'XFP (10GE)'),
                (TYPE_10GE_XENPAK, 'XENPAK (10GE)'),
                (TYPE_10GE_X2, 'X2 (10GE)'),
                (TYPE_25GE_SFP28, 'SFP28 (25GE)'),
                (TYPE_50GE_SFP56, 'SFP56 (50GE)'),
                (TYPE_40GE_QSFP_PLUS, 'QSFP+ (40GE)'),
                (TYPE_50GE_QSFP28, 'QSFP28 (50GE)'),
                (TYPE_100GE_CFP, 'CFP (100GE)'),
                (TYPE_100GE_CFP2, 'CFP2 (100GE)'),
                (TYPE_200GE_CFP2, 'CFP2 (200GE)'),
                (TYPE_100GE_CFP4, 'CFP4 (100GE)'),
                (TYPE_100GE_CPAK, 'Cisco CPAK (100GE)'),
                (TYPE_100GE_QSFP28, 'QSFP28 (100GE)'),
                (TYPE_200GE_QSFP56, 'QSFP56 (200GE)'),
                (TYPE_400GE_QSFP_DD, 'QSFP-DD (400GE)'),
                (TYPE_400GE_OSFP, 'OSFP (400GE)'),
            )
        ),
        (
            'Wireless',
            (
                (TYPE_80211A, 'IEEE 802.11a'),
                (TYPE_80211G, 'IEEE 802.11b/g'),
                (TYPE_80211N, 'IEEE 802.11n'),
                (TYPE_80211AC, 'IEEE 802.11ac'),
                (TYPE_80211AD, 'IEEE 802.11ad'),
                (TYPE_80211AX, 'IEEE 802.11ax'),
            )
        ),
        (
            'Cellular',
            (
                (TYPE_GSM, 'GSM'),
                (TYPE_CDMA, 'CDMA'),
                (TYPE_LTE, 'LTE'),
            )
        ),
        (
            'SONET',
            (
                (TYPE_SONET_OC3, 'OC-3/STM-1'),
                (TYPE_SONET_OC12, 'OC-12/STM-4'),
                (TYPE_SONET_OC48, 'OC-48/STM-16'),
                (TYPE_SONET_OC192, 'OC-192/STM-64'),
                (TYPE_SONET_OC768, 'OC-768/STM-256'),
                (TYPE_SONET_OC1920, 'OC-1920/STM-640'),
                (TYPE_SONET_OC3840, 'OC-3840/STM-1234'),
            )
        ),
        (
            'FibreChannel',
            (
                (TYPE_1GFC_SFP, 'SFP (1GFC)'),
                (TYPE_2GFC_SFP, 'SFP (2GFC)'),
                (TYPE_4GFC_SFP, 'SFP (4GFC)'),
                (TYPE_8GFC_SFP_PLUS, 'SFP+ (8GFC)'),
                (TYPE_16GFC_SFP_PLUS, 'SFP+ (16GFC)'),
                (TYPE_32GFC_SFP28, 'SFP28 (32GFC)'),
                (TYPE_64GFC_QSFP_PLUS, 'QSFP+ (64GFC)'),
                (TYPE_128GFC_QSFP28, 'QSFP28 (128GFC)'),
            )
        ),
        (
            'InfiniBand',
            (
                (TYPE_INFINIBAND_SDR, 'SDR (2 Gbps)'),
                (TYPE_INFINIBAND_DDR, 'DDR (4 Gbps)'),
                (TYPE_INFINIBAND_QDR, 'QDR (8 Gbps)'),
                (TYPE_INFINIBAND_FDR10, 'FDR10 (10 Gbps)'),
                (TYPE_INFINIBAND_FDR, 'FDR (13.5 Gbps)'),
                (TYPE_INFINIBAND_EDR, 'EDR (25 Gbps)'),
                (TYPE_INFINIBAND_HDR, 'HDR (50 Gbps)'),
                (TYPE_INFINIBAND_NDR, 'NDR (100 Gbps)'),
                (TYPE_INFINIBAND_XDR, 'XDR (250 Gbps)'),
            )
        ),
        (
            'Serial',
            (
                (TYPE_T1, 'T1 (1.544 Mbps)'),
                (TYPE_E1, 'E1 (2.048 Mbps)'),
                (TYPE_T3, 'T3 (45 Mbps)'),
                (TYPE_E3, 'E3 (34 Mbps)'),
            )
        ),
        (
            'ATM',
            (
                (TYPE_XDSL, 'xDSL'),
            )
        ),
        (
            'Stacking',
            (
                (TYPE_STACKWISE, 'Cisco StackWise'),
                (TYPE_STACKWISE_PLUS, 'Cisco StackWise Plus'),
                (TYPE_FLEXSTACK, 'Cisco FlexStack'),
                (TYPE_FLEXSTACK_PLUS, 'Cisco FlexStack Plus'),
                (TYPE_JUNIPER_VCP, 'Juniper VCP'),
                (TYPE_SUMMITSTACK, 'Extreme SummitStack'),
                (TYPE_SUMMITSTACK128, 'Extreme SummitStack-128'),
                (TYPE_SUMMITSTACK256, 'Extreme SummitStack-256'),
                (TYPE_SUMMITSTACK512, 'Extreme SummitStack-512'),
            )
        ),
        (
            'Other',
            (
                (TYPE_OTHER, 'Other'),
            )
        ),
    )
class InterfaceModeChoices(ChoiceSet):
    """Tagging mode for an interface (access / tagged / tagged-all) --
    presumably 802.1Q VLAN mode; confirm against the Interface model."""
    MODE_ACCESS = 'access'
    MODE_TAGGED = 'tagged'
    MODE_TAGGED_ALL = 'tagged-all'
    CHOICES = (
        (MODE_ACCESS, 'Access'),
        (MODE_TAGGED, 'Tagged'),
        (MODE_TAGGED_ALL, 'Tagged (All)'),
    )
#
# FrontPorts/RearPorts
#
class PortTypeChoices(ChoiceSet):
    """Physical connector types for front/rear pass-through ports, grouped
    into copper and fiber-optic families.

    The slug values are persisted identifiers -- do not rename them.
    """
    TYPE_8P8C = '8p8c'
    TYPE_8P6C = '8p6c'
    TYPE_8P4C = '8p4c'
    TYPE_8P2C = '8p2c'
    TYPE_6P6C = '6p6c'
    TYPE_6P4C = '6p4c'
    TYPE_6P2C = '6p2c'
    TYPE_4P4C = '4p4c'
    TYPE_4P2C = '4p2c'
    TYPE_GG45 = 'gg45'
    TYPE_TERA4P = 'tera-4p'
    TYPE_TERA2P = 'tera-2p'
    TYPE_TERA1P = 'tera-1p'
    TYPE_110_PUNCH = '110-punch'
    TYPE_BNC = 'bnc'
    TYPE_F = 'f'
    TYPE_N = 'n'
    TYPE_MRJ21 = 'mrj21'
    TYPE_ST = 'st'
    TYPE_SC = 'sc'
    TYPE_SC_APC = 'sc-apc'
    TYPE_FC = 'fc'
    TYPE_LC = 'lc'
    TYPE_LC_APC = 'lc-apc'
    TYPE_MTRJ = 'mtrj'
    TYPE_MPO = 'mpo'
    TYPE_LSH = 'lsh'
    TYPE_LSH_APC = 'lsh-apc'
    TYPE_SPLICE = 'splice'
    TYPE_CS = 'cs'
    TYPE_SN = 'sn'
    TYPE_SMA_905 = 'sma-905'
    TYPE_SMA_906 = 'sma-906'
    TYPE_URM_P2 = 'urm-p2'
    TYPE_URM_P4 = 'urm-p4'
    TYPE_URM_P8 = 'urm-p8'
    CHOICES = (
        (
            'Copper',
            (
                (TYPE_8P8C, '8P8C'),
                (TYPE_8P6C, '8P6C'),
                (TYPE_8P4C, '8P4C'),
                (TYPE_8P2C, '8P2C'),
                (TYPE_6P6C, '6P6C'),
                (TYPE_6P4C, '6P4C'),
                (TYPE_6P2C, '6P2C'),
                (TYPE_4P4C, '4P4C'),
                (TYPE_4P2C, '4P2C'),
                (TYPE_GG45, 'GG45'),
                (TYPE_TERA4P, 'TERA 4P'),
                (TYPE_TERA2P, 'TERA 2P'),
                (TYPE_TERA1P, 'TERA 1P'),
                (TYPE_110_PUNCH, '110 Punch'),
                (TYPE_BNC, 'BNC'),
                (TYPE_F, 'F Connector'),
                (TYPE_N, 'N Connector'),
                (TYPE_MRJ21, 'MRJ21'),
            ),
        ),
        (
            'Fiber Optic',
            (
                (TYPE_FC, 'FC'),
                (TYPE_LC, 'LC'),
                (TYPE_LC_APC, 'LC/APC'),
                (TYPE_LSH, 'LSH'),
                (TYPE_LSH_APC, 'LSH/APC'),
                (TYPE_MPO, 'MPO'),
                (TYPE_MTRJ, 'MTRJ'),
                (TYPE_SC, 'SC'),
                (TYPE_SC_APC, 'SC/APC'),
                (TYPE_ST, 'ST'),
                (TYPE_CS, 'CS'),
                (TYPE_SN, 'SN'),
                (TYPE_SMA_905, 'SMA 905'),
                (TYPE_SMA_906, 'SMA 906'),
                (TYPE_URM_P2, 'URM-P2'),
                (TYPE_URM_P4, 'URM-P4'),
                (TYPE_URM_P8, 'URM-P8'),
                (TYPE_SPLICE, 'Splice'),
            )
        )
    )
#
# Cables
#
class CableTypeChoices(ChoiceSet):
    """Physical medium choices for a Cable, grouped into copper, fiber and
    power. The slug values are persisted identifiers -- do not rename them.
    """
    TYPE_CAT3 = 'cat3'
    TYPE_CAT5 = 'cat5'
    TYPE_CAT5E = 'cat5e'
    TYPE_CAT6 = 'cat6'
    TYPE_CAT6A = 'cat6a'
    TYPE_CAT7 = 'cat7'
    TYPE_CAT7A = 'cat7a'
    TYPE_CAT8 = 'cat8'
    TYPE_DAC_ACTIVE = 'dac-active'
    TYPE_DAC_PASSIVE = 'dac-passive'
    TYPE_MRJ21_TRUNK = 'mrj21-trunk'
    TYPE_COAXIAL = 'coaxial'
    TYPE_MMF = 'mmf'
    TYPE_MMF_OM1 = 'mmf-om1'
    TYPE_MMF_OM2 = 'mmf-om2'
    TYPE_MMF_OM3 = 'mmf-om3'
    TYPE_MMF_OM4 = 'mmf-om4'
    TYPE_MMF_OM5 = 'mmf-om5'
    TYPE_SMF = 'smf'
    TYPE_SMF_OS1 = 'smf-os1'
    TYPE_SMF_OS2 = 'smf-os2'
    TYPE_AOC = 'aoc'
    TYPE_POWER = 'power'
    CHOICES = (
        (
            'Copper', (
                (TYPE_CAT3, 'CAT3'),
                (TYPE_CAT5, 'CAT5'),
                (TYPE_CAT5E, 'CAT5e'),
                (TYPE_CAT6, 'CAT6'),
                (TYPE_CAT6A, 'CAT6a'),
                (TYPE_CAT7, 'CAT7'),
                (TYPE_CAT7A, 'CAT7a'),
                (TYPE_CAT8, 'CAT8'),
                (TYPE_DAC_ACTIVE, 'Direct Attach Copper (Active)'),
                (TYPE_DAC_PASSIVE, 'Direct Attach Copper (Passive)'),
                (TYPE_MRJ21_TRUNK, 'MRJ21 Trunk'),
                (TYPE_COAXIAL, 'Coaxial'),
            ),
        ),
        (
            'Fiber', (
                (TYPE_MMF, 'Multimode Fiber'),
                (TYPE_MMF_OM1, 'Multimode Fiber (OM1)'),
                (TYPE_MMF_OM2, 'Multimode Fiber (OM2)'),
                (TYPE_MMF_OM3, 'Multimode Fiber (OM3)'),
                (TYPE_MMF_OM4, 'Multimode Fiber (OM4)'),
                (TYPE_MMF_OM5, 'Multimode Fiber (OM5)'),
                (TYPE_SMF, 'Singlemode Fiber'),
                (TYPE_SMF_OS1, 'Singlemode Fiber (OS1)'),
                (TYPE_SMF_OS2, 'Singlemode Fiber (OS2)'),
                (TYPE_AOC, 'Active Optical Cabling (AOC)'),
            ),
        ),
        (TYPE_POWER, 'Power'),
    )
class CableStatusChoices(ChoiceSet):
    """Operational status of a cable.

    ``CSS_CLASSES`` maps each status to a CSS context class used when
    rendering the status in the UI (presumably Bootstrap label classes —
    confirm against the templates).
    """

    STATUS_CONNECTED = 'connected'
    STATUS_PLANNED = 'planned'
    STATUS_DECOMMISSIONING = 'decommissioning'
    CHOICES = (
        (STATUS_CONNECTED, 'Connected'),
        (STATUS_PLANNED, 'Planned'),
        (STATUS_DECOMMISSIONING, 'Decommissioning'),
    )
    CSS_CLASSES = {
        STATUS_CONNECTED: 'success',
        STATUS_PLANNED: 'info',
        STATUS_DECOMMISSIONING: 'warning',
    }
class CableLengthUnitChoices(ChoiceSet):
    """Units of measure for a cable's length (metric and imperial)."""

    # Metric
    UNIT_KILOMETER = 'km'
    UNIT_METER = 'm'
    UNIT_CENTIMETER = 'cm'
    # Imperial
    UNIT_MILE = 'mi'
    UNIT_FOOT = 'ft'
    UNIT_INCH = 'in'
    CHOICES = (
        (UNIT_KILOMETER, 'Kilometers'),
        (UNIT_METER, 'Meters'),
        (UNIT_CENTIMETER, 'Centimeters'),
        (UNIT_MILE, 'Miles'),
        (UNIT_FOOT, 'Feet'),
        (UNIT_INCH, 'Inches'),
    )
#
# PowerFeeds
#
class PowerFeedStatusChoices(ChoiceSet):
    """Operational status of a power feed, with UI CSS classes per status."""

    STATUS_OFFLINE = 'offline'
    STATUS_ACTIVE = 'active'
    STATUS_PLANNED = 'planned'
    STATUS_FAILED = 'failed'
    CHOICES = (
        (STATUS_OFFLINE, 'Offline'),
        (STATUS_ACTIVE, 'Active'),
        (STATUS_PLANNED, 'Planned'),
        (STATUS_FAILED, 'Failed'),
    )
    # CSS context class used when rendering each status badge.
    CSS_CLASSES = {
        STATUS_OFFLINE: 'warning',
        STATUS_ACTIVE: 'success',
        STATUS_PLANNED: 'info',
        STATUS_FAILED: 'danger',
    }
class PowerFeedTypeChoices(ChoiceSet):
    """Role of a power feed: primary supply or redundant backup."""

    TYPE_PRIMARY = 'primary'
    TYPE_REDUNDANT = 'redundant'
    CHOICES = (
        (TYPE_PRIMARY, 'Primary'),
        (TYPE_REDUNDANT, 'Redundant'),
    )
    # CSS context class used when rendering each type badge.
    CSS_CLASSES = {
        TYPE_PRIMARY: 'success',
        TYPE_REDUNDANT: 'info',
    }
class PowerFeedSupplyChoices(ChoiceSet):
    """Supply current type of a power feed: alternating or direct current."""

    SUPPLY_AC = 'ac'
    SUPPLY_DC = 'dc'
    CHOICES = (
        (SUPPLY_AC, 'AC'),
        (SUPPLY_DC, 'DC'),
    )
class PowerFeedPhaseChoices(ChoiceSet):
    """Electrical phase of a power feed: single-phase or three-phase."""

    PHASE_SINGLE = 'single-phase'
    PHASE_3PHASE = 'three-phase'
    CHOICES = (
        (PHASE_SINGLE, 'Single phase'),
        (PHASE_3PHASE, 'Three-phase'),
    )
| 28.9925 | 69 | 0.537524 |
acea2a95a8874ffd519635cf8b450d816a06f486 | 192 | bzl | Python | tools/build/bazel/variables.bzl | vanvantong/onos | e5b1d250d439c492643068a76120e95158c9f177 | [
"Apache-2.0"
] | 2 | 2020-02-17T07:51:36.000Z | 2021-02-10T18:47:51.000Z | tools/build/bazel/variables.bzl | vanvantong/onos | e5b1d250d439c492643068a76120e95158c9f177 | [
"Apache-2.0"
] | 17 | 2020-03-04T23:40:00.000Z | 2022-03-02T05:50:27.000Z | tools/build/bazel/variables.bzl | vanvantong/onos | e5b1d250d439c492643068a76120e95158c9f177 | [
"Apache-2.0"
] | 1 | 2019-11-04T11:13:30.000Z | 2019-11-04T11:13:30.000Z | ONOS_VERSION = "2.2.0-SNAPSHOT"
# Shared Bazel build constants for ONOS artifacts.
ONOS_ARTIFACT_BASE = "onos-"
ONOS_GROUP_ID = "org.onosproject"
ONOS_ORIGIN = "ONOS Community"
# Prefix applied to ONOS application names (e.g. "org.onosproject.<app>").
APP_PREFIX = ONOS_GROUP_ID + "."
DEFAULT_APP_CATEGORY = "Utility"
| 27.428571 | 33 | 0.760417 |
acea2b6f8fa7ccfdd42940db8f994d231eca906c | 281 | py | Python | src/clisy/searcher/searcher_impl/amazon_searcher.py | nilukush/clisy | f37b9b7f0498fe7cc5485506a7313cf71a6bd3cf | [
"MIT"
] | null | null | null | src/clisy/searcher/searcher_impl/amazon_searcher.py | nilukush/clisy | f37b9b7f0498fe7cc5485506a7313cf71a6bd3cf | [
"MIT"
] | null | null | null | src/clisy/searcher/searcher_impl/amazon_searcher.py | nilukush/clisy | f37b9b7f0498fe7cc5485506a7313cf71a6bd3cf | [
"MIT"
] | null | null | null | from clisy.searcher.base_searcher import BaseSearcher
class AmazonSearcher(BaseSearcher):
    """Searcher that opens Amazon (amazon.in) search results for a query."""

    def __init__(self):
        # Configure the base searcher with Amazon's search-URL prefix;
        # the query string is appended by BaseSearcher when searching.
        super().__init__("https://www.amazon.in/s?k=")

    def open(self, query_string):
        """Open the Amazon search results page for ``query_string``."""
        super().open(query_string)
| 28.1 | 74 | 0.729537 |
acea2c9c4f919ef382e00f97cd8eba86c56a3a8d | 3,298 | py | Python | profiles_project/settings.py | akotronis/profiles-rest-api | c4f31ba1870fcb97b58c25efb70a1f98ae0edc5f | [
"MIT"
] | null | null | null | profiles_project/settings.py | akotronis/profiles-rest-api | c4f31ba1870fcb97b58c25efb70a1f98ae0edc5f | [
"MIT"
] | null | null | null | profiles_project/settings.py | akotronis/profiles-rest-api | c4f31ba1870fcb97b58c25efb70a1f98ae0edc5f | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here; load it from an
# environment variable (or secrets store) before deploying.
SECRET_KEY = 'drx_o=5ro)5ovn8o1+$c#ryms#c!8@-14i7^0!!4$1vpn_889a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # MY APPS
    'profiles_api',
    # THIRD PARTY APPS
    'rest_framework',
    'rest_framework.authtoken',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): SQLite is fine for development; switch to a server database
# (and move credentials out of the repo) for production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
##### CUSTOM THINGS #####
AUTH_USER_MODEL = 'profiles_api.UserProfile' | 25.565891 | 91 | 0.695876 |
acea2ca98258046db679d66e8839fd422f1209e8 | 891 | py | Python | test/fixture/fake_rules/.some_dot_file.py | oldmantaiter/inferno | 88da465625d18c6848f4be5fb37e20a5ae2c6db1 | [
"MIT"
] | 1 | 2015-10-15T04:18:14.000Z | 2015-10-15T04:18:14.000Z | test/fixture/fake_rules/.some_dot_file.py | oldmantaiter/inferno | 88da465625d18c6848f4be5fb37e20a5ae2c6db1 | [
"MIT"
] | null | null | null | test/fixture/fake_rules/.some_dot_file.py | oldmantaiter/inferno | 88da465625d18c6848f4be5fb37e20a5ae2c6db1 | [
"MIT"
] | null | null | null | from inferno.lib.rule import InfernoRule
from inferno.lib.rule import Keyset
# When True, these fixture rules are picked up and run automatically
# (presumably by the inferno rule loader — confirm against the test harness).
AUTORUN = True
# Three identical-shaped test rules; rules 1 and 2 define two keysets each,
# rule 3 uses the flat key/value form instead of explicit Keyset objects.
RULES = [
    InfernoRule(
        name='automatic_rule_1',
        keysets={
            'keyset_1':Keyset(
                key_parts=['key_1'],
                value_parts=['value_1'],
            ),
            'keyset_2':Keyset(
                key_parts=['key_2'],
                value_parts=['value_2']
            )
        }
    ),
    InfernoRule(
        name='automatic_rule_2',
        keysets={
            'keyset_1':Keyset(
                key_parts=['key_1'],
                value_parts=['value_1'],
            ),
            'keyset_2':Keyset(
                key_parts=['key_2'],
                value_parts=['value_2']
            )
        }
    ),
    InfernoRule(
        name='automatic_rule_3',
        key_parts=['key_1'],
        value_parts=['value_2'],
    ),
]
| 22.846154 | 40 | 0.453423 |
acea2d80620f6c9bcb9263061cba0ac4f38dd908 | 3,374 | py | Python | src/01_load_and_clean_data.py | alistair-clark/dsci_532_group-209 | f6ae44b8b984aba577f0c4a2dea32217bc36b27a | [
"MIT"
] | null | null | null | src/01_load_and_clean_data.py | alistair-clark/dsci_532_group-209 | f6ae44b8b984aba577f0c4a2dea32217bc36b27a | [
"MIT"
] | 12 | 2019-11-23T01:22:58.000Z | 2019-12-17T01:43:49.000Z | src/01_load_and_clean_data.py | alistair-clark/dsci_532_group-209 | f6ae44b8b984aba577f0c4a2dea32217bc36b27a | [
"MIT"
] | 3 | 2019-11-20T23:32:40.000Z | 2019-11-20T23:42:45.000Z | # Load packages
import pandas as pd
# Load datasets
df = pd.read_csv("../data/drinks_raw.csv")
df_location = pd.read_csv("../data/country_codes_raw.csv")
# Clean geography dataset country names so that they can be merged
df_location = df_location.replace({'name':
{'Antigua and Barbuda': 'Antigua & Barbuda',
'Bolivia (Plurinational State of)': 'Bolivia',
'Bosnia and Herzegovina': 'Bosnia-Herzegovina',
'Brunei Darussalam': 'Brunei',
"Côte d'Ivoire": "Cote d'Ivoire",
'Czechia': 'Czech Republic',
"Korea (Democratic People's Republic of)": 'North Korea',
'Congo, Democratic Republic of the': 'DR Congo',
'Iran (Islamic Republic of)': 'Iran',
"Lao People's Democratic Republic": 'Laos',
'Micronesia (Federated States of)': 'Micronesia',
'Korea, Republic of': 'South Korea',
'Moldova, Republic of': 'Moldova',
'Saint Kitts and Nevis': 'St. Kitts & Nevis',
'Saint Lucia': 'St. Lucia',
'Saint Vincent and the Grenadines': 'St. Vincent & the Grenadines',
'Sao Tome and Principe': 'Sao Tome & Principe',
'Eswatini': 'Swaziland',
'Syrian Arab Republic': 'Syria',
'North Macedonia': 'Macedonia',
'Trinidad and Tobago': 'Trinidad & Tobago',
'United Kingdom of Great Britain and Northern Ireland': 'United Kingdom',
'Tanzania, United Republic of': 'Tanzania',
'United States of America': 'USA',
'Venezuela (Bolivarian Republic of)': 'Venezuela',
'Viet Nam': 'Vietnam'
}}).rename(columns={'name': 'country'})
# merge datasets together
df = df.merge(df_location[['country', 'country-code', 'region']], how='left')
# add new columns for plotting
df['total_servings'] = df.iloc[:, 1:4].sum(axis=1)
df['prop_wine'] = df['wine_servings'] / df['total_servings']
df['prop_beer'] = df['beer_servings'] / df['total_servings']
df['prop_spirits'] = df['spirit_servings'] / df['total_servings']
# Global Rank of Drink
df['rank_wine'] = df['prop_wine'].rank(ascending = False)
df['rank_beer'] = df['prop_beer'].rank(ascending = False)
df['rank_spirit'] = df['prop_spirits'].rank(ascending = False)
# Relative ranks of drink based on region
df['relative_rank_wine'] = df.groupby('region')['prop_wine'].rank(ascending = False)
df['relative_rank_beer'] = df.groupby('region')['prop_beer'].rank(ascending = False)
df['relative_rank_spirit'] = df.groupby('region')['prop_spirits'].rank(ascending = False)
# rename 'country-code' to 'id'
df = df.rename(columns={'country-code':'id'})
df.to_csv("../data/merged_data_clean.csv") | 58.172414 | 109 | 0.50652 |
acea2f030bb92d381c3e44d36c838ad6d4fc40dc | 381 | py | Python | env/lib/python3.8/site-packages/plotly/graph_objs/indicator/delta/__init__.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/lib/python3.8/site-packages/plotly/graph_objs/indicator/delta/__init__.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/lib/python3.8/site-packages/plotly/graph_objs/indicator/delta/__init__.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import sys
if sys.version_info < (3, 7):
from ._decreasing import Decreasing
from ._font import Font
from ._increasing import Increasing
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
["._decreasing.Decreasing", "._font.Font", "._increasing.Increasing"],
)
| 25.4 | 78 | 0.67979 |
acea2f226587bb3d4a0c3c9ee901492a64c5e56b | 9,531 | py | Python | conventions/services/upload_objects.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | null | null | null | conventions/services/upload_objects.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | null | null | null | conventions/services/upload_objects.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | null | null | null | from io import BytesIO
from zipfile import BadZipFile
import datetime
from decimal import Decimal
from openpyxl import load_workbook
from core.storage import client
from programmes.models import TypologieLogement
from . import utils
def save_uploaded_file(my_file, convention, file_name):
my_file.seek(0)
client.put_object(
my_file=my_file.read(),
target=f"conventions/{convention.uuid}/uploads/{file_name}",
)
def handle_uploaded_xlsx(upform, my_file, myClass, convention, file_name):
# pylint: disable=R0912
try:
my_file.seek(0)
my_wb = load_workbook(filename=BytesIO(my_file.read()), data_only=True)
except BadZipFile:
upform.add_error(
"file",
"Le fichier importé ne semble pas être du bon format, 'xlsx' est le format attendu",
)
return {"success": utils.ReturnStatus.ERROR}
try:
my_ws = my_wb[myClass.sheet_name]
except KeyError:
upform.add_error(
"file",
f"Le fichier importé doit avoir une feuille nommée '{myClass.sheet_name}'",
)
return {"success": utils.ReturnStatus.ERROR}
save_uploaded_file(my_file, convention, file_name)
import_warnings = []
column_from_index = {}
for col in my_ws.iter_cols(
min_col=1, max_col=my_ws.max_column, min_row=1, max_row=1
):
for cell in col:
if cell.value is None:
continue
if cell.value not in myClass.import_mapping:
import_warnings.append(
Exception(
f"La colonne nommée '{cell.value}' est inconnue, "
+ "elle sera ignorée. Les colonnes attendues sont : "
+ f"{', '.join(myClass.import_mapping.keys())}"
)
)
continue
column_from_index[cell.column] = str(cell.value).strip()
error_column = False
for key in myClass.import_mapping:
if key not in list(column_from_index.values()):
upform.add_error(
"file", f"Le fichier importé doit avoir une colonne nommée '{key}'"
)
error_column = True
if error_column:
return {"success": utils.ReturnStatus.ERROR}
# transform each line into object
my_objects, import_warnings = get_object_from_worksheet(
my_ws, column_from_index, myClass, import_warnings
)
return {
"success": utils.ReturnStatus.SUCCESS
if len(import_warnings) == 0
else utils.ReturnStatus.WARNING,
"objects": my_objects,
"import_warnings": import_warnings,
}
def get_object_from_worksheet(my_ws, column_from_index, myClass, import_warnings):
my_objects = []
for row in my_ws.iter_rows(
min_row=3, max_row=my_ws.max_row, min_col=1, max_col=my_ws.max_column
):
my_row, empty_line, new_warnings = extract_row(
row, column_from_index, myClass.import_mapping
)
import_warnings = [*import_warnings, *new_warnings]
# Ignore if the line is empty
if not empty_line:
my_objects.append(my_row)
return my_objects, import_warnings
def extract_row(row, column_from_index, import_mapping):
# pylint: disable=R0912
new_warnings = []
my_row = {}
empty_line = True
for cell in row:
# Ignore unknown column
if cell.column not in column_from_index or cell.value is None:
continue
# Check the empty lines to don't fill it
empty_line = False
value = None
model_field = import_mapping[column_from_index[cell.column]]
if isinstance(model_field, str):
key = model_field
value = cell.value
else:
key = model_field.name
# Date case
if model_field.get_internal_type() == "DateField":
if isinstance(cell.value, datetime.datetime):
value = utils.format_date_for_form(cell.value)
else:
new_warnings.append(
Exception(
f"{cell.column_letter}{cell.row} : La valeur '{cell.value}' "
+ f"de la colonne {column_from_index[cell.column]} "
+ "doit être une date"
)
)
# TextChoices case
elif (
model_field.get_internal_type() == "CharField"
and model_field.choices is not None
):
if cell.value is not None:
tmp_value = cell.value
if model_field.choices == TypologieLogement.choices:
tmp_value = TypologieLogement.map_string(tmp_value)
value = next(
(x[0] for x in model_field.choices if x[1] == tmp_value), None
)
if (
value is None
): # value is not Null but not in the choices neither
new_warnings.append(
Exception(
f"{cell.column_letter}{cell.row} : La valeur '{cell.value}' "
+ f"de la colonne {column_from_index[cell.column]} "
+ "doit faire partie des valeurs : "
+ f"{', '.join(map(lambda x : x[1], model_field.choices))}"
)
)
# Float case
elif model_field.get_internal_type() == "FloatField":
if cell.value is not None:
if isinstance(cell.value, (float, int)):
value = float(cell.value)
else:
new_warnings.append(
Exception(
f"{cell.column_letter}{cell.row} : La valeur '{cell.value}' "
+ f"de la colonne {column_from_index[cell.column]} "
+ "doit être une valeur numérique"
)
)
# Decimal case
elif model_field.get_internal_type() == "DecimalField":
value, new_warnings = _get_value_from_decimal_field(
cell, model_field, column_from_index, new_warnings
)
# Integer case
elif model_field.get_internal_type() == "IntegerField":
if cell.value is not None:
if isinstance(cell.value, (float, int)):
value = int(cell.value)
else:
new_warnings.append(
Exception(
f"{cell.column_letter}{cell.row} : La valeur '{cell.value}' "
+ f"de la colonne {column_from_index[cell.column]} "
+ "doit être une valeur numérique"
)
)
# String case
elif model_field.get_internal_type() == "CharField":
if cell.value is not None:
if isinstance(cell.value, (float, int, str)):
value = cell.value
else:
new_warnings.append(
Exception(
f"{cell.column_letter}{cell.row} : La valeur '{cell.value}' "
+ f"de la colonne {column_from_index[cell.column]} "
+ "doit être une valeur alphanumeric"
)
)
my_row[key] = value
return my_row, empty_line, new_warnings
def _get_value_from_decimal_field(cell, model_field, column_from_index, new_warnings):
value = None
if cell.value is not None:
if isinstance(cell.value, str):
try:
local_format = "{:." + str(model_field.decimal_places) + "f}"
value = Decimal(
local_format.format(_extract_float_from_string(cell.value))
)
except ValueError:
new_warnings.append(
Exception(
f"{cell.column_letter}{cell.row} : La valeur '{cell.value}' "
+ f"de la colonne {column_from_index[cell.column]} "
+ "doit être une valeur numérique"
)
)
elif isinstance(cell.value, (float, int)):
local_format = "{:." + str(model_field.decimal_places) + "f}"
value = Decimal(local_format.format(cell.value))
else:
new_warnings.append(
Exception(
f"{cell.column_letter}{cell.row} : La valeur '{cell.value}' "
+ f"de la colonne {column_from_index[cell.column]} "
+ "doit être une valeur numérique"
)
)
return value, new_warnings
def _extract_float_from_string(my_string: str):
my_string = my_string.strip()
my_string = my_string.replace(",", ".")
i = 0
for char in my_string:
if char in ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "."]:
i += 1
else:
break
return float(my_string[:i])
| 37.821429 | 96 | 0.516105 |
acea2f5e352c141d746ca64317401ac188a09476 | 503 | py | Python | app.py | ageuribeiro/bmi-calculator | 8819da4b8651705d8cf26e7f196be41dd86b0d0c | [
"MIT"
] | null | null | null | app.py | ageuribeiro/bmi-calculator | 8819da4b8651705d8cf26e7f196be41dd86b0d0c | [
"MIT"
] | null | null | null | app.py | ageuribeiro/bmi-calculator | 8819da4b8651705d8cf26e7f196be41dd86b0d0c | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def index():
    """Render the BMI form; on a POST submission, compute and show the BMI."""
    bmi_result = ''
    submitted = request.method == 'POST' and 'weight' in request.form
    if submitted:
        form = request.form
        kilograms = float(form.get('weight'))
        centimeters = float(form.get('height'))
        bmi_result = calc_bmi(kilograms, centimeters)
    return render_template('bmi_calc.html', bmi=bmi_result)
def calc_bmi(weight, height):
    """Return the body-mass index for *weight* in kg and *height* in cm.

    BMI = weight / height_in_meters**2, rounded to two decimal places.
    """
    height_m = height / 100
    return round(weight / (height_m ** 2), 2)
app.run(debug=True) | 29.588235 | 61 | 0.640159 |
acea2f717aad2198f3f719b44a4c3194fd8bcfe6 | 158 | py | Python | contrib/wallettools/walletunlock.py | caffeine239/muskcoin | cf7e7862b7e42c458a5e1be648f4ec8f5cf18449 | [
"MIT"
] | null | null | null | contrib/wallettools/walletunlock.py | caffeine239/muskcoin | cf7e7862b7e42c458a5e1be648f4ec8f5cf18449 | [
"MIT"
] | 1 | 2021-03-08T09:24:05.000Z | 2021-03-15T09:10:11.000Z | contrib/wallettools/walletunlock.py | caffeine239/muskcoin | cf7e7862b7e42c458a5e1be648f4ec8f5cf18449 | [
"MIT"
] | null | null | null | from jsonrpc import ServiceProxy
# Connect to the locally-running wallet's JSON-RPC service.
access = ServiceProxy("http://127.0.0.1:2331")
# NOTE(review): raw_input is Python 2 only and echoes the passphrase to the
# terminal; getpass.getpass would avoid the echo.
pwd = raw_input("Enter wallet passphrase: ")
# Unlock the wallet for 60 seconds using the supplied passphrase.
access.walletpassphrase(pwd, 60)
| 31.6 | 46 | 0.765823 |
acea300ed343284e2a5ed2b67247d7d24c59a45f | 27,406 | py | Python | synapse/storage/databases/main/search.py | 3ayazaya/synapse | e3fe6347be1da930b6a0ed2005b565369800a327 | [
"Apache-2.0"
] | 1 | 2022-02-22T21:40:29.000Z | 2022-02-22T21:40:29.000Z | synapse/storage/databases/main/search.py | 3ayazaya/synapse | e3fe6347be1da930b6a0ed2005b565369800a327 | [
"Apache-2.0"
] | null | null | null | synapse/storage/databases/main/search.py | 3ayazaya/synapse | e3fe6347be1da930b6a0ed2005b565369800a327 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import TYPE_CHECKING, Collection, Iterable, List, Optional, Set
import attr
from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
)
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SearchEntry:
    """An immutable row to be written to the `event_search` table."""

    # Which event field is indexed: "content.body", "content.name" or
    # "content.topic" (see the reindex/type mapping below).
    key: str
    # The text to index.
    value: str
    event_id: str
    room_id: str
    # May be None for entries built before stream ordering is known.
    stream_ordering: Optional[int]
    origin_server_ts: int
def _clean_value_for_search(value: str) -> str:
"""
Replaces any null code points in the string with spaces as
Postgres and SQLite do not like the insertion of strings with
null code points into the full-text search tables.
"""
return value.replace("\u0000", " ")
class SearchWorkerStore(SQLBaseStore):
    def store_search_entries_txn(
        self, txn: LoggingTransaction, entries: Iterable[SearchEntry]
    ) -> None:
        """Add entries to the `event_search` full-text search table.

        No-op if search is disabled in the server config.

        Args:
            txn: an open database transaction.
            entries: the entries to be added to the table.
        """
        if not self.hs.config.server.enable_search:
            return
        if isinstance(self.database_engine, PostgresEngine):
            # Postgres: store a tsvector so searches can match with
            # `vector @@ to_tsquery(...)` (see search queries below).
            sql = (
                "INSERT INTO event_search"
                " (event_id, room_id, key, vector, stream_ordering, origin_server_ts)"
                " VALUES (?,?,?,to_tsvector('english', ?),?,?)"
            )
            # Lazily built per-row parameter tuples; null code points are
            # stripped since Postgres rejects them.
            args = (
                (
                    entry.event_id,
                    entry.room_id,
                    entry.key,
                    _clean_value_for_search(entry.value),
                    entry.stream_ordering,
                    entry.origin_server_ts,
                )
                for entry in entries
            )
            txn.execute_batch(sql, args)
        elif isinstance(self.database_engine, Sqlite3Engine):
            # SQLite: store the raw text; searches MATCH against the value
            # column directly.  Note stream_ordering/origin_server_ts are
            # not stored here.
            sql = (
                "INSERT INTO event_search (event_id, room_id, key, value)"
                " VALUES (?,?,?,?)"
            )
            args = (
                (
                    entry.event_id,
                    entry.room_id,
                    entry.key,
                    _clean_value_for_search(entry.value),
                )
                for entry in entries
            )
            txn.execute_batch(sql, args)
        else:
            # This should be unreachable.
            raise Exception("Unrecognized database engine")
class SearchBackgroundUpdateStore(SearchWorkerStore):
    """Registers and runs the background updates that (re)build the
    `event_search` table and its indexes."""

    # Names of the background updates handled by this class.
    EVENT_SEARCH_UPDATE_NAME = "event_search"
    EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order"
    EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist"
    EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin"

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)
        # If search is disabled, none of the reindexing updates are
        # registered (leaving any queued updates unhandled is intentional).
        if not hs.config.server.enable_search:
            return
        self.db_pool.updates.register_background_update_handler(
            self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search
        )
        self.db_pool.updates.register_background_update_handler(
            self.EVENT_SEARCH_ORDER_UPDATE_NAME, self._background_reindex_search_order
        )
        # we used to have a background update to turn the GIN index into a
        # GIST one; we no longer do that (obviously) because we actually want
        # a GIN index. However, it's possible that some people might still have
        # the background update queued, so we register a handler to clear the
        # background update.
        self.db_pool.updates.register_noop_background_update(
            self.EVENT_SEARCH_USE_GIST_POSTGRES_NAME
        )
        self.db_pool.updates.register_background_update_handler(
            self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search
        )

    async def _background_reindex_search(self, progress: JsonDict, batch_size: int) -> int:
        """Repopulate `event_search` from historic events, one batch per call.

        Progress dict keys: "target_min_stream_id_inclusive",
        "max_stream_id_exclusive" and "rows_inserted".  Returns the number
        of rows processed in this batch; the update is marked finished when
        a batch processes nothing.
        """
        # we work through the events table from highest stream id to lowest
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)
        # Only these event types carry searchable text.
        TYPES = ["m.room.name", "m.room.message", "m.room.topic"]

        def reindex_search_txn(txn: LoggingTransaction) -> int:
            # The type filter is interpolated directly (constant strings,
            # not user input); the stream bounds and limit are parameters.
            sql = (
                "SELECT stream_ordering, event_id, room_id, type, json, "
                " origin_server_ts FROM events"
                " JOIN event_json USING (room_id, event_id)"
                " WHERE ? <= stream_ordering AND stream_ordering < ?"
                " AND (%s)"
                " ORDER BY stream_ordering DESC"
                " LIMIT ?"
            ) % (" OR ".join("type = '%s'" % (t,) for t in TYPES),)
            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
            # we could stream straight from the results into
            # store_search_entries_txn with a generator function, but that
            # would mean having two cursors open on the database at once.
            # Instead we just build a list of results.
            rows = self.db_pool.cursor_to_dict(txn)
            if not rows:
                return 0
            # Rows are ordered descending, so the last row sets the upper
            # bound (exclusive) for the next batch.
            min_stream_id = rows[-1]["stream_ordering"]
            event_search_rows = []
            for row in rows:
                try:
                    event_id = row["event_id"]
                    room_id = row["room_id"]
                    etype = row["type"]
                    stream_ordering = row["stream_ordering"]
                    origin_server_ts = row["origin_server_ts"]
                    try:
                        event_json = db_to_json(row["json"])
                        content = event_json["content"]
                    except Exception:
                        # Malformed/undecodable event JSON: skip the event.
                        continue
                    # Map each event type to the field that gets indexed.
                    if etype == "m.room.message":
                        key = "content.body"
                        value = content["body"]
                    elif etype == "m.room.topic":
                        key = "content.topic"
                        value = content["topic"]
                    elif etype == "m.room.name":
                        key = "content.name"
                        value = content["name"]
                    else:
                        # Unreachable: the SQL filter only selects TYPES.
                        raise Exception("unexpected event type %s" % etype)
                except (KeyError, AttributeError):
                    # If the event is missing a necessary field then
                    # skip over it.
                    continue
                if not isinstance(value, str):
                    # If the event body, name or topic isn't a string
                    # then skip over it
                    continue
                event_search_rows.append(
                    SearchEntry(
                        key=key,
                        value=value,
                        event_id=event_id,
                        room_id=room_id,
                        stream_ordering=stream_ordering,
                        origin_server_ts=origin_server_ts,
                    )
                )
            self.store_search_entries_txn(txn, event_search_rows)
            # Persist progress so the next batch resumes below this one.
            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(event_search_rows),
            }
            self.db_pool.updates._background_update_progress_txn(
                txn, self.EVENT_SEARCH_UPDATE_NAME, progress
            )
            return len(event_search_rows)

        result = await self.db_pool.runInteraction(
            self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn
        )
        if not result:
            await self.db_pool.updates._end_background_update(
                self.EVENT_SEARCH_UPDATE_NAME
            )
        return result

    async def _background_reindex_gin_search(self, progress: JsonDict, batch_size: int) -> int:
        """This handles old synapses which used GIST indexes, if any;
        converting them back to be GIN as per the actual schema.

        Postgres-only; a no-op (other than ending the update) on SQLite.
        """

        def create_index(conn):
            conn.rollback()
            # we have to set autocommit, because postgres refuses to
            # CREATE INDEX CONCURRENTLY without it.
            conn.set_session(autocommit=True)
            try:
                c = conn.cursor()
                # if we skipped the conversion to GIST, we may already/still
                # have an event_search_fts_idx; unfortunately postgres 9.4
                # doesn't support CREATE INDEX IF EXISTS so we just catch the
                # exception and ignore it.
                import psycopg2

                try:
                    c.execute(
                        "CREATE INDEX CONCURRENTLY event_search_fts_idx"
                        " ON event_search USING GIN (vector)"
                    )
                except psycopg2.ProgrammingError as e:
                    logger.warning(
                        "Ignoring error %r when trying to switch from GIST to GIN", e
                    )
                # we should now be able to delete the GIST index.
                c.execute("DROP INDEX IF EXISTS event_search_fts_idx_gist")
            finally:
                # Restore normal transactional behaviour for the connection.
                conn.set_session(autocommit=False)

        if isinstance(self.database_engine, PostgresEngine):
            await self.db_pool.runWithConnection(create_index)
        await self.db_pool.updates._end_background_update(
            self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME
        )
        return 1

    async def _background_reindex_search_order(self, progress: JsonDict, batch_size: int) -> int:
        """Backfill stream_ordering/origin_server_ts onto `event_search`
        rows and create the ordering indexes (Postgres syntax).

        Works backwards through stream orderings in batches; the index
        creation happens once, tracked via progress["have_added_indexes"].
        """
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)
        have_added_index = progress["have_added_indexes"]
        if not have_added_index:

            def create_index(conn):
                conn.rollback()
                # Autocommit is required for CREATE INDEX CONCURRENTLY.
                conn.set_session(autocommit=True)
                c = conn.cursor()
                # We create with NULLS FIRST so that when we search *backwards*
                # we get the ones with non null origin_server_ts *first*
                c.execute(
                    "CREATE INDEX CONCURRENTLY event_search_room_order ON event_search("
                    "room_id, origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)"
                )
                c.execute(
                    "CREATE INDEX CONCURRENTLY event_search_order ON event_search("
                    "origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)"
                )
                conn.set_session(autocommit=False)

            await self.db_pool.runWithConnection(create_index)
            # Record that the indexes now exist so we don't re-create them
            # on the next batch.
            pg = dict(progress)
            pg["have_added_indexes"] = True
            await self.db_pool.runInteraction(
                self.EVENT_SEARCH_ORDER_UPDATE_NAME,
                self.db_pool.updates._background_update_progress_txn,
                self.EVENT_SEARCH_ORDER_UPDATE_NAME,
                pg,
            )

        def reindex_search_txn(txn):
            # Copy ordering columns from `events` for one batch of rows.
            sql = (
                "UPDATE event_search AS es SET stream_ordering = e.stream_ordering,"
                " origin_server_ts = e.origin_server_ts"
                " FROM events AS e"
                " WHERE e.event_id = es.event_id"
                " AND ? <= e.stream_ordering AND e.stream_ordering < ?"
                " RETURNING es.stream_ordering"
            )
            min_stream_id = max_stream_id - batch_size
            txn.execute(sql, (min_stream_id, max_stream_id))
            rows = txn.fetchall()
            if min_stream_id < target_min_stream_id:
                # We've reached the end.
                return len(rows), False
            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(rows),
                "have_added_indexes": True,
            }
            self.db_pool.updates._background_update_progress_txn(
                txn, self.EVENT_SEARCH_ORDER_UPDATE_NAME, progress
            )
            return len(rows), True

        num_rows, finished = await self.db_pool.runInteraction(
            self.EVENT_SEARCH_ORDER_UPDATE_NAME, reindex_search_txn
        )
        if not finished:
            await self.db_pool.updates._end_background_update(
                self.EVENT_SEARCH_ORDER_UPDATE_NAME
            )
        return num_rows
class SearchStore(SearchBackgroundUpdateStore):
    """Read path for full-text search over the ``event_search`` table.

    Supports both backends: Postgres (``tsvector``/``to_tsquery``) and
    SQLite (FTS ``MATCH``).  SQL is assembled per-engine in each method.

    Fix applied here: the SQLite count queries previously bound the raw
    ``search_term`` while the main queries bound the parsed ``search_query``
    (prefix-matched, AND-joined), so the reported ``count`` could disagree
    with the rows actually returned.  Both now bind ``search_query``.
    """

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

    async def search_msgs(
        self, room_ids: Collection[str], search_term: str, keys: Iterable[str]
    ) -> JsonDict:
        """Performs a full text search over events with given keys.

        Args:
            room_ids: List of room ids to search in
            search_term: Search term to search for
            keys: List of keys to search in, currently supports
                "content.body", "content.name", "content.topic"

        Returns:
            Dictionary of results
        """
        clauses = []

        # Normalise the raw user input into the engine-specific query syntax.
        search_query = _parse_query(self.database_engine, search_term)

        args = []

        # Make sure we don't explode because the person is in too many rooms.
        # We filter the results below regardless.
        if len(room_ids) < 500:
            clause, args = make_in_list_sql_clause(
                self.database_engine, "room_id", room_ids
            )
            clauses = [clause]

        local_clauses = []
        for key in keys:
            local_clauses.append("key = ?")
            args.append(key)

        clauses.append("(%s)" % (" OR ".join(local_clauses),))

        # Nothing further is appended to these lists in this method, so
        # aliasing (rather than copying) is safe here — unlike search_rooms,
        # which copies before adding pagination clauses.
        count_args = args
        count_clauses = clauses

        if isinstance(self.database_engine, PostgresEngine):
            sql = (
                "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) AS rank,"
                " room_id, event_id"
                " FROM event_search"
                " WHERE vector @@ to_tsquery('english', ?)"
            )
            args = [search_query, search_query] + args

            count_sql = (
                "SELECT room_id, count(*) as count FROM event_search"
                " WHERE vector @@ to_tsquery('english', ?)"
            )
            count_args = [search_query] + count_args
        elif isinstance(self.database_engine, Sqlite3Engine):
            sql = (
                "SELECT rank(matchinfo(event_search)) as rank, room_id, event_id"
                " FROM event_search"
                " WHERE value MATCH ?"
            )
            args = [search_query] + args

            count_sql = (
                "SELECT room_id, count(*) as count FROM event_search"
                " WHERE value MATCH ?"
            )
            # Bug fix: bind the parsed `search_query` (as the main query
            # above does) rather than the raw `search_term`, so the count is
            # computed over the same rows as the returned results.
            count_args = [search_query] + count_args
        else:
            # This should be unreachable.
            raise Exception("Unrecognized database engine")

        for clause in clauses:
            sql += " AND " + clause

        for clause in count_clauses:
            count_sql += " AND " + clause

        # We add an arbitrary limit here to ensure we don't try to pull the
        # entire table from the database.
        sql += " ORDER BY rank DESC LIMIT 500"

        results = await self.db_pool.execute(
            "search_msgs", self.db_pool.cursor_to_dict, sql, *args
        )

        # Re-filter in Python in case the room list was too long to be
        # embedded in the SQL query above.
        results = list(filter(lambda row: row["room_id"] in room_ids, results))

        # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
        # search results (which is a data leak)
        events = await self.get_events_as_list(
            [r["event_id"] for r in results],
            redact_behaviour=EventRedactBehaviour.BLOCK,
        )

        event_map = {ev.event_id: ev for ev in events}

        highlights = None
        if isinstance(self.database_engine, PostgresEngine):
            highlights = await self._find_highlights_in_postgres(search_query, events)

        count_sql += " GROUP BY room_id"

        count_results = await self.db_pool.execute(
            "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args
        )

        count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)

        return {
            "results": [
                {"event": event_map[r["event_id"]], "rank": r["rank"]}
                for r in results
                if r["event_id"] in event_map
            ],
            "highlights": highlights,
            "count": count,
        }

    async def search_rooms(
        self,
        room_ids: Collection[str],
        search_term: str,
        keys: Iterable[str],
        limit,
        pagination_token: Optional[str] = None,
    ) -> JsonDict:
        """Performs a full text search over events with given keys.

        Args:
            room_ids: The room_ids to search in
            search_term: Search term to search for
            keys: List of keys to search in, currently supports "content.body",
                "content.name", "content.topic"
            pagination_token: A pagination token previously returned

        Returns:
            Each match as a dictionary.
        """
        clauses = []

        search_query = _parse_query(self.database_engine, search_term)

        args = []

        # Make sure we don't explode because the person is in too many rooms.
        # We filter the results below regardless.
        if len(room_ids) < 500:
            clause, args = make_in_list_sql_clause(
                self.database_engine, "room_id", room_ids
            )
            clauses = [clause]

        local_clauses = []
        for key in keys:
            local_clauses.append("key = ?")
            args.append(key)

        clauses.append("(%s)" % (" OR ".join(local_clauses),))

        # take copies of the current args and clauses lists, before adding
        # pagination clauses to main query.
        count_args = list(args)
        count_clauses = list(clauses)

        if pagination_token:
            try:
                origin_server_ts, stream = pagination_token.split(",")
                origin_server_ts = int(origin_server_ts)
                stream = int(stream)
            except Exception:
                raise SynapseError(400, "Invalid pagination token")

            # Resume strictly after the (origin_server_ts, stream_ordering)
            # position encoded in the token, walking backwards.
            clauses.append(
                "(origin_server_ts < ?"
                " OR (origin_server_ts = ? AND stream_ordering < ?))"
            )
            args.extend([origin_server_ts, origin_server_ts, stream])

        if isinstance(self.database_engine, PostgresEngine):
            sql = (
                "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) as rank,"
                " origin_server_ts, stream_ordering, room_id, event_id"
                " FROM event_search"
                " WHERE vector @@ to_tsquery('english', ?) AND "
            )
            args = [search_query, search_query] + args

            count_sql = (
                "SELECT room_id, count(*) as count FROM event_search"
                " WHERE vector @@ to_tsquery('english', ?) AND "
            )
            count_args = [search_query] + count_args
        elif isinstance(self.database_engine, Sqlite3Engine):
            # We use CROSS JOIN here to ensure we use the right indexes.
            # https://sqlite.org/optoverview.html#crossjoin
            #
            # We want to use the full text search index on event_search to
            # extract all possible matches first, then lookup those matches
            # in the events table to get the topological ordering. We need
            # to use the indexes in this order because sqlite refuses to
            # MATCH unless it uses the full text search index
            sql = (
                "SELECT rank(matchinfo) as rank, room_id, event_id,"
                " origin_server_ts, stream_ordering"
                " FROM (SELECT key, event_id, matchinfo(event_search) as matchinfo"
                " FROM event_search"
                " WHERE value MATCH ?"
                " )"
                " CROSS JOIN events USING (event_id)"
                " WHERE "
            )
            args = [search_query] + args

            count_sql = (
                "SELECT room_id, count(*) as count FROM event_search"
                " WHERE value MATCH ? AND "
            )
            # Bug fix: bind the parsed `search_query` (as the main query
            # does) instead of the raw `search_term`, so the count agrees
            # with the rows the main query returns.
            count_args = [search_query] + count_args
        else:
            # This should be unreachable.
            raise Exception("Unrecognized database engine")

        sql += " AND ".join(clauses)
        count_sql += " AND ".join(count_clauses)

        # We add an arbitrary limit here to ensure we don't try to pull the
        # entire table from the database.
        if isinstance(self.database_engine, PostgresEngine):
            sql += (
                " ORDER BY origin_server_ts DESC NULLS LAST,"
                " stream_ordering DESC NULLS LAST LIMIT ?"
            )
        elif isinstance(self.database_engine, Sqlite3Engine):
            sql += " ORDER BY origin_server_ts DESC, stream_ordering DESC LIMIT ?"
        else:
            raise Exception("Unrecognized database engine")

        args.append(limit)

        results = await self.db_pool.execute(
            "search_rooms", self.db_pool.cursor_to_dict, sql, *args
        )

        # Re-filter in Python in case the room list was too long to be
        # embedded in the SQL query above.
        results = list(filter(lambda row: row["room_id"] in room_ids, results))

        # We set redact_behaviour to BLOCK here to prevent redacted events being returned in
        # search results (which is a data leak)
        events = await self.get_events_as_list(
            [r["event_id"] for r in results],
            redact_behaviour=EventRedactBehaviour.BLOCK,
        )

        event_map = {ev.event_id: ev for ev in events}

        highlights = None
        if isinstance(self.database_engine, PostgresEngine):
            highlights = await self._find_highlights_in_postgres(search_query, events)

        count_sql += " GROUP BY room_id"

        count_results = await self.db_pool.execute(
            "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args
        )

        count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)

        return {
            "results": [
                {
                    "event": event_map[r["event_id"]],
                    "rank": r["rank"],
                    "pagination_token": "%s,%s"
                    % (r["origin_server_ts"], r["stream_ordering"]),
                }
                for r in results
                if r["event_id"] in event_map
            ],
            "highlights": highlights,
            "count": count,
        }

    async def _find_highlights_in_postgres(
        self, search_query: str, events: List[EventBase]
    ) -> Set[str]:
        """Given a list of events and a search term, return a list of words
        that match from the content of the event.

        This is used to give a list of words that clients can match against to
        highlight the matching parts.

        Args:
            search_query
            events: A list of events

        Returns:
            A set of strings.
        """

        def f(txn):
            highlight_words = set()
            for event in events:
                # As a hack we simply join values of all possible keys. This is
                # fine since we're only using them to find possible highlights.
                values = []
                for key in ("body", "name", "topic"):
                    v = event.content.get(key, None)
                    if v:
                        v = _clean_value_for_search(v)
                        values.append(v)

                if not values:
                    continue

                value = " ".join(values)

                # We need to find some values for StartSel and StopSel that
                # aren't in the value so that we can pick results out.
                start_sel = "<"
                stop_sel = ">"

                while start_sel in value:
                    start_sel += "<"
                while stop_sel in value:
                    stop_sel += ">"

                query = "SELECT ts_headline(?, to_tsquery('english', ?), %s)" % (
                    _to_postgres_options(
                        {
                            "StartSel": start_sel,
                            "StopSel": stop_sel,
                            "MaxFragments": "50",
                        }
                    )
                )
                txn.execute(query, (value, search_query))
                (headline,) = txn.fetchall()[0]

                # Now we need to pick the possible highlights out of the
                # headline result.
                matcher_regex = "%s(.*?)%s" % (
                    re.escape(start_sel),
                    re.escape(stop_sel),
                )

                res = re.findall(matcher_regex, headline)
                highlight_words.update([r.lower() for r in res])

            return highlight_words

        return await self.db_pool.runInteraction("_find_highlights", f)
def _to_postgres_options(options_dict):
return "'%s'" % (",".join("%s=%s" % (k, v) for k, v in options_dict.items()),)
def _parse_query(database_engine, search_term):
    """Takes a plain unicode string from the user and converts it into a form
    that can be passed to database.

    We use this so that we can add prefix matching, which isn't something
    that is supported by default.
    """
    # Split the input into words, dropping punctuation (hyphens are kept).
    words = re.findall(r"([\w\-]+)", search_term, re.UNICODE)

    # Each word becomes a prefix match; words are ANDed together using the
    # engine's own syntax.
    if isinstance(database_engine, PostgresEngine):
        suffix = ":*"
    elif isinstance(database_engine, Sqlite3Engine):
        suffix = "*"
    else:
        # This should be unreachable.
        raise Exception("Unrecognized database engine")

    return " & ".join(word + suffix for word in words)
| 36.251323 | 92 | 0.565278 |
acea309cfbe62ed781b6ad24945a79dda72cd4d5 | 609 | py | Python | tests/test/lib/plugin_manager/plugin_1/package_3/__init__.py | IBM/data-gate-cli | fc0cb1a560a0156c71eb63a550e198d0cd36e1df | [
"Apache-2.0"
] | 9 | 2020-08-21T08:46:34.000Z | 2021-09-02T15:47:41.000Z | tests/test/lib/plugin_manager/plugin_1/package_3/__init__.py | IBM/data-gate-cli | fc0cb1a560a0156c71eb63a550e198d0cd36e1df | [
"Apache-2.0"
] | 10 | 2020-11-26T15:31:43.000Z | 2021-11-08T15:00:01.000Z | tests/test/lib/plugin_manager/plugin_1/package_3/__init__.py | IBM/data-gate-cli | fc0cb1a560a0156c71eb63a550e198d0cd36e1df | [
"Apache-2.0"
] | 1 | 2022-03-10T07:14:49.000Z | 2022-03-10T07:14:49.000Z | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module docstring assigned explicitly; NOTE(review): presumably read by the
# plugin-manager discovery under test to identify this package — verify.
__doc__ = "bi_group_1"
| 38.0625 | 75 | 0.755337 |
acea334206d8387293d0904e32e31067d4804ae8 | 8,367 | py | Python | tests/unit/cli/test_daemons.py | exe01/salt | 0e4e8a458afc120a149eab83e5b9389c474fedf7 | [
"Apache-2.0"
] | null | null | null | tests/unit/cli/test_daemons.py | exe01/salt | 0e4e8a458afc120a149eab83e5b9389c474fedf7 | [
"Apache-2.0"
] | null | null | null | tests/unit/cli/test_daemons.py | exe01/salt | 0e4e8a458afc120a149eab83e5b9389c474fedf7 | [
"Apache-2.0"
] | null | null | null | """
:codeauthor: Bo Maryniuk <bo@suse.de>
"""
import logging
import multiprocessing
import salt.cli.daemons as daemons
from tests.support.helpers import slowTest
from tests.support.mixins import SaltClientTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class LoggerMock:
"""
Logger data collector
"""
def __init__(self):
"""
init
:return:
"""
self.reset()
def reset(self):
"""
Reset values
:return:
"""
self.messages = []
def info(self, message, *args, **kwargs):
"""
Collects the data from the logger of info type.
:param data:
:return:
"""
self.messages.append(
{"message": message, "args": args, "kwargs": kwargs, "type": "info"}
)
def warning(self, message, *args, **kwargs):
"""
Collects the data from the logger of warning type.
:param data:
:return:
"""
self.messages.append(
{"message": message, "args": args, "kwargs": kwargs, "type": "warning"}
)
def has_message(self, msg, log_type=None):
"""
Check if log has message.
:param data:
:return:
"""
for data in self.messages:
log_str = (
data["message"] % data["args"]
) # pylint: disable=incompatible-py3-code
if (data["type"] == log_type or not log_type) and log_str.find(msg) > -1:
return True
return False
def _master_exec_test(child_pipe):
    """Child-process body: check Master warns on weak hash_type values.

    Sends True/False (or a falsy value) down *child_pipe* for the parent
    test to assert on.
    """

    def _new_master(alg):
        """Build a Master with mocked lifecycle methods and the given hash_type."""
        master = daemons.Master()
        master.config = {"user": "dummy", "hash_type": alg}
        for attr in ["start_log_info", "prepare", "shutdown", "master"]:
            setattr(master, attr, MagicMock())
        return master

    _logger = LoggerMock()
    ret = True
    try:
        with patch("salt.cli.daemons.check_user", MagicMock(return_value=True)), patch(
            "salt.cli.daemons.log", _logger
        ):
            # Weak digests must trigger the startup warning.
            for alg in ["md5", "sha1"]:
                _new_master(alg).start()
                ret = ret and _logger.has_message(
                    "Do not use {alg}".format(alg=alg), log_type="warning"
                )

            _logger.reset()

            # Strong digests must log something, but never the warning.
            for alg in ["sha224", "sha256", "sha384", "sha512"]:
                _new_master(alg).start()
                ret = (
                    ret
                    and _logger.messages
                    and not _logger.has_message("Do not use ")
                )
    except Exception:  # pylint: disable=broad-except
        log.exception("Exception raised in master daemon unit test")
        ret = False

    child_pipe.send(ret)
    child_pipe.close()
def _minion_exec_test(child_pipe):
    """Child-process body: check Minion warns on weak hash_type values.

    Sends True/False (or a falsy value) down *child_pipe* for the parent
    test to assert on.
    """

    def _new_minion(alg):
        """Build a Minion with mocked lifecycle methods and the given hash_type."""
        minion = daemons.Minion()
        minion.config = {"user": "dummy", "hash_type": alg}
        for attr in ["start_log_info", "prepare", "shutdown"]:
            setattr(minion, attr, MagicMock())
        # The wrapped minion must not request a restart after start().
        setattr(minion, "minion", MagicMock(restart=False))
        return minion

    ret = True
    try:
        _logger = LoggerMock()
        with patch("salt.cli.daemons.check_user", MagicMock(return_value=True)), patch(
            "salt.cli.daemons.log", _logger
        ):
            # Weak digests must trigger the startup warning.
            for alg in ["md5", "sha1"]:
                _new_minion(alg).start()
                ret = ret and _logger.has_message(
                    "Do not use {alg}".format(alg=alg), log_type="warning"
                )

            _logger.reset()

            # Strong digests must log something, but never the warning.
            for alg in ["sha224", "sha256", "sha384", "sha512"]:
                _new_minion(alg).start()
                ret = (
                    ret
                    and _logger.messages
                    and not _logger.has_message("Do not use ")
                )
    except Exception:  # pylint: disable=broad-except
        log.exception("Exception raised in minion daemon unit test")
        ret = False

    child_pipe.send(ret)
    child_pipe.close()
def _proxy_exec_test(child_pipe):
    """Child-process body: check ProxyMinion warns on weak hash_type values.

    Sends True/False (or a falsy value) down *child_pipe* for the parent
    test to assert on.
    """

    def _new_proxy_minion(alg):
        """Build a ProxyMinion with mocked lifecycle methods and the given hash_type."""
        proxy = daemons.ProxyMinion()
        proxy.config = {"user": "dummy", "hash_type": alg}
        for attr in ["minion", "start_log_info", "prepare", "shutdown", "tune_in"]:
            setattr(proxy, attr, MagicMock())
        # The wrapped minion must not request a restart after start().
        proxy.minion.restart = False
        return proxy

    ret = True
    try:
        _logger = LoggerMock()
        with patch("salt.cli.daemons.check_user", MagicMock(return_value=True)), patch(
            "salt.cli.daemons.log", _logger
        ):
            # Weak digests must trigger the startup warning.
            for alg in ["md5", "sha1"]:
                _new_proxy_minion(alg).start()
                ret = ret and _logger.has_message(
                    "Do not use {alg}".format(alg=alg), log_type="warning"
                )

            _logger.reset()

            # Strong digests must log something, but never the warning.
            for alg in ["sha224", "sha256", "sha384", "sha512"]:
                _new_proxy_minion(alg).start()
                ret = (
                    ret
                    and _logger.messages
                    and not _logger.has_message("Do not use ")
                )
    except Exception:  # pylint: disable=broad-except
        log.exception("Exception raised in proxy daemon unit test")
        ret = False

    child_pipe.send(ret)
    child_pipe.close()
def _syndic_exec_test(child_pipe):
    """Child-process body: check Syndic warns on weak hash_type values.

    Sends True/False (or a falsy value) down *child_pipe* for the parent
    test to assert on.
    """

    def _new_syndic(alg):
        """Build a Syndic with mocked lifecycle methods and the given hash_type."""
        syndic = daemons.Syndic()
        syndic.config = {"user": "dummy", "hash_type": alg}
        for attr in ["syndic", "start_log_info", "prepare", "shutdown"]:
            setattr(syndic, attr, MagicMock())
        return syndic

    ret = True
    try:
        _logger = LoggerMock()
        with patch("salt.cli.daemons.check_user", MagicMock(return_value=True)), patch(
            "salt.cli.daemons.log", _logger
        ):
            # Weak digests must trigger the startup warning.
            for alg in ["md5", "sha1"]:
                _new_syndic(alg).start()
                ret = ret and _logger.has_message(
                    "Do not use {alg}".format(alg=alg), log_type="warning"
                )

            _logger.reset()

            # Strong digests must log something, but never the warning.
            for alg in ["sha224", "sha256", "sha384", "sha512"]:
                _new_syndic(alg).start()
                ret = (
                    ret
                    and _logger.messages
                    and not _logger.has_message("Do not use ")
                )
    except Exception:  # pylint: disable=broad-except
        log.exception("Exception raised in syndic daemon unit test")
        ret = False

    child_pipe.send(ret)
    child_pipe.close()
class DaemonsStarterTestCase(TestCase, SaltClientTestCaseMixin):
    """
    Unit test for the daemons starter classes.
    """

    def _multiproc_exec_test(self, exec_test):
        """Run *exec_test* in a child process and assert it reported success
        over the pipe it was handed."""
        recv_conn, send_conn = multiprocessing.Pipe()
        worker = multiprocessing.Process(target=exec_test, args=(send_conn,))
        worker.start()
        self.assertTrue(recv_conn.recv())
        worker.join()

    @slowTest
    def test_master_daemon_hash_type_verified(self):
        """Master must validate the hash_type config option on startup."""
        self._multiproc_exec_test(_master_exec_test)

    @slowTest
    def test_minion_daemon_hash_type_verified(self):
        """Minion must validate the hash_type config option on startup."""
        self._multiproc_exec_test(_minion_exec_test)

    @slowTest
    def test_proxy_minion_daemon_hash_type_verified(self):
        """ProxyMinion must validate the hash_type config option on startup."""
        self._multiproc_exec_test(_proxy_exec_test)

    @slowTest
    def test_syndic_daemon_hash_type_verified(self):
        """Syndic must validate the hash_type config option on startup."""
        self._multiproc_exec_test(_syndic_exec_test)
acea337b4528763709eaa0f7b55c2ee923ddb4cb | 346 | py | Python | app/migrations/0002_auto_20200113_0108.py | janakhpon/Django-todo | f111de72faf1b348301a1d4bc1deca4cc96e4ab2 | [
"BSD-2-Clause"
] | null | null | null | app/migrations/0002_auto_20200113_0108.py | janakhpon/Django-todo | f111de72faf1b348301a1d4bc1deca4cc96e4ab2 | [
"BSD-2-Clause"
] | null | null | null | app/migrations/0002_auto_20200113_0108.py | janakhpon/Django-todo | f111de72faf1b348301a1d4bc1deca4cc96e4ab2 | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 2.2.6 on 2020-01-13 01:08
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``todotext`` field on the ``todo`` model to ``text``.

    Schema-only rename; no data transformation is performed.
    """
    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='todo',
            old_name='todotext',
            new_name='text',
        ),
    ]
| 18.210526 | 47 | 0.563584 |
acea341f30446388fed8bcec0c4f32554d316661 | 5,820 | py | Python | pietorch/DuRN_U.py | vis-opt-group/GTANet | 269ff4418ee5f0267987e1fa4c69bda13e5cb00d | [
"MIT"
] | null | null | null | pietorch/DuRN_U.py | vis-opt-group/GTANet | 269ff4418ee5f0267987e1fa4c69bda13e5cb00d | [
"MIT"
] | 1 | 2022-03-28T07:15:44.000Z | 2022-03-28T07:15:44.000Z | pietorch/DuRN_U.py | vis-opt-group/GTANet | 269ff4418ee5f0267987e1fa4c69bda13e5cb00d | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import itertools
from PIL import Image
from torchvision import models
from torch.autograd import Variable
from .N_modules import InsNorm
class cleaner(nn.Module):
    """DuRN-U restoration network.

    Pipeline: 3->64->128->256 encoder (two stride-2 convs), six DuRB-U
    blocks (``DualUpDownLayer``), then a PixelShuffle-based decoder back to
    3 channels, with a global residual so the network predicts a correction
    added to the input image.

    NOTE(review): assumes a 3-channel input whose H/W are divisible by 4
    (two stride-2 downsamplings) — confirm with callers.
    """
    def __init__(self):
        super(cleaner, self).__init__()
        # Initial convolutional layers
        # Encoder: 3 -> 64 (same size), 64 -> 128 (/2), 128 -> 256 (/2).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3)
        self.norm1 = FeatNorm('instance', 64)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        self.norm2 = FeatNorm('instance', 128)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
        self.norm3 = FeatNorm('instance', 256)
        # DuRBs, a DualUpDownLayer is a DuRB_U.
        # Each block carries (x: 256ch, res: 128ch) through; only the
        # up-conv receptive field (f_size/dilation) varies per block.
        self.rud1 = DualUpDownLayer(256, 256, 128, f_size=3, dilation=3, norm_type='instance')
        self.rud2 = DualUpDownLayer(256, 256, 128, f_size=7, dilation=1, norm_type='instance')
        self.rud3 = DualUpDownLayer(256, 256, 128, f_size=3, dilation=3, norm_type='instance')
        self.rud4 = DualUpDownLayer(256, 256, 128, f_size=7, dilation=1, norm_type='instance')
        self.rud5 = DualUpDownLayer(256, 256, 128, f_size=3, dilation=2, norm_type='instance')
        self.rud6 = DualUpDownLayer(256, 256, 128, f_size=5, dilation=1, norm_type='instance')
        # Last layers
        # -- Up1 --
        # 256 -> 512, then PixelShuffle(2): channels /4, spatial x2 => 128ch.
        self.upconv1 = ConvLayer(256, 512, kernel_size=1, stride=1)
        self.upnorm1 = FeatNorm('instance', 512)
        self.upsamp1 = nn.PixelShuffle(2)
        # ---------
        self.conv4 = ConvLayer(128, 128, kernel_size=3, stride=1)
        self.norm4 = FeatNorm('instance', 128)
        # -- Up2 --
        # 128 -> 256, then PixelShuffle(2) => 64ch at full resolution.
        self.upconv2 = ConvLayer(128, 256, kernel_size=1, stride=1)
        self.upnorm2 = FeatNorm('instance', 256)
        self.upsamp2 = nn.PixelShuffle(2)
        # ---------
        self.conv5 = ConvLayer(64, 64, kernel_size=3, stride=1)
        self.norm5 = FeatNorm('instance', 64)
        self.end_conv = nn.Conv2d(64, 3, kernel_size=7, stride=1, padding=3)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
    def forward(self, x):
        # Keep the input for the global residual added at the end.
        residual = x
        x = self.relu(self.norm1(self.conv1(x)))
        x = self.relu(self.norm2(self.conv2(x)))
        # `res` is the 128-channel half-resolution feature threaded through
        # the DuRB-U chain as the second residual stream.
        res = x
        x = self.relu(self.norm3(self.conv3(x)))
        x, res = self.rud1(x, res)
        x, res = self.rud2(x, res)
        x, res = self.rud3(x, res)
        x, res = self.rud4(x, res)
        x, res = self.rud5(x, res)
        x, res = self.rud6(x, res)
        # Decoder: two conv + PixelShuffle upsampling stages.
        x = self.upnorm1(self.upconv1(x))
        x = self.upsamp1(x)
        x = self.relu(self.norm4(self.conv4(x)))
        x = self.upnorm2(self.upconv2(x))
        x = self.upsamp2(x)
        x = self.relu(self.norm5(self.conv5(x)))
        # Tanh-bounded correction added onto the original input.
        x = self.tanh(self.end_conv(x))
        x = x + residual
        return x
# DualUpDownLayer IS DuRB_U, defined here:
class DualUpDownLayer(nn.Module):
    """DuRB-U: dual residual block with up- then down-sampling paired ops.

    Carries two streams: ``x`` (in_dim channels) and ``res`` (res_dim
    channels at twice the spatial size of ``x``).

    NOTE(review): PixelShuffle(2) turns the 2*in_dim pre-conv output into
    in_dim/2 channels at 2x resolution, which is fed to ``up_conv`` — so the
    wiring assumes res_dim == in_dim // 2 (true for the 256/128 use in
    ``cleaner``); confirm before reusing with other dims.
    """
    def __init__(self, in_dim, out_dim, res_dim, f_size=3, dilation=1, norm_type="instance", with_relu=True):
        super(DualUpDownLayer, self).__init__()
        # Two 3x3 conv+norm stages forming the block's local residual pair.
        self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
        self.norm1 = FeatNorm(norm_type, in_dim)
        self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
        self.norm2 = FeatNorm(norm_type, in_dim)
        # T^{l}_{1}: (Up+conv+insnorm)
        # -- Up --
        self.conv_pre = ConvLayer(in_dim, 2 * in_dim, 1, 1)
        self.norm_pre = FeatNorm(norm_type, 2 * in_dim)
        self.upsamp = nn.PixelShuffle(2)
        # --------
        self.up_conv = ConvLayer(res_dim, res_dim, kernel_size=f_size, stride=1, dilation=dilation)
        self.up_norm = FeatNorm(norm_type, res_dim)
        # T^{l}_{2}: (conv+insnorm), stride=2 for down-scaling.
        self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=3, stride=2)
        self.down_norm = FeatNorm(norm_type, out_dim)
        # Final ReLU is optional so a block can end a chain un-activated.
        self.with_relu = with_relu
        self.relu = nn.ReLU()
    def forward(self, x, res):
        # First residual: conv-norm-relu, conv, add input, norm, relu.
        x_r = x
        x = self.relu(self.norm1(self.conv1(x)))
        x = self.conv2(x)
        x += x_r
        x = self.relu(self.norm2(x))
        # Up path (T1): expand channels, PixelShuffle to 2x resolution,
        # convolve, and add the incoming high-resolution residual stream.
        x = self.norm_pre(self.conv_pre(x))
        x = self.upsamp(x)
        x = self.up_conv(x)
        x += res
        x = self.relu(self.up_norm(x))
        # The updated high-res features become the next block's `res`.
        res = x
        # Down path (T2): stride-2 conv back to the low resolution, plus the
        # block input as a second residual connection.
        x = self.down_conv(x)
        x += x_r
        x = self.down_norm(x)
        if self.with_relu:
            x = self.relu(x)
        else:
            pass
        return x, res
# ------------------------------------------
class ConvLayer(nn.Module):
    """Conv2d wrapper with two padding strategies.

    For ``dilation == 1`` the input is reflection-padded by ``kernel_size//2``
    (so borders are mirrored rather than zero-filled); otherwise the conv
    itself zero-pads by ``dilation``.  Both keep the spatial size unchanged
    for stride 1 (the dilated case for kernel_size == 3).
    """

    def __init__(self, in_dim, out_dim, kernel_size, stride, dilation=1):
        super(ConvLayer, self).__init__()
        self.dilation = dilation
        if dilation == 1:
            pad = kernel_size // 2
            self.reflection_pad = nn.ReflectionPad2d(pad)
            self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride, dilation=dilation)
        else:
            self.conv2d = nn.Conv2d(
                in_dim, out_dim, kernel_size, stride, dilation=dilation, padding=dilation
            )

    def forward(self, x):
        if self.dilation != 1:
            return self.conv2d(x)
        return self.conv2d(self.reflection_pad(x))
class FeatNorm(nn.Module):
    """Feature normalization selected by name.

    ``'instance'`` -> ``InsNorm`` (project-local instance norm);
    ``'batch_norm'`` -> ``nn.BatchNorm2d``.  Any other name raises.
    """

    def __init__(self, norm_type, dim):
        super(FeatNorm, self).__init__()
        if norm_type == "instance":
            self.norm = InsNorm(dim)
        elif norm_type == "batch_norm":
            self.norm = nn.BatchNorm2d(dim)
        else:
            raise Exception("Normalization type incorrect.")

    def forward(self, x):
        return self.norm(x)
| 33.641618 | 110 | 0.591581 |
acea34749738641a081e0c15a96b9d9d1dde8bbf | 9,058 | py | Python | rand_param_envs/mujoco_py/util.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 1,838 | 2017-08-10T04:19:28.000Z | 2022-03-29T07:41:19.000Z | rand_param_envs/mujoco_py/util.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 120 | 2016-10-05T09:16:16.000Z | 2017-07-27T22:57:31.000Z | rand_param_envs/mujoco_py/util.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 498 | 2017-08-16T03:34:28.000Z | 2022-03-31T04:41:32.000Z | import ctypes, os, sys
from ctypes import *
import six
# MAXINT on Python 2, undefined on Python 3
# (2**63 - 1, the largest signed 64-bit integer; used below as an
# "unbounded" default for slice-style start/end arguments)
MAXINT = 9223372036854775807
class UserString:
    """Immutable string wrapper (vendored, pre-dates ``collections.UserString``).

    Wraps a plain string in ``self.data`` and forwards the string API,
    re-wrapping results in ``self.__class__`` where the stdlib does.

    Fixes applied: the original referenced ``basestring`` (a NameError on
    Python 3) — replaced with ``str``; the module-level ``MAXINT`` defaults
    are replaced with the equivalent ``sys.maxsize`` so the class does not
    depend on a module global.  Python-2-only dunders (``__cmp__``,
    ``__long__``, ``__getslice__``, ``decode``) are retained unchanged for
    compatibility; Python 3 never invokes them implicitly.
    """

    def __init__(self, seq):
        # Accept a plain string, another UserString (copy its data), or any
        # object (stringified).
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)

    # --- conversions -----------------------------------------------------
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __long__(self): return long(self.data)  # Python 2 only; `long` is undefined on py3
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)

    def __cmp__(self, string):
        # Python 2 only: py3 never calls __cmp__ and `cmp` is undefined there.
        if isinstance(string, UserString):
            return cmp(self.data, string.data)
        else:
            return cmp(self.data, string)

    def __contains__(self, char):
        return char in self.data

    # --- sequence protocol -----------------------------------------------
    def __len__(self): return len(self.data)
    def __getitem__(self, index): return self.__class__(self.data[index])

    def __getslice__(self, start, end):
        # Python 2 only; py3 routes slicing through __getitem__.
        start = max(start, 0); end = max(end, 0)
        return self.__class__(self.data[start:end])

    # --- arithmetic-style operators --------------------------------------
    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + str(other))

    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        else:
            return self.__class__(str(other) + self.data)

    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__

    def __mod__(self, args):
        return self.__class__(self.data % args)

    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=sys.maxsize):
        return self.data.count(sub, start, end)
    def decode(self, encoding=None, errors=None): # XXX improve this?
        # NOTE: on Python 3 `str` has no .decode; this only works when
        # self.data is bytes (legacy path retained unchanged).
        if encoding:
            if errors:
                return self.__class__(self.data.decode(encoding, errors))
            else:
                return self.__class__(self.data.decode(encoding))
        else:
            return self.__class__(self.data.decode())
    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            else:
                return self.__class__(self.data.encode(encoding))
        else:
            return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=sys.maxsize):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=sys.maxsize):
        return self.data.find(sub, start, end)
    def index(self, sub, start=0, end=sys.maxsize):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=sys.maxsize):
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=sys.maxsize):
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=sys.maxsize):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
class MutableString(UserString):
    """mutable string objects

    Python strings are immutable objects.  This has the advantage, that
    strings may be used as dictionary keys.  If this property isn't needed
    and you insist on changing string values in place instead, you may cheat
    and use MutableString.

    But the purpose of this class is an educational one: to prevent
    people from inventing their own mutable string class derived
    from UserString and than forget thereby to remove (override) the
    __hash__ method inherited from UserString.  This would lead to
    errors that would be very hard to track down.

    A faster and better solution is to rewrite your program using lists.

    Fix applied: ``basestring`` (a NameError on Python 3) replaced with
    ``str`` in ``__setslice__`` and — on the live py3 path — ``__iadd__``.
    """

    def __init__(self, string=""):
        self.data = string

    def __hash__(self):
        # Mutable objects must not be hashable.
        raise TypeError("unhashable type (it is mutable)")

    def __setitem__(self, index, sub):
        # Support negative indices, then splice the replacement in.
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data): raise IndexError
        self.data = self.data[:index] + sub + self.data[index+1:]

    def __delitem__(self, index):
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data): raise IndexError
        self.data = self.data[:index] + self.data[index+1:]

    def __setslice__(self, start, end, sub):
        # Python 2 only; py3 routes slice assignment through __setitem__.
        start = max(start, 0); end = max(end, 0)
        if isinstance(sub, UserString):
            self.data = self.data[:start]+sub.data+self.data[end:]
        elif isinstance(sub, str):
            self.data = self.data[:start]+sub+self.data[end:]
        else:
            self.data = self.data[:start]+str(sub)+self.data[end:]

    def __delslice__(self, start, end):
        # Python 2 only; py3 routes slice deletion through __delitem__.
        start = max(start, 0); end = max(end, 0)
        self.data = self.data[:start] + self.data[end:]

    def immutable(self):
        """Return an immutable UserString copy of the current value."""
        return UserString(self.data)

    def __iadd__(self, other):
        if isinstance(other, UserString):
            self.data += other.data
        elif isinstance(other, str):
            self.data += other
        else:
            self.data += str(other)
        return self

    def __imul__(self, n):
        self.data *= n
        return self
class String(MutableString, Union):
    # ctypes adapter: a Union overlaying the same storage as either a raw
    # char pointer (`raw`) or a NUL-terminated C string (`data`), with the
    # MutableString API layered on top.  Used so generated bindings can pass
    # Python strings to C functions expecting char*.
    _fields_ = [('raw', POINTER(c_char)),
                ('data', c_char_p)]
    def __init__(self, obj=""):
        # Text is ASCII-encoded to bytes; bytes are stored directly; other
        # objects are treated as raw pointers.
        if isinstance(obj, six.text_type):
            self.data = obj.encode('ascii')
        elif isinstance(obj, six.binary_type):
            self.data = obj
        elif isinstance(obj, UserString):
            # NOTE(review): six.b expects a native str, but obj is a
            # UserString here — looks suspect; presumably should be
            # six.b(str(obj)). Verify before relying on this path.
            self.data = six.b(obj)
        else:
            self.raw = obj
    def __len__(self):
        # `data` is None for pointer-initialized instances; treat as length 0.
        return self.data and len(self.data) or 0
    def from_param(cls, obj):
        # Coerce the many things callers may pass into something ctypes can
        # marshal as char*. Declared below via classmethod() (py2-era style).
        # Convert None or 0
        if obj is None or obj == 0:
            return cls(POINTER(c_char)())
        # Convert from String
        elif isinstance(obj, String):
            return obj
        # Convert from str
        elif isinstance(obj, str):
            return cls(obj)
        # Convert from c_char_p
        elif isinstance(obj, c_char_p):
            return obj
        # Convert from POINTER(c_char)
        elif isinstance(obj, POINTER(c_char)):
            return obj
        # Convert from raw pointer
        elif isinstance(obj, int):
            return cls(cast(obj, POINTER(c_char)))
        # Convert from object
        else:
            # Fall back to the ctypes _as_parameter_ protocol, recursively.
            return String.from_param(obj._as_parameter_)
    from_param = classmethod(from_param)
def ReturnString(obj, func=None, arguments=None):
    """ctypes errcheck hook: coerce a foreign call's return value to String.

    The unused *func* and *arguments* parameters are part of the ctypes
    errcheck callback signature and must be accepted.
    """
    converted = String.from_param(obj)
    return converted
| 39.043103 | 80 | 0.633804 |
acea350373534c13e9cbc1c59182ca1c540dcf68 | 5,013 | py | Python | externalscripts/s3_stats.py | syphernl/zabbix-cloudwatch | ee571260055c6c8d2be43e532c929a2d005c7e44 | [
"MIT"
] | null | null | null | externalscripts/s3_stats.py | syphernl/zabbix-cloudwatch | ee571260055c6c8d2be43e532c929a2d005c7e44 | [
"MIT"
] | null | null | null | externalscripts/s3_stats.py | syphernl/zabbix-cloudwatch | ee571260055c6c8d2be43e532c929a2d005c7e44 | [
"MIT"
] | 1 | 2020-02-28T10:25:04.000Z | 2020-02-28T10:25:04.000Z | #!/usr/bin/env python3
import datetime
from optparse import OptionParser
import boto.ec2.cloudwatch
# Command-line arguments.  optparse is kept (rather than argparse) so the
# CLI stays byte-compatible with the sibling cloudwatch scripts.
parser = OptionParser()
parser.add_option("-i", "--instance-id", dest="instance_id",
                  help="BucketName")
parser.add_option("-s", "--storage-type", dest="storage_type",
                  help="StorageType")
parser.add_option("-a", "--access-key", dest="access_key",
                  help="AWS Access Key")
parser.add_option("-k", "--secret-key", dest="secret_key",
                  help="AWS Secret Access Key")
parser.add_option("-m", "--metric", dest="metric",
                  help="S3 CloudWatch metric")  # fixed: said "RDS" in an S3 script
parser.add_option("-r", "--region", dest="region",
                  help="AWS region")  # fixed: said "RDS region"
(options, args) = parser.parse_args()

# Validate required options; parser.error() prints usage and exits(2).
# -r is optional: a default region is applied further down.
if options.instance_id is None:
    parser.error("-i BucketName is required")
if options.storage_type is None:
    parser.error("-s StorageType is required")
if options.access_key is None:
    parser.error("-a AWS Access Key is required")
if options.secret_key is None:
    parser.error("-k AWS Secret Key is required")
if options.metric is None:
    parser.error("-m S3 CloudWatch metric is required")
# Table of supported S3 CloudWatch metrics.  Per metric:
#   statistic -- CloudWatch statistic to request (Average or Sum)
#   type      -- output formatting: "float" -> %.4f, "int"/"boolean" -> %i
#   value     -- placeholder; filled in by the lookup loop below
#   minutes   -- lookback window; 1440 marks a once-per-day storage metric
#   units     -- informational only; never read by this script
metrics = {
    # Amazon S3 CloudWatch Daily Storage Metrics for Buckets
    "BucketSizeBytes": {"statistic": "Average", "type": "float", "value": None, "minutes": 1440, "units": "Bytes"},
    "NumberOfObjects": {"statistic": "Average", "type": "float", "value": None, "minutes": 1440, "units": "Count"},
    # Amazon S3 CloudWatch Request metrics
    # NOTE(review): AWS documents BytesDownloaded/BytesUploaded in Bytes,
    # not Count -- harmless while "units" is unused, but verify before
    # relying on this field.
    "AllRequests": {"statistic": "Sum", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "GetRequests": {"statistic": "Sum", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "PutRequests": {"statistic": "Sum", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "DeleteRequests": {"statistic": "Sum", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "HeadRequests": {"statistic": "Sum", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "PostRequests": {"statistic": "Sum", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "ListRequests": {"statistic": "Sum", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "BytesDownloaded": {"statistic": "Average", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "BytesUploaded": {"statistic": "Average", "type": "float", "value": None, "minutes": 5, "units": "Count"},
    "4xxErrors": {"statistic": "Average", "type": "int", "value": None, "minutes": 5, "units": "Count"},
    "5xxErrors": {"statistic": "Average", "type": "int", "value": None, "minutes": 5, "units": "Count"},
    "FirstByteLatency": {"statistic": "Average", "type": "float", "value": None, "minutes": 5, "units": "Milliseconds"},
    "TotalRequestLatency": {"statistic": "Average", "type": "float", "value": None, "minutes": 5,
                            "units": "Milliseconds"},
}
# Resolve the boto region object for the requested region name,
# defaulting to eu-west-1 when -r was not supplied.
if options.region is None:
    options.region = 'eu-west-1'

region = None
for r in boto.ec2.cloudwatch.regions():
    if r.name == options.region:
        region = r
        break

# Fix: an unrecognized region previously fell through and crashed with a
# NameError on `region`; fail with a clear message and exit code instead.
if region is None:
    print("status err Unknown region '%s'" % options.region)
    exit(1)

conn = boto.ec2.cloudwatch.CloudWatchConnection(options.access_key, options.secret_key, region=region)
# Find the requested metric, query CloudWatch, format and print the value.
# Exits 0 on success, 1 on error or unknown metric.
for metric_name, values in metrics.items():
    if metric_name != options.metric:
        continue
    # Fix: `minutes` was assigned but unused while values.get('minutes')
    # was re-evaluated twice; hoist it once and use it consistently.
    minutes = values.get('minutes', 5)
    end = datetime.datetime.utcnow()
    # Daily storage metrics (1440-minute window) are only reported at
    # midnight, so align the query window to 00:00 rather than "now".
    if minutes == 1440:
        end = end.replace(hour=0, minute=0, second=0, microsecond=0)
    start = end - datetime.timedelta(minutes=minutes)
    try:
        res = conn.get_metric_statistics(60,
                                         start,
                                         end,
                                         metric_name,
                                         "AWS/S3",
                                         values.get('statistic'),
                                         {
                                             "BucketName": options.instance_id,
                                             "StorageType": options.storage_type
                                         }
                                         )
    except Exception as e:
        # Fix: only boto exceptions carry .error_message; fall back to the
        # exception itself so non-boto failures don't raise AttributeError.
        print("status err Error running s3_stats: %s" % getattr(e, 'error_message', e))
        exit(1)
    if len(res) > 0:
        # NOTE(review): boto does not guarantee datapoint ordering; res[-1]
        # is assumed to be the most recent sample -- verify if that matters.
        average = res[-1][values.get('statistic')]
    else:
        average = 0
    # Format according to the metric's declared type (see the table above).
    if values.get('type') == "float":
        metrics[metric_name]["value"] = "%.4f" % average
    if values.get('type') == "int" or values.get('type') == "boolean":
        metrics[metric_name]["value"] = "%i" % average
    print("%s" % (values.get('value')))
    exit(0)

# Fell through the loop: the metric name is not in the table above.
print("Unknown metric '%s'" % options.metric)
exit(1)
| 45.162162 | 120 | 0.55376 |
acea361e27cc6f2ced55e164d3386ed9d7bc0f2e | 40,649 | py | Python | zproject/settings.py | dehnert/zulip | f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5 | [
"Apache-2.0"
] | null | null | null | zproject/settings.py | dehnert/zulip | f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5 | [
"Apache-2.0"
] | null | null | null | zproject/settings.py | dehnert/zulip | f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
# Django settings for zulip project.
########################################################################
# Here's how settings for the Zulip project work:
#
# * settings.py contains non-site-specific and settings configuration
# for the Zulip Django app.
# * settings.py imports prod_settings.py, and any site-specific configuration
# belongs there. The template for prod_settings.py is prod_settings_template.py
#
# See http://zulip.readthedocs.io/en/latest/settings.html for more information
#
########################################################################
import os
import platform
import time
import sys
import six.moves.configparser
from zerver.lib.db import TimeTrackingConnection
import six
########################################################################
# INITIAL SETTINGS
########################################################################
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
config_file = six.moves.configparser.RawConfigParser()
config_file.read("/etc/zulip/zulip.conf")
# Whether this instance of Zulip is running in a production environment.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
DEVELOPMENT = not PRODUCTION
secrets_file = six.moves.configparser.RawConfigParser()
if PRODUCTION:
secrets_file.read("/etc/zulip/zulip-secrets.conf")
else:
secrets_file.read(os.path.join(DEPLOY_ROOT, "zproject/dev-secrets.conf"))
def get_secret(key):
    """Return the value of *key* from the [secrets] section of the
    secrets config file, or None when the key is absent."""
    if not secrets_file.has_option('secrets', key):
        return None
    return secrets_file.get('secrets', key)
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_secret("secret_key")
# A shared secret, used to authenticate different parts of the app to each other.
SHARED_SECRET = get_secret("shared_secret")
# We use this salt to hash a user's email into a filename for their user-uploaded
# avatar. If this salt is discovered, attackers will only be able to determine
# that the owner of an email account has uploaded an avatar to Zulip, which isn't
# the end of the world. Don't use the salt where there is more security exposure.
AVATAR_SALT = get_secret("avatar_salt")
# SERVER_GENERATION is used to track whether the server has been
# restarted for triggering browser clients to reload.
SERVER_GENERATION = int(time.time())
# DEBUG may already have been set by an earlier settings import; only
# default it (to "in development") when it has not been defined yet.
if 'DEBUG' not in globals():
    # Uncomment end of next line to test JS/CSS minification.
    DEBUG = DEVELOPMENT # and platform.node() != 'your-machine'
if DEBUG:
    # Hosts allowed to see debug toolbars / verbose error pages.
    INTERNAL_IPS = ('127.0.0.1',)
# Detect whether we're running as a queue worker (invoked as
# `manage.py process_queue ...`); this impacts the logging configuration.
if len(sys.argv) > 2 and sys.argv[0].endswith('manage.py') and sys.argv[1] == 'process_queue':
    IS_WORKER = True
else:
    IS_WORKER = False
# This is overridden in test_settings.py for the test suites
TEST_SUITE = False
# The new user tutorial is enabled by default, but disabled for client tests.
TUTORIAL_ENABLED = True
# Import variables like secrets from the prod_settings file
# Import prod_settings after determining the deployment/machine type
if PRODUCTION:
from .prod_settings import *
else:
from .dev_settings import *
########################################################################
# DEFAULT VALUES FOR SETTINGS
########################################################################
# For any settings that are not defined in prod_settings.py,
# we want to initialize them to sane default
DEFAULT_SETTINGS = {'TWITTER_CONSUMER_KEY': '',
'TWITTER_CONSUMER_SECRET': '',
'TWITTER_ACCESS_TOKEN_KEY': '',
'TWITTER_ACCESS_TOKEN_SECRET': '',
'EMAIL_GATEWAY_PATTERN': '',
'EMAIL_GATEWAY_EXAMPLE': '',
'EMAIL_GATEWAY_BOT': None,
'EMAIL_GATEWAY_LOGIN': None,
'EMAIL_GATEWAY_PASSWORD': None,
'EMAIL_GATEWAY_IMAP_SERVER': None,
'EMAIL_GATEWAY_IMAP_PORT': None,
'EMAIL_GATEWAY_IMAP_FOLDER': None,
'EMAIL_GATEWAY_EXTRA_PATTERN_HACK': None,
'S3_KEY': '',
'S3_SECRET_KEY': '',
'S3_AVATAR_BUCKET': '',
'LOCAL_UPLOADS_DIR': None,
'MAX_FILE_UPLOAD_SIZE': 25,
'ERROR_REPORTING': True,
'STAGING_ERROR_NOTIFICATIONS': False,
'EVENT_LOGS_ENABLED': False,
'SAVE_FRONTEND_STACKTRACES': False,
'JWT_AUTH_KEYS': {},
'NAME_CHANGES_DISABLED': False,
'DEPLOYMENT_ROLE_NAME': "",
'RABBITMQ_HOST': 'localhost',
'RABBITMQ_USERNAME': 'zulip',
'MEMCACHED_LOCATION': '127.0.0.1:11211',
'RATE_LIMITING': True,
'REDIS_HOST': '127.0.0.1',
'REDIS_PORT': 6379,
# The following bots only exist in non-VOYAGER installs
'ERROR_BOT': None,
'NEW_USER_BOT': None,
'NAGIOS_STAGING_SEND_BOT': None,
'NAGIOS_STAGING_RECEIVE_BOT': None,
'APNS_CERT_FILE': None,
'APNS_KEY_FILE': None,
'APNS_SANDBOX': True,
'ANDROID_GCM_API_KEY': None,
'INITIAL_PASSWORD_SALT': None,
'FEEDBACK_BOT': 'feedback@zulip.com',
'FEEDBACK_BOT_NAME': 'Zulip Feedback Bot',
'ADMINS': '',
'SHARE_THE_LOVE': False,
'INLINE_IMAGE_PREVIEW': True,
'CAMO_URI': '',
'ENABLE_FEEDBACK': PRODUCTION,
'SEND_MISSED_MESSAGE_EMAILS_AS_USER': False,
'SERVER_EMAIL': None,
'FEEDBACK_EMAIL': None,
'WELCOME_EMAIL_SENDER': None,
'EMAIL_DELIVERER_DISABLED': False,
'ENABLE_GRAVATAR': True,
'DEFAULT_AVATAR_URI': '/static/images/default-avatar.png',
'AUTH_LDAP_SERVER_URI': "",
'EXTERNAL_URI_SCHEME': "https://",
'ZULIP_COM': False,
'SHOW_OSS_ANNOUNCEMENT': False,
'REGISTER_LINK_DISABLED': False,
'LOGIN_LINK_DISABLED': False,
'ABOUT_LINK_DISABLED': False,
'CUSTOM_LOGO_URL': None,
'VERBOSE_SUPPORT_OFFERS': False,
'STATSD_HOST': '',
'OPEN_REALM_CREATION': False,
'REALMS_HAVE_SUBDOMAINS': False,
'SUBDOMAINS_HOMEPAGE': False,
'ROOT_SUBDOMAIN_ALIASES': ["www"],
'REMOTE_POSTGRES_HOST': '',
'REMOTE_POSTGRES_SSLMODE': '',
# Default GOOGLE_CLIENT_ID to the value needed for Android auth to work
'GOOGLE_CLIENT_ID': '835904834568-77mtr5mtmpgspj9b051del9i9r5t4g4n.apps.googleusercontent.com',
'SOCIAL_AUTH_GITHUB_KEY': None,
'SOCIAL_AUTH_GITHUB_ORG_NAME': None,
'SOCIAL_AUTH_GITHUB_TEAM_ID': None,
'DBX_APNS_CERT_FILE': None,
'DBX_APNS_KEY_FILE': None,
'PERSONAL_ZMIRROR_SERVER': None,
'EXTRA_INSTALLED_APPS': [],
'DEFAULT_NEW_REALM_STREAMS': ["social", "general", "zulip"],
'REALM_CREATION_LINK_VALIDITY_DAYS': 7,
'TERMS_OF_SERVICE': None,
'TOS_VERSION': None,
'SYSTEM_ONLY_REALMS': {"zulip.com"},
'FIRST_TIME_TOS_TEMPLATE': None,
'USING_PGROONGA': False,
'POST_MIGRATION_CACHE_FLUSHING': False,
'ENABLE_FILE_LINKS': False,
}
# Install each default only if prod_settings/dev_settings did not already
# define it.  At module scope, vars() is this module's namespace, so
# assigning into it creates real settings attributes.
for setting_name, setting_val in six.iteritems(DEFAULT_SETTINGS):
    if setting_name not in vars():
        vars()[setting_name] = setting_val
# Extend ALLOWED_HOSTS with localhost (needed to RPC to Tornado).
ALLOWED_HOSTS += ['127.0.0.1', 'localhost']
# These are the settings that we will check that the user has filled in for
# production deployments before starting the app. It consists of a series
# of pairs of (setting name, default value that it must be changed from)
REQUIRED_SETTINGS = [("EXTERNAL_HOST", "zulip.example.com"),
("ZULIP_ADMINISTRATOR", "zulip-admin@example.com"),
# SECRET_KEY doesn't really need to be here, in
# that we set it automatically, but just in
# case, it seems worth having in this list
("SECRET_KEY", ""),
("AUTHENTICATION_BACKENDS", ()),
("NOREPLY_EMAIL_ADDRESS", "noreply@example.com"),
("DEFAULT_FROM_EMAIL", "Zulip <zulip@example.com>"),
("ALLOWED_HOSTS", ["*", '127.0.0.1', 'localhost']),
]
if ADMINS == "":
ADMINS = (("Zulip Administrator", ZULIP_ADMINISTRATOR),)
MANAGERS = ADMINS
# Voyager is a production zulip server that is not zulip.com or
# staging.zulip.com VOYAGER is the standalone all-on-one-server
# production deployment model for based on the original Zulip
# ENTERPRISE implementation. We expect most users of the open source
# project will be using VOYAGER=True in production.
VOYAGER = PRODUCTION and not ZULIP_COM
########################################################################
# STANDARD DJANGO SETTINGS
########################################################################
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# The ID, as an integer, of the current site in the django_site database table.
# This is used so that application data can hook into specific site(s) and a
# single database can manage content for multiple sites.
#
# We set this site's domain to 'zulip.com' in populate_db.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
# this directory will be used to store logs for development environment
DEVELOPMENT_LOG_DIRECTORY = os.path.join(DEPLOY_ROOT, 'var', 'log')
# Make redirects work properly behind a reverse proxy
USE_X_FORWARDED_HOST = True
# List of callables that know how to import templates from various sources.
LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
if PRODUCTION:
# Template caching is a significant performance win in production.
LOADERS = [('django.template.loaders.cached.Loader', LOADERS)]
TEMPLATES = [
{
'BACKEND': 'zproject.jinja2.backends.Jinja2',
'DIRS': [
os.path.join(DEPLOY_ROOT, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'environment': 'zproject.jinja2.environment',
'extensions': [
'jinja2.ext.i18n',
'jinja2.ext.autoescape',
'pipeline.jinja2.PipelineExtension',
],
'context_processors': [
'zerver.context_processors.add_settings',
'zerver.context_processors.add_metrics',
'django.template.context_processors.i18n',
],
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(DEPLOY_ROOT, 'django_templates'),
],
'APP_DIRS': False,
'OPTIONS': {
'debug': DEBUG,
'loaders': LOADERS,
'context_processors': [
'zerver.context_processors.add_settings',
'zerver.context_processors.add_metrics',
],
},
},
]
MIDDLEWARE_CLASSES = (
# Our logging middleware should be the first middleware item.
'zerver.middleware.TagRequests',
'zerver.middleware.LogRequests',
'zerver.middleware.JsonErrorHandler',
'zerver.middleware.RateLimitMiddleware',
'zerver.middleware.FlushDisplayRecipientCache',
'django.middleware.common.CommonMiddleware',
'zerver.middleware.SessionHostDomainMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ANONYMOUS_USER_ID = None
AUTH_USER_MODEL = "zerver.UserProfile"
TEST_RUNNER = 'zerver.lib.test_runner.Runner'
ROOT_URLCONF = 'zproject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'zproject.wsgi.application'
# A site can include additional installed apps via the
# EXTRA_INSTALLED_APPS setting
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'confirmation',
'guardian',
'pipeline',
'zerver',
'social.apps.django_app.default',
]
if USING_PGROONGA:
INSTALLED_APPS += ['pgroonga']
INSTALLED_APPS += EXTRA_INSTALLED_APPS
ZILENCER_ENABLED = 'zilencer' in INSTALLED_APPS
# Base URL of the Tornado server
# We set it to None when running backend tests or populate_db.
# We override the port number when running frontend tests.
TORNADO_SERVER = 'http://127.0.0.1:9993'
RUNNING_INSIDE_TORNADO = False
########################################################################
# DATABASE CONFIGURATION
########################################################################
DATABASES = {"default": {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'zulip',
'USER': 'zulip',
'PASSWORD': '', # Authentication done via certificates
'HOST': '', # Host = '' => connect through a local socket
'SCHEMA': 'zulip',
'CONN_MAX_AGE': 600,
'OPTIONS': {
'connection_factory': TimeTrackingConnection
},
},
}
if DEVELOPMENT:
LOCAL_DATABASE_PASSWORD = get_secret("local_database_password")
DATABASES["default"].update({
'PASSWORD': LOCAL_DATABASE_PASSWORD,
'HOST': 'localhost'
})
elif REMOTE_POSTGRES_HOST != '':
DATABASES['default'].update({
'HOST': REMOTE_POSTGRES_HOST,
})
if get_secret("postgres_password") is not None:
DATABASES['default'].update({
'PASSWORD': get_secret("postgres_password"),
})
if REMOTE_POSTGRES_SSLMODE != '':
DATABASES['default']['OPTIONS']['sslmode'] = REMOTE_POSTGRES_SSLMODE
else:
DATABASES['default']['OPTIONS']['sslmode'] = 'verify-full'
if USING_PGROONGA:
# We need to have "pgroonga" schema before "pg_catalog" schema in
# the PostgreSQL search path, because "pgroonga" schema overrides
# the "@@" operator from "pg_catalog" schema, and "pg_catalog"
# schema is searched first if not specified in the search path.
# See also: http://www.postgresql.org/docs/current/static/runtime-config-client.html
pg_options = '-c search_path=%(SCHEMA)s,zulip,public,pgroonga,pg_catalog' % \
DATABASES['default']
DATABASES['default']['OPTIONS']['options'] = pg_options
########################################################################
# RABBITMQ CONFIGURATION
########################################################################
USING_RABBITMQ = True
RABBITMQ_PASSWORD = get_secret("rabbitmq_password")
########################################################################
# CACHING CONFIGURATION
########################################################################
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': MEMCACHED_LOCATION,
'TIMEOUT': 3600,
'OPTIONS': {
'verify_keys': True,
'tcp_nodelay': True,
'retry_timeout': 1,
}
},
'database': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'third_party_api_results',
# Basically never timeout. Setting to 0 isn't guaranteed
# to work, see https://code.djangoproject.com/ticket/9595
'TIMEOUT': 2000000000,
'OPTIONS': {
'MAX_ENTRIES': 100000000,
'CULL_FREQUENCY': 10,
}
},
}
########################################################################
# REDIS-BASED RATE LIMITING CONFIGURATION
########################################################################
RATE_LIMITING_RULES = [
(60, 100), # 100 requests max every minute
]
DEBUG_RATE_LIMITING = DEBUG
REDIS_PASSWORD = get_secret('redis_password')
########################################################################
# SECURITY SETTINGS
########################################################################
# Tell the browser to never send our cookies without encryption, e.g.
# when executing the initial http -> https redirect.
#
# Turn it off for local testing because we don't have SSL.
if PRODUCTION:
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
try:
# For get_updates hostname sharding.
domain = config_file.get('django', 'cookie_domain')
SESSION_COOKIE_DOMAIN = '.' + domain
CSRF_COOKIE_DOMAIN = '.' + domain
except six.moves.configparser.Error:
# Failing here is OK
pass
# Prevent Javascript from reading the CSRF token from cookies. Our code gets
# the token from the DOM, which means malicious code could too. But hiding the
# cookie will slow down some attackers.
CSRF_COOKIE_PATH = '/;HttpOnly'
CSRF_FAILURE_VIEW = 'zerver.middleware.csrf_failure'
if DEVELOPMENT:
# Use fast password hashing for creating testing users when not
# PRODUCTION. Saves a bunch of time.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher'
)
# Also we auto-generate passwords for the default users which you
# can query using ./manage.py print_initial_password
INITIAL_PASSWORD_SALT = get_secret("initial_password_salt")
########################################################################
# API/BOT SETTINGS
########################################################################
if "EXTERNAL_API_PATH" not in vars():
EXTERNAL_API_PATH = EXTERNAL_HOST + "/api"
EXTERNAL_API_URI = EXTERNAL_URI_SCHEME + EXTERNAL_API_PATH
SERVER_URI = EXTERNAL_URI_SCHEME + EXTERNAL_HOST
if "NAGIOS_BOT_HOST" not in vars():
NAGIOS_BOT_HOST = EXTERNAL_HOST
S3_KEY = get_secret("s3_key")
S3_SECRET_KEY = get_secret("s3_secret_key")
# GCM tokens are IP-whitelisted; if we deploy to additional
# servers you will need to explicitly add their IPs here:
# https://cloud.google.com/console/project/apps~zulip-android/apiui/credential
ANDROID_GCM_API_KEY = get_secret("android_gcm_api_key")
GOOGLE_OAUTH2_CLIENT_SECRET = get_secret('google_oauth2_client_secret')
DROPBOX_APP_KEY = get_secret("dropbox_app_key")
MAILCHIMP_API_KEY = get_secret("mailchimp_api_key")
# This comes from our mandrill accounts page
MANDRILL_API_KEY = get_secret("mandrill_api_key")
# Twitter API credentials
# Secrecy not required because its only used for R/O requests.
# Please don't make us go over our rate limit.
TWITTER_CONSUMER_KEY = get_secret("twitter_consumer_key")
TWITTER_CONSUMER_SECRET = get_secret("twitter_consumer_secret")
TWITTER_ACCESS_TOKEN_KEY = get_secret("twitter_access_token_key")
TWITTER_ACCESS_TOKEN_SECRET = get_secret("twitter_access_token_secret")
# These are the bots that Zulip sends automated messages as.
INTERNAL_BOTS = [{'var_name': 'NOTIFICATION_BOT',
'email_template': 'notification-bot@%s',
'name': 'Notification Bot'},
{'var_name': 'EMAIL_GATEWAY_BOT',
'email_template': 'emailgateway@%s',
'name': 'Email Gateway'},
{'var_name': 'NAGIOS_SEND_BOT',
'email_template': 'nagios-send-bot@%s',
'name': 'Nagios Send Bot'},
{'var_name': 'NAGIOS_RECEIVE_BOT',
'email_template': 'nagios-receive-bot@%s',
'name': 'Nagios Receive Bot'},
{'var_name': 'WELCOME_BOT',
'email_template': 'welcome-bot@%s',
'name': 'Welcome Bot'}]
if PRODUCTION:
INTERNAL_BOTS += [
{'var_name': 'NAGIOS_STAGING_SEND_BOT',
'email_template': 'nagios-staging-send-bot@%s',
'name': 'Nagios Staging Send Bot'},
{'var_name': 'NAGIOS_STAGING_RECEIVE_BOT',
'email_template': 'nagios-staging-receive-bot@%s',
'name': 'Nagios Staging Receive Bot'},
]
INTERNAL_BOT_DOMAIN = "zulip.com"
# Set the realm-specific bot email addresses: for each internal bot whose
# setting (e.g. NOTIFICATION_BOT) was not explicitly configured, derive an
# address from its template and INTERNAL_BOT_DOMAIN, writing the result
# into this module's namespace via vars().
for bot in INTERNAL_BOTS:
    if vars().get(bot['var_name']) is None:
        bot_email = bot['email_template'] % (INTERNAL_BOT_DOMAIN,)
        vars()[bot['var_name']] = bot_email
# Build a sample incoming-email address for display, once a gateway
# pattern has been configured.
if EMAIL_GATEWAY_PATTERN != "":
    EMAIL_GATEWAY_EXAMPLE = EMAIL_GATEWAY_PATTERN % ("support+abcdefg",)
DEPLOYMENT_ROLE_KEY = get_secret("deployment_role_key")
if PRODUCTION:
FEEDBACK_TARGET = "https://zulip.com/api"
else:
FEEDBACK_TARGET = "http://localhost:9991/api"
########################################################################
# STATSD CONFIGURATION
########################################################################
# Statsd is not super well supported; if you want to use it you'll need
# to set STATSD_HOST and STATSD_PREFIX.
if STATSD_HOST != '':
INSTALLED_APPS += ['django_statsd']
STATSD_PORT = 8125
STATSD_CLIENT = 'django_statsd.clients.normal'
########################################################################
# CAMO HTTPS CACHE CONFIGURATION
########################################################################
if CAMO_URI != '':
# This needs to be synced with the Camo installation
CAMO_KEY = get_secret("camo_key")
########################################################################
# STATIC CONTENT AND MINIFICATION SETTINGS
########################################################################
STATIC_URL = '/static/'
# ZulipStorage is a modified version of PipelineCachedStorage,
# and, like that class, it inserts a file hash into filenames
# to prevent the browser from using stale files from cache.
#
# Unlike PipelineStorage, it requires the files to exist in
# STATIC_ROOT even for dev servers. So we only use
# ZulipStorage when not DEBUG.
# This is the default behavior from Pipeline, but we set it
# here so that urls.py can read it.
PIPELINE_ENABLED = not DEBUG
if DEBUG:
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
if PIPELINE_ENABLED:
STATIC_ROOT = os.path.abspath('prod-static/serve')
else:
STATIC_ROOT = os.path.abspath('static/')
else:
STATICFILES_STORAGE = 'zerver.storage.ZulipStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'pipeline.finders.PipelineFinder',
)
if PRODUCTION:
STATIC_ROOT = '/home/zulip/prod-static'
else:
STATIC_ROOT = os.path.abspath('prod-static/serve')
LOCALE_PATHS = (os.path.join(STATIC_ROOT, 'locale'),)
# We want all temporary uploaded files to be stored on disk.
FILE_UPLOAD_MAX_MEMORY_SIZE = 0
STATICFILES_DIRS = ['static/']
STATIC_HEADER_FILE = 'zerver/static_header.txt'
# To use minified files in dev, set PIPELINE_ENABLED = True. For the full
# cache-busting behavior, you must also set DEBUG = False.
#
# You will need to run update-prod-static after changing
# static files.
PIPELINE = {
'PIPELINE_ENABLED': PIPELINE_ENABLED,
'CSS_COMPRESSOR': 'pipeline.compressors.yui.YUICompressor',
'YUI_BINARY': '/usr/bin/env yui-compressor',
'STYLESHEETS': {
# If you add a style here, please update stylesheets()
# in frontend_tests/zjsunit/output.js as needed.
'activity': {
'source_filenames': ('styles/activity.css',),
'output_filename': 'min/activity.css'
},
'portico': {
'source_filenames': (
'third/zocial/zocial.css',
'styles/portico.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/fonts.css',
),
'output_filename': 'min/portico.css'
},
# Two versions of the app CSS exist because of QTBUG-3467
'app-fontcompat': {
'source_filenames': (
'third/bootstrap-notify/css/bootstrap-notify.css',
'third/spectrum/spectrum.css',
'styles/components.css',
'styles/zulip.css',
'styles/settings.css',
'styles/subscriptions.css',
'styles/compose.css',
'styles/left-sidebar.css',
'styles/overlay.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/media.css',
# We don't want fonts.css on QtWebKit, so its omitted here
),
'output_filename': 'min/app-fontcompat.css'
},
'app': {
'source_filenames': (
'third/bootstrap-notify/css/bootstrap-notify.css',
'third/spectrum/spectrum.css',
'third/jquery-perfect-scrollbar/css/perfect-scrollbar.css',
'styles/components.css',
'styles/zulip.css',
'styles/settings.css',
'styles/subscriptions.css',
'styles/compose.css',
'styles/left-sidebar.css',
'styles/overlay.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/fonts.css',
'styles/media.css',
),
'output_filename': 'min/app.css'
},
'common': {
'source_filenames': (
'third/bootstrap/css/bootstrap.css',
'third/bootstrap/css/bootstrap-btn.css',
'third/bootstrap/css/bootstrap-responsive.css',
),
'output_filename': 'min/common.css'
},
},
'JAVASCRIPT': {},
}
JS_SPECS = {
'common': {
'source_filenames': (
'node_modules/jquery/dist/jquery.js',
'third/underscore/underscore.js',
'js/blueslip.js',
'third/bootstrap/js/bootstrap.js',
'js/common.js',
),
'output_filename': 'min/common.js'
},
'signup': {
'source_filenames': (
'js/signup.js',
'node_modules/jquery-validation/dist/jquery.validate.js',
),
'output_filename': 'min/signup.js'
},
'api': {
'source_filenames': ('js/api.js',),
'output_filename': 'min/api.js'
},
'app_debug': {
'source_filenames': ('js/debug.js',),
'output_filename': 'min/app_debug.js'
},
'app': {
'source_filenames': [
'third/bootstrap-notify/js/bootstrap-notify.js',
'third/html5-formdata/formdata.js',
'node_modules/jquery-validation/dist/jquery.validate.js',
'third/jquery-form/jquery.form.js',
'third/jquery-filedrop/jquery.filedrop.js',
'third/jquery-caret/jquery.caret.1.5.2.js',
'third/xdate/xdate.dev.js',
'third/spin/spin.js',
'third/jquery-mousewheel/jquery.mousewheel.js',
'third/jquery-throttle-debounce/jquery.ba-throttle-debounce.js',
'third/jquery-idle/jquery.idle.js',
'third/jquery-autosize/jquery.autosize.js',
'third/jquery-perfect-scrollbar/js/perfect-scrollbar.js',
'third/lazyload/lazyload.js',
'third/spectrum/spectrum.js',
'third/string-prototype-codepointat/codepointat.js',
'third/winchan/winchan.js',
'third/sockjs/sockjs-0.3.4.js',
'third/handlebars/handlebars.runtime.js',
'third/marked/lib/marked.js',
'templates/compiled.js',
'js/feature_flags.js',
'js/loading.js',
'js/util.js',
'js/dict.js',
'js/components.js',
'js/localstorage.js',
'js/channel.js',
'js/setup.js',
'js/unread_ui.js',
'js/muting.js',
'js/muting_ui.js',
'js/viewport.js',
'js/rows.js',
'js/people.js',
'js/unread.js',
'js/topic_list.js',
'js/pm_list.js',
'js/stream_list.js',
'js/filter.js',
'js/message_list_view.js',
'js/message_list.js',
'js/narrow.js',
'js/reload.js',
'js/compose_fade.js',
'js/fenced_code.js',
'js/echo.js',
'js/socket.js',
'js/compose.js',
'js/stream_color.js',
'js/admin.js',
'js/stream_data.js',
'js/subs.js',
'js/message_edit.js',
'js/condense.js',
'js/resize.js',
'js/floating_recipient_bar.js',
'js/ui.js',
'js/pointer.js',
'js/click_handlers.js',
'js/scroll_bar.js',
'js/gear_menu.js',
'js/copy_and_paste.js',
'js/popovers.js',
'js/typeahead_helper.js',
'js/search_suggestion.js',
'js/search.js',
'js/composebox_typeahead.js',
'js/navigate.js',
'js/hotkey.js',
'js/favicon.js',
'js/notifications.js',
'js/hashchange.js',
'js/invite.js',
'js/message_flags.js',
'js/alert_words.js',
'js/alert_words_ui.js',
'js/message_store.js',
'js/server_events.js',
'js/zulip.js',
'js/activity.js',
'js/colorspace.js',
'js/timerender.js',
'js/tutorial.js',
'js/templates.js',
'js/avatar.js',
'js/settings.js',
'js/tab_bar.js',
'js/emoji.js',
'js/referral.js',
'js/custom_markdown.js',
'js/bot_data.js',
# JS bundled by webpack is also included here if PIPELINE_ENABLED setting is true
],
'output_filename': 'min/app.js'
},
'activity': {
'source_filenames': (
'third/sorttable/sorttable.js',
),
'output_filename': 'min/activity.js'
},
# We also want to minify sockjs separately for the sockjs iframe transport
'sockjs': {
'source_filenames': ('third/sockjs/sockjs-0.3.4.js',),
'output_filename': 'min/sockjs-0.3.4.min.js'
},
}
if PIPELINE_ENABLED:
# This is also done in test_settings.py, see comment there..
JS_SPECS['app']['source_filenames'].append('js/bundle.js')
app_srcs = JS_SPECS['app']['source_filenames']
########################################################################
# LOGGING SETTINGS
########################################################################

# (setting_name, production_path) pairs; each pair is turned into a
# module-level setting by the vars() loop below.
ZULIP_PATHS = [
    ("SERVER_LOG_PATH", "/var/log/zulip/server.log"),
    ("ERROR_FILE_LOG_PATH", "/var/log/zulip/errors.log"),
    ("MANAGEMENT_LOG_PATH", "/var/log/zulip/manage.log"),
    ("WORKER_LOG_PATH", "/var/log/zulip/workers.log"),
    ("PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.pickle"),
    ("JSON_PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.json"),
    ("EMAIL_MIRROR_LOG_PATH", "/var/log/zulip/email_mirror.log"),
    ("EMAIL_DELIVERER_LOG_PATH", "/var/log/zulip/email-deliverer.log"),
    ("LDAP_SYNC_LOG_PATH", "/var/log/zulip/sync_ldap_user_data.log"),
    ("QUEUE_ERROR_DIR", "/var/log/zulip/queue_error"),
    ("STATS_DIR", "/home/zulip/stats"),
    ("DIGEST_LOG_PATH", "/var/log/zulip/digest.log"),
    ("ANALYTICS_LOG_PATH", "/var/log/zulip/analytics.log"),
]

# The Event log basically logs most significant database changes,
# which can be useful for debugging.
if EVENT_LOGS_ENABLED:
    ZULIP_PATHS.append(("EVENT_LOG_DIR", "/home/zulip/logs/event_log"))
else:
    EVENT_LOG_DIR = None

for (var, path) in ZULIP_PATHS:
    if DEVELOPMENT:
        # if DEVELOPMENT, store these files in the Zulip checkout
        path = os.path.join(DEVELOPMENT_LOG_DIRECTORY, os.path.basename(path))
        # only `JSON_PERSISTENT_QUEUE_FILENAME` will be stored in `var`
        if var == 'JSON_PERSISTENT_QUEUE_FILENAME':
            path = os.path.join(os.path.join(DEPLOY_ROOT, 'var'), os.path.basename(path))
    # Inject the resolved path as a module-level setting named by `var`.
    vars()[var] = path

ZULIP_WORKER_TEST_FILE = '/tmp/zulip-worker-test-file'

# Queue workers log to their own file; every other process logs to the
# main server log.
if IS_WORKER:
    FILE_LOG_PATH = WORKER_LOG_PATH
else:
    FILE_LOG_PATH = SERVER_LOG_PATH
# Django logging configuration (dictConfig schema version 1).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'default': {
            'format': '%(asctime)s %(levelname)-8s %(message)s'
        }
    },
    'filters': {
        'ZulipLimiter': {
            '()': 'zerver.lib.logging_util.ZulipLimiter',
        },
        'EmailLimiter': {
            '()': 'zerver.lib.logging_util.EmailLimiter',
        },
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'nop': {
            '()': 'zerver.lib.logging_util.ReturnTrue',
        },
        'require_really_deployed': {
            '()': 'zerver.lib.logging_util.RequireReallyDeployed',
        },
    },
    'handlers': {
        # Forwards rate-limited error reports to the Zulip admins realm.
        'zulip_admins': {
            'level': 'ERROR',
            'class': 'zerver.logging_handlers.AdminZulipHandler',
            # For testing the handler delete the next line
            'filters': ['ZulipLimiter', 'require_debug_false', 'require_really_deployed'],
            'formatter': 'default'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'default'
        },
        # WatchedFileHandler reopens the file if an external tool (logrotate)
        # moves it, so rotation does not lose log lines.
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.WatchedFileHandler',
            'formatter': 'default',
            'filename': FILE_LOG_PATH,
        },
        'errors_file': {
            'level': 'WARNING',
            'class': 'logging.handlers.WatchedFileHandler',
            'formatter': 'default',
            'filename': ERROR_FILE_LOG_PATH,
        },
    },
    'loggers': {
        '': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'django': {
            # Admin error reporting is only attached when ERROR_REPORTING is on.
            'handlers': (['zulip_admins'] if ERROR_REPORTING else [])
            + ['console', 'file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'zulip.requests': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'zulip.queue': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'WARNING',
            'propagate': False,
        },
        'zulip.management': {
            'handlers': ['file', 'errors_file'],
            'level': 'INFO',
            'propagate': False,
        },
        'requests': {
            'handlers': ['console', 'file', 'errors_file'],
            'level': 'WARNING',
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'handlers': ['file'],
            'propagate': False,
        },
        ## Uncomment the following to get all database queries logged to the console
        # 'django.db': {
        #     'handlers': ['console'],
        #     'level': 'DEBUG',
        #     'propagate': False,
        # },
    }
}

ACCOUNT_ACTIVATION_DAYS = 7

LOGIN_REDIRECT_URL = '/'

# Client-side polling timeout for get_events, in milliseconds.
# We configure this here so that the client test suite can override it.
# We already kill the connection server-side with heartbeat events,
# but it's good to have a safety. This value should be greater than
# (HEARTBEAT_MIN_FREQ_SECS + 10)
POLL_TIMEOUT = 90 * 1000

# iOS App IDs
ZULIP_IOS_APP_ID = 'com.zulip.Zulip'
DBX_IOS_APP_ID = 'com.dropbox.Zulip'
########################################################################
# SSO AND LDAP SETTINGS
########################################################################

USING_APACHE_SSO = ('zproject.backends.ZulipRemoteUserBackend' in AUTHENTICATION_BACKENDS)

# When the remote-user backend is the ONLY backend, send logged-out users
# straight to the SSO login flow.
if (len(AUTHENTICATION_BACKENDS) == 1 and
        AUTHENTICATION_BACKENDS[0] == "zproject.backends.ZulipRemoteUserBackend"):
    HOME_NOT_LOGGED_IN = "/accounts/login/sso"
    ONLY_SSO = True
else:
    HOME_NOT_LOGGED_IN = '/login'
    ONLY_SSO = False
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipDummyBackend',)

POPULATE_PROFILE_VIA_LDAP = bool(AUTH_LDAP_SERVER_URI)

# If LDAP is configured but not used for authentication, add the
# populator backend so profile data still syncs from LDAP.
if POPULATE_PROFILE_VIA_LDAP and \
        'zproject.backends.ZulipLDAPAuthBackend' not in AUTHENTICATION_BACKENDS:
    AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipLDAPUserPopulator',)
else:
    POPULATE_PROFILE_VIA_LDAP = 'zproject.backends.ZulipLDAPAuthBackend' in AUTHENTICATION_BACKENDS or POPULATE_PROFILE_VIA_LDAP

########################################################################
# GITHUB AUTHENTICATION SETTINGS
########################################################################
# SOCIAL_AUTH_GITHUB_KEY is set in /etc/zulip/settings.py
SOCIAL_AUTH_GITHUB_SECRET = get_secret('social_auth_github_secret')
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login/'
SOCIAL_AUTH_GITHUB_SCOPE = ['email']
# Org/team variants reuse the base GitHub OAuth app credentials.
SOCIAL_AUTH_GITHUB_ORG_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_ORG_SECRET = SOCIAL_AUTH_GITHUB_SECRET
SOCIAL_AUTH_GITHUB_TEAM_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_TEAM_SECRET = SOCIAL_AUTH_GITHUB_SECRET

########################################################################
# EMAIL SETTINGS
########################################################################
# If an email host is not specified, fail silently and gracefully
if not EMAIL_HOST and PRODUCTION:
    EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
elif DEVELOPMENT:
    # In the dev environment, emails are printed to the run-dev.py console.
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_PASSWORD = get_secret('email_password')
if EMAIL_GATEWAY_PASSWORD is None:
    EMAIL_GATEWAY_PASSWORD = get_secret('email_gateway_password')
if vars().get("AUTH_LDAP_BIND_PASSWORD") is None:
    AUTH_LDAP_BIND_PASSWORD = get_secret('auth_ldap_bind_password')

# Set the sender email address for Django traceback error reporting
if SERVER_EMAIL is None:
    SERVER_EMAIL = DEFAULT_FROM_EMAIL

########################################################################
# MISC SETTINGS
########################################################################
if PRODUCTION:
    # Filter out user data
    DEFAULT_EXCEPTION_REPORTER_FILTER = 'zerver.filters.ZulipExceptionReporterFilter'

# This is a debugging option only
PROFILE_ALL_REQUESTS = False

# Bots whose messages may cross realm boundaries.
CROSS_REALM_BOT_EMAILS = set(('feedback@zulip.com', 'notification-bot@zulip.com'))
| 37.568392 | 128 | 0.577653 |
acea36272b4191d3e3ad3dbd8d1a1902daa8c96e | 2,059 | py | Python | seriesWorksheetAutomation/createAndPrepareWorksheet.py | ItaloPussi/pythonProjects | 319475bb7d8f80a9648f18760d8b675b1f4a4151 | [
"MIT"
] | 1 | 2021-06-05T23:07:17.000Z | 2021-06-05T23:07:17.000Z | seriesWorksheetAutomation/createAndPrepareWorksheet.py | ItaloPussi/pythonProjects | 319475bb7d8f80a9648f18760d8b675b1f4a4151 | [
"MIT"
] | null | null | null | seriesWorksheetAutomation/createAndPrepareWorksheet.py | ItaloPussi/pythonProjects | 319475bb7d8f80a9648f18760d8b675b1f4a4151 | [
"MIT"
] | null | null | null | import openpyxl as xl
from openpyxl.styles import Font
from openpyxl.styles import PatternFill
from openpyxl.utils import get_column_letter
from openpyxl.formatting.rule import CellIsRule
def createWorkbook():
workbook = xl.Workbook()
return workbook
def populateWorksheetWithTemplate(ws):
ws["A1"] = 'Name:'
ws["B1"] = 'Situation:'
ws["C1"] = 'Last watched episode:'
ws["D1"] = 'Next episode date:'
ws["E1"] = 'Pending episodes:'
for cell in ws['1:1']:
cell.font = Font(color="000000", bold=True)
cell.fill = PatternFill(start_color="c0c0c0", end_color="c0c0c0", fill_type = "solid")
formatColumnWidthSize(ws)
addConditionalFormatting(ws)
def formatColumnWidthSize(ws):
    """Auto-fit each column's width to its longest cell value.

    Returns the worksheet so the call can be chained.
    """
    for col in ws.columns:
        max_length = 0
        column = get_column_letter(col[0].column)
        # Get the max_length of column by the greatest cell length.
        for cell in col:
            if cell.value is None:
                continue
            # BUG FIX: the original compared len(str(cell.value)) but then
            # assigned len(cell.value), which raised TypeError for non-string
            # cells (numbers, dates) and was silently swallowed by a bare
            # except — so such cells never influenced the width.
            cell_length = len(str(cell.value))
            if cell_length > max_length:
                max_length = cell_length
        adjusted_width = (max_length + 2) * 1.2
        ws.column_dimensions[column].width = adjusted_width
    return ws
def addConditionalFormatting(ws):
    """Colour-code status cells anywhere on the sheet:
    Canceled -> red, Renewed -> green, Finished -> grey.

    Returns the worksheet so the call can be chained.
    """
    full_range = 'A1:XFD1048576'  # the entire worksheet
    # (cell formula to match, fill colour, font colour) — order preserved so
    # rule priority matches the original implementation.
    status_styles = (
        ('"Canceled"', "FFC7CE", "9C0006"),
        ('"Renewed"', "C6EFCE", "006100"),
        ('"Finished"', "808080", "000000"),
    )
    for match_formula, fill_color, font_color in status_styles:
        rule = CellIsRule(
            operator='equal',
            formula=[match_formula],
            fill=PatternFill(start_color=fill_color, end_color=fill_color, fill_type="solid"),
            font=Font(color=font_color),
        )
        ws.conditional_formatting.add(full_range, rule)
    return ws
| 34.316667 | 135 | 0.6678 |
acea366ac81de8168fe18a07c4e262d25cf361ec | 871 | py | Python | src/opencmiss/importer/ply.py | OpenCMISS-Bindings/opencmiss.importer | 2ad540002eb0f1376f4042b84a1c2407e3a39620 | [
"Apache-2.0"
] | null | null | null | src/opencmiss/importer/ply.py | OpenCMISS-Bindings/opencmiss.importer | 2ad540002eb0f1376f4042b84a1c2407e3a39620 | [
"Apache-2.0"
] | null | null | null | src/opencmiss/importer/ply.py | OpenCMISS-Bindings/opencmiss.importer | 2ad540002eb0f1376f4042b84a1c2407e3a39620 | [
"Apache-2.0"
] | 1 | 2022-03-23T10:35:39.000Z | 2022-03-23T10:35:39.000Z | from opencmiss.importer.errors import OpenCMISSImportUnknownParameter
from opencmiss.importer.trimesh import import_data, import_data_into_region
def identifier():
    """Return the unique identifier string for this importer."""
    importer_id = "PLY"
    return importer_id
def parameters(parameter_name=None):
    """Describe this importer.

    With no argument, return the full parameter dict; with a parameter name,
    return just that entry, raising OpenCMISSImportUnknownParameter for an
    unknown name.
    """
    importer_parameters = {
        "version": "0.1.0",
        "id": identifier(),
        "title": "PLY",
        "description":
            "Polygon file format for 3D meshes.",
        "input": {
            "mimetype": "text/plain",
        },
        "output": {
            "mimetype": "text/x.vnd.abi.exf+plain",
        }
    }
    if parameter_name is None:
        return importer_parameters
    if parameter_name in importer_parameters:
        return importer_parameters[parameter_name]
    raise OpenCMISSImportUnknownParameter(f"Importer '{identifier()}' does not have parameter: {parameter_name}")
acea371245c75b386d27239db34c72e01b26c11f | 5,742 | py | Python | python/seldon_deploy_sdk/models/metric_spec.py | adriangonz/seldon-deploy-sdk | c5504838630a87053387cec57ec2e1e7251971e2 | [
"Apache-2.0"
] | 6 | 2021-02-18T14:37:54.000Z | 2022-01-13T13:27:43.000Z | python/seldon_deploy_sdk/models/metric_spec.py | adriangonz/seldon-deploy-sdk | c5504838630a87053387cec57ec2e1e7251971e2 | [
"Apache-2.0"
] | 14 | 2021-01-04T16:32:03.000Z | 2021-12-13T17:53:59.000Z | python/seldon_deploy_sdk/models/metric_spec.py | adriangonz/seldon-deploy-sdk | c5504838630a87053387cec57ec2e1e7251971e2 | [
"Apache-2.0"
] | 7 | 2021-03-17T09:05:55.000Z | 2022-01-05T10:39:56.000Z | # coding: utf-8
"""
Seldon Deploy API
API to interact and manage the lifecycle of your machine learning models deployed through Seldon Deploy. # noqa: E501
OpenAPI spec version: v1alpha1
Contact: hello@seldon.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MetricSpec(object):
    """Swagger model describing a single autoscaling metric source.

    NOTE: originally produced by the swagger code generator; this class keeps
    the same public interface (class attributes, properties and helpers).
    """

    # Attribute name -> declared attribute type.
    swagger_types = {
        'external': 'ExternalMetricSource',
        'object': 'ObjectMetricSource',
        'pods': 'PodsMetricSource',
        'resource': 'ResourceMetricSource',
        'type': 'MetricSourceType'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'external': 'external',
        'object': 'object',
        'pods': 'pods',
        'resource': 'resource',
        'type': 'type'
    }

    def __init__(self, external=None, object=None, pods=None, resource=None, type=None):  # noqa: E501
        """Initialise a MetricSpec, applying only the fields that were supplied."""
        self._external = None
        self._object = None
        self._pods = None
        self._resource = None
        self._type = None
        self.discriminator = None
        supplied = (
            ('external', external),
            ('object', object),
            ('pods', pods),
            ('resource', resource),
            ('type', type),
        )
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def external(self):
        """The ``external`` metric source of this MetricSpec (ExternalMetricSource)."""
        return self._external

    @external.setter
    def external(self, external):
        """Set the ``external`` metric source of this MetricSpec."""
        self._external = external

    @property
    def object(self):
        """The ``object`` metric source of this MetricSpec (ObjectMetricSource)."""
        return self._object

    @object.setter
    def object(self, object):
        """Set the ``object`` metric source of this MetricSpec."""
        self._object = object

    @property
    def pods(self):
        """The ``pods`` metric source of this MetricSpec (PodsMetricSource)."""
        return self._pods

    @pods.setter
    def pods(self, pods):
        """Set the ``pods`` metric source of this MetricSpec."""
        self._pods = pods

    @property
    def resource(self):
        """The ``resource`` metric source of this MetricSpec (ResourceMetricSource)."""
        return self._resource

    @resource.setter
    def resource(self, resource):
        """Set the ``resource`` metric source of this MetricSpec."""
        self._resource = resource

    @property
    def type(self):
        """The metric source ``type`` of this MetricSpec (MetricSourceType)."""
        return self._type

    @type.setter
    def type(self, type):
        """Set the metric source ``type`` of this MetricSpec."""
        self._type = type

    def to_dict(self):
        """Return the model properties as a dict."""
        def _serialize(value):
            # Convert nested swagger models; containers are handled one level
            # deep, mirroring the swagger-codegen template behaviour.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        result = {attr: _serialize(getattr(self, attr))
                  for attr in self.swagger_types}
        if issubclass(MetricSpec, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two MetricSpec instances are equal when all their fields match."""
        return isinstance(other, MetricSpec) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
acea37b2cfd312c9c3fa1514a5603f312ca19cdb | 183 | py | Python | hello/hello.py | Factotum8/github_action_helloworld | f622fef3c92c3e93dcdfba1498c6958737707669 | [
"MIT"
] | null | null | null | hello/hello.py | Factotum8/github_action_helloworld | f622fef3c92c3e93dcdfba1498c6958737707669 | [
"MIT"
] | null | null | null | hello/hello.py | Factotum8/github_action_helloworld | f622fef3c92c3e93dcdfba1498c6958737707669 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'У меня получилось!'
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0') | 16.636364 | 39 | 0.655738 |
acea38caa1924bed5bb0b44eda7575e4c43ee9a0 | 3,977 | py | Python | nemo_nowcast/workers/rotate_logs.py | douglatornell/nemo_nowcast | 1b9181c29eee34a83e34869d13a0c0bf607882fa | [
"Apache-2.0"
] | 1 | 2020-02-06T01:10:27.000Z | 2020-02-06T01:10:27.000Z | nemo_nowcast/workers/rotate_logs.py | douglatornell/nemo_nowcast | 1b9181c29eee34a83e34869d13a0c0bf607882fa | [
"Apache-2.0"
] | 3 | 2021-04-06T18:03:49.000Z | 2021-12-13T21:17:34.000Z | nemo_nowcast/workers/rotate_logs.py | 43ravens/NEMO_Nowcast | 1b9181c29eee34a83e34869d13a0c0bf607882fa | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2021 Doug Latornell, 43ravens
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEMO_Nowcast framework rotate_logs worker.
Iterate through the nowcast system logging handlers, calling the
:py:meth:`doRollover` method on any that are instances of
:py:class:`logging.handlers.RotatingFileHandler`.
This worker is normally launched in automation at the end of a nowcast
processing cycle (e.g. end of the day).
It can also be launched from the command-line by the nowcast administrator
as necessary for system maintenance.
"""
import logging
import logging.config
from pathlib import Path
from nemo_nowcast import NowcastWorker
from nemo_nowcast.fileutils import FilePerms
NAME = "rotate_logs"  # worker name; also used as logger_name in log records
logger = logging.getLogger(NAME)
def main():
    """Set up and run the worker.

    For command-line usage see:
    :command:`python -m nemo_nowcast.workers.rotate_logs --help`
    """
    rotate_logs_worker = NowcastWorker(
        NAME, description=__doc__, package="nemo_nowcast.workers"
    )
    rotate_logs_worker.init_cli()
    rotate_logs_worker.run(rotate_logs, success, failure)
def success(parsed_args):
    """Log that rotation succeeded and return the "success" message type.

    :arg parsed_args: Parsed command-line arguments (unused).
    """
    # logger_name is required because file system handlers get loaded in
    # rotate_logs()
    logger.info("log files rotated", extra={"logger_name": NAME})
    return "success"
def failure(parsed_args):
    """Log that rotation failed and return the "failure" message type.

    :arg parsed_args: Parsed command-line arguments (unused).
    """
    # logger_name is required because file system handlers get loaded in
    # rotate_logs()
    logger.critical("failed to rotate log files", extra={"logger_name": NAME})
    return "failure"
def rotate_logs(parsed_args, config, *args):
    """Rotate the nowcast system log files.

    Reconfigures logging (including the optional checklist handler for the
    aggregator), then calls :py:meth:`doRollover` on every rotating handler
    attached to the root and ``checklist`` loggers, and resets the new files'
    permissions to ``rw-rw-r--``.

    :arg parsed_args: Parsed command-line arguments (unused).
    :arg dict config: Nowcast system configuration.

    :returns: Checklist dict with the list of rotated log file paths.
    :rtype: dict
    """
    # logger_name is required because file system handlers get loaded below
    logger.info("rotating log files", extra={"logger_name": NAME})
    checklist = {"log files": []}
    checklist_logger = logging.getLogger("checklist")
    if "aggregator" in config["logging"]:
        pub_handlers = config["logging"]["publisher"]["handlers"]
        if "checklist" in pub_handlers:
            pub_loggers = config["logging"]["publisher"]["loggers"]
            config["logging"]["aggregator"]["handlers"]["checklist"] = pub_handlers[
                "checklist"
            ]
            try:
                # BUG FIX: the checklist logger config lives in the
                # publisher's "loggers" section, not its "handlers" section.
                # The old pub_handlers["loggers"]["checklist"] lookup always
                # raised KeyError, which sent control to the except branch and
                # replaced any existing aggregator "loggers" dict wholesale.
                config["logging"]["aggregator"]["loggers"].update(
                    {"checklist": pub_loggers["checklist"]}
                )
            except KeyError:
                # Aggregator config has no "loggers" section yet; create one.
                config["logging"]["aggregator"].update(
                    {"loggers": {"checklist": pub_loggers["checklist"]}}
                )
        logging.config.dictConfig(config["logging"]["aggregator"])
    for handler in logger.root.handlers + checklist_logger.handlers:
        # Handlers with a "when" attribute are timed rotating handlers that
        # roll over on their own schedule; leave them alone.
        if not hasattr(handler, "when"):
            try:
                handler.flush()
                handler.doRollover()
            except AttributeError:
                # Handler without a doRollover() method;
                # Probably a StreamHandler
                continue
            logger.info(
                f"log file rotated: {handler.baseFilename}", extra={"logger_name": NAME}
            )
            p = Path(handler.baseFilename)
            p.chmod(int(FilePerms(user="rw", group="rw", other="r")))
            logger.debug(
                f"new {handler.baseFilename} log file permissions set to rw-rw-r--",
                extra={"logger_name": NAME},
            )
            checklist["log files"].append(handler.baseFilename)
    return checklist
# Allow running the worker directly: python -m nemo_nowcast.workers.rotate_logs
if __name__ == "__main__":
    main()  # pragma: no cover
| 35.828829 | 88 | 0.657279 |
acea38d901a8c36b2ba15f8a1a75edd7af63d3b1 | 19,501 | py | Python | src/cryptoadvance/specter/server_endpoints/devices.py | Sh0ham/specter-desktop | fd7d7eddd191b79515a51183a5e52fee14241717 | [
"MIT"
] | 1 | 2022-01-14T02:46:03.000Z | 2022-01-14T02:46:03.000Z | src/cryptoadvance/specter/server_endpoints/devices.py | Sh0ham/specter-desktop | fd7d7eddd191b79515a51183a5e52fee14241717 | [
"MIT"
] | null | null | null | src/cryptoadvance/specter/server_endpoints/devices.py | Sh0ham/specter-desktop | fd7d7eddd191b79515a51183a5e52fee14241717 | [
"MIT"
] | null | null | null | import copy, random, json, re
from flask import (
Flask,
Blueprint,
render_template,
request,
redirect,
url_for,
jsonify,
flash,
)
from flask import current_app as app
from flask_babel import lazy_gettext as _
from flask_login import login_required, current_user
from mnemonic import Mnemonic
from ..devices.bitcoin_core import BitcoinCore
from ..helpers import is_testnet, generate_mnemonic, validate_mnemonic
from ..key import Key
from ..managers.device_manager import get_device_class
from ..managers.wallet_manager import purposes
from ..specter_error import handle_exception
rand = random.randint(0, 1e32) # to force style refresh
# Blueprint under which all of the device-related routes below are registered.
devices_endpoint = Blueprint("devices_endpoint", __name__)
################## New device #######################
# New device type
@devices_endpoint.route("/new_device_type/", methods=["GET", "POST"])
@login_required
def new_device_type():
    """Render the device-type picker page for adding a new device."""
    return render_template(
        "device/new_device/new_device_type.jinja",
        specter=app.specter,
        rand=rand,
    )
@devices_endpoint.route("/new_device_keys/<device_type>/", methods=["GET", "POST"])
@login_required
def new_device_keys(device_type):
    """Collect xpubs/derivation paths for a new (or existing) device.

    GET renders the key-entry form. POST either adds keys to an existing
    device (``existing_device`` form field) or creates a new device; for hot
    wallet device types ("bitcoincore"/"elementscore") the submitted mnemonic
    and derivation paths are used to derive the keys.
    """
    err = None
    mnemonic = ""
    passphrase = ""
    file_password = ""
    range_start = 0
    range_end = 1000
    existing_device = None
    if request.method == "POST":
        mnemonic = request.form.get("mnemonic", "")
        passphrase = request.form.get("passphrase", "")
        file_password = request.form.get("file_password", "")
        range_start = int(request.form.get("range_start", "0"))
        range_end = int(request.form.get("range_end", "1000"))
        existing_device = request.form.get("existing_device", None)
        if existing_device:
            device = app.specter.device_manager.get_by_alias(existing_device)
        else:
            # Creating a new device: the name must be non-empty and unique.
            device_name = request.form.get("device_name", "")
            if not device_name:
                err = _("Device name cannot be empty")
            elif device_name in app.specter.device_manager.devices_names:
                err = _("Device with this name already exists")
        # The form submits a dynamic table of xpub rows; xpubs_rows_count is
        # the index of the last row, hence the +1 for the iteration bound.
        xpubs_rows_count = int(request.form["xpubs_rows_count"]) + 1
        keys = []
        paths = []
        keys_purposes = []
        for i in range(0, xpubs_rows_count):
            purpose = request.form.get("xpubs-table-row-{}-purpose".format(i), "Custom")
            xpub = request.form.get("xpubs-table-row-{}-xpub-hidden".format(i), "-")
            path = request.form.get(
                "xpubs-table-row-{}-derivation-hidden".format(i), ""
            )
            if path != "":
                paths.append(path)
                keys_purposes.append(purpose)
            if xpub != "-":
                try:
                    keys.append(Key.parse_xpub(xpub, purpose=purpose))
                # NOTE(review): bare except silently maps any parse failure to
                # this message; and "\n".join(xpub) interleaves newlines
                # between the *characters* of the xpub string — probably meant
                # to show the xpub itself. Confirm before changing.
                except:
                    err = _("Failed to parse these xpubs") + ":\n" + "\n".join(xpub)
                    break
        if not keys and not err:
            # No parsed xpubs: only valid for hot-wallet device types, which
            # derive their keys from the mnemonic + derivation paths.
            if device_type in ["bitcoincore", "elementscore"]:
                if not paths:
                    err = _("No paths were specified, please provide at least one.")
                if err is None:
                    if existing_device:
                        device.add_hot_wallet_keys(
                            mnemonic,
                            passphrase,
                            paths,
                            file_password,
                            app.specter.wallet_manager,
                            is_testnet(app.specter.chain),
                            keys_range=[range_start, range_end],
                            keys_purposes=keys_purposes,
                        )
                        flash(_("{} keys were added successfully").format(len(paths)))
                        return redirect(
                            url_for(
                                "devices_endpoint.device", device_alias=device.alias
                            )
                        )
                    device = app.specter.device_manager.add_device(
                        name=device_name, device_type=device_type, keys=[]
                    )
                    try:
                        device.setup_device(file_password, app.specter.wallet_manager)
                        device.add_hot_wallet_keys(
                            mnemonic,
                            passphrase,
                            paths,
                            file_password,
                            app.specter.wallet_manager,
                            is_testnet(app.specter.chain),
                            keys_range=[range_start, range_end],
                            keys_purposes=keys_purposes,
                        )
                        flash(_("{} was added successfully!").format(device_name))
                        return redirect(
                            url_for(
                                "devices_endpoint.device", device_alias=device.alias
                            )
                            + "?newdevice=true"
                        )
                    except Exception as e:
                        handle_exception(e)
                        flash(
                            _("Failed to setup hot wallet. Error: {}").format(e),
                            "error",
                        )
                        # Roll back the half-created device on setup failure.
                        app.specter.device_manager.remove_device(
                            device,
                            app.specter.wallet_manager,
                            bitcoin_datadir=app.specter.bitcoin_datadir,
                            chain=app.specter.chain,
                        )
            else:
                err = _("xpubs list must not be empty")
        elif not err:
            # Cold-device path: keys were parsed from the submitted xpubs.
            if existing_device:
                device.add_keys(keys)
                flash(_("{} keys were added successfully").format(len(keys)))
                return redirect(
                    url_for("devices_endpoint.device", device_alias=device.alias)
                )
            device = app.specter.device_manager.add_device(
                name=device_name, device_type=device_type, keys=keys
            )
            if app.specter.is_liquid:
                # On Liquid, a master blinding key is collected next.
                return render_template(
                    "device/device_blinding_key.jinja",
                    new_device=True,
                    device=device,
                    error=err,
                    specter=app.specter,
                    rand=rand,
                )
            else:
                flash(_("{} was added successfully!").format(device_name))
                return redirect(
                    url_for("devices_endpoint.device", device_alias=device.alias)
                    + "?newdevice=true"
                )
    # GET, or POST with a validation error: (re-)render the key entry form.
    return render_template(
        "device/new_device/new_device_keys.jinja",
        device_class=get_device_class(device_type),
        mnemonic=mnemonic,
        passphrase=passphrase,
        file_password=file_password,
        range_start=range_start,
        range_end=range_end,
        existing_device=app.specter.device_manager.get_by_alias(existing_device)
        if existing_device
        else None,
        error=err,
        specter=app.specter,
        rand=rand,
    )
@devices_endpoint.route("/new_device_mnemonic/<device_type>/", methods=["GET", "POST"])
@login_required
def new_device_mnemonic(device_type):
    """Mnemonic-entry step of the hot-device setup flow.

    GET renders the form with a freshly generated 12-word mnemonic. A valid
    POST forwards the user to the key-derivation page; an invalid POST
    re-renders the form with the error.
    """
    err = None
    strength = 128
    mnemonic = generate_mnemonic(
        strength=strength, language_code=app.get_language_code()
    )
    existing_device = None
    if request.method == "POST":
        # BUG FIX: the checksum check below used to be a separate `if`, so an
        # invalid word count was always overwritten by the generic
        # "Invalid mnemonic entered." message; `elif` keeps the more specific
        # word-count error visible.
        if len(request.form["mnemonic"].split(" ")) not in [12, 15, 18, 21, 24]:
            err = _(
                "Invalid mnemonic entered: Must contain either: 12, 15, 18, 21, or 24 words."
            )
        elif not validate_mnemonic(words=request.form["mnemonic"]):
            err = _("Invalid mnemonic entered.")
        range_start = int(request.form["range_start"])
        range_end = int(request.form["range_end"])
        if range_start > range_end:
            err = _("Invalid address range selected.")
        mnemonic = request.form["mnemonic"]
        passphrase = request.form["passphrase"]
        file_password = request.form["file_password"]
        existing_device = request.form.get("existing_device", None)
        if existing_device:
            existing_device = app.specter.device_manager.get_by_alias(existing_device)
        if not err:
            # Validation passed: proceed to the key-derivation page.
            return render_template(
                "device/new_device/new_device_keys.jinja",
                device_class=get_device_class(device_type),
                mnemonic=mnemonic,
                passphrase=passphrase,
                file_password=file_password,
                range_start=range_start,
                range_end=range_end,
                existing_device=existing_device,
                error=err,
                specter=app.specter,
                rand=rand,
            )
    # GET, or POST with a validation error: (re-)render the mnemonic form.
    return render_template(
        "device/new_device/new_device_mnemonic.jinja",
        device_type=device_type,
        strength=strength,
        mnemonic=mnemonic,
        existing_device=existing_device,
        error=err,
        specter=app.specter,
        rand=rand,
    )
@devices_endpoint.route("/device_blinding_key/<device_alias>/", methods=["GET", "POST"])
@login_required
def device_blinding_key(device_alias):
    """Show or set the master blinding key for a (Liquid) device.

    GET renders the blinding-key form; POST stores the submitted key on the
    device and redirects back to the device page.
    """
    err = None
    try:
        device = app.specter.device_manager.get_by_alias(device_alias)
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # still propagate; any lookup failure means "device not found".
    except Exception:
        return render_template(
            "base.jinja", error=_("Device not found"), specter=app.specter, rand=rand
        )
    if not device:
        return redirect(url_for("index"))
    if request.method == "POST":
        new_device = request.form.get("new_device", False)
        blinding_key = request.form.get("blinding_key")
        device.set_blinding_key(blinding_key)
        if not new_device:
            flash(_("Master blinding key was added successfully"))
        return redirect(
            url_for("devices_endpoint.device", device_alias=device.alias)
            + ("?newdevice=true" if new_device else "")
        )
    return render_template(
        "device/device_blinding_key.jinja",
        device=device,
        error=err,
        specter=app.specter,
        rand=rand,
    )
# New device "manual" (deprecated)
@devices_endpoint.route("/new_device_manual/", methods=["GET", "POST"])
@login_required
def new_device_manual():
    """Deprecated all-in-one manual device creation page.

    Handles three form actions: "newcolddevice" (create from pasted xpubs),
    "newhotdevice" (create from a mnemonic + derivation paths), and
    "generatemnemonic" (regenerate the suggested mnemonic).
    """
    err = None
    device_type = ""
    device_name = ""
    xpubs = ""
    strength = 128
    mnemonic = generate_mnemonic(
        strength=strength, language_code=app.get_language_code()
    )
    if request.method == "POST":
        action = request.form["action"]
        device_type = request.form["device_type"]
        device_name = request.form["device_name"]
        if action == "newcolddevice":
            # Validate name, then parse the pasted xpubs; only create the
            # device when every check passed.
            if not device_name:
                err = _("Device name cannot be empty")
            elif device_name in app.specter.device_manager.devices_names:
                err = _("Device with this name already exists")
            xpubs = request.form["xpubs"]
            if not xpubs:
                err = _("xpubs name cannot be empty")
            keys, failed = Key.parse_xpubs(xpubs)
            if len(failed) > 0:
                err = _("Failed to parse these xpubs") + ":\n" + "\n".join(failed)
            if err is None:
                device = app.specter.device_manager.add_device(
                    name=device_name, device_type=device_type, keys=keys
                )
                return redirect(
                    url_for("devices_endpoint.device", device_alias=device.alias)
                )
        elif action == "newhotdevice":
            if not device_name:
                err = _("Device name cannot be empty")
            elif device_name in app.specter.device_manager.devices_names:
                err = _("Device with this name already exists")
            # NOTE(review): each later check overwrites err, so only the last
            # failing validation's message is shown to the user.
            if len(request.form["mnemonic"].split(" ")) not in [12, 15, 18, 21, 24]:
                err = _(
                    "Invalid mnemonic entered: Must contain either: 12, 15, 18, 21, or 24 words."
                )
            if not validate_mnemonic(words=request.form["mnemonic"]):
                err = _("Invalid mnemonic entered.")
            range_start = int(request.form["range_start"])
            range_end = int(request.form["range_end"])
            if range_start > range_end:
                err = _("Invalid address range selected.")
            if err is None:
                mnemonic = request.form["mnemonic"]
                # One derivation path per non-empty line of the textarea.
                paths = [
                    l.strip()
                    for l in request.form["derivation_paths"].split("\n")
                    if len(l) > 0
                ]
                passphrase = request.form["passphrase"]
                file_password = request.form["file_password"]
                device = app.specter.device_manager.add_device(
                    name=device_name, device_type=device_type, keys=[]
                )
                try:
                    device.setup_device(file_password, app.specter.wallet_manager)
                    device.add_hot_wallet_keys(
                        mnemonic,
                        passphrase,
                        paths,
                        file_password,
                        app.specter.wallet_manager,
                        is_testnet(app.specter.chain),
                        keys_range=[range_start, range_end],
                    )
                    return redirect(
                        url_for("devices_endpoint.device", device_alias=device.alias)
                    )
                except Exception as e:
                    handle_exception(e)
                    flash(_("Failed to setup hot wallet. Error: {}").format(e), "error")
                    # Roll back the half-created device on setup failure.
                    app.specter.device_manager.remove_device(
                        device,
                        app.specter.wallet_manager,
                        bitcoin_datadir=app.specter.bitcoin_datadir,
                        chain=app.specter.chain,
                    )
        elif action == "generatemnemonic":
            strength = int(request.form["strength"])
            mnemonic = generate_mnemonic(
                strength=strength, language_code=app.get_language_code()
            )
    # GET, unknown action, or a failed validation: (re-)render the form.
    return render_template(
        "device/new_device_manual.jinja",
        device_type=device_type,
        device_name=device_name,
        xpubs=xpubs,
        mnemonic=mnemonic,
        strength=strength,
        error=err,
        specter=app.specter,
        rand=rand,
    )
################## Device page #######################
@devices_endpoint.route("device/<device_alias>/", methods=["GET", "POST"])
@login_required
def device(device_alias):
    """Render and manage a single device page.

    GET shows the device; POST dispatches the management actions submitted
    from the page: "forget" (remove the device), "delete_key", "rename",
    "add_keys" and "settype". Destructive actions are refused while the
    device is still referenced by any wallet.
    """
    err = None
    try:
        device = app.specter.device_manager.get_by_alias(device_alias)
    except Exception:
        # Narrowed from a bare except: any lookup failure is treated as
        # "device not found", but KeyboardInterrupt/SystemExit now propagate.
        return render_template(
            "base.jinja", error=_("Device not found"), specter=app.specter, rand=rand
        )
    if not device:
        return redirect(url_for("index"))
    # Wallets currently using this device; needed to veto destructive actions.
    wallets = device.wallets(app.specter.wallet_manager)
    if request.method == "POST":
        action = request.form["action"]
        if action == "forget":
            if len(wallets) != 0:
                # TODO: Long message strings like this should be moved into a template.
                err = (
                    _(
                        "Device could not be removed since it is used in wallets: {}"
                    ).format([wallet.name for wallet in wallets])
                    + "<br>"
                    + _(
                        "You must delete those wallets before you can remove this device."
                    )
                    + "<br>"
                    + _("You can delete a wallet from its Settings -> Advanced page.")
                )
            else:
                app.specter.device_manager.remove_device(
                    device,
                    app.specter.wallet_manager,
                    bitcoin_datadir=app.specter.bitcoin_datadir,
                    chain=app.specter.chain,
                )
                return redirect("")
        elif action == "delete_key":
            key = Key.from_json({"original": request.form["key"]})
            # Only allow deleting a key that no wallet references.
            wallets_with_key = [w for w in wallets if key in w.keys]
            if len(wallets_with_key) != 0:
                # TODO: Long message strings like this should be moved into a template.
                err = (
                    _(
                        "Key could not be removed since it is used in wallets: {}"
                    ).format(", ".join([wallet.name for wallet in wallets_with_key]))
                    + "<br>"
                    + _("You must delete those wallets before you can remove this key.")
                    + "<br>"
                    + _("You can delete a wallet from its Settings -> Advanced page.")
                )
            else:
                device.remove_key(key)
        elif action == "rename":
            device_name = request.form["newtitle"]
            if not device_name:
                flash(_("Device name cannot be empty"), "error")
            elif device_name == device.name:
                pass
            elif device_name in app.specter.device_manager.devices_names:
                flash(_("Device already exists"), "error")
            else:
                device.rename(device_name)
        elif action == "add_keys":
            strength = 128
            mnemonic = generate_mnemonic(
                strength=strength, language_code=app.get_language_code()
            )
            if device.hot_wallet:
                return render_template(
                    "device/new_device/new_device_mnemonic.jinja",
                    mnemonic=mnemonic,
                    strength=strength,
                    existing_device=device,
                    device_alias=device_alias,
                    device_class=get_device_class(device.device_type),
                    specter=app.specter,
                    rand=rand,
                )
            else:
                return render_template(
                    "device/new_device/new_device_keys.jinja",
                    existing_device=device,
                    device_alias=device_alias,
                    device_class=get_device_class(device.device_type),
                    specter=app.specter,
                    rand=rand,
                )
        elif action == "settype":
            device_type = request.form["device_type"]
            device.set_type(device_type)
    # Work on a copy so the in-place key sort below does not mutate the
    # device object held by the device manager.
    device = copy.deepcopy(device)

    def sort_accounts(k):
        # Ordering: 1) chain 2) account 3) purpose
        pattern = r"^m\/([0-9]+)h\/([0-9])h\/([0-9]+)h"
        if not k.derivation:
            return 0
        match = re.search(pattern, k.derivation)
        if not match:
            return 0
        return (
            int(match.group(1))
            + (int(match.group(2)) + 1) * 1000000
            + (int(match.group(3)) + 1) * 2000
        )

    device.keys.sort(key=sort_accounts, reverse=False)
    return render_template(
        "device/device.jinja",
        device=device,
        device_alias=device_alias,
        purposes=purposes,
        wallets=wallets,
        error=err,
        specter=app.specter,
        rand=rand,
    )
| 39.316532 | 97 | 0.528075 |
acea3a269a828044e6d105bafa17e990fe34224f | 41,434 | py | Python | cime/scripts/lib/CIME/XML/env_batch.py | bwZh/E3SM | 9a439c9eea194f7a29ff1426bf6e297ed5db5a99 | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | cime/scripts/lib/CIME/XML/env_batch.py | bwZh/E3SM | 9a439c9eea194f7a29ff1426bf6e297ed5db5a99 | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | cime/scripts/lib/CIME/XML/env_batch.py | bwZh/E3SM | 9a439c9eea194f7a29ff1426bf6e297ed5db5a99 | [
"FTL",
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null | """
Interface to the env_batch.xml file. This class inherits from EnvBase
"""
from CIME.XML.standard_module_setup import *
from CIME.XML.env_base import EnvBase
from CIME.utils import transform_vars, get_cime_root, convert_to_seconds, get_cime_config, get_batch_script_for_job, get_logging_options
from collections import OrderedDict
import stat, re, math
logger = logging.getLogger(__name__)
# pragma pylint: disable=attribute-defined-outside-init
class EnvBatch(EnvBase):
def __init__(self, case_root=None, infile="env_batch.xml", read_only=False):
"""
initialize an object interface to file env_batch.xml in the case directory
"""
self._batchtype = None
# This arbitrary setting should always be overwritten
self._default_walltime = "00:20:00"
schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_batch.xsd")
super(EnvBatch,self).__init__(case_root, infile, schema=schema, read_only=read_only)
# pylint: disable=arguments-differ
def set_value(self, item, value, subgroup=None, ignore_type=False):
"""
Override the entry_id set_value function with some special cases for this class
"""
val = None
if item == "JOB_QUEUE":
expect(value in self._get_all_queue_names() or ignore_type,
"Unknown Job Queue specified use --force to set")
# allow the user to set item for all jobs if subgroup is not provided
if subgroup is None:
gnodes = self.get_children("group")
for gnode in gnodes:
node = self.get_optional_child("entry", {"id":item}, root=gnode)
if node is not None:
self._set_value(node, value, vid=item, ignore_type=ignore_type)
val = value
else:
group = self.get_optional_child("group", {"id":subgroup})
if group is not None:
node = self.get_optional_child("entry", {"id":item}, root=group)
if node is not None:
val = self._set_value(node, value, vid=item, ignore_type=ignore_type)
return val
    # pylint: disable=arguments-differ
    def get_value(self, item, attribute=None, resolved=True, subgroup=None):
        """
        Must default subgroup to something in order to provide single return value

        Looks for *item* first as a direct child element, then inside every
        <batch_system> element (the last match wins). BATCH_SYSTEM and
        PROJECT_REQUIRED are always delegated to the base-class entry lookup.
        NOTE(review): *subgroup* is accepted for signature compatibility but
        is not used in this override — confirm callers do not rely on it here.
        """
        value = None
        node = self.get_optional_child(item, attribute)
        if node is None:
            # this will take the last instance of item listed in all batch_system elements
            bs_nodes = self.get_children("batch_system")
            for bsnode in bs_nodes:
                cnode = self.get_optional_child(item, attribute, root=bsnode)
                if cnode is not None:
                    node = cnode
        if node is None or item in ("BATCH_SYSTEM", "PROJECT_REQUIRED"):
            # Fall back to the standard entry-based lookup in the base class.
            value = super(EnvBatch, self).get_value(item,attribute,resolved)
        else:
            value = self.text(node)
            if resolved:
                value = self.get_resolved_value(value)
        return value
def get_type_info(self, vid):
gnodes = self.get_children("group")
for gnode in gnodes:
nodes = self.get_children("entry",{"id":vid}, root=gnode)
type_info = None
for node in nodes:
new_type_info = self._get_type_info(node)
if type_info is None:
type_info = new_type_info
else:
expect( type_info == new_type_info,
"Inconsistent type_info for entry id={} {} {}".format(vid, new_type_info, type_info))
return type_info
def get_jobs(self):
groups = self.get_children("group")
results = []
for group in groups:
if self.get(group, "id") not in ["job_submission", "config_batch"]:
results.append(self.get(group, "id"))
return results
    def create_job_groups(self, batch_jobs, is_test):
        """Replace the generic job_submission group with one group per batch job.

        *batch_jobs* is a sequence of (name, settings-dict) pairs; *is_test*
        selects whether case.run or case.test is kept. The template children
        of the original group are copied into every new per-job group.
        Errors out (via get_child's err_msg) if the groups already exist.
        """
        # Subtle: in order to support dynamic batch jobs, we need to remove the
        # job_submission group and replace with job-based groups
        orig_group = self.get_child("group", {"id":"job_submission"},
                                    err_msg="Looks like job groups have already been created")
        orig_group_children = super(EnvBatch, self).get_children(root=orig_group)
        childnodes = []
        # Capture the template children (in reverse) before the group is removed.
        for child in reversed(orig_group_children):
            childnodes.append(child)
        self.remove_child(orig_group)
        for name, jdict in batch_jobs:
            if name == "case.run" and is_test:
                pass # skip
            elif name == "case.test" and not is_test:
                pass # skip
            elif name == "case.run.sh":
                pass # skip
            else:
                new_job_group = self.make_child("group", {"id":name})
                # Per-job settings become char-typed <entry> elements.
                for field in jdict.keys():
                    val = jdict[field]
                    node = self.make_child("entry", {"id":field,"value":val}, root=new_job_group)
                    self.make_child("type", root=node, text="char")
                for child in childnodes:
                    self.add_child(self.copy(child), root=new_job_group)
def cleanupnode(self, node):
if self.get(node, "id") == "batch_system":
fnode = self.get_child(name="file", root=node)
self.remove_child(fnode, root=node)
gnode = self.get_child(name="group", root=node)
self.remove_child(gnode, root=node)
vnode = self.get_optional_child(name="values", root=node)
if vnode is not None:
self.remove_child(vnode, root=node)
else:
node = super(EnvBatch, self).cleanupnode(node)
return node
    def set_batch_system(self, batchobj, batch_system_type=None):
        """Copy batch-system configuration from *batchobj* into this file.

        Machine-specific settings take precedence: any element (except
        directives) present in the machine node is removed from the
        batch_system node first, then both nodes are copied in.
        """
        if batch_system_type is not None:
            self.set_batch_system_type(batch_system_type)
        if batchobj.batch_system_node is not None and batchobj.machine_node is not None:
            # Remove batch_system entries that the machine node overrides.
            for node in batchobj.get_children("",root=batchobj.machine_node):
                name = self.name(node)
                if name != 'directives':
                    oldnode = batchobj.get_optional_child(name, root=batchobj.batch_system_node)
                    if oldnode is not None:
                        logger.debug( "Replacing {}".format(self.name(oldnode)))
                        batchobj.remove_child(oldnode, root=batchobj.batch_system_node)
        if batchobj.batch_system_node is not None:
            self.add_child(self.copy(batchobj.batch_system_node))
        if batchobj.machine_node is not None:
            self.add_child(self.copy(batchobj.machine_node))
        self.set_value("BATCH_SYSTEM", batch_system_type)
    def get_job_overrides(self, job, case):
        """Return a dict of per-job overrides used when rendering batch scripts.

        Job-specific specs from env_workflow take precedence; otherwise the
        case totals are used. The result always contains "mpirun";
        "total_tasks"/"num_nodes"/"tasks_per_node" (and optionally
        "thread_count"/"max_tasks_per_node") appear only when applicable.
        """
        env_workflow = case.get_env('workflow')
        total_tasks, num_nodes, tasks_per_node, thread_count = env_workflow.get_job_specs(job)
        overrides = {}
        if total_tasks:
            # The job defines its own task geometry.
            overrides["total_tasks"] = total_tasks
            overrides["num_nodes"] = num_nodes
            overrides["tasks_per_node"] = tasks_per_node
            if thread_count:
                overrides["thread_count"] = thread_count
        else:
            # Fall back to the case-wide totals.
            total_tasks = case.get_value("TOTALPES")*int(case.thread_count)
            thread_count = case.thread_count
        if int(total_tasks)*int(thread_count) < case.get_value("MAX_TASKS_PER_NODE"):
            overrides["max_tasks_per_node"] = int(total_tasks)
        overrides["mpirun"] = case.get_mpirun_cmd(job=job, overrides=overrides)
        return overrides
def make_batch_script(self, input_template, job, case, outfile=None):
expect(os.path.exists(input_template), "input file '{}' does not exist".format(input_template))
overrides = self.get_job_overrides(job, case)
ext = os.path.splitext(job)[-1]
if len(ext) == 0:
ext = job
if ext.startswith('.'):
ext = ext[1:]
overrides["job_id"] = ext + '.' + case.get_value("CASE")
if "pleiades" in case.get_value("MACH"):
# pleiades jobname needs to be limited to 15 chars
overrides["job_id"] = overrides["job_id"][:15]
overrides["batchdirectives"] = self.get_batch_directives(case, job, overrides=overrides)
output_text = transform_vars(open(input_template,"r").read(), case=case, subgroup=job, overrides=overrides)
output_name = get_batch_script_for_job(job) if outfile is None else outfile
logger.info("Creating file {}".format(output_name))
with open(output_name, "w") as fd:
fd.write(output_text)
# make sure batch script is exectuble
os.chmod(output_name, os.stat(output_name).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    def set_job_defaults(self, batch_jobs, case):
        """Choose and store JOB_QUEUE and JOB_WALLCLOCK_TIME for each batch job.

        For every job in *batch_jobs* known to env_workflow, pick a queue —
        honoring a user-forced queue/walltime when given — then derive a
        walltime consistent with that queue's limits and write both values
        into env_workflow. No-op when there is no batch system.
        """
        if self._batchtype is None:
            self._batchtype = self.get_batch_system_type()
        if self._batchtype == "none":
            return
        env_workflow = case.get_env('workflow')
        known_jobs = env_workflow.get_jobs()
        for job, jsect in batch_jobs:
            if job not in known_jobs:
                continue
            # User overrides take precedence over the job-section defaults.
            walltime = case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) if case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) else None
            force_queue = case.get_value("USER_REQUESTED_QUEUE", subgroup=job) if case.get_value("USER_REQUESTED_QUEUE", subgroup=job) else None
            logger.info("job is {} USER_REQUESTED_WALLTIME {} USER_REQUESTED_QUEUE {}".format(job, walltime, force_queue))
            task_count = int(jsect["task_count"]) if "task_count" in jsect else case.total_tasks
            walltime = jsect["walltime"] if ("walltime" in jsect and walltime is None) else walltime
            if "task_count" in jsect:
                # job is using custom task_count, need to compute a node_count based on this
                node_count = int(math.ceil(float(task_count)/float(case.tasks_per_node)))
            else:
                node_count = case.num_nodes
            if force_queue:
                # User insisted on a queue; warn (and adjust walltime for tests)
                # when the job does not fit its limits.
                if not self.queue_meets_spec(force_queue, node_count, task_count, walltime=walltime, job=job):
                    logger.warning("WARNING: User-requested queue '{}' does not meet requirements for job '{}'".format(force_queue, job))
                    if self.queue_meets_spec(force_queue, node_count, task_count, walltime=None, job=job):
                        if case.get_value("TEST"):
                            walltime = self.get_queue_specs(force_queue)[3]
                            logger.warning(" Using walltime '{}' instead".format(walltime))
                        else:
                            logger.warning(" Continuing with suspect walltime, batch submission may fail")
                queue = force_queue
            else:
                queue = self.select_best_queue(node_count, task_count, walltime=walltime, job=job)
                if queue is None and walltime is not None:
                    # Try to see if walltime was the holdup
                    queue = self.select_best_queue(node_count, task_count, walltime=None, job=job)
                    if queue is not None:
                        # It was, override the walltime if a test, otherwise just warn the user
                        new_walltime = self.get_queue_specs(queue)[3]
                        expect(new_walltime is not None, "Should never make it here")
                        logger.warning("WARNING: Requested walltime '{}' could not be matched by any queue".format(walltime))
                        if case.get_value("TEST"):
                            logger.warning(" Using walltime '{}' instead".format(new_walltime))
                            walltime = new_walltime
                        else:
                            logger.warning(" Continuing with suspect walltime, batch submission may fail")
                if queue is None:
                    logger.warning("WARNING: No queue on this system met the requirements for this job. Falling back to defaults")
                    default_queue_node = self.get_default_queue()
                    queue = self.text(default_queue_node)
                    walltime = self.get_queue_specs(queue)[3]
            specs = self.get_queue_specs(queue)
            if walltime is None:
                # Figure out walltime
                if specs is None:
                    # Queue is unknown, use specs from default queue
                    walltime = self.get(self.get_default_queue(), "walltimemax")
                else:
                    walltime = specs[3]
                walltime = self._default_walltime if walltime is None else walltime # last-chance fallback
            env_workflow.set_value("JOB_QUEUE", queue, subgroup=job, ignore_type=specs is None)
            env_workflow.set_value("JOB_WALLCLOCK_TIME", walltime, subgroup=job)
            logger.debug("Job {} queue {} walltime {}".format(job, queue, walltime))
def _match_attribs(self, attribs, case, queue):
# check for matches with case-vars
for attrib in attribs:
if attrib in ["default", "prefix"]:
# These are not used for matching
continue
elif attrib == "queue":
if not self._match(queue, attribs["queue"]):
return False
else:
val = case.get_value(attrib.upper())
expect(val is not None, "Cannot match attrib '%s', case has no value for it" % attrib.upper())
if not self._match(val, attribs[attrib]):
return False
return True
def _match(self, my_value, xml_value):
if xml_value.startswith("!"):
result = re.match(xml_value[1:],str(my_value)) is None
elif isinstance(my_value, bool):
if my_value: result = xml_value == "TRUE"
else: result = xml_value == "FALSE"
else:
result = re.match(xml_value,str(my_value)) is not None
logger.debug("(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result))
return result
    def get_batch_directives(self, case, job, overrides=None, output_format='default'):
        """Return the batch directive lines for *job* as a newline-joined string.

        Directives are collected from every <batch_system> element whose
        <directives> attributes match the case and the job's queue; each one
        is variable-expanded via transform_vars. output_format 'cylc' adapts
        the directives for cylc workflow definitions.
        """
        result = []
        directive_prefix = None
        roots = self.get_children("batch_system")
        queue = self.get_value("JOB_QUEUE", subgroup=job)
        if self._batchtype != "none" and not queue in self._get_all_queue_names():
            # Queue is not one we know about; use unknown_queue_directives or
            # fall back to the default queue below.
            unknown_queue = True
            qnode = self.get_default_queue()
            default_queue = self.text(qnode)
        else:
            unknown_queue = False
        for root in roots:
            if root is not None:
                if directive_prefix is None:
                    if output_format == 'default':
                        directive_prefix = self.get_element_text("batch_directive", root=root)
                    elif output_format == 'cylc':
                        directive_prefix = " "
                if unknown_queue:
                    unknown_queue_directives = self.get_element_text("unknown_queue_directives",
                                                                     root=root)
                    if unknown_queue_directives is None:
                        queue = default_queue
                    else:
                        queue = unknown_queue_directives
                dnodes = self.get_children("directives", root=root)
                for dnode in dnodes:
                    nodes = self.get_children("directive", root=dnode)
                    if self._match_attribs(self.attrib(dnode), case, queue):
                        for node in nodes:
                            directive = self.get_resolved_value("" if self.text(node) is None else self.text(node))
                            if output_format == 'cylc':
                                if self._batchtype == 'pbs':
                                    # cylc includes the -N itself, no need to add
                                    if directive.startswith("-N"):
                                        directive=''
                                        continue
                                    # Convert "-X value" into cylc's "-X = value" form.
                                    m = re.match(r'\s*(-[\w])', directive)
                                    if m:
                                        directive = re.sub(r'(-[\w]) ','{} = '.format(m.group(1)), directive)
                            default = self.get(node, "default")
                            if default is None:
                                directive = transform_vars(directive, case=case, subgroup=job, default=default, overrides=overrides)
                            else:
                                directive = transform_vars(directive, default=default)
                            custom_prefix = self.get(node, "prefix")
                            prefix = directive_prefix if custom_prefix is None else custom_prefix
                            result.append("{}{}".format("" if not prefix else (prefix + " "), directive))
        return "\n".join(result)
    def get_submit_args(self, case, job):
        '''
        Build and return the submit-argument string for *job* from the
        <submit_args>/<arg> elements of every batch_system node.
        (Note: despite the old comment, this returns a single string, not
        a list of tuples.)
        '''
        submitargs = " "
        bs_nodes = self.get_children("batch_system")
        submit_arg_nodes = []
        for node in bs_nodes:
            sanode = self.get_optional_child("submit_args", root=node)
            if sanode is not None:
                submit_arg_nodes += self.get_children("arg",root=sanode)
        for arg in submit_arg_nodes:
            flag = self.get(arg, "flag")
            name = self.get(arg, "name")
            if self._batchtype == "cobalt" and job == "case.st_archive":
                # Special-case cobalt's st_archive: force task_count and drop --mode.
                if flag == "-n":
                    name = 'task_count'
                if flag == "--mode":
                    continue
            if name is None:
                # Flag with no value attached.
                submitargs+=" {}".format(flag)
            else:
                if name.startswith("$"):
                    name = name[1:]
                if '$' in name:
                    # We have a complex expression and must rely on get_resolved_value.
                    # Hopefully, none of the values require subgroup
                    val = case.get_resolved_value(name)
                else:
                    val = case.get_value(name, subgroup=job)
                if val is not None and len(str(val)) > 0 and val != "None":
                    # Try to evaluate val if it contains any whitespace
                    # SECURITY NOTE(review): eval() on XML-derived config —
                    # acceptable only because the XML is trusted local input.
                    if " " in val:
                        try:
                            rval = eval(val)
                        except Exception:
                            rval = val
                    else:
                        rval = val
                    # need a correction for tasks per node
                    if flag == "-n" and rval<= 0:
                        rval = 1
                    if flag == "-q" and rval == "batch" and case.get_value("MACH") == "blues":
                        # Special case. Do not provide '-q batch' for blues
                        continue
                    # Flags ending in "=" or ":" join their value without a space.
                    if flag.rfind("=", len(flag)-1, len(flag)) >= 0 or\
                       flag.rfind(":", len(flag)-1, len(flag)) >= 0:
                        submitargs+=" {}{}".format(flag,str(rval).strip())
                    else:
                        submitargs+=" {} {}".format(flag,str(rval).strip())
        return submitargs
    def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None, skip_pnl=False,
                    allow_fail=False, resubmit_immediate=False, mail_user=None, mail_type=None,
                    batch_args=None, dry_run=False):
        """Submit the case's batch jobs, wiring up inter-job dependencies.

        Starting from *job* (or the first job), each job whose prereq
        expression evaluates true is submitted with dependencies on its
        declared predecessors, any *user_prereq*, and the previous resubmit
        cycle. With resubmit_immediate, RESUBMIT+1 cycles are chained now.
        Returns a list of (job, command) pairs when dry_run, else an
        OrderedDict of job -> batch job id.
        """
        env_workflow = case.get_env('workflow')
        external_workflow = case.get_value("EXTERNAL_WORKFLOW")
        alljobs = env_workflow.get_jobs()
        # Only consider jobs whose batch script actually exists in the case dir.
        alljobs = [j for j in alljobs
                   if os.path.isfile(os.path.join(self._caseroot,get_batch_script_for_job(j)))]
        startindex = 0
        jobs = []
        firstjob = job
        if job is not None:
            expect(job in alljobs, "Do not know about batch job {}".format(job))
            startindex = alljobs.index(job)
        for index, job in enumerate(alljobs):
            logger.debug( "Index {:d} job {} startindex {:d}".format(index, job, startindex))
            if index < startindex:
                continue
            try:
                prereq = env_workflow.get_value('prereq', subgroup=job, resolved=False)
                if external_workflow or prereq is None or job == firstjob or (dry_run and prereq == "$BUILD_COMPLETE"):
                    prereq = True
                else:
                    # The prereq entry is a python expression over case values.
                    prereq = case.get_resolved_value(prereq)
                    prereq = eval(prereq)
            except Exception:
                expect(False,"Unable to evaluate prereq expression '{}' for job '{}'".format(self.get_value('prereq',subgroup=job), job))
            if prereq:
                jobs.append((job, env_workflow.get_value('dependency', subgroup=job)))
            if self._batchtype == "cobalt":
                # Cobalt chains jobs itself; only the first job is submitted here.
                break
        depid = OrderedDict()
        jobcmds = []
        if resubmit_immediate:
            num_submit = case.get_value("RESUBMIT") + 1
            case.set_value("RESUBMIT", 0)
            if num_submit <= 0:
                num_submit = 1
        else:
            num_submit = 1
        prev_job = None
        batch_job_id = None
        for _ in range(num_submit):
            for job, dependency in jobs:
                if dependency is not None:
                    deps = dependency.split()
                else:
                    deps = []
                dep_jobs = []
                if user_prereq is not None:
                    dep_jobs.append(user_prereq)
                # Translate declared dependency names into already-submitted ids.
                for dep in deps:
                    if dep in depid.keys() and depid[dep] is not None:
                        dep_jobs.append(str(depid[dep]))
                if prev_job is not None:
                    dep_jobs.append(prev_job)
                logger.debug("job {} depends on {}".format(job, dep_jobs))
                result = self._submit_single_job(case, job,
                                                 skip_pnl=skip_pnl,
                                                 resubmit_immediate=resubmit_immediate,
                                                 dep_jobs=dep_jobs,
                                                 allow_fail=allow_fail,
                                                 no_batch=no_batch,
                                                 mail_user=mail_user,
                                                 mail_type=mail_type,
                                                 batch_args=batch_args,
                                                 dry_run=dry_run)
                batch_job_id = str(alljobs.index(job)) if dry_run else result
                depid[job] = batch_job_id
                jobcmds.append( (job, result) )
                if self._batchtype == "cobalt" or external_workflow:
                    break
            if not external_workflow and not no_batch:
                expect(batch_job_id, "No result from jobs {}".format(jobs))
                prev_job = batch_job_id
        if dry_run:
            return jobcmds
        else:
            return depid
@staticmethod
def _get_supported_args(job, no_batch):
"""
Returns a map of the supported parameters and their arguments to the given script
TODO: Maybe let each script define this somewhere?
>>> EnvBatch._get_supported_args("", False)
{}
>>> EnvBatch._get_supported_args("case.test", False)
{'skip_pnl': '--skip-preview-namelist'}
>>> EnvBatch._get_supported_args("case.st_archive", True)
{'resubmit': '--resubmit'}
"""
supported = {}
if job in ["case.run", "case.test"]:
supported["skip_pnl"] = "--skip-preview-namelist"
if job == "case.run":
supported["set_continue_run"] = "--completion-sets-continue-run"
if job in ["case.st_archive", "case.run"]:
if job == "case.st_archive" and no_batch:
supported["resubmit"] = "--resubmit"
else:
supported["submit_resubmits"] = "--resubmit"
return supported
@staticmethod
def _build_run_args(job, no_batch, **run_args):
"""
Returns a map of the filtered parameters for the given script,
as well as the values passed and the equivalent arguments for calling the script
>>> EnvBatch._build_run_args("case.run", False, skip_pnl=True, cthulu="f'taghn")
{'skip_pnl': (True, '--skip-preview-namelist')}
>>> EnvBatch._build_run_args("case.run", False, skip_pnl=False, cthulu="f'taghn")
{}
"""
supported_args = EnvBatch._get_supported_args(job, no_batch)
args = {}
for arg_name, arg_value in run_args.items():
if arg_value and (arg_name in supported_args.keys()):
args[arg_name] = (arg_value, supported_args[arg_name])
return args
    def _build_run_args_str(self, job, no_batch, **run_args):
        """
        Returns a string of the filtered arguments for the given script,
        based on the arguments passed

        Logging options are appended, and when the batch system defines a
        batch_env flag the whole argument string is wrapped so the scheduler
        exports it to the script as ARGS_FOR_SCRIPT.
        """
        args = self._build_run_args(job, no_batch, **run_args)
        run_args_str = " ".join(param for _, param in args.values())
        logging_options = get_logging_options()
        if logging_options:
            run_args_str += " {}".format(logging_options)
        batch_env_flag = self.get_value("batch_env", subgroup=None)
        if not batch_env_flag:
            return run_args_str
        elif len(run_args_str) > 0:
            batch_system = self.get_value("BATCH_SYSTEM", subgroup=None)
            logger.debug("batch_system: {}: ".format(batch_system))
            # lsf needs the "all," prefix and double quotes; others single-quote.
            if batch_system == "lsf":
                return "{} \"all, ARGS_FOR_SCRIPT={}\"".format(batch_env_flag, run_args_str)
            else:
                return "{} ARGS_FOR_SCRIPT='{}'".format(batch_env_flag, run_args_str)
        else:
            return ""
    def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False,
                           no_batch=False, skip_pnl=False, mail_user=None, mail_type=None,
                           batch_args=None, dry_run=False, resubmit_immediate=False):
        """Submit (or directly run) a single job.

        Without a batch system (or with no_batch) the job script/method is
        executed inline and None is returned. Otherwise the submit command is
        assembled from submit args, dependency strings, and mail options;
        dry_run returns the command string, a real run returns the job id.
        """
        if not dry_run:
            logger.warning("Submit job {}".format(job))
        batch_system = self.get_value("BATCH_SYSTEM", subgroup=None)
        if batch_system is None or batch_system == "none" or no_batch:
            # No scheduler: run the job in-process (case method) or as a script.
            logger.info("Starting job script {}".format(job))
            function_name = job.replace(".", "_")
            job_name = "."+job
            if not dry_run:
                args = self._build_run_args(job, True, skip_pnl=skip_pnl, set_continue_run=resubmit_immediate,
                                            submit_resubmits=not resubmit_immediate)
                try:
                    if hasattr(case, function_name):
                        getattr(case, function_name)(**{k: v for k, (v, _) in args.items()})
                    else:
                        expect(os.path.isfile(job_name),"Could not find file {}".format(job_name))
                        run_cmd_no_fail(os.path.join(self._caseroot,job_name), combine_output=True, verbose=True, from_dir=self._caseroot)
                except Exception as e:
                    # We don't want exception from the run phases getting into submit phase
                    logger.warning("Exception from {}: {}".format(function_name, str(e)))
            return
        submitargs = self.get_submit_args(case, job)
        args_override = self.get_value("BATCH_COMMAND_FLAGS", subgroup=job)
        if args_override:
            submitargs = args_override
        if dep_jobs is not None and len(dep_jobs) > 0:
            # Build the scheduler dependency clause from the prerequisite ids.
            logger.debug("dependencies: {}".format(dep_jobs))
            if allow_fail:
                dep_string = self.get_value("depend_allow_string", subgroup=None)
                if dep_string is None:
                    logger.warning("'depend_allow_string' is not defined for this batch system, " +
                                   "falling back to the 'depend_string'")
                    dep_string = self.get_value("depend_string", subgroup=None)
            else:
                dep_string = self.get_value("depend_string", subgroup=None)
            expect(dep_string is not None, "'depend_string' is not defined for this batch system")
            separator_string = self.get_value("depend_separator", subgroup=None)
            expect(separator_string is not None,"depend_separator string not defined")
            expect("jobid" in dep_string, "depend_string is missing jobid for prerequisite jobs")
            dep_ids_str = str(dep_jobs[0])
            for dep_id in dep_jobs[1:]:
                dep_ids_str += separator_string + str(dep_id)
            dep_string = dep_string.replace("jobid",dep_ids_str.strip()) # pylint: disable=maybe-no-member
            submitargs += " " + dep_string
        if batch_args is not None:
            submitargs += " " + batch_args
        cime_config = get_cime_config()
        # Mail settings: explicit arguments win, then cime config, then XML default.
        if mail_user is None and cime_config.has_option("main", "MAIL_USER"):
            mail_user = cime_config.get("main", "MAIL_USER")
        if mail_user is not None:
            mail_user_flag = self.get_value('batch_mail_flag', subgroup=None)
            if mail_user_flag is not None:
                submitargs += " " + mail_user_flag + " " + mail_user
        if mail_type is None:
            if job == "case.test" and cime_config.has_option("create_test", "MAIL_TYPE"):
                mail_type = cime_config.get("create_test", "MAIL_TYPE")
            elif cime_config.has_option("main", "MAIL_TYPE"):
                mail_type = cime_config.get("main", "MAIL_TYPE")
            else:
                mail_type = self.get_value("batch_mail_default")
            if mail_type:
                mail_type = mail_type.split(",") # pylint: disable=no-member
        if mail_type:
            mail_type_flag = self.get_value("batch_mail_type_flag", subgroup=None)
            if mail_type_flag is not None:
                mail_type_args = []
                for indv_type in mail_type:
                    mail_type_arg = self.get_batch_mail_type(indv_type)
                    mail_type_args.append(mail_type_arg)
                if mail_type_flag == "-m":
                    # hacky, PBS-type systems pass multiple mail-types differently
                    submitargs += " {} {}".format(mail_type_flag, "".join(mail_type_args))
                else:
                    submitargs += " {} {}".format(mail_type_flag, " {} ".format(mail_type_flag).join(mail_type_args))
        batchsubmit = self.get_value("batch_submit", subgroup=None)
        expect(batchsubmit is not None,
               "Unable to determine the correct command for batch submission.")
        batchredirect = self.get_value("batch_redirect", subgroup=None)
        batch_env_flag = self.get_value("batch_env", subgroup=None)
        run_args = self._build_run_args_str(job, False, skip_pnl=skip_pnl, set_continue_run=resubmit_immediate,
                                            submit_resubmits=not resubmit_immediate)
        # Argument ordering differs per batch system / env-flag support.
        if batch_system == 'lsf':
            sequence = (run_args, batchsubmit, submitargs, batchredirect, get_batch_script_for_job(job))
        elif batch_env_flag:
            sequence = (batchsubmit, submitargs, run_args, batchredirect, get_batch_script_for_job(job))
        else:
            sequence = (batchsubmit, submitargs, batchredirect, get_batch_script_for_job(job), run_args)
        submitcmd = " ".join(s.strip() for s in sequence if s is not None)
        if dry_run:
            return submitcmd
        else:
            logger.info("Submitting job script {}".format(submitcmd))
            output = run_cmd_no_fail(submitcmd, combine_output=True)
            jobid = self.get_job_id(output)
            logger.info("Submitted job id is {}".format(jobid))
            return jobid
def get_batch_mail_type(self, mail_type):
raw = self.get_value("batch_mail_type", subgroup=None)
mail_types = [item.strip() for item in raw.split(",")] # pylint: disable=no-member
idx = ["never", "all", "begin", "end", "fail"].index(mail_type)
return mail_types[idx] if idx < len(mail_types) else None
def get_batch_system_type(self):
nodes = self.get_children("batch_system")
for node in nodes:
type_ = self.get(node, "type")
if type_ is not None:
self._batchtype = type_
return self._batchtype
def set_batch_system_type(self, batchtype):
self._batchtype = batchtype
def get_job_id(self, output):
jobid_pattern = self.get_value("jobid_pattern", subgroup=None)
expect(jobid_pattern is not None, "Could not find jobid_pattern in env_batch.xml")
search_match = re.search(jobid_pattern, output)
expect(search_match is not None,
"Couldn't match jobid_pattern '{}' within submit output:\n '{}'".format(jobid_pattern, output))
jobid = search_match.group(1)
return jobid
def queue_meets_spec(self, queue, num_nodes, num_tasks, walltime=None, job=None):
specs = self.get_queue_specs(queue)
if specs is None:
logger.warning("WARNING: queue '{}' is unknown to this system".format(queue))
return True
nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict = specs
# A job name match automatically meets spec
if job is not None and jobname is not None:
return jobname == job
if nodemin is not None and num_nodes < nodemin or \
nodemax is not None and num_nodes > nodemax or \
jobmin is not None and num_tasks < jobmin or \
jobmax is not None and num_tasks > jobmax:
return False
if walltime is not None and walltimemax is not None and strict:
walltime_s = convert_to_seconds(walltime)
walltimemax_s = convert_to_seconds(walltimemax)
if walltime_s > walltimemax_s:
return False
return True
def _get_all_queue_names(self):
all_queues = []
all_queues = self.get_all_queues()
# Default queue needs to be first
all_queues.insert(0, self.get_default_queue())
queue_names = []
for queue in all_queues:
queue_names.append(self.text(queue))
return queue_names
def select_best_queue(self, num_nodes, num_tasks, walltime=None, job=None):
# Make sure to check default queue first.
qnames = self._get_all_queue_names()
for qname in qnames:
if self.queue_meets_spec(qname, num_nodes, num_tasks, walltime=walltime, job=job):
return qname
return None
def get_queue_specs(self, queue):
"""
Get queue specifications by name.
Returns (nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, is_strict)
"""
for queue_node in self.get_all_queues():
if self.text(queue_node) == queue:
nodemin = self.get(queue_node, "nodemin")
nodemin = None if nodemin is None else int(nodemin)
nodemax = self.get(queue_node, "nodemax")
nodemax = None if nodemax is None else int(nodemax)
jobmin = self.get(queue_node, "jobmin")
jobmin = None if jobmin is None else int(jobmin)
jobmax = self.get(queue_node, "jobmax")
jobmax = None if jobmax is None else int(jobmax)
expect( nodemin is None or jobmin is None, "Cannot specify both nodemin and jobmin for a queue")
expect( nodemax is None or jobmax is None, "Cannot specify both nodemax and jobmax for a queue")
jobname = self.get(queue_node, "jobname")
walltimemax = self.get(queue_node, "walltimemax")
strict = self.get(queue_node, "strict") == "true"
return nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict
return None
def get_default_queue(self):
bs_nodes = self.get_children("batch_system")
node = None
for bsnode in bs_nodes:
qnodes = self.get_children("queues", root=bsnode)
for qnode in qnodes:
node = self.get_optional_child("queue", attributes={"default" : "true"}, root=qnode)
if node is None:
node = self.get_optional_child("queue", root=qnode)
expect(node is not None, "No queues found")
return node
def get_all_queues(self):
bs_nodes = self.get_children("batch_system")
nodes = []
for bsnode in bs_nodes:
qnode = self.get_optional_child("queues", root=bsnode)
if qnode is not None:
nodes.extend(self.get_children("queue", root=qnode))
return nodes
def get_children(self, name=None, attributes=None, root=None):
if name == "PROJECT_REQUIRED":
nodes = super(EnvBatch, self).get_children("entry", attributes={"id":name}, root=root)
else:
nodes = super(EnvBatch, self).get_children(name, attributes=attributes, root=root)
return nodes
def get_status(self, jobid):
    """
    Query the batch system for the status of *jobid*.

    :param jobid: batch-system job identifier (appended to the command line)
    :return: stripped stdout of the configured batch_query command, or None
        when querying is unsupported on this platform or the command fails
        (both cases only log a warning).
    """
    batch_query = self.get_optional_child("batch_query")
    if batch_query is None:
        logger.warning("Batch queries not supported on this platform")
    else:
        cmd = self.text(batch_query) + " "
        if self.has(batch_query, "per_job_arg"):
            # Some batch systems need an extra flag before the job id.
            cmd += self.get(batch_query, "per_job_arg") + " "

        cmd += jobid
        status, out, err = run_cmd(cmd)
        if status != 0:
            logger.warning("Batch query command '{}' failed with error '{}'".format(cmd, err))
        else:
            return out.strip()
def cancel_job(self, jobid):
    """
    Cancel batch job *jobid* via the configured batch_cancel command.

    :param jobid: id of the job to cancel (converted to str for the command)
    :return: True if the cancel command succeeded; False if cancellation is
        unsupported on this platform or the command failed.
    """
    batch_cancel = self.get_optional_child("batch_cancel")
    if batch_cancel is None:
        logger.warning("Batch cancellation not supported on this platform")
        return False

    cmd = self.text(batch_cancel) + " " + str(jobid)

    status, out, err = run_cmd(cmd)
    if status != 0:
        logger.warning("Batch cancel command '{}' failed with error '{}'".format(cmd, out + "\n" + err))
        # Previously this path fell through and returned None implicitly;
        # return False explicitly so every failure path yields the same
        # (still falsy) value.
        return False

    return True
def compare_xml(self, other):
    """
    Diff this env_batch object against *other*.

    Returns a dict mapping node name -> [text_in_self, text_in_other] for
    every child of self's <batch_system> sections that has no exact
    (name, attributes, text) match in other, merged with whatever the
    generic per-<group> comparison in the base class finds.
    """
    xmldiffs = {}
    f1batchnodes = self.get_children("batch_system")
    for bnode in f1batchnodes:
        # Find the matching batch_system section in the other file by attributes.
        f2bnodes = other.get_children("batch_system",
                                      attributes = self.attrib(bnode))
        f2bnode=None
        if len(f2bnodes):
            f2bnode = f2bnodes[0]
        # NOTE(review): this reassignment shadows the outer f1batchnodes
        # list; safe because the outer for-loop already holds its own
        # iterator, but confusing to read.
        f1batchnodes = self.get_children(root=bnode)
        for node in f1batchnodes:
            name = self.name(node)
            text1 = self.text(node)
            text2 = ""
            attribs = self.attrib(node)
            f2matches = other.scan_children(name, attributes=attribs, root=f2bnode)
            foundmatch=False
            for chkmatch in f2matches:
                name2 = other.name(chkmatch)
                attribs2 = other.attrib(chkmatch)
                text2 = other.text(chkmatch)
                if(name == name2 and attribs==attribs2 and text1==text2):
                    foundmatch=True
                    break
            if not foundmatch:
                # text2 holds the last candidate examined (or "" if none).
                xmldiffs[name] = [text1, text2]

    f1groups = self.get_children("group")
    for node in f1groups:
        group = self.get(node, "id")
        f2group = other.get_child("group", attributes={"id":group})
        # Delegate per-group entry comparison to the base class.
        xmldiffs.update(super(EnvBatch, self).compare_xml(other,
                                                          root=node, otherroot=f2group))
    return xmldiffs
def make_all_batch_files(self, case):
    """
    Generate a batch script for every job declared in *case*'s workflow.

    Each job's "template" value is resolved through the case; relative
    template paths are taken to be under MACHDIR. A missing or unreadable
    template only produces a warning, not an error.
    """
    machdir = case.get_value("MACHDIR")
    env_workflow = case.get_env("workflow")
    logger.info("Creating batch scripts")
    jobs = env_workflow.get_jobs()
    for job in jobs:
        template = case.get_resolved_value(env_workflow.get_value('template', subgroup=job))

        if os.path.isabs(template):
            input_batch_script = template
        else:
            input_batch_script = os.path.join(machdir,template)
        if os.path.isfile(input_batch_script):
            logger.info("Writing {} script from input template {}".format(job, input_batch_script))
            self.make_batch_script(input_batch_script, job, case)
        else:
            logger.warning("Input template file {} for job {} does not exist or cannot be read.".format(input_batch_script, job))
| 45.382256 | 150 | 0.566902 |
acea3b25e3d68fcb46c6e4dbfe02b262056833b5 | 669 | py | Python | examples/async_httpbin.py | cfytrok/python-simple-rest-client | 4896e8226ffe194625c63773ea6f49531293b308 | [
"MIT"
] | 163 | 2017-04-13T21:24:12.000Z | 2022-02-21T04:55:47.000Z | examples/async_httpbin.py | cfytrok/python-simple-rest-client | 4896e8226ffe194625c63773ea6f49531293b308 | [
"MIT"
] | 39 | 2017-08-02T14:46:12.000Z | 2021-12-10T12:57:29.000Z | examples/async_httpbin.py | cfytrok/python-simple-rest-client | 4896e8226ffe194625c63773ea6f49531293b308 | [
"MIT"
] | 50 | 2017-06-05T22:39:52.000Z | 2021-12-26T21:09:52.000Z | import asyncio
from simple_rest_client.api import API
from simple_rest_client.resource import AsyncResource
class BasicAuthResource(AsyncResource):
    """Resource hitting httpbin's basic-auth echo endpoint."""
    # Single action: GET basic-auth/{user}/{passwd} relative to the API root.
    actions = {"retrieve": {"method": "GET", "url": "basic-auth/{}/{}"}}


# https://httpbin.org/
auth = ("username", "password")
httpbin_api = API(api_root_url="https://httpbin.org/")
httpbin_api.add_resource(resource_name="basic_auth", resource_class=BasicAuthResource)


async def main():
    # The two positional args presumably fill the {}/{} url placeholders;
    # `auth` performs the actual HTTP basic authentication.
    response = await httpbin_api.basic_auth.retrieve("username", "password", auth=auth)
    print("httpbin_api.basic_auth.retrieve={!r}".format(response.body))


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 27.875 | 87 | 0.750374 |
acea3b78498ec1a36af44b10e369d6b4c6564768 | 906 | py | Python | nicos_ess/labs/premp/setups/vinci_pump.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-03-26T10:30:45.000Z | 2021-03-26T10:30:45.000Z | nicos_ess/labs/premp/setups/vinci_pump.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_ess/labs/premp/setups/vinci_pump.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 3 | 2020-08-04T18:35:05.000Z | 2021-04-16T11:22:08.000Z | description = 'Vinci pump'
# Common EPICS PV prefix shared by every record of this pump.
pv_root = 'utg-vincipump-001:Dr1:'

devices = dict(
    # Read-only process values.
    vinci_volume=device(
        'nicos_ess.devices.epics.pva.EpicsReadable',
        description='The volume',
        readpv='{}Volume'.format(pv_root),
    ),
    vinci_flow=device(
        'nicos_ess.devices.epics.pva.EpicsReadable',
        description='The flow',
        readpv='{}Flow'.format(pv_root),
    ),
    # Pressure is both readable and settable (setpoint PV differs from readback).
    vinci_pressure=device(
        'nicos_ess.devices.epics.pva.EpicsAnalogMoveable',
        description='The pressure',
        readpv='{}Pressure'.format(pv_root),
        writepv='{}PM_Press_SP'.format(pv_root),
        abslimits=(0, 200),
    ),
    # Start command record; lowlevel keeps it out of the default device list.
    vinci_start=device(
        'nicos_ess.devices.epics.pva.EpicsMappedMoveable',
        description='Set the burster function',
        readpv='{}Start_Cmd'.format(pv_root),
        writepv='{}Start_Cmd'.format(pv_root),
        lowlevel=True,
    ),
)
| 28.3125 | 58 | 0.625828 |
acea3d9103b8af0b574bc414f40edc2a415a1f1c | 3,648 | py | Python | spyder/dependencies.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | [
"MIT"
] | null | null | null | spyder/dependencies.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | [
"MIT"
] | null | null | null | spyder/dependencies.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Module checking Spyder runtime dependencies"""
import os
# Local imports
from spyder.utils import programs
class Dependency(object):
    """Spyder's dependency

    version may starts with =, >=, > or < to specify the exact requirement ;
    multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0')"""

    # Status strings reported by get_status()/get_installed_version().
    OK = 'OK'
    NOK = 'NOK'

    def __init__(self, modname, features, required_version,
                 installed_version=None, optional=False):
        self.modname = modname
        self.features = features
        self.required_version = required_version
        self.optional = optional

        if installed_version is None:
            try:
                self.installed_version = programs.get_module_version(modname)
            except Exception:
                # Modules can fail to import in several ways besides
                # ImportError, so any Exception is caught here. (The former
                # bare "except:" also swallowed KeyboardInterrupt/SystemExit,
                # which should propagate.)
                self.installed_version = None
        else:
            self.installed_version = installed_version

    def check(self):
        """Check if dependency is installed"""
        return programs.is_module_installed(self.modname,
                                            self.required_version,
                                            self.installed_version)

    def get_installed_version(self):
        """Return the installed version followed by the check status (string)"""
        # Delegates to get_status() instead of re-running the OK/NOK logic.
        return '%s (%s)' % (self.installed_version, self.get_status())

    def get_status(self):
        """Return dependency status (string): 'OK' or 'NOK'."""
        return self.OK if self.check() else self.NOK
# Registry of all Dependency objects known to the application.
DEPENDENCIES = []


def add(modname, features, required_version, installed_version=None,
        optional=False):
    """Add Spyder dependency"""
    global DEPENDENCIES
    if any(dep.modname == modname for dep in DEPENDENCIES):
        raise ValueError("Dependency has already been registered: %s"\
                         % modname)
    DEPENDENCIES.append(Dependency(modname, features, required_version,
                                   installed_version, optional))
def check(modname):
    """Check if required dependency is installed.

    :param modname: module name of a previously registered dependency
    :raises RuntimeError: if *modname* was never registered via add()
    """
    for dependency in DEPENDENCIES:
        if dependency.modname == modname:
            return dependency.check()
    # No registered dependency matched. (Typo "Unkwown" fixed; the old
    # for/else construct was equivalent but misleading, since the loop
    # returns instead of breaking.)
    raise RuntimeError("Unknown dependency %s" % modname)
def status(deps=None, linesep=os.linesep):
    """Return a status report of dependencies, one per line.

    Each line is "<modname> <required_version>: <installed (OK/NOK)>", with
    the left column padded to a common width.

    :param deps: iterable of Dependency-like objects; defaults to the global
        DEPENDENCIES registry (resolved at call time -- the old def-time
        default froze the binding at import).
    :param linesep: separator between lines. Joining instead of appending
        then slicing ``[:-1]`` also fixes the old bug where a 2-character
        os.linesep ("\\r\\n" on Windows) left a trailing "\\r".
    """
    if deps is None:
        deps = DEPENDENCIES
    col1 = []
    col2 = []
    for dependency in deps:
        title1 = dependency.modname + ' ' + dependency.required_version
        col1.append(title1)
        col2.append(dependency.get_installed_version())
    maxwidth = max((len(title) for title in col1), default=0)
    lines = [left.ljust(maxwidth) + ': ' + right
             for left, right in zip(col1, col2)]
    return linesep.join(lines)
def missing_dependencies():
    """Return the status of missing (non-optional, failing) dependencies,
    formatted with HTML line breaks, or "" when nothing is missing."""
    missing = [dep for dep in DEPENDENCIES
               if not dep.check() and not dep.optional]
    return status(deps=missing, linesep='<br>') if missing else ""
| 32 | 78 | 0.586349 |
acea3ebe1bdcf4e440451449f51c6237d24f97a5 | 449 | py | Python | sfm/api/serializers.py | Xtuden-com/sfm-ui | 4c294a79f0946924e5877e864d94ad76e1edd5bd | [
"MIT"
] | 129 | 2015-10-08T18:49:38.000Z | 2022-02-19T23:16:24.000Z | sfm/api/serializers.py | Xtuden-com/sfm-ui | 4c294a79f0946924e5877e864d94ad76e1edd5bd | [
"MIT"
] | 941 | 2015-08-31T17:57:54.000Z | 2022-03-16T22:02:34.000Z | sfm/api/serializers.py | Xtuden-com/sfm-ui | 4c294a79f0946924e5877e864d94ad76e1edd5bd | [
"MIT"
] | 31 | 2016-03-06T09:25:02.000Z | 2021-02-03T11:53:29.000Z | from rest_framework.serializers import HyperlinkedModelSerializer
from ui.models import Warc, Collection
class WarcSerializer(HyperlinkedModelSerializer):
    """REST serializer exposing the listed fields of the Warc model."""
    class Meta:
        model = Warc
        fields = ('warc_id', 'path', 'sha1', 'bytes', 'date_created', 'harvest_type')
class CollectionSerializer(HyperlinkedModelSerializer):
    """REST serializer exposing the listed fields of the Collection model."""
    class Meta:
        model = Collection
        fields = ('collection_id', 'harvest_type', 'name', 'is_on')
| 29.933333 | 85 | 0.714922 |
acea4071a2692d7355b771ff79fc5c632c99b704 | 19,314 | py | Python | main.py | team-alpha-kr/PUBG-API | d26c4ebc054750cb2a28eba8feff00674e34f263 | [
"MIT"
] | 22 | 2021-01-05T10:04:23.000Z | 2021-01-05T10:05:18.000Z | main.py | gunyu1019/PUBG-API | d26c4ebc054750cb2a28eba8feff00674e34f263 | [
"MIT"
] | 1 | 2021-06-30T05:53:41.000Z | 2021-07-03T09:22:00.000Z | main.py | team-alpha-kr/PUBG-API | d26c4ebc054750cb2a28eba8feff00674e34f263 | [
"MIT"
] | 1 | 2022-03-04T00:49:37.000Z | 2022-03-04T00:49:37.000Z | import asyncio
import pymysql
import os
import sys
import json
import aiohttp
import importlib
import datetime
from math import trunc
from sanic import Sanic
from pytz import timezone
import sanic.response as response
from log_config import LOGGING_CONFIG
app = Sanic(__name__, log_config=LOGGING_CONFIG)

# Index-aligned lookup tables for platform id 0-4:
# display name / PUBG API shard name / column name in the SEASON_STATUS table.
platform_name = ["Steam","Kakao","XBOX","PS","Stadia"]
platform_site = ["steam","kakao","xbox","psn","stadia"]
DB_platform = ["Steam","Kakao","XBOX","PSN","Stadia"]

directory = os.path.dirname(os.path.abspath(__file__)).replace("\\","/")

# Load MySQL credentials from data/database.json.
db_f = open(f"{directory}/data/database.json",mode='r')
db = db_f.read()
db_f.close()
db_json = json.loads(db)
db_ip = db_json["mysql"]["ip"]
db_user = db_json["mysql"]["user"]
db_pw = db_json["mysql"]["password"]
db_name = db_json["mysql"]["database"]

connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8') # Load the client API key.
cur = connect.cursor()
cur.execute("SELECT * from PUBG_BOT")
client_list = cur.fetchall()
pubg_token = client_list[0][2]  # PUBG API bearer token stored in the 3rd column
connect.close()

sys.path.append(directory + "/modules") # Make helper functions from other files importable.
p_info = importlib.import_module("player")
s_info = importlib.import_module("status")

# Common headers for every PUBG API request.
header = {
    "Authorization": "Bearer " + pubg_token,
    "Accept": "application/vnd.api+json"
}

# Template stored as "last_update" for newly registered players.
sample_f = open(f"{directory}/data/last_update_sample.json",mode='r')
sample1 = json.loads(sample_f.read())
sample_f.close()
async def get_season(pubg_platform):
    """
    Return the current season id for the given platform index.

    Season JSON (PUBG API format) is cached per platform in the
    SEASON_STATUS table, one column per DB_platform entry.

    :param pubg_platform: platform index (0-4) into DB_platform
    :return: id of the season flagged isCurrentSeason, or None when the
        cached data contains no current season (previously this path raised
        UnboundLocalError).
    """
    connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw, db='PUBG_BOT', charset='utf8')
    try:
        cur = connect.cursor()
        # DB_platform entries are trusted module constants, so this f-string
        # interpolation is not an injection risk.
        sql = f"SELECT {DB_platform[pubg_platform]} FROM SEASON_STATUS"
        cur.execute(sql)
        cache = cur.fetchone()
    finally:
        # The original leaked this connection on every call; always close it.
        connect.close()
    html = cache[0]
    data_json = json.loads(html)['data']
    current_season = None
    for season in data_json:
        # Last matching entry wins, matching the original behavior.
        if season['attributes']['isCurrentSeason']:
            current_season = season
    return None if current_season is None else current_season['id']
def time_num(f_playtime):
    """
    Format a play time given in seconds as a Korean duration string, showing
    only the leading non-zero units (e.g. 90 -> "1분 30초",
    90000 -> "1일 1시간 0분 0초").

    Implementation note: the seconds are interpreted as an offset from the
    Unix epoch, so "months"/"days" follow the 1970 calendar; durations of a
    year or more wrap around (the year part is ignored), exactly as before.
    ``datetime.timezone.utc`` replaces the former pytz dependency with
    identical results.
    """
    playtime = datetime.datetime.fromtimestamp(f_playtime, datetime.timezone.utc)
    # Guard clauses from largest unit to smallest (flattens the old
    # four-deep nested ifs without changing any output).
    if playtime.month > 1:
        return f"{playtime.month-1}달 {playtime.day-1}일 {playtime.hour}시간 {playtime.minute}분 {playtime.second}초"
    if playtime.day > 1:
        return f"{playtime.day-1}일 {playtime.hour}시간 {playtime.minute}분 {playtime.second}초"
    if playtime.hour != 0:
        return f"{playtime.hour}시간 {playtime.minute}분 {playtime.second}초"
    if playtime.minute != 0:
        return f"{playtime.minute}분 {playtime.second}초"
    return f"{playtime.second}초"
@app.route("/api/PUBG/")
async def main(request):
    """API root: redirect to the project documentation on GitHub."""
    return response.redirect("https://github.com/team-alpha-kr/PUBG-API")
@app.route("/api/PUBG/player")
async def player(request):
    """
    Look up (and lazily register) a player.

    Query args:
        nickname (required): in-game nickname.
        platform (required only on first lookup): platform index 0-4.

    Known players are served straight from the DB; unknown players are
    resolved against the PUBG API, inserted with the empty "last update"
    template (sample1), and then returned.
    """
    args = request.get_args(keep_blank_values=True)
    if not ("nickname" in args): return response.json({'code':'01', 'msg':"Please write your nickname."}, status=400)
    else: nickname = args['nickname'][0]
    connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
    cur = connect.cursor()
    # EXISTS returns 1/0: is this nickname already registered?
    exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE name=%s) as succees;")
    # NOTE(review): (nickname) is a bare scalar, not a 1-tuple; PyMySQL
    # tolerates this, but (nickname,) would be clearer.
    cur.execute(exist_nickname,(nickname))
    exist = cur.fetchone()
    if exist[0]:
        command = pymysql.escape_string("SELECT id, name, platform, last_update FROM player WHERE name=%s")
        cur.execute(command,(nickname))
        fetch = cur.fetchone()
        connect.close()
        data = {
            "id":fetch[0],
            "nickname":fetch[1],
            "platform":fetch[2],
            "lastupdate":json.loads(fetch[3])
        }
        return response.json(data,status=200)
    else:
        # NOTE(review): the error returns below (codes 02/06/07) leave
        # `connect` open.
        if not ("platform" in args): return response.json({"code":"02","msg":"The value is not stored in DB, so you need to create a platform."},status=400)
        else:
            try: platform = int(args['platform'][0])
            except ValueError: return response.json({'code':'06', 'msg':"Platform values can only contain numbers."}, status=400)
            if not (platform >= 0 and platform < 5): return response.json({'code':'07', 'msg':"Platform values can contain only 0-4 values."}, status=400)
        async with aiohttp.ClientSession() as session:
            async with session.get(f"https://api.pubg.com/shards/{platform_site[platform]}/players?filter[playerNames]={nickname}", headers=header) as resp:
                if resp.status == 200:
                    json_data = await resp.json()
                else:
                    # Map the PUBG API status code onto this service's error table.
                    e_resp = s_info.response_num(resp.status)
                    print(await resp.json(),resp.status)
                    return response.json({'code': e_resp[1], 'msg': e_resp[2]}, status=e_resp[0])
        data = {
            "id":json_data["data"][0]["id"],
            "nickname":json_data["data"][0]["attributes"]["name"],
            "platform":platform,
            "lastupdate":sample1
        }
        command = pymysql.escape_string("insert into player(id,name,last_update,platform) value(%s,%s,%s,%s)")
        cur.execute(command,(json_data["data"][0]["id"],json_data["data"][0]["attributes"]["name"],json.dumps(sample1),platform))
        connect.commit()
        connect.close()
        return response.json(data,status=200)
@app.route("/api/PUBG/normal")
async def normal_status(request):
    """
    Return cached normal-mode (non-ranked) season stats for a player.

    Query args:
        id (required): PUBG account id (must already be registered via /player).
        season (optional): numeric season number; defaults to the current season.
    """
    args = request.get_args(keep_blank_values=True)
    if not ("id" in args): return response.json({'code':'01', 'msg':"Please write your id."}, status=400)
    else: pubg_id = args['id'][0]
    # NOTE(review): this connection is never closed on any path of this handler.
    connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
    cur = connect.cursor()
    exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE id=%s) as succees;")
    cur.execute(exist_nickname,(pubg_id))
    fetch1 = cur.fetchone()
    if fetch1[0]:
        command = pymysql.escape_string("SELECT platform FROM player WHERE id=%s")
        cur.execute(command, (pubg_id))
        platform_info = cur.fetchone()[0]
    else: return response.json({'code':'05', 'msg':"No information about the user was found. Please proceed with \"/PUBG/player\" first."}, status=400)
    if ("season" in args):
        try: season = int(args['season'][0])
        except ValueError: return response.json({'code':'08', 'msg':"Season values can only contain numbers."}, status=400)
        # Platforms 0-1 (Steam/Kakao) use "pc-2018" season ids, consoles use "console".
        if platform_info >= 0 and platform_info <= 1: type_season = "pc-2018"
        else: type_season = "console"
        # Single-digit season numbers are zero-padded in the official id.
        if len(str(season)) < 2: season = f"division.bro.official.{type_season}-0{season}"
        else: season = f"division.bro.official.{type_season}-{season}"
    else: season = await get_season(platform_info)
    status, html = await s_info.season_status(pubg_id,platform_info,season)
    if not status:
        # html carries (http_status, code, msg) on failure.
        return response.json({'code': html[1], 'msg': html[2]}, status=html[0])
    else:
        data = {
            "id":pubg_id,
            "gameMode":{}
        }
        gamestat = html['data']['attributes']['gameModeStats']
        for i in ['solo','solo-fpp','duo','duo-fpp','squad','squad-fpp']:
            modestat = gamestat[i]
            losses = modestat['losses']
            if losses == 0:
                # Avoid division by zero when computing K/D ratios.
                losses = 1
            KDA_point = round((modestat['assists'] + modestat['kills']) / losses,2)
            KD_point = round(modestat['kills'] / losses,2)
            i_data = {
                i:{
                    "assists":modestat['assists'],
                    "boosts": modestat['boosts'],
                    "dBNOs": modestat['dBNOs'],
                    "dailyKills": modestat['dailyKills'],
                    "dailyWins": modestat['dailyWins'],
                    "damageDealt": modestat['damageDealt'],
                    "days": modestat['days'],
                    "headshotKills": modestat['headshotKills'],
                    "heals": modestat['heals'],
                    "KDA_point": KDA_point,
                    "KD_point": KD_point,
                    "kills": modestat['kills'],
                    "longestKill": modestat['longestKill'],
                    "longestTimeSurvived": modestat['longestTimeSurvived'],
                    "longestTimeSurvivedAnswer": time_num(modestat['longestTimeSurvived']),
                    "losses": modestat['losses'],
                    "maxKillStreaks": modestat['maxKillStreaks'],
                    "mostSurvivalTime": modestat['mostSurvivalTime'],
                    "revives": modestat['revives'],
                    "rideDistance": modestat['rideDistance'],
                    "roadKills": modestat['roadKills'],
                    "roundMostKills": modestat['roundMostKills'],
                    "roundsPlayed": modestat['roundsPlayed'],
                    "suicides": modestat['suicides'],
                    "swimDistance": modestat['swimDistance'],
                    "teamKills": modestat['teamKills'],
                    "timeSurvived": modestat['timeSurvived'],
                    "timeSurvivedAnswer": time_num(modestat['timeSurvived']),
                    "top10s": modestat['top10s'],
                    "vehicleDestroys": modestat['vehicleDestroys'],
                    "walkDistance": modestat['walkDistance'],
                    "weaponsAcquired": modestat['weaponsAcquired'],
                    "weeklyKills": modestat['weeklyKills'],
                    "weeklyWins": modestat['weeklyWins'],
                    "wins": modestat['wins']
                }
            }
            data['gameMode'].update(i_data)
        return response.json(data, status=200)
@app.route("/api/PUBG/normal/update")
async def update_normal_status(request):
    """
    Refresh the cached normal-mode stats for a player.

    Same id/season resolution as /api/PUBG/normal, then delegates the
    actual refresh to s_info.season_status_update.
    """
    args = request.get_args(keep_blank_values=True)
    if not ("id" in args): return response.json({'code':'01', 'msg':"Please write your id."}, status=400)
    else: pubg_id = args['id'][0]
    # NOTE(review): this connection is never closed on any path of this handler.
    connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
    cur = connect.cursor()
    exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE id=%s) as succees;")
    cur.execute(exist_nickname,(pubg_id))
    fetch1 = cur.fetchone()
    if fetch1[0]:
        command = pymysql.escape_string("SELECT platform FROM player WHERE id=%s")
        cur.execute(command, (pubg_id))
        platform_info = cur.fetchone()[0]
    else: return response.json({'code':'05', 'msg':"No information about the user was found. Please proceed with \"/PUBG/player\" first."}, status=400)
    if ("season" in args):
        try: season = int(args['season'][0])
        except ValueError: return response.json({'code':'08', 'msg':"Season values can only contain numbers."}, status=400)
        if platform_info >= 0 and platform_info <= 1: type_season = "pc-2018"
        else: type_season = "console"
        if len(str(season)) < 2: season = f"division.bro.official.{type_season}-0{season}"
        else: season = f"division.bro.official.{type_season}-{season}"
    else: season = await get_season(platform_info)
    await s_info.season_status_update(pubg_id, platform_info, season)
    return response.json({
        "code":"00",
        "msg":"Updated successfully."
    },status=200)
@app.route("/api/PUBG/ranked")
async def ranked_status(request):
    """
    Return cached ranked-mode season stats for a player.

    Query args:
        id (required): PUBG account id (must already be registered via /player).
        season (optional): numeric season number; defaults to the current season.

    Modes absent from the API payload are reported as an all-zero
    "Unranked" placeholder so the response schema is uniform.
    """
    args = request.get_args(keep_blank_values=True)
    if not ("id" in args): return response.json({'code':'01', 'msg':"Please write your id."}, status=400)
    else: pubg_id = args['id'][0]
    connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
    cur = connect.cursor()
    exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE id=%s) as succees;")
    cur.execute(exist_nickname,(pubg_id))
    fetch1 = cur.fetchone()
    if fetch1[0]:
        command = pymysql.escape_string("SELECT platform FROM player WHERE id=%s")
        cur.execute(command, (pubg_id))
        platform_info = cur.fetchone()[0]
        # Nothing below touches this connection; close it (it used to leak).
        connect.close()
    else:
        connect.close()
        return response.json({'code':'05', 'msg':"No information about the user was found. Please proceed with \"/PUBG/player\" first."}, status=400)
    if ("season" in args):
        try: season = int(args['season'][0])
        except ValueError: return response.json({'code':'08', 'msg':"Season values can only contain numbers."}, status=400)
        # Platforms 0-1 (Steam/Kakao) use "pc-2018" season ids, consoles use "console".
        if platform_info >= 0 and platform_info <= 1: type_season = "pc-2018"
        else: type_season = "console"
        if len(str(season)) < 2: season = f"division.bro.official.{type_season}-0{season}"
        else: season = f"division.bro.official.{type_season}-{season}"
    else: season = await get_season(platform_info)
    status, html = await s_info.ranked_status(pubg_id,platform_info,season)
    if not status:
        # html carries (http_status, code, msg) on failure.
        return response.json({'code': html[1], 'msg': html[2]}, status=html[0])
    else:
        data = {
            "id":pubg_id,
            "gameMode":{}
        }
        gamestat = html['data']['attributes']['rankedGameModeStats']
        for i in ['solo','solo-fpp','squad','squad-fpp']:
            if not (i in gamestat):
                # Mode never played this season: uniform zeroed placeholder.
                i_data = {
                    i: {
                        "assists": 0,
                        "avgRank": 0,
                        "currentRank":{
                            "tier":"Unranked",
                            "subTier":"1"
                        },
                        "currentRankAnswer":"Unranked",
                        "currentRankPoint":0,
                        "bestRank":{
                            "tier":"Unranked",
                            "subTier":"1"
                        },
                        "bestRankAnswer":"Unranked",
                        "bestRankPoint": 0,
                        "damageDealt": 0,
                        "deaths": 0,
                        "dBNOs": 0,
                        "KDA_point": 0,
                        "KD_point": 0,
                        "kills": 0,
                        # Added for parity with the populated branch below,
                        # which always includes roundsPlayed.
                        "roundsPlayed": 0,
                        "top10s": 0,
                        "top10_point": 0,
                        "wins": 0,
                        "win_point": 0
                    }
                }
                data['gameMode'].update(i_data)
                continue
            modestat = gamestat[i]
            losses = modestat['deaths']
            if losses == 0:
                # Avoid division by zero when computing the K/D ratio.
                losses = 1
            KD_point = round(modestat['kills'] / losses,2)
            currentTier1 = modestat["currentTier"]["tier"]
            currentTier2 = modestat["currentTier"]["subTier"]
            bestTier1 = modestat["bestTier"]["tier"]
            bestTier2 = modestat["bestTier"]["subTier"]
            # "Unranked" and "Master" have no sub-tier in the display name.
            if currentTier1 == "Unranked" or currentTier1 == "Master": tier_name1 = currentTier1
            else: tier_name1 = f"{currentTier1} {currentTier2}"
            if bestTier1 == "Unranked" or bestTier1 == "Master": tier_name2 = bestTier1
            else: tier_name2 = f"{bestTier1} {bestTier2}"
            i_data = {
                i:{
                    "assists": modestat['assists'],
                    "avgRank": modestat['avgRank'],
                    "currentRank":modestat['currentTier'],
                    "currentRankAnswer":tier_name1,
                    "currentRankPoint":modestat['currentRankPoint'],
                    "bestRank":modestat['bestTier'],
                    "bestRankAnswer":tier_name2,
                    "bestRankPoint": modestat['bestRankPoint'],
                    "damageDealt": modestat['damageDealt'],
                    "deaths": modestat['deaths'],
                    "dBNOs": modestat['dBNOs'],
                    "KDA_point": modestat['kda'],
                    "KD_point": KD_point,
                    "kills": modestat['kills'],
                    "roundsPlayed": modestat['roundsPlayed'],
                    # top10Ratio is a fraction; recover the count via trunc.
                    "top10s": trunc(modestat['top10Ratio'] * modestat['roundsPlayed']),
                    "top10_point": modestat['top10Ratio'],
                    "wins": modestat['wins'],
                    "win_point": modestat['winRatio']
                }
            }
            data['gameMode'].update(i_data)
        return response.json(data, status=200)
@app.route("/api/PUBG/ranked/update")
async def update_ranked_status(request):
    """
    Refresh the cached ranked-mode stats for a player.

    Same id/season resolution as /api/PUBG/ranked, then delegates the
    actual refresh to s_info.ranked_status_update.
    """
    args = request.get_args(keep_blank_values=True)
    if not ("id" in args): return response.json({'code':'01', 'msg':"Please write your id."}, status=400)
    else: pubg_id = args['id'][0]
    # NOTE(review): this connection is never closed on any path of this handler.
    connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
    cur = connect.cursor()
    exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE id=%s) as succees;")
    cur.execute(exist_nickname,(pubg_id))
    fetch1 = cur.fetchone()
    if fetch1[0]:
        command = pymysql.escape_string("SELECT platform FROM player WHERE id=%s")
        cur.execute(command, (pubg_id))
        platform_info = cur.fetchone()[0]
    else: return response.json({'code':'05', 'msg':"No information about the user was found. Please proceed with \"/PUBG/player\" first."}, status=400)
    if ("season" in args):
        try: season = int(args['season'][0])
        except ValueError: return response.json({'code':'08', 'msg':"Season values can only contain numbers."}, status=400)
        if platform_info >= 0 and platform_info <= 1: type_season = "pc-2018"
        else: type_season = "console"
        if len(str(season)) < 2: season = f"division.bro.official.{type_season}-0{season}"
        else: season = f"division.bro.official.{type_season}-{season}"
    else: season = await get_season(platform_info)
    await s_info.ranked_status_update(pubg_id, platform_info, season)
    return response.json({
        "code":"00",
        "msg":"Updated successfully."
    },status=200)
@app.route("/api/PUBG/player/change_platform")
async def change_platform(request):
    """
    Change the stored platform of a registered player.

    Query args:
        nickname (required): registered in-game nickname.
        platform (required): new platform index 0-4.
    """
    args = request.get_args(keep_blank_values=True)
    if not ("nickname" in args): return response.json({'code':'01', 'msg':"Please write your nickname."}, status=400)
    else: nickname = args['nickname'][0]
    connect = pymysql.connect(host=db_ip, user=db_user, password=db_pw,db=db_name, charset='utf8')
    cur = connect.cursor()
    exist_nickname = pymysql.escape_string("SELECT EXISTS (SELECT name FROM player WHERE name=%s) as succees;")
    cur.execute(exist_nickname,(nickname))
    exist = cur.fetchone()
    if exist[0]:
        # NOTE(review): the validation-error returns below (codes 02/06/07)
        # leave `connect` open.
        if not ("platform" in args): return response.json({'code':'02', 'msg':"Please write your platform."}, status=400)
        else:
            try: platform = int(args['platform'][0])
            except ValueError: return response.json({'code':'06', 'msg':"Platform values can only contain numbers."}, status=400)
            if not (platform >= 0 and platform < 5): return response.json({'code':'07', 'msg':"Platform values can contain only 0-4 values."}, status=400)
        command = pymysql.escape_string("UPDATE player SET platform=%s WHERE name=%s")
        cur.execute(command,(platform,nickname))
        connect.commit()
        connect.close()
        return response.json({
            "code":"00",
            "msg":"Updated successfully."
        },status=200)
    else:
        connect.close()
        return response.json({'code': '05','msg': "No information about the user was found. Please proceed with \"/PUBG/player\" first."},status=400)
app.run('127.0.0.1', 3200) | 48.285 | 156 | 0.591281 |
acea40c5a2535b5bd6046cf4c74055bcf0e1be68 | 42,316 | py | Python | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/git/cmd.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 2 | 2021-02-16T16:17:07.000Z | 2021-11-08T20:27:13.000Z | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/git/cmd.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 7 | 2021-03-30T14:10:56.000Z | 2022-03-12T00:43:13.000Z | tests/sandbox/.venv_ccf_sandbox/lib/python3.8/site-packages/git/cmd.py | iLuSIAnn/test | 10d0a20dc1a646b5c1f6c7bff2960e3f5df0510e | [
"Apache-2.0"
] | 4 | 2020-11-14T17:05:36.000Z | 2020-11-16T18:44:54.000Z | # cmd.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from contextlib import contextmanager
import io
import logging
import os
import signal
from subprocess import (
call,
Popen,
PIPE
)
import subprocess
import sys
import threading
from collections import OrderedDict
from textwrap import dedent
from git.compat import (
defenc,
force_bytes,
safe_decode,
is_posix,
is_win,
)
from git.exc import CommandError
from git.util import is_cygwin_git, cygpath, expand_path
from .exc import (
GitCommandError,
GitCommandNotFound
)
from .util import (
LazyMixin,
stream_copy,
)
# Keyword-argument names recognized by command execution (presumably the
# signature of Git.execute() below -- confirm against the class body).
execute_kwargs = {'istream', 'with_extended_output',
                  'with_exceptions', 'as_process', 'stdout_as_string',
                  'output_stream', 'with_stdout', 'kill_after_timeout',
                  'universal_newlines', 'shell', 'env', 'max_chunk_size'}

# Library logger; NullHandler keeps the module silent unless the host
# application configures logging.
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())

__all__ = ('Git',)
# ==============================================================================
## @name Utilities
# ------------------------------------------------------------------------------
# Documentation
## @{
def handle_process_output(process, stdout_handler, stderr_handler,
                          finalizer=None, decode_streams=True):
    """Registers for notifications to learn that process output is ready to read, and dispatches lines to
    the respective line handlers.
    This function returns once the finalizer returns

    :return: result of finalizer
    :param process: subprocess.Popen instance
    :param stdout_handler: f(stdout_line_string), or None
    :param stderr_handler: f(stderr_line_string), or None
    :param finalizer: f(proc) - wait for proc to finish
    :param decode_streams:
        Assume stdout/stderr streams are binary and decode them before pushing \
        their contents to handlers.
        Set it to False if `universal_newline == True` (then streams are in text-mode)
        or if decoding must happen later (i.e. for Diffs).
    """
    # Use 2 "pump" threads and wait for both to finish.
    def pump_stream(cmdline, name, stream, is_decode, handler):
        # Forward every line of `stream` to `handler`, decoding first when
        # requested; any failure is wrapped in a CommandError that names
        # which pump (stdout/stderr) broke.
        try:
            for line in stream:
                if handler:
                    if is_decode:
                        line = line.decode(defenc)
                    handler(line)
        except Exception as ex:
            log.error("Pumping %r of cmd(%s) failed due to: %r", name, cmdline, ex)
            raise CommandError(['<%s-pump>' % name] + cmdline, ex) from ex
        finally:
            stream.close()

    cmdline = getattr(process, 'args', '')  # PY3+ only
    if not isinstance(cmdline, (tuple, list)):
        cmdline = cmdline.split()

    # One (name, stream, handler) pump per stream that was actually piped.
    pumps = []
    if process.stdout:
        pumps.append(('stdout', process.stdout, stdout_handler))
    if process.stderr:
        pumps.append(('stderr', process.stderr, stderr_handler))

    threads = []

    for name, stream, handler in pumps:
        t = threading.Thread(target=pump_stream,
                             args=(cmdline, name, stream, decode_streams, handler))
        # Daemon threads cannot keep the interpreter alive on exit.
        t.setDaemon(True)
        t.start()
        threads.append(t)

    ## FIXME: Why Join?? Will block if `stdin` needs feeding...
    #
    for t in threads:
        t.join()

    if finalizer:
        return finalizer(process)
def dashify(string):
    """Turn every underscore into a dash, e.g. "cherry_pick" -> "cherry-pick"."""
    return "-".join(string.split("_"))
def slots_to_dict(self, exclude=()):
    """Map each name in ``self.__slots__`` (minus *exclude*) to its current value."""
    result = {}
    for slot in self.__slots__:
        if slot in exclude:
            continue
        result[slot] = getattr(self, slot)
    return result
def dict_to_slots_and__excluded_are_none(self, d, excluded=()):
    """Assign every key of *d* as an attribute on *self*, then force each
    name in *excluded* to None (excluded names win if present in both)."""
    for key in d:
        setattr(self, key, d[key])
    for name in excluded:
        setattr(self, name, None)
## -- End Utilities -- @}
# value of Windows process creation flag taken from MSDN
CREATE_NO_WINDOW = 0x08000000

## CREATE_NEW_PROCESS_GROUP is needed to allow killing it afterwards,
# see https://docs.python.org/3/library/subprocess.html#subprocess.Popen.send_signal
# On non-Windows platforms no creation flags are used (the conditional
# expression binds as "(A | B) if is_win else 0").
PROC_CREATIONFLAGS = (CREATE_NO_WINDOW | subprocess.CREATE_NEW_PROCESS_GROUP
                      if is_win else 0)
class Git(LazyMixin):
"""
The Git class manages communication with the Git binary.
It provides a convenient interface to calling the Git binary, such as in::
g = Git( git_dir )
g.init() # calls 'git init' program
rval = g.ls_files() # calls 'git ls-files' program
``Debugging``
Set the GIT_PYTHON_TRACE environment variable print each invocation
of the command to stdout.
Set its value to 'full' to see details about the returned values.
"""
__slots__ = ("_working_dir", "cat_file_all", "cat_file_header", "_version_info",
"_git_options", "_persistent_git_options", "_environment")
_excluded_ = ('cat_file_all', 'cat_file_header', '_version_info')
def __getstate__(self):
return slots_to_dict(self, exclude=self._excluded_)
def __setstate__(self, d):
dict_to_slots_and__excluded_are_none(self, d, excluded=self._excluded_)
# CONFIGURATION
git_exec_name = "git" # default that should work on linux and windows
# Enables debugging of GitPython's git commands
GIT_PYTHON_TRACE = os.environ.get("GIT_PYTHON_TRACE", False)
# If True, a shell will be used when executing git commands.
# This should only be desirable on Windows, see https://github.com/gitpython-developers/GitPython/pull/126
# and check `git/test_repo.py:TestRepo.test_untracked_files()` TC for an example where it is required.
# Override this value using `Git.USE_SHELL = True`
USE_SHELL = False
# Provide the full path to the git executable. Otherwise it assumes git is in the path
_git_exec_env_var = "GIT_PYTHON_GIT_EXECUTABLE"
_refresh_env_var = "GIT_PYTHON_REFRESH"
GIT_PYTHON_GIT_EXECUTABLE = None
# note that the git executable is actually found during the refresh step in
# the top level __init__
    @classmethod
    def refresh(cls, path=None):
        """This gets called by the refresh function (see the top level
        __init__).
        :param path: optional explicit path to the git executable; when None,
            $GIT_PYTHON_GIT_EXECUTABLE or the plain 'git' name is used
        :return: True if a working git executable was found"""
        # discern which path to refresh with
        if path is not None:
            new_git = os.path.expanduser(path)
            new_git = os.path.abspath(new_git)
        else:
            new_git = os.environ.get(cls._git_exec_env_var, cls.git_exec_name)
        # keep track of the old and new git executable path
        old_git = cls.GIT_PYTHON_GIT_EXECUTABLE
        cls.GIT_PYTHON_GIT_EXECUTABLE = new_git
        # test if the new git executable path is valid
        # - a GitCommandNotFound error is spawned by ourselves
        # - a PermissionError is spawned if the git executable provided
        #   cannot be executed for whatever reason
        has_git = False
        try:
            cls().version()
            has_git = True
        except (GitCommandNotFound, PermissionError):
            pass
        # warn or raise exception if test failed
        if not has_git:
            err = dedent("""\
                Bad git executable.
                The git executable must be specified in one of the following ways:
                    - be included in your $PATH
                    - be set via $%s
                    - explicitly set via git.refresh()
                """) % cls._git_exec_env_var
            # revert to whatever the old_git was
            cls.GIT_PYTHON_GIT_EXECUTABLE = old_git
            if old_git is None:
                # on the first refresh (when GIT_PYTHON_GIT_EXECUTABLE is
                # None) we only are quiet, warn, or error depending on the
                # GIT_PYTHON_REFRESH value
                # determine what the user wants to happen during the initial
                # refresh we expect GIT_PYTHON_REFRESH to either be unset or
                # be one of the following values:
                # 0|q|quiet|s|silence
                # 1|w|warn|warning
                # 2|r|raise|e|error
                mode = os.environ.get(cls._refresh_env_var, "raise").lower()
                quiet = ["quiet", "q", "silence", "s", "none", "n", "0"]
                warn = ["warn", "w", "warning", "1"]
                error = ["error", "e", "raise", "r", "2"]
                if mode in quiet:
                    pass
                elif mode in warn or mode in error:
                    err = dedent("""\
                        %s
                        All git commands will error until this is rectified.
                        This initial warning can be silenced or aggravated in the future by setting the
                        $%s environment variable. Use one of the following values:
                            - %s: for no warning or exception
                            - %s: for a printed warning
                            - %s: for a raised exception
                        Example:
                            export %s=%s
                        """) % (
                        err,
                        cls._refresh_env_var,
                        "|".join(quiet),
                        "|".join(warn),
                        "|".join(error),
                        cls._refresh_env_var,
                        quiet[0])
                    if mode in warn:
                        print("WARNING: %s" % err)
                    else:
                        raise ImportError(err)
                else:
                    err = dedent("""\
                        %s environment variable has been set but it has been set with an invalid value.
                        Use only the following values:
                            - %s: for no warning or exception
                            - %s: for a printed warning
                            - %s: for a raised exception
                        """) % (
                        cls._refresh_env_var,
                        "|".join(quiet),
                        "|".join(warn),
                        "|".join(error))
                    raise ImportError(err)
                # we get here if this was the init refresh and the refresh mode
                # was not error, go ahead and set the GIT_PYTHON_GIT_EXECUTABLE
                # such that we discern the difference between a first import
                # and a second import
                cls.GIT_PYTHON_GIT_EXECUTABLE = cls.git_exec_name
            else:
                # after the first refresh (when GIT_PYTHON_GIT_EXECUTABLE
                # is no longer None) we raise an exception
                raise GitCommandNotFound("git", err)
        return has_git
@classmethod
def is_cygwin(cls):
return is_cygwin_git(cls.GIT_PYTHON_GIT_EXECUTABLE)
@classmethod
def polish_url(cls, url, is_cygwin=None):
if is_cygwin is None:
is_cygwin = cls.is_cygwin()
if is_cygwin:
url = cygpath(url)
else:
"""Remove any backslahes from urls to be written in config files.
Windows might create config-files containing paths with backslashed,
but git stops liking them as it will escape the backslashes.
Hence we undo the escaping just to be sure.
"""
url = os.path.expandvars(url)
if url.startswith('~'):
url = os.path.expanduser(url)
url = url.replace("\\\\", "\\").replace("\\", "/")
return url
    class AutoInterrupt(object):
        """Kill/Interrupt the stored process instance once this instance goes out of scope. It is
        used to prevent processes piling up in case iterators stop reading.
        Besides all attributes are wired through to the contained process object.
        The wait method was overridden to perform automatic status code checking
        and possibly raise."""
        __slots__ = ("proc", "args")
        def __init__(self, proc, args):
            self.proc = proc
            self.args = args
        def __del__(self):
            # best-effort cleanup - every step tolerates a process that is
            # already gone or an interpreter that is shutting down
            if self.proc is None:
                return
            proc = self.proc
            self.proc = None
            if proc.stdin:
                proc.stdin.close()
            if proc.stdout:
                proc.stdout.close()
            if proc.stderr:
                proc.stderr.close()
            # did the process finish already so we have a return code ?
            try:
                if proc.poll() is not None:
                    return
            except OSError as ex:
                log.info("Ignored error after process had died: %r", ex)
            # can be that nothing really exists anymore ...
            if os is None or getattr(os, 'kill', None) is None:
                return
            # try to kill it
            try:
                proc.terminate()
                proc.wait()    # ensure process goes away
            except OSError as ex:
                log.info("Ignored error after process had died: %r", ex)
            except AttributeError:
                # try windows
                # for some reason, providing None for stdout/stderr still prints something. This is why
                # we simply use the shell and redirect to nul. Its slower than CreateProcess, question
                # is whether we really want to see all these messages. Its annoying no matter what.
                if is_win:
                    call(("TASKKILL /F /T /PID %s 2>nul 1>nul" % str(proc.pid)), shell=True)
            # END exception handling
        def __getattr__(self, attr):
            # wire all other attribute access through to the wrapped Popen
            return getattr(self.proc, attr)
        def wait(self, stderr=b''):  # TODO: Bad choice to mimic `proc.wait()` but with different args.
            """Wait for the process and return its status code.
            :param stderr: Previously read value of stderr, in case stderr is already closed.
            :warn: may deadlock if output or error pipes are used and not handled separately.
            :raise GitCommandError: if the return status is not 0"""
            if stderr is None:
                stderr = b''
            stderr = force_bytes(data=stderr, encoding='utf-8')
            status = self.proc.wait()
            def read_all_from_possibly_closed_stream(stream):
                # stream.read() raises ValueError once the stream was closed
                try:
                    return stderr + force_bytes(stream.read())
                except ValueError:
                    return stderr or b''
            if status != 0:
                errstr = read_all_from_possibly_closed_stream(self.proc.stderr)
                log.debug('AutoInterrupt wait stderr: %r' % (errstr,))
                raise GitCommandError(self.args, status, errstr)
            # END status handling
            return status
    # END auto interrupt
class CatFileContentStream(object):
"""Object representing a sized read-only stream returning the contents of
an object.
It behaves like a stream, but counts the data read and simulates an empty
stream once our sized content region is empty.
If not all data is read to the end of the objects's lifetime, we read the
rest to assure the underlying stream continues to work"""
__slots__ = ('_stream', '_nbr', '_size')
def __init__(self, size, stream):
self._stream = stream
self._size = size
self._nbr = 0 # num bytes read
# special case: if the object is empty, has null bytes, get the
# final newline right away.
if size == 0:
stream.read(1)
# END handle empty streams
def read(self, size=-1):
bytes_left = self._size - self._nbr
if bytes_left == 0:
return b''
if size > -1:
# assure we don't try to read past our limit
size = min(bytes_left, size)
else:
# they try to read all, make sure its not more than what remains
size = bytes_left
# END check early depletion
data = self._stream.read(size)
self._nbr += len(data)
# check for depletion, read our final byte to make the stream usable by others
if self._size - self._nbr == 0:
self._stream.read(1) # final newline
# END finish reading
return data
def readline(self, size=-1):
if self._nbr == self._size:
return b''
# clamp size to lowest allowed value
bytes_left = self._size - self._nbr
if size > -1:
size = min(bytes_left, size)
else:
size = bytes_left
# END handle size
data = self._stream.readline(size)
self._nbr += len(data)
# handle final byte
if self._size - self._nbr == 0:
self._stream.read(1)
# END finish reading
return data
def readlines(self, size=-1):
if self._nbr == self._size:
return []
# leave all additional logic to our readline method, we just check the size
out = []
nbr = 0
while True:
line = self.readline()
if not line:
break
out.append(line)
if size > -1:
nbr += len(line)
if nbr > size:
break
# END handle size constraint
# END readline loop
return out
# skipcq: PYL-E0301
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def __del__(self):
bytes_left = self._size - self._nbr
if bytes_left:
# read and discard - seeking is impossible within a stream
# includes terminating newline
self._stream.read(bytes_left + 1)
# END handle incomplete read
def __init__(self, working_dir=None):
"""Initialize this instance with:
:param working_dir:
Git directory we should work in. If None, we always work in the current
directory as returned by os.getcwd().
It is meant to be the working tree directory if available, or the
.git directory in case of bare repositories."""
super(Git, self).__init__()
self._working_dir = expand_path(working_dir)
self._git_options = ()
self._persistent_git_options = []
# Extra environment variables to pass to git commands
self._environment = {}
# cached command slots
self.cat_file_header = None
self.cat_file_all = None
def __getattr__(self, name):
"""A convenience method as it allows to call the command as if it was
an object.
:return: Callable object that will execute call _call_process with your arguments."""
if name[0] == '_':
return LazyMixin.__getattr__(self, name)
return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)
def set_persistent_git_options(self, **kwargs):
"""Specify command line options to the git executable
for subsequent subcommand calls
:param kwargs:
is a dict of keyword arguments.
these arguments are passed as in _call_process
but will be passed to the git command rather than
the subcommand.
"""
self._persistent_git_options = self.transform_kwargs(
split_single_char_options=True, **kwargs)
def _set_cache_(self, attr):
if attr == '_version_info':
# We only use the first 4 numbers, as everything else could be strings in fact (on windows)
version_numbers = self._call_process('version').split(' ')[2]
self._version_info = tuple(int(n) for n in version_numbers.split('.')[:4] if n.isdigit())
else:
super(Git, self)._set_cache_(attr)
# END handle version info
    @property
    def working_dir(self):
        """:return: Git directory we are working on"""
        return self._working_dir
    @property
    def version_info(self):
        """
        :return: tuple(int, int, int, int) tuple with integers representing the major, minor
            and additional version numbers as parsed from git version.
            This value is generated on demand and is cached (see _set_cache_)"""
        return self._version_info
    def execute(self, command,
                istream=None,
                with_extended_output=False,
                with_exceptions=True,
                as_process=False,
                output_stream=None,
                stdout_as_string=True,
                kill_after_timeout=None,
                with_stdout=True,
                universal_newlines=False,
                shell=None,
                env=None,
                max_chunk_size=io.DEFAULT_BUFFER_SIZE,
                **subprocess_kwargs
                ):
        """Handles executing the command on the shell and consumes and returns
        the returned information (stdout)
        :param command:
            The command argument list to execute.
            It should be a string, or a sequence of program arguments. The
            program to execute is the first item in the args sequence or string.
        :param istream:
            Standard input filehandle passed to subprocess.Popen.
        :param with_extended_output:
            Whether to return a (status, stdout, stderr) tuple.
        :param with_exceptions:
            Whether to raise an exception when git returns a non-zero status.
        :param as_process:
            Whether to return the created process instance directly from which
            streams can be read on demand. This will render with_extended_output and
            with_exceptions ineffective - the caller will have
            to deal with the details himself.
            It is important to note that the process will be placed into an AutoInterrupt
            wrapper that will interrupt the process once it goes out of scope. If you
            use the command in iterators, you should pass the whole process instance
            instead of a single stream.
        :param output_stream:
            If set to a file-like object, data produced by the git command will be
            output to the given stream directly.
            This feature only has any effect if as_process is False. Processes will
            always be created with a pipe due to issues with subprocess.
            This merely is a workaround as data will be copied from the
            output pipe to the given output stream directly.
            Judging from the implementation, you shouldn't use this flag !
        :param stdout_as_string:
            if False, the commands standard output will be bytes. Otherwise, it will be
            decoded into a string using the default encoding (usually utf-8).
            The latter can fail, if the output contains binary data.
        :param env:
            A dictionary of environment variables to be passed to `subprocess.Popen`.
        :param max_chunk_size:
            Maximum number of bytes in one chunk of data passed to the output_stream in
            one invocation of write() method. If the given number is not positive then
            the default value is used.
        :param subprocess_kwargs:
            Keyword arguments to be passed to subprocess.Popen. Please note that
            some of the valid kwargs are already set by this method, the ones you
            specify may not be the same ones.
        :param with_stdout: If True, default True, we open stdout on the created process
        :param universal_newlines:
            if True, pipes will be opened as text, and lines are split at
            all known line endings.
        :param shell:
            Whether to invoke commands through a shell (see `Popen(..., shell=True)`).
            It overrides :attr:`USE_SHELL` if it is not `None`.
        :param kill_after_timeout:
            To specify a timeout in seconds for the git command, after which the process
            should be killed. This will have no effect if as_process is set to True. It is
            set to None by default and will let the process run until the timeout is
            explicitly specified. This feature is not supported on Windows. It's also worth
            noting that kill_after_timeout uses SIGKILL, which can have negative side
            effects on a repository. For example, stale locks in case of git gc could
            render the repository incapable of accepting changes until the lock is manually
            removed.
        :return:
            * str(output) if extended_output = False (Default)
            * tuple(int(status), str(stdout), str(stderr)) if extended_output = True
            if output_stream is True, the stdout value will be your output stream:
            * output_stream if extended_output = False
            * tuple(int(status), output_stream, str(stderr)) if extended_output = True
            Note git is executed with LC_MESSAGES="C" to ensure consistent
            output regardless of system language.
        :raise GitCommandError:
        :note:
           If you add additional keyword arguments to the signature of this method,
           you must update the execute_kwargs tuple housed in this module."""
        if self.GIT_PYTHON_TRACE and (self.GIT_PYTHON_TRACE != 'full' or as_process):
            log.info(' '.join(command))
        # Allow the user to have the command executed in their working dir.
        cwd = self._working_dir or os.getcwd()
        # Start the process
        inline_env = env
        env = os.environ.copy()
        # Attempt to force all output to plain ascii english, which is what some parsing code
        # may expect.
        # According to stackoverflow (http://goo.gl/l74GC8), we are setting LANGUAGE as well
        # just to be sure.
        env["LANGUAGE"] = "C"
        env["LC_ALL"] = "C"
        env.update(self._environment)
        if inline_env is not None:
            env.update(inline_env)
        if is_win:
            cmd_not_found_exception = OSError
            if kill_after_timeout:
                raise GitCommandError(command, '"kill_after_timeout" feature is not supported on Windows.')
        else:
            if sys.version_info[0] > 2:
                cmd_not_found_exception = FileNotFoundError  # NOQA # exists, flake8 unknown @UndefinedVariable
            else:
                cmd_not_found_exception = OSError
        # end handle
        stdout_sink = (PIPE
                       if with_stdout
                       else getattr(subprocess, 'DEVNULL', None) or open(os.devnull, 'wb'))
        istream_ok = "None"
        if istream:
            istream_ok = "<valid stream>"
        log.debug("Popen(%s, cwd=%s, universal_newlines=%s, shell=%s, istream=%s)",
                  command, cwd, universal_newlines, shell, istream_ok)
        try:
            proc = Popen(command,
                         env=env,
                         cwd=cwd,
                         bufsize=-1,
                         stdin=istream,
                         stderr=PIPE,
                         stdout=stdout_sink,
                         shell=shell is not None and shell or self.USE_SHELL,
                         close_fds=is_posix,  # unsupported on windows
                         universal_newlines=universal_newlines,
                         creationflags=PROC_CREATIONFLAGS,
                         **subprocess_kwargs
                         )
        except cmd_not_found_exception as err:
            raise GitCommandNotFound(command, err) from err
        if as_process:
            return self.AutoInterrupt(proc, command)
        def _kill_process(pid):
            """ Callback method to kill a process. """
            # list direct children first so they can be terminated as well
            p = Popen(['ps', '--ppid', str(pid)], stdout=PIPE,
                      creationflags=PROC_CREATIONFLAGS)
            child_pids = []
            for line in p.stdout:
                if len(line.split()) > 0:
                    local_pid = (line.split())[0]
                    if local_pid.isdigit():
                        child_pids.append(int(local_pid))
            try:
                # Windows does not have SIGKILL, so use SIGTERM instead
                sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
                os.kill(pid, sig)
                for child_pid in child_pids:
                    try:
                        os.kill(child_pid, sig)
                    except OSError:
                        pass
                kill_check.set()    # tell the main routine that the process was killed
            except OSError:
                # It is possible that the process gets completed in the duration after timeout
                # happens and before we try to kill the process.
                pass
            return
        # end
        if kill_after_timeout:
            kill_check = threading.Event()
            watchdog = threading.Timer(kill_after_timeout, _kill_process, args=(proc.pid,))
        # Wait for the process to return
        status = 0
        stdout_value = b''
        stderr_value = b''
        try:
            if output_stream is None:
                if kill_after_timeout:
                    watchdog.start()
                stdout_value, stderr_value = proc.communicate()
                if kill_after_timeout:
                    watchdog.cancel()
                    if kill_check.isSet():
                        stderr_value = ('Timeout: the command "%s" did not complete in %d '
                                        'secs.' % (" ".join(command), kill_after_timeout)).encode(defenc)
                # strip trailing "\n"
                if stdout_value.endswith(b"\n"):
                    stdout_value = stdout_value[:-1]
                if stderr_value.endswith(b"\n"):
                    stderr_value = stderr_value[:-1]
                status = proc.returncode
            else:
                max_chunk_size = max_chunk_size if max_chunk_size and max_chunk_size > 0 else io.DEFAULT_BUFFER_SIZE
                stream_copy(proc.stdout, output_stream, max_chunk_size)
                stdout_value = proc.stdout.read()
                stderr_value = proc.stderr.read()
                # strip trailing "\n"
                if stderr_value.endswith(b"\n"):
                    stderr_value = stderr_value[:-1]
                status = proc.wait()
            # END stdout handling
        finally:
            proc.stdout.close()
            proc.stderr.close()
        if self.GIT_PYTHON_TRACE == 'full':
            cmdstr = " ".join(command)
            def as_text(stdout_value):
                return not output_stream and safe_decode(stdout_value) or '<OUTPUT_STREAM>'
            # end
            if stderr_value:
                log.info("%s -> %d; stdout: '%s'; stderr: '%s'",
                         cmdstr, status, as_text(stdout_value), safe_decode(stderr_value))
            elif stdout_value:
                log.info("%s -> %d; stdout: '%s'", cmdstr, status, as_text(stdout_value))
            else:
                log.info("%s -> %d", cmdstr, status)
        # END handle debug printing
        if with_exceptions and status != 0:
            raise GitCommandError(command, status, stderr_value, stdout_value)
        if isinstance(stdout_value, bytes) and stdout_as_string:  # could also be output_stream
            stdout_value = safe_decode(stdout_value)
        # Allow access to the command's status code
        if with_extended_output:
            return (status, stdout_value, safe_decode(stderr_value))
        else:
            return stdout_value
def environment(self):
return self._environment
def update_environment(self, **kwargs):
"""
Set environment variables for future git invocations. Return all changed
values in a format that can be passed back into this function to revert
the changes:
``Examples``::
old_env = self.update_environment(PWD='/tmp')
self.update_environment(**old_env)
:param kwargs: environment variables to use for git processes
:return: dict that maps environment variables to their old values
"""
old_env = {}
for key, value in kwargs.items():
# set value if it is None
if value is not None:
old_env[key] = self._environment.get(key)
self._environment[key] = value
# remove key from environment if its value is None
elif key in self._environment:
old_env[key] = self._environment[key]
del self._environment[key]
return old_env
@contextmanager
def custom_environment(self, **kwargs):
"""
A context manager around the above ``update_environment`` method to restore the
environment back to its previous state after operation.
``Examples``::
with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'):
repo.remotes.origin.fetch()
:param kwargs: see update_environment
"""
old_env = self.update_environment(**kwargs)
try:
yield
finally:
self.update_environment(**old_env)
def transform_kwarg(self, name, value, split_single_char_options):
if len(name) == 1:
if value is True:
return ["-%s" % name]
elif value not in (False, None):
if split_single_char_options:
return ["-%s" % name, "%s" % value]
else:
return ["-%s%s" % (name, value)]
else:
if value is True:
return ["--%s" % dashify(name)]
elif value is not False and value is not None:
return ["--%s=%s" % (dashify(name), value)]
return []
def transform_kwargs(self, split_single_char_options=True, **kwargs):
"""Transforms Python style kwargs into git command line options."""
args = []
kwargs = OrderedDict(sorted(kwargs.items(), key=lambda x: x[0]))
for k, v in kwargs.items():
if isinstance(v, (list, tuple)):
for value in v:
args += self.transform_kwarg(k, value, split_single_char_options)
else:
args += self.transform_kwarg(k, v, split_single_char_options)
return args
@classmethod
def __unpack_args(cls, arg_list):
if not isinstance(arg_list, (list, tuple)):
return [str(arg_list)]
outlist = []
for arg in arg_list:
if isinstance(arg_list, (list, tuple)):
outlist.extend(cls.__unpack_args(arg))
# END recursion
else:
outlist.append(str(arg))
# END for each arg
return outlist
    def __call__(self, **kwargs):
        """Specify command line options to the git executable
        for a subcommand call
        :param kwargs:
            is a dict of keyword arguments.
            these arguments are passed as in _call_process
            but will be passed to the git command rather than
            the subcommand.
        ``Examples``::
            git(work_tree='/tmp').difftool()"""
        self._git_options = self.transform_kwargs(
            split_single_char_options=True, **kwargs)
        return self
    def _call_process(self, method, *args, **kwargs):
        """Run the given git command with the specified arguments and return
        the result as a String
        :param method:
            is the command. Contained "_" characters will be converted to dashes,
            such as in 'ls_files' to call 'ls-files'.
        :param args:
            is the list of arguments. If None is included, it will be pruned.
            This allows your commands to call git more conveniently as None
            is realized as non-existent
        :param kwargs:
            It contains key-values for the following:
            - the :meth:`execute()` kwds, as listed in :var:`execute_kwargs`;
            - "command options" to be converted by :meth:`transform_kwargs()`;
            - the `'insert_kwargs_after'` key which its value must match one of ``*args``,
              and any cmd-options will be appended after the matched arg.
        Examples::
            git.rev_list('master', max_count=10, header=True)
        turns into::
            git rev-list max-count 10 --header master
        :return: Same as ``execute``"""
        # Handle optional arguments prior to calling transform_kwargs
        # otherwise these'll end up in args, which is bad.
        exec_kwargs = {k: v for k, v in kwargs.items() if k in execute_kwargs}
        opts_kwargs = {k: v for k, v in kwargs.items() if k not in execute_kwargs}
        insert_after_this_arg = opts_kwargs.pop('insert_kwargs_after', None)
        # Prepare the argument list
        opt_args = self.transform_kwargs(**opts_kwargs)
        ext_args = self.__unpack_args([a for a in args if a is not None])
        if insert_after_this_arg is None:
            args = opt_args + ext_args
        else:
            try:
                index = ext_args.index(insert_after_this_arg)
            except ValueError as err:
                raise ValueError("Couldn't find argument '%s' in args %s to insert cmd options after"
                                 % (insert_after_this_arg, str(ext_args))) from err
            # end handle error
            args = ext_args[:index + 1] + opt_args + ext_args[index + 1:]
        # end handle opts_kwargs
        call = [self.GIT_PYTHON_GIT_EXECUTABLE]
        # add persistent git options
        call.extend(self._persistent_git_options)
        # add the git options, then reset to empty
        # to avoid side_effects
        call.extend(self._git_options)
        self._git_options = ()
        call.append(dashify(method))
        call.extend(args)
        return self.execute(call, **exec_kwargs)
def _parse_object_header(self, header_line):
"""
:param header_line:
<hex_sha> type_string size_as_int
:return: (hex_sha, type_string, size_as_int)
:raise ValueError: if the header contains indication for an error due to
incorrect input sha"""
tokens = header_line.split()
if len(tokens) != 3:
if not tokens:
raise ValueError("SHA could not be resolved, git returned: %r" % (header_line.strip()))
else:
raise ValueError("SHA %s could not be resolved, git returned: %r" % (tokens[0], header_line.strip()))
# END handle actual return value
# END error handling
if len(tokens[0]) != 40:
raise ValueError("Failed to parse header: %r" % header_line)
return (tokens[0], tokens[1], int(tokens[2]))
def _prepare_ref(self, ref):
# required for command to separate refs on stdin, as bytes
refstr = ref
if isinstance(ref, bytes):
# Assume 40 bytes hexsha - bin-to-ascii for some reason returns bytes, not text
refstr = ref.decode('ascii')
elif not isinstance(ref, str):
refstr = str(ref) # could be ref-object
if not refstr.endswith("\n"):
refstr += "\n"
return refstr.encode(defenc)
def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
cur_val = getattr(self, attr_name)
if cur_val is not None:
return cur_val
options = {"istream": PIPE, "as_process": True}
options.update(kwargs)
cmd = self._call_process(cmd_name, *args, **options)
setattr(self, attr_name, cmd)
return cmd
def __get_object_header(self, cmd, ref):
cmd.stdin.write(self._prepare_ref(ref))
cmd.stdin.flush()
return self._parse_object_header(cmd.stdout.readline())
def get_object_header(self, ref):
""" Use this method to quickly examine the type and size of the object behind
the given ref.
:note: The method will only suffer from the costs of command invocation
once and reuses the command in subsequent calls.
:return: (hexsha, type_string, size_as_int)"""
cmd = self._get_persistent_cmd("cat_file_header", "cat_file", batch_check=True)
return self.__get_object_header(cmd, ref)
def get_object_data(self, ref):
""" As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe"""
hexsha, typename, size, stream = self.stream_object_data(ref)
data = stream.read(size)
del(stream)
return (hexsha, typename, size, data)
def stream_object_data(self, ref):
""" As get_object_header, but returns the data as a stream
:return: (hexsha, type_string, size_as_int, stream)
:note: This method is not threadsafe, you need one independent Command instance per thread to be safe !"""
cmd = self._get_persistent_cmd("cat_file_all", "cat_file", batch=True)
hexsha, typename, size = self.__get_object_header(cmd, ref)
return (hexsha, typename, size, self.CatFileContentStream(size, cmd.stdout))
    def clear_cache(self):
        """Clear all kinds of internal caches to release resources.
        Currently persistent commands will be interrupted.
        :return: self"""
        for cmd in (self.cat_file_all, self.cat_file_header):
            if cmd:
                # explicitly trigger AutoInterrupt cleanup to terminate the
                # long-running 'cat-file --batch' processes right away
                cmd.__del__()
        self.cat_file_all = None
        self.cat_file_header = None
        return self
| 38.434151 | 117 | 0.576449 |
acea40dcffd01e0129ebe2fb36089b6c73fb1dc5 | 224 | py | Python | projecteuler/012.py | Alu0331/python | 0a751b5acf067eae1b04e8f34559b7f580317cb3 | [
"MIT"
] | null | null | null | projecteuler/012.py | Alu0331/python | 0a751b5acf067eae1b04e8f34559b7f580317cb3 | [
"MIT"
] | 1 | 2020-12-30T14:34:06.000Z | 2020-12-31T12:00:29.000Z | projecteuler/012.py | Alu0331/python | 0a751b5acf067eae1b04e8f34559b7f580317cb3 | [
"MIT"
def divs(n, m):
    """Return the divisors of n that are <= m, in descending order.

    Iterative replacement for the original recursive version, which made one
    recursive call per candidate divisor and hit Python's recursion limit
    for even moderately large n.
    """
    return [d for d in range(m, 0, -1) if n % d == 0]


def main():
    """Print the first triangle number having at least 100 divisors.

    The original loop reassigned its counter to n*(n+1)/2 (float division
    under Python 3) and then incremented *that*, so it never walked the
    triangle-number sequence and printed the counter instead of the result.
    Generate triangle numbers incrementally instead: tri_k = tri_{k-1} + k.
    """
    k = 1
    tri = 1  # k-th triangle number, kept as an int
    while len(divs(tri, tri)) < 100:
        k += 1
        tri += k
    print(tri)


if __name__ == "__main__":
    main()
acea40f933204788c8146c8c9431251acbb38239 | 258 | py | Python | test/test_last_name.py | Marat-github/python_training_1 | 494752ebcdab53d326cd3c80dec59703d82187cd | [
"Apache-2.0"
] | null | null | null | test/test_last_name.py | Marat-github/python_training_1 | 494752ebcdab53d326cd3c80dec59703d82187cd | [
"Apache-2.0"
] | null | null | null | test/test_last_name.py | Marat-github/python_training_1 | 494752ebcdab53d326cd3c80dec59703d82187cd | [
"Apache-2.0"
] | null | null | null |
def test_last_name_on_home_page(app):
    """The last name shown on the home page must match the one on the edit page."""
    home_contact = app.contact.get_contact_list()[0]
    edit_contact = app.contact.get_contact_info_from_edit_page(0)
    assert home_contact.last_name == edit_contact.last_name
acea41165fb34b47ea94afeb6866ff0083612400 | 5,755 | py | Python | manager.py | darkphoenix2601/Memberadder | c422643efea9933e02899416cb5c6944e66fca69 | [
"MIT"
] | 46 | 2022-02-06T08:08:37.000Z | 2022-03-30T13:00:47.000Z | manager.py | darkphoenix2601/Memberadder | c422643efea9933e02899416cb5c6944e66fca69 | [
"MIT"
] | 6 | 2022-02-15T05:13:09.000Z | 2022-03-17T14:17:31.000Z | manager.py | darkphoenix2601/Memberadder | c422643efea9933e02899416cb5c6944e66fca69 | [
"MIT"
] | 10 | 2022-02-22T04:25:21.000Z | 2022-03-25T20:26:38.000Z | import requests
from telethon.sync import TelegramClient
from telethon.errors.rpcerrorlist import PhoneNumberBannedError
import pickle, pyfiglet
from colorama import init, Fore
import os, random
from time import sleep
# initialize colorama so ANSI colour codes also work on Windows consoles
init()
# colour shortcuts used throughout the menu output
lg = Fore.LIGHTGREEN_EX
w = Fore.WHITE
cy = Fore.CYAN
ye = Fore.YELLOW
r = Fore.RED
n = Fore.RESET
# palette the banner picks a random colour from
colors = [lg, r, w, cy, ye]
def banner():
    """Render the 'Telegram' ASCII-art banner in a randomly chosen colour."""
    figlet = pyfiglet.Figlet(font='slant')
    art = figlet.renderText('Telegram')
    print(f'{random.choice(colors)}{art}{n}')
    print(r+'     Version: 1.1 | Author: Shabani'+n+'\n')
def clr():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    os.system('cls' if os.name == 'nt' else 'clear')
# Interactive main menu: loops forever until the user picks Quit.
while True:
    clr()
    banner()
    print(lg+'[1] Add new accounts'+n)
    print(lg+'[2] Filter all banned accounts'+n)
    print(lg+'[3] List out all the accounts'+n)
    print(lg+'[4] Delete specific accounts'+n)
    print(lg+'[5] Quit')
    a = int(input(f'\nEnter your choice: {r}'))
    if a == 1:
        # Collect (api_id, api_hash, phone) triples, append them to vars.txt,
        # then try to log in once per new account to create a session file.
        with open('vars.txt', 'ab') as g:
            newly_added = []
            while True:
                a = int(input(f'\n{lg}Enter API ID: {r}'))
                b = str(input(f'{lg}Enter API Hash: {r}'))
                c = str(input(f'{lg}Enter Phone Number: {r}'))
                p = ''.join(c.split())  # strip whitespace from the phone number
                pickle.dump([a, b, p], g)
                newly_added.append([a, b, p])
                ab = input(f'\nDo you want to add more accounts?[y/n]: ')
                if 'y' in ab:
                    pass
                else:
                    print('\n'+lg+'[i] Saved all accounts in vars.txt'+n)
                    g.close()
                    sleep(3)
                    clr()
                    print(lg + '[*] Logging in from new accounts...\n')
                    for added in newly_added:
                        c = TelegramClient(f'sessions/{added[2]}', added[0], added[1])
                        try:
                            c.start()
                            # FIX: was f'n\n...' which printed a stray literal 'n'
                            print(f'\n{lg}[+] Logged in - {added[2]}')
                            c.disconnect()
                        except PhoneNumberBannedError:
                            print(f'{r}[!] {added[2]} is banned! Filter it using option 2')
                            continue
                    print('\n')
                    input(f'\n{lg}Press enter to goto main menu...')
                    break
        g.close()
    elif a == 2:
        # Try to authorize every stored account; the ones raising
        # PhoneNumberBannedError get stripped from vars.txt.
        accounts = []
        banned_accs = []
        h = open('vars.txt', 'rb')
        while True:
            try:
                accounts.append(pickle.load(h))
            except EOFError:
                break
        h.close()
        if len(accounts) == 0:
            print(r+'[!] There are no accounts! Please add some and retry')
            sleep(3)
        else:
            for account in accounts:
                api_id = int(account[0])
                api_hash = str(account[1])
                phone = str(account[2])
                client = TelegramClient(f'sessions\\{phone}', api_id, api_hash)
                client.connect()
                if not client.is_user_authorized():
                    try:
                        client.send_code_request(phone)
                        client.sign_in(phone, input('[+] Enter the code: '))
                    except PhoneNumberBannedError:
                        print(r+str(phone) + ' is banned!'+n)
                        banned_accs.append(account)
            if len(banned_accs) == 0:
                print(lg+'Congrats! No banned accounts')
                input('\nPress enter to goto main menu')
            else:
                # rewrite vars.txt with only the surviving accounts
                for m in banned_accs:
                    accounts.remove(m)
                with open('vars.txt', 'wb') as k:
                    for a in accounts:
                        Id = a[0]
                        Hash = a[1]
                        Phone = a[2]
                        pickle.dump([Id, Hash, Phone], k)
                k.close()
                print(lg+'[i] All banned accounts removed'+n)
                input('\nPress enter to goto main menu')
    elif a == 3:
        # Print a simple table of all stored accounts.
        display = []
        j = open('vars.txt', 'rb')
        while True:
            try:
                display.append(pickle.load(j))
            except EOFError:
                break
        j.close()
        print(f'\n{lg}')
        print(f'API ID  |  API Hash  |  Phone')
        print(f'==========================================================')
        i = 0
        for z in display:
            print(f'{z[0]}  |  {z[1]}  |  {z[2]}')
            i += 1
        print(f'==========================================================')
        input('\nPress enter to goto main menu')
    elif a == 4:
        # Let the user pick an account by index, remove its session file
        # and rewrite vars.txt without it.
        accs = []
        f = open('vars.txt', 'rb')
        while True:
            try:
                accs.append(pickle.load(f))
            except EOFError:
                break
        f.close()
        i = 0
        print(f'{lg}[i] Choose an account to delete\n')
        for acc in accs:
            print(f'{lg}[{i}] {acc[2]}{n}')
            i += 1
        index = int(input(f'\n{lg}[+] Enter a choice: {n}'))
        phone = str(accs[index][2])
        session_file = phone + '.session'
        if os.name == 'nt':
            os.system(f'del sessions\\{session_file}')
        else:
            os.system(f'rm sessions/{session_file}')
        del accs[index]
        f = open('vars.txt', 'wb')
        for account in accs:
            pickle.dump(account, f)
        print(f'\n{lg}[+] Account Deleted{n}')
        input(f'{lg}Press enter to goto main menu{n}')
        f.close()
    elif a == 5:
        clr()
        banner()
        quit()
acea4232b048d81515dc0e036db18fba30f0a647 | 6,068 | py | Python | Tests/hosting/editor_svcs/errorlistener.py | dsonbill/IronPython3-NETCore | 8c76bdbec1754233f04b41ecd28e9bae2c862fd0 | [
"Apache-2.0"
] | 2 | 2019-09-21T22:22:30.000Z | 2020-05-09T12:45:51.000Z | Tests/hosting/editor_svcs/errorlistener.py | dsonbill/IronPython3-NETCore | 8c76bdbec1754233f04b41ecd28e9bae2c862fd0 | [
"Apache-2.0"
] | null | null | null | Tests/hosting/editor_svcs/errorlistener.py | dsonbill/IronPython3-NETCore | 8c76bdbec1754233f04b41ecd28e9bae2c862fd0 | [
"Apache-2.0"
] | null | null | null | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
##
## Testing ErrorListener
##
from iptest.assert_util import *
skiptest("win32")
load_iron_python_test()
import Microsoft.Scripting.Hosting
from Microsoft.Scripting import Severity, SourceCodeKind, SourceSpan, SourceLocation
from Microsoft.Scripting.Hosting import ErrorListener, ScriptSource, ScriptRuntime
from IronPython.Hosting import Python
From, To = SourceLocation, SourceLocation
Warning, Error, FatalError = Severity.Warning, Severity.Error, Severity.FatalError
#------------------------------------------------------------------------------
# Globals
engine = Python.CreateEngine()
#------------------------------------------------------------------------------
# Utils
class MyErrorListener(ErrorListener):
def __init__(self):
self.__errors= []
errors = property(lambda obj: obj.__errors)
def ErrorReported(self, src, msg, span, errorCode, severity):
line = src.GetCodeLine(span.Start.Line);
if line \
and span.Start.Line == span.End.Line \
and span.Start.Column != span.End.Column:
bad = line[span.Start.Column - 1 : span.End.Column - 1]
else:
bad = (span.Start.Line, span.Start.Column)
self.__errors.append((msg, bad, errorCode, severity))
def compile_expression(expression):
source = engine.CreateScriptSourceFromString(expression, SourceCodeKind.Expression)
return compile_source(source)
def compile_file(stmts):
source = engine.CreateScriptSourceFromString(stmts, SourceCodeKind.File)
return compile_source(source)
def compile_source(source):
errorlistener = MyErrorListener()
try:
source.Compile(errorlistener)
except System.Exception as e:
pass
return errorlistener.errors
#------------------------------------------------------------------------------
# Tests
def test_no_errors():
AreEqual([], compile_expression("1+1"))
def test_empty():
AreEqual([], compile_file(""))
expected = [
("unexpected EOF while parsing", (1,1), 17, FatalError),
]
actual = compile_expression("")
AreEqual(expected, actual)
def test_unexpected_token():
expected = [
("unexpected token 'foo'", "foo", 16, FatalError)
]
actual = compile_expression("1.foo")
AreEqual(expected, actual)
def test_multiple_errors():
expected = [
("unexpected token 'print'", "print", 16, FatalError),
("EOL while scanning single-quoted string", '"hello', 16, FatalError),
("unexpected token 'print'", "print", 16, FatalError),
]
actual = compile_expression("""print "hello""")
AreEqual(expected, actual)
def test_not_indented_class():
expected = [
("expected an indented block", "pass", 32, FatalError),
]
code = """\
class Foo:
pass"""
AreEqual(expected, compile_file(code))
def test_bad_indentation():
expected = [
("unindent does not match any outer indentation level", ' ', 32, FatalError),
]
code = """\
class Foo:
pass
pass"""
AreEqual(expected, compile_file(code))
def test_non_fatal_error():
expected = [
("'break' outside loop", "break", 16, FatalError),
]
code = """\
1+1
break"""
AreEqual(expected, compile_file(code))
def test_assignment_to_none():
expected = [
("cannot assign to None", "None", 80, FatalError),
]
actual = compile_file("None = 42")
AreEqual(expected, actual)
def test_multiple_erroneous_statements():
expected = [
("cannot assign to None", "None", 80, FatalError),
("cannot assign to None", "None", 80, FatalError),
]
code = """\
None = 2
None = 3"""
AreEqual(expected, compile_file(code))
def test_warning():
expected = [
("name 'a' is assigned to before global declaration", "global a", -1, Warning),
]
code = """\
def foo():
a=2
global a"""
AreEqual(expected, compile_file(code))
def test_should_report_multiple_warnings_negative():
"Bug #17541, http://www.codeplex.com/IronPython/WorkItem/View.aspx?WorkItemId=17541"
expected = [
("Variable a assigned before global declaration", "global a", -1, Warning),
("Variable b assigned before global declaration", "global b", -1, Warning),
]
code = """\
def foo():
a=2
global a
def bar():
b=2
global b"""
AssertError(AssertionError, AreEqual, expected, compile_file(code))
def test_should_report_both_errors_and_warnings_negative():
"Bug #17541, http://www.codeplex.com/IronPython/WorkItem/View.aspx?WorkItemId=17541"
expected = [
("cannot assign to None", "None", -1, Error),
("Variable a assigned before global declaration", "global a", -1, Warning),
]
code = """\
None = 2
def foo():
a=2
global a"""
AssertError(AssertionError, AreEqual, expected, compile_file(code))
def test_all_together():
expected = [
('cannot assign to None', 'None', 80,FatalError),
]
code = """\
None = 2
dict={foo}
def foo():
a=2
global a"""
AreEqual(expected, compile_file(code))
run_test(__name__)
| 30.959184 | 96 | 0.592947 |
acea42674027398d0977b5d47912776524661d4f | 15,144 | py | Python | esp32/components/graphics/tools/compile-tilemap.py | nsec/nsec16 | 7c012abac54a4f7627da27dce38b0370918b1717 | [
"MIT"
] | 25 | 2017-12-22T18:49:30.000Z | 2021-11-09T11:59:42.000Z | esp32/components/graphics/tools/compile-tilemap.py | nsec/nsec16 | 7c012abac54a4f7627da27dce38b0370918b1717 | [
"MIT"
] | 6 | 2018-10-01T19:27:51.000Z | 2021-05-30T21:28:02.000Z | esp32/components/graphics/tools/compile-tilemap.py | nsec/nsec16 | 7c012abac54a4f7627da27dce38b0370918b1717 | [
"MIT"
] | 2 | 2019-03-22T03:15:30.000Z | 2020-11-05T19:39:57.000Z | #!/usr/bin/env python
"""Compile scene definition from SVG into binary tilemap format.
The source is a standard SVG file that can be edited with Inkscape, but with
several restrictions:
- The document origin is at the top-left corner of the document, so older
Inkscape versions may not work.
- The width and the height must be divisible by the tile size - 24px - and no
unit scaling is allowed.
- All objects must be placed in layers named l0 through l8.
- The only supported kind of element is the <image /> element. Images must be
"linked" and not "embedded" into the document.
- It is recommended to keep the SVG and all images in the same directory, so
that the "xlink:href" attribute contains only the filename, without the path
component. Inkscape keeps track of the absolute path in the
"sodipodi:absref" attribute but its value is ignored.
- All images must be aligned to the grid, i.e. x and y coordinates must be
multiples of the tile size. Image width and height may be arbitrary and they
can span several tiles, but not more than three tiles. Images larger than
that must be split into multiple tiles, and it is generally recommended to
avoid the use of many multi-tile images. No element scaling is allowed;
while the compilation will probably work with scaled images, the rendering
will likely produce an unexpected result.
- One layer cannot contain more than one element at the same (x,y) coordinate.
Overlapping images must be put into different layer.
The following document may be used as a template:
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="1200"
height="1200"
viewBox="0 0 1200 1200"
version="1.1"
id="svg1"
inkscape:version="1.0.1 (3bc2e813f5, 2020-09-07, custom)">
<defs id="defs2" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:document-units="px"
showgrid="true"
units="px"
borderlayer="true">
<inkscape:grid type="xygrid" id="grid833" spacingx="24" spacingy="24" dotted="true" />
</sodipodi:namedview>
<metadata id="metadata5">
<rdf:RDF>
<cc:Work rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
<cc:license rdf:resource="http://creativecommons.org/licenses/by/4.0/" />
</cc:Work>
<cc:License rdf:about="http://creativecommons.org/licenses/by/4.0/">
<cc:permits rdf:resource="http://creativecommons.org/ns#Reproduction" />
<cc:permits rdf:resource="http://creativecommons.org/ns#Distribution" />
<cc:requires rdf:resource="http://creativecommons.org/ns#Notice" />
<cc:requires rdf:resource="http://creativecommons.org/ns#Attribution" />
<cc:permits rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
</cc:License>
</rdf:RDF>
</metadata>
<g inkscape:groupmode="layer" id="layer1" inkscape:label="l0" />
<g inkscape:groupmode="layer" id="layer2" inkscape:label="l1" />
<g inkscape:groupmode="layer" id="layer3" inkscape:label="l2" />
<g inkscape:groupmode="layer" id="layer4" inkscape:label="l3" />
<g inkscape:groupmode="layer" id="layer5" inkscape:label="l4" />
<g inkscape:groupmode="layer" id="layer6" inkscape:label="l5" />
<g inkscape:groupmode="layer" id="layer7" inkscape:label="l6" />
<g inkscape:groupmode="layer" id="layer8" inkscape:label="l7" />
<g inkscape:groupmode="layer" id="layer9" inkscape:label="x" />
</svg>
"""
import math
import struct
import sys
import xml.etree.ElementTree as ET
from dataclasses import dataclass
from images_registry import load_images_registry
DEFINED_LAYERS = 8
TILE_SIZE = 24
class BlockedMask:
"""Area of the tilemap blocked for player movement."""
def __init__(self, width: int, height: int):
self.width = width
self.height = height
self.line_words = math.ceil(width / (2 * TILE_SIZE))
self.area = [0] * (self.line_words * math.ceil(height / 6))
def block_pixel(self, x: int, y: int):
"""Exclude the pixel from the area allowed for character movement."""
if x < 0 or x >= self.width or y < 0 or y >= self.height:
return
a = y // 6
b = x // (2 * TILE_SIZE)
c = (x % (2 * TILE_SIZE)) // 6
self.area[a * self.line_words + b] |= 1 << c
def compute_tilemap_mask(self, tile_x: int, tile_y: int):
reduced = [0] * 16
for x in range(tile_x * TILE_SIZE, (tile_x + 1) * TILE_SIZE):
for y in range(tile_y * TILE_SIZE, (tile_y + 1) * TILE_SIZE):
index = (y % TILE_SIZE) // 6 * 4 + (x % TILE_SIZE) // 6
reduced[index] += self.area[y * self.width + x]
mask = 0
for index, value in enumerate(reduced):
mask |= (1 if value > 18 else 0) << index
return mask
class Empty:
"""Representation of an unoccupied position on the grid."""
pass
@dataclass
class Image:
"""Parsed reference to an image file."""
href: str = None
width: int = 0
height: int = 0
class Tile:
"""A single tile reference with merged layers."""
def __init__(self, x: int, y: int):
self.components = [Empty] * DEFINED_LAYERS
self.dependency = {
'backward_x': 0,
'backward_y': 0,
'forward_x': 0,
'forward_y': 0,
}
self.flags = {
'jpeg': False,
}
self.x = x
self.y = y
def __getitem__(self, key):
return self.components[key]
def __setitem__(self, key, value):
self.components[key] = value
class Tilemap:
"""A grid of tiles.
This class defines the sequential layout of tiles in the output binary.
"""
def __init__(self, width_tiles: int, height_tiles: int):
self.width_tiles = width_tiles
self.height_tiles = height_tiles
self.tiles = []
for y in range(height_tiles):
for x in range(width_tiles):
self.tiles.append(Tile(x=x, y=y))
def __len__(self):
return len(self.tiles)
def __getitem__(self, key):
if isinstance(key, int):
return self.tiles[key]
if isinstance(key, tuple) and len(key) == 2:
if key[0] < 0 or key[1] < 0:
raise KeyError()
if key[0] >= self.width_tiles or key[1] >= self.height_tiles:
raise KeyError()
return self.tiles[key[1] * self.width_tiles + key[0]]
raise TypeError(
'Tilemap supports two indexing modes: traditional list indexing '
'with an integer, or two integer tuple with x and y coordinates '
'on the grid.')
def parse_blocked_layer_into_blocked_maks(layer, blocked):
"""Read all rectangles in the layer and place pixels into the blocked area."""
if any(i.tag != '{http://www.w3.org/2000/svg}rect' for i in layer):
raise ValueError(
'The blocking layer must be built from rectangles. Any elements '
'other than <rect/> are not supported.')
for rect in layer:
id_, width, height, rect_x, rect_y = (
rect.attrib['id'],
int(float(rect.attrib['width'])),
int(float(rect.attrib['height'])),
int(float(rect.attrib['x'])),
int(float(rect.attrib['y'])),
)
for x in range(rect_x, rect_x + width):
for y in range(rect_y, rect_y + height):
blocked.block_pixel(x, y)
def parse_graphic_layer_into_tilemap(layer_id, layer, tilemap):
"""Read all images from a single layer and place the on the grid."""
if any(i.tag != '{http://www.w3.org/2000/svg}image' for i in layer):
raise ValueError(
f'Layer {layer_id} contains some non-image elements. Graphic '
f'layers may contain only <image/> elements that reference PNG '
f'files.')
for image in layer:
id_, href, width, height, x, y = (
image.attrib['id'],
image.attrib['{http://www.w3.org/1999/xlink}href'],
float(image.attrib['width']),
float(image.attrib['height']),
float(image.attrib['x']),
float(image.attrib['y']),
)
if x % TILE_SIZE != 0.0 or y % TILE_SIZE != 0.0:
raise ValueError(
f'Image id="{id_}" in layer {layer_id} is not alligned to '
f'grid: x={x}, y={y}')
if width == 0.0 or height == 0.0:
raise ValueError(
f'Image id="{id_}" in layer {layer_id} at x={x}, y={y} has zero size.')
if width % 1 != 0.0 or height % 1 != 0.0:
raise ValueError(
f'Image id="{id_}" in layer {layer_id} does not have an exact '
f'size: width={width}, height={height}. It is not recommended, '
f'but the size of images does not have to be a multiple of tile '
f'size. However, images cannot be scaled to a fraction of a pixel.')
width, height, x, y = int(width), int(height), int(x), int(y)
tile_x, tile_y = x // TILE_SIZE, y // TILE_SIZE
if width > TILE_SIZE * 3 or height > TILE_SIZE * 3:
raise ValueError(
f'Image id="{id_}" in layer {layer_id} is larger than three '
f'tiles. The maximum allowed size is {TILE_SIZE * 3}px in '
f'either dimention. Larger images must be split into several '
f'smaller ones.')
try:
if tilemap[tile_x, tile_y][layer_id] is not Empty:
raise ValueError(
f'Layer {layer_id} has more than one tile at x={x}, y={y}')
except KeyError:
raise ValueError(
f'Image id="{id_}" in layer {layer_id} is outside of the '
f'grid at x={x}, y={y}')
tilemap[tile_x, tile_y][layer_id] = Image(href, width, height)
overlap_x, overlap_y = math.ceil(width / TILE_SIZE), math.ceil(height / TILE_SIZE)
tilemap[tile_x, tile_y].dependency['forward_x'] = max(
overlap_x - 1, tilemap[tile_x, tile_y].dependency['forward_x'])
tilemap[tile_x, tile_y].dependency['forward_y'] = max(
overlap_y - 1, tilemap[tile_x, tile_y].dependency['forward_y'])
for i in range(overlap_x):
for j in range(overlap_y):
tilemap[tile_x + i, tile_y + j].dependency['backward_x'] = max(
i,
tilemap[tile_x + i, tile_y + j].dependency['backward_x'])
tilemap[tile_x + i, tile_y + j].dependency['backward_y'] = max(
j,
tilemap[tile_x + i, tile_y + j].dependency['backward_y'])
def parse_tilemap(source_path):
"""Prase the SVG into a tilemap grid."""
source = ET.parse(source_path)
source_root = source.getroot()
width = int(source_root.attrib['width'])
height = int(source_root.attrib['height'])
if width % TILE_SIZE != 0 or height % TILE_SIZE != 0:
raise ValueError(
f'Canvas width and height must be divisible by {TILE_SIZE} to '
f'create a whole number of tiles.')
if source_root.attrib['viewBox'] != f'0 0 {width} {height}':
raise ValueError(
f'Document viewBox attribute must be set to "0 0 {width} '
f'{height}" to avoid coordinates scaling.')
width_tiles = width // TILE_SIZE
height_tiles = height // TILE_SIZE
tilemap = Tilemap(width_tiles, height_tiles)
for layer_id in range(0, DEFINED_LAYERS):
for layer in source_root:
if layer.attrib.get('{http://www.inkscape.org/namespaces/inkscape}label') == f'l{layer_id}':
break
else:
raise ValueError(
f'Layer group "l{layer_id}" is not found in the document.')
parse_graphic_layer_into_tilemap(layer_id, layer, tilemap)
for layer in source_root:
if layer.attrib.get('{http://www.inkscape.org/namespaces/inkscape}label') == 'x':
break
else:
raise ValueError(
f'Layer group "x" is not found in the document.')
blocked = BlockedMask(width, height)
parse_blocked_layer_into_blocked_maks(layer, blocked)
return tilemap, blocked
def main(images_registry_path, source_path, scene_path, blocked_path):
images_registry = load_images_registry(images_registry_path)
tilemap, blocked = parse_tilemap(source_path)
with open(scene_path, 'wb') as f:
# Add two dummy rows at the beginning.
for y in range(2):
for _ in range(((DEFINED_LAYERS + 2) * (tilemap.width_tiles + 2))):
f.write(struct.pack('<H', 0))
for y in range(tilemap.height_tiles):
# Add two dummy columns at the beginning of each row.
for _ in range(2 * (DEFINED_LAYERS + 2)):
f.write(struct.pack('<H', 0))
for x in range(tilemap.width_tiles):
tile = tilemap[x, y]
for layer_id in range(DEFINED_LAYERS):
image = tile[layer_id]
if image is Empty:
f.write(struct.pack('<H', 0))
elif image.href in images_registry:
if images_registry[image.href]['format'] == 'JPEG':
tile.flags['jpeg'] = True
f.write(struct.pack('<H', images_registry[image.href]['index']))
else:
raise Exception(
f'Image {image.href} is not in the registry.')
# Sanity check.
for i in tile.dependency.values():
assert(0 <= i < 3)
dependency = 0
dependency |= tile.dependency['backward_x'] << 6
dependency |= tile.dependency['backward_y'] << 4
dependency |= tile.dependency['forward_x'] << 2
dependency |= tile.dependency['forward_y']
flags = 0
flags |= 2 if tile.flags['jpeg'] else 0
flags |= 4 if dependency else 0
f.write(struct.pack('<H', flags))
f.write(struct.pack('<H', dependency))
with open(blocked_path, 'wb') as f:
f.write(bytes(blocked.area))
if __name__ == '__main__':
if len(sys.argv) != 5:
print(f'Usage: {sys.argv[0]} <images registry> <source> <scene file> <blocked file>')
sys.exit(1)
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
| 36.403846 | 104 | 0.595087 |
acea4403a7b962643a9d05ccea6fbd5fa5eb052f | 2,486 | py | Python | program/qiskit-backends/hello_quantum.py | ctuning/qiskit | 33126f7c2e00331303727b712717ded6c7420e18 | [
"BSD-3-Clause"
] | 7 | 2018-05-05T09:47:54.000Z | 2019-04-04T10:00:38.000Z | program/qiskit-backends/hello_quantum.py | ctuning/qiskit | 33126f7c2e00331303727b712717ded6c7420e18 | [
"BSD-3-Clause"
] | 7 | 2018-03-13T15:38:12.000Z | 2018-12-21T15:15:20.000Z | program/qiskit-backends/hello_quantum.py | ctuning/qiskit | 33126f7c2e00331303727b712717ded6c7420e18 | [
"BSD-3-Clause"
] | 2 | 2018-10-05T11:58:06.000Z | 2020-01-19T21:10:17.000Z | """
Example used in the readme. In this example a Bell state is made
"""
import sys
import os
from pprint import pprint
# so we need a relative position from this file path.
# TODO: Relative imports for intra-package imports are highly discouraged.
# http://stackoverflow.com/a/7506006
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from qiskit import QuantumProgram, QISKitError, available_backends, register
try:
import Qconfig
register(Qconfig.APItoken, Qconfig.config["url"], verify=False,
hub=Qconfig.config["hub"],
group=Qconfig.config["group"],
project=Qconfig.config["project"])
except:
offline = True
print("""WARNING: There's no connection with IBMQuantumExperience servers.
cannot test I/O intesive tasks, will only test CPU intensive tasks
running the jobs in the local simulator""")
# Running this block before registering quietly returns a list of local-only simulators
#
print("The backends available for use are:")
backends = available_backends()
pprint(backends)
print("\n")
if 'CK_IBM_BACKEND' in os.environ:
backend = os.environ['CK_IBM_BACKEND']
if backend not in backends:
print("Your choice '%s' was not available, so picking a random one for you..." % backend)
backend = backends[0]
print("Picked '%s' backend!" % backend)
try:
# Create a QuantumProgram object instance.
Q_program = QuantumProgram()
# Create a Quantum Register called "qr" with 2 qubits.
qr = Q_program.create_quantum_register("qr", 2)
# Create a Classical Register called "cr" with 2 bits.
cr = Q_program.create_classical_register("cr", 2)
# Create a Quantum Circuit called "qc". involving the Quantum Register "qr"
# and the Classical Register "cr".
qc = Q_program.create_circuit("bell", [qr], [cr])
# Add the H gate in the Qubit 0, putting this qubit in superposition.
qc.h(qr[0])
# Add the CX gate on control qubit 0 and target qubit 1, putting
# the qubits in a Bell state
qc.cx(qr[0], qr[1])
# Add a Measure gate to see the state.
qc.measure(qr, cr)
# Compile and execute the Quantum Program in the local_qasm_simulator.
result = Q_program.execute(["bell"], backend=backend, shots=1024, seed=1)
# Show the results.
print(result)
print(result.get_data("bell"))
except QISKitError as ex:
print('There was an error in the circuit!. Error = {}'.format(ex))
| 36.558824 | 92 | 0.687852 |
acea44d44a3470d10e00f6228eb775b676f843f6 | 13,095 | py | Python | fpga/lib/axis/tb/test_axis_register.py | alexforencich/hdg2000 | 57562f76682f673c9c3090b1a6d6dc5e938ac3c5 | [
"MIT"
] | 3 | 2015-03-10T23:43:34.000Z | 2017-04-06T13:52:35.000Z | fpga/lib/axis/tb/test_axis_register.py | alexforencich/hdg2000 | 57562f76682f673c9c3090b1a6d6dc5e938ac3c5 | [
"MIT"
] | null | null | null | fpga/lib/axis/tb/test_axis_register.py | alexforencich/hdg2000 | 57562f76682f673c9c3090b1a6d6dc5e938ac3c5 | [
"MIT"
] | 2 | 2015-02-08T00:18:20.000Z | 2021-06-10T03:46:35.000Z | #!/usr/bin/env python
"""
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
try:
from queue import Queue
except ImportError:
from Queue import Queue
import axis_ep
module = 'axis_register'
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o test_%s.vvp %s" % (module, src)
def dut_axis_register(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser):
if os.system(build_cmd):
raise Exception("Error running build command")
return Cosimulation("vvp -m myhdl test_%s.vvp -lxt2" % module,
clk=clk,
rst=rst,
current_test=current_test,
input_axis_tdata=input_axis_tdata,
input_axis_tvalid=input_axis_tvalid,
input_axis_tready=input_axis_tready,
input_axis_tlast=input_axis_tlast,
input_axis_tuser=input_axis_tuser,
output_axis_tdata=output_axis_tdata,
output_axis_tvalid=output_axis_tvalid,
output_axis_tready=output_axis_tready,
output_axis_tlast=output_axis_tlast,
output_axis_tuser=output_axis_tuser)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_axis_tdata = Signal(intbv(0)[8:])
input_axis_tvalid = Signal(bool(0))
input_axis_tlast = Signal(bool(0))
input_axis_tuser = Signal(bool(0))
output_axis_tready = Signal(bool(0))
# Outputs
input_axis_tready = Signal(bool(0))
output_axis_tdata = Signal(intbv(0)[8:])
output_axis_tvalid = Signal(bool(0))
output_axis_tlast = Signal(bool(0))
output_axis_tuser = Signal(bool(0))
# sources and sinks
source_queue = Queue()
source_pause = Signal(bool(0))
sink_queue = Queue()
sink_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource(clk,
rst,
tdata=input_axis_tdata,
tvalid=input_axis_tvalid,
tready=input_axis_tready,
tlast=input_axis_tlast,
tuser=input_axis_tuser,
fifo=source_queue,
pause=source_pause,
name='source')
sink = axis_ep.AXIStreamSink(clk,
rst,
tdata=output_axis_tdata,
tvalid=output_axis_tvalid,
tready=output_axis_tready,
tlast=output_axis_tlast,
tuser=output_axis_tuser,
fifo=sink_queue,
pause=sink_pause,
name='sink')
# DUT
dut = dut_axis_register(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
yield clk.posedge
print("test 1: test packet")
current_test.next = 1
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 2: longer packet")
current_test.next = 2
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(256)))
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield clk.posedge
print("test 3: test packet with pauses")
current_test.next = 3
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame)
yield clk.posedge
yield delay(64)
yield clk.posedge
source_pause.next = True
yield delay(32)
yield clk.posedge
source_pause.next = False
yield delay(64)
yield clk.posedge
sink_pause.next = True
yield delay(32)
yield clk.posedge
sink_pause.next = False
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 4: back-to-back packets")
current_test.next = 4
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 5: alternate pause source")
current_test.next = 5
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
while input_axis_tvalid or output_axis_tvalid:
source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 6: alternate pause sink")
current_test.next = 6
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
while input_axis_tvalid or output_axis_tvalid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 7: tuser assert")
current_test.next = 7
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame.user = 1
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
assert rx_frame.user[-1]
yield delay(100)
raise StopSimulation
return dut, source, sink, clkgen, check
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| 32.65586 | 117 | 0.526537 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.